Commit 05cc6c5b authored by David S. Miller's avatar David S. Miller

Merge branch 'net-atlantic-MACSec-support-for-AQC-devices'

Igor Russkikh says:

====================
net: atlantic: MACSec support for AQC devices

This patchset introduces MACSec HW offloading support in
Marvell(Aquantia) AQC atlantic driver.

This implementation is a joint effort of Marvell developers on top of
the work started by Antoine Tenart.

v2:
 * clean up the generated code (removed useless bit operations);
 * use WARN_ONCE to avoid log spam;
 * use put_unaligned_be64;
 * removed trailing \0 and length limit for format strings;

v1: https://patchwork.ozlabs.org/cover/1259998/

RFC v2: https://patchwork.ozlabs.org/cover/1252204/

RFC v1: https://patchwork.ozlabs.org/cover/1238082/

Several patches introduce backward-incompatible changes and are
subject for discussion/drop:

1) patch 0007:
  allowing multicast/broadcast traffic when offloading is enabled is needed to
  handle ARP requests, because they have a broadcast destination address;
  With this patch we also match and encrypt/decrypt packets between macsec
  hw and realdev based on device's mac address.
  This can potentially be used to support multiple macsec offloaded
  interfaces on top of one realdev.
  However, in some environments this could lead to problems, e.g. the
  'bridge over macsec' configuration will expect that packets with an unknown
  src MAC come through macsec.
  The patch is questionable, we've used it because our current hw setup
  and requirements both assume that the decryption is done based on mac
  address match only.
  This could be changed by encrypting/decrypting all the traffic (except
  control).

2) patch 0009:
  real_dev features are now propagated to macsec device (when HW
  offloading is enabled), otherwise feature set might lead to HW
  reconfiguration during MACSec configuration.
  Also, HW offloaded macsec should be able to keep LRO LSO features,
  since they are transparent for macsec engine (at least in our hardware).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 673040c3 e8e9e13c
......@@ -20,6 +20,7 @@ config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
......
......@@ -8,6 +8,8 @@
obj-$(CONFIG_AQTION) += atlantic.o
ccflags-y += -I$(src)
atlantic-objs := aq_main.o \
aq_nic.o \
aq_pci_func.o \
......@@ -22,6 +24,9 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
hw_atl/hw_atl_llh.o \
macsec/macsec_api.o
atlantic-$(CONFIG_MACSEC) += aq_macsec.o
atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
\ No newline at end of file
......@@ -11,6 +11,7 @@
#include "aq_vec.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_macsec.h"
#include <linux/ptp_clock_kernel.h>
......@@ -96,6 +97,62 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
#if IS_ENABLED(CONFIG_MACSEC)
static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = {
"MACSec InCtlPackets",
"MACSec InTaggedMissPackets",
"MACSec InUntaggedMissPackets",
"MACSec InNotagPackets",
"MACSec InUntaggedPackets",
"MACSec InBadTagPackets",
"MACSec InNoSciPackets",
"MACSec InUnknownSciPackets",
"MACSec InCtrlPortPassPackets",
"MACSec InUnctrlPortPassPackets",
"MACSec InCtrlPortFailPackets",
"MACSec InUnctrlPortFailPackets",
"MACSec InTooLongPackets",
"MACSec InIgpocCtlPackets",
"MACSec InEccErrorPackets",
"MACSec InUnctrlHitDropRedir",
"MACSec OutCtlPackets",
"MACSec OutUnknownSaPackets",
"MACSec OutUntaggedPackets",
"MACSec OutTooLong",
"MACSec OutEccErrorPackets",
"MACSec OutUnctrlHitDropRedir",
};
static const char *aq_macsec_txsc_stat_names[] = {
"MACSecTXSC%d ProtectedPkts",
"MACSecTXSC%d EncryptedPkts",
"MACSecTXSC%d ProtectedOctets",
"MACSecTXSC%d EncryptedOctets",
};
static const char *aq_macsec_txsa_stat_names[] = {
"MACSecTXSC%dSA%d HitDropRedirect",
"MACSecTXSC%dSA%d Protected2Pkts",
"MACSecTXSC%dSA%d ProtectedPkts",
"MACSecTXSC%dSA%d EncryptedPkts",
};
static const char *aq_macsec_rxsa_stat_names[] = {
"MACSecRXSC%dSA%d UntaggedHitPkts",
"MACSecRXSC%dSA%d CtrlHitDrpRedir",
"MACSecRXSC%dSA%d NotUsingSa",
"MACSecRXSC%dSA%d UnusedSa",
"MACSecRXSC%dSA%d NotValidPkts",
"MACSecRXSC%dSA%d InvalidPkts",
"MACSecRXSC%dSA%d OkPkts",
"MACSecRXSC%dSA%d LatePkts",
"MACSecRXSC%dSA%d DelayedPkts",
"MACSecRXSC%dSA%d UncheckedPkts",
"MACSecRXSC%dSA%d ValidatedOctets",
"MACSecRXSC%dSA%d DecryptedOctets",
};
#endif
static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"DMASystemLoopback",
"PKTSystemLoopback",
......@@ -104,18 +161,38 @@ static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"PHYExternalLoopback",
};
/* Total number of u64 ethtool statistics exported for this device:
 * the fixed device counters, per-queue counters for every active vector,
 * and (when MACSec offload is configured) the MACSec common/SC/SA counters.
 */
static u32 aq_ethtool_n_stats(struct net_device *ndev)
{
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
#if IS_ENABLED(CONFIG_MACSEC)
/* MACSec counters only exist once the offload context is allocated. */
if (nic->macsec_cfg) {
n_stats += ARRAY_SIZE(aq_macsec_stat_names) +
ARRAY_SIZE(aq_macsec_txsc_stat_names) *
aq_macsec_tx_sc_cnt(nic) +
ARRAY_SIZE(aq_macsec_txsa_stat_names) *
aq_macsec_tx_sa_cnt(nic) +
ARRAY_SIZE(aq_macsec_rxsa_stat_names) *
aq_macsec_rx_sa_cnt(nic);
}
#endif
return n_stats;
}
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
cfg = aq_nic_get_cfg(aq_nic);
memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) *
cfg->vecs) * sizeof(u64));
aq_nic_get_stats(aq_nic, data);
memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64));
data = aq_nic_get_stats(aq_nic, data);
#if IS_ENABLED(CONFIG_MACSEC)
data = aq_macsec_get_stats(aq_nic, data);
#endif
}
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
......@@ -123,11 +200,9 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u32 firmware_version;
u32 regs_count;
cfg = aq_nic_get_cfg(aq_nic);
firmware_version = aq_nic_get_fw_version(aq_nic);
regs_count = aq_nic_get_regs_count(aq_nic);
......@@ -139,8 +214,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = regs_count;
drvinfo->eedump_len = 0;
......@@ -153,6 +227,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
#if IS_ENABLED(CONFIG_MACSEC)
int sa;
#endif
cfg = aq_nic_get_cfg(aq_nic);
......@@ -170,6 +247,60 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
#if IS_ENABLED(CONFIG_MACSEC)
if (!aq_nic->macsec_cfg)
break;
memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
p = p + sizeof(aq_macsec_stat_names);
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_txsc *aq_txsc;
if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
continue;
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_macsec_txsc_stat_names[si], i);
p += ETH_GSTRING_LEN;
}
aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
continue;
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_macsec_txsa_stat_names[si],
i, sa);
p += ETH_GSTRING_LEN;
}
}
}
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_rxsc *aq_rxsc;
if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
continue;
aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
continue;
for (si = 0;
si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_macsec_rxsa_stat_names[si],
i, sa);
p += ETH_GSTRING_LEN;
}
}
}
#endif
break;
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
......@@ -209,16 +340,11 @@ static int aq_ethtool_set_phys_id(struct net_device *ndev,
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
int ret = 0;
cfg = aq_nic_get_cfg(aq_nic);
switch (stringset) {
case ETH_SS_STATS:
ret = ARRAY_SIZE(aq_ethtool_stat_names) +
cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
ret = aq_ethtool_n_stats(ndev);
break;
case ETH_SS_PRIV_FLAGS:
ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
......
......@@ -343,6 +343,12 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
u32 (*get_link_capabilities)(struct aq_hw_s *self);
int (*send_macsec_req)(struct aq_hw_s *self,
struct macsec_msg_fw_request *msg,
struct macsec_msg_fw_response *resp);
};
#endif /* AQ_HW_H */
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "aq_macsec.h"
#include "aq_nic.h"
#include <linux/rtnetlink.h>
#include "macsec/macsec_api.h"
#define AQ_MACSEC_KEY_LEN_128_BIT 16
#define AQ_MACSEC_KEY_LEN_192_BIT 24
#define AQ_MACSEC_KEY_LEN_256_BIT 32
/* Bitmask selecting which side of the MACSec state a clear operation
 * touches: the hardware tables, the driver's bookkeeping, or both.
 */
enum aq_clear_type {
/* update HW configuration */
AQ_CLEAR_HW = BIT(0),
/* update SW configuration (busy bits, pointers) */
AQ_CLEAR_SW = BIT(1),
/* update both HW and SW configuration */
AQ_CLEAR_ALL = AQ_CLEAR_HW | AQ_CLEAR_SW,
};
static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
enum aq_clear_type clear_type);
static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
const int sa_num, enum aq_clear_type clear_type);
static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
enum aq_clear_type clear_type);
static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
const int sa_num, enum aq_clear_type clear_type);
static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
enum aq_clear_type clear_type);
static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
const struct macsec_secy *secy);
/* Pack a 6-byte Ethernet address into the two byte-swapped u32 words
 * that the MACSec table records expect.
 */
static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
{
	u32 scratch[2] = { 0 };

	/* Place the MAC in the upper six bytes of the 8-byte scratch area. */
	memcpy((u8 *)scratch + 2, emac, ETH_ALEN);
	mac[0] = swab32(scratch[1]);
	mac[1] = swab32(scratch[0]);
}
/* There's a 1:1 mapping between SecY and TX SC */
static int aq_get_txsc_idx_from_secy(struct aq_macsec_cfg *macsec_cfg,
				     const struct macsec_secy *secy)
{
	int idx;

	if (unlikely(!secy))
		return -1;

	for (idx = 0; idx < AQ_MACSEC_MAX_SC; idx++)
		if (macsec_cfg->aq_txsc[idx].sw_secy == secy)
			return idx;

	return -1;
}
/* Find the driver slot that tracks @rxsc, or -1 if it is not known. */
static int aq_get_rxsc_idx_from_rxsc(struct aq_macsec_cfg *macsec_cfg,
				     const struct macsec_rx_sc *rxsc)
{
	int idx;

	if (unlikely(!rxsc))
		return -1;

	for (idx = 0; idx < AQ_MACSEC_MAX_SC; idx++)
		if (macsec_cfg->aq_rxsc[idx].sw_rxsc == rxsc)
			return idx;

	return -1;
}
/* Convert a hardware SC index back to the driver's TX SC slot; the
 * divisor depends on how many SAs each SC owns in the current mode.
 */
static int aq_get_txsc_idx_from_sc_idx(const enum aq_macsec_sc_sa sc_sa,
				       const int sc_idx)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sc_idx >> 2;	/* four SAs per SC */
	case aq_macsec_sa_sc_2sa_16sc:
		return sc_idx >> 1;	/* two SAs per SC */
	case aq_macsec_sa_sc_1sa_32sc:
		return sc_idx;		/* one SA per SC */
	default:
		WARN_ONCE(true, "Invalid sc_sa");
		return -1;
	}
}
/* Rotate keys u32[8]
 *
 * Reverse the u32 words of @key and byte-swap each one, producing the
 * layout the MACSec engine expects. Only 128/192/256-bit lengths are
 * valid; on any other length the key is left zeroed and a warning is
 * logged (matching the original unrolled implementation).
 */
static void aq_rotate_keys(u32 (*key)[8], const int key_len)
{
	u32 tmp[8] = { 0 };
	int nwords;
	int i;

	memcpy(&tmp, key, sizeof(tmp));
	memset(*key, 0, sizeof(*key));

	switch (key_len) {
	case AQ_MACSEC_KEY_LEN_128_BIT:
	case AQ_MACSEC_KEY_LEN_192_BIT:
	case AQ_MACSEC_KEY_LEN_256_BIT:
		/* One loop replaces the three hand-unrolled variants:
		 * word i takes the byte-swapped mirror word.
		 */
		nwords = key_len / sizeof(u32);
		for (i = 0; i < nwords; i++)
			(*key)[i] = swab32(tmp[nwords - 1 - i]);
		break;
	default:
		pr_warn("Rotate_keys: invalid key_len\n");
	}
}
/* Combine a split 2x32-bit HW counter (low word first) into one u64.
 * Argument is parenthesized for macro hygiene.
 */
#define STATS_2x32_TO_64(stat_field) \
	(((u64)(stat_field)[1] << 32) | (stat_field)[0])
/* Read the device-wide MACSec counters (ingress then egress) from the
 * MSS and widen each 2x32-bit pair into the u64 fields of @stats.
 * Returns 0 on success or the first MSS error code encountered.
 */
static int aq_get_macsec_common_stats(struct aq_hw_s *hw,
struct aq_macsec_common_stats *stats)
{
struct aq_mss_ingress_common_counters ingress_counters;
struct aq_mss_egress_common_counters egress_counters;
int ret;
/* MACSEC counters */
ret = aq_mss_get_ingress_common_counters(hw, &ingress_counters);
if (unlikely(ret))
return ret;
stats->in.ctl_pkts = STATS_2x32_TO_64(ingress_counters.ctl_pkts);
stats->in.tagged_miss_pkts =
STATS_2x32_TO_64(ingress_counters.tagged_miss_pkts);
stats->in.untagged_miss_pkts =
STATS_2x32_TO_64(ingress_counters.untagged_miss_pkts);
stats->in.notag_pkts = STATS_2x32_TO_64(ingress_counters.notag_pkts);
stats->in.untagged_pkts =
STATS_2x32_TO_64(ingress_counters.untagged_pkts);
stats->in.bad_tag_pkts =
STATS_2x32_TO_64(ingress_counters.bad_tag_pkts);
stats->in.no_sci_pkts = STATS_2x32_TO_64(ingress_counters.no_sci_pkts);
stats->in.unknown_sci_pkts =
STATS_2x32_TO_64(ingress_counters.unknown_sci_pkts);
stats->in.ctrl_prt_pass_pkts =
STATS_2x32_TO_64(ingress_counters.ctrl_prt_pass_pkts);
stats->in.unctrl_prt_pass_pkts =
STATS_2x32_TO_64(ingress_counters.unctrl_prt_pass_pkts);
stats->in.ctrl_prt_fail_pkts =
STATS_2x32_TO_64(ingress_counters.ctrl_prt_fail_pkts);
stats->in.unctrl_prt_fail_pkts =
STATS_2x32_TO_64(ingress_counters.unctrl_prt_fail_pkts);
stats->in.too_long_pkts =
STATS_2x32_TO_64(ingress_counters.too_long_pkts);
stats->in.igpoc_ctl_pkts =
STATS_2x32_TO_64(ingress_counters.igpoc_ctl_pkts);
stats->in.ecc_error_pkts =
STATS_2x32_TO_64(ingress_counters.ecc_error_pkts);
stats->in.unctrl_hit_drop_redir =
STATS_2x32_TO_64(ingress_counters.unctrl_hit_drop_redir);
/* Egress side: same widening, different counter set. */
ret = aq_mss_get_egress_common_counters(hw, &egress_counters);
if (unlikely(ret))
return ret;
stats->out.ctl_pkts = STATS_2x32_TO_64(egress_counters.ctl_pkt);
stats->out.unknown_sa_pkts =
STATS_2x32_TO_64(egress_counters.unknown_sa_pkts);
stats->out.untagged_pkts =
STATS_2x32_TO_64(egress_counters.untagged_pkts);
stats->out.too_long = STATS_2x32_TO_64(egress_counters.too_long);
stats->out.ecc_error_pkts =
STATS_2x32_TO_64(egress_counters.ecc_error_pkts);
stats->out.unctrl_hit_drop_redir =
STATS_2x32_TO_64(egress_counters.unctrl_hit_drop_redir);
return 0;
}
/* Fetch the ingress (RX) SA counters for hardware SA @sa_idx and widen
 * them into @stats. Returns 0 on success or the MSS error code.
 */
static int aq_get_rxsa_stats(struct aq_hw_s *hw, const int sa_idx,
struct aq_macsec_rx_sa_stats *stats)
{
struct aq_mss_ingress_sa_counters i_sa_counters;
int ret;
ret = aq_mss_get_ingress_sa_counters(hw, &i_sa_counters, sa_idx);
if (unlikely(ret))
return ret;
stats->untagged_hit_pkts =
STATS_2x32_TO_64(i_sa_counters.untagged_hit_pkts);
stats->ctrl_hit_drop_redir_pkts =
STATS_2x32_TO_64(i_sa_counters.ctrl_hit_drop_redir_pkts);
stats->not_using_sa = STATS_2x32_TO_64(i_sa_counters.not_using_sa);
stats->unused_sa = STATS_2x32_TO_64(i_sa_counters.unused_sa);
stats->not_valid_pkts = STATS_2x32_TO_64(i_sa_counters.not_valid_pkts);
stats->invalid_pkts = STATS_2x32_TO_64(i_sa_counters.invalid_pkts);
stats->ok_pkts = STATS_2x32_TO_64(i_sa_counters.ok_pkts);
stats->late_pkts = STATS_2x32_TO_64(i_sa_counters.late_pkts);
stats->delayed_pkts = STATS_2x32_TO_64(i_sa_counters.delayed_pkts);
stats->unchecked_pkts = STATS_2x32_TO_64(i_sa_counters.unchecked_pkts);
stats->validated_octets =
STATS_2x32_TO_64(i_sa_counters.validated_octets);
stats->decrypted_octets =
STATS_2x32_TO_64(i_sa_counters.decrypted_octets);
return 0;
}
/* Fetch the egress (TX) SA counters for hardware SA @sa_idx and widen
 * them into @stats. Returns 0 on success or the MSS error code.
 */
static int aq_get_txsa_stats(struct aq_hw_s *hw, const int sa_idx,
struct aq_macsec_tx_sa_stats *stats)
{
struct aq_mss_egress_sa_counters e_sa_counters;
int ret;
ret = aq_mss_get_egress_sa_counters(hw, &e_sa_counters, sa_idx);
if (unlikely(ret))
return ret;
stats->sa_hit_drop_redirect =
STATS_2x32_TO_64(e_sa_counters.sa_hit_drop_redirect);
stats->sa_protected2_pkts =
STATS_2x32_TO_64(e_sa_counters.sa_protected2_pkts);
stats->sa_protected_pkts =
STATS_2x32_TO_64(e_sa_counters.sa_protected_pkts);
stats->sa_encrypted_pkts =
STATS_2x32_TO_64(e_sa_counters.sa_encrypted_pkts);
return 0;
}
/* Read the next packet number of the egress SA at @sa_idx into @pn. */
static int aq_get_txsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
{
	struct aq_mss_egress_sa_record rec;
	int err;

	err = aq_mss_get_egress_sa_record(hw, &rec, sa_idx);
	if (unlikely(err))
		return err;

	*pn = rec.next_pn;
	return 0;
}
/* Read the next packet number of the ingress SA at @sa_idx into @pn;
 * a saturated PN reads back as 0.
 */
static int aq_get_rxsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
{
	struct aq_mss_ingress_sa_record rec;
	int err;

	err = aq_mss_get_ingress_sa_record(hw, &rec, sa_idx);
	if (unlikely(err))
		return err;

	*pn = rec.sat_nextpn ? 0 : rec.next_pn;
	return 0;
}
/* Fetch the egress (TX) SC counters for hardware SC @sc_idx and widen
 * them into @stats. Returns 0 on success or the MSS error code.
 */
static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx,
struct aq_macsec_tx_sc_stats *stats)
{
struct aq_mss_egress_sc_counters e_sc_counters;
int ret;
ret = aq_mss_get_egress_sc_counters(hw, &e_sc_counters, sc_idx);
if (unlikely(ret))
return ret;
stats->sc_protected_pkts =
STATS_2x32_TO_64(e_sc_counters.sc_protected_pkts);
stats->sc_encrypted_pkts =
STATS_2x32_TO_64(e_sc_counters.sc_encrypted_pkts);
stats->sc_protected_octets =
STATS_2x32_TO_64(e_sc_counters.sc_protected_octets);
stats->sc_encrypted_octets =
STATS_2x32_TO_64(e_sc_counters.sc_encrypted_octets);
return 0;
}
/* macsec_ops .mdo_dev_open: push the SecY configuration to HW when the
 * link is up; the prepare phase is a no-op for this driver.
 */
static int aq_mdo_dev_open(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);

	if (ctx->prepare)
		return 0;

	if (!netif_carrier_ok(nic->ndev))
		return 0;

	return aq_apply_secy_cfg(nic, ctx->secy);
}
static int aq_mdo_dev_stop(struct macsec_context *ctx)
{
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
if (ctx->prepare)
return 0;
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
AQ_CLEAR_HW);
}
return 0;
}
/* Program the egress classification and SC records for the TX SC in
 * driver slot @txsc_idx: match on our own source MAC, fill the SecTAG
 * TCI bits from the SecY's TX SC settings, and select the SAK length.
 * Returns 0 on success or the first MSS error.
 */
static int aq_set_txsc(struct aq_nic_s *nic, const int txsc_idx)
{
struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
struct aq_mss_egress_class_record tx_class_rec = { 0 };
const struct macsec_secy *secy = aq_txsc->sw_secy;
struct aq_mss_egress_sc_record sc_rec = { 0 };
unsigned int sc_idx = aq_txsc->hw_sc_idx;
struct aq_hw_s *hw = nic->aq_hw;
int ret = 0;
/* Classify egress traffic by our MAC so it is routed to this SC. */
aq_ether_addr_to_mac(tx_class_rec.mac_sa, secy->netdev->dev_addr);
put_unaligned_be64((__force u64)secy->sci, tx_class_rec.sci);
tx_class_rec.sci_mask = 0;
tx_class_rec.sa_mask = 0x3f;
tx_class_rec.action = 0; /* forward to SA/SC table */
tx_class_rec.valid = 1;
tx_class_rec.sc_idx = sc_idx;
tx_class_rec.sc_sa = nic->macsec_cfg->sc_sa;
ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, txsc_idx);
if (ret)
return ret;
/* SecTAG TCI bits, per IEEE 802.1AE: E, SCB, SC, ES. */
sc_rec.protect = secy->protect_frames;
if (secy->tx_sc.encrypt)
sc_rec.tci |= BIT(1);
if (secy->tx_sc.scb)
sc_rec.tci |= BIT(2);
if (secy->tx_sc.send_sci)
sc_rec.tci |= BIT(3);
if (secy->tx_sc.end_station)
sc_rec.tci |= BIT(4);
/* The C bit is clear if and only if the Secure Data is
 * exactly the same as the User Data and the ICV is 16 octets long.
 */
if (!(secy->icv_len == 16 && !secy->tx_sc.encrypt))
sc_rec.tci |= BIT(0);
sc_rec.an_roll = 0;
/* Encode key length: 0/1/2 for 128/192/256-bit SAKs. */
switch (secy->key_len) {
case AQ_MACSEC_KEY_LEN_128_BIT:
sc_rec.sak_len = 0;
break;
case AQ_MACSEC_KEY_LEN_192_BIT:
sc_rec.sak_len = 1;
break;
case AQ_MACSEC_KEY_LEN_256_BIT:
sc_rec.sak_len = 2;
break;
default:
/* NOTE(review): message says "sc_sa" but this guards key_len —
 * looks like a copy-paste; confirm before changing the string.
 */
WARN_ONCE(true, "Invalid sc_sa");
return -EINVAL;
}
sc_rec.curr_an = secy->tx_sc.encoding_sa;
sc_rec.valid = 1;
sc_rec.fresh = 1;
return aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx);
}
/* Number of SCs the hardware can host in the given SA/SC split mode
 * (0 for an unknown mode).
 */
static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return 8;
	case aq_macsec_sa_sc_2sa_16sc:
		return 16;
	case aq_macsec_sa_sc_1sa_32sc:
		return 32;
	default:
		return 0;
	}
}
/* Map a driver SC slot to its hardware SC index: each SC occupies as
 * many consecutive entries as it has SAs in the current split mode.
 */
static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sc_idx * 4;
	case aq_macsec_sa_sc_2sa_16sc:
		return sc_idx * 2;
	case aq_macsec_sa_sc_1sa_32sc:
		return sc_idx;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
		return sc_idx;
	}
}
/* Pick the SA/SC split mode that supports @num_an associations per SC. */
static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an)
{
	switch (num_an) {
	case 4:
		return aq_macsec_sa_sc_4sa_8sc;
	case 2:
		return aq_macsec_sa_sc_2sa_16sc;
	case 1:
		return aq_macsec_sa_sc_1sa_32sc;
	default:
		return aq_macsec_sa_sc_not_used;
	}
}
/* macsec_ops .mdo_add_secy: claim a free TX SC slot for the new SecY
 * and, if the link is up, program it into hardware.
 *
 * Return: 0 on success; -EOPNOTSUPP for XPN SecYs; -EINVAL/-ENOSPC on
 * capacity problems; any error from aq_set_txsc() otherwise.
 */
static int aq_mdo_add_secy(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	enum aq_macsec_sc_sa sc_sa;
	u32 txsc_idx;
	int ret = 0;

	/* Extended packet numbering is not supported by this hardware. */
	if (secy->xpn)
		return -EOPNOTSUPP;

	sc_sa = sc_sa_from_num_an(MACSEC_NUM_AN);
	if (sc_sa == aq_macsec_sa_sc_not_used)
		return -EINVAL;

	if (hweight32(cfg->txsc_idx_busy) >= aq_sc_idx_max(sc_sa))
		return -ENOSPC;

	txsc_idx = ffz(cfg->txsc_idx_busy);
	if (txsc_idx == AQ_MACSEC_MAX_SC)
		return -ENOSPC;

	if (ctx->prepare)
		return 0;

	cfg->sc_sa = sc_sa;
	cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
	cfg->aq_txsc[txsc_idx].sw_secy = secy;

	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_set_txsc(nic, txsc_idx);

	set_bit(txsc_idx, &cfg->txsc_idx_busy);

	/* Fix: was "return 0", which silently dropped aq_set_txsc() errors. */
	return ret;
}
/* macsec_ops .mdo_upd_secy: re-program an existing TX SC after a SecY
 * change, when the link is up.
 */
static int aq_mdo_upd_secy(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	const struct macsec_secy *secy = ctx->secy;
	int txsc_idx;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
	if (txsc_idx < 0)
		return -ENOENT;

	if (ctx->prepare)
		return 0;

	if (!netif_carrier_ok(nic->ndev) || !netif_running(secy->netdev))
		return 0;

	return aq_set_txsc(nic, txsc_idx);
}
/* Tear down the TX SC in slot @txsc_idx: clear its SAs first, then
 * (per @clear_type) zero the HW class/SC records and/or release the SW
 * slot. Returns 0 or the first MSS error.
 */
static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
enum aq_clear_type clear_type)
{
struct aq_macsec_txsc *tx_sc = &nic->macsec_cfg->aq_txsc[txsc_idx];
struct aq_mss_egress_class_record tx_class_rec = { 0 };
struct aq_mss_egress_sc_record sc_rec = { 0 };
struct aq_hw_s *hw = nic->aq_hw;
int ret = 0;
int sa_num;
/* SAs must go before their owning SC. */
for_each_set_bit (sa_num, &tx_sc->tx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
ret = aq_clear_txsa(nic, tx_sc, sa_num, clear_type);
if (ret)
return ret;
}
if (clear_type & AQ_CLEAR_HW) {
/* Zeroed records invalidate the HW entries. */
ret = aq_mss_set_egress_class_record(hw, &tx_class_rec,
txsc_idx);
if (ret)
return ret;
sc_rec.fresh = 1;
ret = aq_mss_set_egress_sc_record(hw, &sc_rec,
tx_sc->hw_sc_idx);
if (ret)
return ret;
}
if (clear_type & AQ_CLEAR_SW) {
clear_bit(txsc_idx, &nic->macsec_cfg->txsc_idx_busy);
nic->macsec_cfg->aq_txsc[txsc_idx].sw_secy = NULL;
}
return ret;
}
/* macsec_ops .mdo_del_secy: remove a SecY from both HW and SW state. */
static int aq_mdo_del_secy(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);

	if (ctx->prepare)
		return 0;

	if (!nic->macsec_cfg)
		return 0;

	return aq_clear_secy(nic, ctx->secy, AQ_CLEAR_ALL);
}
/* Write the egress SA record (and optionally the SA key) for
 * association @an within the TX SC at hardware index @sc_idx.
 * @key may be NULL to leave the installed key untouched.
 * Returns 0 or the first MSS error.
 */
static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
const struct macsec_secy *secy,
const struct macsec_tx_sa *tx_sa,
const unsigned char *key, const unsigned char an)
{
const u32 next_pn = tx_sa->next_pn_halves.lower;
struct aq_mss_egress_sakey_record key_rec;
/* HW SA index = SC base index with the AN in the low bits. */
const unsigned int sa_idx = sc_idx | an;
struct aq_mss_egress_sa_record sa_rec;
struct aq_hw_s *hw = nic->aq_hw;
int ret = 0;
memset(&sa_rec, 0, sizeof(sa_rec));
sa_rec.valid = tx_sa->active;
sa_rec.fresh = 1;
sa_rec.next_pn = next_pn;
ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
if (ret)
return ret;
/* No key supplied: record update only. */
if (!key)
return ret;
memset(&key_rec, 0, sizeof(key_rec));
memcpy(&key_rec.key, key, secy->key_len);
/* Convert to the engine's word order before writing. */
aq_rotate_keys(&key_rec.key, secy->key_len);
ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
return ret;
}
/* macsec_ops .mdo_add_txsa: remember the new TX SA's key and mark its
 * slot busy; program it into HW if the link is up.
 */
static int aq_mdo_add_txsa(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	struct aq_macsec_txsc *txsc;
	int txsc_idx;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
	if (txsc_idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	txsc = &cfg->aq_txsc[txsc_idx];
	set_bit(ctx->sa.assoc_num, &txsc->tx_sa_idx_busy);

	/* Keep a copy so the SA can be re-applied after link bounce. */
	memcpy(txsc->tx_sa_key[ctx->sa.assoc_num], ctx->sa.key, secy->key_len);

	if (!netif_carrier_ok(nic->ndev) || !netif_running(secy->netdev))
		return 0;

	return aq_update_txsa(nic, txsc->hw_sc_idx, secy, ctx->sa.tx_sa,
			      ctx->sa.key, ctx->sa.assoc_num);
}
/* macsec_ops .mdo_upd_txsa: refresh an existing TX SA record in HW
 * (key unchanged) when the link is up.
 */
static int aq_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	int txsc_idx;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
	if (txsc_idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	if (!netif_carrier_ok(nic->ndev) || !netif_running(secy->netdev))
		return 0;

	return aq_update_txsa(nic, cfg->aq_txsc[txsc_idx].hw_sc_idx, secy,
			      ctx->sa.tx_sa, NULL, ctx->sa.assoc_num);
}
/* Clear one TX SA: release its SW busy bit and/or zero its HW SA and
 * key records. HW is only touched while the carrier is up.
 * Returns 0 or the first MSS error.
 */
static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
const int sa_num, enum aq_clear_type clear_type)
{
const int sa_idx = aq_txsc->hw_sc_idx | sa_num;
struct aq_hw_s *hw = nic->aq_hw;
int ret = 0;
if (clear_type & AQ_CLEAR_SW)
clear_bit(sa_num, &aq_txsc->tx_sa_idx_busy);
if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
struct aq_mss_egress_sakey_record key_rec;
struct aq_mss_egress_sa_record sa_rec;
/* Zeroed records invalidate the HW entries. */
memset(&sa_rec, 0, sizeof(sa_rec));
sa_rec.fresh = 1;
ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
if (ret)
return ret;
memset(&key_rec, 0, sizeof(key_rec));
return aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
}
return 0;
}
/* macsec_ops .mdo_del_txsa: fully remove one TX SA (HW and SW). */
static int aq_mdo_del_txsa(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int txsc_idx;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
	if (txsc_idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	return aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
			     AQ_CLEAR_ALL);
}
/* Translate the macsec validate_frames policy into the HW encoding
 * (0 = strict, 1 = check, 2 = disabled).
 */
static int aq_rxsc_validate_frames(const enum macsec_validation_type validate)
{
	if (validate == MACSEC_VALIDATE_DISABLED)
		return 2;
	if (validate == MACSEC_VALIDATE_CHECK)
		return 1;
	if (validate == MACSEC_VALIDATE_STRICT)
		return 0;

	WARN_ONCE(true, "Invalid validation type");
	return 0;
}
/* Program the ingress lookup and SC records for the RX SC in driver
 * slot @rxsc_idx. Two pre-classification entries are written per SC:
 * one matching SCI + MAC, and a fallback matching MAC alone for frames
 * whose SecTAG carries no explicit SCI.
 *
 * Return: 0 on success or the first MSS error.
 */
static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx)
{
	const struct aq_macsec_rxsc *aq_rxsc =
		&nic->macsec_cfg->aq_rxsc[rxsc_idx];
	struct aq_mss_ingress_preclass_record pre_class_record;
	const struct macsec_rx_sc *rx_sc = aq_rxsc->sw_rxsc;
	const struct macsec_secy *secy = aq_rxsc->sw_secy;
	const u32 hw_sc_idx = aq_rxsc->hw_sc_idx;
	struct aq_mss_ingress_sc_record sc_record;
	struct aq_hw_s *hw = nic->aq_hw;
	int ret;

	memset(&pre_class_record, 0, sizeof(pre_class_record));
	put_unaligned_be64((__force u64)rx_sc->sci, pre_class_record.sci);
	pre_class_record.sci_mask = 0xff;
	/* match all MACSEC ethertype packets */
	pre_class_record.eth_type = ETH_P_MACSEC;
	pre_class_record.eth_type_mask = 0x3;

	aq_ether_addr_to_mac(pre_class_record.mac_sa, (char *)&rx_sc->sci);
	pre_class_record.sa_mask = 0x3f;

	pre_class_record.an_mask = nic->macsec_cfg->sc_sa;
	pre_class_record.sc_idx = hw_sc_idx;
	/* strip SecTAG & forward for decryption */
	pre_class_record.action = 0x0;
	pre_class_record.valid = 1;

	ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
						 2 * rxsc_idx + 1);
	if (ret)
		return ret;

	/* If SCI is absent, then match by SA alone */
	pre_class_record.sci_mask = 0;
	pre_class_record.sci_from_table = 1;

	ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
						 2 * rxsc_idx);
	if (ret)
		return ret;

	memset(&sc_record, 0, sizeof(sc_record));
	sc_record.validate_frames =
		aq_rxsc_validate_frames(secy->validate_frames);
	if (secy->replay_protect) {
		sc_record.replay_protect = 1;
		sc_record.anti_replay_window = secy->replay_window;
	}
	sc_record.valid = 1;
	sc_record.fresh = 1;

	/* Collapsed the redundant "if (ret) return ret; return ret;" tail. */
	return aq_mss_set_ingress_sc_record(hw, &sc_record, hw_sc_idx);
}
static int aq_mdo_add_rxsc(struct macsec_context *ctx)
{
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa);
u32 rxsc_idx;
int ret = 0;
if (hweight32(cfg->rxsc_idx_busy) >= rxsc_idx_max)
return -ENOSPC;
rxsc_idx = ffz(cfg->rxsc_idx_busy);
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
if (ctx->prepare)
return 0;
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
cfg->aq_rxsc[rxsc_idx].sw_rxsc = ctx->rx_sc;
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
if (ret < 0)
return ret;
set_bit(rxsc_idx, &cfg->rxsc_idx_busy);
return 0;
}
/* macsec_ops .mdo_upd_rxsc: re-program an existing RX SC when the link
 * is up.
 */
static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	int rxsc_idx;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -ENOENT;

	if (ctx->prepare)
		return 0;

	if (!netif_carrier_ok(nic->ndev) || !netif_running(ctx->secy->netdev))
		return 0;

	return aq_set_rxsc(nic, rxsc_idx);
}
/* Tear down the RX SC in slot @rxsc_idx: clear its SAs first, then
 * (per @clear_type) zero both HW pre-class entries and the SC record
 * and/or release the SW slot. Returns 0 or the first MSS error.
 */
static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
enum aq_clear_type clear_type)
{
struct aq_macsec_rxsc *rx_sc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
struct aq_hw_s *hw = nic->aq_hw;
int ret = 0;
int sa_num;
/* SAs must go before their owning SC. */
for_each_set_bit (sa_num, &rx_sc->rx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
ret = aq_clear_rxsa(nic, rx_sc, sa_num, clear_type);
if (ret)
return ret;
}
if (clear_type & AQ_CLEAR_HW) {
struct aq_mss_ingress_preclass_record pre_class_record;
struct aq_mss_ingress_sc_record sc_record;
memset(&pre_class_record, 0, sizeof(pre_class_record));
memset(&sc_record, 0, sizeof(sc_record));
/* Each RX SC owns the pre-class entries 2*idx and 2*idx+1. */
ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
2 * rxsc_idx);
if (ret)
return ret;
ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
2 * rxsc_idx + 1);
if (ret)
return ret;
sc_record.fresh = 1;
ret = aq_mss_set_ingress_sc_record(hw, &sc_record,
rx_sc->hw_sc_idx);
if (ret)
return ret;
}
if (clear_type & AQ_CLEAR_SW) {
clear_bit(rxsc_idx, &nic->macsec_cfg->rxsc_idx_busy);
rx_sc->sw_secy = NULL;
rx_sc->sw_rxsc = NULL;
}
return ret;
}
/* macsec_ops .mdo_del_rxsc: drop an RX SC; HW records are only cleared
 * while the carrier is up, SW state always.
 */
static int aq_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	enum aq_clear_type clear_type = AQ_CLEAR_SW;
	int rxsc_idx;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -ENOENT;

	if (ctx->prepare)
		return 0;

	if (netif_carrier_ok(nic->ndev))
		clear_type = AQ_CLEAR_ALL;

	return aq_clear_rxsc(nic, rxsc_idx, clear_type);
}
static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
const struct macsec_secy *secy,
const struct macsec_rx_sa *rx_sa,
const unsigned char *key, const unsigned char an)
{
struct aq_mss_ingress_sakey_record sa_key_record;
const u32 next_pn = rx_sa->next_pn_halves.lower;
struct aq_mss_ingress_sa_record sa_record;
struct aq_hw_s *hw = nic->aq_hw;
const int sa_idx = sc_idx | an;
int ret = 0;
memset(&sa_record, 0, sizeof(sa_record));
sa_record.valid = rx_sa->active;
sa_record.fresh = 1;
sa_record.next_pn = next_pn;
ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
if (ret)
return ret;
if (!key)
return ret;
memset(&sa_key_record, 0, sizeof(sa_key_record));
memcpy(&sa_key_record.key, key, secy->key_len);
switch (secy->key_len) {
case AQ_MACSEC_KEY_LEN_128_BIT:
sa_key_record.key_len = 0;
break;
case AQ_MACSEC_KEY_LEN_192_BIT:
sa_key_record.key_len = 1;
break;
case AQ_MACSEC_KEY_LEN_256_BIT:
sa_key_record.key_len = 2;
break;
default:
return -1;
}
aq_rotate_keys(&sa_key_record.key, secy->key_len);
ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
return ret;
}
/* macsec_ops .mdo_add_rxsa: remember the new RX SA's key and mark its
 * slot busy; program it into HW if the link is up.
 */
static int aq_mdo_add_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	const struct macsec_secy *secy = ctx->secy;
	struct aq_macsec_rxsc *rxsc;
	int rxsc_idx;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
	if (rxsc_idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
	set_bit(ctx->sa.assoc_num, &rxsc->rx_sa_idx_busy);

	/* Keep a copy so the SA can be re-applied after link bounce. */
	memcpy(rxsc->rx_sa_key[ctx->sa.assoc_num], ctx->sa.key, secy->key_len);

	if (!netif_carrier_ok(nic->ndev) || !netif_running(secy->netdev))
		return 0;

	return aq_update_rxsa(nic, rxsc->hw_sc_idx, secy, ctx->sa.rx_sa,
			      ctx->sa.key, ctx->sa.assoc_num);
}
/* macsec offload callback: update an existing RX SA (PN / active state).
 *
 * Passes a NULL key to aq_update_rxsa() so the key record in HW is left
 * untouched; only the SA record is rewritten.
 */
static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	int idx;

	idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
	if (idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	/* Nothing to write while the link is down or the SecY is stopped. */
	if (!netif_carrier_ok(nic->ndev) || !netif_running(secy->netdev))
		return 0;

	return aq_update_rxsa(nic, cfg->aq_rxsc[idx].hw_sc_idx, secy,
			      ctx->sa.rx_sa, NULL, ctx->sa.assoc_num);
}
static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
const int sa_num, enum aq_clear_type clear_type)
{
int sa_idx = aq_rxsc->hw_sc_idx | sa_num;
struct aq_hw_s *hw = nic->aq_hw;
int ret = 0;
if (clear_type & AQ_CLEAR_SW)
clear_bit(sa_num, &aq_rxsc->rx_sa_idx_busy);
if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
struct aq_mss_ingress_sakey_record sa_key_record;
struct aq_mss_ingress_sa_record sa_record;
memset(&sa_key_record, 0, sizeof(sa_key_record));
memset(&sa_record, 0, sizeof(sa_record));
sa_record.fresh = 1;
ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
if (ret)
return ret;
return aq_mss_set_ingress_sakey_record(hw, &sa_key_record,
sa_idx);
}
return ret;
}
/* macsec offload callback: delete an RX SA (both SW and HW state). */
static int aq_mdo_del_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int idx;

	idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
	if (idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	return aq_clear_rxsa(nic, &cfg->aq_rxsc[idx], ctx->sa.assoc_num,
			     AQ_CLEAR_ALL);
}
/* macsec offload callback: report SecY-level (device-wide) counters.
 *
 * Reads the common ingress/egress counters from the MACSec engine into the
 * driver's cached stats and maps them onto the core's dev_stats fields.
 */
static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
	struct aq_hw_s *hw = nic->aq_hw;

	/* Nothing to validate in the prepare phase. */
	if (ctx->prepare)
		return 0;

	aq_get_macsec_common_stats(hw, stats);

	ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
	ctx->stats.dev_stats->InPktsUntagged = stats->in.untagged_pkts;
	ctx->stats.dev_stats->OutPktsTooLong = stats->out.too_long;
	ctx->stats.dev_stats->InPktsNoTag = stats->in.notag_pkts;
	ctx->stats.dev_stats->InPktsBadTag = stats->in.bad_tag_pkts;
	ctx->stats.dev_stats->InPktsUnknownSCI = stats->in.unknown_sci_pkts;
	ctx->stats.dev_stats->InPktsNoSCI = stats->in.no_sci_pkts;
	/* No matching HW counter exists for this field. */
	ctx->stats.dev_stats->InPktsOverrun = 0;

	return 0;
}
/* macsec offload callback: report TX SC counters.
 *
 * Returns -ENOENT if no TX SC is registered for the SecY. The HW counters
 * are refreshed into the driver cache, then copied to the core structure.
 */
static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_tx_sc_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_txsc *aq_txsc;
	int txsc_idx;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, ctx->secy);
	if (txsc_idx < 0)
		return -ENOENT;

	if (ctx->prepare)
		return 0;

	aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
	stats = &aq_txsc->stats;
	aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);

	ctx->stats.tx_sc_stats->OutPktsProtected = stats->sc_protected_pkts;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = stats->sc_encrypted_pkts;
	ctx->stats.tx_sc_stats->OutOctetsProtected = stats->sc_protected_octets;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = stats->sc_encrypted_octets;

	return 0;
}
/* macsec offload callback: report TX SA counters and sync the next PN.
 *
 * Besides copying the HW packet counters, this reads the current next_pn
 * from HW and writes it back into the core's SA state (under the SA lock)
 * so the SW view of the PN stays in step with the offloaded datapath.
 */
static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_tx_sa_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	const struct macsec_secy *secy;
	struct aq_macsec_txsc *aq_txsc;
	struct macsec_tx_sa *tx_sa;
	unsigned int sa_idx;
	int txsc_idx;
	u32 next_pn;
	int ret;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
	if (txsc_idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	aq_txsc = &cfg->aq_txsc[txsc_idx];
	/* HW SA index = SC base index with the AN in the low bits. */
	sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
	stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
	ret = aq_get_txsa_stats(hw, sa_idx, stats);
	if (ret)
		return ret;

	ctx->stats.tx_sa_stats->OutPktsProtected = stats->sa_protected_pkts;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = stats->sa_encrypted_pkts;

	secy = aq_txsc->sw_secy;
	/* SA pointers are RCU-protected; we are in BH context here. */
	tx_sa = rcu_dereference_bh(secy->tx_sc.sa[ctx->sa.assoc_num]);
	ret = aq_get_txsa_next_pn(hw, sa_idx, &next_pn);
	if (ret == 0) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = next_pn;
		spin_unlock_bh(&tx_sa->lock);
	}

	return ret;
}
/* macsec offload callback: report RX SC counters.
 *
 * The HW only keeps per-SA counters, so the SC totals are produced by
 * summing the counters of every SA currently marked busy on the SC.
 * Stops and returns the error of the first failing HW read.
 */
static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_rx_sa_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_rxsc *aq_rxsc;
	unsigned int sa_idx;
	int rxsc_idx;
	int ret = 0;
	int i;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -ENOENT;

	if (ctx->prepare)
		return 0;

	aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
	for (i = 0; i < MACSEC_NUM_AN; i++) {
		if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
			continue;

		stats = &aq_rxsc->rx_sa_stats[i];
		sa_idx = aq_rxsc->hw_sc_idx | i;
		ret = aq_get_rxsa_stats(hw, sa_idx, stats);
		if (ret)
			break;

		/* Accumulate per-SA counters into the SC totals. */
		ctx->stats.rx_sc_stats->InOctetsValidated +=
			stats->validated_octets;
		ctx->stats.rx_sc_stats->InOctetsDecrypted +=
			stats->decrypted_octets;
		ctx->stats.rx_sc_stats->InPktsUnchecked +=
			stats->unchecked_pkts;
		ctx->stats.rx_sc_stats->InPktsDelayed += stats->delayed_pkts;
		ctx->stats.rx_sc_stats->InPktsOK += stats->ok_pkts;
		ctx->stats.rx_sc_stats->InPktsInvalid += stats->invalid_pkts;
		ctx->stats.rx_sc_stats->InPktsLate += stats->late_pkts;
		ctx->stats.rx_sc_stats->InPktsNotValid += stats->not_valid_pkts;
		ctx->stats.rx_sc_stats->InPktsNotUsingSA += stats->not_using_sa;
		ctx->stats.rx_sc_stats->InPktsUnusedSA += stats->unused_sa;
	}

	return ret;
}
/* macsec offload callback: report RX SA counters and sync the next PN.
 *
 * Copies the per-SA HW counters into the core structure, then reads the
 * HW next_pn back into the core SA state (under the SA lock), mirroring
 * what aq_mdo_get_tx_sa_stats() does for the TX side.
 */
static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_rx_sa_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_rxsc *aq_rxsc;
	struct macsec_rx_sa *rx_sa;
	unsigned int sa_idx;
	int rxsc_idx;
	u32 next_pn;
	int ret;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -EINVAL;

	if (ctx->prepare)
		return 0;

	aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
	stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
	/* HW SA index = SC base index with the AN in the low bits. */
	sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
	ret = aq_get_rxsa_stats(hw, sa_idx, stats);
	if (ret)
		return ret;

	ctx->stats.rx_sa_stats->InPktsOK = stats->ok_pkts;
	ctx->stats.rx_sa_stats->InPktsInvalid = stats->invalid_pkts;
	ctx->stats.rx_sa_stats->InPktsNotValid = stats->not_valid_pkts;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = stats->not_using_sa;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = stats->unused_sa;

	/* SA pointers are RCU-protected; we are in BH context here. */
	rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[ctx->sa.assoc_num]);
	ret = aq_get_rxsa_next_pn(hw, sa_idx, &next_pn);
	if (ret == 0) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = next_pn;
		spin_unlock_bh(&rx_sa->lock);
	}

	return ret;
}
/* Re-program one TX SC and all of its known SAs into the HW from the
 * driver's cached SW state (used after link-up or reconfiguration).
 * Skips SecYs whose netdev is not running. Returns 0 or the first error.
 */
static int apply_txsc_cfg(struct aq_nic_s *nic, const int txsc_idx)
{
	struct aq_macsec_txsc *txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
	const struct macsec_secy *secy = txsc->sw_secy;
	struct macsec_tx_sa *sa;
	int err;
	int an;

	if (!netif_running(secy->netdev))
		return 0;

	err = aq_set_txsc(nic, txsc_idx);
	if (err)
		return err;

	/* Re-install every SA the core currently holds for this SC. */
	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
		if (!sa)
			continue;
		err = aq_update_txsa(nic, txsc->hw_sc_idx, secy, sa,
				     txsc->tx_sa_key[an], an);
		if (err)
			return err;
	}

	return 0;
}
/* Re-program one RX SC and all of its known SAs into the HW from the
 * driver's cached SW state (mirror of apply_txsc_cfg for the ingress side).
 * Skips SecYs whose netdev is not running. Returns 0 or the first error.
 */
static int apply_rxsc_cfg(struct aq_nic_s *nic, const int rxsc_idx)
{
	struct aq_macsec_rxsc *rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
	const struct macsec_secy *secy = rxsc->sw_secy;
	struct macsec_rx_sa *sa;
	int err;
	int an;

	if (!netif_running(secy->netdev))
		return 0;

	err = aq_set_rxsc(nic, rxsc_idx);
	if (err)
		return err;

	/* Re-install every SA the core currently holds for this SC. */
	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = rcu_dereference_bh(rxsc->sw_rxsc->sa[an]);
		if (!sa)
			continue;
		err = aq_update_rxsa(nic, rxsc->hw_sc_idx, secy, sa,
				     rxsc->rx_sa_key[an], an);
		if (err)
			return err;
	}

	return 0;
}
/* Clear the TX SC and every RX SC belonging to @secy.
 *
 * @clear_type selects SW bookkeeping, HW records, or both. The RX SC list
 * is walked with rcu_dereference_bh(); SCs the driver never registered
 * (negative index lookup) are skipped. Returns the first error, if any.
 */
static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
			 enum aq_clear_type clear_type)
{
	struct macsec_rx_sc *rx_sc;
	int txsc_idx;
	int rxsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
	if (txsc_idx >= 0) {
		ret = aq_clear_txsc(nic, txsc_idx, clear_type);
		if (ret)
			return ret;
	}

	for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc;
	     rx_sc = rcu_dereference_bh(rx_sc->next)) {
		rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
		if (rxsc_idx < 0)
			continue;

		ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
		if (ret)
			return ret;
	}

	return ret;
}
/* Re-apply the full configuration of one SecY (TX SC plus all active RX SCs)
 * to the HW from cached SW state. Returns 0 or the first error encountered.
 *
 * Note: the RX loop intentionally stops at the first inactive SC in the
 * RCU list, matching the original traversal condition.
 */
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
			     const struct macsec_secy *secy)
{
	struct macsec_rx_sc *rx_sc;
	int txsc_idx;
	int rxsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
	if (txsc_idx >= 0) {
		/* Fix: propagate the TX SC error instead of silently
		 * discarding it — the RX path below already does so. */
		ret = apply_txsc_cfg(nic, txsc_idx);
		if (ret)
			return ret;
	}

	for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc && rx_sc->active;
	     rx_sc = rcu_dereference_bh(rx_sc->next)) {
		rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
		if (unlikely(rxsc_idx < 0))
			continue;

		ret = apply_rxsc_cfg(nic, rxsc_idx);
		if (ret)
			return ret;
	}

	return ret;
}
static int aq_apply_macsec_cfg(struct aq_nic_s *nic)
{
int ret = 0;
int i;
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) {
ret = apply_txsc_cfg(nic, i);
if (ret)
return ret;
}
}
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->rxsc_idx_busy & BIT(i)) {
ret = apply_rxsc_cfg(nic, i);
if (ret)
return ret;
}
}
return ret;
}
/* Extract the association number (AN) from a HW SA index.
 *
 * The AN occupies the low bits of the SA index; how many bits depends on
 * the SC/SA partitioning mode. Returns -EINVAL on an unknown mode.
 */
static int aq_sa_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, const int sa_idx)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sa_idx & 3;
	case aq_macsec_sa_sc_2sa_16sc:
		return sa_idx & 1;
	case aq_macsec_sa_sc_1sa_32sc:
		return 0;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
		return -EINVAL;
	}
}
/* Extract the SC base index from a HW SA index by masking off the AN bits
 * (the inverse companion of aq_sa_from_sa_idx). Returns -EINVAL on an
 * unknown partitioning mode.
 */
static int aq_sc_idx_from_sa_idx(const enum aq_macsec_sc_sa sc_sa,
				 const int sa_idx)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sa_idx & ~3;
	case aq_macsec_sa_sc_2sa_16sc:
		return sa_idx & ~1;
	case aq_macsec_sa_sc_1sa_32sc:
		return sa_idx;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
		return -EINVAL;
	}
}
/* Periodic check for TX SAs whose packet number is about to wrap.
 *
 * Reads the HW "SA expired" / "SA threshold expired" bitmaps, and for every
 * expired SA that maps to a valid, running, busy TX SC notifies the macsec
 * core via macsec_pn_wrapped() so it can rotate keys. Finally acknowledges
 * the processed bits back to HW so they are not reported again.
 */
static void aq_check_txsa_expiration(struct aq_nic_s *nic)
{
	u32 egress_sa_expired, egress_sa_threshold_expired;
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_txsc *aq_txsc;
	const struct macsec_secy *secy;
	int sc_idx = 0, txsc_idx = 0;
	enum aq_macsec_sc_sa sc_sa;
	struct macsec_tx_sa *tx_sa;
	unsigned char an = 0;
	int ret;
	int i;

	sc_sa = cfg->sc_sa;

	ret = aq_mss_get_egress_sa_expired(hw, &egress_sa_expired);
	if (unlikely(ret))
		return;

	ret = aq_mss_get_egress_sa_threshold_expired(hw,
		&egress_sa_threshold_expired);

	for (i = 0; i < AQ_MACSEC_MAX_SA; i++) {
		if (egress_sa_expired & BIT(i)) {
			/* Decompose the flat SA index into AN + SC index. */
			an = aq_sa_from_sa_idx(sc_sa, i);
			sc_idx = aq_sc_idx_from_sa_idx(sc_sa, i);
			txsc_idx = aq_get_txsc_idx_from_sc_idx(sc_sa, sc_idx);
			if (txsc_idx < 0)
				continue;

			aq_txsc = &cfg->aq_txsc[txsc_idx];
			if (!(cfg->txsc_idx_busy & BIT(txsc_idx))) {
				netdev_warn(nic->ndev,
					"PN threshold expired on invalid TX SC");
				continue;
			}

			secy = aq_txsc->sw_secy;
			if (!netif_running(secy->netdev)) {
				netdev_warn(nic->ndev,
					"PN threshold expired on down TX SC");
				continue;
			}

			if (unlikely(!(aq_txsc->tx_sa_idx_busy & BIT(an)))) {
				netdev_warn(nic->ndev,
					"PN threshold expired on invalid TX SA");
				continue;
			}

			tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			macsec_pn_wrapped((struct macsec_secy *)secy, tx_sa);
		}
	}

	/* Ack the bits we just handled so HW can report new expirations. */
	aq_mss_set_egress_sa_expired(hw, egress_sa_expired);
	if (likely(!ret))
		aq_mss_set_egress_sa_threshold_expired(hw,
			egress_sa_threshold_expired);
}
/* MACSec offload callbacks registered with the core via ndev->macsec_ops
 * (see aq_macsec_init()).
 */
const struct macsec_ops aq_macsec_ops = {
	.mdo_dev_open = aq_mdo_dev_open,
	.mdo_dev_stop = aq_mdo_dev_stop,
	.mdo_add_secy = aq_mdo_add_secy,
	.mdo_upd_secy = aq_mdo_upd_secy,
	.mdo_del_secy = aq_mdo_del_secy,
	.mdo_add_rxsc = aq_mdo_add_rxsc,
	.mdo_upd_rxsc = aq_mdo_upd_rxsc,
	.mdo_del_rxsc = aq_mdo_del_rxsc,
	.mdo_add_rxsa = aq_mdo_add_rxsa,
	.mdo_upd_rxsa = aq_mdo_upd_rxsa,
	.mdo_del_rxsa = aq_mdo_del_rxsa,
	.mdo_add_txsa = aq_mdo_add_txsa,
	.mdo_upd_txsa = aq_mdo_upd_txsa,
	.mdo_del_txsa = aq_mdo_del_txsa,
	.mdo_get_dev_stats = aq_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
};
/* Allocate the driver MACSec state and advertise HW offload support.
 *
 * Quietly does nothing (returns 0) when the FW does not expose a
 * capability query or the MACSec capability bit is absent. Returns
 * -ENOMEM if the config structure cannot be allocated.
 */
int aq_macsec_init(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg;

	if (!nic->aq_fw_ops->get_link_capabilities)
		return 0;

	if (!(nic->aq_fw_ops->get_link_capabilities(nic->aq_hw) &
	      BIT(CAPS_LO_MACSEC)))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;
	nic->macsec_cfg = cfg;

	nic->ndev->features |= NETIF_F_HW_MACSEC;
	nic->ndev->macsec_ops = &aq_macsec_ops;

	return 0;
}
/* Release the driver MACSec state allocated by aq_macsec_init().
 * Safe to call when no state was allocated (kfree(NULL) is a no-op);
 * the pointer is reset so a later call cannot double-free.
 */
void aq_macsec_free(struct aq_nic_s *nic)
{
	kfree(nic->macsec_cfg);
	nic->macsec_cfg = NULL;
}
/* Enable the MACSec engine and restore the cached configuration.
 *
 * Called on link-up. Sends the FW "enable" message (when the FW op is
 * available), installs the control-packet Ethertype bypass filters, and
 * re-applies every cached SC/SA to HW. Runs under rtnl_lock to serialize
 * with the macsec offload callbacks. Returns 0 or a negative errno.
 */
int aq_macsec_enable(struct aq_nic_s *nic)
{
	/* EAPOL (802.1X) frames must bypass the MACSec engine. */
	u32 ctl_ether_types[1] = { ETH_P_PAE };
	struct macsec_msg_fw_response resp = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct aq_hw_s *hw = nic->aq_hw;
	int num_ctl_ether_types = 0;
	int index = 0, tbl_idx;
	int ret;

	if (!nic->macsec_cfg)
		return 0;

	rtnl_lock();

	if (nic->aq_fw_ops->send_macsec_req) {
		struct macsec_cfg_request cfg = { 0 };

		cfg.enabled = 1;
		/* Maximum PN thresholds; expiration handling is IRQ-driven. */
		cfg.egress_threshold = 0xffffffff;
		cfg.ingress_threshold = 0xffffffff;
		cfg.interrupts_enabled = 1;

		msg.msg_type = macsec_cfg_msg;
		msg.cfg = cfg;

		ret = nic->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (ret)
			goto unlock;
	}

	/* Init Ethertype bypass filters */
	for (index = 0; index < ARRAY_SIZE(ctl_ether_types); index++) {
		struct aq_mss_ingress_prectlf_record rx_prectlf_rec;
		struct aq_mss_egress_ctlf_record tx_ctlf_rec;

		if (ctl_ether_types[index] == 0)
			continue;

		memset(&tx_ctlf_rec, 0, sizeof(tx_ctlf_rec));
		tx_ctlf_rec.eth_type = ctl_ether_types[index];
		tx_ctlf_rec.match_type = 4; /* Match eth_type only */
		tx_ctlf_rec.match_mask = 0xf; /* match for eth_type */
		tx_ctlf_rec.action = 0; /* Bypass MACSEC modules */
		/* Bypass entries fill the filter table from the top down. */
		tbl_idx = NUMROWS_EGRESSCTLFRECORD - num_ctl_ether_types - 1;
		aq_mss_set_egress_ctlf_record(hw, &tx_ctlf_rec, tbl_idx);

		memset(&rx_prectlf_rec, 0, sizeof(rx_prectlf_rec));
		rx_prectlf_rec.eth_type = ctl_ether_types[index];
		rx_prectlf_rec.match_type = 4; /* Match eth_type only */
		rx_prectlf_rec.match_mask = 0xf; /* match for eth_type */
		rx_prectlf_rec.action = 0; /* Bypass MACSEC modules */
		tbl_idx =
			NUMROWS_INGRESSPRECTLFRECORD - num_ctl_ether_types - 1;
		aq_mss_set_ingress_prectlf_record(hw, &rx_prectlf_rec, tbl_idx);

		num_ctl_ether_types++;
	}

	ret = aq_apply_macsec_cfg(nic);

unlock:
	rtnl_unlock();
	return ret;
}
/* Periodic MACSec housekeeping, run from the NIC service task.
 * Checks for PN expiration on TX SAs; a no-op when MACSec is not
 * initialized or the link is down.
 */
void aq_macsec_work(struct aq_nic_s *nic)
{
	if (!nic->macsec_cfg || !netif_carrier_ok(nic->ndev))
		return;

	rtnl_lock();
	aq_check_txsa_expiration(nic);
	rtnl_unlock();
}
/* Count the RX SAs currently marked busy across all busy RX SCs
 * (used to size the ethtool statistics block).
 */
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int total = 0;
	int sc;

	if (!cfg)
		return 0;

	for (sc = 0; sc < AQ_MACSEC_MAX_SC; sc++)
		if (test_bit(sc, &cfg->rxsc_idx_busy))
			total += hweight_long(cfg->aq_rxsc[sc].rx_sa_idx_busy);

	return total;
}
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
{
if (!nic->macsec_cfg)
return 0;
return hweight_long(nic->macsec_cfg->txsc_idx_busy);
}
/* Count the TX SAs currently marked busy across all busy TX SCs
 * (mirror of aq_macsec_rx_sa_cnt for the egress side).
 */
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int total = 0;
	int sc;

	if (!cfg)
		return 0;

	for (sc = 0; sc < AQ_MACSEC_MAX_SC; sc++)
		if (test_bit(sc, &cfg->txsc_idx_busy))
			total += hweight_long(cfg->aq_txsc[sc].tx_sa_idx_busy);

	return total;
}
/* Refresh all cached MACSec counters from HW: the common device counters,
 * then per-SC and per-SA counters for every busy TX SC, then per-SA
 * counters for every busy RX SC. Stops at the first HW read error and
 * returns it; returns 0 on full success.
 */
static int aq_macsec_update_stats(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_txsc *aq_txsc;
	struct aq_macsec_rxsc *aq_rxsc;
	int i, sa_idx, assoc_num;
	int ret = 0;

	aq_get_macsec_common_stats(hw, &cfg->stats);

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (!(cfg->txsc_idx_busy & BIT(i)))
			continue;
		aq_txsc = &cfg->aq_txsc[i];

		ret = aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx,
					&aq_txsc->stats);
		if (ret)
			return ret;

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
				continue;
			/* HW SA index = SC base index | AN. */
			sa_idx = aq_txsc->hw_sc_idx | assoc_num;
			ret = aq_get_txsa_stats(hw, sa_idx,
					&aq_txsc->tx_sa_stats[assoc_num]);
			if (ret)
				return ret;
		}
	}

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (!(test_bit(i, &cfg->rxsc_idx_busy)))
			continue;
		aq_rxsc = &cfg->aq_rxsc[i];

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
				continue;
			sa_idx = aq_rxsc->hw_sc_idx | assoc_num;

			ret = aq_get_rxsa_stats(hw, sa_idx,
					&aq_rxsc->rx_sa_stats[assoc_num]);
			if (ret)
				return ret;
		}
	}

	return ret;
}
/* Serialize all MACSec counters into the ethtool statistics buffer.
 *
 * @data points at the next free u64 slot; the function returns the advanced
 * pointer so the caller can append further stats. The emission order here
 * must stay in lock-step with the corresponding ethtool string table
 * (presumably built from the same busy bitmaps - verify against the
 * ethtool code before reordering anything).
 */
u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_common_stats *common_stats;
	struct aq_macsec_tx_sc_stats *txsc_stats;
	struct aq_macsec_tx_sa_stats *txsa_stats;
	struct aq_macsec_rx_sa_stats *rxsa_stats;
	struct aq_macsec_txsc *aq_txsc;
	struct aq_macsec_rxsc *aq_rxsc;
	unsigned int assoc_num;
	unsigned int sc_num;
	unsigned int i = 0U;

	if (!cfg)
		return data;

	aq_macsec_update_stats(nic);

	/* Device-wide ingress/egress counters. Note the first slot is
	 * written at i == 0; all later slots pre-increment i. */
	common_stats = &cfg->stats;
	data[i] = common_stats->in.ctl_pkts;
	data[++i] = common_stats->in.tagged_miss_pkts;
	data[++i] = common_stats->in.untagged_miss_pkts;
	data[++i] = common_stats->in.notag_pkts;
	data[++i] = common_stats->in.untagged_pkts;
	data[++i] = common_stats->in.bad_tag_pkts;
	data[++i] = common_stats->in.no_sci_pkts;
	data[++i] = common_stats->in.unknown_sci_pkts;
	data[++i] = common_stats->in.ctrl_prt_pass_pkts;
	data[++i] = common_stats->in.unctrl_prt_pass_pkts;
	data[++i] = common_stats->in.ctrl_prt_fail_pkts;
	data[++i] = common_stats->in.unctrl_prt_fail_pkts;
	data[++i] = common_stats->in.too_long_pkts;
	data[++i] = common_stats->in.igpoc_ctl_pkts;
	data[++i] = common_stats->in.ecc_error_pkts;
	data[++i] = common_stats->in.unctrl_hit_drop_redir;
	data[++i] = common_stats->out.ctl_pkts;
	data[++i] = common_stats->out.unknown_sa_pkts;
	data[++i] = common_stats->out.untagged_pkts;
	data[++i] = common_stats->out.too_long;
	data[++i] = common_stats->out.ecc_error_pkts;
	data[++i] = common_stats->out.unctrl_hit_drop_redir;

	/* Per-TX-SC counters followed by per-SA counters for busy SAs. */
	for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
		if (!(test_bit(sc_num, &cfg->txsc_idx_busy)))
			continue;

		aq_txsc = &cfg->aq_txsc[sc_num];
		txsc_stats = &aq_txsc->stats;

		data[++i] = txsc_stats->sc_protected_pkts;
		data[++i] = txsc_stats->sc_encrypted_pkts;
		data[++i] = txsc_stats->sc_protected_octets;
		data[++i] = txsc_stats->sc_encrypted_octets;

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
				continue;

			txsa_stats = &aq_txsc->tx_sa_stats[assoc_num];

			data[++i] = txsa_stats->sa_hit_drop_redirect;
			data[++i] = txsa_stats->sa_protected2_pkts;
			data[++i] = txsa_stats->sa_protected_pkts;
			data[++i] = txsa_stats->sa_encrypted_pkts;
		}
	}

	/* Per-RX-SA counters for every busy SA on every busy RX SC. */
	for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
		if (!(test_bit(sc_num, &cfg->rxsc_idx_busy)))
			continue;

		aq_rxsc = &cfg->aq_rxsc[sc_num];

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
				continue;

			rxsa_stats = &aq_rxsc->rx_sa_stats[assoc_num];

			data[++i] = rxsa_stats->untagged_hit_pkts;
			data[++i] = rxsa_stats->ctrl_hit_drop_redir_pkts;
			data[++i] = rxsa_stats->not_using_sa;
			data[++i] = rxsa_stats->unused_sa;
			data[++i] = rxsa_stats->not_valid_pkts;
			data[++i] = rxsa_stats->invalid_pkts;
			data[++i] = rxsa_stats->ok_pkts;
			data[++i] = rxsa_stats->late_pkts;
			data[++i] = rxsa_stats->delayed_pkts;
			data[++i] = rxsa_stats->unchecked_pkts;
			data[++i] = rxsa_stats->validated_octets;
			data[++i] = rxsa_stats->decrypted_octets;
		}
	}

	/* i is the index of the last written slot; convert to a count. */
	i++;

	data += i;

	return data;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef AQ_MACSEC_H
#define AQ_MACSEC_H
#include <linux/netdevice.h>
#if IS_ENABLED(CONFIG_MACSEC)
#include "net/macsec.h"
struct aq_nic_s;
#define AQ_MACSEC_MAX_SC 32
#define AQ_MACSEC_MAX_SA 32
/* HW SC/SA table partitioning mode: how the 32 SA records are split
 * between SCs (e.g. 4 SAs per SC leaves room for 8 SCs).
 */
enum aq_macsec_sc_sa {
	aq_macsec_sa_sc_4sa_8sc,
	aq_macsec_sa_sc_not_used,
	aq_macsec_sa_sc_2sa_16sc,
	aq_macsec_sa_sc_1sa_32sc,
};
/* Device-wide MACSec counters cached from the HW engine. */
struct aq_macsec_common_stats {
	/* Ingress Common Counters */
	struct {
		u64 ctl_pkts;
		u64 tagged_miss_pkts;
		u64 untagged_miss_pkts;
		u64 notag_pkts;
		u64 untagged_pkts;
		u64 bad_tag_pkts;
		u64 no_sci_pkts;
		u64 unknown_sci_pkts;
		u64 ctrl_prt_pass_pkts;
		u64 unctrl_prt_pass_pkts;
		u64 ctrl_prt_fail_pkts;
		u64 unctrl_prt_fail_pkts;
		u64 too_long_pkts;
		u64 igpoc_ctl_pkts;
		u64 ecc_error_pkts;
		u64 unctrl_hit_drop_redir;
	} in;

	/* Egress Common Counters */
	struct {
		u64 ctl_pkts;
		u64 unknown_sa_pkts;
		u64 untagged_pkts;
		u64 too_long;
		u64 ecc_error_pkts;
		u64 unctrl_hit_drop_redir;
	} out;
};
/* Ingress SA Counters (cached per RX SA from the HW engine) */
struct aq_macsec_rx_sa_stats {
	u64 untagged_hit_pkts;
	u64 ctrl_hit_drop_redir_pkts;
	u64 not_using_sa;
	u64 unused_sa;
	u64 not_valid_pkts;
	u64 invalid_pkts;
	u64 ok_pkts;
	u64 late_pkts;
	u64 delayed_pkts;
	u64 unchecked_pkts;
	u64 validated_octets;
	u64 decrypted_octets;
};
/* Egress SA Counters (cached per TX SA from the HW engine) */
struct aq_macsec_tx_sa_stats {
	u64 sa_hit_drop_redirect;
	u64 sa_protected2_pkts;
	u64 sa_protected_pkts;
	u64 sa_encrypted_pkts;
};
/* Egress SC Counters (cached per TX SC from the HW engine) */
struct aq_macsec_tx_sc_stats {
	u64 sc_protected_pkts;
	u64 sc_encrypted_pkts;
	u64 sc_protected_octets;
	u64 sc_encrypted_octets;
};
/* Driver-side state for one offloaded TX SC. */
struct aq_macsec_txsc {
	u32 hw_sc_idx;			/* base index into the HW SC/SA tables */
	unsigned long tx_sa_idx_busy;	/* bitmap of ANs with installed SAs */
	const struct macsec_secy *sw_secy;	/* owning core SecY */
	/* key material cached so SAs can be re-applied after link bounce */
	u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
	struct aq_macsec_tx_sc_stats stats;
	struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
};
/* Driver-side state for one offloaded RX SC. */
struct aq_macsec_rxsc {
	u32 hw_sc_idx;			/* base index into the HW SC/SA tables */
	unsigned long rx_sa_idx_busy;	/* bitmap of ANs with installed SAs */
	const struct macsec_secy *sw_secy;	/* owning core SecY */
	const struct macsec_rx_sc *sw_rxsc;	/* matching core RX SC */
	/* key material cached so SAs can be re-applied after link bounce */
	u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
	struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
};
/* Top-level MACSec offload state, allocated in aq_macsec_init() and hung
 * off struct aq_nic_s when the FW advertises MACSec capability.
 */
struct aq_macsec_cfg {
	enum aq_macsec_sc_sa sc_sa;	/* current HW SC/SA partitioning */
	/* Egress channel configuration */
	unsigned long txsc_idx_busy;	/* bitmap of used aq_txsc[] slots */
	struct aq_macsec_txsc aq_txsc[AQ_MACSEC_MAX_SC];
	/* Ingress channel configuration */
	unsigned long rxsc_idx_busy;	/* bitmap of used aq_rxsc[] slots */
	struct aq_macsec_rxsc aq_rxsc[AQ_MACSEC_MAX_SC];
	/* Statistics / counters */
	struct aq_macsec_common_stats stats;
};
extern const struct macsec_ops aq_macsec_ops;
int aq_macsec_init(struct aq_nic_s *nic);
void aq_macsec_free(struct aq_nic_s *nic);
int aq_macsec_enable(struct aq_nic_s *nic);
void aq_macsec_work(struct aq_nic_s *nic);
u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data);
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic);
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic);
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic);
#endif
#endif /* AQ_MACSEC_H */
......@@ -11,6 +11,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
......@@ -176,6 +177,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_enable(self);
#endif
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
......@@ -217,6 +221,10 @@ static void aq_nic_service_task(struct work_struct *work)
if (err)
return;
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_work(self);
#endif
mutex_lock(&self->fwreq_mutex);
if (self->aq_fw_ops->update_stats)
self->aq_fw_ops->update_stats(self->aq_hw);
......@@ -262,6 +270,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_init(self);
#endif
mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
......@@ -296,6 +308,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
if (err)
aq_macsec_free(self);
#endif
return err;
}
......@@ -765,7 +781,7 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
......@@ -815,7 +831,10 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
aq_vec_get_sw_stats(aq_vec, data, &count);
}
data += count;
err_exit:;
return data;
}
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
......
......@@ -17,6 +17,7 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
struct aq_macsec_cfg;
struct aq_ptp_s;
enum aq_rx_filter_type;
......@@ -129,6 +130,9 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
#if IS_ENABLED(CONFIG_MACSEC)
struct aq_macsec_cfg *macsec_cfg;
#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
......@@ -154,7 +158,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
......
......@@ -18,6 +18,7 @@
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
......@@ -324,6 +325,10 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_free(self);
#endif
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
......
......@@ -319,6 +319,32 @@ struct __packed hw_atl_utils_settings {
u32 media_detect;
};
enum macsec_msg_type {
macsec_cfg_msg = 0,
macsec_add_rx_sc_msg,
macsec_add_tx_sc_msg,
macsec_add_rx_sa_msg,
macsec_add_tx_sa_msg,
macsec_get_stats_msg,
};
struct __packed macsec_cfg_request {
u32 enabled;
u32 egress_threshold;
u32 ingress_threshold;
u32 interrupts_enabled;
};
struct __packed macsec_msg_fw_request {
u32 msg_id; /* not used */
u32 msg_type;
struct macsec_cfg_request cfg;
};
struct __packed macsec_msg_fw_response {
u32 result;
};
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
......@@ -437,34 +463,43 @@ enum hw_atl_fw2x_caps_lo {
CAPS_LO_2P5GBASET_FD,
CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
CAPS_LO_AUTONEG,
CAPS_LO_SMBUS_READ,
CAPS_LO_SMBUS_WRITE,
CAPS_LO_MACSEC = 15,
CAPS_LO_RESERVED1,
CAPS_LO_WAKE_ON_LINK_FORCED,
CAPS_LO_HIGH_TEMP_WARNING = 29,
CAPS_LO_DRIVER_SCRATCHPAD = 30,
CAPS_LO_GLOBAL_FAULT = 31
};
/* 0x374
* Status register
*/
enum hw_atl_fw2x_caps_hi {
CAPS_HI_RESERVED1 = 0,
CAPS_HI_TPO2EN = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
CAPS_HI_100BASETX_EEE = 5,
CAPS_HI_RESERVED3,
CAPS_HI_RESERVED4,
CAPS_HI_PHY_BUF_SEND,
CAPS_HI_PHY_BUF_RECV,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
CAPS_HI_FW_REQUEST,
CAPS_HI_RESERVED6,
CAPS_HI_RESERVED7,
CAPS_HI_RESERVED8 = 15,
CAPS_HI_RESERVED9,
CAPS_HI_PHY_LOG,
CAPS_HI_EEE_AUTO_DISABLE_SETTINGS,
CAPS_HI_PFC = 15,
CAPS_HI_WAKE_ON_LINK,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
CAPS_HI_PTP_AVB_EN_FW2X = 20,
CAPS_HI_MEDIA_DETECT,
CAPS_HI_THERMAL_SHUTDOWN,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
......
......@@ -55,6 +55,8 @@
#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE)
#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE)
#define HW_ATL_FW2X_CAP_MACSEC BIT(CAPS_LO_MACSEC)
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
......@@ -86,6 +88,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
static u32 aq_fw2x_state_get(struct aq_hw_s *self);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
......@@ -619,11 +622,75 @@ static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
return err;
}
/* Read the FW2x MPI state register (0x370). */
static u32 aq_fw2x_state_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
}
/* Read the FW2x MPI state2 register (0x374, see hw_atl_fw2x_caps_hi). */
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
/* Fetch the low capabilities word (info.caps_lo) from the FW mailbox.
 * Returns 0 - i.e. no capabilities - if the mailbox read fails.
 */
static u32 aq_fw2x_get_link_capabilities(struct aq_hw_s *self)
{
	u32 caps_offset;
	u32 caps = 0;

	caps_offset = self->mbox_addr +
		      offsetof(struct hw_atl_utils_mbox, info.caps_lo);

	if (hw_atl_utils_fw_downld_dwords(self, caps_offset, &caps, 1))
		return 0;

	return caps;
}
/* Send a MACSec request to the FW and read back its response.
 *
 * The request is written to FW config memory, then the CAPS_LO_MACSEC bit
 * in the MPI control register is toggled as a doorbell, and the state
 * register is polled until the FW acknowledges. Returns 0 on success,
 * -EINVAL on NULL args, -EOPNOTSUPP when the FW lacks MACSec, -EIO on
 * acknowledge timeout, or a mailbox read/write error.
 */
static int aq_fw2x_send_macsec_req(struct aq_hw_s *hw,
				   struct macsec_msg_fw_request *req,
				   struct macsec_msg_fw_response *response)
{
	u32 low_status, low_req = 0;
	u32 dword_cnt;
	u32 caps_lo;
	u32 offset;
	int err;

	if (!req || !response)
		return -EINVAL;

	caps_lo = aq_fw2x_get_link_capabilities(hw);
	if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
		return -EOPNOTSUPP;

	/* Write macsec request to cfg memory */
	dword_cnt = (sizeof(*req) + sizeof(u32) - 1) / sizeof(u32);
	err = hw_atl_write_fwcfg_dwords(hw, (void *)req, dword_cnt);
	if (err < 0)
		return err;

	/* Toggle 0x368.CAPS_LO_MACSEC bit */
	low_req = aq_hw_read_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR);
	low_req ^= HW_ATL_FW2X_CAP_MACSEC;
	aq_hw_write_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR, low_req);

	/* Wait FW to report back */
	/* NOTE(review): the exit condition compares the whole control word
	 * against the masked MACSEC bit of the state word; it looks like
	 * both sides were meant to be masked with BIT(CAPS_LO_MACSEC) -
	 * confirm against the FW interface spec before changing. */
	err = readx_poll_timeout_atomic(aq_fw2x_state_get, hw, low_status,
		low_req != (low_status & BIT(CAPS_LO_MACSEC)), 1U, 10000U);
	if (err)
		return -EIO;

	/* Read status of write operation */
	offset = hw->rpc_addr + sizeof(u32);
	err = hw_atl_utils_fw_downld_dwords(hw, offset, (u32 *)(void *)response,
					    sizeof(*response) / sizeof(u32));

	return err;
}
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
......@@ -645,4 +712,6 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
.adjust_ptp = aq_fw3x_adjust_ptp,
.get_link_capabilities = aq_fw2x_get_link_capabilities,
.send_macsec_req = aq_fw2x_send_macsec_req,
};
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef MSS_EGRESS_REGS_HEADER
#define MSS_EGRESS_REGS_HEADER
#define MSS_EGRESS_CTL_REGISTER_ADDR 0x00005002
#define MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR 0x00005060
#define MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR 0x00005062
#define MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00005080
#define MSS_EGRESS_LUT_CTL_REGISTER_ADDR 0x00005081
#define MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000050A0
/* Layout of the MSS egress control register (two 16-bit words at
 * MSS_EGRESS_CTL_REGISTER_ADDR). Bitfield layout mirrors the HW register;
 * access via the word_N members when writing the raw value.
 */
struct mss_egress_ctl_register {
	union {
		struct {
			unsigned int soft_reset : 1;
			unsigned int drop_kay_packet : 1;
			unsigned int drop_egprc_lut_miss : 1;
			unsigned int gcm_start : 1;
			unsigned int gcm_test_mode : 1;
			unsigned int unmatched_use_sc_0 : 1;
			unsigned int drop_invalid_sa_sc_packets : 1;
			unsigned int reserved0 : 1;
			/* Should always be set to 0. */
			unsigned int external_classification_enable : 1;
			unsigned int icv_lsb_8bytes_enable : 1;
			unsigned int high_prio : 1;
			unsigned int clear_counter : 1;
			unsigned int clear_global_time : 1;
			unsigned int ethertype_explicit_sectag_lsb : 3;
		} bits_0;
		unsigned short word_0;
	};
	union {
		struct {
			unsigned int ethertype_explicit_sectag_msb : 13;
			unsigned int reserved0 : 3;
		} bits_1;
		unsigned short word_1;
	};
};
/* Egress LUT address/select register: chooses which egress table
 * (lut_select) and which row (lut_addr) the next LUT data access hits.
 */
struct mss_egress_lut_addr_ctl_register {
    union {
        struct {
            unsigned int lut_addr : 9;
            unsigned int reserved0 : 3;
            /* 0x0 : Egress MAC Control Filter (CTLF) LUT
             * 0x1 : Egress Classification LUT
             * 0x2 : Egress SC/SA LUT
             * 0x3 : Egress SMIB
             */
            unsigned int lut_select : 4;
        } bits_0;
        unsigned short word_0;
    };
};
/* Egress LUT command register: setting lut_read or lut_write triggers
 * the corresponding operation on the row previously selected via the
 * address/select register.
 */
struct mss_egress_lut_ctl_register {
    union {
        struct {
            unsigned int reserved0 : 14;
            unsigned int lut_read : 1;
            unsigned int lut_write : 1;
        } bits_0;
        unsigned short word_0;
    };
};
#endif /* MSS_EGRESS_REGS_HEADER */
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef MSS_INGRESS_REGS_HEADER
#define MSS_INGRESS_REGS_HEADER
#define MSS_INGRESS_CTL_REGISTER_ADDR 0x0000800E
#define MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00008080
#define MSS_INGRESS_LUT_CTL_REGISTER_ADDR 0x00008081
#define MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000080A0
/* Ingress path control register (two adjacent 16-bit MDIO words).
 * Same bitfield/word-union access pattern as the egress control
 * register; word_1 is entirely reserved.
 */
struct mss_ingress_ctl_register {
    union {
        struct {
            unsigned int soft_reset : 1;
            unsigned int operation_point_to_point : 1;
            unsigned int create_sci : 1;
            /* Unused */
            unsigned int mask_short_length_error : 1;
            unsigned int drop_kay_packet : 1;
            unsigned int drop_igprc_miss : 1;
            /* Unused */
            unsigned int check_icv : 1;
            unsigned int clear_global_time : 1;
            unsigned int clear_count : 1;
            unsigned int high_prio : 1;
            unsigned int remove_sectag : 1;
            unsigned int global_validate_frames : 2;
            unsigned int icv_lsb_8bytes_enabled : 1;
            unsigned int reserved0 : 2;
        } bits_0;
        unsigned short word_0;
    };
    union {
        struct {
            unsigned int reserved0 : 16;
        } bits_1;
        unsigned short word_1;
    };
};
/* Ingress LUT address/select register: chooses which ingress table
 * (lut_select) and which row (lut_addr) the next LUT data access hits.
 */
struct mss_ingress_lut_addr_ctl_register {
    union {
        struct {
            unsigned int lut_addr : 9;
            unsigned int reserved0 : 3;
            /* 0x0 : Ingress Pre-Security MAC Control Filter
             * (IGPRCTLF) LUT
             * 0x1 : Ingress Pre-Security Classification LUT (IGPRC)
             * 0x2 : Ingress Packet Format (IGPFMT) SAKey LUT
             * 0x3 : Ingress Packet Format (IGPFMT) SC/SA LUT
             * 0x4 : Ingress Post-Security Classification LUT
             * (IGPOC)
             * 0x5 : Ingress Post-Security MAC Control Filter
             * (IGPOCTLF) LUT
             * 0x6 : Ingress MIB (IGMIB)
             */
            unsigned int lut_select : 4;
        } bits_0;
        unsigned short word_0;
    };
};
/* Ingress LUT command register: setting lut_read or lut_write triggers
 * the corresponding operation on the row previously selected via the
 * address/select register.
 */
struct mss_ingress_lut_ctl_register {
    union {
        struct {
            unsigned int reserved0 : 14;
            unsigned int lut_read : 1;
            unsigned int lut_write : 1;
        } bits_0;
        unsigned short word_0;
    };
};
#endif /* MSS_INGRESS_REGS_HEADER */
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "macsec_api.h"
#include <linux/mdio.h>
#include "MSS_Ingress_registers.h"
#include "MSS_Egress_registers.h"
#include "aq_phy.h"
/* Run func(...) with the FW/driver MDIO semaphore held.
 * Expands to a GNU statement expression whose value is the
 * semaphore-acquire error (func not called) or func's return value.
 * NOTE(review): implicitly uses the caller's local 'hw' variable and
 * declares its own 'ret' — callers must not pass an argument named
 * 'ret' whose value is computed inside the expression.
 */
#define AQ_API_CALL_SAFE(func, ...) \
({ \
    int ret; \
    do { \
        ret = aq_mss_mdio_sem_get(hw); \
        if (unlikely(ret)) \
            break; \
 \
        ret = func(__VA_ARGS__); \
 \
        aq_mss_mdio_sem_put(hw); \
    } while (0); \
    ret; \
})
/*******************************************************************************
* MDIO wrappers
******************************************************************************/
/* Acquire the MDIO hardware semaphore shared with the firmware.
 * Atomically polls the semaphore register (10us period, 100ms budget)
 * until it reads 1. Returns 0 on success or the poll helper's
 * negative timeout error.
 */
static int aq_mss_mdio_sem_get(struct aq_hw_s *hw)
{
    u32 val;

    return readx_poll_timeout_atomic(hw_atl_sem_mdio_get, hw, val,
                     val == 1U, 10U, 100000U);
}
/* Release the MDIO hardware semaphore taken by aq_mss_mdio_sem_get(). */
static void aq_mss_mdio_sem_put(struct aq_hw_s *hw)
{
    hw_atl_reg_glb_cpu_sem_set(hw, 1U, HW_ATL_FW_SM_MDIO);
}
/* Read one 16-bit PHY register via MDIO into *data.
 * NOTE(review): 0xffff is treated as "bus read failed" (-ETIME), so a
 * register legitimately holding 0xffff is indistinguishable from an
 * error — apparently acceptable for the MSS register ranges used here.
 */
static int aq_mss_mdio_read(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 *data)
{
    *data = aq_mdio_read_word(hw, mmd, addr);
    return (*data != 0xffff) ? 0 : -ETIME;
}
/* Write one 16-bit PHY register via MDIO.
 * Always returns 0: the underlying write reports no error, but the int
 * return keeps the signature symmetric with aq_mss_mdio_read() so
 * callers can check it uniformly.
 */
static int aq_mss_mdio_write(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 data)
{
    aq_mdio_write_word(hw, mmd, addr, data);
    return 0;
}
/*******************************************************************************
* MACSEC config and status
******************************************************************************/
/*! Write packed_record (num_words 16-bit words) to the specified
 * Ingress LUT table row. Returns 0 on success, negative on MDIO error
 * (propagated, matching get_raw_ingress_record()).
 */
static int set_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
                  u8 num_words, u8 table_id,
                  u16 table_index)
{
    /* Zero-initialize so the reserved bits of the control words are
     * written to the hardware as 0 rather than as stack garbage.
     */
    struct mss_ingress_lut_addr_ctl_register lut_sel_reg = { 0 };
    struct mss_ingress_lut_ctl_register lut_op_reg = { 0 };
    int ret;
    unsigned int i;

    /* NOTE: MSS registers must always be read/written as adjacent pairs.
     * For instance, to write either or both 1E.80A0 and 80A1, we have to:
     * 1. Write 1E.80A0 first
     * 2. Then write 1E.80A1
     *
     * For HHD devices: These writes need to be performed consecutively, and
     * to ensure this we use the PIF mailbox to delegate the reads/writes to
     * the FW.
     *
     * For EUR devices: Not need to use the PIF mailbox; it is safe to
     * write to the registers directly.
     */

    /* Write the packed record words to the data buffer registers. */
    for (i = 0; i < num_words; i += 2) {
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
                    packed_record[i]);
        if (unlikely(ret))
            return ret;
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i +
                    1,
                    packed_record[i + 1]);
        if (unlikely(ret))
            return ret;
    }

    /* Clear out the unused data buffer registers. */
    for (i = num_words; i < 24; i += 2) {
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
                    0);
        if (unlikely(ret))
            return ret;
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1, 0);
        if (unlikely(ret))
            return ret;
    }

    /* Select the table and row index to write to, then trigger the
     * LUT write.
     */
    lut_sel_reg.bits_0.lut_select = table_id;
    lut_sel_reg.bits_0.lut_addr = table_index;

    lut_op_reg.bits_0.lut_read = 0;
    lut_op_reg.bits_0.lut_write = 1;

    ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
                lut_sel_reg.word_0);
    if (unlikely(ret))
        return ret;

    return aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                 MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
                 lut_op_reg.word_0);
}
/*! Read the specified Ingress LUT table row.
 * packed_record - [OUT] The table row data (raw), num_words 16-bit
 * words. Returns 0 on success, negative on MDIO error.
 */
static int get_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
                  u8 num_words, u8 table_id,
                  u16 table_index)
{
    /* Zero-initialize so the reserved bits of the control words are
     * written to the hardware as 0 rather than as stack garbage.
     */
    struct mss_ingress_lut_addr_ctl_register lut_sel_reg = { 0 };
    struct mss_ingress_lut_ctl_register lut_op_reg = { 0 };
    int ret;
    unsigned int i;

    /* Select the table and row index to read */
    lut_sel_reg.bits_0.lut_select = table_id;
    lut_sel_reg.bits_0.lut_addr = table_index;

    lut_op_reg.bits_0.lut_read = 1;
    lut_op_reg.bits_0.lut_write = 0;

    ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
                lut_sel_reg.word_0);
    if (unlikely(ret))
        return ret;
    ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
                lut_op_reg.word_0);
    if (unlikely(ret))
        return ret;

    memset(packed_record, 0, sizeof(u16) * num_words);

    /* Data buffer registers must be read in adjacent pairs (low word
     * first) — see the pairing note in set_raw_ingress_record().
     */
    for (i = 0; i < num_words; i += 2) {
        ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
                       MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
                       i,
                       &packed_record[i]);
        if (unlikely(ret))
            return ret;
        ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
                       MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
                       i + 1,
                       &packed_record[i + 1]);
        if (unlikely(ret))
            return ret;
    }

    return 0;
}
/*! Write packed_record (num_words 16-bit words) to the specified
 * Egress LUT table row. Returns 0 on success, negative on MDIO error
 * (propagated, matching get_raw_egress_record()).
 */
static int set_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
                 u8 num_words, u8 table_id,
                 u16 table_index)
{
    /* Zero-initialize so the reserved bits of the control words are
     * written to the hardware as 0 rather than as stack garbage.
     */
    struct mss_egress_lut_addr_ctl_register lut_sel_reg = { 0 };
    struct mss_egress_lut_ctl_register lut_op_reg = { 0 };
    int ret;
    unsigned int i;

    /* Write the packed record words to the data buffer registers. */
    for (i = 0; i < num_words; i += 2) {
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
                    packed_record[i]);
        if (unlikely(ret))
            return ret;
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
                    packed_record[i + 1]);
        if (unlikely(ret))
            return ret;
    }

    /* Clear out the unused data buffer registers. */
    for (i = num_words; i < 28; i += 2) {
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i, 0);
        if (unlikely(ret))
            return ret;
        ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                    MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
                    0);
        if (unlikely(ret))
            return ret;
    }

    /* Select the table and row index to write to, then trigger the
     * LUT write.
     */
    lut_sel_reg.bits_0.lut_select = table_id;
    lut_sel_reg.bits_0.lut_addr = table_index;

    lut_op_reg.bits_0.lut_read = 0;
    lut_op_reg.bits_0.lut_write = 1;

    ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
                lut_sel_reg.word_0);
    if (unlikely(ret))
        return ret;

    return aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                 MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
                 lut_op_reg.word_0);
}
/*! Read the specified Egress LUT table row into packed_record
 * (num_words 16-bit words). Returns 0 on success, negative on MDIO
 * error.
 */
static int get_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
                 u8 num_words, u8 table_id,
                 u16 table_index)
{
    /* Zero-initialize so the reserved bits of the control words are
     * written to the hardware as 0 rather than as stack garbage.
     */
    struct mss_egress_lut_addr_ctl_register lut_sel_reg = { 0 };
    struct mss_egress_lut_ctl_register lut_op_reg = { 0 };
    int ret;
    unsigned int i;

    /* Select the table and row index to read */
    lut_sel_reg.bits_0.lut_select = table_id;
    lut_sel_reg.bits_0.lut_addr = table_index;

    lut_op_reg.bits_0.lut_read = 1;
    lut_op_reg.bits_0.lut_write = 0;

    ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
                lut_sel_reg.word_0);
    if (unlikely(ret))
        return ret;
    ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
                MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
                lut_op_reg.word_0);
    if (unlikely(ret))
        return ret;

    memset(packed_record, 0, sizeof(u16) * num_words);

    /* Data buffer registers must be read in adjacent pairs (low word
     * first) — see the pairing note in set_raw_ingress_record().
     */
    for (i = 0; i < num_words; i += 2) {
        ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
                       MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
                       i,
                       &packed_record[i]);
        if (unlikely(ret))
            return ret;
        ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
                       MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
                       i + 1,
                       &packed_record[i + 1]);
        if (unlikely(ret))
            return ret;
    }

    return 0;
}
/*! Pack an Ingress Pre-Security CTL Filter record and write it to
 * LUT table 0 at the given row. Returns -EINVAL on a bad row index,
 * otherwise the raw-write result.
 */
static int
set_ingress_prectlf_record(struct aq_hw_s *hw,
               const struct aq_mss_ingress_prectlf_record *rec,
               u16 table_index)
{
    /* 6-word raw layout: words 0-2 = SA/DA, word 3 = ethertype,
     * word 4 = match mask, word 5 = match type (bits 0-3) | action
     * (bit 4).
     */
    u16 raw[6] = {
        rec->sa_da[0] & 0xFFFF,
        (rec->sa_da[0] >> 16) & 0xFFFF,
        rec->sa_da[1] & 0xFFFF,
        rec->eth_type & 0xFFFF,
        rec->match_mask & 0xFFFF,
        (rec->match_type & 0xF) | ((rec->action & 0x1) << 4),
    };

    if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
        return -EINVAL;

    return set_raw_ingress_record(hw, raw, 6, 0,
                      ROWOFFSET_INGRESSPRECTLFRECORD +
                      table_index);
}
/*! Public wrapper: write an Ingress Pre-Security CTL Filter record
 * under the MDIO semaphore. Returns 0 on success, negative on error.
 */
int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
                      const struct aq_mss_ingress_prectlf_record *rec,
                      u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_prectlf_record, hw, rec,
                   table_index);

    /* Warn like the other record setters so a failed MACSec
     * configuration write is visible in the log.
     */
    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress Pre-Security CTL Filter record from LUT table 0
 * and unpack it into @rec. Returns 0 on success, negative on a bad
 * row index or MDIO failure.
 */
static int get_ingress_prectlf_record(struct aq_hw_s *hw,
                      struct aq_mss_ingress_prectlf_record *rec,
                      u16 table_index)
{
    u16 packed_record[6];
    int ret;

    if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
        return -EINVAL;

    /* If the row that we want to read is odd, first read the previous even
     * row, throw that value away, and finally read the desired row.
     * This is a workaround for EUR devices that allows us to read
     * odd-numbered rows. For HHD devices: this workaround will not work,
     * so don't bother; odd-numbered rows are not readable.
     */
    if ((table_index % 2) > 0) {
        ret = get_raw_ingress_record(hw, packed_record, 6, 0,
                         ROWOFFSET_INGRESSPRECTLFRECORD +
                         table_index - 1);
        if (unlikely(ret))
            return ret;
    }

    ret = get_raw_ingress_record(hw, packed_record, 6, 0,
                     ROWOFFSET_INGRESSPRECTLFRECORD +
                     table_index);
    if (unlikely(ret))
        return ret;

    /* Unpack: inverse of the layout in set_ingress_prectlf_record(). */
    rec->sa_da[0] = packed_record[0];
    rec->sa_da[0] |= packed_record[1] << 16;

    rec->sa_da[1] = packed_record[2];

    rec->eth_type = packed_record[3];

    rec->match_mask = packed_record[4];

    rec->match_type = packed_record[5] & 0xF;

    rec->action = (packed_record[5] >> 4) & 0x1;

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress Pre-Security CTL
 * Filter record under the MDIO semaphore.
 */
int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
                      struct aq_mss_ingress_prectlf_record *rec,
                      u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_prectlf_record, hw, rec,
                table_index);
}
/*! Pack an Ingress Pre-Security Classification (IGPRC) record into its
 * 20-word raw LUT layout and write it to table 1. The shift/mask
 * pattern below is the exact hardware record format; the inverse lives
 * in get_ingress_preclass_record().
 */
static int
set_ingress_preclass_record(struct aq_hw_s *hw,
                const struct aq_mss_ingress_preclass_record *rec,
                u16 table_index)
{
    u16 packed_record[20];

    if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
        return -EINVAL;

    memset(packed_record, 0, sizeof(u16) * 20);

    packed_record[0] = rec->sci[0] & 0xFFFF;
    packed_record[1] = (rec->sci[0] >> 16) & 0xFFFF;
    packed_record[2] = rec->sci[1] & 0xFFFF;
    packed_record[3] = (rec->sci[1] >> 16) & 0xFFFF;
    packed_record[4] = rec->tci & 0xFF;
    packed_record[4] |= (rec->encr_offset & 0xFF) << 8;
    packed_record[5] = rec->eth_type & 0xFFFF;
    packed_record[6] = rec->snap[0] & 0xFFFF;
    packed_record[7] = (rec->snap[0] >> 16) & 0xFFFF;
    /* llc (24 bits) straddles words 8 and 9. */
    packed_record[8] = rec->snap[1] & 0xFF;
    packed_record[8] |= (rec->llc & 0xFF) << 8;
    packed_record[9] = (rec->llc >> 8) & 0xFFFF;
    packed_record[10] = rec->mac_sa[0] & 0xFFFF;
    packed_record[11] = (rec->mac_sa[0] >> 16) & 0xFFFF;
    packed_record[12] = rec->mac_sa[1] & 0xFFFF;
    packed_record[13] = rec->mac_da[0] & 0xFFFF;
    packed_record[14] = (rec->mac_da[0] >> 16) & 0xFFFF;
    packed_record[15] = rec->mac_da[1] & 0xFFFF;
    /* Match-mask and control bits, packed across words 16-19. */
    packed_record[16] = rec->lpbk_packet & 0x1;
    packed_record[16] |= (rec->an_mask & 0x3) << 1;
    packed_record[16] |= (rec->tci_mask & 0x3F) << 3;
    packed_record[16] |= (rec->sci_mask & 0x7F) << 9;
    packed_record[17] = (rec->sci_mask >> 7) & 0x1;
    packed_record[17] |= (rec->eth_type_mask & 0x3) << 1;
    packed_record[17] |= (rec->snap_mask & 0x1F) << 3;
    packed_record[17] |= (rec->llc_mask & 0x7) << 8;
    packed_record[17] |= (rec->_802_2_encapsulate & 0x1) << 11;
    packed_record[17] |= (rec->sa_mask & 0xF) << 12;
    packed_record[18] = (rec->sa_mask >> 4) & 0x3;
    packed_record[18] |= (rec->da_mask & 0x3F) << 2;
    packed_record[18] |= (rec->lpbk_mask & 0x1) << 8;
    packed_record[18] |= (rec->sc_idx & 0x1F) << 9;
    packed_record[18] |= (rec->proc_dest & 0x1) << 14;
    packed_record[18] |= (rec->action & 0x1) << 15;
    packed_record[19] = (rec->action >> 1) & 0x1;
    packed_record[19] |= (rec->ctrl_unctrl & 0x1) << 1;
    packed_record[19] |= (rec->sci_from_table & 0x1) << 2;
    packed_record[19] |= (rec->reserved & 0xF) << 3;
    packed_record[19] |= (rec->valid & 0x1) << 7;

    return set_raw_ingress_record(hw, packed_record, 20, 1,
                      ROWOFFSET_INGRESSPRECLASSRECORD +
                      table_index);
}
/*! Public wrapper: write an Ingress Pre-Security Classification record
 * under the MDIO semaphore; warns once in the log on failure.
 */
int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
                       const struct aq_mss_ingress_preclass_record *rec,
                       u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_preclass_record, hw, rec,
                   table_index);

    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress Pre-Security Classification (IGPRC) record from LUT
 * table 1 and unpack it into @rec — the exact inverse of
 * set_ingress_preclass_record(). Returns 0 on success, negative on a
 * bad row index or MDIO failure.
 */
static int
get_ingress_preclass_record(struct aq_hw_s *hw,
                struct aq_mss_ingress_preclass_record *rec,
                u16 table_index)
{
    u16 packed_record[20];
    int ret;

    if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
        return -EINVAL;

    /* If the row that we want to read is odd, first read the previous even
     * row, throw that value away, and finally read the desired row.
     */
    if ((table_index % 2) > 0) {
        ret = get_raw_ingress_record(hw, packed_record, 20, 1,
                         ROWOFFSET_INGRESSPRECLASSRECORD +
                         table_index - 1);
        if (unlikely(ret))
            return ret;
    }

    ret = get_raw_ingress_record(hw, packed_record, 20, 1,
                     ROWOFFSET_INGRESSPRECLASSRECORD +
                     table_index);
    if (unlikely(ret))
        return ret;

    rec->sci[0] = packed_record[0];
    rec->sci[0] |= packed_record[1] << 16;

    rec->sci[1] = packed_record[2];
    rec->sci[1] |= packed_record[3] << 16;

    rec->tci = packed_record[4] & 0xFF;

    rec->encr_offset = (packed_record[4] >> 8) & 0xFF;

    rec->eth_type = packed_record[5];

    rec->snap[0] = packed_record[6];
    rec->snap[0] |= packed_record[7] << 16;

    rec->snap[1] = packed_record[8] & 0xFF;

    /* llc is split across words 8 and 9 (see the set path); the high
     * part must be OR-ed in, not assigned, or the low byte is lost.
     * (Fixes: 'rec->llc = packed_record[9] << 8;')
     */
    rec->llc = (packed_record[8] >> 8) & 0xFF;
    rec->llc |= packed_record[9] << 8;

    rec->mac_sa[0] = packed_record[10];
    rec->mac_sa[0] |= packed_record[11] << 16;

    rec->mac_sa[1] = packed_record[12];

    rec->mac_da[0] = packed_record[13];
    rec->mac_da[0] |= packed_record[14] << 16;

    rec->mac_da[1] = packed_record[15];

    rec->lpbk_packet = packed_record[16] & 0x1;

    rec->an_mask = (packed_record[16] >> 1) & 0x3;

    rec->tci_mask = (packed_record[16] >> 3) & 0x3F;

    rec->sci_mask = (packed_record[16] >> 9) & 0x7F;
    rec->sci_mask |= (packed_record[17] & 0x1) << 7;

    rec->eth_type_mask = (packed_record[17] >> 1) & 0x3;

    rec->snap_mask = (packed_record[17] >> 3) & 0x1F;

    rec->llc_mask = (packed_record[17] >> 8) & 0x7;

    rec->_802_2_encapsulate = (packed_record[17] >> 11) & 0x1;

    rec->sa_mask = (packed_record[17] >> 12) & 0xF;
    rec->sa_mask |= (packed_record[18] & 0x3) << 4;

    rec->da_mask = (packed_record[18] >> 2) & 0x3F;

    rec->lpbk_mask = (packed_record[18] >> 8) & 0x1;

    rec->sc_idx = (packed_record[18] >> 9) & 0x1F;

    rec->proc_dest = (packed_record[18] >> 14) & 0x1;

    rec->action = (packed_record[18] >> 15) & 0x1;
    rec->action |= (packed_record[19] & 0x1) << 1;

    rec->ctrl_unctrl = (packed_record[19] >> 1) & 0x1;

    rec->sci_from_table = (packed_record[19] >> 2) & 0x1;

    rec->reserved = (packed_record[19] >> 3) & 0xF;

    rec->valid = (packed_record[19] >> 7) & 0x1;

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress Pre-Security
 * Classification record under the MDIO semaphore.
 */
int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
                       struct aq_mss_ingress_preclass_record *rec,
                       u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_preclass_record, hw, rec,
                table_index);
}
/*! Pack an Ingress SC (secure channel) record into its 8-word raw
 * layout and write it to LUT table 3. The anti_replay_window field
 * (32 bits) straddles words 4-6.
 */
static int set_ingress_sc_record(struct aq_hw_s *hw,
                 const struct aq_mss_ingress_sc_record *rec,
                 u16 table_index)
{
    u16 packed_record[8];

    if (table_index >= NUMROWS_INGRESSSCRECORD)
        return -EINVAL;

    memset(packed_record, 0, sizeof(u16) * 8);

    packed_record[0] = rec->stop_time & 0xFFFF;
    packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;

    packed_record[2] = rec->start_time & 0xFFFF;
    packed_record[3] = (rec->start_time >> 16) & 0xFFFF;

    packed_record[4] = rec->validate_frames & 0x3;
    packed_record[4] |= (rec->replay_protect & 0x1) << 2;
    packed_record[4] |= (rec->anti_replay_window & 0x1FFF) << 3;
    packed_record[5] = (rec->anti_replay_window >> 13) & 0xFFFF;
    packed_record[6] = (rec->anti_replay_window >> 29) & 0x7;
    packed_record[6] |= (rec->receiving & 0x1) << 3;
    packed_record[6] |= (rec->fresh & 0x1) << 4;
    packed_record[6] |= (rec->an_rol & 0x1) << 5;
    packed_record[6] |= (rec->reserved & 0x3FF) << 6;
    packed_record[7] = (rec->reserved >> 10) & 0x7FFF;
    packed_record[7] |= (rec->valid & 0x1) << 15;

    return set_raw_ingress_record(hw, packed_record, 8, 3,
                      ROWOFFSET_INGRESSSCRECORD + table_index);
}
/*! Public wrapper: write an Ingress SC record under the MDIO
 * semaphore; warns once in the log on failure.
 */
int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
                 const struct aq_mss_ingress_sc_record *rec,
                 u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_sc_record, hw, rec, table_index);

    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress SC record from LUT table 3 and unpack it into
 * @rec — inverse of set_ingress_sc_record(). No odd-row workaround
 * here: SC rows are read directly.
 */
static int get_ingress_sc_record(struct aq_hw_s *hw,
                 struct aq_mss_ingress_sc_record *rec,
                 u16 table_index)
{
    u16 packed_record[8];
    int ret;

    if (table_index >= NUMROWS_INGRESSSCRECORD)
        return -EINVAL;

    ret = get_raw_ingress_record(hw, packed_record, 8, 3,
                     ROWOFFSET_INGRESSSCRECORD + table_index);
    if (unlikely(ret))
        return ret;

    rec->stop_time = packed_record[0];
    rec->stop_time |= packed_record[1] << 16;

    rec->start_time = packed_record[2];
    rec->start_time |= packed_record[3] << 16;

    rec->validate_frames = packed_record[4] & 0x3;

    rec->replay_protect = (packed_record[4] >> 2) & 0x1;

    /* Reassemble the 32-bit window from words 4 (13 bits), 5 (16 bits)
     * and 6 (3 bits).
     */
    rec->anti_replay_window = (packed_record[4] >> 3) & 0x1FFF;
    rec->anti_replay_window |= packed_record[5] << 13;
    rec->anti_replay_window |= (packed_record[6] & 0x7) << 29;

    rec->receiving = (packed_record[6] >> 3) & 0x1;

    rec->fresh = (packed_record[6] >> 4) & 0x1;

    rec->an_rol = (packed_record[6] >> 5) & 0x1;

    rec->reserved = (packed_record[6] >> 6) & 0x3FF;
    rec->reserved |= (packed_record[7] & 0x7FFF) << 10;

    rec->valid = (packed_record[7] >> 15) & 0x1;

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress SC record under
 * the MDIO semaphore.
 */
int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
                 struct aq_mss_ingress_sc_record *rec,
                 u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_sc_record, hw, rec, table_index);
}
/*! Pack an Ingress SA (secure association) record into its 8-word raw
 * layout and write it to LUT table 3 (SA rows live after the SC rows,
 * hence ROWOFFSET_INGRESSSARECORD).
 */
static int set_ingress_sa_record(struct aq_hw_s *hw,
                 const struct aq_mss_ingress_sa_record *rec,
                 u16 table_index)
{
    u16 packed_record[8];

    if (table_index >= NUMROWS_INGRESSSARECORD)
        return -EINVAL;

    memset(packed_record, 0, sizeof(u16) * 8);

    packed_record[0] = rec->stop_time & 0xFFFF;
    packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;

    packed_record[2] = rec->start_time & 0xFFFF;
    packed_record[3] = (rec->start_time >> 16) & 0xFFFF;

    packed_record[4] = rec->next_pn & 0xFFFF;
    packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;

    packed_record[6] = rec->sat_nextpn & 0x1;
    packed_record[6] |= (rec->in_use & 0x1) << 1;
    packed_record[6] |= (rec->fresh & 0x1) << 2;
    packed_record[6] |= (rec->reserved & 0x1FFF) << 3;
    packed_record[7] = (rec->reserved >> 13) & 0x7FFF;
    packed_record[7] |= (rec->valid & 0x1) << 15;

    return set_raw_ingress_record(hw, packed_record, 8, 3,
                      ROWOFFSET_INGRESSSARECORD + table_index);
}
/*! Public wrapper: write an Ingress SA record under the MDIO
 * semaphore; warns once in the log on failure.
 */
int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
                 const struct aq_mss_ingress_sa_record *rec,
                 u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_sa_record, hw, rec, table_index);

    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress SA record from LUT table 3 and unpack it into
 * @rec — inverse of set_ingress_sa_record().
 */
static int get_ingress_sa_record(struct aq_hw_s *hw,
                 struct aq_mss_ingress_sa_record *rec,
                 u16 table_index)
{
    u16 packed_record[8];
    int ret;

    if (table_index >= NUMROWS_INGRESSSARECORD)
        return -EINVAL;

    ret = get_raw_ingress_record(hw, packed_record, 8, 3,
                     ROWOFFSET_INGRESSSARECORD + table_index);
    if (unlikely(ret))
        return ret;

    rec->stop_time = packed_record[0];
    rec->stop_time |= packed_record[1] << 16;

    rec->start_time = packed_record[2];
    rec->start_time |= packed_record[3] << 16;

    rec->next_pn = packed_record[4];
    rec->next_pn |= packed_record[5] << 16;

    rec->sat_nextpn = packed_record[6] & 0x1;

    rec->in_use = (packed_record[6] >> 1) & 0x1;

    rec->fresh = (packed_record[6] >> 2) & 0x1;

    rec->reserved = (packed_record[6] >> 3) & 0x1FFF;
    rec->reserved |= (packed_record[7] & 0x7FFF) << 13;

    rec->valid = (packed_record[7] >> 15) & 0x1;

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress SA record under
 * the MDIO semaphore.
 */
int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
                 struct aq_mss_ingress_sa_record *rec,
                 u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_sa_record, hw, rec, table_index);
}
/*! Pack an Ingress SA key record (up to 256-bit key + key_len) into
 * its 18-word raw layout and write it to LUT table 2 (SAKey LUT).
 */
static int
set_ingress_sakey_record(struct aq_hw_s *hw,
             const struct aq_mss_ingress_sakey_record *rec,
             u16 table_index)
{
    u16 packed_record[18];

    if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
        return -EINVAL;

    memset(packed_record, 0, sizeof(u16) * 18);

    /* Key material: eight 32-bit words split into 16-bit halves. */
    packed_record[0] = rec->key[0] & 0xFFFF;
    packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;

    packed_record[2] = rec->key[1] & 0xFFFF;
    packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;

    packed_record[4] = rec->key[2] & 0xFFFF;
    packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;

    packed_record[6] = rec->key[3] & 0xFFFF;
    packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;

    packed_record[8] = rec->key[4] & 0xFFFF;
    packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;

    packed_record[10] = rec->key[5] & 0xFFFF;
    packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;

    packed_record[12] = rec->key[6] & 0xFFFF;
    packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;

    packed_record[14] = rec->key[7] & 0xFFFF;
    packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;

    packed_record[16] = rec->key_len & 0x3;

    return set_raw_ingress_record(hw, packed_record, 18, 2,
                      ROWOFFSET_INGRESSSAKEYRECORD +
                      table_index);
}
/*! Public wrapper: write an Ingress SA key record under the MDIO
 * semaphore; warns once in the log on failure.
 */
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
                    const struct aq_mss_ingress_sakey_record *rec,
                    u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_sakey_record, hw, rec,
                   table_index);

    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress SA key record from LUT table 2 and unpack it into
 * @rec — inverse of set_ingress_sakey_record().
 */
static int get_ingress_sakey_record(struct aq_hw_s *hw,
                    struct aq_mss_ingress_sakey_record *rec,
                    u16 table_index)
{
    u16 packed_record[18];
    int ret;

    if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
        return -EINVAL;

    ret = get_raw_ingress_record(hw, packed_record, 18, 2,
                     ROWOFFSET_INGRESSSAKEYRECORD +
                     table_index);
    if (unlikely(ret))
        return ret;

    rec->key[0] = packed_record[0];
    rec->key[0] |= packed_record[1] << 16;

    rec->key[1] = packed_record[2];
    rec->key[1] |= packed_record[3] << 16;

    rec->key[2] = packed_record[4];
    rec->key[2] |= packed_record[5] << 16;

    rec->key[3] = packed_record[6];
    rec->key[3] |= packed_record[7] << 16;

    rec->key[4] = packed_record[8];
    rec->key[4] |= packed_record[9] << 16;

    rec->key[5] = packed_record[10];
    rec->key[5] |= packed_record[11] << 16;

    rec->key[6] = packed_record[12];
    rec->key[6] |= packed_record[13] << 16;

    rec->key[7] = packed_record[14];
    rec->key[7] |= packed_record[15] << 16;

    /* Read-modify-write of key_len preserves its upper bits; they are
     * zero in practice because the public wrapper memsets @rec first.
     * NOTE(review): a direct caller with an uninitialized @rec would
     * keep garbage in bits 2-31 — confirm only the wrapper calls this.
     */
    rec->key_len = (rec->key_len & 0xFFFFFFFC) |
               (packed_record[16] & 0x3);

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress SA key record
 * under the MDIO semaphore.
 */
int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
                    struct aq_mss_ingress_sakey_record *rec,
                    u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_sakey_record, hw, rec, table_index);
}
/*! Pack an Ingress Post-Security Classification (IGPOC) record into
 * its 8-word raw layout and write it to LUT table 4. The shift/mask
 * pattern is the exact hardware record format; the inverse lives in
 * get_ingress_postclass_record().
 */
static int
set_ingress_postclass_record(struct aq_hw_s *hw,
                 const struct aq_mss_ingress_postclass_record *rec,
                 u16 table_index)
{
    u16 packed_record[8];

    if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
        return -EINVAL;

    memset(packed_record, 0, sizeof(u16) * 8);

    packed_record[0] = rec->byte0 & 0xFF;
    packed_record[0] |= (rec->byte1 & 0xFF) << 8;

    packed_record[1] = rec->byte2 & 0xFF;
    packed_record[1] |= (rec->byte3 & 0xFF) << 8;

    packed_record[2] = rec->eth_type & 0xFFFF;

    packed_record[3] = rec->eth_type_valid & 0x1;
    packed_record[3] |= (rec->vlan_id & 0xFFF) << 1;
    packed_record[3] |= (rec->vlan_up & 0x7) << 13;

    packed_record[4] = rec->vlan_valid & 0x1;
    packed_record[4] |= (rec->sai & 0x1F) << 1;
    packed_record[4] |= (rec->sai_hit & 0x1) << 6;
    packed_record[4] |= (rec->eth_type_mask & 0xF) << 7;
    /* byte locations/masks straddle word boundaries from here on. */
    packed_record[4] |= (rec->byte3_location & 0x1F) << 11;
    packed_record[5] = (rec->byte3_location >> 5) & 0x1;
    packed_record[5] |= (rec->byte3_mask & 0x3) << 1;
    packed_record[5] |= (rec->byte2_location & 0x3F) << 3;
    packed_record[5] |= (rec->byte2_mask & 0x3) << 9;
    packed_record[5] |= (rec->byte1_location & 0x1F) << 11;
    packed_record[6] = (rec->byte1_location >> 5) & 0x1;
    packed_record[6] |= (rec->byte1_mask & 0x3) << 1;
    packed_record[6] |= (rec->byte0_location & 0x3F) << 3;
    packed_record[6] |= (rec->byte0_mask & 0x3) << 9;
    packed_record[6] |= (rec->eth_type_valid_mask & 0x3) << 11;
    packed_record[6] |= (rec->vlan_id_mask & 0x7) << 13;
    packed_record[7] = (rec->vlan_id_mask >> 3) & 0x1;
    packed_record[7] |= (rec->vlan_up_mask & 0x3) << 1;
    packed_record[7] |= (rec->vlan_valid_mask & 0x3) << 3;
    packed_record[7] |= (rec->sai_mask & 0x3) << 5;
    packed_record[7] |= (rec->sai_hit_mask & 0x3) << 7;
    packed_record[7] |= (rec->firstlevel_actions & 0x1) << 9;
    packed_record[7] |= (rec->secondlevel_actions & 0x1) << 10;
    packed_record[7] |= (rec->reserved & 0xF) << 11;
    packed_record[7] |= (rec->valid & 0x1) << 15;

    return set_raw_ingress_record(hw, packed_record, 8, 4,
                      ROWOFFSET_INGRESSPOSTCLASSRECORD +
                      table_index);
}
/*! Public wrapper: write an Ingress Post-Security Classification
 * record under the MDIO semaphore. Returns 0 on success, negative on
 * error.
 */
int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
                    const struct aq_mss_ingress_postclass_record *rec,
                    u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_postclass_record, hw, rec,
                   table_index);

    /* Warn like the other record setters so a failed MACSec
     * configuration write is visible in the log.
     */
    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress Post-Security Classification (IGPOC) record from
 * LUT table 4 and unpack it into @rec — inverse of
 * set_ingress_postclass_record().
 */
static int
get_ingress_postclass_record(struct aq_hw_s *hw,
                 struct aq_mss_ingress_postclass_record *rec,
                 u16 table_index)
{
    u16 packed_record[8];
    int ret;

    if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
        return -EINVAL;

    /* If the row that we want to read is odd, first read the previous even
     * row, throw that value away, and finally read the desired row.
     */
    if ((table_index % 2) > 0) {
        ret = get_raw_ingress_record(hw, packed_record, 8, 4,
                         ROWOFFSET_INGRESSPOSTCLASSRECORD +
                         table_index - 1);
        if (unlikely(ret))
            return ret;
    }

    ret = get_raw_ingress_record(hw, packed_record, 8, 4,
                     ROWOFFSET_INGRESSPOSTCLASSRECORD +
                     table_index);
    if (unlikely(ret))
        return ret;

    rec->byte0 = packed_record[0] & 0xFF;

    rec->byte1 = (packed_record[0] >> 8) & 0xFF;

    rec->byte2 = packed_record[1] & 0xFF;

    rec->byte3 = (packed_record[1] >> 8) & 0xFF;

    rec->eth_type = packed_record[2];

    rec->eth_type_valid = packed_record[3] & 0x1;

    rec->vlan_id = (packed_record[3] >> 1) & 0xFFF;

    rec->vlan_up = (packed_record[3] >> 13) & 0x7;

    rec->vlan_valid = packed_record[4] & 0x1;

    rec->sai = (packed_record[4] >> 1) & 0x1F;

    rec->sai_hit = (packed_record[4] >> 6) & 0x1;

    rec->eth_type_mask = (packed_record[4] >> 7) & 0xF;

    /* Fields below straddle word boundaries (see the set path). */
    rec->byte3_location = (packed_record[4] >> 11) & 0x1F;
    rec->byte3_location |= (packed_record[5] & 0x1) << 5;

    rec->byte3_mask = (packed_record[5] >> 1) & 0x3;

    rec->byte2_location = (packed_record[5] >> 3) & 0x3F;

    rec->byte2_mask = (packed_record[5] >> 9) & 0x3;

    rec->byte1_location = (packed_record[5] >> 11) & 0x1F;
    rec->byte1_location |= (packed_record[6] & 0x1) << 5;

    rec->byte1_mask = (packed_record[6] >> 1) & 0x3;

    rec->byte0_location = (packed_record[6] >> 3) & 0x3F;

    rec->byte0_mask = (packed_record[6] >> 9) & 0x3;

    rec->eth_type_valid_mask = (packed_record[6] >> 11) & 0x3;

    rec->vlan_id_mask = (packed_record[6] >> 13) & 0x7;
    rec->vlan_id_mask |= (packed_record[7] & 0x1) << 3;

    rec->vlan_up_mask = (packed_record[7] >> 1) & 0x3;

    rec->vlan_valid_mask = (packed_record[7] >> 3) & 0x3;

    rec->sai_mask = (packed_record[7] >> 5) & 0x3;

    rec->sai_hit_mask = (packed_record[7] >> 7) & 0x3;

    rec->firstlevel_actions = (packed_record[7] >> 9) & 0x1;

    rec->secondlevel_actions = (packed_record[7] >> 10) & 0x1;

    rec->reserved = (packed_record[7] >> 11) & 0xF;

    rec->valid = (packed_record[7] >> 15) & 0x1;

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress Post-Security
 * Classification record under the MDIO semaphore.
 */
int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
                    struct aq_mss_ingress_postclass_record *rec,
                    u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_postclass_record, hw, rec,
                table_index);
}
/*! Pack an Ingress Post-Security CTL Filter record and write it to
 * LUT table 5 at the given row. Returns -EINVAL on a bad row index,
 * otherwise the raw-write result.
 */
static int
set_ingress_postctlf_record(struct aq_hw_s *hw,
                const struct aq_mss_ingress_postctlf_record *rec,
                u16 table_index)
{
    /* 6-word raw layout: words 0-2 = SA/DA, word 3 = ethertype,
     * word 4 = match mask, word 5 = match type (bits 0-3) | action
     * (bit 4).
     */
    u16 raw[6] = {
        rec->sa_da[0] & 0xFFFF,
        (rec->sa_da[0] >> 16) & 0xFFFF,
        rec->sa_da[1] & 0xFFFF,
        rec->eth_type & 0xFFFF,
        rec->match_mask & 0xFFFF,
        (rec->match_type & 0xF) | ((rec->action & 0x1) << 4),
    };

    if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
        return -EINVAL;

    return set_raw_ingress_record(hw, raw, 6, 5,
                      ROWOFFSET_INGRESSPOSTCTLFRECORD +
                      table_index);
}
/*! Public wrapper: write an Ingress Post-Security CTL Filter record
 * under the MDIO semaphore. Returns 0 on success, negative on error.
 */
int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
                       const struct aq_mss_ingress_postctlf_record *rec,
                       u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_ingress_postctlf_record, hw, rec,
                   table_index);

    /* Warn like the other record setters so a failed MACSec
     * configuration write is visible in the log.
     */
    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Ingress Post-Security CTL Filter record from LUT table 5
 * and unpack it into @rec — inverse of set_ingress_postctlf_record().
 */
static int
get_ingress_postctlf_record(struct aq_hw_s *hw,
                struct aq_mss_ingress_postctlf_record *rec,
                u16 table_index)
{
    u16 packed_record[6];
    int ret;

    if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
        return -EINVAL;

    /* If the row that we want to read is odd, first read the previous even
     * row, throw that value away, and finally read the desired row.
     */
    if ((table_index % 2) > 0) {
        ret = get_raw_ingress_record(hw, packed_record, 6, 5,
                         ROWOFFSET_INGRESSPOSTCTLFRECORD +
                         table_index - 1);
        if (unlikely(ret))
            return ret;
    }

    ret = get_raw_ingress_record(hw, packed_record, 6, 5,
                     ROWOFFSET_INGRESSPOSTCTLFRECORD +
                     table_index);
    if (unlikely(ret))
        return ret;

    rec->sa_da[0] = packed_record[0];
    rec->sa_da[0] |= packed_record[1] << 16;

    rec->sa_da[1] = packed_record[2];

    rec->eth_type = packed_record[3];

    rec->match_mask = packed_record[4];

    rec->match_type = packed_record[5] & 0xF;

    rec->action = (packed_record[5] >> 4) & 0x1;

    return 0;
}
/*! Public wrapper: clear @rec, then read an Ingress Post-Security CTL
 * Filter record under the MDIO semaphore.
 */
int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
                       struct aq_mss_ingress_postctlf_record *rec,
                       u16 table_index)
{
    memset(rec, 0, sizeof(*rec));

    return AQ_API_CALL_SAFE(get_ingress_postctlf_record, hw, rec,
                table_index);
}
/*! Pack an Egress CTL Filter record and write it to egress LUT
 * table 0 at the given row. Returns -EINVAL on a bad row index,
 * otherwise the raw-write result.
 */
static int set_egress_ctlf_record(struct aq_hw_s *hw,
                  const struct aq_mss_egress_ctlf_record *rec,
                  u16 table_index)
{
    /* 6-word raw layout: words 0-2 = SA/DA, word 3 = ethertype,
     * word 4 = match mask, word 5 = match type (bits 0-3) | action
     * (bit 4).
     */
    u16 raw[6] = {
        rec->sa_da[0] & 0xFFFF,
        (rec->sa_da[0] >> 16) & 0xFFFF,
        rec->sa_da[1] & 0xFFFF,
        rec->eth_type & 0xFFFF,
        rec->match_mask & 0xFFFF,
        (rec->match_type & 0xF) | ((rec->action & 0x1) << 4),
    };

    if (table_index >= NUMROWS_EGRESSCTLFRECORD)
        return -EINVAL;

    return set_raw_egress_record(hw, raw, 6, 0,
                     ROWOFFSET_EGRESSCTLFRECORD + table_index);
}
/*! Public wrapper: write an Egress CTL Filter record under the MDIO
 * semaphore. Returns 0 on success, negative on error.
 */
int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
                  const struct aq_mss_egress_ctlf_record *rec,
                  u16 table_index)
{
    int err = AQ_API_CALL_SAFE(set_egress_ctlf_record, hw, rec, table_index);

    /* Warn like the other record setters so a failed MACSec
     * configuration write is visible in the log.
     */
    WARN_ONCE(err, "%s failed with %d\n", __func__, err);

    return err;
}
/*! Read an Egress CTL Filter record from egress LUT table 0 and
 * unpack it into @rec — inverse of set_egress_ctlf_record().
 */
static int get_egress_ctlf_record(struct aq_hw_s *hw,
                  struct aq_mss_egress_ctlf_record *rec,
                  u16 table_index)
{
    u16 packed_record[6];
    int ret;

    if (table_index >= NUMROWS_EGRESSCTLFRECORD)
        return -EINVAL;

    /* If the row that we want to read is odd, first read the previous even
     * row, throw that value away, and finally read the desired row.
     */
    if ((table_index % 2) > 0) {
        ret = get_raw_egress_record(hw, packed_record, 6, 0,
                        ROWOFFSET_EGRESSCTLFRECORD +
                        table_index - 1);
        if (unlikely(ret))
            return ret;
    }

    ret = get_raw_egress_record(hw, packed_record, 6, 0,
                    ROWOFFSET_EGRESSCTLFRECORD + table_index);
    if (unlikely(ret))
        return ret;

    rec->sa_da[0] = packed_record[0];
    rec->sa_da[0] |= packed_record[1] << 16;

    rec->sa_da[1] = packed_record[2];

    rec->eth_type = packed_record[3];

    rec->match_mask = packed_record[4];

    rec->match_type = packed_record[5] & 0xF;

    rec->action = (packed_record[5] >> 4) & 0x1;

    return 0;
}
/* Public entry: clear the caller's record, then read one egress CTL filter
 * row through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
				  struct aq_mss_egress_ctlf_record *rec,
				  u16 table_index)
{
	int ret;

	memset(rec, 0, sizeof(*rec));
	ret = AQ_API_CALL_SAFE(get_egress_ctlf_record, hw, rec, table_index);

	return ret;
}
/* Pack @rec into twenty-eight 16-bit words and write the egress classifier
 * row @table_index.  The field-to-bit mapping is the exact mirror of
 * get_egress_class_record(); fields wider than a word boundary are split
 * across consecutive words.
 */
static int set_egress_class_record(struct aq_hw_s *hw,
				   const struct aq_mss_egress_class_record *rec,
				   u16 table_index)
{
	u16 packed_record[28];

	if (table_index >= NUMROWS_EGRESSCLASSRECORD)
		return -EINVAL;

	memset(packed_record, 0, sizeof(u16) * 28);

	/* VLAN match fields. */
	packed_record[0] = rec->vlan_id & 0xFFF;
	packed_record[0] |= (rec->vlan_up & 0x7) << 12;
	packed_record[0] |= (rec->vlan_valid & 0x1) << 15;
	packed_record[1] = rec->byte3 & 0xFF;
	packed_record[1] |= (rec->byte2 & 0xFF) << 8;
	packed_record[2] = rec->byte1 & 0xFF;
	packed_record[2] |= (rec->byte0 & 0xFF) << 8;
	/* TCI and 64-bit SCI (two 32-bit halves, byte-interleaved). */
	packed_record[3] = rec->tci & 0xFF;
	packed_record[3] |= (rec->sci[0] & 0xFF) << 8;
	packed_record[4] = (rec->sci[0] >> 8) & 0xFFFF;
	packed_record[5] = (rec->sci[0] >> 24) & 0xFF;
	packed_record[5] |= (rec->sci[1] & 0xFF) << 8;
	packed_record[6] = (rec->sci[1] >> 8) & 0xFFFF;
	packed_record[7] = (rec->sci[1] >> 24) & 0xFF;
	/* Ethertype, SNAP and LLC match values. */
	packed_record[7] |= (rec->eth_type & 0xFF) << 8;
	packed_record[8] = (rec->eth_type >> 8) & 0xFF;
	packed_record[8] |= (rec->snap[0] & 0xFF) << 8;
	packed_record[9] = (rec->snap[0] >> 8) & 0xFFFF;
	packed_record[10] = (rec->snap[0] >> 24) & 0xFF;
	packed_record[10] |= (rec->snap[1] & 0xFF) << 8;
	packed_record[11] = rec->llc & 0xFFFF;
	packed_record[12] = (rec->llc >> 16) & 0xFF;
	/* Source and destination MAC addresses (two 32-bit halves each). */
	packed_record[12] |= (rec->mac_sa[0] & 0xFF) << 8;
	packed_record[13] = (rec->mac_sa[0] >> 8) & 0xFFFF;
	packed_record[14] = (rec->mac_sa[0] >> 24) & 0xFF;
	packed_record[14] |= (rec->mac_sa[1] & 0xFF) << 8;
	packed_record[15] = (rec->mac_sa[1] >> 8) & 0xFF;
	packed_record[15] |= (rec->mac_da[0] & 0xFF) << 8;
	packed_record[16] = (rec->mac_da[0] >> 8) & 0xFFFF;
	packed_record[17] = (rec->mac_da[0] >> 24) & 0xFF;
	packed_record[17] |= (rec->mac_da[1] & 0xFF) << 8;
	packed_record[18] = (rec->mac_da[1] >> 8) & 0xFF;
	/* Packet number and per-field byte locations/masks. */
	packed_record[18] |= (rec->pn & 0xFF) << 8;
	packed_record[19] = (rec->pn >> 8) & 0xFFFF;
	packed_record[20] = (rec->pn >> 24) & 0xFF;
	packed_record[20] |= (rec->byte3_location & 0x3F) << 8;
	packed_record[20] |= (rec->byte3_mask & 0x1) << 14;
	packed_record[20] |= (rec->byte2_location & 0x1) << 15;
	packed_record[21] = (rec->byte2_location >> 1) & 0x1F;
	packed_record[21] |= (rec->byte2_mask & 0x1) << 5;
	packed_record[21] |= (rec->byte1_location & 0x3F) << 6;
	packed_record[21] |= (rec->byte1_mask & 0x1) << 12;
	packed_record[21] |= (rec->byte0_location & 0x7) << 13;
	packed_record[22] = (rec->byte0_location >> 3) & 0x7;
	packed_record[22] |= (rec->byte0_mask & 0x1) << 3;
	/* Match-enable masks for the individual classifier fields. */
	packed_record[22] |= (rec->vlan_id_mask & 0x3) << 4;
	packed_record[22] |= (rec->vlan_up_mask & 0x1) << 6;
	packed_record[22] |= (rec->vlan_valid_mask & 0x1) << 7;
	packed_record[22] |= (rec->tci_mask & 0xFF) << 8;
	packed_record[23] = rec->sci_mask & 0xFF;
	packed_record[23] |= (rec->eth_type_mask & 0x3) << 8;
	packed_record[23] |= (rec->snap_mask & 0x1F) << 10;
	packed_record[23] |= (rec->llc_mask & 0x1) << 15;
	packed_record[24] = (rec->llc_mask >> 1) & 0x3;
	packed_record[24] |= (rec->sa_mask & 0x3F) << 2;
	packed_record[24] |= (rec->da_mask & 0x3F) << 8;
	packed_record[24] |= (rec->pn_mask & 0x3) << 14;
	packed_record[25] = (rec->pn_mask >> 2) & 0x3;
	/* Control flags, SC index and the classifier action/valid bits. */
	packed_record[25] |= (rec->eight02dot2 & 0x1) << 2;
	packed_record[25] |= (rec->tci_sc & 0x1) << 3;
	packed_record[25] |= (rec->tci_87543 & 0x1) << 4;
	packed_record[25] |= (rec->exp_sectag_en & 0x1) << 5;
	packed_record[25] |= (rec->sc_idx & 0x1F) << 6;
	packed_record[25] |= (rec->sc_sa & 0x3) << 11;
	packed_record[25] |= (rec->debug & 0x1) << 13;
	packed_record[25] |= (rec->action & 0x3) << 14;
	packed_record[26] = (rec->valid & 0x1) << 3;

	return set_raw_egress_record(hw, packed_record, 28, 1,
				     ROWOFFSET_EGRESSCLASSRECORD + table_index);
}
/* Public entry: write one egress classifier row via AQ_API_CALL_SAFE. */
int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
				   const struct aq_mss_egress_class_record *rec,
				   u16 table_index)
{
	int ret;

	ret = AQ_API_CALL_SAFE(set_egress_class_record, hw, rec, table_index);

	return ret;
}
/* Read one egress classifier row and unpack its twenty-eight 16-bit words
 * into @rec.  The bit layout is the exact mirror of
 * set_egress_class_record().
 */
static int get_egress_class_record(struct aq_hw_s *hw,
				   struct aq_mss_egress_class_record *rec,
				   u16 table_index)
{
	u16 packed_record[28];
	int ret;

	if (table_index >= NUMROWS_EGRESSCLASSRECORD)
		return -EINVAL;

	/* If the row that we want to read is odd, first read the previous even
	 * row, throw that value away, and finally read the desired row.
	 */
	if ((table_index % 2) > 0) {
		ret = get_raw_egress_record(hw, packed_record, 28, 1,
					    ROWOFFSET_EGRESSCLASSRECORD +
						    table_index - 1);
		if (unlikely(ret))
			return ret;
	}

	ret = get_raw_egress_record(hw, packed_record, 28, 1,
				    ROWOFFSET_EGRESSCLASSRECORD + table_index);
	if (unlikely(ret))
		return ret;

	/* VLAN match fields. */
	rec->vlan_id = packed_record[0] & 0xFFF;
	rec->vlan_up = (packed_record[0] >> 12) & 0x7;
	rec->vlan_valid = (packed_record[0] >> 15) & 0x1;
	rec->byte3 = packed_record[1] & 0xFF;
	rec->byte2 = (packed_record[1] >> 8) & 0xFF;
	rec->byte1 = packed_record[2] & 0xFF;
	rec->byte0 = (packed_record[2] >> 8) & 0xFF;
	/* TCI and 64-bit SCI (two 32-bit halves). */
	rec->tci = packed_record[3] & 0xFF;
	rec->sci[0] = (packed_record[3] >> 8) & 0xFF;
	rec->sci[0] |= packed_record[4] << 8;
	rec->sci[0] |= (packed_record[5] & 0xFF) << 24;
	rec->sci[1] = (packed_record[5] >> 8) & 0xFF;
	rec->sci[1] |= packed_record[6] << 8;
	rec->sci[1] |= (packed_record[7] & 0xFF) << 24;
	/* Ethertype, SNAP and LLC match values. */
	rec->eth_type = (packed_record[7] >> 8) & 0xFF;
	rec->eth_type |= (packed_record[8] & 0xFF) << 8;
	rec->snap[0] = (packed_record[8] >> 8) & 0xFF;
	rec->snap[0] |= packed_record[9] << 8;
	rec->snap[0] |= (packed_record[10] & 0xFF) << 24;
	rec->snap[1] = (packed_record[10] >> 8) & 0xFF;
	rec->llc = packed_record[11];
	rec->llc |= (packed_record[12] & 0xFF) << 16;
	/* Source and destination MAC addresses (two 32-bit halves each). */
	rec->mac_sa[0] = (packed_record[12] >> 8) & 0xFF;
	rec->mac_sa[0] |= packed_record[13] << 8;
	rec->mac_sa[0] |= (packed_record[14] & 0xFF) << 24;
	rec->mac_sa[1] = (packed_record[14] >> 8) & 0xFF;
	rec->mac_sa[1] |= (packed_record[15] & 0xFF) << 8;
	rec->mac_da[0] = (packed_record[15] >> 8) & 0xFF;
	rec->mac_da[0] |= packed_record[16] << 8;
	rec->mac_da[0] |= (packed_record[17] & 0xFF) << 24;
	rec->mac_da[1] = (packed_record[17] >> 8) & 0xFF;
	rec->mac_da[1] |= (packed_record[18] & 0xFF) << 8;
	/* Packet number and per-field byte locations/masks. */
	rec->pn = (packed_record[18] >> 8) & 0xFF;
	rec->pn |= packed_record[19] << 8;
	rec->pn |= (packed_record[20] & 0xFF) << 24;
	rec->byte3_location = (packed_record[20] >> 8) & 0x3F;
	rec->byte3_mask = (packed_record[20] >> 14) & 0x1;
	rec->byte2_location = (packed_record[20] >> 15) & 0x1;
	rec->byte2_location |= (packed_record[21] & 0x1F) << 1;
	rec->byte2_mask = (packed_record[21] >> 5) & 0x1;
	rec->byte1_location = (packed_record[21] >> 6) & 0x3F;
	rec->byte1_mask = (packed_record[21] >> 12) & 0x1;
	rec->byte0_location = (packed_record[21] >> 13) & 0x7;
	rec->byte0_location |= (packed_record[22] & 0x7) << 3;
	rec->byte0_mask = (packed_record[22] >> 3) & 0x1;
	/* Match-enable masks for the individual classifier fields. */
	rec->vlan_id_mask = (packed_record[22] >> 4) & 0x3;
	rec->vlan_up_mask = (packed_record[22] >> 6) & 0x1;
	rec->vlan_valid_mask = (packed_record[22] >> 7) & 0x1;
	rec->tci_mask = (packed_record[22] >> 8) & 0xFF;
	rec->sci_mask = packed_record[23] & 0xFF;
	rec->eth_type_mask = (packed_record[23] >> 8) & 0x3;
	rec->snap_mask = (packed_record[23] >> 10) & 0x1F;
	rec->llc_mask = (packed_record[23] >> 15) & 0x1;
	rec->llc_mask |= (packed_record[24] & 0x3) << 1;
	rec->sa_mask = (packed_record[24] >> 2) & 0x3F;
	rec->da_mask = (packed_record[24] >> 8) & 0x3F;
	rec->pn_mask = (packed_record[24] >> 14) & 0x3;
	rec->pn_mask |= (packed_record[25] & 0x3) << 2;
	/* Control flags, SC index and the classifier action/valid bits. */
	rec->eight02dot2 = (packed_record[25] >> 2) & 0x1;
	rec->tci_sc = (packed_record[25] >> 3) & 0x1;
	rec->tci_87543 = (packed_record[25] >> 4) & 0x1;
	rec->exp_sectag_en = (packed_record[25] >> 5) & 0x1;
	rec->sc_idx = (packed_record[25] >> 6) & 0x1F;
	rec->sc_sa = (packed_record[25] >> 11) & 0x3;
	rec->debug = (packed_record[25] >> 13) & 0x1;
	rec->action = (packed_record[25] >> 14) & 0x3;
	rec->valid = (packed_record[26] >> 3) & 0x1;

	return 0;
}
/* Public entry: clear the caller's record, then read one egress classifier
 * row through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
				   struct aq_mss_egress_class_record *rec,
				   u16 table_index)
{
	int ret;

	memset(rec, 0, sizeof(*rec));
	ret = AQ_API_CALL_SAFE(get_egress_class_record, hw, rec, table_index);

	return ret;
}
/* Pack @rec into eight 16-bit words and write the egress SC row
 * @table_index.  Word 6 is never written and stays zero from the memset.
 */
static int set_egress_sc_record(struct aq_hw_s *hw,
				const struct aq_mss_egress_sc_record *rec,
				u16 table_index)
{
	u16 packed_record[8];

	if (table_index >= NUMROWS_EGRESSSCRECORD)
		return -EINVAL;

	memset(packed_record, 0, sizeof(u16) * 8);

	packed_record[0] = rec->start_time & 0xFFFF;
	packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
	packed_record[2] = rec->stop_time & 0xFFFF;
	packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
	packed_record[4] = rec->curr_an & 0x3;
	packed_record[4] |= (rec->an_roll & 0x1) << 2;
	packed_record[4] |= (rec->tci & 0x3F) << 3;
	packed_record[4] |= (rec->enc_off & 0x7F) << 9;
	packed_record[5] = (rec->enc_off >> 7) & 0x1;
	packed_record[5] |= (rec->protect & 0x1) << 1;
	packed_record[5] |= (rec->recv & 0x1) << 2;
	packed_record[5] |= (rec->fresh & 0x1) << 3;
	packed_record[5] |= (rec->sak_len & 0x3) << 4;
	/* |= is safe: packed_record[7] is zero from the memset above. */
	packed_record[7] |= (rec->valid & 0x1) << 15;

	return set_raw_egress_record(hw, packed_record, 8, 2,
				     ROWOFFSET_EGRESSSCRECORD + table_index);
}
/* Public entry: write one egress SC row via AQ_API_CALL_SAFE. */
int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
				const struct aq_mss_egress_sc_record *rec,
				u16 table_index)
{
	int ret;

	ret = AQ_API_CALL_SAFE(set_egress_sc_record, hw, rec, table_index);

	return ret;
}
/* Read one egress SC row and unpack it into @rec.
 * The bit layout mirrors set_egress_sc_record(); word 6 is unused there
 * and is not consumed here either.
 */
static int get_egress_sc_record(struct aq_hw_s *hw,
				struct aq_mss_egress_sc_record *rec,
				u16 table_index)
{
	u16 packed_record[8];
	int ret;

	if (table_index >= NUMROWS_EGRESSSCRECORD)
		return -EINVAL;

	ret = get_raw_egress_record(hw, packed_record, 8, 2,
				    ROWOFFSET_EGRESSSCRECORD + table_index);
	if (unlikely(ret))
		return ret;

	rec->start_time = packed_record[0];
	rec->start_time |= packed_record[1] << 16;
	rec->stop_time = packed_record[2];
	rec->stop_time |= packed_record[3] << 16;
	rec->curr_an = packed_record[4] & 0x3;
	rec->an_roll = (packed_record[4] >> 2) & 0x1;
	rec->tci = (packed_record[4] >> 3) & 0x3F;
	/* enc_off spans words 4 and 5 (7 low bits + 1 high bit). */
	rec->enc_off = (packed_record[4] >> 9) & 0x7F;
	rec->enc_off |= (packed_record[5] & 0x1) << 7;
	rec->protect = (packed_record[5] >> 1) & 0x1;
	rec->recv = (packed_record[5] >> 2) & 0x1;
	rec->fresh = (packed_record[5] >> 3) & 0x1;
	rec->sak_len = (packed_record[5] >> 4) & 0x3;
	rec->valid = (packed_record[7] >> 15) & 0x1;

	return 0;
}
/* Public entry: clear the caller's record, then read one egress SC row
 * through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
				struct aq_mss_egress_sc_record *rec,
				u16 table_index)
{
	int ret;

	memset(rec, 0, sizeof(*rec));
	ret = AQ_API_CALL_SAFE(get_egress_sc_record, hw, rec, table_index);

	return ret;
}
/* Pack @rec into eight 16-bit words and write the egress SA row
 * @table_index.
 */
static int set_egress_sa_record(struct aq_hw_s *hw,
				const struct aq_mss_egress_sa_record *rec,
				u16 table_index)
{
	u16 packed_record[8] = { 0 };

	if (table_index >= NUMROWS_EGRESSSARECORD)
		return -EINVAL;

	packed_record[0] = rec->start_time & 0xFFFF;
	packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
	packed_record[2] = rec->stop_time & 0xFFFF;
	packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
	packed_record[4] = rec->next_pn & 0xFFFF;
	packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
	/* Word 6: PN-saturated flag (bit 0) and fresh flag (bit 1). */
	packed_record[6] = (rec->sat_pn & 0x1) | ((rec->fresh & 0x1) << 1);
	packed_record[7] = (rec->valid & 0x1) << 15;

	return set_raw_egress_record(hw, packed_record, 8, 2,
				     ROWOFFSET_EGRESSSARECORD + table_index);
}
/* Public entry: write one egress SA row via AQ_API_CALL_SAFE.
 * A failure is reported once via WARN_ONCE and passed back to the caller.
 */
int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
				const struct aq_mss_egress_sa_record *rec,
				u16 table_index)
{
	int err;

	err = AQ_API_CALL_SAFE(set_egress_sa_record, hw, rec, table_index);
	WARN_ONCE(err, "%s failed with %d\n", __func__, err);

	return err;
}
/* Read one egress SA row and unpack it into @rec.
 * The bit layout mirrors set_egress_sa_record().
 */
static int get_egress_sa_record(struct aq_hw_s *hw,
				struct aq_mss_egress_sa_record *rec,
				u16 table_index)
{
	u16 packed_record[8];
	int ret;

	if (table_index >= NUMROWS_EGRESSSARECORD)
		return -EINVAL;

	ret = get_raw_egress_record(hw, packed_record, 8, 2,
				    ROWOFFSET_EGRESSSARECORD + table_index);
	if (unlikely(ret))
		return ret;

	rec->start_time = packed_record[0];
	rec->start_time |= packed_record[1] << 16;
	rec->stop_time = packed_record[2];
	rec->stop_time |= packed_record[3] << 16;
	rec->next_pn = packed_record[4];
	rec->next_pn |= packed_record[5] << 16;
	rec->sat_pn = packed_record[6] & 0x1;
	rec->fresh = (packed_record[6] >> 1) & 0x1;
	rec->valid = (packed_record[7] >> 15) & 0x1;

	return 0;
}
/* Public entry: clear the caller's record, then read one egress SA row
 * through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
				struct aq_mss_egress_sa_record *rec,
				u16 table_index)
{
	int ret;

	memset(rec, 0, sizeof(*rec));
	ret = AQ_API_CALL_SAFE(get_egress_sa_record, hw, rec, table_index);

	return ret;
}
/* Pack the 256-bit SA key into sixteen 16-bit words and write it as two
 * 8-word raw records.
 *
 * NOTE(review): the second half goes to row (table_index - 32) relative to
 * the table base — presumably the key table's upper half; confirm against
 * the hardware row layout before changing.
 */
static int set_egress_sakey_record(struct aq_hw_s *hw,
				   const struct aq_mss_egress_sakey_record *rec,
				   u16 table_index)
{
	u16 packed_record[16];
	int ret;

	if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
		return -EINVAL;

	memset(packed_record, 0, sizeof(u16) * 16);

	/* Each 32-bit key word is split into two 16-bit halves. */
	packed_record[0] = rec->key[0] & 0xFFFF;
	packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
	packed_record[2] = rec->key[1] & 0xFFFF;
	packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
	packed_record[4] = rec->key[2] & 0xFFFF;
	packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
	packed_record[6] = rec->key[3] & 0xFFFF;
	packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
	packed_record[8] = rec->key[4] & 0xFFFF;
	packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
	packed_record[10] = rec->key[5] & 0xFFFF;
	packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
	packed_record[12] = rec->key[6] & 0xFFFF;
	packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
	packed_record[14] = rec->key[7] & 0xFFFF;
	packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;

	/* First 128 bits. */
	ret = set_raw_egress_record(hw, packed_record, 8, 2,
				    ROWOFFSET_EGRESSSAKEYRECORD + table_index);
	if (unlikely(ret))
		return ret;
	/* Second 128 bits. */
	ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
				    ROWOFFSET_EGRESSSAKEYRECORD + table_index -
					    32);
	if (unlikely(ret))
		return ret;

	return 0;
}
/* Public entry: write one egress SA key record via AQ_API_CALL_SAFE.
 * A failure is reported once via WARN_ONCE and passed back to the caller.
 */
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
				   const struct aq_mss_egress_sakey_record *rec,
				   u16 table_index)
{
	int err;

	err = AQ_API_CALL_SAFE(set_egress_sakey_record, hw, rec, table_index);
	WARN_ONCE(err, "%s failed with %d\n", __func__, err);

	return err;
}
/* Read the 256-bit SA key as two 8-word raw records and reassemble the
 * eight 32-bit key words.
 *
 * NOTE(review): the second half is read from row (table_index - 32)
 * relative to the table base, matching set_egress_sakey_record() — confirm
 * against the hardware row layout before changing.
 */
static int get_egress_sakey_record(struct aq_hw_s *hw,
				   struct aq_mss_egress_sakey_record *rec,
				   u16 table_index)
{
	u16 packed_record[16];
	int ret;

	if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
		return -EINVAL;

	/* First 128 bits. */
	ret = get_raw_egress_record(hw, packed_record, 8, 2,
				    ROWOFFSET_EGRESSSAKEYRECORD + table_index);
	if (unlikely(ret))
		return ret;
	/* Second 128 bits. */
	ret = get_raw_egress_record(hw, packed_record + 8, 8, 2,
				    ROWOFFSET_EGRESSSAKEYRECORD + table_index -
					    32);
	if (unlikely(ret))
		return ret;

	/* Pairs of 16-bit words form each 32-bit key word. */
	rec->key[0] = packed_record[0];
	rec->key[0] |= packed_record[1] << 16;
	rec->key[1] = packed_record[2];
	rec->key[1] |= packed_record[3] << 16;
	rec->key[2] = packed_record[4];
	rec->key[2] |= packed_record[5] << 16;
	rec->key[3] = packed_record[6];
	rec->key[3] |= packed_record[7] << 16;
	rec->key[4] = packed_record[8];
	rec->key[4] |= packed_record[9] << 16;
	rec->key[5] = packed_record[10];
	rec->key[5] |= packed_record[11] << 16;
	rec->key[6] = packed_record[12];
	rec->key[6] |= packed_record[13] << 16;
	rec->key[7] = packed_record[14];
	rec->key[7] |= packed_record[15] << 16;

	return 0;
}
/* Public entry: clear the caller's record, then read one egress SA key
 * record through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
				   struct aq_mss_egress_sakey_record *rec,
				   u16 table_index)
{
	int ret;

	memset(rec, 0, sizeof(*rec));
	ret = AQ_API_CALL_SAFE(get_egress_sakey_record, hw, rec, table_index);

	return ret;
}
/* Read the egress SC counters for @sc_index.
 * Each counter arrives as four 16-bit words which are assembled into the
 * two 32-bit halves of the counter pair; counter rows sit at
 * sc_index * 8 + 4 .. + 7.
 */
static int get_egress_sc_counters(struct aq_hw_s *hw,
				  struct aq_mss_egress_sc_counters *counters,
				  u16 sc_index)
{
	u16 packed_record[4];
	int ret;

	if (sc_index >= NUMROWS_EGRESSSCRECORD)
		return -EINVAL;

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 4);
	if (unlikely(ret))
		return ret;
	counters->sc_protected_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sc_protected_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 5);
	if (unlikely(ret))
		return ret;
	counters->sc_encrypted_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sc_encrypted_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 6);
	if (unlikely(ret))
		return ret;
	counters->sc_protected_octets[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sc_protected_octets[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 7);
	if (unlikely(ret))
		return ret;
	counters->sc_encrypted_octets[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sc_encrypted_octets[1] =
		packed_record[2] | (packed_record[3] << 16);

	return 0;
}
/* Public entry: clear the caller's buffer, then read the egress SC
 * counters through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
				  struct aq_mss_egress_sc_counters *counters,
				  u16 sc_index)
{
	int ret;

	memset(counters, 0, sizeof(*counters));
	ret = AQ_API_CALL_SAFE(get_egress_sc_counters, hw, counters, sc_index);

	return ret;
}
/* Read the egress SA counters for @sa_index.
 * Each counter arrives as four 16-bit words which are assembled into the
 * two 32-bit halves of the counter pair; counter rows sit at
 * sa_index * 8 + 0 .. + 3.
 */
static int get_egress_sa_counters(struct aq_hw_s *hw,
				  struct aq_mss_egress_sa_counters *counters,
				  u16 sa_index)
{
	u16 packed_record[4];
	int ret;

	if (sa_index >= NUMROWS_EGRESSSARECORD)
		return -EINVAL;

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 0);
	if (unlikely(ret))
		return ret;
	counters->sa_hit_drop_redirect[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sa_hit_drop_redirect[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 1);
	if (unlikely(ret))
		return ret;
	counters->sa_protected2_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sa_protected2_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 2);
	if (unlikely(ret))
		return ret;
	counters->sa_protected_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sa_protected_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 3);
	if (unlikely(ret))
		return ret;
	counters->sa_encrypted_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->sa_encrypted_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	return 0;
}
/* Public entry: clear the caller's buffer, then read the egress SA
 * counters through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
				  struct aq_mss_egress_sa_counters *counters,
				  u16 sa_index)
{
	int ret;

	memset(counters, 0, sizeof(*counters));
	ret = AQ_API_CALL_SAFE(get_egress_sa_counters, hw, counters, sa_index);

	return ret;
}
/* Read the global (non-per-SC/SA) egress counters.
 * Each counter arrives as four 16-bit words which are assembled into the
 * two 32-bit halves of the counter pair; the block starts at row 256.
 */
static int
get_egress_common_counters(struct aq_hw_s *hw,
			   struct aq_mss_egress_common_counters *counters)
{
	u16 packed_record[4];
	int ret;

	ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 0);
	if (unlikely(ret))
		return ret;
	counters->ctl_pkt[0] = packed_record[0] | (packed_record[1] << 16);
	counters->ctl_pkt[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 1);
	if (unlikely(ret))
		return ret;
	counters->unknown_sa_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unknown_sa_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 2);
	if (unlikely(ret))
		return ret;
	counters->untagged_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->untagged_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 3);
	if (unlikely(ret))
		return ret;
	counters->too_long[0] = packed_record[0] | (packed_record[1] << 16);
	counters->too_long[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 4);
	if (unlikely(ret))
		return ret;
	counters->ecc_error_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->ecc_error_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 5);
	if (unlikely(ret))
		return ret;
	counters->unctrl_hit_drop_redir[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unctrl_hit_drop_redir[1] =
		packed_record[2] | (packed_record[3] << 16);

	return 0;
}
/* Public entry: clear the caller's buffer, then read the common egress
 * counters through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
				      struct aq_mss_egress_common_counters *counters)
{
	int ret;

	memset(counters, 0, sizeof(*counters));
	ret = AQ_API_CALL_SAFE(get_egress_common_counters, hw, counters);

	return ret;
}
/* Clear all egress MIB counters.
 * The control register spans two 16-bit MDIO words (word_0 at the base
 * address, word_1 at base + 4); both words are rewritten on every step so
 * the register keeps a consistent value while the clear bit is toggled.
 */
static int clear_egress_counters(struct aq_hw_s *hw)
{
	struct mss_egress_ctl_register ctl_reg;
	int ret;

	memset(&ctl_reg, 0, sizeof(ctl_reg));

	/* Read back the current control register contents first. */
	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, MSS_EGRESS_CTL_REGISTER_ADDR,
			       &ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_EGRESS_CTL_REGISTER_ADDR + 4,
			       &ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	/* Toggle the Egress MIB clear bit 0->1->0 */
	ctl_reg.bits_0.clear_counter = 0;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_CTL_REGISTER_ADDR + 4,
				ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	ctl_reg.bits_0.clear_counter = 1;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_CTL_REGISTER_ADDR + 4,
				ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	ctl_reg.bits_0.clear_counter = 0;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_CTL_REGISTER_ADDR + 4,
				ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	return 0;
}
/* Public entry: clear the egress MIB counters via AQ_API_CALL_SAFE. */
int aq_mss_clear_egress_counters(struct aq_hw_s *hw)
{
	int ret;

	ret = AQ_API_CALL_SAFE(clear_egress_counters, hw);

	return ret;
}
/* Read the ingress SA counters for @sa_index.
 * Each counter arrives as four 16-bit words which are assembled into the
 * two 32-bit halves of the counter pair; counter rows sit at
 * sa_index * 12 + 0 .. + 11.
 */
static int get_ingress_sa_counters(struct aq_hw_s *hw,
				   struct aq_mss_ingress_sa_counters *counters,
				   u16 sa_index)
{
	u16 packed_record[4];
	int ret;

	if (sa_index >= NUMROWS_INGRESSSARECORD)
		return -EINVAL;

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 0);
	if (unlikely(ret))
		return ret;
	counters->untagged_hit_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->untagged_hit_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 1);
	if (unlikely(ret))
		return ret;
	counters->ctrl_hit_drop_redir_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->ctrl_hit_drop_redir_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 2);
	if (unlikely(ret))
		return ret;
	counters->not_using_sa[0] = packed_record[0] | (packed_record[1] << 16);
	counters->not_using_sa[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 3);
	if (unlikely(ret))
		return ret;
	counters->unused_sa[0] = packed_record[0] | (packed_record[1] << 16);
	counters->unused_sa[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 4);
	if (unlikely(ret))
		return ret;
	counters->not_valid_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->not_valid_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 5);
	if (unlikely(ret))
		return ret;
	counters->invalid_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->invalid_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 6);
	if (unlikely(ret))
		return ret;
	counters->ok_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->ok_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 7);
	if (unlikely(ret))
		return ret;
	counters->late_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->late_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 8);
	if (unlikely(ret))
		return ret;
	counters->delayed_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->delayed_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 9);
	if (unlikely(ret))
		return ret;
	counters->unchecked_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unchecked_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 10);
	if (unlikely(ret))
		return ret;
	counters->validated_octets[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->validated_octets[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6,
				     sa_index * 12 + 11);
	if (unlikely(ret))
		return ret;
	counters->decrypted_octets[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->decrypted_octets[1] =
		packed_record[2] | (packed_record[3] << 16);

	return 0;
}
/* Public entry: clear the caller's buffer, then read the ingress SA
 * counters through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
				   struct aq_mss_ingress_sa_counters *counters,
				   u16 sa_index)
{
	int ret;

	memset(counters, 0, sizeof(*counters));
	ret = AQ_API_CALL_SAFE(get_ingress_sa_counters, hw, counters,
			       sa_index);

	return ret;
}
/* Read the global (non-per-SA) ingress counters.
 * Each counter arrives as four 16-bit words which are assembled into the
 * two 32-bit halves of the counter pair; the block starts at row 385.
 */
static int
get_ingress_common_counters(struct aq_hw_s *hw,
			    struct aq_mss_ingress_common_counters *counters)
{
	u16 packed_record[4];
	int ret;

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 0);
	if (unlikely(ret))
		return ret;
	counters->ctl_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->ctl_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 1);
	if (unlikely(ret))
		return ret;
	counters->tagged_miss_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->tagged_miss_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 2);
	if (unlikely(ret))
		return ret;
	counters->untagged_miss_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->untagged_miss_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 3);
	if (unlikely(ret))
		return ret;
	counters->notag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->notag_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 4);
	if (unlikely(ret))
		return ret;
	counters->untagged_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->untagged_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 5);
	if (unlikely(ret))
		return ret;
	counters->bad_tag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->bad_tag_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 6);
	if (unlikely(ret))
		return ret;
	counters->no_sci_pkts[0] = packed_record[0] | (packed_record[1] << 16);
	counters->no_sci_pkts[1] = packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 7);
	if (unlikely(ret))
		return ret;
	counters->unknown_sci_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unknown_sci_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 8);
	if (unlikely(ret))
		return ret;
	counters->ctrl_prt_pass_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->ctrl_prt_pass_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 9);
	if (unlikely(ret))
		return ret;
	counters->unctrl_prt_pass_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unctrl_prt_pass_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 10);
	if (unlikely(ret))
		return ret;
	counters->ctrl_prt_fail_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->ctrl_prt_fail_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 11);
	if (unlikely(ret))
		return ret;
	counters->unctrl_prt_fail_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unctrl_prt_fail_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 12);
	if (unlikely(ret))
		return ret;
	counters->too_long_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->too_long_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 13);
	if (unlikely(ret))
		return ret;
	counters->igpoc_ctl_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->igpoc_ctl_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 14);
	if (unlikely(ret))
		return ret;
	counters->ecc_error_pkts[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->ecc_error_pkts[1] =
		packed_record[2] | (packed_record[3] << 16);

	ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 15);
	if (unlikely(ret))
		return ret;
	counters->unctrl_hit_drop_redir[0] =
		packed_record[0] | (packed_record[1] << 16);
	counters->unctrl_hit_drop_redir[1] =
		packed_record[2] | (packed_record[3] << 16);

	return 0;
}
/* Public entry: clear the caller's buffer, then read the common ingress
 * counters through the AQ_API_CALL_SAFE wrapper.
 */
int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
				       struct aq_mss_ingress_common_counters *counters)
{
	int ret;

	memset(counters, 0, sizeof(*counters));
	ret = AQ_API_CALL_SAFE(get_ingress_common_counters, hw, counters);

	return ret;
}
/* Clear all ingress MIB counters.
 * The control register spans two 16-bit MDIO words (word_0 at the base
 * address, word_1 at base + 4); both words are rewritten on every step so
 * the register keeps a consistent value while the clear bit is toggled.
 */
static int clear_ingress_counters(struct aq_hw_s *hw)
{
	struct mss_ingress_ctl_register ctl_reg;
	int ret;

	memset(&ctl_reg, 0, sizeof(ctl_reg));

	/* Read back the current control register contents first. */
	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_INGRESS_CTL_REGISTER_ADDR, &ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_INGRESS_CTL_REGISTER_ADDR + 4,
			       &ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	/* Toggle the Ingress MIB clear bit 0->1->0 */
	ctl_reg.bits_0.clear_count = 0;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_INGRESS_CTL_REGISTER_ADDR + 4,
				ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	ctl_reg.bits_0.clear_count = 1;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_INGRESS_CTL_REGISTER_ADDR + 4,
				ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	ctl_reg.bits_0.clear_count = 0;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
	if (unlikely(ret))
		return ret;
	ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_INGRESS_CTL_REGISTER_ADDR + 4,
				ctl_reg.word_1);
	if (unlikely(ret))
		return ret;

	return 0;
}
/* Public entry: clear the ingress MIB counters via AQ_API_CALL_SAFE. */
int aq_mss_clear_ingress_counters(struct aq_hw_s *hw)
{
	int ret;

	ret = AQ_API_CALL_SAFE(clear_ingress_counters, hw);

	return ret;
}
/* Assemble the 32-bit egress SA expired bitmap from the two 16-bit MDIO
 * status words.  The low word is stored into *expired before the high word
 * is fetched, preserving the original partial-result-on-error behavior.
 */
static int get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
{
	u16 word;
	int ret;

	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
			       &word);
	if (unlikely(ret))
		return ret;
	*expired = word;

	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
			       &word);
	if (unlikely(ret))
		return ret;
	*expired |= word << 16;

	return 0;
}
/* Public entry: zero the output, then read the egress SA expired bitmap
 * via AQ_API_CALL_SAFE.
 */
int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
{
	int ret;

	*expired = 0;
	ret = AQ_API_CALL_SAFE(get_egress_sa_expired, hw, expired);

	return ret;
}
/* Assemble the 32-bit egress SA threshold-expired bitmap from the two
 * 16-bit MDIO status words; same low-then-high ordering as
 * get_egress_sa_expired().
 */
static int get_egress_sa_threshold_expired(struct aq_hw_s *hw,
					   u32 *expired)
{
	u16 word;
	int ret;

	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR, &word);
	if (unlikely(ret))
		return ret;
	*expired = word;

	ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
			       MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1, &word);
	if (unlikely(ret))
		return ret;
	*expired |= word << 16;

	return 0;
}
/* Public entry point: read the Egress SA threshold-expired status bitmap. */
int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
					   u32 *expired)
{
	int err;

	/* Start from a clean output so a partial read never leaks stale bits. */
	*expired = 0;
	err = AQ_API_CALL_SAFE(get_egress_sa_threshold_expired, hw, expired);

	return err;
}
/* Write the 32-bit Egress SA expired status as two 16-bit MDIO words
 * (low word first, then the high word at base + 1).
 * Returns 0 on success or the first MDIO error code.
 */
static int set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
{
	int err;

	err = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
				expired & 0xFFFF);
	if (unlikely(err))
		return err;

	return aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
				 MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
				 expired >> 16);
}
/* Public entry point: write the Egress SA expired status bitmap. */
int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
{
	int err;

	err = AQ_API_CALL_SAFE(set_egress_sa_expired, hw, expired);

	return err;
}
/* Write the 32-bit Egress SA threshold-expired status as two 16-bit
 * MDIO words (low word first, then the high word at base + 1).
 * Returns 0 on success or the first MDIO error code.
 */
static int set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
{
	int err;

	err = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
		MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR,
		expired & 0xFFFF);
	if (unlikely(err))
		return err;

	return aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
		MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1,
		expired >> 16);
}
/* Public entry point: write the Egress SA threshold-expired status bitmap. */
int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
{
	int err;

	err = AQ_API_CALL_SAFE(set_egress_sa_threshold_expired, hw, expired);

	return err;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __MACSEC_API_H__
#define __MACSEC_API_H__
#include "aq_hw.h"
#include "macsec_struct.h"
#define NUMROWS_INGRESSPRECTLFRECORD 24
#define ROWOFFSET_INGRESSPRECTLFRECORD 0
#define NUMROWS_INGRESSPRECLASSRECORD 48
#define ROWOFFSET_INGRESSPRECLASSRECORD 0
#define NUMROWS_INGRESSPOSTCLASSRECORD 48
#define ROWOFFSET_INGRESSPOSTCLASSRECORD 0
#define NUMROWS_INGRESSSCRECORD 32
#define ROWOFFSET_INGRESSSCRECORD 0
#define NUMROWS_INGRESSSARECORD 32
#define ROWOFFSET_INGRESSSARECORD 32
#define NUMROWS_INGRESSSAKEYRECORD 32
#define ROWOFFSET_INGRESSSAKEYRECORD 0
#define NUMROWS_INGRESSPOSTCTLFRECORD 24
#define ROWOFFSET_INGRESSPOSTCTLFRECORD 0
#define NUMROWS_EGRESSCTLFRECORD 24
#define ROWOFFSET_EGRESSCTLFRECORD 0
#define NUMROWS_EGRESSCLASSRECORD 48
#define ROWOFFSET_EGRESSCLASSRECORD 0
#define NUMROWS_EGRESSSCRECORD 32
#define ROWOFFSET_EGRESSSCRECORD 0
#define NUMROWS_EGRESSSARECORD 32
#define ROWOFFSET_EGRESSSARECORD 32
#define NUMROWS_EGRESSSAKEYRECORD 32
#define ROWOFFSET_EGRESSSAKEYRECORD 96
/*! Read the raw table data from the specified row of the Egress CTL
* Filter table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 23).
*/
int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
struct aq_mss_egress_ctlf_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Egress CTL Filter table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 23).
*/
int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
const struct aq_mss_egress_ctlf_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Egress
* Packet Classifier table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 47).
*/
int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
struct aq_mss_egress_class_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Egress Packet Classifier table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write (max 47).
*/
int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
const struct aq_mss_egress_class_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Egress SC
* Lookup table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 31).
*/
int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
struct aq_mss_egress_sc_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Egress SC Lookup table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write (max 31).
*/
int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
const struct aq_mss_egress_sc_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Egress SA
* Lookup table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 31).
*/
int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
struct aq_mss_egress_sa_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Egress SA Lookup table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write (max 31).
*/
int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
const struct aq_mss_egress_sa_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Egress SA
* Key Lookup table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 31).
*/
int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
struct aq_mss_egress_sakey_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Egress SA Key Lookup table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write (max 31).
*/
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
const struct aq_mss_egress_sakey_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Ingress
* Pre-MACSec CTL Filter table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 23).
*/
int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
struct aq_mss_ingress_prectlf_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Ingress Pre-MACSec CTL Filter table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 23).
*/
int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
const struct aq_mss_ingress_prectlf_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Ingress
* Pre-MACSec Packet Classifier table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 47).
*/
int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
struct aq_mss_ingress_preclass_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Ingress Pre-MACSec Packet Classifier table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 47).
*/
int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
const struct aq_mss_ingress_preclass_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Ingress SC
* Lookup table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 31).
*/
int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
struct aq_mss_ingress_sc_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Ingress SC Lookup table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 31).
*/
int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
const struct aq_mss_ingress_sc_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Ingress SA
* Lookup table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 31).
*/
int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
struct aq_mss_ingress_sa_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Ingress SA Lookup table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 31).
*/
int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
const struct aq_mss_ingress_sa_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Ingress SA
* Key Lookup table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 31).
*/
int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
struct aq_mss_ingress_sakey_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Ingress SA Key Lookup table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 31).
*/
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
const struct aq_mss_ingress_sakey_record *rec,
u16 table_index);
/*! Read the raw table data from the specified row of the Ingress
 * Post-MACSec Packet Classifier table, and unpack it into the
 * fields of rec.
 * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
 * table_index - The table row to read (max 47).
 */
int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
					struct aq_mss_ingress_postclass_record *rec,
					u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
 * specified row of the Ingress Post-MACSec Packet Classifier table.
 * rec - [IN] The bitfield values to write to the table row.
 * table_index - The table row to write (max 47).
 */
int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
					const struct aq_mss_ingress_postclass_record *rec,
					u16 table_index);
/*! Read the raw table data from the specified row of the Ingress
* Post-MACSec CTL Filter table, and unpack it into the fields of rec.
* rec - [OUT] The raw table row data will be unpacked into the fields of rec.
* table_index - The table row to read (max 23).
*/
int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
struct aq_mss_ingress_postctlf_record *rec,
u16 table_index);
/*! Pack the fields of rec, and write the packed data into the
* specified row of the Ingress Post-MACSec CTL Filter table.
* rec - [IN] The bitfield values to write to the table row.
* table_index - The table row to write(max 23).
*/
int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
const struct aq_mss_ingress_postctlf_record *rec,
u16 table_index);
/*! Read the counters for the specified SC, and unpack them into the
* fields of counters.
* counters - [OUT] The raw table row data will be unpacked here.
* sc_index - The table row to read (max 31).
*/
int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
struct aq_mss_egress_sc_counters *counters,
u16 sc_index);
/*! Read the counters for the specified SA, and unpack them into the
* fields of counters.
* counters - [OUT] The raw table row data will be unpacked here.
* sa_index - The table row to read (max 31).
*/
int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
struct aq_mss_egress_sa_counters *counters,
u16 sa_index);
/*! Read the counters for the common egress counters, and unpack them
* into the fields of counters.
* counters - [OUT] The raw table row data will be unpacked here.
*/
int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
struct aq_mss_egress_common_counters *counters);
/*! Clear all Egress counters to 0.*/
int aq_mss_clear_egress_counters(struct aq_hw_s *hw);
/*! Read the counters for the specified SA, and unpack them into the
* fields of counters.
* counters - [OUT] The raw table row data will be unpacked here.
* sa_index - The table row to read (max 31).
*/
int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
struct aq_mss_ingress_sa_counters *counters,
u16 sa_index);
/*! Read the counters for the common ingress counters, and unpack them
* into the fields of counters.
* counters - [OUT] The raw table row data will be unpacked here.
*/
int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
struct aq_mss_ingress_common_counters *counters);
/*! Clear all Ingress counters to 0. */
int aq_mss_clear_ingress_counters(struct aq_hw_s *hw);
/*! Get Egress SA expired. */
int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired);
/*! Get Egress SA threshold expired. */
int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
u32 *expired);
/*! Set Egress SA expired. */
int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired);
/*! Set Egress SA threshold expired. */
int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw,
u32 expired);
#endif /* __MACSEC_API_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _MACSEC_STRUCT_H_
#define _MACSEC_STRUCT_H_
/*! Represents the bitfields of a single row in the Egress CTL Filter
 * table.
 */
struct aq_mss_egress_ctlf_record {
	/*! This is used to store the 48 bit value used to compare SA, DA or
	 * half DA + half SA value.
	 */
	u32 sa_da[2];
	/*! This is used to store the 16 bit ethertype value used for
	 * comparison.
	 */
	u32 eth_type;
	/*! The match mask is per-nibble. 0 means don't care, i.e. every value
	 * will match successfully. The total data is 64 bit, i.e. 16 nibble
	 * masks.
	 */
	u32 match_mask;
	/*! 0: No compare, i.e. this entry is not used
	 * 1: compare DA only
	 * 2: compare SA only
	 * 3: compare half DA + half SA
	 * 4: compare ether type only
	 * 5: compare DA + ethertype
	 * 6: compare SA + ethertype
	 * 7: compare DA + range.
	 */
	u32 match_type;
	/*! 0: Bypass the remaining modules if matched.
	 * 1: Forward to next module for more classifications.
	 */
	u32 action;
};
/*! Represents the bitfields of a single row in the Egress Packet
 * Classifier table.
 */
struct aq_mss_egress_class_record {
	/*! VLAN ID field. */
	u32 vlan_id;
	/*! VLAN UP field. */
	u32 vlan_up;
	/*! VLAN Present in the Packet. */
	u32 vlan_valid;
	/*! The 8 bit value used to compare with extracted value for byte 3. */
	u32 byte3;
	/*! The 8 bit value used to compare with extracted value for byte 2. */
	u32 byte2;
	/*! The 8 bit value used to compare with extracted value for byte 1. */
	u32 byte1;
	/*! The 8 bit value used to compare with extracted value for byte 0. */
	u32 byte0;
	/*! The 8 bit TCI field used to compare with extracted value. */
	u32 tci;
	/*! The 64 bit SCI field in the SecTAG. */
	u32 sci[2];
	/*! The 16 bit Ethertype (in the clear) field used to compare with
	 * extracted value.
	 */
	u32 eth_type;
	/*! This is to specify the 40 bit SNAP header if the SNAP header's mask
	 * is enabled.
	 */
	u32 snap[2];
	/*! This is to specify the 24 bit LLC header if the LLC header's mask is
	 * enabled.
	 */
	u32 llc;
	/*! The 48 bit MAC_SA field used to compare with extracted value. */
	u32 mac_sa[2];
	/*! The 48 bit MAC_DA field used to compare with extracted value. */
	u32 mac_da[2];
	/*! The 32 bit Packet number used to compare with extracted value. */
	u32 pn;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte3_location;
	/*! 0: don't care
	 * 1: enable comparison of extracted byte pointed by byte 3 location.
	 */
	u32 byte3_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte2_location;
	/*! 0: don't care
	 * 1: enable comparison of extracted byte pointed by byte 2 location.
	 */
	u32 byte2_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte1_location;
	/*! 0: don't care
	 * 1: enable comparison of extracted byte pointed by byte 1 location.
	 */
	u32 byte1_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte0_location;
	/*! 0: don't care
	 * 1: enable comparison of extracted byte pointed by byte 0 location.
	 */
	u32 byte0_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of extracted VLAN ID field.
	 */
	u32 vlan_id_mask;
	/*! 0: don't care
	 * 1: enable comparison of extracted VLAN UP field.
	 */
	u32 vlan_up_mask;
	/*! 0: don't care
	 * 1: enable comparison of extracted VLAN Valid field.
	 */
	u32 vlan_valid_mask;
	/*! This is bit mask to enable comparison of the 8 bit TCI field,
	 * including the AN field.
	 * For explicit SECTAG, AN is hardware controlled. For sending
	 * packets w/ explicit SECTAG, the rest of the TCI fields come
	 * directly from the SECTAG.
	 */
	u32 tci_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of SCI
	 * Note: If this field is not 0, this means the input packet's
	 * SECTAG is explicitly tagged and the MACSEC module will only
	 * update the MSDU.
	 * PN number is hardware controlled.
	 */
	u32 sci_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of Ethertype.
	 */
	u32 eth_type_mask;
	/*! Mask is per-byte.
	 * 0: don't care and no SNAP header exists.
	 * 1: compare the SNAP header.
	 * If this bit is set to 1, the extracted field will assume the
	 * SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
	 * next 5 bytes after the LLC header are the SNAP header.
	 */
	u32 snap_mask;
	/*! 0: don't care and no LLC header exists.
	 * 1: compare the LLC header.
	 * If this bit is set to 1, the extracted field will assume the
	 * LLC header exists as encapsulated in 802.3 (RFC 1042), i.e. the
	 * next three bytes after the 802.3 MAC header are the LLC header.
	 */
	u32 llc_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of MAC_SA.
	 */
	u32 sa_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of MAC_DA.
	 */
	u32 da_mask;
	/*! Mask is per-byte. */
	u32 pn_mask;
	/*! Reserved. This bit should be always 0. */
	u32 eight02dot2;
	/*! 1: For explicit sectag case use TCI_SC from table
	 * 0: use TCI_SC from explicit sectag.
	 */
	u32 tci_sc;
	/*! 1: For explicit sectag case, use TCI_V,ES,SCB,E,C from table
	 * 0: use TCI_V,ES,SCB,E,C from explicit sectag.
	 */
	u32 tci_87543;
	/*! 1: indicates that incoming packet has explicit sectag. */
	u32 exp_sectag_en;
	/*! If packet matches and is tagged as controlled-packet, this SC/SA
	 * index is used for later SC and SA table lookup.
	 */
	u32 sc_idx;
	/*! This field is used to specify how many SA entries are
	 * associated with 1 SC entry.
	 * 2'b00: 1 SC has 4 SA.
	 * SC index is equivalent to {SC_Index[4:2], 1'b0}.
	 * SA index is equivalent to {SC_Index[4:2], SC entry's current AN[1:0]
	 * 2'b10: 1 SC has 2 SA.
	 * SC index is equivalent to SC_Index[4:1]
	 * SA index is equivalent to {SC_Index[4:1], SC entry's current AN[0]}
	 * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
	 * SA index is equivalent to SC_Index[4:0]
	 * Note: if specified as 2'b11, hardware AN roll over is not
	 * supported.
	 */
	u32 sc_sa;
	/*! 0: the packets will be sent to MAC FIFO
	 * 1: The packets will be sent to Debug/Loopback FIFO.
	 * If the above action is drop, this bit has no meaning.
	 */
	u32 debug;
	/*! 0: forward to remaining modules
	 * 1: bypass the next encryption modules. This packet is considered
	 * an un-controlled packet.
	 * 2: drop
	 * 3: Reserved.
	 */
	u32 action;
	/*! 0: Not valid entry. This entry is not used
	 * 1: valid entry.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Egress SC Lookup table. */
struct aq_mss_egress_sc_record {
	/*! This is to specify when the SC was first used. Set by HW. */
	u32 start_time;
	/*! This is to specify when the SC was last used. Set by HW. */
	u32 stop_time;
	/*! This is to specify which of the SA entries are used by current HW.
	 * Note: This value needs to be set by SW after reset. It will be
	 * automatically updated by HW, if AN roll over is enabled.
	 */
	u32 curr_an;
	/*! 0: Clear the SA Valid Bit after PN expiry.
	 * 1: Do not Clear the SA Valid bit after PN expiry of the current SA.
	 * When the Enable AN roll over is set, S/W does not need to
	 * program the new SA's and the H/W will automatically roll over
	 * between the SA's without session expiry.
	 * For normal operation, Enable AN Roll over will be set to '0'
	 * and in which case, the SW needs to program the new SA values
	 * after the current PN expires.
	 */
	u32 an_roll;
	/*! This is the TCI field used if packet is not explicitly tagged. */
	u32 tci;
	/*! This value indicates the offset where the decryption will start.
	 * Valid values are 0, 4, 8-50.
	 */
	u32 enc_off;
	/*! 0: Do not protect frames, all the packets will be forwarded
	 * unchanged. MIB counter (OutPktsUntagged) will be updated.
	 * 1: Protect.
	 */
	u32 protect;
	/*! 0: when none of the SA related to SC has inUse set.
	 * 1: when either of the SA related to the SC has inUse set.
	 * This bit is set by HW.
	 */
	u32 recv;
	/*! 0: H/W Clears this bit on the first use.
	 * 1: SW updates this entry, when programming the SC Table.
	 */
	u32 fresh;
	/*! AES Key size
	 * 00 - 128 bits
	 * 01 - 192 bits
	 * 10 - 256 bits
	 * 11 - Reserved.
	 */
	u32 sak_len;
	/*! 0: Invalid SC
	 * 1: Valid SC.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Egress SA Lookup table. */
struct aq_mss_egress_sa_record {
	/*! This is to specify when the SA was first used. Set by HW. */
	u32 start_time;
	/*! This is to specify when the SA was last used. Set by HW. */
	u32 stop_time;
	/*! This is set by SW and updated by HW to store the Next PN number
	 * used for encryption.
	 */
	u32 next_pn;
	/*! The Next_PN number is going to wrap around from 0xFFFF_FFFF
	 * to 0. Set by HW.
	 */
	u32 sat_pn;
	/*! 0: This SA is in use.
	 * 1: This SA is Fresh and set by SW.
	 */
	u32 fresh;
	/*! 0: Invalid SA
	 * 1: Valid SA.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Egress SA Key
 * Lookup table.
 */
struct aq_mss_egress_sakey_record {
	/*! Key for AES-GCM processing. */
	u32 key[8];
};
/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
 * CTL Filter table.
 */
struct aq_mss_ingress_prectlf_record {
	/*! This is used to store the 48 bit value used to compare SA, DA
	 * or half DA + half SA value.
	 */
	u32 sa_da[2];
	/*! This is used to store the 16 bit ethertype value used for
	 * comparison.
	 */
	u32 eth_type;
	/*! The match mask is per-nibble. 0 means don't care, i.e. every
	 * value will match successfully. The total data is 64 bit, i.e.
	 * 16 nibble masks.
	 */
	u32 match_mask;
	/*! 0: No compare, i.e. this entry is not used
	 * 1: compare DA only
	 * 2: compare SA only
	 * 3: compare half DA + half SA
	 * 4: compare ether type only
	 * 5: compare DA + ethertype
	 * 6: compare SA + ethertype
	 * 7: compare DA + range.
	 */
	u32 match_type;
	/*! 0: Bypass the remaining modules if matched.
	 * 1: Forward to next module for more classifications.
	 */
	u32 action;
};
/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
 * Packet Classifier table.
 */
struct aq_mss_ingress_preclass_record {
	/*! The 64 bit SCI field used to compare with extracted value.
	 * Should have SCI value in case TCI[SCI_SEND] == 0. This will be
	 * used for ICV calculation.
	 */
	u32 sci[2];
	/*! The 8 bit TCI field used to compare with extracted value. */
	u32 tci;
	/*! 8 bit encryption offset. */
	u32 encr_offset;
	/*! The 16 bit Ethertype (in the clear) field used to compare with
	 * extracted value.
	 */
	u32 eth_type;
	/*! This is to specify the 40 bit SNAP header if the SNAP header's
	 * mask is enabled.
	 */
	u32 snap[2];
	/*! This is to specify the 24 bit LLC header if the LLC header's
	 * mask is enabled.
	 */
	u32 llc;
	/*! The 48 bit MAC_SA field used to compare with extracted value. */
	u32 mac_sa[2];
	/*! The 48 bit MAC_DA field used to compare with extracted value. */
	u32 mac_da[2];
	/*! 0: this is to compare with non-LPBK packet
	 * 1: this is to compare with LPBK packet.
	 * This value is used to compare with a controlled-tag which goes
	 * with the packet when looped back from Egress port.
	 */
	u32 lpbk_packet;
	/*! The value of this bit mask affects how the SC index and SA
	 * index are created.
	 * 2'b00: 1 SC has 4 SA.
	 * SC index is equivalent to {SC_Index[4:2], 1'b0}.
	 * SA index is equivalent to {SC_Index[4:2], SECTAG's AN[1:0]}
	 * Here AN bits are not compared.
	 * 2'b10: 1 SC has 2 SA.
	 * SC index is equivalent to SC_Index[4:1]
	 * SA index is equivalent to {SC_Index[4:1], SECTAG's AN[0]}
	 * Compare AN[1] field only
	 * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
	 * SA index is equivalent to SC_Index[4:0]
	 * AN[1:0] bits are compared.
	 * NOTE: This design supports different usages of AN. User
	 * can either ping-pong buffer 2 SA by using only the AN[0] bit,
	 * or use 4 SA per SC by using the AN[1:0] bits, or even treat each
	 * SA as independent, i.e. AN[1:0] is just another matching pointer
	 * to select SA.
	 */
	u32 an_mask;
	/*! This is bit mask to enable comparison of the upper 6 bits TCI
	 * field, which does not include the AN field.
	 * 0: don't compare
	 * 1: enable comparison of the bits.
	 */
	u32 tci_mask;
	/*! 0: don't care
	 * 1: enable comparison of SCI.
	 */
	u32 sci_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of Ethertype.
	 */
	u32 eth_type_mask;
	/*! Mask is per-byte.
	 * 0: don't care and no SNAP header exists.
	 * 1: compare the SNAP header.
	 * If this bit is set to 1, the extracted field will assume the
	 * SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
	 * next 5 bytes after the LLC header are the SNAP header.
	 */
	u32 snap_mask;
	/*! Mask is per-byte.
	 * 0: don't care and no LLC header exists.
	 * 1: compare the LLC header.
	 * If this bit is set to 1, the extracted field will assume the
	 * LLC header exists as encapsulated in 802.3 (RFC 1042), i.e. the
	 * next three bytes after the 802.3 MAC header are the LLC header.
	 */
	u32 llc_mask;
	/*! Reserved. This bit should be always 0. */
	u32 _802_2_encapsulate;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of MAC_SA.
	 */
	u32 sa_mask;
	/*! Mask is per-byte.
	 * 0: don't care
	 * 1: enable comparison of MAC_DA.
	 */
	u32 da_mask;
	/*! 0: don't care
	 * 1: enable checking if this is loopback packet or not.
	 */
	u32 lpbk_mask;
	/*! If packet matches and is tagged as controlled-packet, this SC/SA
	 * index is used for later SC and SA table lookup.
	 */
	u32 sc_idx;
	/*! 0: the packets will be sent to MAC FIFO
	 * 1: The packets will be sent to Debug/Loopback FIFO.
	 * If the above action is drop, this bit has no meaning.
	 */
	u32 proc_dest;
	/*! 0: Process: Forward to next two modules for 802.1AE decryption.
	 * 1: Process but keep SECTAG: Forward to next two modules for
	 * 802.1AE decryption but keep the MACSEC header with added error
	 * code information. ICV will be stripped for all control packets.
	 * 2: Bypass: Bypass the next two decryption modules but processed
	 * by post-classification.
	 * 3: Drop: drop this packet and update counts accordingly.
	 */
	u32 action;
	/*! 0: This is a controlled-port packet if matched.
	 * 1: This is an uncontrolled-port packet if matched.
	 */
	u32 ctrl_unctrl;
	/*! Use the SCI value from the Table if 'SC' bit of the input
	 * packet is not present.
	 */
	u32 sci_from_table;
	/*! Reserved. */
	u32 reserved;
	/*! 0: Not valid entry. This entry is not used
	 * 1: valid entry.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Ingress SC Lookup table. */
struct aq_mss_ingress_sc_record {
	/*! This is to specify when the SC was last used. Set by HW. */
	u32 stop_time;
	/*! This is to specify when the SC was first used. Set by HW. */
	u32 start_time;
	/*! 0: Strict
	 * 1: Check
	 * 2: Disabled.
	 */
	u32 validate_frames;
	/*! 1: Replay control enabled.
	 * 0: replay control disabled.
	 */
	u32 replay_protect;
	/*! This is to specify the window range for anti-replay. Default is 0.
	 * 0: is strict order enforcement.
	 */
	u32 anti_replay_window;
	/*! 0: when none of the SA related to SC has inUse set.
	 * 1: when either of the SA related to the SC has inUse set.
	 * This bit is set by HW.
	 */
	u32 receiving;
	/*! 0: when hardware processed the SC for the first time, it clears
	 * this bit
	 * 1: This bit is set by SW, when it sets up the SC.
	 */
	u32 fresh;
	/*! 0: The AN number will not automatically roll over if Next_PN is
	 * saturated.
	 * 1: The AN number will automatically roll over if Next_PN is
	 * saturated.
	 * Rollover is valid only after expiry. Normal roll over between
	 * SA's should be normal process.
	 */
	u32 an_rol;
	/*! Reserved. */
	u32 reserved;
	/*! 0: Invalid SC
	 * 1: Valid SC.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Ingress SA Lookup table. */
struct aq_mss_ingress_sa_record {
	/*! This is to specify when the SA was last used. Set by HW. */
	u32 stop_time;
	/*! This is to specify when the SA was first used. Set by HW. */
	u32 start_time;
	/*! This is updated by HW to store the expected NextPN number for
	 * anti-replay.
	 */
	u32 next_pn;
	/*! The Next_PN number is going to wrap around from 0xFFFF_FFFF
	 * to 0. Set by HW.
	 */
	u32 sat_nextpn;
	/*! 0: This SA is not yet used.
	 * 1: This SA is inUse.
	 */
	u32 in_use;
	/*! 0: when hardware processed the SA for the first time, it clears
	 * this bit
	 * 1: This bit is set by SW, when it sets up the SA.
	 */
	u32 fresh;
	/*! Reserved. */
	u32 reserved;
	/*! 0: Invalid SA.
	 * 1: Valid SA.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Ingress SA Key
 * Lookup table.
 */
struct aq_mss_ingress_sakey_record {
	/*! Key for AES-GCM processing. */
	u32 key[8];
	/*! AES key size
	 * 00 - 128 bits
	 * 01 - 192 bits
	 * 10 - 256 bits
	 * 11 - reserved.
	 */
	u32 key_len;
};
/*! Represents the bitfields of a single row in the Ingress Post-
 * MACSec Packet Classifier table.
 */
struct aq_mss_ingress_postclass_record {
	/*! The 8 bit value used to compare with extracted value for byte 0. */
	u32 byte0;
	/*! The 8 bit value used to compare with extracted value for byte 1. */
	u32 byte1;
	/*! The 8 bit value used to compare with extracted value for byte 2. */
	u32 byte2;
	/*! The 8 bit value used to compare with extracted value for byte 3. */
	u32 byte3;
	/*! Ethertype in the packet. */
	u32 eth_type;
	/*! Ether Type value > 1500 (0x5dc). */
	u32 eth_type_valid;
	/*! VLAN ID after parsing. */
	u32 vlan_id;
	/*! VLAN priority after parsing. */
	u32 vlan_up;
	/*! Valid VLAN coding. */
	u32 vlan_valid;
	/*! SA index. */
	u32 sai;
	/*! SAI hit, i.e. controlled packet. */
	u32 sai_hit;
	/*! Mask for payload ethertype field. */
	u32 eth_type_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte3_location;
	/*! Mask for Byte Offset 3. */
	u32 byte3_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte2_location;
	/*! Mask for Byte Offset 2. */
	u32 byte2_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte1_location;
	/*! Mask for Byte Offset 1. */
	u32 byte1_mask;
	/*! 0~63: byte location extracted by the packet comparator, which
	 * can be anything from the first 64 bytes of the MAC packet.
	 * This byte location is counted from the MAC's DA address, i.e. 0
	 * points to byte 0 of the DA address.
	 */
	u32 byte0_location;
	/*! Mask for Byte Offset 0. */
	u32 byte0_mask;
	/*! Mask for Ethertype valid field. Indicates 802.3 vs. Other. */
	u32 eth_type_valid_mask;
	/*! Mask for VLAN ID field. */
	u32 vlan_id_mask;
	/*! Mask for VLAN UP field. */
	u32 vlan_up_mask;
	/*! Mask for VLAN valid field. */
	u32 vlan_valid_mask;
	/*! Mask for SAI. */
	u32 sai_mask;
	/*! Mask for SAI_HIT. */
	u32 sai_hit_mask;
	/*! Action if only first level matches and second level does not.
	 * 0: pass
	 * 1: drop (fail).
	 */
	u32 firstlevel_actions;
	/*! Action if both first and second level matched.
	 * 0: pass
	 * 1: drop (fail).
	 */
	u32 secondlevel_actions;
	/*! Reserved. */
	u32 reserved;
	/*! 0: Not valid entry. This entry is not used
	 * 1: valid entry.
	 */
	u32 valid;
};
/*! Represents the bitfields of a single row in the Ingress Post-
 * MACSec CTL Filter table.
 */
struct aq_mss_ingress_postctlf_record {
	/*! This is used to store the 48 bit value used to compare SA, DA
	 * or half DA + half SA value.
	 */
	u32 sa_da[2];
	/*! This is used to store the 16 bit ethertype value used for
	 * comparison.
	 */
	u32 eth_type;
	/*! The match mask is per-nibble. 0 means don't care, i.e. every
	 * value will match successfully. The total data is 64 bit, i.e.
	 * 16 nibble masks.
	 */
	u32 match_mask;
	/*! 0: No compare, i.e. this entry is not used
	 * 1: compare DA only
	 * 2: compare SA only
	 * 3: compare half DA + half SA
	 * 4: compare ether type only
	 * 5: compare DA + ethertype
	 * 6: compare SA + ethertype
	 * 7: compare DA + range.
	 */
	u32 match_type;
	/*! 0: Bypass the remaining modules if matched.
	 * 1: Forward to next module for more classifications.
	 */
	u32 action;
};
/*! Represents the Egress MIB counters for a single SC. Counters are
 * 64 bits, lower 32 bits in field[0], upper 32 bits in field[1].
 */
struct aq_mss_egress_sc_counters {
	/*! The number of integrity protected but not encrypted packets
	 * for this transmitting SC.
	 */
	u32 sc_protected_pkts[2];
	/*! The number of integrity protected and encrypted packets for
	 * this transmitting SC.
	 */
	u32 sc_encrypted_pkts[2];
	/*! The number of plain text octets that are integrity protected
	 * but not encrypted on the transmitting SC.
	 */
	u32 sc_protected_octets[2];
	/*! The number of plain text octets that are integrity protected
	 * and encrypted on the transmitting SC.
	 */
	u32 sc_encrypted_octets[2];
};
/*! Represents the Egress MIB counters for a single SA. Counters are
 * 64 bits, lower 32 bits in field[0], upper 32 bits in field[1].
 */
struct aq_mss_egress_sa_counters {
	/*! The number of dropped packets for this transmitting SA. */
	u32 sa_hit_drop_redirect[2];
	/*! TODO: exact semantics undocumented in this header; appears to
	 * be a secondary protected-packet count for this SA — confirm
	 * against the MACsec IP MIB documentation.
	 */
	u32 sa_protected2_pkts[2];
	/*! The number of integrity protected but not encrypted packets
	 * for this transmitting SA.
	 */
	u32 sa_protected_pkts[2];
	/*! The number of integrity protected and encrypted packets for
	 * this transmitting SA.
	 */
	u32 sa_encrypted_pkts[2];
};
/*! Represents the common Egress MIB counters; the counters not
 * associated with a particular SC/SA. Counters are 64 bits, lower 32
 * bits in field[0], upper 32 bits in field[1].
 */
struct aq_mss_egress_common_counters {
	/*! The number of transmitted packets classified as MAC_CTL packets. */
	u32 ctl_pkt[2];
	/*! The number of transmitted packets that did not match any rows
	 * in the Egress Packet Classifier table.
	 */
	u32 unknown_sa_pkts[2];
	/*! The number of transmitted packets where the SC table entry has
	 * protect=0 (so packets are forwarded unchanged).
	 */
	u32 untagged_pkts[2];
	/*! The number of transmitted packets discarded because the packet
	 * length is greater than the ifMtu of the Common Port interface.
	 */
	u32 too_long[2];
	/*! The number of transmitted packets for which table memory was
	 * affected by an ECC error during processing.
	 */
	u32 ecc_error_pkts[2];
	/*! The number of transmitted packets for which the matched row in
	 * the Egress Packet Classifier table has action=drop.
	 */
	u32 unctrl_hit_drop_redir[2];
};
/*! Represents the Ingress MIB counters for a single SA. Counters are
 * 64 bits, lower 32 bits in field[0], upper 32 bits in field[1].
 */
struct aq_mss_ingress_sa_counters {
	/*! For this SA, the number of received packets without a SecTAG. */
	u32 untagged_hit_pkts[2];
	/*! For this SA, the number of received packets that were dropped. */
	u32 ctrl_hit_drop_redir_pkts[2];
	/*! For this SA which is not currently in use, the number of
	 * received packets that have been discarded, where either the
	 * packets were encrypted or the matched row in the Ingress SC
	 * Lookup table has validate_frames=Strict.
	 */
	u32 not_using_sa[2];
	/*! For this SA which is not currently in use, the number of
	 * received, unencrypted, packets where the matched row in the
	 * Ingress SC Lookup table has validate_frames!=Strict.
	 */
	u32 unused_sa[2];
	/*! For this SA, the number of discarded packets with the condition
	 * that the packets are not valid and one of the following
	 * conditions are true: either the matched row in the Ingress SC
	 * Lookup table has validate_frames=Strict or the packets were
	 * encrypted.
	 */
	u32 not_valid_pkts[2];
	/*! For this SA, the number of packets with the condition that the
	 * packets are not valid and the matched row in the Ingress SC
	 * Lookup table has validate_frames=Check.
	 */
	u32 invalid_pkts[2];
	/*! For this SA, the number of validated packets. */
	u32 ok_pkts[2];
	/*! For this SC, the number of received packets that have been
	 * discarded with the condition: the matched row in the Ingress
	 * SC Lookup table has replay_protect=1 and the PN of the packet
	 * is lower than the lower bound replay check PN.
	 */
	u32 late_pkts[2];
	/*! For this SA, the number of packets with the condition that the
	 * PN of the packets is lower than the lower bound replay
	 * protection PN.
	 */
	u32 delayed_pkts[2];
	/*! For this SC, the number of packets with the following condition:
	 * - the matched row in the Ingress SC Lookup table has
	 *   replay_protect=0 or
	 * - the matched row in the Ingress SC Lookup table has
	 *   replay_protect=1 and the packet is not encrypted and the
	 *   integrity check has failed or
	 * - the matched row in the Ingress SC Lookup table has
	 *   replay_protect=1 and the packet is encrypted and integrity
	 *   check has failed.
	 */
	u32 unchecked_pkts[2];
	/*! The number of octets of plaintext recovered from received
	 * packets that were integrity protected but not encrypted.
	 */
	u32 validated_octets[2];
	/*! The number of octets of plaintext recovered from received
	 * packets that were integrity protected and encrypted.
	 */
	u32 decrypted_octets[2];
};
/*! Represents the common Ingress MIB counters; the counters not
 * associated with a particular SA. Counters are 64 bits, lower 32
 * bits in field[0], upper 32 bits in field[1].
 */
struct aq_mss_ingress_common_counters {
	/*! The number of received packets classified as MAC_CTL packets. */
	u32 ctl_pkts[2];
	/*! The number of received packets with the MAC security tag
	 * (SecTAG), not matching any rows in the Ingress Pre-MACSec
	 * Packet Classifier table.
	 */
	u32 tagged_miss_pkts[2];
	/*! The number of received packets without the MAC security tag
	 * (SecTAG), not matching any rows in the Ingress Pre-MACSec
	 * Packet Classifier table.
	 */
	u32 untagged_miss_pkts[2];
	/*! The number of received packets discarded without the MAC
	 * security tag (SecTAG) and with the matched row in the Ingress
	 * SC Lookup table having validate_frames=Strict.
	 */
	u32 notag_pkts[2];
	/*! The number of received packets without the MAC security tag
	 * (SecTAG) and with the matched row in the Ingress SC Lookup
	 * table having validate_frames!=Strict.
	 */
	u32 untagged_pkts[2];
	/*! The number of received packets discarded with an invalid
	 * SecTAG or a zero value PN or an invalid ICV.
	 */
	u32 bad_tag_pkts[2];
	/*! The number of received packets discarded with unknown SCI
	 * information with the condition:
	 * the matched row in the Ingress SC Lookup table has
	 * validate_frames=Strict or the C bit in the SecTAG is set.
	 */
	u32 no_sci_pkts[2];
	/*! The number of received packets with unknown SCI with the condition:
	 * The matched row in the Ingress SC Lookup table has
	 * validate_frames!=Strict and the C bit in the SecTAG is not set.
	 */
	u32 unknown_sci_pkts[2];
	/*! The number of received packets by the controlled port service
	 * that passed the Ingress Post-MACSec Packet Classifier table
	 * check.
	 */
	u32 ctrl_prt_pass_pkts[2];
	/*! The number of received packets by the uncontrolled port
	 * service that passed the Ingress Post-MACSec Packet Classifier
	 * table check.
	 */
	u32 unctrl_prt_pass_pkts[2];
	/*! The number of received packets by the controlled port service
	 * that failed the Ingress Post-MACSec Packet Classifier table
	 * check.
	 */
	u32 ctrl_prt_fail_pkts[2];
	/*! The number of received packets by the uncontrolled port
	 * service that failed the Ingress Post-MACSec Packet Classifier
	 * table check.
	 */
	u32 unctrl_prt_fail_pkts[2];
	/*! The number of received packets discarded because the packet
	 * length is greater than the ifMtu of the Common Port interface.
	 */
	u32 too_long_pkts[2];
	/*! The number of received packets classified as MAC_CTL by the
	 * Ingress Post-MACSec CTL Filter table.
	 */
	u32 igpoc_ctl_pkts[2];
	/*! The number of received packets for which table memory was
	 * affected by an ECC error during processing.
	 */
	u32 ecc_error_pkts[2];
	/*! The number of received packets by the uncontrolled port
	 * service that were dropped.
	 */
	u32 unctrl_hit_drop_redir[2];
};
#endif
......@@ -88,17 +88,6 @@ struct gcm_iv {
__be32 pn;
};
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
__u64 OutPktsTooLong;
__u64 InPktsNoTag;
__u64 InPktsBadTag;
__u64 InPktsUnknownSCI;
__u64 InPktsNoSCI;
__u64 InPktsOverrun;
};
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
struct pcpu_secy_stats {
......@@ -339,7 +328,8 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
/* Checks if a MACsec interface is being offloaded to an hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
if (macsec->offload == MACSEC_OFFLOAD_PHY)
if (macsec->offload == MACSEC_OFFLOAD_MAC ||
macsec->offload == MACSEC_OFFLOAD_PHY)
return true;
return false;
......@@ -355,6 +345,9 @@ static bool macsec_check_offload(enum macsec_offload offload,
if (offload == MACSEC_OFFLOAD_PHY)
return macsec->real_dev->phydev &&
macsec->real_dev->phydev->macsec_ops;
else if (offload == MACSEC_OFFLOAD_MAC)
return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
macsec->real_dev->macsec_ops;
return false;
}
......@@ -369,9 +362,14 @@ static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
if (offload == MACSEC_OFFLOAD_PHY)
ctx->phydev = macsec->real_dev->phydev;
else if (offload == MACSEC_OFFLOAD_MAC)
ctx->netdev = macsec->real_dev;
}
return macsec->real_dev->phydev->macsec_ops;
if (offload == MACSEC_OFFLOAD_PHY)
return macsec->real_dev->phydev->macsec_ops;
else
return macsec->real_dev->macsec_ops;
}
/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
......@@ -997,22 +995,53 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
struct ethhdr *hdr = eth_hdr(skb);
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
/* 10.6 If the management control validateFrames is not
* Strict, frames without a SecTAG are received, counted, and
* delivered to the Controlled Port
*/
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
struct net_device *ndev = macsec->secy.netdev;
if (!macsec_is_offloaded(macsec) &&
macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
/* If h/w offloading is enabled, HW decodes frames and strips
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
goto out;
} else if (is_multicast_ether_addr_64bits(
hdr->h_dest)) {
/* multicast frame, deliver on this port too */
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
break;
nskb->dev = ndev;
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->broadcast))
nskb->pkt_type = PACKET_BROADCAST;
else
nskb->pkt_type = PACKET_MULTICAST;
netif_rx(nskb);
}
continue;
}
/* 10.6 If the management control validateFrames is not
* Strict, frames without a SecTAG are received, counted, and
* delivered to the Controlled Port
*/
if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
......@@ -1024,19 +1053,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
if (!nskb)
break;
nskb->dev = macsec->secy.netdev;
nskb->dev = ndev;
if (netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
if (netif_running(macsec->secy.netdev) &&
macsec_is_offloaded(macsec)) {
ret = RX_HANDLER_EXACT;
goto out;
}
}
out:
......@@ -1785,6 +1808,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
......@@ -1832,6 +1856,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct nlattr **attrs = info->attrs;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct macsec_secy *secy;
bool was_active;
int ret;
......@@ -1851,6 +1876,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
}
secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
rx_sc = create_rx_sc(dev, sci);
......@@ -1874,6 +1900,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
if (ret)
......@@ -2023,6 +2050,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
......@@ -2098,6 +2126,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
if (ret)
......@@ -2163,6 +2192,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
if (ret)
goto cleanup;
......@@ -2221,6 +2251,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_txsa, &ctx);
if (ret)
......@@ -2332,6 +2363,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
if (ret)
......@@ -2424,6 +2456,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
if (ret)
......@@ -2494,6 +2527,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
if (ret)
......@@ -2533,11 +2567,10 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
enum macsec_offload offload, prev_offload;
int (*func)(struct macsec_context *ctx);
struct nlattr **attrs = info->attrs;
struct net_device *dev, *loop_dev;
struct net_device *dev;
const struct macsec_ops *ops;
struct macsec_context ctx;
struct macsec_dev *macsec;
struct net *loop_net;
int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
......@@ -2565,28 +2598,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
!macsec_check_offload(offload, macsec))
return -EOPNOTSUPP;
if (offload == MACSEC_OFFLOAD_OFF)
goto skip_limitation;
/* Check the physical interface isn't offloading another interface
* first.
*/
for_each_net(loop_net) {
for_each_netdev(loop_net, loop_dev) {
struct macsec_dev *priv;
if (!netif_is_macsec(loop_dev))
continue;
priv = macsec_priv(loop_dev);
if (priv->real_dev == macsec->real_dev &&
priv->offload != MACSEC_OFFLOAD_OFF)
return -EBUSY;
}
}
skip_limitation:
/* Check if the net device is busy. */
if (netif_running(dev))
return -EBUSY;
......@@ -2622,6 +2633,10 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
goto rollback;
rtnl_unlock();
/* Force features update, since they are different for SW MACSec and
* HW offloading cases.
*/
netdev_update_features(dev);
return 0;
rollback:
......@@ -2631,207 +2646,309 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
return ret;
}
static int copy_tx_sa_stats(struct sk_buff *skb,
struct macsec_tx_sa_stats __percpu *pstats)
static void get_tx_sa_stats(struct net_device *dev, int an,
struct macsec_tx_sa *tx_sa,
struct macsec_tx_sa_stats *sum)
{
struct macsec_tx_sa_stats sum = {0, };
struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.sa.assoc_num = an;
ctx.sa.tx_sa = tx_sa;
ctx.stats.tx_sa_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) {
const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
const struct macsec_tx_sa_stats *stats =
per_cpu_ptr(tx_sa->stats, cpu);
sum.OutPktsProtected += stats->OutPktsProtected;
sum.OutPktsEncrypted += stats->OutPktsEncrypted;
sum->OutPktsProtected += stats->OutPktsProtected;
sum->OutPktsEncrypted += stats->OutPktsEncrypted;
}
}
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
sum->OutPktsProtected) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
sum->OutPktsEncrypted))
return -EMSGSIZE;
return 0;
}
static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
struct macsec_rx_sa_stats __percpu *pstats)
static void get_rx_sa_stats(struct net_device *dev,
struct macsec_rx_sc *rx_sc, int an,
struct macsec_rx_sa *rx_sa,
struct macsec_rx_sa_stats *sum)
{
struct macsec_rx_sa_stats sum = {0, };
struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.sa.assoc_num = an;
ctx.sa.rx_sa = rx_sa;
ctx.stats.rx_sa_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
ctx.rx_sc = rx_sc;
macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) {
const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
const struct macsec_rx_sa_stats *stats =
per_cpu_ptr(rx_sa->stats, cpu);
sum.InPktsOK += stats->InPktsOK;
sum.InPktsInvalid += stats->InPktsInvalid;
sum.InPktsNotValid += stats->InPktsNotValid;
sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
sum.InPktsUnusedSA += stats->InPktsUnusedSA;
sum->InPktsOK += stats->InPktsOK;
sum->InPktsInvalid += stats->InPktsInvalid;
sum->InPktsNotValid += stats->InPktsNotValid;
sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
sum->InPktsUnusedSA += stats->InPktsUnusedSA;
}
}
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
static int copy_rx_sa_stats(struct sk_buff *skb,
struct macsec_rx_sa_stats *sum)
{
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
sum->InPktsInvalid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
sum->InPktsNotValid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
sum->InPktsNotUsingSA) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
sum->InPktsUnusedSA))
return -EMSGSIZE;
return 0;
}
static noinline_for_stack int
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
static void get_rx_sc_stats(struct net_device *dev,
struct macsec_rx_sc *rx_sc,
struct macsec_rx_sc_stats *sum)
{
struct macsec_rx_sc_stats sum = {0, };
struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.stats.rx_sc_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
ctx.rx_sc = rx_sc;
macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) {
const struct pcpu_rx_sc_stats *stats;
struct macsec_rx_sc_stats tmp;
unsigned int start;
stats = per_cpu_ptr(pstats, cpu);
stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.InOctetsValidated += tmp.InOctetsValidated;
sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
sum.InPktsUnchecked += tmp.InPktsUnchecked;
sum.InPktsDelayed += tmp.InPktsDelayed;
sum.InPktsOK += tmp.InPktsOK;
sum.InPktsInvalid += tmp.InPktsInvalid;
sum.InPktsLate += tmp.InPktsLate;
sum.InPktsNotValid += tmp.InPktsNotValid;
sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
sum->InOctetsValidated += tmp.InOctetsValidated;
sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
sum->InPktsUnchecked += tmp.InPktsUnchecked;
sum->InPktsDelayed += tmp.InPktsDelayed;
sum->InPktsOK += tmp.InPktsOK;
sum->InPktsInvalid += tmp.InPktsInvalid;
sum->InPktsLate += tmp.InPktsLate;
sum->InPktsNotValid += tmp.InPktsNotValid;
sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
}
}
static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
sum.InOctetsValidated,
sum->InOctetsValidated,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
sum.InOctetsDecrypted,
sum->InOctetsDecrypted,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
sum.InPktsUnchecked,
sum->InPktsUnchecked,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
sum.InPktsDelayed,
sum->InPktsDelayed,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
sum.InPktsOK,
sum->InPktsOK,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
sum.InPktsInvalid,
sum->InPktsInvalid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
sum.InPktsLate,
sum->InPktsLate,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
sum.InPktsNotValid,
sum->InPktsNotValid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
sum.InPktsNotUsingSA,
sum->InPktsNotUsingSA,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
sum.InPktsUnusedSA,
sum->InPktsUnusedSA,
MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
static noinline_for_stack int
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
static void get_tx_sc_stats(struct net_device *dev,
struct macsec_tx_sc_stats *sum)
{
struct macsec_tx_sc_stats sum = {0, };
struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.stats.tx_sc_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) {
const struct pcpu_tx_sc_stats *stats;
struct macsec_tx_sc_stats tmp;
unsigned int start;
stats = per_cpu_ptr(pstats, cpu);
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.OutPktsProtected += tmp.OutPktsProtected;
sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
sum.OutOctetsProtected += tmp.OutOctetsProtected;
sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
sum->OutPktsProtected += tmp.OutPktsProtected;
sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
sum->OutOctetsProtected += tmp.OutOctetsProtected;
sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
}
}
static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
sum.OutPktsProtected,
sum->OutPktsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
sum.OutPktsEncrypted,
sum->OutPktsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
sum.OutOctetsProtected,
sum->OutOctetsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
sum.OutOctetsEncrypted,
sum->OutOctetsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
static noinline_for_stack int
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
struct macsec_dev_stats sum = {0, };
struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.stats.dev_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
macsec_offload(ops->mdo_get_dev_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) {
const struct pcpu_secy_stats *stats;
struct macsec_dev_stats tmp;
unsigned int start;
stats = per_cpu_ptr(pstats, cpu);
stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.OutPktsUntagged += tmp.OutPktsUntagged;
sum.InPktsUntagged += tmp.InPktsUntagged;
sum.OutPktsTooLong += tmp.OutPktsTooLong;
sum.InPktsNoTag += tmp.InPktsNoTag;
sum.InPktsBadTag += tmp.InPktsBadTag;
sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
sum.InPktsNoSCI += tmp.InPktsNoSCI;
sum.InPktsOverrun += tmp.InPktsOverrun;
sum->OutPktsUntagged += tmp.OutPktsUntagged;
sum->InPktsUntagged += tmp.InPktsUntagged;
sum->OutPktsTooLong += tmp.OutPktsTooLong;
sum->InPktsNoTag += tmp.InPktsNoTag;
sum->InPktsBadTag += tmp.InPktsBadTag;
sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
sum->InPktsNoSCI += tmp.InPktsNoSCI;
sum->InPktsOverrun += tmp.InPktsOverrun;
}
}
static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
sum.OutPktsUntagged,
sum->OutPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
sum.InPktsUntagged,
sum->InPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
sum.OutPktsTooLong,
sum->OutPktsTooLong,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
sum.InPktsNoTag,
sum->InPktsNoTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
sum.InPktsBadTag,
sum->InPktsBadTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
sum.InPktsUnknownSCI,
sum->InPktsUnknownSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
sum.InPktsNoSCI,
sum->InPktsNoSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
sum.InPktsOverrun,
sum->InPktsOverrun,
MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE;
......@@ -2892,7 +3009,12 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb)
{
struct macsec_tx_sc_stats tx_sc_stats = {0, };
struct macsec_tx_sa_stats tx_sa_stats = {0, };
struct macsec_rx_sc_stats rx_sc_stats = {0, };
struct macsec_rx_sa_stats rx_sa_stats = {0, };
struct macsec_dev *macsec = netdev_priv(dev);
struct macsec_dev_stats dev_stats = {0, };
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
struct macsec_rx_sc *rx_sc;
......@@ -2923,7 +3045,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr)
goto nla_put_failure;
if (copy_tx_sc_stats(skb, tx_sc->stats)) {
get_tx_sc_stats(dev, &tx_sc_stats);
if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
......@@ -2932,7 +3056,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
if (!attr)
goto nla_put_failure;
if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
get_secy_stats(dev, &dev_stats);
if (copy_secy_stats(skb, &dev_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
......@@ -2956,6 +3081,22 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
nla_nest_end(skb, attr);
if (secy->xpn) {
pn = tx_sa->next_pn;
pn_len = MACSEC_XPN_PN_LEN;
......@@ -2974,20 +3115,6 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
if (copy_tx_sa_stats(skb, tx_sa->stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
nla_nest_end(skb, attr);
nla_nest_end(skb, txsa_nest);
}
nla_nest_end(skb, txsa_list);
......@@ -3021,7 +3148,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
if (copy_rx_sc_stats(skb, rx_sc->stats)) {
memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
......@@ -3062,7 +3191,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
if (copy_rx_sa_stats(skb, rx_sa->stats)) {
memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
......@@ -3272,9 +3403,16 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return ret;
}
#define MACSEC_FEATURES \
#define SW_MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
/* If h/w offloading is enabled, use real device features save for
* VLAN_FEATURES - they require additional ops
* HW_MACSEC - no reason to report it
*/
#define REAL_DEV_FEATURES(dev) \
((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
......@@ -3291,8 +3429,12 @@ static int macsec_dev_init(struct net_device *dev)
return err;
}
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
if (macsec_is_offloaded(macsec)) {
dev->features = REAL_DEV_FEATURES(real_dev);
} else {
dev->features = real_dev->features & SW_MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
}
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
......@@ -3321,7 +3463,10 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
features &= (real_dev->features & MACSEC_FEATURES) |
if (macsec_is_offloaded(macsec))
return REAL_DEV_FEATURES(real_dev);
features &= (real_dev->features & SW_MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
......@@ -3361,6 +3506,7 @@ static int macsec_dev_open(struct net_device *dev)
goto clear_allmulti;
}
ctx.secy = &macsec->secy;
err = macsec_offload(ops->mdo_dev_open, &ctx);
if (err)
goto clear_allmulti;
......@@ -3392,8 +3538,10 @@ static int macsec_dev_stop(struct net_device *dev)
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops)
if (ops) {
ctx.secy = &macsec->secy;
macsec_offload(ops->mdo_dev_stop, &ctx);
}
}
dev_mc_unsync(real_dev, dev);
......
......@@ -83,6 +83,8 @@ enum {
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
......@@ -154,6 +156,7 @@ enum {
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
/* Finds the next feature with the highest number of the range of start till 0.
*/
......
......@@ -53,6 +53,8 @@ struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;
struct macsec_context;
struct macsec_ops;
struct sfp_bus;
/* 802.11 specific */
......@@ -1819,6 +1821,8 @@ enum netdev_priv_flags {
* that follow this device when it is moved
* to another network namespace.
*
* @macsec_ops: MACsec offloading ops
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
......@@ -2113,6 +2117,11 @@ struct net_device {
unsigned wol_enabled:1;
struct list_head net_notifier_list;
#if IS_ENABLED(CONFIG_MACSEC)
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
......
......@@ -88,6 +88,17 @@ struct macsec_tx_sc_stats {
__u64 OutOctetsEncrypted;
};
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
__u64 OutPktsTooLong;
__u64 InPktsNoTag;
__u64 InPktsBadTag;
__u64 InPktsUnknownSCI;
__u64 InPktsNoSCI;
__u64 InPktsOverrun;
};
/**
* struct macsec_rx_sa - receive secure association
* @active:
......@@ -220,7 +231,10 @@ struct macsec_secy {
* struct macsec_context - MACsec context for hardware offloading
*/
struct macsec_context {
struct phy_device *phydev;
union {
struct net_device *netdev;
struct phy_device *phydev;
};
enum macsec_offload offload;
struct macsec_secy *secy;
......@@ -233,6 +247,13 @@ struct macsec_context {
struct macsec_tx_sa *tx_sa;
};
} sa;
union {
struct macsec_tx_sc_stats *tx_sc_stats;
struct macsec_tx_sa_stats *tx_sa_stats;
struct macsec_rx_sc_stats *rx_sc_stats;
struct macsec_rx_sa_stats *rx_sa_stats;
struct macsec_dev_stats *dev_stats;
} stats;
u8 prepare:1;
};
......@@ -259,6 +280,12 @@ struct macsec_ops {
int (*mdo_add_txsa)(struct macsec_context *ctx);
int (*mdo_upd_txsa)(struct macsec_context *ctx);
int (*mdo_del_txsa)(struct macsec_context *ctx);
/* Statistics */
int (*mdo_get_dev_stats)(struct macsec_context *ctx);
int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
......
......@@ -489,6 +489,7 @@ enum macsec_validation_type {
enum macsec_offload {
MACSEC_OFFLOAD_OFF = 0,
MACSEC_OFFLOAD_PHY = 1,
MACSEC_OFFLOAD_MAC = 2,
__MACSEC_OFFLOAD_END,
MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
};
......
......@@ -60,6 +60,7 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
[NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload",
[NETIF_F_GRO_FRAGLIST_BIT] = "rx-gro-list",
[NETIF_F_HW_MACSEC_BIT] = "macsec-hw-offload",
};
const char
......
......@@ -489,6 +489,7 @@ enum macsec_validation_type {
enum macsec_offload {
MACSEC_OFFLOAD_OFF = 0,
MACSEC_OFFLOAD_PHY = 1,
MACSEC_OFFLOAD_MAC = 2,
__MACSEC_OFFLOAD_END,
MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment