Commit f68a8ebd authored by David S. Miller

Merge branch 'mlx4-qcn'

Or Gerlitz says:

====================
Add QCN support to the DCB NL layer

This series from Shani Michaeli adds support for the IEEE QCN attribute
to the kernel DCB NL stack, and implementation in the mlx4 driver which
programs the firmware according to the admin directives.

changes from V0:

 - applied feedback from John and added his acked-by to patch #1
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 34de26d3 708b869b
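
Before the diff itself, a quick sketch of how the new object is exercised from userspace. DCB objects travel over rtnetlink: an RTM_SETDCB message carries a struct dcbmsg with cmd DCB_CMD_IEEE_SET, a DCB_ATTR_IFNAME attribute naming the port, and a nested DCB_ATTR_IEEE container holding the new DCB_ATTR_IEEE_QCN attribute with a struct ieee_qcn payload. The sketch below is illustrative only and not part of the series; it assumes a bound NETLINK_ROUTE socket in nlsock, the helpers put_attr()/nest_begin() are ad-hoc, and error handling plus the ACK read are omitted. A matching GET-side parse sketch follows the net/dcb/dcbnl.c hunk at the end of this page.

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/dcbnl.h>

/* Append a flat attribute to the message under construction. */
static void put_attr(struct nlmsghdr *nlh, unsigned short type,
		     const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

/* Open a nested attribute; the caller fixes up rta_len when done. */
static struct rtattr *nest_begin(struct nlmsghdr *nlh, unsigned short type)
{
	struct rtattr *nest = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	nest->rta_type = type;
	nest->rta_len = RTA_LENGTH(0);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_LENGTH(0);
	return nest;
}

/* Send DCB_CMD_IEEE_SET carrying a struct ieee_qcn for one interface. */
static int qcn_set(int nlsock, const char *ifname, const struct ieee_qcn *qcn)
{
	char buf[1024] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct dcbmsg *d;
	struct rtattr *ieee;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*d));
	nlh->nlmsg_type = RTM_SETDCB;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	d = NLMSG_DATA(nlh);
	d->dcb_family = AF_UNSPEC;
	d->cmd = DCB_CMD_IEEE_SET;

	put_attr(nlh, DCB_ATTR_IFNAME, ifname, strlen(ifname) + 1);
	ieee = nest_begin(nlh, DCB_ATTR_IEEE);
	put_attr(nlh, DCB_ATTR_IEEE_QCN, qcn, sizeof(*qcn));
	ieee->rta_len = (char *)nlh + nlh->nlmsg_len - (char *)ieee;

	return send(nlsock, nlh, nlh->nlmsg_len, 0);
}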
@@ -1499,6 +1499,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_ACCESS_REG_wrapper,
},
{
.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper,
},
/* Native multicast commands are not available for guests */
{
.opcode = MLX4_CMD_QP_ATTACH,
...
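
The hunk above extends the cmd_info[] dispatch table in the mlx4 core command layer (cmd.c). Routing the new opcode through mlx4_CMD_EPERM_wrapper means an SR-IOV guest that issues MLX4_CMD_CONGESTION_CTRL_OPCODE is refused, leaving congestion control programmable only by the PF. The wrapper is preexisting driver code, not shown in this diff; assuming the standard mlx4 wrapper signature, its shape is simply:

static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	/* unconditionally refuse the command on behalf of a VF (slave) */
	return -EPERM;
}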
@@ -36,6 +36,49 @@
#include "mlx4_en.h"
/* Definitions for QCN
*/
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
__be32 modify_enable_high;
__be32 modify_enable_low;
__be32 reserved1;
__be32 extended_enable;
__be32 rppp_max_rps;
__be32 rpg_time_reset;
__be32 rpg_byte_reset;
__be32 rpg_threshold;
__be32 rpg_max_rate;
__be32 rpg_ai_rate;
__be32 rpg_hai_rate;
__be32 rpg_gd;
__be32 rpg_min_dec_fac;
__be32 rpg_min_rate;
__be32 max_time_rise;
__be32 max_byte_rise;
__be32 max_qdelta;
__be32 min_qoffset;
__be32 gd_coefficient;
__be32 reserved2[5];
__be32 cp_sample_base;
__be32 reserved3[39];
};
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
__be64 rppp_rp_centiseconds;
__be32 reserved1;
__be32 ignored_cnm;
__be32 rppp_created_rps;
__be32 estimated_total_rate;
__be32 max_active_rate_limiter_index;
__be32 dropped_cnms_busy_fw;
__be32 reserved2;
__be32 cnms_handled_successfully;
__be32 min_total_limiters_rate;
__be32 max_total_limiters_rate;
__be32 reserved3[4];
};
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
@@ -242,6 +285,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
return 0;
}
#define RPG_ENABLE_BIT 31
#define CN_TAG_BIT 30
static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
struct ieee_qcn *qcn)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
struct mlx4_cmd_mailbox *mailbox_out = NULL;
u64 mailbox_in_dma = 0;
u32 inmod = 0;
int i, err;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
return -EOPNOTSUPP;
mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
if (IS_ERR(mailbox_out))
return -ENOMEM;
hw_qcn =
(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
mailbox_out->buf;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
inmod = priv->port | ((1 << i) << 8) |
(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
mailbox_out->dma,
inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
MLX4_CMD_CONGESTION_CTRL_OPCODE,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return err;
}
qcn->rpg_enable[i] =
be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
qcn->rppp_max_rps[i] =
be32_to_cpu(hw_qcn->rppp_max_rps);
qcn->rpg_time_reset[i] =
be32_to_cpu(hw_qcn->rpg_time_reset);
qcn->rpg_byte_reset[i] =
be32_to_cpu(hw_qcn->rpg_byte_reset);
qcn->rpg_threshold[i] =
be32_to_cpu(hw_qcn->rpg_threshold);
qcn->rpg_max_rate[i] =
be32_to_cpu(hw_qcn->rpg_max_rate);
qcn->rpg_ai_rate[i] =
be32_to_cpu(hw_qcn->rpg_ai_rate);
qcn->rpg_hai_rate[i] =
be32_to_cpu(hw_qcn->rpg_hai_rate);
qcn->rpg_gd[i] =
be32_to_cpu(hw_qcn->rpg_gd);
qcn->rpg_min_dec_fac[i] =
be32_to_cpu(hw_qcn->rpg_min_dec_fac);
qcn->rpg_min_rate[i] =
be32_to_cpu(hw_qcn->rpg_min_rate);
qcn->cndd_state_machine[i] =
priv->cndd_state[i];
}
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return 0;
}
static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
struct ieee_qcn *qcn)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
struct mlx4_cmd_mailbox *mailbox_in = NULL;
u64 mailbox_in_dma = 0;
u32 inmod = 0;
int i, err;
#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
#define MODIFY_ENABLE_LOW_MASK 0xffc00000
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
return -EOPNOTSUPP;
mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
if (IS_ERR(mailbox_in))
return -ENOMEM;
mailbox_in_dma = mailbox_in->dma;
hw_qcn =
(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
inmod = priv->port | ((1 << i) << 8) |
(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
/* Before updating QCN parameter,
* need to set its modify enable bit to 1
*/
hw_qcn->modify_enable_high = cpu_to_be32(
MODIFY_ENABLE_HIGH_MASK);
hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
priv->cndd_state[i] = qcn->cndd_state_machine[i];
if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
MLX4_CONGESTION_CONTROL_SET_PARAMS,
MLX4_CMD_CONGESTION_CTRL_OPCODE,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
return err;
}
}
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
return 0;
}
static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
struct ieee_qcn_stats *qcn_stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
struct mlx4_cmd_mailbox *mailbox_out = NULL;
u64 mailbox_in_dma = 0;
u32 inmod = 0;
int i, err;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
return -EOPNOTSUPP;
mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
if (IS_ERR(mailbox_out))
return -ENOMEM;
hw_qcn_stats =
(struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
mailbox_out->buf;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
inmod = priv->port | ((1 << i) << 8) |
(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
mailbox_out->dma, inmod,
MLX4_CONGESTION_CONTROL_GET_STATISTICS,
MLX4_CMD_CONGESTION_CTRL_OPCODE,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return err;
}
qcn_stats->rppp_rp_centiseconds[i] =
be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
qcn_stats->rppp_created_rps[i] =
be32_to_cpu(hw_qcn_stats->rppp_created_rps);
}
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return 0;
}
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.ieee_getets = mlx4_en_dcbnl_ieee_getets,
.ieee_setets = mlx4_en_dcbnl_ieee_setets,
@@ -252,6 +467,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
.ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
.ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
.ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
};
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
...
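
One pattern in the en_dcb_nl.c hunk above deserves a callout: all three QCN handlers first gate on MLX4_DEV_CAP_FLAG2_QCN, then build the firmware command's input modifier identically for each traffic class. The patch open-codes the expression; a hypothetical helper (not in the patch) makes the field layout explicit:

/* Hypothetical helper showing the input-modifier layout shared by the
 * getqcn/setqcn/getqcnstats loops above; the patch open-codes it instead.
 */
static u32 mlx4_en_qcn_inmod(u8 port, int prio,
			     enum mlx4_en_congestion_control_algorithm algo)
{
	return port |			/* bits 0-7:  physical port     */
	       ((1U << prio) << 8) |	/* bits 8-15: priority bitmask  */
	       ((u32)algo << 16);	/* bits 16+:  802.1Qau RP algo  */
}

With it, each loop body would read inmod = mlx4_en_qcn_inmod(priv->port, i, MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT);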
@@ -143,7 +143,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[18] = "More than 80 VFs support",
[19] = "Performance optimized for limited rule configuration flow steering support",
[20] = "Recoverable error events support",
[21] = "Port Remap support" [21] = "Port Remap support",
[22] = "QCN support"
}; };
int i; int i;
@@ -675,7 +676,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -777,6 +778,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
if (field & 0x1)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
@@ -1149,6 +1153,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
DEV_CAP_EXT_2_FLAG_FSM);
MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
/* turn off QCN for guests */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
field &= 0xfe;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
return 0;
}
...
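
Two details in the fw.c hunk above work together: QUERY_DEV_CAP reports ECN/QCN support in the byte at offset 0x7b with bit 0 carrying the QCN capability, and the QUERY_DEV_CAP paravirtualization wrapper strips exactly that bit before a guest sees the result. A worked illustration with a hypothetical register value:

u8 field = 0x07;	/* hypothetical value read at offset 0x7b on the PF */

field &= 0xfe;		/* clear bit 0 only: QCN is now hidden from the guest */
/* field == 0x06: the remaining ECN/QCN version bits are preserved, so a
 * VF reading QUERY_DEV_CAP never sets MLX4_DEV_CAP_FLAG2_QCN and its
 * dcbnl QCN handlers bail out with -EOPNOTSUPP.
 */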
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
...
@@ -163,6 +163,9 @@ enum {
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
/* Update and read QCN parameters */
MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
};
enum {
@@ -233,6 +236,16 @@ struct mlx4_config_dev_params {
u8 rx_csum_flags_port_2;
};
enum mlx4_en_congestion_control_algorithm {
MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
};
enum mlx4_en_congestion_control_opmod {
MLX4_CONGESTION_CONTROL_GET_PARAMS,
MLX4_CONGESTION_CONTROL_GET_STATISTICS,
MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
};
struct mlx4_dev;
struct mlx4_cmd_mailbox {
...
@@ -203,7 +203,8 @@ enum {
MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
};
enum {
...
@@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops {
int (*ieee_setets) (struct net_device *, struct ieee_ets *);
int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_getapp) (struct net_device *, struct dcb_app *);
...
@@ -78,6 +78,70 @@ struct ieee_maxrate {
__u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
};
enum dcbnl_cndd_states {
DCB_CNDD_RESET = 0,
DCB_CNDD_EDGE,
DCB_CNDD_INTERIOR,
DCB_CNDD_INTERIOR_READY,
};
/* This structure contains the IEEE 802.1Qau QCN managed object.
*
*@rpg_enable: enable QCN RP
*@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port
*@rpg_time_reset: time between rate increases if no CNMs received.
* given in microseconds
*@rpg_byte_reset: transmitted data between rate increases if no CNMs received.
* given in Bytes
*@rpg_threshold: The number of times rpByteStage or rpTimeStage can count
* before the RP rate control state machine advances states
*@rpg_max_rate: the maximum rate, in Mbits per second,
* at which an RP can transmit
*@rpg_ai_rate: The rate, in Mbits per second,
* used to increase rpTargetRate in the RPR_ACTIVE_INCREASE state
*@rpg_hai_rate: The rate, in Mbits per second,
* used to increase rpTargetRate in the RPR_HYPER_INCREASE state
*@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate.
* rpgGd is given as log2(Gd), where Gd may only be powers of 2
*@rpg_min_dec_fac: The minimum factor by which the current transmit rate
* can be changed by reception of a CNM.
* value is given as percentage (1-100)
*@rpg_min_rate: The minimum value, in bits per second, for rate to limit
*@cndd_state_machine: The state of the congestion notification domain
* defense state machine, as defined by IEEE 802.1Qau
* section 32.1.1. In the interior ready state,
* the QCN capable hardware may add CN-TAG TLV to the
* outgoing traffic, to specifically identify outgoing
* flows.
*/
struct ieee_qcn {
__u8 rpg_enable[IEEE_8021QAZ_MAX_TCS];
__u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_gd[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS];
__u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS];
__u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS];
};
/* This structure contains the IEEE 802.1Qau QCN statistics.
*
*@rppp_rp_centiseconds: the number of RP-centiseconds accumulated
* by RPs at this priority level on this Port
*@rppp_created_rps: number of active RPs (flows) that react to CNMs
*/
struct ieee_qcn_stats {
__u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS];
__u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS];
};
/* This structure contains the IEEE 802.1Qaz PFC managed object
*
* @pfc_cap: Indicates the number of traffic classes on the local device
@@ -334,6 +398,8 @@ enum ieee_attrs {
DCB_ATTR_IEEE_PEER_PFC,
DCB_ATTR_IEEE_PEER_APP,
DCB_ATTR_IEEE_MAXRATE,
DCB_ATTR_IEEE_QCN,
DCB_ATTR_IEEE_QCN_STATS,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
...
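
One field in the new uapi comment above is worth making concrete: rpg_gd stores log2(Gd), so the documented post-CNM limit of (Fb/Gd)*CurrentRate reduces to a multiply and a right shift. A hypothetical illustration, not kernel code:

/* Hypothetical illustration of the rpg_gd encoding: Gd = 2^rpg_gd, so
 * dividing by Gd is a right shift by rpg_gd.
 */
static inline __u64 qcn_limited_rate(__u64 current_rate, __u32 fb, __u32 rpg_gd)
{
	return (current_rate * fb) >> rpg_gd;	/* (Fb / Gd) * CurrentRate */
}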
@@ -177,6 +177,8 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
};
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -1030,7 +1032,7 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
return err;
}
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
struct nlattr *ieee, *app;
@@ -1067,6 +1069,32 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
}
}
if (ops->ieee_getqcn) {
struct ieee_qcn qcn;
memset(&qcn, 0, sizeof(qcn));
err = ops->ieee_getqcn(netdev, &qcn);
if (!err) {
err = nla_put(skb, DCB_ATTR_IEEE_QCN,
sizeof(qcn), &qcn);
if (err)
return -EMSGSIZE;
}
}
if (ops->ieee_getqcnstats) {
struct ieee_qcn_stats qcn_stats;
memset(&qcn_stats, 0, sizeof(qcn_stats));
err = ops->ieee_getqcnstats(netdev, &qcn_stats);
if (!err) {
err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
sizeof(qcn_stats), &qcn_stats);
if (err)
return -EMSGSIZE;
}
}
if (ops->ieee_getpfc) {
struct ieee_pfc pfc;
memset(&pfc, 0, sizeof(pfc));
@@ -1379,8 +1407,9 @@ int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
}
EXPORT_SYMBOL(dcbnl_cee_notify);
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
* If any requested operation can not be completed
* the entire msg is aborted and error value is returned.
* No attempt is made to reconcile the case where only part of the
* cmd can be completed.
*/
@@ -1417,6 +1446,15 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
goto err;
}
if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
struct ieee_qcn *qcn =
nla_data(ieee[DCB_ATTR_IEEE_QCN]);
err = ops->ieee_setqcn(netdev, qcn);
if (err)
goto err;
}
if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
err = ops->ieee_setpfc(netdev, pfc);
...
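
Completing the userspace picture from the top of this page: a GET reply filled by dcbnl_ieee_fill() nests the QCN attributes inside DCB_ATTR_IEEE exactly as the SET path expects them, so pulling the statistics back out is two rtattr walks. Another hypothetical sketch; it assumes nlh points at a complete RTM_GETDCB reply and skips all validation:

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/dcbnl.h>

static struct ieee_qcn_stats *find_qcn_stats(struct nlmsghdr *nlh)
{
	struct rtattr *rta = (struct rtattr *)((char *)NLMSG_DATA(nlh) + sizeof(struct dcbmsg));
	int len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct dcbmsg));

	/* outer walk: find the nested DCB_ATTR_IEEE container */
	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == DCB_ATTR_IEEE) {
			struct rtattr *nest = RTA_DATA(rta);
			int nlen = RTA_PAYLOAD(rta);

			/* inner walk: the per-object IEEE attributes */
			for (; RTA_OK(nest, nlen); nest = RTA_NEXT(nest, nlen))
				if (nest->rta_type == DCB_ATTR_IEEE_QCN_STATS)
					return RTA_DATA(nest);
		}
	}
	return NULL;
}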