Commit e02a5ac6 authored by Daniel Machon, committed by David S. Miller

net: microchip: sparx5: add support for offloading tbf qdisc

Add support for offloading the tbf qdisc to the sparx5 switch.

The tbf qdisc makes it possible to attach a shaper to traffic egressing
from a port or a queue. A per-port tbf qdisc is attached directly as the
root qdisc, while a per-queue tbf qdisc is attached to one of the classes
of a parent qdisc (such as mqprio), as in the example below.
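
A hypothetical configuration (interface name, handle and rates are
placeholders, not taken from this patch):

    # Shape all traffic egressing swp1 to 100 Mbit/s:
    tc qdisc add dev swp1 root tbf rate 100mbit burst 8kb latency 10ms

    # Alternatively, shape a single queue: attach mqprio as root, then
    # tbf to class 1:4 (i.e. queue 3):
    tc qdisc add dev swp1 root handle 1: mqprio num_tc 8 \
            map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1
    tc qdisc add dev swp1 parent 1:4 tbf rate 10mbit burst 8kb latency 10ms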
Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
Signed-off-by: Steen Hegelund <steen.hegelund@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ab0e493e
@@ -27,6 +27,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
#include "sparx5_qos.h"
#define QLIM_WM(fraction) \
((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
@@ -868,6 +869,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
goto cleanup_ports;
}
err = sparx5_qos_init(sparx5);
if (err) {
dev_err(sparx5->dev, "Failed to initialize QoS\n");
goto cleanup_ports;
}
err = sparx5_ptp_init(sparx5);
if (err) {
dev_err(sparx5->dev, "PTP failed\n");
......
@@ -2993,6 +2993,132 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
/* HSCH:HSCH_CFG:CIR_CFG */
#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
/* HSCH:HSCH_CFG:EIR_CFG */
#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
/* HSCH:HSCH_CFG:SE_CFG */
#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
#define HSCH_SE_CFG_SE_STOP BIT(0)
#define HSCH_SE_CFG_SE_STOP_SET(x)\
FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
#define HSCH_SE_CFG_SE_STOP_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
/* HSCH:HSCH_CFG:SE_CONNECT */
#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
/* HSCH:HSCH_CFG:SE_DLB_SENSE */
#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
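/* HSCH_HSCH_CFG_CFG is an indirection: its HSCH_LAYER field selects which
 * scheduling layer the per-element registers above (CIR_CFG, EIR_CFG,
 * SE_CFG, ...) address. A minimal sketch of programming a committed rate
 * through it, using the spx5_rmw() helper from sparx5_main.h; this is
 * illustrative, not the patch's actual code:
 */
static void example_hsch_set_cir(struct sparx5 *sparx5, u32 layer, u32 idx,
				 u32 rate, u32 burst)
{
	/* Select the scheduler layer to be configured */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Program committed rate and burst of scheduler element 'idx' */
	spx5_rmw(HSCH_CIR_CFG_CIR_RATE_SET(rate) |
		 HSCH_CIR_CFG_CIR_BURST_SET(burst),
		 HSCH_CIR_CFG_CIR_RATE | HSCH_CIR_CFG_CIR_BURST,
		 sparx5, HSCH_CIR_CFG(idx));
}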
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
@@ -3002,6 +3128,30 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
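/* A leak group periodically replenishes ("leaks") the buckets of the
 * scheduler elements linked into its leak list. A sketch of arming a
 * group by programming its scan period, assuming the first macro index
 * selects the layer and the second the group (illustrative only):
 */
static void example_set_leak_time(struct sparx5 *sparx5, u32 layer,
				  u32 group, u32 leak_time)
{
	/* A LEAK_TIME of 0 leaves the group stopped */
	spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
		HSCH_HSCH_TIMER_CFG(layer, group));
}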
/* HSCH:SYSTEM:FLUSH_CTRL */
#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
......
@@ -9,8 +9,59 @@
#include <linux/netdevice.h>
/* Number of Layers */
#define SPX5_HSCH_LAYER_CNT 3
/* Scheduling elements per layer */
#define SPX5_HSCH_L0_SE_CNT 5040
#define SPX5_HSCH_L1_SE_CNT 64
#define SPX5_HSCH_L2_SE_CNT 64
/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
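/* For example, port 2, queue 3 maps to layer 0 SE index
 * 64 * 2 + 8 * 3 = 152.
 */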
/* Number of leak groups */
#define SPX5_HSCH_LEAK_GRP_CNT 4
/* Scheduler modes */
#define SPX5_SE_MODE_LINERATE 0
#define SPX5_SE_MODE_DATARATE 1
/* Rate and burst */
#define SPX5_SE_RATE_MAX 262143
#define SPX5_SE_BURST_MAX 127
#define SPX5_SE_RATE_MIN 1
#define SPX5_SE_BURST_MIN 1
#define SPX5_SE_BURST_UNIT 4096
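/* The tbf parameters arrive in bytes per second and bytes, while the
 * limits above are in hardware units. A sketch of a conversion, under
 * the assumption (not confirmed by this hunk) that the rate field is in
 * kbit/s and the burst field counts SPX5_SE_BURST_UNIT-byte units:
 */
static inline u32 example_rate_to_hw(u64 rate_bytes_ps)
{
	u64 kbps = div_u64(rate_bytes_ps * 8, 1000); /* bytes/s -> kbit/s */

	return clamp_t(u32, kbps, SPX5_SE_RATE_MIN, SPX5_SE_RATE_MAX);
}

static inline u32 example_burst_to_hw(u32 burst_bytes)
{
	u32 units = DIV_ROUND_UP(burst_bytes, SPX5_SE_BURST_UNIT);

	return clamp_t(u32, units, SPX5_SE_BURST_MIN, SPX5_SE_BURST_MAX);
}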
struct sparx5_shaper {
u32 mode;
u32 rate;
u32 burst;
};
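/* State for one leak group: the maximum rate the group can represent, its
 * rate resolution, its scan (leak) period, and the maximum number of
 * scheduler elements it can service.
 */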
struct sparx5_lg {
u32 max_rate;
u32 resolution;
u32 leak_time;
u32 max_ses;
};
struct sparx5_layer {
struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
};
int sparx5_qos_init(struct sparx5 *sparx5);
/* Multi-Queue Priority */
int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
int sparx5_tc_mqprio_del(struct net_device *ndev);
/* Token Bucket Filter */
struct tc_tbf_qopt_offload_replace_params;
int sparx5_tc_tbf_add(struct sparx5_port *port,
struct tc_tbf_qopt_offload_replace_params *params,
u32 layer, u32 idx);
int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
#endif /* __SPARX5_QOS_H__ */
@@ -10,6 +10,19 @@
#include "sparx5_main.h"
#include "sparx5_qos.h"
static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
u32 *idx)
{
if (parent == TC_H_ROOT) {
*layer = 2;
*idx = portno;
} else {
u32 queue = TC_H_MIN(parent) - 1;
*layer = 0;
*idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
}
}
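/* Examples (illustrative): a root tbf on port 5 maps to layer 2, idx 5;
 * a tbf on mqprio class 1:4 of port 5 (queue 3) maps to layer 0,
 * idx = SPX5_HSCH_L0_GET_IDX(5, 3) = 64 * 5 + 8 * 3 = 344.
 */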
static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
struct tc_mqprio_qopt_offload *m)
{
@@ -21,12 +34,38 @@ static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
}
static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
struct tc_tbf_qopt_offload *qopt)
{
struct sparx5_port *port = netdev_priv(ndev);
u32 layer, se_idx;
sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
&se_idx);
switch (qopt->command) {
case TC_TBF_REPLACE:
return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
se_idx);
case TC_TBF_DESTROY:
return sparx5_tc_tbf_del(port, layer, se_idx);
case TC_TBF_STATS:
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TBF:
return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
default:
return -EOPNOTSUPP;
}
......