Commit 47bf2b23 authored by Marc Kleine-Budde's avatar Marc Kleine-Budde

Merge patch series "can: m_can: Optimizations for tcan and peripheral chips"

Markus Schneider-Pargmann <msp@baylibre.com> says:

as requested I split the series into two parts. This is the first
part, with simple improvements to reduce the number of SPI transfers.
The second part will be the rest with coalescing support and more
complex optimizations.

Changes since v1: https://lore.kernel.org/all/20221116205308.2996556-1-msp@baylibre.com
- Fixed register ranges
- Added fixes: tag for two patches

Link: https://lore.kernel.org/all/20221206115728.1056014-1-msp@baylibre.com
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
parents 3abcc01c 39dbb21b
...@@ -369,9 +369,14 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val) ...@@ -369,9 +369,14 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1); return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
} }
/* Check the Tx FIFO Full flag in a previously read TXFQS register value.
 * Takes the raw register contents so callers that already hold a TXFQS
 * snapshot can test it without issuing another bus/SPI read.
 */
static inline bool _m_can_tx_fifo_full(u32 txfqs)
{
	return (txfqs & TXFQS_TFQF) != 0;
}
static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev) static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{ {
return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF); return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
} }
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
...@@ -472,19 +477,16 @@ static void m_can_receive_skb(struct m_can_classdev *cdev, ...@@ -472,19 +477,16 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
} }
} }
static int m_can_read_fifo(struct net_device *dev, u32 rxfs) static int m_can_read_fifo(struct net_device *dev, u32 fgi)
{ {
struct net_device_stats *stats = &dev->stats; struct net_device_stats *stats = &dev->stats;
struct m_can_classdev *cdev = netdev_priv(dev); struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf; struct canfd_frame *cf;
struct sk_buff *skb; struct sk_buff *skb;
struct id_and_dlc fifo_header; struct id_and_dlc fifo_header;
u32 fgi;
u32 timestamp = 0; u32 timestamp = 0;
int err; int err;
/* calculate the fifo get index for where to read data */
fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2); err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
if (err) if (err)
goto out_fail; goto out_fail;
...@@ -528,9 +530,6 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) ...@@ -528,9 +530,6 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
} }
stats->rx_packets++; stats->rx_packets++;
/* acknowledge rx fifo 0 */
m_can_write(cdev, M_CAN_RXF0A, fgi);
timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16; timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp); m_can_receive_skb(cdev, skb, timestamp);
...@@ -549,7 +548,11 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) ...@@ -549,7 +548,11 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
struct m_can_classdev *cdev = netdev_priv(dev); struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0; u32 pkts = 0;
u32 rxfs; u32 rxfs;
int err; u32 rx_count;
u32 fgi;
int ack_fgi = -1;
int i;
int err = 0;
rxfs = m_can_read(cdev, M_CAN_RXF0S); rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) { if (!(rxfs & RXFS_FFL_MASK)) {
...@@ -557,16 +560,26 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) ...@@ -557,16 +560,26 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
return 0; return 0;
} }
while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs);
err = m_can_read_fifo(dev, rxfs); fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
for (i = 0; i < rx_count && quota > 0; ++i) {
err = m_can_read_fifo(dev, fgi);
if (err) if (err)
return err; break;
quota--; quota--;
pkts++; pkts++;
rxfs = m_can_read(cdev, M_CAN_RXF0S); ack_fgi = fgi;
fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi);
} }
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_RXF0A, ack_fgi);
if (err)
return err;
return pkts; return pkts;
} }
...@@ -900,14 +913,12 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, ...@@ -900,14 +913,12 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
return work_done; return work_done;
} }
static int m_can_rx_handler(struct net_device *dev, int quota) static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
{ {
struct m_can_classdev *cdev = netdev_priv(dev); struct m_can_classdev *cdev = netdev_priv(dev);
int rx_work_or_err; int rx_work_or_err;
int work_done = 0; int work_done = 0;
u32 irqstatus, psr;
irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
if (!irqstatus) if (!irqstatus)
goto end; goto end;
...@@ -932,13 +943,13 @@ static int m_can_rx_handler(struct net_device *dev, int quota) ...@@ -932,13 +943,13 @@ static int m_can_rx_handler(struct net_device *dev, int quota)
} }
} }
psr = m_can_read(cdev, M_CAN_PSR);
if (irqstatus & IR_ERR_STATE) if (irqstatus & IR_ERR_STATE)
work_done += m_can_handle_state_errors(dev, psr); work_done += m_can_handle_state_errors(dev,
m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_ERR_BUS_30X) if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus, psr); work_done += m_can_handle_bus_errors(dev, irqstatus,
m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_RF0N) { if (irqstatus & IR_RF0N) {
rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done)); rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
...@@ -951,12 +962,12 @@ static int m_can_rx_handler(struct net_device *dev, int quota) ...@@ -951,12 +962,12 @@ static int m_can_rx_handler(struct net_device *dev, int quota)
return work_done; return work_done;
} }
static int m_can_rx_peripheral(struct net_device *dev) static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
{ {
struct m_can_classdev *cdev = netdev_priv(dev); struct m_can_classdev *cdev = netdev_priv(dev);
int work_done; int work_done;
work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT); work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error /* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure). * (e.g., FIFO read failure).
...@@ -972,8 +983,11 @@ static int m_can_poll(struct napi_struct *napi, int quota) ...@@ -972,8 +983,11 @@ static int m_can_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev; struct net_device *dev = napi->dev;
struct m_can_classdev *cdev = netdev_priv(dev); struct m_can_classdev *cdev = netdev_priv(dev);
int work_done; int work_done;
u32 irqstatus;
irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
work_done = m_can_rx_handler(dev, quota); work_done = m_can_rx_handler(dev, quota, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error /* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure). * (e.g., FIFO read failure).
...@@ -1014,7 +1028,9 @@ static int m_can_echo_tx_event(struct net_device *dev) ...@@ -1014,7 +1028,9 @@ static int m_can_echo_tx_event(struct net_device *dev)
u32 txe_count = 0; u32 txe_count = 0;
u32 m_can_txefs; u32 m_can_txefs;
u32 fgi = 0; u32 fgi = 0;
int ack_fgi = -1;
int i = 0; int i = 0;
int err = 0;
unsigned int msg_mark; unsigned int msg_mark;
struct m_can_classdev *cdev = netdev_priv(dev); struct m_can_classdev *cdev = netdev_priv(dev);
...@@ -1024,34 +1040,34 @@ static int m_can_echo_tx_event(struct net_device *dev) ...@@ -1024,34 +1040,34 @@ static int m_can_echo_tx_event(struct net_device *dev)
/* Get Tx Event fifo element count */ /* Get Tx Event fifo element count */
txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs); txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs);
/* Get and process all sent elements */ /* Get and process all sent elements */
for (i = 0; i < txe_count; i++) { for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0; u32 txe, timestamp = 0;
int err;
/* retrieve get index */
fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));
/* get message marker, timestamp */ /* get message marker, timestamp */
err = m_can_txe_fifo_read(cdev, fgi, 4, &txe); err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
if (err) { if (err) {
netdev_err(dev, "TXE FIFO read returned %d\n", err); netdev_err(dev, "TXE FIFO read returned %d\n", err);
return err; break;
} }
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe); msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16; timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
/* ack txe element */ ack_fgi = fgi;
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK, fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
fgi));
/* update stats */ /* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp); m_can_tx_update_stats(cdev, msg_mark, timestamp);
} }
return 0; if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
return err;
} }
static irqreturn_t m_can_isr(int irq, void *dev_id) static irqreturn_t m_can_isr(int irq, void *dev_id)
...@@ -1083,7 +1099,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) ...@@ -1083,7 +1099,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
m_can_disable_all_interrupts(cdev); m_can_disable_all_interrupts(cdev);
if (!cdev->is_peripheral) if (!cdev->is_peripheral)
napi_schedule(&cdev->napi); napi_schedule(&cdev->napi);
else if (m_can_rx_peripheral(dev) < 0) else if (m_can_rx_peripheral(dev, ir) < 0)
goto out_fail; goto out_fail;
} }
...@@ -1609,6 +1625,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) ...@@ -1609,6 +1625,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
struct sk_buff *skb = cdev->tx_skb; struct sk_buff *skb = cdev->tx_skb;
struct id_and_dlc fifo_header; struct id_and_dlc fifo_header;
u32 cccr, fdflags; u32 cccr, fdflags;
u32 txfqs;
int err; int err;
int putidx; int putidx;
...@@ -1665,8 +1682,10 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) ...@@ -1665,8 +1682,10 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
} else { } else {
/* Transmit routine for version >= v3.1.x */ /* Transmit routine for version >= v3.1.x */
txfqs = m_can_read(cdev, M_CAN_TXFQS);
/* Check if FIFO full */ /* Check if FIFO full */
if (m_can_tx_fifo_full(cdev)) { if (_m_can_tx_fifo_full(txfqs)) {
/* This shouldn't happen */ /* This shouldn't happen */
netif_stop_queue(dev); netif_stop_queue(dev);
netdev_warn(dev, netdev_warn(dev,
...@@ -1682,8 +1701,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) ...@@ -1682,8 +1701,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
} }
/* get put index for frame */ /* get put index for frame */
putidx = FIELD_GET(TXFQS_TFQPI_MASK, putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
m_can_read(cdev, M_CAN_TXFQS));
/* Construct DLC Field, with CAN-FD configuration. /* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker, * Use the put index of the fifo as the message marker,
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#define TCAN4X5X_DEV_ID1 0x04 #define TCAN4X5X_DEV_ID1 0x04
#define TCAN4X5X_REV 0x08 #define TCAN4X5X_REV 0x08
#define TCAN4X5X_STATUS 0x0C #define TCAN4X5X_STATUS 0x0C
#define TCAN4X5X_ERROR_STATUS 0x10 #define TCAN4X5X_ERROR_STATUS_MASK 0x10
#define TCAN4X5X_CONTROL 0x14 #define TCAN4X5X_CONTROL 0x14
#define TCAN4X5X_CONFIG 0x800 #define TCAN4X5X_CONFIG 0x800
...@@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) ...@@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
if (ret) if (ret)
return ret; return ret;
ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG, return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
TCAN4X5X_ENABLE_MCAN_INT);
if (ret)
return ret;
ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
TCAN4X5X_CLEAR_ALL_INT);
if (ret)
return ret;
return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
TCAN4X5X_CLEAR_ALL_INT); TCAN4X5X_CLEAR_ALL_INT);
} }
...@@ -234,6 +224,11 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) ...@@ -234,6 +224,11 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret) if (ret)
return ret; return ret;
ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK,
TCAN4X5X_CLEAR_ALL_INT);
if (ret)
return ret;
ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL);
if (ret) if (ret)
......
...@@ -90,16 +90,47 @@ static int tcan4x5x_regmap_read(void *context, ...@@ -90,16 +90,47 @@ static int tcan4x5x_regmap_read(void *context,
return 0; return 0;
} }
static const struct regmap_range tcan4x5x_reg_table_yes_range[] = { static const struct regmap_range tcan4x5x_reg_table_wr_range[] = {
regmap_reg_range(0x0000, 0x002c), /* Device ID and SPI Registers */ /* Device ID and SPI Registers */
regmap_reg_range(0x0800, 0x083c), /* Device configuration registers and Interrupt Flags*/ regmap_reg_range(0x000c, 0x0010),
/* Device configuration registers and Interrupt Flags*/
regmap_reg_range(0x0800, 0x080c),
regmap_reg_range(0x0814, 0x0814),
regmap_reg_range(0x0820, 0x0820),
regmap_reg_range(0x0830, 0x0830),
/* M_CAN */
regmap_reg_range(0x100c, 0x102c),
regmap_reg_range(0x1048, 0x1048),
regmap_reg_range(0x1050, 0x105c),
regmap_reg_range(0x1080, 0x1088),
regmap_reg_range(0x1090, 0x1090),
regmap_reg_range(0x1098, 0x10a0),
regmap_reg_range(0x10a8, 0x10b0),
regmap_reg_range(0x10b8, 0x10c0),
regmap_reg_range(0x10c8, 0x10c8),
regmap_reg_range(0x10d0, 0x10d4),
regmap_reg_range(0x10e0, 0x10e4),
regmap_reg_range(0x10f0, 0x10f0),
regmap_reg_range(0x10f8, 0x10f8),
/* MRAM */
regmap_reg_range(0x8000, 0x87fc),
};
static const struct regmap_range tcan4x5x_reg_table_rd_range[] = {
regmap_reg_range(0x0000, 0x0010), /* Device ID and SPI Registers */
regmap_reg_range(0x0800, 0x0830), /* Device configuration registers and Interrupt Flags*/
regmap_reg_range(0x1000, 0x10fc), /* M_CAN */ regmap_reg_range(0x1000, 0x10fc), /* M_CAN */
regmap_reg_range(0x8000, 0x87fc), /* MRAM */ regmap_reg_range(0x8000, 0x87fc), /* MRAM */
}; };
static const struct regmap_access_table tcan4x5x_reg_table = { static const struct regmap_access_table tcan4x5x_reg_table_wr = {
.yes_ranges = tcan4x5x_reg_table_yes_range, .yes_ranges = tcan4x5x_reg_table_wr_range,
.n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_yes_range), .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_wr_range),
};
static const struct regmap_access_table tcan4x5x_reg_table_rd = {
.yes_ranges = tcan4x5x_reg_table_rd_range,
.n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_rd_range),
}; };
static const struct regmap_config tcan4x5x_regmap = { static const struct regmap_config tcan4x5x_regmap = {
...@@ -107,8 +138,8 @@ static const struct regmap_config tcan4x5x_regmap = { ...@@ -107,8 +138,8 @@ static const struct regmap_config tcan4x5x_regmap = {
.reg_stride = 4, .reg_stride = 4,
.pad_bits = 8, .pad_bits = 8,
.val_bits = 32, .val_bits = 32,
.wr_table = &tcan4x5x_reg_table, .wr_table = &tcan4x5x_reg_table_wr,
.rd_table = &tcan4x5x_reg_table, .rd_table = &tcan4x5x_reg_table_rd,
.max_register = TCAN4X5X_MAX_REGISTER, .max_register = TCAN4X5X_MAX_REGISTER,
.cache_type = REGCACHE_NONE, .cache_type = REGCACHE_NONE,
.read_flag_mask = (__force unsigned long) .read_flag_mask = (__force unsigned long)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment