Commit 08eeccb2 authored by Jakub Kicinski

Merge tag 'linux-can-next-for-6.5-20230622' of...

Merge tag 'linux-can-next-for-6.5-20230622' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2023-06-22

The first patch is by Carsten Schmidt, targets the kvaser_usb driver
and adds len8_dlc support.

Marcel Hellwig's patch for the xilinx_can driver adds support for CAN
transceivers via the PHY framework.

Frank Jungclaus contributes 6+2 patches for the esd_usb driver in
preparation for the upcoming CAN-USB/3 support.

The 2 patches by Miquel Raynal for the sja1000 driver work around
overrun stalls on the Renesas SoCs.

The next 3 patches are by me and fix the coding style in the
rx-offload helper and in the m_can and ti_hecc driver.

Vincent Mailhol contributes 3 patches to fix and update the
calculation of the length of CAN frames on the wire.

Oliver Hartkopp's patch moves the CAN_RAW_FILTER_MAX definition into
the correct header.

The remaining 14 patches are by Jimmy Assarsson, target the
kvaser_pciefd driver and bring various updates and improvements.

* tag 'linux-can-next-for-6.5-20230622' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next: (33 commits)
  can: kvaser_pciefd: Use TX FIFO size read from CAN controller
  can: kvaser_pciefd: Refactor code
  can: kvaser_pciefd: Add len8_dlc support
  can: kvaser_pciefd: Use FIELD_{GET,PREP} and GENMASK where appropriate
  can: kvaser_pciefd: Sort register definitions
  can: kvaser_pciefd: Change return type for kvaser_pciefd_{receive,transmit,set_tx}_irq()
  can: kvaser_pciefd: Rename device ID defines
  can: kvaser_pciefd: Sort includes in alphabetic order
  can: kvaser_pciefd: Remove SPI flash parameter read functionality
  can: uapi: move CAN_RAW_FILTER_MAX definition to raw.h
  can: kvaser_pciefd: Define unsigned constants with type suffix 'U'
  can: kvaser_pciefd: Set hardware timestamp on transmitted packets
  can: kvaser_pciefd: Add function to set skb hwtstamps
  can: kvaser_pciefd: Remove handler for unused KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK
  can: kvaser_pciefd: Remove useless write to interrupt register
  can: length: refactor frame lengths definition to add size in bits
  can: length: fix bitstuffing count
  can: length: fix description of the RRS field
  can: m_can: fix coding style
  can: ti_hecc: fix coding style
  ...
====================

Link: https://lore.kernel.org/r/20230622082658.571150-1-mkl@pengutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0ec92a8f 790ef390
...@@ -153,8 +153,7 @@ config CAN_JANZ_ICAN3 ...@@ -153,8 +153,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD config CAN_KVASER_PCIEFD
depends on PCI depends on PCI
tristate "Kvaser PCIe FD cards" tristate "Kvaser PCIe FD cards"
select CRC32 help
help
This is a driver for the Kvaser PCI Express CAN FD family. This is a driver for the Kvaser PCI Express CAN FD family.
Supported devices: Supported devices:
......
...@@ -78,18 +78,7 @@ unsigned int can_skb_get_frame_len(const struct sk_buff *skb) ...@@ -78,18 +78,7 @@ unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
else else
len = cf->len; len = cf->len;
if (can_is_canfd_skb(skb)) { return can_frame_bytes(can_is_canfd_skb(skb), cf->can_id & CAN_EFF_FLAG,
if (cf->can_id & CAN_EFF_FLAG) false, len);
len += CANFD_FRAME_OVERHEAD_EFF;
else
len += CANFD_FRAME_OVERHEAD_SFF;
} else {
if (cf->can_id & CAN_EFF_FLAG)
len += CAN_FRAME_OVERHEAD_EFF;
else
len += CAN_FRAME_OVERHEAD_SFF;
}
return len;
} }
EXPORT_SYMBOL_GPL(can_skb_get_frame_len); EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
...@@ -220,7 +220,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) ...@@ -220,7 +220,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp) struct sk_buff *skb, u32 timestamp)
{ {
struct can_rx_offload_cb *cb; struct can_rx_offload_cb *cb;
......
...@@ -3,19 +3,19 @@ ...@@ -3,19 +3,19 @@
* Parts of this driver are based on the following: * Parts of this driver are based on the following:
* - Kvaser linux pciefd driver (version 5.25) * - Kvaser linux pciefd driver (version 5.25)
* - PEAK linux canfd driver * - PEAK linux canfd driver
* - Altera Avalon EPCS flash controller driver
*/ */
#include <linux/kernel.h> #include <linux/bitfield.h>
#include <linux/module.h> #include <linux/can/dev.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
...@@ -25,42 +25,25 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); ...@@ -25,42 +25,25 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256 #define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 #define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
#define KVASER_PCIEFD_DMA_COUNT 2 #define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4 * 1024) #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
#define KVASER_PCIEFD_VENDOR 0x1a07 #define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_ID 0x0e #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_ID 0x0f #define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10 #define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11 #define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011
/* PCIe IRQ registers */ /* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40 #define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50 #define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */ /* DMA address translation map register base */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 #define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */ /* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000 #define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */ /* System identification and information registers */
...@@ -78,183 +61,196 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); ...@@ -78,183 +61,196 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214) #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */ /* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00 #define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE #define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4) #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8) #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc) #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14) #define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f #define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* PCI interrupt fields */
#define KVASER_PCIEFD_IRQ_SRB BIT(4) #define KVASER_PCIEFD_IRQ_SRB BIT(4)
#define KVASER_PCIEFD_IRQ_ALL_MASK GENMASK(4, 0)
#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24 /* Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16 #define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)
/* Reset DMA buffer 0, 1 and FIFO offset */ /* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) #define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) #define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */ /* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) #define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
/* Got DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */ /* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) #define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* SRB current packet level */ /* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)
/* DMA Enable */ /* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
/* EPCS flash controller definitions */ /* KCAN CTRL packet types */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) #define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) #define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256 #define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 /* Command sequence number */
#define KVASER_PCIEFD_CFG_SYS_VER 1 #define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 /* Command bits */
#define KVASER_PCIEFD_SPI_TMT BIT(5) #define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */ /* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) #define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* FDF bit when controller is in classic mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */ /* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) #define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* FDF bit when controller is in classic CAN mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16 /* Tx FIFO size */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
/* Tx FIFO current packet level */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24 /* Current status packet sequence number */
/* Abort request */ #define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller got CAN FD capability */ /* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) #define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \ /* Controller got one-shot capability */
KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \ #define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
KVASER_PCIEFD_KCAN_STAT_IRM) /* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Controller is bus off */
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
(KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */ /* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) #define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13 /* BTRN and BTRD fields */
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17 #define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26 #define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16 #define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)
/* Kvaser KCAN packet types */ /* PWM Control fields */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0 #define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_PACK_TYPE_ACK 1 #define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 3 /* KCAN packet type IDs */
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 #define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 #define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 #define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
#define KVASER_PCIEFD_PACK_TYPE_STATUS 8 #define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 #define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
/* Kvaser KCAN packet common definitions */ #define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff #define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25 #define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9
#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
/* Common KCAN packet definitions, second word */
/* Kvaser KCAN TDATA and RDATA first word */ #define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)
/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30) #define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29) #define KVASER_PCIEFD_RPACKET_RTR BIT(29)
/* Kvaser KCAN TDATA and RDATA second word */ #define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
#define KVASER_PCIEFD_RPACKET_ESI BIT(13) /* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
/* Kvaser KCAN TDATA second word */
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31) #define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
/* KCAN Transmit/Receive data packet, second word */
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)
/* Kvaser KCAN APACKET */ /* KCAN Transmit acknowledge packet, first word */
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_NACK BIT(11) #define KVASER_PCIEFD_APACKET_NACK BIT(11)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
/* Kvaser KCAN SPACK first word */ /* KCAN Status packet, first word */
#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_RMCD BIT(22) #define KVASER_PCIEFD_SPACK_RMCD BIT(22)
/* Kvaser KCAN SPACK second word */ #define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_AUTO BIT(21) #define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23) #define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
/* KCAN Status packet, second word */
#define KVASER_PCIEFD_SPACK_EPLR BIT(24) #define KVASER_PCIEFD_SPACK_EPLR BIT(24)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
/* Kvaser KCAN_EPACK second word */ /* KCAN Error detected packet, second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
struct kvaser_pciefd; struct kvaser_pciefd;
...@@ -306,195 +302,43 @@ static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { ...@@ -306,195 +302,43 @@ static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
.brp_inc = 1, .brp_inc = 1,
}; };
struct kvaser_pciefd_cfg_param {
__le32 magic;
__le32 nr;
__le32 len;
u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};
struct kvaser_pciefd_cfg_img {
__le32 version;
__le32 magic;
__le32 crc;
struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};
static struct pci_device_id kvaser_pciefd_id_table[] = { static struct pci_device_id kvaser_pciefd_id_table[] = {
{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, {
{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, },
{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, {
{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
{ 0,}, },
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
},
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
},
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
},
{
0,
},
}; };
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
/* Onboard flash memory functions */ static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
u32 res;
return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
res, res & msk, 0, 10);
}
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
u32 tx_len, u8 *rx, u32 rx_len)
{
int c;
iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
c = tx_len;
while (c--) {
if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
return -EIO;
iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
return -EIO;
ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
}
c = rx_len;
while (c-- > 0) {
if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
return -EIO;
iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
return -EIO;
*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
}
if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
return -EIO;
iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
if (c != -1) {
dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
return -EIO;
}
return 0;
}
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_cfg_img *img)
{
int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
int res, crc;
u8 *crc_buff;
u8 cmd[] = {
KVASER_PCIEFD_FLASH_READ_CMD,
(u8)((offset >> 16) & 0xff),
(u8)((offset >> 8) & 0xff),
(u8)(offset & 0xff)
};
res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
KVASER_PCIEFD_CFG_IMG_SZ);
if (res)
return res;
crc_buff = (u8 *)img->params;
if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
dev_err(&pcie->pci->dev,
"Config flash corrupted, version number is wrong\n");
return -ENODEV;
}
if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
dev_err(&pcie->pci->dev,
"Config flash corrupted, magic number is wrong\n");
return -ENODEV;
}
crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
if (le32_to_cpu(img->crc) != crc) {
dev_err(&pcie->pci->dev,
"Stored CRC does not match flash image contents\n");
return -EIO;
}
return 0;
}
static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_cfg_img *img)
{ {
struct kvaser_pciefd_cfg_param *param; iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
} }
static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{ {
int res; kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
struct kvaser_pciefd_cfg_img *img;
/* Read electronic signature */
u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
if (res)
return -EIO;
img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
if (!img)
return -ENOMEM;
if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
dev_err(&pcie->pci->dev,
"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
res = -ENODEV;
goto image_free;
}
cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
if (res) {
goto image_free;
} else if (cmd[0] & 1) {
res = -EIO;
/* No write is ever done, the WIP should never be set */
dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
goto image_free;
}
res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
if (res) {
res = -EIO;
goto image_free;
}
kvaser_pciefd_cfg_read_params(pcie, img);
image_free:
kfree(img);
return res;
} }
static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{ {
u32 cmd; kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
} }
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
...@@ -523,7 +367,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) ...@@ -523,7 +367,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
spin_unlock_irqrestore(&can->lock, irq); spin_unlock_irqrestore(&can->lock, irq);
} }
static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{ {
u32 msk; u32 msk;
...@@ -534,8 +378,13 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) ...@@ -534,8 +378,13 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
KVASER_PCIEFD_KCAN_IRQ_TAR; KVASER_PCIEFD_KCAN_IRQ_TAR;
iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}
return 0; static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
struct sk_buff *skb, u64 timestamp)
{
skb_hwtstamps(skb)->hwtstamp =
ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
} }
static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
...@@ -544,7 +393,6 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) ...@@ -544,7 +393,6 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
unsigned long irq; unsigned long irq;
spin_lock_irqsave(&can->lock, irq); spin_lock_irqsave(&can->lock, irq);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
if (can->can.ctrlmode & CAN_CTRLMODE_FD) { if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
...@@ -561,7 +409,6 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) ...@@ -561,7 +409,6 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
mode |= KVASER_PCIEFD_KCAN_MODE_LOM; mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
else else
mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
mode |= KVASER_PCIEFD_KCAN_MODE_EEN; mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
/* Use ACK packet type */ /* Use ACK packet type */
...@@ -578,18 +425,13 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) ...@@ -578,18 +425,13 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
unsigned long irq; unsigned long irq;
spin_lock_irqsave(&can->lock, irq); spin_lock_irqsave(&can->lock, irq);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
u32 cmd;
/* If controller is already idle, run abort, flush and reset */ /* If controller is already idle, run abort, flush and reset */
cmd = KVASER_PCIEFD_KCAN_CMD_AT; kvaser_pciefd_abort_flush_reset(can);
cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
u32 mode; u32 mode;
...@@ -598,7 +440,6 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) ...@@ -598,7 +440,6 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
mode |= KVASER_PCIEFD_KCAN_MODE_RM; mode |= KVASER_PCIEFD_KCAN_MODE_RM;
iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
} }
spin_unlock_irqrestore(&can->lock, irq); spin_unlock_irqrestore(&can->lock, irq);
} }
...@@ -608,7 +449,6 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) ...@@ -608,7 +449,6 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
unsigned long irq; unsigned long irq;
del_timer(&can->bec_poll_timer); del_timer(&can->bec_poll_timer);
if (!completion_done(&can->flush_comp)) if (!completion_done(&can->flush_comp))
kvaser_pciefd_start_controller_flush(can); kvaser_pciefd_start_controller_flush(can);
...@@ -620,11 +460,9 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) ...@@ -620,11 +460,9 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq); spin_lock_irqsave(&can->lock, irq);
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
...@@ -637,11 +475,10 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) ...@@ -637,11 +475,10 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
} }
/* Reset interrupt handling */ /* Reset interrupt handling */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
kvaser_pciefd_set_tx_irq(can); kvaser_pciefd_set_tx_irq(can);
kvaser_pciefd_setup_controller(can); kvaser_pciefd_setup_controller(can);
can->can.state = CAN_STATE_ERROR_ACTIVE; can->can.state = CAN_STATE_ERROR_ACTIVE;
netif_wake_queue(can->can.dev); netif_wake_queue(can->can.dev);
can->bec.txerr = 0; can->bec.txerr = 0;
...@@ -659,10 +496,9 @@ static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) ...@@ -659,10 +496,9 @@ static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq); spin_lock_irqsave(&can->lock, irq);
pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff; top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
/* Set duty cycle to zero */ /* Set duty cycle to zero */
pwm_ctrl |= top; pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
spin_unlock_irqrestore(&can->lock, irq); spin_unlock_irqrestore(&can->lock, irq);
} }
...@@ -675,18 +511,17 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) ...@@ -675,18 +511,17 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
kvaser_pciefd_pwm_stop(can); kvaser_pciefd_pwm_stop(can);
spin_lock_irqsave(&can->lock, irq); spin_lock_irqsave(&can->lock, irq);
/* Set frequency to 500 KHz */
/* Set frequency to 500 KHz*/
top = can->kv_pcie->bus_freq / (2 * 500000) - 1; top = can->kv_pcie->bus_freq / (2 * 500000) - 1;
pwm_ctrl = top & 0xff; pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
/* Set duty cycle to 95 */ /* Set duty cycle to 95 */
trigger = (100 * top - 95 * (top + 1) + 50) / 100; trigger = (100 * top - 95 * (top + 1) + 50) / 100;
pwm_ctrl = trigger & 0xff; pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
spin_unlock_irqrestore(&can->lock, irq); spin_unlock_irqrestore(&can->lock, irq);
} }
...@@ -741,7 +576,6 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, ...@@ -741,7 +576,6 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
int seq = can->echo_idx; int seq = can->echo_idx;
memset(p, 0, sizeof(*p)); memset(p, 0, sizeof(*p));
if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
...@@ -751,19 +585,24 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, ...@@ -751,19 +585,24 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
if (cf->can_id & CAN_EFF_FLAG) if (cf->can_id & CAN_EFF_FLAG)
p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
p->header[0] |= cf->can_id & CAN_EFF_MASK; p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
if (can_is_canfd_skb(skb)) { if (can_is_canfd_skb(skb)) {
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
can_fd_len2dlc(cf->len));
p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
if (cf->flags & CANFD_BRS) if (cf->flags & CANFD_BRS)
p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
if (cf->flags & CANFD_ESI) if (cf->flags & CANFD_ESI)
p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
} }
p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
packet_size = cf->len; packet_size = cf->len;
memcpy(p->data, cf->data, packet_size); memcpy(p->data, cf->data, packet_size);
...@@ -777,16 +616,15 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, ...@@ -777,16 +616,15 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct kvaser_pciefd_can *can = netdev_priv(netdev); struct kvaser_pciefd_can *can = netdev_priv(netdev);
unsigned long irq_flags; unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet; struct kvaser_pciefd_tx_packet packet;
int nwords; int nr_words;
u8 count; u8 count;
if (can_dev_dropped_skb(netdev, skb)) if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK; return NETDEV_TX_OK;
nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
spin_lock_irqsave(&can->echo_lock, irq_flags); spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */ /* Prepare and save echo skb in internal slot */
can_put_echo_skb(skb, netdev, can->echo_idx, 0); can_put_echo_skb(skb, netdev, can->echo_idx, 0);
...@@ -799,13 +637,13 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, ...@@ -799,13 +637,13 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
iowrite32(packet.header[1], iowrite32(packet.header[1],
can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
if (nwords) { if (nr_words) {
u32 data_last = ((u32 *)packet.data)[nwords - 1]; u32 data_last = ((u32 *)packet.data)[nr_words - 1];
/* Write data to fifo, except last word */ /* Write data to fifo, except last word */
iowrite32_rep(can->reg_base + iowrite32_rep(can->reg_base +
KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
nwords - 1); nr_words - 1);
/* Write last word to end of fifo */ /* Write last word to end of fifo */
__raw_writel(data_last, can->reg_base + __raw_writel(data_last, can->reg_base +
KVASER_PCIEFD_KCAN_FIFO_LAST_REG); KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
...@@ -815,14 +653,13 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, ...@@ -815,14 +653,13 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG); KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
} }
count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
/* No room for a new message, stop the queue until at least one /* No room for a new message, stop the queue until at least one
* successful transmit * successful transmit
*/ */
if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
can->can.echo_skb[can->echo_idx])
netif_stop_queue(netdev); netif_stop_queue(netdev);
spin_unlock_irqrestore(&can->echo_lock, irq_flags); spin_unlock_irqrestore(&can->echo_lock, irq_flags);
return NETDEV_TX_OK; return NETDEV_TX_OK;
...@@ -840,25 +677,20 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) ...@@ -840,25 +677,20 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
else else
bt = &can->can.bittiming; bt = &can->can.bittiming;
btrn = ((bt->phase_seg2 - 1) & 0x1f) << btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
(((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) << FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);
((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
((bt->brp - 1) & 0x1fff);
spin_lock_irqsave(&can->lock, irq_flags); spin_lock_irqsave(&can->lock, irq_flags);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
/* Put the circuit in reset mode */ /* Put the circuit in reset mode */
iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
/* Can only set bittiming if in reset mode */ /* Can only set bittiming if in reset mode */
ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
test, test & KVASER_PCIEFD_KCAN_MODE_RM, test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
0, 10);
if (ret) { if (ret) {
spin_unlock_irqrestore(&can->lock, irq_flags); spin_unlock_irqrestore(&can->lock, irq_flags);
return -EBUSY; return -EBUSY;
...@@ -868,11 +700,10 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) ...@@ -868,11 +700,10 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
else else
iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
/* Restore previous reset mode status */ /* Restore previous reset mode status */
iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
spin_unlock_irqrestore(&can->lock, irq_flags); spin_unlock_irqrestore(&can->lock, irq_flags);
return 0; return 0;
} }
...@@ -910,6 +741,7 @@ static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, ...@@ -910,6 +741,7 @@ static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
bec->rxerr = can->bec.rxerr; bec->rxerr = can->bec.rxerr;
bec->txerr = can->bec.txerr; bec->txerr = can->bec.txerr;
return 0; return 0;
} }
...@@ -941,7 +773,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) ...@@ -941,7 +773,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
for (i = 0; i < pcie->nr_channels; i++) { for (i = 0; i < pcie->nr_channels; i++) {
struct net_device *netdev; struct net_device *netdev;
struct kvaser_pciefd_can *can; struct kvaser_pciefd_can *can;
u32 status, tx_npackets; u32 status, tx_nr_packets_max;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
KVASER_PCIEFD_CAN_TX_MAX_COUNT); KVASER_PCIEFD_CAN_TX_MAX_COUNT);
...@@ -953,7 +785,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) ...@@ -953,7 +785,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
i * KVASER_PCIEFD_KCAN_BASE_OFFSET; i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
can->kv_pcie = pcie; can->kv_pcie = pcie;
can->cmd_seq = 0; can->cmd_seq = 0;
can->err_rep_cnt = 0; can->err_rep_cnt = 0;
...@@ -962,41 +793,31 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) ...@@ -962,41 +793,31 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
init_completion(&can->start_comp); init_completion(&can->start_comp);
init_completion(&can->flush_comp); init_completion(&can->flush_comp);
timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);
0);
/* Disable Bus load reporting */ /* Disable Bus load reporting */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
tx_npackets = ioread32(can->reg_base + tx_nr_packets_max =
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
dev_err(&pcie->pci->dev,
"Max Tx count is smaller than expected\n");
free_candev(netdev);
return -ENODEV;
}
can->can.clock.freq = pcie->freq; can->can.clock.freq = pcie->freq;
can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT; can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->echo_idx = 0; can->echo_idx = 0;
spin_lock_init(&can->echo_lock); spin_lock_init(&can->echo_lock);
spin_lock_init(&can->lock); spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const; can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
can->can.do_set_data_bittiming = can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
kvaser_pciefd_set_data_bittiming;
can->can.do_set_mode = kvaser_pciefd_set_mode; can->can.do_set_mode = kvaser_pciefd_set_mode;
can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO; CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
...@@ -1011,10 +832,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) ...@@ -1011,10 +832,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
netdev->flags |= IFF_ECHO; netdev->flags |= IFF_ECHO;
SET_NETDEV_DEV(netdev, &pcie->pci->dev); SET_NETDEV_DEV(netdev, &pcie->pci->dev);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
...@@ -1073,18 +893,16 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) ...@@ -1073,18 +893,16 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
pcie->dma_data[i] = pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
dmam_alloc_coherent(&pcie->pci->dev, KVASER_PCIEFD_DMA_SIZE,
KVASER_PCIEFD_DMA_SIZE, &dma_addr[i],
&dma_addr[i], GFP_KERNEL);
GFP_KERNEL);
if (!pcie->dma_data[i] || !dma_addr[i]) { if (!pcie->dma_data[i] || !dma_addr[i]) {
dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
KVASER_PCIEFD_DMA_SIZE); KVASER_PCIEFD_DMA_SIZE);
return -ENOMEM; return -ENOMEM;
} }
kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
} }
...@@ -1092,10 +910,10 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) ...@@ -1092,10 +910,10 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
KVASER_PCIEFD_SRB_CMD_RDB1, KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
/* Empty Rx FIFO */ /* Empty Rx FIFO */
srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) & srb_packet_count =
KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK; FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
while (srb_packet_count) { while (srb_packet_count) {
/* Drop current packet in FIFO */ /* Drop current packet in FIFO */
ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG); ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
...@@ -1117,37 +935,21 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) ...@@ -1117,37 +935,21 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{ {
u32 sysid, srb_status, build; u32 version, srb_status, build;
u8 sysid_nr_chan;
int ret;
ret = kvaser_pciefd_read_cfg(pcie);
if (ret)
return ret;
sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
if (pcie->nr_channels != sysid_nr_chan) {
dev_err(&pcie->pci->dev,
"Number of channels does not match: %u vs %u\n",
pcie->nr_channels,
sysid_nr_chan);
return -ENODEV;
}
if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS) version = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS; pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG); build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n", dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff, FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
sysid & 0xff, FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff); FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));
srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
dev_err(&pcie->pci->dev, dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
"Hardware without DMA is not supported\n");
return -ENODEV; return -ENODEV;
} }
...@@ -1157,10 +959,10 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) ...@@ -1157,10 +959,10 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
pcie->freq_to_ticks_div = pcie->freq / 1000000; pcie->freq_to_ticks_div = pcie->freq / 1000000;
if (pcie->freq_to_ticks_div == 0) if (pcie->freq_to_ticks_div == 0)
pcie->freq_to_ticks_div = 1; pcie->freq_to_ticks_div = 1;
/* Turn off all loopback functionality */ /* Turn off all loopback functionality */
iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG); iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
return ret;
return 0;
} }
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
...@@ -1170,56 +972,48 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, ...@@ -1170,56 +972,48 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
struct sk_buff *skb; struct sk_buff *skb;
struct canfd_frame *cf; struct canfd_frame *cf;
struct can_priv *priv; struct can_priv *priv;
struct net_device_stats *stats; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
struct skb_shared_hwtstamps *shhwtstamps; u8 dlc;
u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
if (ch_id >= pcie->nr_channels) if (ch_id >= pcie->nr_channels)
return -EIO; return -EIO;
priv = &pcie->can[ch_id]->can; priv = &pcie->can[ch_id]->can;
stats = &priv->dev->stats; dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);
if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
skb = alloc_canfd_skb(priv->dev, &cf); skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) { if (!skb) {
stats->rx_dropped++; priv->dev->stats.rx_dropped++;
return -ENOMEM; return -ENOMEM;
} }
cf->len = can_fd_dlc2len(dlc);
if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
cf->flags |= CANFD_BRS; cf->flags |= CANFD_BRS;
if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
cf->flags |= CANFD_ESI; cf->flags |= CANFD_ESI;
} else { } else {
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) { if (!skb) {
stats->rx_dropped++; priv->dev->stats.rx_dropped++;
return -ENOMEM; return -ENOMEM;
} }
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
} }
cf->can_id = p->header[0] & CAN_EFF_MASK; cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
cf->can_id |= CAN_EFF_FLAG; cf->can_id |= CAN_EFF_FLAG;
cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) { if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
cf->can_id |= CAN_RTR_FLAG; cf->can_id |= CAN_RTR_FLAG;
} else { } else {
memcpy(cf->data, data, cf->len); memcpy(cf->data, data, cf->len);
priv->dev->stats.rx_bytes += cf->len;
stats->rx_bytes += cf->len;
} }
stats->rx_packets++; priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
pcie->freq_to_ticks_div));
return netif_rx(skb); return netif_rx(skb);
} }
...@@ -1239,7 +1033,6 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, ...@@ -1239,7 +1033,6 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
spin_lock_irqsave(&can->lock, irq_flags); spin_lock_irqsave(&can->lock, irq_flags);
netif_stop_queue(can->can.dev); netif_stop_queue(can->can.dev);
spin_unlock_irqrestore(&can->lock, irq_flags); spin_unlock_irqrestore(&can->lock, irq_flags);
/* Prevent CAN controller from auto recover from bus off */ /* Prevent CAN controller from auto recover from bus off */
if (!can->can.restart_ms) { if (!can->can.restart_ms) {
kvaser_pciefd_start_controller_flush(can); kvaser_pciefd_start_controller_flush(can);
...@@ -1257,7 +1050,7 @@ static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, ...@@ -1257,7 +1050,7 @@ static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
p->header[0] & KVASER_PCIEFD_SPACK_IRM) p->header[0] & KVASER_PCIEFD_SPACK_IRM)
*new_state = CAN_STATE_BUS_OFF; *new_state = CAN_STATE_BUS_OFF;
else if (bec->txerr >= 255 || bec->rxerr >= 255) else if (bec->txerr >= 255 || bec->rxerr >= 255)
*new_state = CAN_STATE_BUS_OFF; *new_state = CAN_STATE_BUS_OFF;
else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
*new_state = CAN_STATE_ERROR_PASSIVE; *new_state = CAN_STATE_ERROR_PASSIVE;
...@@ -1282,23 +1075,16 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, ...@@ -1282,23 +1075,16 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct net_device *ndev = can->can.dev; struct net_device *ndev = can->can.dev;
struct sk_buff *skb; struct sk_buff *skb;
struct can_frame *cf = NULL; struct can_frame *cf = NULL;
struct skb_shared_hwtstamps *shhwtstamps;
struct net_device_stats *stats = &ndev->stats;
old_state = can->can.state; old_state = can->can.state;
bec.txerr = p->header[0] & 0xff; bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
&rx_state);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
skb = alloc_can_err_skb(ndev, &cf); skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) { if (new_state != old_state) {
kvaser_pciefd_change_state(can, cf, new_state, tx_state, kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
rx_state);
if (old_state == CAN_STATE_BUS_OFF && if (old_state == CAN_STATE_BUS_OFF &&
new_state == CAN_STATE_ERROR_ACTIVE && new_state == CAN_STATE_ERROR_ACTIVE &&
can->can.restart_ms) { can->can.restart_ms) {
...@@ -1311,28 +1097,25 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, ...@@ -1311,28 +1097,25 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->err_rep_cnt++; can->err_rep_cnt++;
can->can.can_stats.bus_error++; can->can.can_stats.bus_error++;
if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
stats->tx_errors++; ndev->stats.tx_errors++;
else else
stats->rx_errors++; ndev->stats.rx_errors++;
can->bec.txerr = bec.txerr; can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr; can->bec.rxerr = bec.rxerr;
if (!skb) { if (!skb) {
stats->rx_dropped++; ndev->stats.rx_dropped++;
return -ENOMEM; return -ENOMEM;
} }
shhwtstamps = skb_hwtstamps(skb); kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
can->kv_pcie->freq_to_ticks_div));
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT; cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
cf->data[6] = bec.txerr; cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr; cf->data[7] = bec.rxerr;
netif_rx(skb); netif_rx(skb);
return 0; return 0;
} }
...@@ -1340,19 +1123,19 @@ static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, ...@@ -1340,19 +1123,19 @@ static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_rx_packet *p) struct kvaser_pciefd_rx_packet *p)
{ {
struct kvaser_pciefd_can *can; struct kvaser_pciefd_can *can;
u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels) if (ch_id >= pcie->nr_channels)
return -EIO; return -EIO;
can = pcie->can[ch_id]; can = pcie->can[ch_id];
kvaser_pciefd_rx_error_frame(can, p); kvaser_pciefd_rx_error_frame(can, p);
if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
/* Do not report more errors, until bec_poll_timer expires */ /* Do not report more errors, until bec_poll_timer expires */
kvaser_pciefd_disable_err_gen(can); kvaser_pciefd_disable_err_gen(can);
/* Start polling the error counters */ /* Start polling the error counters */
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
return 0; return 0;
} }
...@@ -1364,29 +1147,22 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, ...@@ -1364,29 +1147,22 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
old_state = can->can.state; old_state = can->can.state;
bec.txerr = p->header[0] & 0xff; bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
&rx_state);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
if (new_state != old_state) { if (new_state != old_state) {
struct net_device *ndev = can->can.dev; struct net_device *ndev = can->can.dev;
struct sk_buff *skb; struct sk_buff *skb;
struct can_frame *cf; struct can_frame *cf;
struct skb_shared_hwtstamps *shhwtstamps;
skb = alloc_can_err_skb(ndev, &cf); skb = alloc_can_err_skb(ndev, &cf);
if (!skb) { if (!skb) {
struct net_device_stats *stats = &ndev->stats; ndev->stats.rx_dropped++;
stats->rx_dropped++;
return -ENOMEM; return -ENOMEM;
} }
kvaser_pciefd_change_state(can, cf, new_state, tx_state, kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
rx_state);
if (old_state == CAN_STATE_BUS_OFF && if (old_state == CAN_STATE_BUS_OFF &&
new_state == CAN_STATE_ERROR_ACTIVE && new_state == CAN_STATE_ERROR_ACTIVE &&
can->can.restart_ms) { can->can.restart_ms) {
...@@ -1394,10 +1170,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, ...@@ -1394,10 +1170,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
cf->can_id |= CAN_ERR_RESTARTED; cf->can_id |= CAN_ERR_RESTARTED;
} }
shhwtstamps = skb_hwtstamps(skb); kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
can->kv_pcie->freq_to_ticks_div));
cf->data[6] = bec.txerr; cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr; cf->data[7] = bec.rxerr;
...@@ -1419,7 +1192,7 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, ...@@ -1419,7 +1192,7 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_can *can; struct kvaser_pciefd_can *can;
u8 cmdseq; u8 cmdseq;
u32 status; u32 status;
u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels) if (ch_id >= pcie->nr_channels)
return -EIO; return -EIO;
...@@ -1427,43 +1200,40 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, ...@@ -1427,43 +1200,40 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
can = pcie->can[ch_id]; can = pcie->can[ch_id];
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff; cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);
/* Reset done, start abort and flush */ /* Reset done, start abort and flush */
if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
p->header[0] & KVASER_PCIEFD_SPACK_RMCD && p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
p->header[1] & KVASER_PCIEFD_SPACK_AUTO && p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
status & KVASER_PCIEFD_KCAN_STAT_IDLE) { status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
u32 cmd;
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
cmd = KVASER_PCIEFD_KCAN_CMD_AT; kvaser_pciefd_abort_flush_reset(can);
cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
p->header[0] & KVASER_PCIEFD_SPACK_IRM && p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
status & KVASER_PCIEFD_KCAN_STAT_IDLE) { status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
/* Reset detected, send end of flush if no packet are in FIFO */ /* Reset detected, send end of flush if no packet are in FIFO */
u8 count = ioread32(can->reg_base + u8 count;
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
if (!count) if (!count)
iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) { cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
/* Response to status request received */ /* Response to status request received */
kvaser_pciefd_handle_status_resp(can, p); kvaser_pciefd_handle_status_resp(can, p);
if (can->can.state != CAN_STATE_BUS_OFF && if (can->can.state != CAN_STATE_BUS_OFF &&
can->can.state != CAN_STATE_ERROR_ACTIVE) { can->can.state != CAN_STATE_ERROR_ACTIVE) {
mod_timer(&can->bec_poll_timer, mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
KVASER_PCIEFD_BEC_POLL_FREQ);
} }
} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
!(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) { !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
/* Reset to bus on detected */ /* Reset to bus on detected */
if (!completion_done(&can->start_comp)) if (!completion_done(&can->start_comp))
complete(&can->start_comp); complete(&can->start_comp);
...@@ -1472,50 +1242,14 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, ...@@ -1472,50 +1242,14 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
return 0; return 0;
} }
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_rx_packet *p)
{
struct kvaser_pciefd_can *can;
u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
if (ch_id >= pcie->nr_channels)
return -EIO;
can = pcie->can[ch_id];
/* If this is the last flushed packet, send end of flush */
if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
u8 count = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
if (count == 0)
iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
struct net_device_stats *stats = &can->can.dev->stats;
stats->tx_bytes += dlc;
stats->tx_packets++;
if (netif_queue_stopped(can->can.dev))
netif_wake_queue(can->can.dev);
}
return 0;
}
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
struct kvaser_pciefd_rx_packet *p) struct kvaser_pciefd_rx_packet *p)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct net_device_stats *stats = &can->can.dev->stats;
struct can_frame *cf; struct can_frame *cf;
skb = alloc_can_err_skb(can->can.dev, &cf); skb = alloc_can_err_skb(can->can.dev, &cf);
can->can.dev->stats.tx_errors++;
stats->tx_errors++;
if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
if (skb) if (skb)
cf->can_id |= CAN_ERR_LOSTARB; cf->can_id |= CAN_ERR_LOSTARB;
...@@ -1526,9 +1260,10 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, ...@@ -1526,9 +1260,10 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
if (skb) { if (skb) {
cf->can_id |= CAN_ERR_BUSERROR; cf->can_id |= CAN_ERR_BUSERROR;
kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
netif_rx(skb); netif_rx(skb);
} else { } else {
stats->rx_dropped++; can->can.dev->stats.rx_dropped++;
netdev_warn(can->can.dev, "No memory left for err_skb\n"); netdev_warn(can->can.dev, "No memory left for err_skb\n");
} }
} }
...@@ -1538,7 +1273,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, ...@@ -1538,7 +1273,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
{ {
struct kvaser_pciefd_can *can; struct kvaser_pciefd_can *can;
bool one_shot_fail = false; bool one_shot_fail = false;
u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels) if (ch_id >= pcie->nr_channels)
return -EIO; return -EIO;
...@@ -1556,20 +1291,24 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, ...@@ -1556,20 +1291,24 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
netdev_dbg(can->can.dev, "Packet was flushed\n"); netdev_dbg(can->can.dev, "Packet was flushed\n");
} else { } else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK; int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL); int len;
u8 count = ioread32(can->reg_base + u8 count;
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; struct sk_buff *skb;
if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT && skb = can->can.echo_skb[echo_idx];
netif_queue_stopped(can->can.dev)) if (skb)
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
netif_wake_queue(can->can.dev); netif_wake_queue(can->can.dev);
if (!one_shot_fail) { if (!one_shot_fail) {
struct net_device_stats *stats = &can->can.dev->stats; can->can.dev->stats.tx_bytes += len;
can->can.dev->stats.tx_packets++;
stats->tx_bytes += dlc;
stats->tx_packets++;
} }
} }
...@@ -1580,7 +1319,7 @@ static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, ...@@ -1580,7 +1319,7 @@ static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_rx_packet *p) struct kvaser_pciefd_rx_packet *p)
{ {
struct kvaser_pciefd_can *can; struct kvaser_pciefd_can *can;
u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels) if (ch_id >= pcie->nr_channels)
return -EIO; return -EIO;
...@@ -1619,15 +1358,15 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, ...@@ -1619,15 +1358,15 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
pos += 2; pos += 2;
p->timestamp = le64_to_cpu(timestamp); p->timestamp = le64_to_cpu(timestamp);
type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf; type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
switch (type) { switch (type) {
case KVASER_PCIEFD_PACK_TYPE_DATA: case KVASER_PCIEFD_PACK_TYPE_DATA:
ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
u8 data_len; u8 data_len;
data_len = can_fd_dlc2len(p->header[1] >> data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
KVASER_PCIEFD_RPACKET_DLC_SHIFT); p->header[1]));
pos += DIV_ROUND_UP(data_len, 4); pos += DIV_ROUND_UP(data_len, 4);
} }
break; break;
...@@ -1644,16 +1383,13 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, ...@@ -1644,16 +1383,13 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
ret = kvaser_pciefd_handle_error_packet(pcie, p); ret = kvaser_pciefd_handle_error_packet(pcie, p);
break; break;
case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
ret = kvaser_pciefd_handle_eack_packet(pcie, p);
break;
case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
ret = kvaser_pciefd_handle_eflush_packet(pcie, p); ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
break; break;
case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
case KVASER_PCIEFD_PACK_TYPE_TXRQ: case KVASER_PCIEFD_PACK_TYPE_TXRQ:
dev_info(&pcie->pci->dev, dev_info(&pcie->pci->dev,
"Received unexpected packet type 0x%08X\n", type); "Received unexpected packet type 0x%08X\n", type);
...@@ -1692,7 +1428,7 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) ...@@ -1692,7 +1428,7 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
return res; return res;
} }
static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{ {
u32 irq; u32 irq;
...@@ -1718,10 +1454,9 @@ static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) ...@@ -1718,10 +1454,9 @@ static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
return 0;
} }
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{ {
u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
...@@ -1739,7 +1474,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) ...@@ -1739,7 +1474,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
netdev_err(can->can.dev, "Rx FIFO overflow\n"); netdev_err(can->can.dev, "Rx FIFO overflow\n");
iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
return 0;
} }
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
...@@ -1750,7 +1484,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) ...@@ -1750,7 +1484,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG); board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK)) if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MASK))
return IRQ_NONE; return IRQ_NONE;
if (board_irq & KVASER_PCIEFD_IRQ_SRB) if (board_irq & KVASER_PCIEFD_IRQ_SRB)
...@@ -1768,20 +1502,18 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) ...@@ -1768,20 +1502,18 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
kvaser_pciefd_transmit_irq(pcie->can[i]); kvaser_pciefd_transmit_irq(pcie->can[i]);
} }
iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{ {
int i; int i;
struct kvaser_pciefd_can *can;
for (i = 0; i < pcie->nr_channels; i++) { for (i = 0; i < pcie->nr_channels; i++) {
can = pcie->can[i]; struct kvaser_pciefd_can *can = pcie->can[i];
if (can) { if (can) {
iowrite32(0, iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
kvaser_pciefd_pwm_stop(can); kvaser_pciefd_pwm_stop(can);
free_candev(can->can.dev); free_candev(can->can.dev);
} }
...@@ -1842,10 +1574,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, ...@@ -1842,10 +1574,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG); pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
/* Reset IRQ handling, expected to be off before */ /* Enable PCI interrupts */
iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, iowrite32(KVASER_PCIEFD_IRQ_ALL_MASK,
pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
pcie->reg_base + KVASER_PCIEFD_IEN_REG); pcie->reg_base + KVASER_PCIEFD_IEN_REG);
/* Ready the DMA buffers */ /* Ready the DMA buffers */
...@@ -1884,14 +1614,13 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, ...@@ -1884,14 +1614,13 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{ {
struct kvaser_pciefd_can *can;
int i; int i;
for (i = 0; i < pcie->nr_channels; i++) { for (i = 0; i < pcie->nr_channels; i++) {
can = pcie->can[i]; struct kvaser_pciefd_can *can = pcie->can[i];
if (can) { if (can) {
iowrite32(0, iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
unregister_candev(can->can.dev); unregister_candev(can->can.dev);
del_timer(&can->bec_poll_timer); del_timer(&can->bec_poll_timer);
kvaser_pciefd_pwm_stop(can); kvaser_pciefd_pwm_stop(can);
...@@ -1906,10 +1635,8 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev) ...@@ -1906,10 +1635,8 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
kvaser_pciefd_remove_all_ctrls(pcie); kvaser_pciefd_remove_all_ctrls(pcie);
/* Turn off IRQ generation */ /* Disable interrupts */
iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
free_irq(pcie->pci->irq, pcie); free_irq(pcie->pci->irq, pcie);
......
...@@ -469,7 +469,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev, ...@@ -469,7 +469,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
int err; int err;
err = can_rx_offload_queue_timestamp(&cdev->offload, skb, err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp); timestamp);
if (err) if (err)
stats->rx_fifo_errors++; stats->rx_fifo_errors++;
} else { } else {
...@@ -895,7 +895,7 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, ...@@ -895,7 +895,7 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
netdev_dbg(dev, "Arbitration phase error detected\n"); netdev_dbg(dev, "Arbitration phase error detected\n");
work_done += m_can_handle_lec_err(dev, lec); work_done += m_can_handle_lec_err(dev, lec);
} }
if (is_lec_err(dlec)) { if (is_lec_err(dlec)) {
netdev_dbg(dev, "Data phase error detected\n"); netdev_dbg(dev, "Data phase error detected\n");
work_done += m_can_handle_lec_err(dev, dlec); work_done += m_can_handle_lec_err(dev, dlec);
......
...@@ -387,6 +387,16 @@ static void sja1000_rx(struct net_device *dev) ...@@ -387,6 +387,16 @@ static void sja1000_rx(struct net_device *dev)
netif_rx(skb); netif_rx(skb);
} }
static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
netdev_dbg(dev, "performing a soft reset upon overrun\n");
sja1000_start(dev);
return IRQ_HANDLED;
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
{ {
struct sja1000_priv *priv = netdev_priv(dev); struct sja1000_priv *priv = netdev_priv(dev);
...@@ -397,6 +407,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) ...@@ -397,6 +407,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
enum can_state rx_state, tx_state; enum can_state rx_state, tx_state;
unsigned int rxerr, txerr; unsigned int rxerr, txerr;
uint8_t ecc, alc; uint8_t ecc, alc;
int ret = 0;
skb = alloc_can_err_skb(dev, &cf); skb = alloc_can_err_skb(dev, &cf);
if (skb == NULL) if (skb == NULL)
...@@ -413,6 +424,15 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) ...@@ -413,6 +424,15 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
stats->rx_over_errors++; stats->rx_over_errors++;
stats->rx_errors++; stats->rx_errors++;
sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
/* Some controllers needs additional handling upon overrun
* condition: the controller may sometimes be totally confused
* and refuse any new frame while its buffer is empty. The only
* way to re-sync the read vs. write buffer offsets is to
* stop any current handling and perform a reset.
*/
if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN)
ret = IRQ_WAKE_THREAD;
} }
if (isrc & IRQ_EI) { if (isrc & IRQ_EI) {
...@@ -492,7 +512,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) ...@@ -492,7 +512,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
netif_rx(skb); netif_rx(skb);
return 0; return ret;
} }
irqreturn_t sja1000_interrupt(int irq, void *dev_id) irqreturn_t sja1000_interrupt(int irq, void *dev_id)
...@@ -501,7 +521,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) ...@@ -501,7 +521,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
struct sja1000_priv *priv = netdev_priv(dev); struct sja1000_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats; struct net_device_stats *stats = &dev->stats;
uint8_t isrc, status; uint8_t isrc, status;
int n = 0; irqreturn_t ret = 0;
int n = 0, err;
if (priv->pre_irq) if (priv->pre_irq)
priv->pre_irq(priv); priv->pre_irq(priv);
...@@ -546,19 +567,25 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) ...@@ -546,19 +567,25 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
} }
if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
/* error interrupt */ /* error interrupt */
if (sja1000_err(dev, isrc, status)) err = sja1000_err(dev, isrc, status);
if (err == IRQ_WAKE_THREAD)
ret = err;
if (err)
break; break;
} }
n++; n++;
} }
out: out:
if (!ret)
ret = (n) ? IRQ_HANDLED : IRQ_NONE;
if (priv->post_irq) if (priv->post_irq)
priv->post_irq(priv); priv->post_irq(priv);
if (n >= SJA1000_MAX_IRQ) if (n >= SJA1000_MAX_IRQ)
netdev_dbg(dev, "%d messages handled in ISR", n); netdev_dbg(dev, "%d messages handled in ISR", n);
return (n) ? IRQ_HANDLED : IRQ_NONE; return ret;
} }
EXPORT_SYMBOL_GPL(sja1000_interrupt); EXPORT_SYMBOL_GPL(sja1000_interrupt);
...@@ -577,8 +604,9 @@ static int sja1000_open(struct net_device *dev) ...@@ -577,8 +604,9 @@ static int sja1000_open(struct net_device *dev)
/* register interrupt handler, if not done by the device driver */ /* register interrupt handler, if not done by the device driver */
if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags, err = request_threaded_irq(dev->irq, sja1000_interrupt,
dev->name, (void *)dev); sja1000_reset_interrupt,
priv->irq_flags, dev->name, (void *)dev);
if (err) { if (err) {
close_candev(dev); close_candev(dev);
return -EAGAIN; return -EAGAIN;
......
...@@ -147,6 +147,7 @@ ...@@ -147,6 +147,7 @@
*/ */
#define SJA1000_CUSTOM_IRQ_HANDLER BIT(0) #define SJA1000_CUSTOM_IRQ_HANDLER BIT(0)
#define SJA1000_QUIRK_NO_CDR_REG BIT(1) #define SJA1000_QUIRK_NO_CDR_REG BIT(1)
#define SJA1000_QUIRK_RESET_ON_OVERRUN BIT(2)
/* /*
* SJA1000 private data structure * SJA1000 private data structure
......
...@@ -106,7 +106,7 @@ static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *o ...@@ -106,7 +106,7 @@ static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *o
static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of) static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
{ {
priv->flags = SJA1000_QUIRK_NO_CDR_REG; priv->flags = SJA1000_QUIRK_NO_CDR_REG | SJA1000_QUIRK_RESET_ON_OVERRUN;
} }
static void sp_populate(struct sja1000_priv *priv, static void sp_populate(struct sja1000_priv *priv,
...@@ -277,6 +277,9 @@ static int sp_probe(struct platform_device *pdev) ...@@ -277,6 +277,9 @@ static int sp_probe(struct platform_device *pdev)
priv->irq_flags = IRQF_SHARED; priv->irq_flags = IRQF_SHARED;
} }
if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN)
priv->irq_flags |= IRQF_ONESHOT;
dev->irq = irq; dev->irq = irq;
priv->reg_base = addr; priv->reg_base = addr;
......
...@@ -625,7 +625,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, ...@@ -625,7 +625,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
timestamp = hecc_read(priv, HECC_CANLNT); timestamp = hecc_read(priv, HECC_CANLNT);
err = can_rx_offload_queue_timestamp(&priv->offload, skb, err = can_rx_offload_queue_timestamp(&priv->offload, skb,
timestamp); timestamp);
if (err) if (err)
ndev->stats.rx_fifo_errors++; ndev->stats.rx_fifo_errors++;
} }
......
...@@ -3,18 +3,19 @@ ...@@ -3,18 +3,19 @@
* CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro * CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro
* *
* Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu> * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
* Copyright (C) 2022 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu> * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/ */
#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/can.h> #include <linux/can.h>
#include <linux/can/dev.h> #include <linux/can/dev.h>
#include <linux/can/error.h> #include <linux/can/error.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/units.h>
#include <linux/usb.h>
MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>"); MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>"); MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
...@@ -22,95 +23,87 @@ MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Mi ...@@ -22,95 +23,87 @@ MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Mi
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
/* USB vendor and product ID */ /* USB vendor and product ID */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4 #define ESD_USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010 #define ESD_USB_CANUSB2_PRODUCT_ID 0x0010
#define USB_CANUSBM_PRODUCT_ID 0x0011 #define ESD_USB_CANUSBM_PRODUCT_ID 0x0011
/* CAN controller clock frequencies */ /* CAN controller clock frequencies */
#define ESD_USB2_CAN_CLOCK 60000000 #define ESD_USB_2_CAN_CLOCK (60 * MEGA) /* Hz */
#define ESD_USBM_CAN_CLOCK 36000000 #define ESD_USB_M_CAN_CLOCK (36 * MEGA) /* Hz */
/* Maximum number of CAN nets */ /* Maximum number of CAN nets */
#define ESD_USB_MAX_NETS 2 #define ESD_USB_MAX_NETS 2
/* USB commands */ /* USB commands */
#define CMD_VERSION 1 /* also used for VERSION_REPLY */ #define ESD_USB_CMD_VERSION 1 /* also used for VERSION_REPLY */
#define CMD_CAN_RX 2 /* device to host only */ #define ESD_USB_CMD_CAN_RX 2 /* device to host only */
#define CMD_CAN_TX 3 /* also used for TX_DONE */ #define ESD_USB_CMD_CAN_TX 3 /* also used for TX_DONE */
#define CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */ #define ESD_USB_CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */
#define CMD_TS 5 /* also used for TS_REPLY */ #define ESD_USB_CMD_TS 5 /* also used for TS_REPLY */
#define CMD_IDADD 6 /* also used for IDADD_REPLY */ #define ESD_USB_CMD_IDADD 6 /* also used for IDADD_REPLY */
/* esd CAN message flags - dlc field */ /* esd CAN message flags - dlc field */
#define ESD_RTR 0x10 #define ESD_USB_RTR BIT(4)
/* esd CAN message flags - id field */ /* esd CAN message flags - id field */
#define ESD_EXTID 0x20000000 #define ESD_USB_EXTID BIT(29)
#define ESD_EVENT 0x40000000 #define ESD_USB_EVENT BIT(30)
#define ESD_IDMASK 0x1fffffff #define ESD_USB_IDMASK GENMASK(28, 0)
/* esd CAN event ids */ /* esd CAN event ids */
#define ESD_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */ #define ESD_USB_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
/* baudrate message flags */ /* baudrate message flags */
#define ESD_USB_UBR 0x80000000 #define ESD_USB_LOM BIT(30) /* Listen Only Mode */
#define ESD_USB_LOM 0x40000000 #define ESD_USB_UBR BIT(31) /* User Bit Rate (controller BTR) in bits 0..27 */
#define ESD_USB_NO_BAUDRATE 0x7fffffff #define ESD_USB_NO_BAUDRATE GENMASK(30, 0) /* bit rate unconfigured */
/* bit timing CAN-USB/2 */ /* bit timing esd CAN-USB */
#define ESD_USB2_TSEG1_MIN 1 #define ESD_USB_2_TSEG1_SHIFT 16
#define ESD_USB2_TSEG1_MAX 16 #define ESD_USB_2_TSEG2_SHIFT 20
#define ESD_USB2_TSEG1_SHIFT 16 #define ESD_USB_2_SJW_SHIFT 14
#define ESD_USB2_TSEG2_MIN 1 #define ESD_USB_M_SJW_SHIFT 24
#define ESD_USB2_TSEG2_MAX 8 #define ESD_USB_TRIPLE_SAMPLES BIT(23)
#define ESD_USB2_TSEG2_SHIFT 20
#define ESD_USB2_SJW_MAX 4
#define ESD_USB2_SJW_SHIFT 14
#define ESD_USBM_SJW_SHIFT 24
#define ESD_USB2_BRP_MIN 1
#define ESD_USB2_BRP_MAX 1024
#define ESD_USB2_BRP_INC 1
#define ESD_USB2_3_SAMPLES 0x00800000
/* esd IDADD message */ /* esd IDADD message */
#define ESD_ID_ENABLE 0x80 #define ESD_USB_ID_ENABLE BIT(7)
#define ESD_MAX_ID_SEGMENT 64 #define ESD_USB_MAX_ID_SEGMENT 64
/* SJA1000 ECC register (emulated by usb firmware) */ /* SJA1000 ECC register (emulated by usb firmware) */
#define SJA1000_ECC_SEG 0x1F #define ESD_USB_SJA1000_ECC_SEG GENMASK(4, 0)
#define SJA1000_ECC_DIR 0x20 #define ESD_USB_SJA1000_ECC_DIR BIT(5)
#define SJA1000_ECC_ERR 0x06 #define ESD_USB_SJA1000_ECC_ERR BIT(2, 1)
#define SJA1000_ECC_BIT 0x00 #define ESD_USB_SJA1000_ECC_BIT 0x00
#define SJA1000_ECC_FORM 0x40 #define ESD_USB_SJA1000_ECC_FORM BIT(6)
#define SJA1000_ECC_STUFF 0x80 #define ESD_USB_SJA1000_ECC_STUFF BIT(7)
#define SJA1000_ECC_MASK 0xc0 #define ESD_USB_SJA1000_ECC_MASK GENMASK(7, 6)
/* esd bus state event codes */ /* esd bus state event codes */
#define ESD_BUSSTATE_MASK 0xc0 #define ESD_USB_BUSSTATE_MASK GENMASK(7, 6)
#define ESD_BUSSTATE_WARN 0x40 #define ESD_USB_BUSSTATE_WARN BIT(6)
#define ESD_BUSSTATE_ERRPASSIVE 0x80 #define ESD_USB_BUSSTATE_ERRPASSIVE BIT(7)
#define ESD_BUSSTATE_BUSOFF 0xc0 #define ESD_USB_BUSSTATE_BUSOFF GENMASK(7, 6)
#define RX_BUFFER_SIZE 1024 #define ESD_USB_RX_BUFFER_SIZE 1024
#define MAX_RX_URBS 4 #define ESD_USB_MAX_RX_URBS 4
#define MAX_TX_URBS 16 /* must be power of 2 */ #define ESD_USB_MAX_TX_URBS 16 /* must be power of 2 */
struct header_msg { struct esd_usb_header_msg {
u8 len; /* len is always the total message length in 32bit words */ u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 rsvd[2]; u8 rsvd[2];
}; };
struct version_msg { struct esd_usb_version_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 rsvd; u8 rsvd;
u8 flags; u8 flags;
__le32 drv_version; __le32 drv_version;
}; };
struct version_reply_msg { struct esd_usb_version_reply_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 nets; u8 nets;
u8 features; u8 features;
...@@ -120,15 +113,15 @@ struct version_reply_msg { ...@@ -120,15 +113,15 @@ struct version_reply_msg {
__le32 ts; __le32 ts;
}; };
struct rx_msg { struct esd_usb_rx_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 net; u8 net;
u8 dlc; u8 dlc;
__le32 ts; __le32 ts;
__le32 id; /* upper 3 bits contain flags */ __le32 id; /* upper 3 bits contain flags */
union { union {
u8 data[8]; u8 data[CAN_MAX_DLEN];
struct { struct {
u8 status; /* CAN Controller Status */ u8 status; /* CAN Controller Status */
u8 ecc; /* Error Capture Register */ u8 ecc; /* Error Capture Register */
...@@ -138,18 +131,18 @@ struct rx_msg { ...@@ -138,18 +131,18 @@ struct rx_msg {
}; };
}; };
struct tx_msg { struct esd_usb_tx_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 net; u8 net;
u8 dlc; u8 dlc;
u32 hnd; /* opaque handle, not used by device */ u32 hnd; /* opaque handle, not used by device */
__le32 id; /* upper 3 bits contain flags */ __le32 id; /* upper 3 bits contain flags */
u8 data[8]; u8 data[CAN_MAX_DLEN];
}; };
struct tx_done_msg { struct esd_usb_tx_done_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 net; u8 net;
u8 status; u8 status;
...@@ -157,16 +150,16 @@ struct tx_done_msg { ...@@ -157,16 +150,16 @@ struct tx_done_msg {
__le32 ts; __le32 ts;
}; };
struct id_filter_msg { struct esd_usb_id_filter_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 net; u8 net;
u8 option; u8 option;
__le32 mask[ESD_MAX_ID_SEGMENT + 1]; __le32 mask[ESD_USB_MAX_ID_SEGMENT + 1]; /* +1 for 29bit extended IDs */
}; };
struct set_baudrate_msg { struct esd_usb_set_baudrate_msg {
u8 len; u8 len; /* total message length in 32bit words */
u8 cmd; u8 cmd;
u8 net; u8 net;
u8 rsvd; u8 rsvd;
...@@ -175,19 +168,19 @@ struct set_baudrate_msg { ...@@ -175,19 +168,19 @@ struct set_baudrate_msg {
/* Main message type used between library and application */ /* Main message type used between library and application */
union __packed esd_usb_msg { union __packed esd_usb_msg {
struct header_msg hdr; struct esd_usb_header_msg hdr;
struct version_msg version; struct esd_usb_version_msg version;
struct version_reply_msg version_reply; struct esd_usb_version_reply_msg version_reply;
struct rx_msg rx; struct esd_usb_rx_msg rx;
struct tx_msg tx; struct esd_usb_tx_msg tx;
struct tx_done_msg txdone; struct esd_usb_tx_done_msg txdone;
struct set_baudrate_msg setbaud; struct esd_usb_set_baudrate_msg setbaud;
struct id_filter_msg filter; struct esd_usb_id_filter_msg filter;
}; };
static struct usb_device_id esd_usb_table[] = { static struct usb_device_id esd_usb_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)}, {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB2_PRODUCT_ID)},
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)}, {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSBM_PRODUCT_ID)},
{} {}
}; };
MODULE_DEVICE_TABLE(usb, esd_usb_table); MODULE_DEVICE_TABLE(usb, esd_usb_table);
...@@ -208,8 +201,8 @@ struct esd_usb { ...@@ -208,8 +201,8 @@ struct esd_usb {
int net_count; int net_count;
u32 version; u32 version;
int rxinitdone; int rxinitdone;
void *rxbuf[MAX_RX_URBS]; void *rxbuf[ESD_USB_MAX_RX_URBS];
dma_addr_t rxbuf_dma[MAX_RX_URBS]; dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS];
}; };
struct esd_usb_net_priv { struct esd_usb_net_priv {
...@@ -217,7 +210,7 @@ struct esd_usb_net_priv { ...@@ -217,7 +210,7 @@ struct esd_usb_net_priv {
atomic_t active_tx_jobs; atomic_t active_tx_jobs;
struct usb_anchor tx_submitted; struct usb_anchor tx_submitted;
struct esd_tx_urb_context tx_contexts[MAX_TX_URBS]; struct esd_tx_urb_context tx_contexts[ESD_USB_MAX_TX_URBS];
struct esd_usb *usb; struct esd_usb *usb;
struct net_device *netdev; struct net_device *netdev;
...@@ -232,9 +225,9 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv, ...@@ -232,9 +225,9 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
struct net_device_stats *stats = &priv->netdev->stats; struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf; struct can_frame *cf;
struct sk_buff *skb; struct sk_buff *skb;
u32 id = le32_to_cpu(msg->rx.id) & ESD_IDMASK; u32 id = le32_to_cpu(msg->rx.id) & ESD_USB_IDMASK;
if (id == ESD_EV_CAN_ERROR_EXT) { if (id == ESD_USB_EV_CAN_ERROR_EXT) {
u8 state = msg->rx.ev_can_err_ext.status; u8 state = msg->rx.ev_can_err_ext.status;
u8 ecc = msg->rx.ev_can_err_ext.ecc; u8 ecc = msg->rx.ev_can_err_ext.ecc;
...@@ -261,15 +254,15 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv, ...@@ -261,15 +254,15 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
priv->old_state = state; priv->old_state = state;
switch (state & ESD_BUSSTATE_MASK) { switch (state & ESD_USB_BUSSTATE_MASK) {
case ESD_BUSSTATE_BUSOFF: case ESD_USB_BUSSTATE_BUSOFF:
new_state = CAN_STATE_BUS_OFF; new_state = CAN_STATE_BUS_OFF;
can_bus_off(priv->netdev); can_bus_off(priv->netdev);
break; break;
case ESD_BUSSTATE_WARN: case ESD_USB_BUSSTATE_WARN:
new_state = CAN_STATE_ERROR_WARNING; new_state = CAN_STATE_ERROR_WARNING;
break; break;
case ESD_BUSSTATE_ERRPASSIVE: case ESD_USB_BUSSTATE_ERRPASSIVE:
new_state = CAN_STATE_ERROR_PASSIVE; new_state = CAN_STATE_ERROR_PASSIVE;
break; break;
default: default:
...@@ -291,14 +284,14 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv, ...@@ -291,14 +284,14 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SJA1000_ECC_MASK) { switch (ecc & ESD_USB_SJA1000_ECC_MASK) {
case SJA1000_ECC_BIT: case ESD_USB_SJA1000_ECC_BIT:
cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[2] |= CAN_ERR_PROT_BIT;
break; break;
case SJA1000_ECC_FORM: case ESD_USB_SJA1000_ECC_FORM:
cf->data[2] |= CAN_ERR_PROT_FORM; cf->data[2] |= CAN_ERR_PROT_FORM;
break; break;
case SJA1000_ECC_STUFF: case ESD_USB_SJA1000_ECC_STUFF:
cf->data[2] |= CAN_ERR_PROT_STUFF; cf->data[2] |= CAN_ERR_PROT_STUFF;
break; break;
default: default:
...@@ -306,11 +299,11 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv, ...@@ -306,11 +299,11 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
} }
/* Error occurred during transmission? */ /* Error occurred during transmission? */
if (!(ecc & SJA1000_ECC_DIR)) if (!(ecc & ESD_USB_SJA1000_ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX; cf->data[2] |= CAN_ERR_PROT_TX;
/* Bit stream position in CAN frame as the error was detected */ /* Bit stream position in CAN frame as the error was detected */
cf->data[3] = ecc & SJA1000_ECC_SEG; cf->data[3] = ecc & ESD_USB_SJA1000_ECC_SEG;
} }
if (skb) { if (skb) {
...@@ -337,7 +330,7 @@ static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv, ...@@ -337,7 +330,7 @@ static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
id = le32_to_cpu(msg->rx.id); id = le32_to_cpu(msg->rx.id);
if (id & ESD_EVENT) { if (id & ESD_USB_EVENT) {
esd_usb_rx_event(priv, msg); esd_usb_rx_event(priv, msg);
} else { } else {
skb = alloc_can_skb(priv->netdev, &cf); skb = alloc_can_skb(priv->netdev, &cf);
...@@ -346,14 +339,14 @@ static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv, ...@@ -346,14 +339,14 @@ static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
return; return;
} }
cf->can_id = id & ESD_IDMASK; cf->can_id = id & ESD_USB_IDMASK;
can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_RTR, can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_USB_RTR,
priv->can.ctrlmode); priv->can.ctrlmode);
if (id & ESD_EXTID) if (id & ESD_USB_EXTID)
cf->can_id |= CAN_EFF_FLAG; cf->can_id |= CAN_EFF_FLAG;
if (msg->rx.dlc & ESD_RTR) { if (msg->rx.dlc & ESD_USB_RTR) {
cf->can_id |= CAN_RTR_FLAG; cf->can_id |= CAN_RTR_FLAG;
} else { } else {
for (i = 0; i < cf->len; i++) for (i = 0; i < cf->len; i++)
...@@ -377,7 +370,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv, ...@@ -377,7 +370,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
if (!netif_device_present(netdev)) if (!netif_device_present(netdev))
return; return;
context = &priv->tx_contexts[msg->txdone.hnd & (MAX_TX_URBS - 1)]; context = &priv->tx_contexts[msg->txdone.hnd & (ESD_USB_MAX_TX_URBS - 1)];
if (!msg->txdone.status) { if (!msg->txdone.status) {
stats->tx_packets++; stats->tx_packets++;
...@@ -389,7 +382,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv, ...@@ -389,7 +382,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
} }
/* Release context */ /* Release context */
context->echo_index = MAX_TX_URBS; context->echo_index = ESD_USB_MAX_TX_URBS;
atomic_dec(&priv->active_tx_jobs); atomic_dec(&priv->active_tx_jobs);
netif_wake_queue(netdev); netif_wake_queue(netdev);
...@@ -424,7 +417,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb) ...@@ -424,7 +417,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
msg = (union esd_usb_msg *)(urb->transfer_buffer + pos); msg = (union esd_usb_msg *)(urb->transfer_buffer + pos);
switch (msg->hdr.cmd) { switch (msg->hdr.cmd) {
case CMD_CAN_RX: case ESD_USB_CMD_CAN_RX:
if (msg->rx.net >= dev->net_count) { if (msg->rx.net >= dev->net_count) {
dev_err(dev->udev->dev.parent, "format error\n"); dev_err(dev->udev->dev.parent, "format error\n");
break; break;
...@@ -433,7 +426,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb) ...@@ -433,7 +426,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
esd_usb_rx_can_msg(dev->nets[msg->rx.net], msg); esd_usb_rx_can_msg(dev->nets[msg->rx.net], msg);
break; break;
case CMD_CAN_TX: case ESD_USB_CMD_CAN_TX:
if (msg->txdone.net >= dev->net_count) { if (msg->txdone.net >= dev->net_count) {
dev_err(dev->udev->dev.parent, "format error\n"); dev_err(dev->udev->dev.parent, "format error\n");
break; break;
...@@ -444,7 +437,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb) ...@@ -444,7 +437,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
break; break;
} }
pos += msg->hdr.len << 2; pos += msg->hdr.len * sizeof(u32); /* convert to # of bytes */
if (pos > urb->actual_length) { if (pos > urb->actual_length) {
dev_err(dev->udev->dev.parent, "format error\n"); dev_err(dev->udev->dev.parent, "format error\n");
...@@ -454,7 +447,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb) ...@@ -454,7 +447,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
resubmit_urb: resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
urb->transfer_buffer, RX_BUFFER_SIZE, urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev); esd_usb_read_bulk_callback, dev);
retval = usb_submit_urb(urb, GFP_ATOMIC); retval = usb_submit_urb(urb, GFP_ATOMIC);
...@@ -538,7 +531,7 @@ static int esd_usb_send_msg(struct esd_usb *dev, union esd_usb_msg *msg) ...@@ -538,7 +531,7 @@ static int esd_usb_send_msg(struct esd_usb *dev, union esd_usb_msg *msg)
return usb_bulk_msg(dev->udev, return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev, 2), usb_sndbulkpipe(dev->udev, 2),
msg, msg,
msg->hdr.len << 2, msg->hdr.len * sizeof(u32), /* convert to # of bytes */
&actual_length, &actual_length,
1000); 1000);
} }
...@@ -563,7 +556,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev) ...@@ -563,7 +556,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
if (dev->rxinitdone) if (dev->rxinitdone)
return 0; return 0;
for (i = 0; i < MAX_RX_URBS; i++) { for (i = 0; i < ESD_USB_MAX_RX_URBS; i++) {
struct urb *urb = NULL; struct urb *urb = NULL;
u8 *buf = NULL; u8 *buf = NULL;
dma_addr_t buf_dma; dma_addr_t buf_dma;
...@@ -575,7 +568,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev) ...@@ -575,7 +568,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
break; break;
} }
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, buf = usb_alloc_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, GFP_KERNEL,
&buf_dma); &buf_dma);
if (!buf) { if (!buf) {
dev_warn(dev->udev->dev.parent, dev_warn(dev->udev->dev.parent,
...@@ -588,7 +581,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev) ...@@ -588,7 +581,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
usb_fill_bulk_urb(urb, dev->udev, usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1), usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE, buf, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev); esd_usb_read_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->rx_submitted); usb_anchor_urb(urb, &dev->rx_submitted);
...@@ -596,7 +589,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev) ...@@ -596,7 +589,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
err = usb_submit_urb(urb, GFP_KERNEL); err = usb_submit_urb(urb, GFP_KERNEL);
if (err) { if (err) {
usb_unanchor_urb(urb); usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, buf,
urb->transfer_dma); urb->transfer_dma);
goto freeurb; goto freeurb;
} }
...@@ -618,7 +611,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev) ...@@ -618,7 +611,7 @@ static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
} }
/* Warn if we've couldn't transmit all the URBs */ /* Warn if we've couldn't transmit all the URBs */
if (i < MAX_RX_URBS) { if (i < ESD_USB_MAX_RX_URBS) {
dev_warn(dev->udev->dev.parent, dev_warn(dev->udev->dev.parent,
"rx performance may be slow\n"); "rx performance may be slow\n");
} }
...@@ -653,14 +646,14 @@ static int esd_usb_start(struct esd_usb_net_priv *priv) ...@@ -653,14 +646,14 @@ static int esd_usb_start(struct esd_usb_net_priv *priv)
* the number of the starting bitmask (0..64) to the filter.option * the number of the starting bitmask (0..64) to the filter.option
* field followed by only some bitmasks. * field followed by only some bitmasks.
*/ */
msg->hdr.cmd = CMD_IDADD; msg->hdr.cmd = ESD_USB_CMD_IDADD;
msg->hdr.len = 2 + ESD_MAX_ID_SEGMENT; msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32); /* # of 32bit words */
msg->filter.net = priv->index; msg->filter.net = priv->index;
msg->filter.option = ESD_ID_ENABLE; /* start with segment 0 */ msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
for (i = 0; i < ESD_MAX_ID_SEGMENT; i++) for (i = 0; i < ESD_USB_MAX_ID_SEGMENT; i++)
msg->filter.mask[i] = cpu_to_le32(0xffffffff); msg->filter.mask[i] = cpu_to_le32(GENMASK(31, 0));
/* enable 29bit extended IDs */ /* enable 29bit extended IDs */
msg->filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); msg->filter.mask[ESD_USB_MAX_ID_SEGMENT] = cpu_to_le32(BIT(0));
err = esd_usb_send_msg(dev, msg); err = esd_usb_send_msg(dev, msg);
if (err) if (err)
...@@ -689,8 +682,8 @@ static void unlink_all_urbs(struct esd_usb *dev) ...@@ -689,8 +682,8 @@ static void unlink_all_urbs(struct esd_usb *dev)
usb_kill_anchored_urbs(&dev->rx_submitted); usb_kill_anchored_urbs(&dev->rx_submitted);
for (i = 0; i < MAX_RX_URBS; ++i) for (i = 0; i < ESD_USB_MAX_RX_URBS; ++i)
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE,
dev->rxbuf[i], dev->rxbuf_dma[i]); dev->rxbuf[i], dev->rxbuf_dma[i]);
for (i = 0; i < dev->net_count; i++) { for (i = 0; i < dev->net_count; i++) {
...@@ -699,8 +692,8 @@ static void unlink_all_urbs(struct esd_usb *dev) ...@@ -699,8 +692,8 @@ static void unlink_all_urbs(struct esd_usb *dev)
usb_kill_anchored_urbs(&priv->tx_submitted); usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_jobs, 0); atomic_set(&priv->active_tx_jobs, 0);
for (j = 0; j < MAX_TX_URBS; j++) for (j = 0; j < ESD_USB_MAX_TX_URBS; j++)
priv->tx_contexts[j].echo_index = MAX_TX_URBS; priv->tx_contexts[j].echo_index = ESD_USB_MAX_TX_URBS;
} }
} }
} }
...@@ -765,25 +758,27 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, ...@@ -765,25 +758,27 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
msg = (union esd_usb_msg *)buf; msg = (union esd_usb_msg *)buf;
msg->hdr.len = 3; /* minimal length */ /* minimal length as # of 32bit words */
msg->hdr.cmd = CMD_CAN_TX; msg->hdr.len = offsetof(struct esd_usb_tx_msg, data) / sizeof(u32);
msg->hdr.cmd = ESD_USB_CMD_CAN_TX;
msg->tx.net = priv->index; msg->tx.net = priv->index;
msg->tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); msg->tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
msg->tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); msg->tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
if (cf->can_id & CAN_RTR_FLAG) if (cf->can_id & CAN_RTR_FLAG)
msg->tx.dlc |= ESD_RTR; msg->tx.dlc |= ESD_USB_RTR;
if (cf->can_id & CAN_EFF_FLAG) if (cf->can_id & CAN_EFF_FLAG)
msg->tx.id |= cpu_to_le32(ESD_EXTID); msg->tx.id |= cpu_to_le32(ESD_USB_EXTID);
for (i = 0; i < cf->len; i++) for (i = 0; i < cf->len; i++)
msg->tx.data[i] = cf->data[i]; msg->tx.data[i] = cf->data[i];
msg->hdr.len += (cf->len + 3) >> 2; /* round up, then divide by 4 to add the payload length as # of 32bit words */
msg->hdr.len += DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < MAX_TX_URBS; i++) { for (i = 0; i < ESD_USB_MAX_TX_URBS; i++) {
if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { if (priv->tx_contexts[i].echo_index == ESD_USB_MAX_TX_URBS) {
context = &priv->tx_contexts[i]; context = &priv->tx_contexts[i];
break; break;
} }
...@@ -800,10 +795,10 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, ...@@ -800,10 +795,10 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
context->echo_index = i; context->echo_index = i;
/* hnd must not be 0 - MSB is stripped in txdone handling */ /* hnd must not be 0 - MSB is stripped in txdone handling */
msg->tx.hnd = 0x80000000 | i; /* returned in TX done message */ msg->tx.hnd = BIT(31) | i; /* returned in TX done message */
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
msg->hdr.len << 2, msg->hdr.len * sizeof(u32), /* convert to # of bytes */
esd_usb_write_bulk_callback, context); esd_usb_write_bulk_callback, context);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
...@@ -815,7 +810,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, ...@@ -815,7 +810,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
atomic_inc(&priv->active_tx_jobs); atomic_inc(&priv->active_tx_jobs);
/* Slow down tx path */ /* Slow down tx path */
if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS) if (atomic_read(&priv->active_tx_jobs) >= ESD_USB_MAX_TX_URBS)
netif_stop_queue(netdev); netif_stop_queue(netdev);
err = usb_submit_urb(urb, GFP_ATOMIC); err = usb_submit_urb(urb, GFP_ATOMIC);
...@@ -865,18 +860,18 @@ static int esd_usb_close(struct net_device *netdev) ...@@ -865,18 +860,18 @@ static int esd_usb_close(struct net_device *netdev)
return -ENOMEM; return -ENOMEM;
/* Disable all IDs (see esd_usb_start()) */ /* Disable all IDs (see esd_usb_start()) */
msg->hdr.cmd = CMD_IDADD; msg->hdr.cmd = ESD_USB_CMD_IDADD;
msg->hdr.len = 2 + ESD_MAX_ID_SEGMENT; msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32);/* # of 32bit words */
msg->filter.net = priv->index; msg->filter.net = priv->index;
msg->filter.option = ESD_ID_ENABLE; /* start with segment 0 */ msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++) for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++)
msg->filter.mask[i] = 0; msg->filter.mask[i] = 0;
if (esd_usb_send_msg(priv->usb, msg) < 0) if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending idadd message failed\n"); netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */ /* set CAN controller to reset mode */
msg->hdr.len = 2; msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
msg->hdr.cmd = CMD_SETBAUD; msg->hdr.cmd = ESD_USB_CMD_SETBAUD;
msg->setbaud.net = priv->index; msg->setbaud.net = priv->index;
msg->setbaud.rsvd = 0; msg->setbaud.rsvd = 0;
msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE); msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
...@@ -905,20 +900,21 @@ static const struct ethtool_ops esd_usb_ethtool_ops = { ...@@ -905,20 +900,21 @@ static const struct ethtool_ops esd_usb_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info, .get_ts_info = ethtool_op_get_ts_info,
}; };
static const struct can_bittiming_const esd_usb2_bittiming_const = { static const struct can_bittiming_const esd_usb_2_bittiming_const = {
.name = "esd_usb2", .name = "esd_usb_2",
.tseg1_min = ESD_USB2_TSEG1_MIN, .tseg1_min = 1,
.tseg1_max = ESD_USB2_TSEG1_MAX, .tseg1_max = 16,
.tseg2_min = ESD_USB2_TSEG2_MIN, .tseg2_min = 1,
.tseg2_max = ESD_USB2_TSEG2_MAX, .tseg2_max = 8,
.sjw_max = ESD_USB2_SJW_MAX, .sjw_max = 4,
.brp_min = ESD_USB2_BRP_MIN, .brp_min = 1,
.brp_max = ESD_USB2_BRP_MAX, .brp_max = 1024,
.brp_inc = ESD_USB2_BRP_INC, .brp_inc = 1,
}; };
static int esd_usb2_set_bittiming(struct net_device *netdev) static int esd_usb_2_set_bittiming(struct net_device *netdev)
{ {
const struct can_bittiming_const *btc = &esd_usb_2_bittiming_const;
struct esd_usb_net_priv *priv = netdev_priv(netdev); struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming; struct can_bittiming *bt = &priv->can.bittiming;
union esd_usb_msg *msg; union esd_usb_msg *msg;
...@@ -930,35 +926,35 @@ static int esd_usb2_set_bittiming(struct net_device *netdev) ...@@ -930,35 +926,35 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
canbtr |= ESD_USB_LOM; canbtr |= ESD_USB_LOM;
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1); canbtr |= (bt->brp - 1) & (btc->brp_max - 1);
if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) == if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID) ESD_USB_CANUSBM_PRODUCT_ID)
sjw_shift = ESD_USBM_SJW_SHIFT; sjw_shift = ESD_USB_M_SJW_SHIFT;
else else
sjw_shift = ESD_USB2_SJW_SHIFT; sjw_shift = ESD_USB_2_SJW_SHIFT;
canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1)) canbtr |= ((bt->sjw - 1) & (btc->sjw_max - 1))
<< sjw_shift; << sjw_shift;
canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1) canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
& (ESD_USB2_TSEG1_MAX - 1)) & (btc->tseg1_max - 1))
<< ESD_USB2_TSEG1_SHIFT; << ESD_USB_2_TSEG1_SHIFT;
canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1)) canbtr |= ((bt->phase_seg2 - 1) & (btc->tseg2_max - 1))
<< ESD_USB2_TSEG2_SHIFT; << ESD_USB_2_TSEG2_SHIFT;
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
canbtr |= ESD_USB2_3_SAMPLES; canbtr |= ESD_USB_TRIPLE_SAMPLES;
msg = kmalloc(sizeof(*msg), GFP_KERNEL); msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg) if (!msg)
return -ENOMEM; return -ENOMEM;
msg->hdr.len = 2; msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
msg->hdr.cmd = CMD_SETBAUD; msg->hdr.cmd = ESD_USB_CMD_SETBAUD;
msg->setbaud.net = priv->index; msg->setbaud.net = priv->index;
msg->setbaud.rsvd = 0; msg->setbaud.rsvd = 0;
msg->setbaud.baud = cpu_to_le32(canbtr); msg->setbaud.baud = cpu_to_le32(canbtr);
netdev_info(netdev, "setting BTR=%#x\n", canbtr); netdev_dbg(netdev, "setting BTR=%#x\n", canbtr);
err = esd_usb_send_msg(priv->usb, msg); err = esd_usb_send_msg(priv->usb, msg);
...@@ -999,7 +995,7 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) ...@@ -999,7 +995,7 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
int err = 0; int err = 0;
int i; int i;
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); netdev = alloc_candev(sizeof(*priv), ESD_USB_MAX_TX_URBS);
if (!netdev) { if (!netdev) {
dev_err(&intf->dev, "couldn't alloc candev\n"); dev_err(&intf->dev, "couldn't alloc candev\n");
err = -ENOMEM; err = -ENOMEM;
...@@ -1011,8 +1007,8 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) ...@@ -1011,8 +1007,8 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
init_usb_anchor(&priv->tx_submitted); init_usb_anchor(&priv->tx_submitted);
atomic_set(&priv->active_tx_jobs, 0); atomic_set(&priv->active_tx_jobs, 0);
for (i = 0; i < MAX_TX_URBS; i++) for (i = 0; i < ESD_USB_MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS; priv->tx_contexts[i].echo_index = ESD_USB_MAX_TX_URBS;
priv->usb = dev; priv->usb = dev;
priv->netdev = netdev; priv->netdev = netdev;
...@@ -1024,15 +1020,15 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) ...@@ -1024,15 +1020,15 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
CAN_CTRLMODE_BERR_REPORTING; CAN_CTRLMODE_BERR_REPORTING;
if (le16_to_cpu(dev->udev->descriptor.idProduct) == if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID) ESD_USB_CANUSBM_PRODUCT_ID)
priv->can.clock.freq = ESD_USBM_CAN_CLOCK; priv->can.clock.freq = ESD_USB_M_CAN_CLOCK;
else { else {
priv->can.clock.freq = ESD_USB2_CAN_CLOCK; priv->can.clock.freq = ESD_USB_2_CAN_CLOCK;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
} }
priv->can.bittiming_const = &esd_usb2_bittiming_const; priv->can.bittiming_const = &esd_usb_2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming; priv->can.do_set_bittiming = esd_usb_2_set_bittiming;
priv->can.do_set_mode = esd_usb_set_mode; priv->can.do_set_mode = esd_usb_set_mode;
priv->can.do_get_berr_counter = esd_usb_get_berr_counter; priv->can.do_get_berr_counter = esd_usb_get_berr_counter;
...@@ -1090,8 +1086,8 @@ static int esd_usb_probe(struct usb_interface *intf, ...@@ -1090,8 +1086,8 @@ static int esd_usb_probe(struct usb_interface *intf,
} }
/* query number of CAN interfaces (nets) */ /* query number of CAN interfaces (nets) */
msg->hdr.cmd = CMD_VERSION; msg->hdr.cmd = ESD_USB_CMD_VERSION;
msg->hdr.len = 2; msg->hdr.len = sizeof(struct esd_usb_version_msg) / sizeof(u32); /* # of 32bit words */
msg->version.rsvd = 0; msg->version.rsvd = 0;
msg->version.flags = 0; msg->version.flags = 0;
msg->version.drv_version = 0; msg->version.drv_version = 0;
......
...@@ -816,7 +816,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) ...@@ -816,7 +816,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->stop_comp); init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp); init_completion(&priv->flush_comp);
init_completion(&priv->get_busparams_comp); init_completion(&priv->get_busparams_comp);
priv->can.ctrlmode_supported = 0; priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
priv->dev = dev; priv->dev = dev;
priv->netdev = netdev; priv->netdev = netdev;
......
...@@ -1263,7 +1263,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, ...@@ -1263,7 +1263,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
kvaser_usb_can_rx_over_error(priv->netdev); kvaser_usb_can_rx_over_error(priv->netdev);
cf->len = can_cc_dlc2len(cmd->rx_can.dlc); can_frame_set_cc_len((struct can_frame *)cf, cmd->rx_can.dlc, priv->can.ctrlmode);
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG; cf->can_id |= CAN_RTR_FLAG;
...@@ -1342,7 +1342,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, ...@@ -1342,7 +1342,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI) if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
cf->flags |= CANFD_ESI; cf->flags |= CANFD_ESI;
} else { } else {
cf->len = can_cc_dlc2len(dlc); can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->can.ctrlmode);
} }
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
...@@ -1442,7 +1442,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, ...@@ -1442,7 +1442,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev; struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd_ext *cmd; struct kvaser_cmd_ext *cmd;
struct canfd_frame *cf = (struct canfd_frame *)skb->data; struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u8 dlc = can_fd_len2dlc(cf->len); u8 dlc;
u8 nbr_of_bytes = cf->len; u8 nbr_of_bytes = cf->len;
u32 flags; u32 flags;
u32 id; u32 id;
...@@ -1467,6 +1467,11 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, ...@@ -1467,6 +1467,11 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
cmd->len = cpu_to_le16(*cmd_len); cmd->len = cpu_to_le16(*cmd_len);
if (can_is_canfd_skb(skb))
dlc = can_fd_len2dlc(cf->len);
else
dlc = can_get_cc_dlc((struct can_frame *)cf, priv->can.ctrlmode);
cmd->tx_can.databytes = nbr_of_bytes; cmd->tx_can.databytes = nbr_of_bytes;
cmd->tx_can.dlc = dlc; cmd->tx_can.dlc = dlc;
...@@ -1542,7 +1547,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, ...@@ -1542,7 +1547,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
id = cf->can_id & CAN_SFF_MASK; id = cf->can_id & CAN_SFF_MASK;
} }
cmd->tx_can.dlc = cf->len; cmd->tx_can.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
flags = (cf->can_id & CAN_EFF_FLAG ? flags = (cf->can_id & CAN_EFF_FLAG ?
KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0); KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
......
...@@ -573,7 +573,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, ...@@ -573,7 +573,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
cmd->u.tx_can.data[1] = cf->can_id & 0x3f; cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
} }
cmd->u.tx_can.data[5] = cf->len; cmd->u.tx_can.data[5] = can_get_cc_dlc(cf, priv->can.ctrlmode);
memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len); memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len);
if (cf->can_id & CAN_RTR_FLAG) if (cf->can_id & CAN_RTR_FLAG)
...@@ -1349,7 +1349,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, ...@@ -1349,7 +1349,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
else else
cf->can_id &= CAN_SFF_MASK; cf->can_id &= CAN_SFF_MASK;
cf->len = can_cc_dlc2len(cmd->u.leaf.log_message.dlc); can_frame_set_cc_len(cf, cmd->u.leaf.log_message.dlc & 0xF, priv->can.ctrlmode);
if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG; cf->can_id |= CAN_RTR_FLAG;
...@@ -1367,7 +1367,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, ...@@ -1367,7 +1367,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
cf->can_id |= CAN_EFF_FLAG; cf->can_id |= CAN_EFF_FLAG;
} }
cf->len = can_cc_dlc2len(rx_data[5]); can_frame_set_cc_len(cf, rx_data[5] & 0xF, priv->can.ctrlmode);
if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG; cf->can_id |= CAN_RTR_FLAG;
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/can/dev.h> #include <linux/can/dev.h>
#include <linux/can/error.h> #include <linux/can/error.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#define DRIVER_NAME "xilinx_can" #define DRIVER_NAME "xilinx_can"
...@@ -198,6 +199,7 @@ struct xcan_devtype_data { ...@@ -198,6 +199,7 @@ struct xcan_devtype_data {
* @bus_clk: Pointer to struct clk * @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk * @can_clk: Pointer to struct clk
* @devtype: Device type specific constants * @devtype: Device type specific constants
* @transceiver: Optional pointer to associated CAN transceiver
*/ */
struct xcan_priv { struct xcan_priv {
struct can_priv can; struct can_priv can;
...@@ -215,6 +217,7 @@ struct xcan_priv { ...@@ -215,6 +217,7 @@ struct xcan_priv {
struct clk *bus_clk; struct clk *bus_clk;
struct clk *can_clk; struct clk *can_clk;
struct xcan_devtype_data devtype; struct xcan_devtype_data devtype;
struct phy *transceiver;
}; };
/* CAN Bittiming constants as per Xilinx CAN specs */ /* CAN Bittiming constants as per Xilinx CAN specs */
...@@ -1419,6 +1422,10 @@ static int xcan_open(struct net_device *ndev) ...@@ -1419,6 +1422,10 @@ static int xcan_open(struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev); struct xcan_priv *priv = netdev_priv(ndev);
int ret; int ret;
ret = phy_power_on(priv->transceiver);
if (ret)
return ret;
ret = pm_runtime_get_sync(priv->dev); ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) { if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
...@@ -1462,6 +1469,7 @@ static int xcan_open(struct net_device *ndev) ...@@ -1462,6 +1469,7 @@ static int xcan_open(struct net_device *ndev)
free_irq(ndev->irq, ndev); free_irq(ndev->irq, ndev);
err: err:
pm_runtime_put(priv->dev); pm_runtime_put(priv->dev);
phy_power_off(priv->transceiver);
return ret; return ret;
} }
...@@ -1483,6 +1491,7 @@ static int xcan_close(struct net_device *ndev) ...@@ -1483,6 +1491,7 @@ static int xcan_close(struct net_device *ndev)
close_candev(ndev); close_candev(ndev);
pm_runtime_put(priv->dev); pm_runtime_put(priv->dev);
phy_power_off(priv->transceiver);
return 0; return 0;
} }
...@@ -1713,6 +1722,7 @@ static int xcan_probe(struct platform_device *pdev) ...@@ -1713,6 +1722,7 @@ static int xcan_probe(struct platform_device *pdev)
{ {
struct net_device *ndev; struct net_device *ndev;
struct xcan_priv *priv; struct xcan_priv *priv;
struct phy *transceiver;
const struct of_device_id *of_id; const struct of_device_id *of_id;
const struct xcan_devtype_data *devtype = &xcan_axi_data; const struct xcan_devtype_data *devtype = &xcan_axi_data;
void __iomem *addr; void __iomem *addr;
...@@ -1843,6 +1853,14 @@ static int xcan_probe(struct platform_device *pdev) ...@@ -1843,6 +1853,14 @@ static int xcan_probe(struct platform_device *pdev)
goto err_free; goto err_free;
} }
transceiver = devm_phy_optional_get(&pdev->dev, NULL);
if (IS_ERR(transceiver)) {
ret = PTR_ERR(transceiver);
dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
goto err_free;
}
priv->transceiver = transceiver;
priv->write_reg = xcan_write_reg_le; priv->write_reg = xcan_write_reg_le;
priv->read_reg = xcan_read_reg_le; priv->read_reg = xcan_read_reg_le;
...@@ -1869,6 +1887,7 @@ static int xcan_probe(struct platform_device *pdev) ...@@ -1869,6 +1887,7 @@ static int xcan_probe(struct platform_device *pdev)
goto err_disableclks; goto err_disableclks;
} }
of_can_transceiver(ndev);
pm_runtime_put(&pdev->dev); pm_runtime_put(&pdev->dev);
if (priv->devtype.flags & XCAN_FLAG_CANFD_2) { if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net> /* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
* Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de> * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (C) 2020, 2023 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/ */
#ifndef _CAN_LENGTH_H #ifndef _CAN_LENGTH_H
#define _CAN_LENGTH_H #define _CAN_LENGTH_H
#include <linux/bits.h>
#include <linux/can.h> #include <linux/can.h>
#include <linux/can/netlink.h> #include <linux/can/netlink.h>
#include <linux/math.h>
/* /*
* Size of a Classical CAN Standard Frame * Size of a Classical CAN Standard Frame header in bits
* *
* Name of Field Bits * Name of Field Bits
* --------------------------------------------------------- * ---------------------------------------------------------
* Start-of-frame 1 * Start Of Frame (SOF) 1
* Identifier 11 * Arbitration field:
* Remote transmission request (RTR) 1 * base ID 11
* Identifier extension bit (IDE) 1 * Remote Transmission Request (RTR) 1
* Reserved bit (r0) 1 * Control field:
* Data length code (DLC) 4 * IDentifier Extension bit (IDE) 1
* Data field 0...64 * FD Format indicator (FDF) 1
* CRC 15 * Data Length Code (DLC) 4
* CRC delimiter 1 *
* ACK slot 1 * including all fields preceding the data field, ignoring bitstuffing
* ACK delimiter 1 */
* End-of-frame (EOF) 7 #define CAN_FRAME_HEADER_SFF_BITS 19
* Inter frame spacing 3
/*
* Size of a Classical CAN Extended Frame header in bits
* *
* rounded up and ignoring bitstuffing * Name of Field Bits
* ---------------------------------------------------------
* Start Of Frame (SOF) 1
* Arbitration field:
* base ID 11
* Substitute Remote Request (SRR) 1
* IDentifier Extension bit (IDE) 1
* ID extension 18
* Remote Transmission Request (RTR) 1
* Control field:
* FD Format indicator (FDF) 1
* Reserved bit (r0) 1
* Data length code (DLC) 4
*
* including all fields preceding the data field, ignoring bitstuffing
*/ */
#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8) #define CAN_FRAME_HEADER_EFF_BITS 39
/* /*
* Size of a Classical CAN Extended Frame * Size of a CAN-FD Standard Frame in bits
*
* Name of Field Bits
* ---------------------------------------------------------
* Start Of Frame (SOF) 1
* Arbitration field:
* base ID 11
* Remote Request Substitution (RRS) 1
* Control field:
* IDentifier Extension bit (IDE) 1
* FD Format indicator (FDF) 1
* Reserved bit (res) 1
* Bit Rate Switch (BRS) 1
* Error Status Indicator (ESI) 1
* Data length code (DLC) 4
*
* including all fields preceding the data field, ignoring bitstuffing
*/
#define CANFD_FRAME_HEADER_SFF_BITS 22
/*
* Size of a CAN-FD Extended Frame in bits
*
* Name of Field Bits
* ---------------------------------------------------------
* Start Of Frame (SOF) 1
* Arbitration field:
* base ID 11
* Substitute Remote Request (SRR) 1
* IDentifier Extension bit (IDE) 1
* ID extension 18
* Remote Request Substitution (RRS) 1
* Control field:
* FD Format indicator (FDF) 1
* Reserved bit (res) 1
* Bit Rate Switch (BRS) 1
* Error Status Indicator (ESI) 1
* Data length code (DLC) 4
*
* including all fields preceding the data field, ignoring bitstuffing
*/
#define CANFD_FRAME_HEADER_EFF_BITS 41
/*
* Size of a CAN CRC Field in bits
* *
* Name of Field Bits * Name of Field Bits
* --------------------------------------------------------- * ---------------------------------------------------------
* Start-of-frame 1 * CRC sequence (CRC15) 15
* Identifier A 11 * CRC Delimiter 1
* Substitute remote request (SRR) 1 *
* Identifier extension bit (IDE) 1 * ignoring bitstuffing
* Identifier B 18 */
* Remote transmission request (RTR) 1 #define CAN_FRAME_CRC_FIELD_BITS 16
* Reserved bits (r1, r0) 2
* Data length code (DLC) 4 /*
* Data field 0...64 * Size of a CAN-FD CRC17 Field in bits (length: 0..16)
* CRC 15
* CRC delimiter 1
* ACK slot 1
* ACK delimiter 1
* End-of-frame (EOF) 7
* Inter frame spacing 3
* *
* rounded up and ignoring bitstuffing * Name of Field Bits
* ---------------------------------------------------------
* Stuff Count 4
* CRC Sequence (CRC17) 17
* CRC Delimiter 1
* Fixed stuff bits 6
*/ */
#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8) #define CANFD_FRAME_CRC17_FIELD_BITS 28
/* /*
* Size of a CAN-FD Standard Frame * Size of a CAN-FD CRC21 Field in bits (length: 20..64)
* *
* Name of Field Bits * Name of Field Bits
* --------------------------------------------------------- * ---------------------------------------------------------
* Start-of-frame 1 * Stuff Count 4
* Identifier 11 * CRC sequence (CRC21) 21
* Reserved bit (r1) 1 * CRC Delimiter 1
* Identifier extension bit (IDE) 1 * Fixed stuff bits 7
* Flexible data rate format (FDF) 1 */
* Reserved bit (r0) 1 #define CANFD_FRAME_CRC21_FIELD_BITS 33
* Bit Rate Switch (BRS) 1
* Error Status Indicator (ESI) 1
* Data length code (DLC) 4
* Data field 0...512
* Stuff Bit Count (SBC) 0...16: 4 20...64:5
* CRC 0...16: 17 20...64:21
* CRC delimiter (CD) 1
* ACK slot (AS) 1
* ACK delimiter (AD) 1
* End-of-frame (EOF) 7
* Inter frame spacing 3
*
* assuming CRC21, rounded up and ignoring bitstuffing
*/
#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
/* /*
* Size of a CAN-FD Extended Frame * Size of a CAN(-FD) Frame footer in bits
* *
* Name of Field Bits * Name of Field Bits
* --------------------------------------------------------- * ---------------------------------------------------------
* Start-of-frame 1 * ACK slot 1
* Identifier A 11 * ACK delimiter 1
* Substitute remote request (SRR) 1 * End Of Frame (EOF) 7
* Identifier extension bit (IDE) 1 *
* Identifier B 18 * including all fields following the CRC field
* Reserved bit (r1) 1 */
* Flexible data rate format (FDF) 1 #define CAN_FRAME_FOOTER_BITS 9
* Reserved bit (r0) 1
* Bit Rate Switch (BRS) 1 /*
* Error Status Indicator (ESI) 1 * First part of the Inter Frame Space
* Data length code (DLC) 4 * (a.k.a. IMF - intermission field)
* Data field 0...512 */
* Stuff Bit Count (SBC) 0...16: 4 20...64:5 #define CAN_INTERMISSION_BITS 3
* CRC 0...16: 17 20...64:21
* CRC delimiter (CD) 1 /**
* ACK slot (AS) 1 * can_bitstuffing_len() - Calculate the maximum length with bitstuffing
* ACK delimiter (AD) 1 * @destuffed_len: length of a destuffed bit stream
* End-of-frame (EOF) 7 *
* Inter frame spacing 3 * The worst bit stuffing case is a sequence in which dominant and
* * recessive bits alternate every four bits:
* assuming CRC21, rounded up and ignoring bitstuffing *
*/ * Destuffed: 1 1111 0000 1111 0000 1111
#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8) * Stuffed: 1 1111o 0000i 1111o 0000i 1111o
*
* Nomenclature
*
* - "0": dominant bit
* - "o": dominant stuff bit
* - "1": recessive bit
* - "i": recessive stuff bit
*
* Aside from the first bit, one stuff bit is added every four bits.
*
* Return: length of the stuffed bit stream in the worst case scenario.
*/
#define can_bitstuffing_len(destuffed_len) \
(destuffed_len + (destuffed_len - 1) / 4)
#define __can_bitstuffing_len(bitstuffing, destuffed_len) \
(bitstuffing ? can_bitstuffing_len(destuffed_len) : \
destuffed_len)
#define __can_cc_frame_bits(is_eff, bitstuffing, \
intermission, data_len) \
( \
__can_bitstuffing_len(bitstuffing, \
(is_eff ? CAN_FRAME_HEADER_EFF_BITS : \
CAN_FRAME_HEADER_SFF_BITS) + \
(data_len) * BITS_PER_BYTE + \
CAN_FRAME_CRC_FIELD_BITS) + \
CAN_FRAME_FOOTER_BITS + \
(intermission ? CAN_INTERMISSION_BITS : 0) \
)
#define __can_fd_frame_bits(is_eff, bitstuffing, \
intermission, data_len) \
( \
__can_bitstuffing_len(bitstuffing, \
(is_eff ? CANFD_FRAME_HEADER_EFF_BITS : \
CANFD_FRAME_HEADER_SFF_BITS) + \
(data_len) * BITS_PER_BYTE) + \
((data_len) <= 16 ? \
CANFD_FRAME_CRC17_FIELD_BITS : \
CANFD_FRAME_CRC21_FIELD_BITS) + \
CAN_FRAME_FOOTER_BITS + \
(intermission ? CAN_INTERMISSION_BITS : 0) \
)
/**
* can_frame_bits() - Calculate the number of bits on the wire in a
* CAN frame
* @is_fd: true: CAN-FD frame; false: Classical CAN frame.
* @is_eff: true: Extended frame; false: Standard frame.
* @bitstuffing: true: calculate the bitstuffing worst case; false:
* calculate the bitstuffing best case (no dynamic
* bitstuffing). CAN-FD's fixed stuff bits are always included.
* @intermission: if and only if true, include the inter frame space
* assuming no bus idle (i.e. only the intermission). Strictly
* speaking, the inter frame space is not part of the
* frame. However, it is needed when calculating the delay
* between the Start Of Frame of two consecutive frames.
* @data_len: length of the data field in bytes. Correspond to
* can(fd)_frame->len. Should be zero for remote frames. No
* sanitization is done on @data_len and it shall have no side
* effects.
*
* Return: the numbers of bits on the wire of a CAN frame.
*/
#define can_frame_bits(is_fd, is_eff, bitstuffing, \
intermission, data_len) \
( \
is_fd ? __can_fd_frame_bits(is_eff, bitstuffing, \
intermission, data_len) : \
__can_cc_frame_bits(is_eff, bitstuffing, \
intermission, data_len) \
)
/*
* Number of bytes in a CAN frame
* (rounded up, including intermission)
*/
#define can_frame_bytes(is_fd, is_eff, bitstuffing, data_len) \
DIV_ROUND_UP(can_frame_bits(is_fd, is_eff, bitstuffing, \
true, data_len), \
BITS_PER_BYTE)
/* /*
* Maximum size of a Classical CAN frame * Maximum size of a Classical CAN frame
* (rounded up and ignoring bitstuffing) * (rounded up, ignoring bitstuffing but including intermission)
*/ */
#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN) #define CAN_FRAME_LEN_MAX can_frame_bytes(false, true, false, CAN_MAX_DLEN)
/* /*
* Maximum size of a CAN-FD frame * Maximum size of a CAN-FD frame
* (rounded up and ignoring bitstuffing) * (rounded up, ignoring dynamic bitstuffing but including intermission)
*/ */
#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN) #define CANFD_FRAME_LEN_MAX can_frame_bytes(true, true, false, CANFD_MAX_DLEN)
/* /*
* can_cc_dlc2len(value) - convert a given data length code (dlc) of a * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
......
...@@ -285,6 +285,5 @@ struct can_filter { ...@@ -285,6 +285,5 @@ struct can_filter {
}; };
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* !_UAPI_CAN_H */ #endif /* !_UAPI_CAN_H */
...@@ -49,6 +49,8 @@ ...@@ -49,6 +49,8 @@
#include <linux/can.h> #include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW) #define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
enum { enum {
SCM_CAN_RAW_ERRQUEUE = 1, SCM_CAN_RAW_ERRQUEUE = 1,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment