Commit 40bc471d authored by Shannon Nelson, committed by David S. Miller

ionic: add tx/rx-push support with device Component Memory Buffers

The ionic device has on-board memory (CMB) that can be used
for descriptors as a way to speed descriptor access for faster
packet processing.  It is rumored to improve latency and/or
packets-per-second for some profiles of small packet traffic,
although your mileage may vary.
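As a rough illustration (not part of the patch text itself): with a CMB-capable device and an ethtool recent enough to expose the tx-push/rx-push ring parameters, the on-chip rings could be toggled roughly like this, where "eth0" is only a placeholder interface name:

    # enable on-chip (CMB) descriptor rings for both directions (example only)
    ethtool -G eth0 tx-push on rx-push on
    # show current ring settings, including the push state
    ethtool -g eth0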
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5b4e9a7a
@@ -352,6 +352,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_out_reset:
 	ionic_reset(ionic);
 err_out_teardown:
+	ionic_dev_teardown(ionic);
 	pci_clear_master(pdev);
 	/* Don't fail the probe for these errors, keep
 	 * the hw interface around for inspection
@@ -390,6 +391,7 @@ static void ionic_remove(struct pci_dev *pdev)
 	ionic_port_reset(ionic);
 	ionic_reset(ionic);
+	ionic_dev_teardown(ionic);
 	pci_clear_master(pdev);
 	ionic_unmap_bars(ionic);
 	pci_release_regions(pdev);
...
@@ -92,6 +92,7 @@ int ionic_dev_setup(struct ionic *ionic)
 	unsigned int num_bars = ionic->num_bars;
 	struct ionic_dev *idev = &ionic->idev;
 	struct device *dev = ionic->dev;
+	int size;
 	u32 sig;

 	/* BAR0: dev_cmd and interrupts */
@@ -133,9 +134,36 @@ int ionic_dev_setup(struct ionic *ionic)
 	idev->db_pages = bar->vaddr;
 	idev->phy_db_pages = bar->bus_addr;

+	/* BAR2: optional controller memory mapping */
+	bar++;
+	mutex_init(&idev->cmb_inuse_lock);
+	if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
+		idev->cmb_inuse = NULL;
+		return 0;
+	}
+
+	idev->phy_cmb_pages = bar->bus_addr;
+	idev->cmb_npages = bar->len / PAGE_SIZE;
+	size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+	idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
+	if (!idev->cmb_inuse)
+		dev_warn(dev, "No memory for CMB, disabling\n");
+
 	return 0;
 }
+void ionic_dev_teardown(struct ionic *ionic)
+{
+	struct ionic_dev *idev = &ionic->idev;
+
+	kfree(idev->cmb_inuse);
+	idev->cmb_inuse = NULL;
+	idev->phy_cmb_pages = 0;
+	idev->cmb_npages = 0;
+
+	mutex_destroy(&idev->cmb_inuse_lock);
+}
+
 /* Devcmd Interface */

 bool ionic_is_fw_running(struct ionic_dev *idev)
 {
@@ -571,6 +599,33 @@ int ionic_db_page_num(struct ionic_lif *lif, int pid)
 	return (lif->hw_index * lif->dbid_count) + pid;
 }
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+{
+	struct ionic_dev *idev = &lif->ionic->idev;
+	int ret;
+
+	mutex_lock(&idev->cmb_inuse_lock);
+	ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+	mutex_unlock(&idev->cmb_inuse_lock);
+
+	if (ret < 0)
+		return ret;
+
+	*pgid = ret;
+	*pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+
+	return 0;
+}
+
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
+{
+	struct ionic_dev *idev = &lif->ionic->idev;
+
+	mutex_lock(&idev->cmb_inuse_lock);
+	bitmap_release_region(idev->cmb_inuse, pgid, order);
+	mutex_unlock(&idev->cmb_inuse_lock);
+}
+
 int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
 		  struct ionic_intr_info *intr,
 		  unsigned int num_descs, size_t desc_size)
@@ -679,6 +734,18 @@ void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
 		cur->desc = base + (i * q->desc_size);
 }
+void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
+{
+	struct ionic_desc_info *cur;
+	unsigned int i;
+
+	q->cmb_base = base;
+	q->cmb_base_pa = base_pa;
+
+	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+		cur->cmb_desc = base + (i * q->desc_size);
+}
+
 void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
 {
 	struct ionic_desc_info *cur;
...
@@ -159,6 +159,11 @@ struct ionic_dev {
 	struct ionic_intr __iomem *intr_ctrl;
 	u64 __iomem *intr_status;

+	struct mutex cmb_inuse_lock; /* for cmb_inuse */
+	unsigned long *cmb_inuse;
+	dma_addr_t phy_cmb_pages;
+	u32 cmb_npages;
+
 	u32 port_info_sz;
 	struct ionic_port_info *port_info;
 	dma_addr_t port_info_pa;
@@ -203,6 +208,7 @@ struct ionic_desc_info {
 		struct ionic_rxq_desc *rxq_desc;
 		struct ionic_admin_cmd *adminq_desc;
 	};
+	void __iomem *cmb_desc;
 	union {
 		void *sg_desc;
 		struct ionic_txq_sg_desc *txq_sg_desc;
@@ -241,12 +247,14 @@ struct ionic_queue {
 		struct ionic_rxq_desc *rxq;
 		struct ionic_admin_cmd *adminq;
 	};
+	void __iomem *cmb_base;
 	union {
 		void *sg_base;
 		struct ionic_txq_sg_desc *txq_sgl;
 		struct ionic_rxq_sg_desc *rxq_sgl;
 	};
 	dma_addr_t base_pa;
+	dma_addr_t cmb_base_pa;
 	dma_addr_t sg_base_pa;
 	unsigned int desc_size;
 	unsigned int sg_desc_size;
@@ -309,6 +317,7 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
 void ionic_init_devinfo(struct ionic *ionic);
 int ionic_dev_setup(struct ionic *ionic);
+void ionic_dev_teardown(struct ionic *ionic);
 void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
 u8 ionic_dev_cmd_status(struct ionic_dev *idev);
@@ -344,6 +353,9 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
 int ionic_db_page_num(struct ionic_lif *lif, int pid);

+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
 int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
 		  struct ionic_intr_info *intr,
 		  unsigned int num_descs, size_t desc_size);
@@ -360,6 +372,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
 		 unsigned int num_descs, size_t desc_size,
 		 size_t sg_desc_size, unsigned int pid);
 void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
+void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
 void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
 void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
 		  void *cb_arg);
...
@@ -511,6 +511,87 @@ static int ionic_set_coalesce(struct net_device *netdev,
 	return 0;
 }

+static int ionic_validate_cmb_config(struct ionic_lif *lif,
+				     struct ionic_queue_params *qparam)
+{
+	int pages_have, pages_required = 0;
+	unsigned long sz;
+
+	if (!lif->ionic->idev.cmb_inuse &&
+	    (qparam->cmb_tx || qparam->cmb_rx)) {
+		netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (qparam->cmb_tx) {
+		if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
+			netdev_info(lif->netdev,
+				    "CMB rings for tx-push are not supported on this device\n");
+			return -EOPNOTSUPP;
+		}
+
+		sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
+		pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
+	}
+
+	if (qparam->cmb_rx) {
+		if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
+			netdev_info(lif->netdev,
+				    "CMB rings for rx-push are not supported on this device\n");
+			return -EOPNOTSUPP;
+		}
+
+		sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
+		pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
+	}
+
+	pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
+	if (pages_required > pages_have) {
+		netdev_info(lif->netdev,
+			    "Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d",
+			    pages_required, pages_have);
+		return -ENOMEM;
+	}
+
+	return pages_required;
+}
+
+static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
+{
+	struct ionic_queue_params qparam;
+	int pages_used;
+
+	if (netif_running(lif->netdev)) {
+		netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
+		return -EBUSY;
+	}
+
+	ionic_init_queue_params(lif, &qparam);
+	qparam.cmb_tx = cmb_tx;
+	qparam.cmb_rx = cmb_rx;
+	pages_used = ionic_validate_cmb_config(lif, &qparam);
+	if (pages_used < 0)
+		return pages_used;
+
+	if (cmb_tx)
+		set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+	else
+		clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+
+	if (cmb_rx)
+		set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+	else
+		clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+
+	if (cmb_tx || cmb_rx)
+		netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
+			    cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages_used);
+	else
+		netdev_info(lif->netdev, "Disabling CMB rings\n");
+
+	return 0;
+}
+
 static void ionic_get_ringparam(struct net_device *netdev,
 				struct ethtool_ringparam *ring,
 				struct kernel_ethtool_ringparam *kernel_ring,
@@ -522,6 +603,8 @@ static void ionic_get_ringparam(struct net_device *netdev,
 	ring->tx_pending = lif->ntxq_descs;
 	ring->rx_max_pending = IONIC_MAX_RX_DESC;
 	ring->rx_pending = lif->nrxq_descs;
+	kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+	kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
 }

 static int ionic_set_ringparam(struct net_device *netdev,
@@ -551,9 +634,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
 	/* if nothing to do return success */
 	if (ring->tx_pending == lif->ntxq_descs &&
-	    ring->rx_pending == lif->nrxq_descs)
+	    ring->rx_pending == lif->nrxq_descs &&
+	    kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
+	    kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
 		return 0;

+	qparam.ntxq_descs = ring->tx_pending;
+	qparam.nrxq_descs = ring->rx_pending;
+	qparam.cmb_tx = kernel_ring->tx_push;
+	qparam.cmb_rx = kernel_ring->rx_push;
+
+	err = ionic_validate_cmb_config(lif, &qparam);
+	if (err < 0)
+		return err;
+
+	if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
+	    kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
+		err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
+					     kernel_ring->rx_push);
+		if (err < 0)
+			return err;
+	}
+
 	if (ring->tx_pending != lif->ntxq_descs)
 		netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
 			    lif->ntxq_descs, ring->tx_pending);
@@ -569,9 +671,6 @@ static int ionic_set_ringparam(struct net_device *netdev,
 		return 0;
 	}

-	qparam.ntxq_descs = ring->tx_pending;
-	qparam.nrxq_descs = ring->rx_pending;
-
 	mutex_lock(&lif->queue_lock);
 	err = ionic_reconfigure_queues(lif, &qparam);
 	mutex_unlock(&lif->queue_lock);
@@ -638,7 +737,7 @@ static int ionic_set_channels(struct net_device *netdev,
 			   lif->nxqs, ch->combined_count);

 		qparam.nxqs = ch->combined_count;
-		qparam.intr_split = 0;
+		qparam.intr_split = false;
 	} else {
 		max_cnt /= 2;
 		if (ch->rx_count > max_cnt)
@@ -654,9 +753,13 @@ static int ionic_set_channels(struct net_device *netdev,
 			   lif->nxqs, ch->rx_count);

 		qparam.nxqs = ch->rx_count;
-		qparam.intr_split = 1;
+		qparam.intr_split = true;
 	}

+	err = ionic_validate_cmb_config(lif, &qparam);
+	if (err < 0)
+		return err;
+
 	/* if we're not running, just set the values and return */
 	if (!netif_running(lif->netdev)) {
 		lif->nxqs = qparam.nxqs;
@@ -965,6 +1068,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
 				     ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
+	.supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
+				 ETHTOOL_RING_USE_RX_PUSH,
 	.get_drvinfo = ionic_get_drvinfo,
 	.get_regs_len = ionic_get_regs_len,
 	.get_regs = ionic_get_regs,
...
@@ -3073,9 +3073,10 @@ union ionic_adminq_comp {

 #define IONIC_BARS_MAX		6
 #define IONIC_PCI_BAR_DBELL	1
+#define IONIC_PCI_BAR_CMB	2

-/* BAR0 */
 #define IONIC_BAR0_SIZE		0x8000
+#define IONIC_BAR2_SIZE		0x800000

 #define IONIC_BAR0_DEV_INFO_REGS_OFFSET	0x0000
 #define IONIC_BAR0_DEV_CMD_REGS_OFFSET	0x0800
...
@@ -26,9 +26,12 @@
 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
 	[IONIC_QTYPE_ADMINQ] = 0,  /* 0 = Base version with CQ support */
 	[IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
-	[IONIC_QTYPE_RXQ] = 0,     /* 0 = Base version with CQ+SG support */
-	[IONIC_QTYPE_TXQ] = 1,     /* 0 = Base version with CQ+SG support
+	[IONIC_QTYPE_RXQ] = 2,     /* 0 = Base version with CQ+SG support
+				    * 2 = ... with CMB rings
+				    */
+	[IONIC_QTYPE_TXQ] = 3,     /* 0 = Base version with CQ+SG support
 				    * 1 = ... with Tx SG version 1
+				    * 3 = ... with CMB rings
 				    */
 };
@@ -397,6 +400,15 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
 		qcq->q_base_pa = 0;
 	}

+	if (qcq->cmb_q_base) {
+		iounmap(qcq->cmb_q_base);
+		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
+		qcq->cmb_pgid = 0;
+		qcq->cmb_order = 0;
+		qcq->cmb_q_base = NULL;
+		qcq->cmb_q_base_pa = 0;
+	}
+
 	if (qcq->cq_base) {
 		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
 		qcq->cq_base = NULL;
@@ -608,6 +620,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
 		ionic_cq_bind(&new->cq, &new->q);
 	} else {
+		/* regular DMA q descriptors */
 		new->q_size = PAGE_SIZE + (num_descs * desc_size);
 		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
 						 GFP_KERNEL);
@@ -620,6 +633,33 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
 		ionic_q_map(&new->q, q_base, q_base_pa);

+		if (flags & IONIC_QCQ_F_CMB_RINGS) {
+			/* on-chip CMB q descriptors */
+			new->cmb_q_size = num_descs * desc_size;
+			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
+
+			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
+					    new->cmb_order);
+			if (err) {
+				netdev_err(lif->netdev,
+					   "Cannot allocate queue order %d from cmb: err %d\n",
+					   new->cmb_order, err);
+				goto err_out_free_q;
+			}
+
+			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
+			if (!new->cmb_q_base) {
+				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
+				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
+				err = -ENOMEM;
+				goto err_out_free_q;
+			}
+
+			new->cmb_q_base_pa -= idev->phy_cmb_pages;
+			ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
+		}
+
+		/* cq DMA descriptors */
 		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
 		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
 						  GFP_KERNEL);
@@ -658,6 +698,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,

 err_out_free_cq:
 	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
 err_out_free_q:
+	if (new->cmb_q_base) {
+		iounmap(new->cmb_q_base);
+		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
+	}
 	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
 err_out_free_cq_info:
 	vfree(new->cq.info);
@@ -739,6 +783,8 @@ static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
 	qcq->cq.tail_idx = 0;
 	qcq->cq.done_color = 1;
 	memset(qcq->q_base, 0, qcq->q_size);
+	if (qcq->cmb_q_base)
+		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
 	memset(qcq->cq_base, 0, qcq->cq_size);
 	memset(qcq->sg_base, 0, qcq->sg_size);
 }
@@ -758,6 +804,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
 			.index = cpu_to_le32(q->index),
 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
 					     IONIC_QINIT_F_SG),
+			.intr_index = cpu_to_le16(qcq->intr.index),
 			.pid = cpu_to_le16(q->pid),
 			.ring_size = ilog2(q->num_descs),
 			.ring_base = cpu_to_le64(q->base_pa),
@@ -766,17 +813,19 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
 			.features = cpu_to_le64(q->features),
 		},
 	};
-	unsigned int intr_index;
 	int err;

-	intr_index = qcq->intr.index;
-
-	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
+	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
+		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
+		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
+	}

 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
+	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
 	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
 	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
 	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
@@ -834,6 +883,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
 	};
 	int err;

+	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
+		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
+		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
+	}
+
 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
@@ -2010,8 +2064,13 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+
+	if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
+		flags |= IONIC_QCQ_F_CMB_RINGS;
+
 	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
 		flags |= IONIC_QCQ_F_INTR;
+
 	for (i = 0; i < lif->nxqs; i++) {
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2032,6 +2091,9 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)

 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
+
+	if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
+		flags |= IONIC_QCQ_F_CMB_RINGS;

 	num_desc = lif->nrxq_descs;
 	desc_sz = sizeof(struct ionic_rxq_desc);
 	comp_sz = sizeof(struct ionic_rxq_comp);
@@ -2707,6 +2769,55 @@ static const struct net_device_ops ionic_netdev_ops = {
 	.ndo_get_vf_stats = ionic_get_vf_stats,
 };
+static int ionic_cmb_reconfig(struct ionic_lif *lif,
+			      struct ionic_queue_params *qparam)
+{
+	struct ionic_queue_params start_qparams;
+	int err = 0;
+
+	/* When changing CMB queue parameters, we're using limited
+	 * on-device memory and don't have extra memory to use for
+	 * duplicate allocations, so we free it all first then
+	 * re-allocate with the new parameters.
+	 */
+
+	/* Checkpoint for possible unwind */
+	ionic_init_queue_params(lif, &start_qparams);
+
+	/* Stop and free the queues */
+	ionic_stop_queues_reconfig(lif);
+	ionic_txrx_free(lif);
+
+	/* Set up new qparams */
+	ionic_set_queue_params(lif, qparam);
+
+	if (netif_running(lif->netdev)) {
+		/* Alloc and start the new configuration */
+		err = ionic_txrx_alloc(lif);
+		if (err) {
+			dev_warn(lif->ionic->dev,
+				 "CMB reconfig failed, restoring values: %d\n", err);
+
+			/* Back out the changes */
+			ionic_set_queue_params(lif, &start_qparams);
+			err = ionic_txrx_alloc(lif);
+			if (err) {
+				dev_err(lif->ionic->dev,
+					"CMB restore failed: %d\n", err);
+				goto errout;
+			}
+		}
+
+		ionic_start_queues_reconfig(lif);
+	} else {
+		/* This was detached in ionic_stop_queues_reconfig() */
+		netif_device_attach(lif->netdev);
+	}
+
+errout:
+	return err;
+}
+
 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
 {
 	/* only swapping the queues, not the napi, flags, or other stuff */
@@ -2749,6 +2860,11 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 	unsigned int flags, i;
 	int err = 0;

+	/* Are we changing q params while CMB is on */
+	if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
+	    (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
+		return ionic_cmb_reconfig(lif, qparam);
+
 	/* allocate temporary qcq arrays to hold new queue structs */
 	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
 		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
@@ -2785,6 +2901,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

 	for (i = 0; i < qparam->nxqs; i++) {
+		/* If missing, short placeholder qcq needed for swap */
+		if (!lif->txqcqs[i]) {
+			flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+					      4, desc_sz, comp_sz, sg_desc_sz,
+					      lif->kern_pid, &lif->txqcqs[i]);
+			if (err)
+				goto err_out;
+		}
+
 		flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2804,6 +2930,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 		comp_sz *= 2;

 	for (i = 0; i < qparam->nxqs; i++) {
+		/* If missing, short placeholder qcq needed for swap */
+		if (!lif->rxqcqs[i]) {
+			flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
+			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+					      4, desc_sz, comp_sz, sg_desc_sz,
+					      lif->kern_pid, &lif->rxqcqs[i]);
+			if (err)
+				goto err_out;
+		}
+
 		flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2853,9 +2989,14 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 		lif->tx_coalesce_hw = lif->rx_coalesce_hw;
 	}

-	/* clear existing interrupt assignments */
+	/* Clear existing interrupt assignments. We check for NULL here
+	 * because we're checking the whole array for potential qcqs, not
+	 * just those qcqs that have just been set up.
+	 */
 	for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
-		ionic_qcq_intr_free(lif, lif->txqcqs[i]);
-		ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
+		if (lif->txqcqs[i])
+			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
+		if (lif->rxqcqs[i])
+			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
 	}
...
@@ -59,6 +59,7 @@ struct ionic_rx_stats {
 #define IONIC_QCQ_F_TX_STATS	BIT(3)
 #define IONIC_QCQ_F_RX_STATS	BIT(4)
 #define IONIC_QCQ_F_NOTIFYQ	BIT(5)
+#define IONIC_QCQ_F_CMB_RINGS	BIT(6)

 struct ionic_qcq {
 	void *q_base;
@@ -70,6 +71,11 @@ struct ionic_qcq {
 	void *sg_base;
 	dma_addr_t sg_base_pa;
 	u32 sg_size;
+	void __iomem *cmb_q_base;
+	phys_addr_t cmb_q_base_pa;
+	u32 cmb_q_size;
+	u32 cmb_pgid;
+	u32 cmb_order;
 	struct dim dim;
 	struct ionic_queue q;
 	struct ionic_cq cq;
@@ -142,6 +148,8 @@ enum ionic_lif_state_flags {
 	IONIC_LIF_F_BROKEN,
 	IONIC_LIF_F_TX_DIM_INTR,
 	IONIC_LIF_F_RX_DIM_INTR,
+	IONIC_LIF_F_CMB_TX_RINGS,
+	IONIC_LIF_F_CMB_RX_RINGS,

 	/* leave this as last */
 	IONIC_LIF_F_STATE_SIZE
@@ -245,8 +253,10 @@ struct ionic_queue_params {
 	unsigned int nxqs;
 	unsigned int ntxq_descs;
 	unsigned int nrxq_descs;
-	unsigned int intr_split;
 	u64 rxq_features;
+	bool intr_split;
+	bool cmb_tx;
+	bool cmb_rx;
 };
 static inline void ionic_init_queue_params(struct ionic_lif *lif,
@@ -255,8 +265,34 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
 	qparam->nxqs = lif->nxqs;
 	qparam->ntxq_descs = lif->ntxq_descs;
 	qparam->nrxq_descs = lif->nrxq_descs;
-	qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
 	qparam->rxq_features = lif->rxq_features;
+	qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+	qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+	qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+}
+
+static inline void ionic_set_queue_params(struct ionic_lif *lif,
+					  struct ionic_queue_params *qparam)
+{
+	lif->nxqs = qparam->nxqs;
+	lif->ntxq_descs = qparam->ntxq_descs;
+	lif->nrxq_descs = qparam->nrxq_descs;
+	lif->rxq_features = qparam->rxq_features;
+
+	if (qparam->intr_split)
+		set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+	else
+		clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+
+	if (qparam->cmb_tx)
+		set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+	else
+		clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+
+	if (qparam->cmb_rx)
+		set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+	else
+		clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
 }

 static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
...
@@ -402,6 +402,14 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 	return true;
 }

+static inline void ionic_write_cmb_desc(struct ionic_queue *q,
+					void __iomem *cmb_desc,
+					void *desc)
+{
+	if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
+		memcpy_toio(cmb_desc, desc, q->desc_size);
+}
+
 void ionic_rx_fill(struct ionic_queue *q)
 {
 	struct net_device *netdev = q->lif->netdev;
@@ -480,6 +488,8 @@ void ionic_rx_fill(struct ionic_queue *q)
 					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
 		desc_info->nbufs = nfrags;

+		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
 		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
 	}
@@ -943,7 +953,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
 	return 0;
 }

-static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
+static void ionic_tx_tso_post(struct ionic_queue *q,
+			      struct ionic_desc_info *desc_info,
 			      struct sk_buff *skb,
 			      dma_addr_t addr, u8 nsge, u16 len,
 			      unsigned int hdrlen, unsigned int mss,
@@ -951,6 +962,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 			      u16 vlan_tci, bool has_vlan,
 			      bool start, bool done)
 {
+	struct ionic_txq_desc *desc = desc_info->desc;
 	u8 flags = 0;
 	u64 cmd;
@@ -966,6 +978,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 	desc->hdr_len = cpu_to_le16(hdrlen);
 	desc->mss = cpu_to_le16(mss);

+	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
 	if (start) {
 		skb_tx_timestamp(skb);
 		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
@@ -1084,7 +1098,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 		seg_rem = min(tso_rem, mss);
 		done = (tso_rem == 0);

 		/* post descriptor */
-		ionic_tx_tso_post(q, desc, skb,
+		ionic_tx_tso_post(q, desc_info, skb,
 				  desc_addr, desc_nsge, desc_len,
 				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
 				  start, done);
@@ -1133,6 +1147,8 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
 	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
 	desc->csum_offset = cpu_to_le16(skb->csum_offset);

+	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
 	if (skb_csum_is_sctp(skb))
 		stats->crc32_csum++;
 	else
@@ -1170,6 +1186,8 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
 	desc->csum_start = 0;
 	desc->csum_offset = 0;

+	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
 	stats->csum_none++;
 }
...