Commit 3c8b3efc authored by Shrikrishna Khare, committed by David S. Miller

vmxnet3: allow variable length transmit data ring buffer

The vmxnet3 driver supports a transmit data ring, i.e. a set of fixed-size
buffers used by the driver to copy packet headers. Small packets that
fit these buffers are copied into them in their entirety.

Currently this buffer size is fixed at 128 bytes. This patch extends the
transmit data ring implementation to allow variable-length transmit
data ring buffers. The length of the buffer is read from the emulation
during initialization.
Signed-off-by: Sriram Rangarajan <rangarajans@vmware.com>
Signed-off-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f35c7480
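Before the diff, a brief illustration of the negotiation described in the commit message. This is a minimal, self-contained C sketch, not driver code: it mirrors the checks the patch adds to vmxnet3_open(), using the constants the patch introduces (VMXNET3_TXDATA_DESC_MIN_SIZE, VMXNET3_TXDATA_DESC_MAX_SIZE, VMXNET3_TXDATA_DESC_SIZE_ALIGN). The helper name pick_txdata_desc_size() and the test harness in main() are hypothetical; the 128-byte fallback corresponds to sizeof(struct Vmxnet3_TxDataDesc), the legacy fixed buffer size mentioned in the commit message.

#include <stdint.h>
#include <stdio.h>

/* Constants as added to the vmxnet3 definitions header by this patch. */
#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
#define VMXNET3_TXDATA_DESC_SIZE_MASK  (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
#define VMXNET3_TXDATA_DESC_MIN_SIZE   128
#define VMXNET3_TXDATA_DESC_MAX_SIZE   2048

/* Legacy fixed buffer size (sizeof(struct Vmxnet3_TxDataDesc) == 128). */
#define LEGACY_TXDATA_DESC_SIZE        128

/*
 * Hypothetical helper: given the size reported by the emulation in
 * response to VMXNET3_CMD_GET_TXDATA_DESC_SIZE, return the size the
 * driver would actually use.  Values outside [128, 2048] or not a
 * multiple of 64 fall back to the legacy 128-byte buffer, matching
 * the checks the patch adds to vmxnet3_open().
 */
static uint16_t pick_txdata_desc_size(uint16_t reported)
{
        if (reported < VMXNET3_TXDATA_DESC_MIN_SIZE ||
            reported > VMXNET3_TXDATA_DESC_MAX_SIZE ||
            (reported & VMXNET3_TXDATA_DESC_SIZE_MASK))
                return LEGACY_TXDATA_DESC_SIZE;
        return reported;
}

int main(void)
{
        printf("%u\n", (unsigned)pick_txdata_desc_size(256));  /* 256: valid       */
        printf("%u\n", (unsigned)pick_txdata_desc_size(100));  /* 128: below min   */
        printf("%u\n", (unsigned)pick_txdata_desc_size(4096)); /* 128: above max   */
        printf("%u\n", (unsigned)pick_txdata_desc_size(200));  /* 128: not aligned */
        return 0;
}

In the driver itself this logic is guarded by VMXNET3_VERSION_GE_3(adapter); older devices keep the fixed-size descriptor. The chosen size is stored in adapter->txdata_desc_size, passed through vmxnet3_create_queues() into each tx queue, and reported back to the device via tqc->txDataRingDescSize in vmxnet3_setup_driver_shared().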
@@ -92,6 +92,7 @@ enum {
         VMXNET3_CMD_GET_DEV_EXTRA_INFO,
         VMXNET3_CMD_GET_CONF_INTR,
         VMXNET3_CMD_GET_RESERVED1,
+        VMXNET3_CMD_GET_TXDATA_DESC_SIZE
 };

 /*
@@ -377,6 +378,10 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_RING_SIZE_ALIGN 32
 #define VMXNET3_RING_SIZE_MASK  (VMXNET3_RING_SIZE_ALIGN - 1)

+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK  (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
 /* Max ring size */
 #define VMXNET3_TX_RING_MAX_SIZE   4096
 #define VMXNET3_TC_RING_MAX_SIZE   4096
@@ -384,6 +389,9 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_RX_RING2_MAX_SIZE  4096
 #define VMXNET3_RC_RING_MAX_SIZE   8192

+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
 /* a list of reasons for queue stop */
 enum {
@@ -470,7 +478,9 @@ struct Vmxnet3_TxQueueConf {
         __le32 compRingSize; /* # of comp desc */
         __le32 ddLen;        /* size of driver data */
         u8     intrIdx;
-        u8     _pad[7];
+        u8     _pad1[1];
+        __le16 txDataRingDescSize;
+        u8     _pad2[4];
 };
...
@@ -435,8 +435,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
                 tq->tx_ring.base = NULL;
         }
         if (tq->data_ring.base) {
-                dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
-                                  sizeof(struct Vmxnet3_TxDataDesc),
+                dma_free_coherent(&adapter->pdev->dev,
+                                  tq->data_ring.size * tq->txdata_desc_size,
                                   tq->data_ring.base, tq->data_ring.basePA);
                 tq->data_ring.base = NULL;
         }
@@ -478,8 +478,8 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
         tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
         tq->tx_ring.gen = VMXNET3_INIT_GEN;

-        memset(tq->data_ring.base, 0, tq->data_ring.size *
-               sizeof(struct Vmxnet3_TxDataDesc));
+        memset(tq->data_ring.base, 0,
+               tq->data_ring.size * tq->txdata_desc_size);

         /* reset the tx comp ring contents to 0 and reset comp ring states */
         memset(tq->comp_ring.base, 0, tq->comp_ring.size *
@@ -514,10 +514,10 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
         }

         tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
-                        tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+                        tq->data_ring.size * tq->txdata_desc_size,
                         &tq->data_ring.basePA, GFP_KERNEL);
         if (!tq->data_ring.base) {
-                netdev_err(adapter->netdev, "failed to allocate data ring\n");
+                netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
                 goto err;
         }
@@ -689,7 +689,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
         if (ctx->copy_size) {
                 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
                                                      tq->tx_ring.next2fill *
-                                                     sizeof(struct Vmxnet3_TxDataDesc));
+                                                     tq->txdata_desc_size);
                 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
                 ctx->sop_txd->dword[3] = 0;
@@ -873,8 +873,9 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                         ctx->eth_ip_hdr_size = 0;
                         ctx->l4_hdr_size = 0;
                         /* copy as much as allowed */
-                        ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
-                                             , skb_headlen(skb));
+                        ctx->copy_size = min_t(unsigned int,
+                                               tq->txdata_desc_size,
+                                               skb_headlen(skb));
                 }

                 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
@@ -885,7 +886,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                         goto err;
         }

-        if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
+        if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
                 tq->stats.oversized_hdr++;
                 ctx->copy_size = 0;
                 return 0;
@@ -2336,6 +2337,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
                 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
                 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
                 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+                tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
                 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
                 tqc->ddLen = cpu_to_le32(
                                 sizeof(struct vmxnet3_tx_buf_info) *
@@ -2689,7 +2691,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
-                      u32 rx_ring_size, u32 rx_ring2_size)
+                      u32 rx_ring_size, u32 rx_ring2_size,
+                      u16 txdata_desc_size)
 {
         int err = 0, i;
@@ -2698,6 +2701,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
                 tq->tx_ring.size = tx_ring_size;
                 tq->data_ring.size = tx_ring_size;
                 tq->comp_ring.size = tx_ring_size;
+                tq->txdata_desc_size = txdata_desc_size;
                 tq->shared = &adapter->tqd_start[i].ctrl;
                 tq->stopped = true;
                 tq->adapter = adapter;
@@ -2754,9 +2758,34 @@ vmxnet3_open(struct net_device *netdev)
         for (i = 0; i < adapter->num_tx_queues; i++)
                 spin_lock_init(&adapter->tx_queue[i].tx_lock);

-        err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+        if (VMXNET3_VERSION_GE_3(adapter)) {
+                unsigned long flags;
+                u16 txdata_desc_size;
+
+                spin_lock_irqsave(&adapter->cmd_lock, flags);
+                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+                                       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+                txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
+                                                         VMXNET3_REG_CMD);
+                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+                if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
+                    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
+                    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
+                        adapter->txdata_desc_size =
+                                sizeof(struct Vmxnet3_TxDataDesc);
+                } else {
+                        adapter->txdata_desc_size = txdata_desc_size;
+                }
+        } else {
+                adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
+        }
+
+        err = vmxnet3_create_queues(adapter,
+                                    adapter->tx_ring_size,
                                     adapter->rx_ring_size,
-                                    adapter->rx_ring2_size);
+                                    adapter->rx_ring2_size,
+                                    adapter->txdata_desc_size);
+
         if (err)
                 goto queue_err;
...
@@ -396,8 +396,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
                 buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
                 buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
                 buf[j++] = tq->data_ring.size;
-                /* transmit data ring buffer size */
-                buf[j++] = VMXNET3_HDR_COPY_SIZE;
+                buf[j++] = tq->txdata_desc_size;

                 buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
                 buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
@@ -591,7 +590,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
         vmxnet3_rq_destroy_all(adapter);

         err = vmxnet3_create_queues(adapter, new_tx_ring_size,
-                                    new_rx_ring_size, new_rx_ring2_size);
+                                    new_rx_ring_size, new_rx_ring2_size,
+                                    adapter->txdata_desc_size);

         if (err) {
                 /* failed, most likely because of OOM, try default
@@ -604,7 +604,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                 err = vmxnet3_create_queues(adapter,
                                             new_tx_ring_size,
                                             new_rx_ring_size,
-                                            new_rx_ring2_size);
+                                            new_rx_ring2_size,
+                                            adapter->txdata_desc_size);
                 if (err) {
                         netdev_err(netdev, "failed to create queues "
                                    "with default sizes. Closing it\n");
...
@@ -241,6 +241,7 @@ struct vmxnet3_tx_queue {
         int num_stop;   /* # of times the queue is
                          * stopped */
         int qid;
+        u16 txdata_desc_size;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));

 enum vmxnet3_rx_buf_type {
@@ -363,6 +364,9 @@ struct vmxnet3_adapter {
         u32 rx_ring_size;
         u32 rx_ring2_size;

+        /* Size of buffer in the data ring */
+        u16 txdata_desc_size;
+
         struct work_struct work;

         unsigned long state;    /* VMXNET3_STATE_BIT_xxx */
@@ -427,7 +431,8 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
-                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
+                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
+                      u16 txdata_desc_size);

 void vmxnet3_set_ethtool_ops(struct net_device *netdev);
...