Commit f019fb63 authored by Thomas Falcon, committed by Jakub Kicinski

ibmvnic: Introduce indirect subordinate Command Response Queue buffer

This patch introduces the infrastructure to send batched subordinate
Command Response Queue descriptors, which are used by the ibmvnic
driver to send TX frame and RX buffer descriptors.
Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Acked-by: Lijun Pan <ljp@linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c9003783
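
Note on intended use (not part of the patch): this change only allocates and frees the per-queue indirect buffer; the transmit path does not touch it yet. As a rough illustration of how the new ind_buf is meant to be used, the hypothetical sketch below stages descriptors in the DMA-coherent indir_arr and submits the whole batch through the driver's existing send_subcrq_indirect() helper, which wraps the H_SEND_SUB_CRQ_INDIRECT hcall. The example_* functions are illustrative names only; the real batching logic arrives in follow-up patches.

/* Hypothetical helpers -- a sketch of how the new ind_buf could be used.
 * Descriptors are staged in the coherent indirect array and handed to
 * firmware in a single indirect sub-CRQ hcall.
 */
static void example_stage_desc(struct ibmvnic_sub_crq_queue *scrq,
                               const union sub_crq *desc)
{
        struct ibmvnic_ind_xmit_queue *ind = &scrq->ind_buf;

        /* Caller is expected to flush before the array fills up. */
        if (ind->index < IBMVNIC_MAX_IND_DESCS)
                ind->indir_arr[ind->index++] = *desc;
}

static int example_flush_batch(struct ibmvnic_adapter *adapter,
                               struct ibmvnic_sub_crq_queue *scrq)
{
        struct ibmvnic_ind_xmit_queue *ind = &scrq->ind_buf;
        u64 entries = ind->index;
        int rc;

        if (!entries)
                return 0;

        /* One hcall submits every staged descriptor at once. */
        rc = send_subcrq_indirect(adapter, scrq->handle,
                                  (u64)ind->indir_dma, entries);
        if (!rc)
                ind->index = 0;
        return rc;
}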
@@ -2858,6 +2858,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
 	atomic_set(&scrq->used, 0);
 	scrq->cur = 0;
+	scrq->ind_buf.index = 0;
 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@ -2909,6 +2910,11 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 		}
 	}
+	dma_free_coherent(dev,
+			  IBMVNIC_IND_ARR_SZ,
+			  scrq->ind_buf.indir_arr,
+			  scrq->ind_buf.indir_dma);
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)scrq->msgs, 2);
@@ -2955,6 +2961,17 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+	scrq->ind_buf.index = 0;
+	scrq->ind_buf.indir_arr =
+		dma_alloc_coherent(dev,
+				   IBMVNIC_IND_ARR_SZ,
+				   &scrq->ind_buf.indir_dma,
+				   GFP_KERNEL);
+	if (!scrq->ind_buf.indir_arr)
+		goto indir_failed;
 	spin_lock_init(&scrq->lock);
 	netdev_dbg(adapter->netdev,
@@ -2963,6 +2980,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 	return scrq;
+indir_failed:
+	do {
+		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+					adapter->vdev->unit_address,
+					scrq->crq_num);
+	} while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc));
 reg_failed:
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
...
@@ -31,6 +31,8 @@
 #define IBMVNIC_BUFFS_PER_POOL	100
 #define IBMVNIC_MAX_QUEUES	16
 #define IBMVNIC_MAX_QUEUE_SZ	4096
+#define IBMVNIC_MAX_IND_DESCS	128
+#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64
@@ -861,6 +863,12 @@ union sub_crq {
 	struct ibmvnic_rx_buff_add_desc rx_add;
 };
+struct ibmvnic_ind_xmit_queue {
+	union sub_crq *indir_arr;
+	dma_addr_t indir_dma;
+	int index;
+};
 struct ibmvnic_sub_crq_queue {
 	union sub_crq *msgs;
 	int size, cur;
@@ -873,6 +881,7 @@ struct ibmvnic_sub_crq_queue {
 	spinlock_t lock;
 	struct sk_buff *rx_skb_top;
 	struct ibmvnic_adapter *adapter;
+	struct ibmvnic_ind_xmit_queue ind_buf;
 	atomic_t used;
 	char name[32];
 	u64 handle;
...
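
A note on the sizing in the header change above: IBMVNIC_IND_ARR_SZ multiplies the descriptor count by 32, which matches the 32-byte size of a sub-CRQ descriptor, so each queue gets a 4 KiB (128 * 32 bytes) DMA-coherent indirect array. The hypothetical compile-time check below makes that relationship explicit; it is illustrative only, not part of the patch, and assumes sizeof(union sub_crq) == 32.

/* Illustrative check only: the indirect array is sized to hold exactly
 * IBMVNIC_MAX_IND_DESCS sub-CRQ descriptors.
 */
static_assert(IBMVNIC_IND_ARR_SZ ==
	      IBMVNIC_MAX_IND_DESCS * sizeof(union sub_crq),
	      "indirect array must hold IBMVNIC_MAX_IND_DESCS descriptors");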