Commit 1a4a6975 authored by Yuval Mintz, committed by David S. Miller

qed: Chain support for external PBL

iWARP would require the chains to allocate/free their PBL memory
independently, so add the infrastructure to provide it externally.
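
As a rough illustration (not part of this patch), an iWARP-side user of the
new argument could look roughly like the sketch below. The surrounding
context (cdev, num_pages, num_elems, elem_size) is hypothetical; only
struct qed_chain_ext_pbl and the extra qed_chain_alloc() parameter come
from this change:

	struct qed_chain_ext_pbl ext_pbl;
	struct qed_chain chain;
	int rc;

	/* The caller owns the PBL buffer; the chain only records it and,
	 * because b_external_pbl gets set, never frees it itself.
	 */
	ext_pbl.p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						num_pages * QED_CHAIN_PBL_ENTRY_SIZE,
						&ext_pbl.p_pbl_phys, GFP_KERNEL);
	if (!ext_pbl.p_pbl_virt)
		return -ENOMEM;

	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U32,
			     num_elems, elem_size, &chain, &ext_pbl);

qed_chain_free() then releases the chain pages as before but leaves the
external PBL untouched, so the caller remains responsible for freeing it.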
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8917a777
@@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 					QED_CHAIN_CNT_TYPE_U16,
 					n_entries,
 					sizeof(struct regpair *),
-					&cnq->pbl);
+					&cnq->pbl, NULL);
 		if (rc)
 			goto err4;
......
@@ -925,7 +925,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 					QED_CHAIN_CNT_TYPE_U32,
 					chain_entries,
 					sizeof(union rdma_cqe),
-					&cq->pbl);
+					&cq->pbl, NULL);
 	if (rc)
 		goto err1;
@@ -1413,7 +1413,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 					QED_CHAIN_CNT_TYPE_U32,
 					n_sq_elems,
 					QEDR_SQE_ELEMENT_SIZE,
-					&qp->sq.pbl);
+					&qp->sq.pbl, NULL);
 	if (rc)
 		return rc;
@@ -1427,7 +1427,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 					QED_CHAIN_CNT_TYPE_U32,
 					n_rq_elems,
 					QEDR_RQE_ELEMENT_SIZE,
-					&qp->rq.pbl);
+					&qp->rq.pbl, NULL);
 	if (rc)
 		return rc;
......
@@ -3075,12 +3075,15 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 	}
 
 	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-	dma_free_coherent(&cdev->pdev->dev,
-			  pbl_size,
-			  p_chain->pbl_sp.p_virt_table,
-			  p_chain->pbl_sp.p_phys_table);
+
+	if (!p_chain->b_external_pbl)
+		dma_free_coherent(&cdev->pdev->dev,
+				  pbl_size,
+				  p_chain->pbl_sp.p_virt_table,
+				  p_chain->pbl_sp.p_phys_table);
+
 out:
 	vfree(p_chain->pbl.pp_virt_addr_tbl);
 	p_chain->pbl.pp_virt_addr_tbl = NULL;
 }
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -3174,7 +3177,10 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
 	return 0;
 }
 
-static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+static int
+qed_chain_alloc_pbl(struct qed_dev *cdev,
+		    struct qed_chain *p_chain,
+		    struct qed_chain_ext_pbl *ext_pbl)
 {
 	u32 page_cnt = p_chain->page_cnt, size, i;
 	dma_addr_t p_phys = 0, p_pbl_phys = 0;
@@ -3194,8 +3200,16 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 	 * should be saved to allow its freeing during the error flow.
 	 */
 	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-	p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
-					size, &p_pbl_phys, GFP_KERNEL);
+
+	if (!ext_pbl) {
+		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+						size, &p_pbl_phys, GFP_KERNEL);
+	} else {
+		p_pbl_virt = ext_pbl->p_pbl_virt;
+		p_pbl_phys = ext_pbl->p_pbl_phys;
+		p_chain->b_external_pbl = true;
+	}
+
 	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
 			       pp_virt_addr_tbl);
 	if (!p_pbl_virt)
@@ -3228,7 +3242,10 @@ int qed_chain_alloc(struct qed_dev *cdev,
 		    enum qed_chain_use_mode intended_use,
 		    enum qed_chain_mode mode,
 		    enum qed_chain_cnt_type cnt_type,
-		    u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
+		    u32 num_elems,
+		    size_t elem_size,
+		    struct qed_chain *p_chain,
+		    struct qed_chain_ext_pbl *ext_pbl)
 {
 	u32 page_cnt;
 	int rc = 0;
@@ -3259,7 +3276,7 @@ int qed_chain_alloc(struct qed_dev *cdev,
 		rc = qed_chain_alloc_single(cdev, p_chain);
 		break;
 	case QED_CHAIN_MODE_PBL:
-		rc = qed_chain_alloc_pbl(cdev, p_chain);
+		rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
 		break;
 	}
 	if (rc)
......
@@ -307,6 +307,7 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
  * @param num_elems
  * @param elem_size
  * @param p_chain
+ * @param ext_pbl - a possible external PBL
  *
  * @return int
  */
@@ -315,7 +316,9 @@ qed_chain_alloc(struct qed_dev *cdev,
 		enum qed_chain_use_mode intended_use,
 		enum qed_chain_mode mode,
 		enum qed_chain_cnt_type cnt_type,
-		u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
+		u32 num_elems,
+		size_t elem_size,
+		struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
 
 /**
  * @brief qed_chain_free - Free chain DMA memory
......
@@ -752,7 +752,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
 			    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 			    QED_CHAIN_MODE_PBL,
 			    QED_CHAIN_CNT_TYPE_U16,
-			    r2tq_num_elements, 0x80, &p_conn->r2tq);
+			    r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
 	if (rc)
 		goto nomem_r2tq;
@@ -763,7 +763,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
 			    QED_CHAIN_MODE_PBL,
 			    QED_CHAIN_CNT_TYPE_U16,
 			    uhq_num_elements,
-			    sizeof(struct iscsi_uhqe), &p_conn->uhq);
+			    sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
 	if (rc)
 		goto nomem_uhq;
@@ -773,7 +773,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
 			    QED_CHAIN_MODE_PBL,
 			    QED_CHAIN_CNT_TYPE_U16,
 			    xhq_num_elements,
-			    sizeof(struct iscsi_xhqe), &p_conn->xhq);
+			    sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
 	if (rc)
 		goto nomem;
......
@@ -1056,7 +1056,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 				 QED_CHAIN_CNT_TYPE_U16,
 				 p_ll2_info->input.rx_num_desc,
 				 sizeof(struct core_rx_bd),
-				 &p_ll2_info->rx_queue.rxq_chain);
+				 &p_ll2_info->rx_queue.rxq_chain, NULL);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
 		goto out;
@@ -1078,7 +1078,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 				 QED_CHAIN_CNT_TYPE_U16,
 				 p_ll2_info->input.rx_num_desc,
 				 sizeof(struct core_rx_fast_path_cqe),
-				 &p_ll2_info->rx_queue.rcq_chain);
+				 &p_ll2_info->rx_queue.rcq_chain, NULL);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
 		goto out;
@@ -1108,7 +1108,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 				 QED_CHAIN_CNT_TYPE_U16,
 				 p_ll2_info->input.tx_num_desc,
 				 sizeof(struct core_tx_bd),
-				 &p_ll2_info->tx_queue.txq_chain);
+				 &p_ll2_info->tx_queue.txq_chain, NULL);
 	if (rc)
 		goto out;
......
@@ -419,7 +419,7 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 			    QED_CHAIN_CNT_TYPE_U16,
 			    num_elem,
 			    sizeof(union event_ring_element),
-			    &p_eq->chain))
+			    &p_eq->chain, NULL))
 		goto eq_allocate_fail;
 	/* register EQ completion on the SP SB */
@@ -547,7 +547,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 			    QED_CHAIN_CNT_TYPE_U16,
 			    0,   /* N/A when the mode is SINGLE */
 			    sizeof(struct slow_path_element),
-			    &p_spq->chain))
+			    &p_spq->chain, NULL))
 		goto spq_allocate_fail;
 	/* allocate and fill the SPQ elements (incl. ramrod data list) */
@@ -953,7 +953,7 @@ int qed_consq_alloc(struct qed_hwfn *p_hwfn)
 			    QED_CHAIN_MODE_PBL,
 			    QED_CHAIN_CNT_TYPE_U16,
 			    QED_CHAIN_PAGE_SIZE / 0x80,
-			    0x80, &p_consq->chain))
+			    0x80, &p_consq->chain, NULL))
 		goto consq_allocate_fail;
 	p_hwfn->p_consq = p_consq;
......
@@ -1317,8 +1317,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 					    QED_CHAIN_CNT_TYPE_U16,
 					    RX_RING_SIZE,
 					    sizeof(struct eth_rx_bd),
-					    &rxq->rx_bd_ring);
+					    &rxq->rx_bd_ring, NULL);
 	if (rc)
 		goto err;
@@ -1329,7 +1328,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 					    QED_CHAIN_CNT_TYPE_U16,
 					    RX_RING_SIZE,
 					    sizeof(union eth_rx_cqe),
-					    &rxq->rx_comp_ring);
+					    &rxq->rx_comp_ring, NULL);
 	if (rc)
 		goto err;
@@ -1387,7 +1386,8 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 					    QED_CHAIN_MODE_PBL,
 					    QED_CHAIN_CNT_TYPE_U16,
 					    txq->num_tx_buffers,
-					    sizeof(*p_virt), &txq->tx_pbl);
+					    sizeof(*p_virt),
+					    &txq->tx_pbl, NULL);
 	if (rc)
 		goto err;
......
@@ -80,6 +80,11 @@ struct qed_chain_pbl_u32 {
 	u32 cons_page_idx;
 };
 
+struct qed_chain_ext_pbl {
+	dma_addr_t p_pbl_phys;
+	void *p_pbl_virt;
+};
+
 struct qed_chain_u16 {
 	/* Cyclic index of next element to produce/consme */
 	u16 prod_idx;
@@ -155,6 +160,8 @@ struct qed_chain {
 	u32 size;
 
 	u8 intended_use;
+
+	bool b_external_pbl;
 };
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
......
@@ -634,7 +634,8 @@ struct qed_common_ops {
 			   enum qed_chain_cnt_type cnt_type,
 			   u32 num_elems,
 			   size_t elem_size,
-			   struct qed_chain *p_chain);
+			   struct qed_chain *p_chain,
+			   struct qed_chain_ext_pbl *ext_pbl);
 
 	void	(*chain_free)(struct qed_dev *cdev,
 			      struct qed_chain *p_chain);
......