Commit 5e776d80 authored by Alexander Lobakin, committed by David S. Miller

qed: move chain initialization inlines next to allocation functions

qed_chain_init*() are used in one file/place on "cold" path only, so they
can be uninlined and moved next to the call sites.
Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9b6ee3cf
@@ -7,6 +7,53 @@
#include "qed_dev_api.h"
/* Zero the chain struct and fill in its invariant ("fixed") parameters.
 * Per-page element geometry is derived from the element size and mode.
 */
static void qed_chain_init_params(struct qed_chain *chain,
				  u32 page_cnt, u8 elem_size,
				  enum qed_chain_use_mode intended_use,
				  enum qed_chain_mode mode,
				  enum qed_chain_cnt_type cnt_type)
{
	memset(chain, 0, sizeof(*chain));

	/* Caller-supplied configuration */
	chain->page_cnt = page_cnt;
	chain->elem_size = elem_size;
	chain->intended_use = intended_use;
	chain->mode = mode;
	chain->cnt_type = cnt_type;

	/* Per-page layout derived from element size and chain mode */
	chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	chain->elem_per_page_mask = chain->elem_per_page - 1;
	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	chain->next_page_mask = chain->usable_per_page &
				chain->elem_per_page_mask;

	/* Totals over the whole buffer */
	chain->size = chain->elem_per_page * page_cnt;
	chain->capacity = chain->usable_per_page * page_cnt;
}
/* Link one chain page to the next: write the next-pointer element that
 * sits right after the page's usable elements.
 */
static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
					 void *virt_curr, void *virt_next,
					 dma_addr_t phys_next)
{
	/* The next-pointer element follows the usable elements of the page */
	struct qed_chain_next *next =
		virt_curr + chain->elem_size * chain->usable_per_page;

	next->next_virt = virt_next;
	DMA_REGPAIR_LE(next->next_phys, phys_next);
}
/* Attach an externally allocated buffer to the chain: virtual address and
 * DMA address of the buffer's first page.
 */
static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
			       dma_addr_t phys_addr)
{
	chain->p_virt_addr = virt_addr;
	chain->p_phys_addr = phys_addr;
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
...
@@ -490,118 +490,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
	}
}
/**
 * @brief qed_chain_init_params - Initializes the fixed parameters of a
 *        basic chain struct (buffer addresses are attached separately)
 *
 * @param p_chain
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = (u8)intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = (u8)cnt_type;

	/* per-page element geometry derived from elem_size and mode */
	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	/* totals over the whole buffer */
	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	/* PBL fields are cleared; set later by qed_chain_init_pbl_mem() */
	p_chain->pbl_sp.table_phys = 0;
	p_chain->pbl_sp.table_virt = NULL;
	p_chain->pbl.pp_addr_tbl = NULL;
}
/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr virtual address of allocated buffer's beginning
 * @param p_phys_addr physical address of allocated buffer's beginning
 *
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
				      void *p_virt_addr, dma_addr_t p_phys_addr)
{
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
}
/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl pointer to a pre allocated side table which will hold
 *        virtual page addresses.
 * @param p_phys_pbl pointer to a pre-allocated side table which will hold
 *        physical page addresses.
 * @param pp_addr_tbl
 *        pointer to a pre-allocated side table which will hold
 *        the virtual addresses of the chain pages.
 *
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
					  void *p_virt_pbl,
					  dma_addr_t p_phys_pbl,
					  struct addr_tbl_entry *pp_addr_tbl)
{
	p_chain->pbl_sp.table_phys = p_phys_pbl;
	p_chain->pbl_sp.table_virt = p_virt_pbl;
	p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}
/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr virtual address of a chain page of which the next
 *        pointer element is initialized
 * @param p_virt_next virtual address of the next chain page
 * @param p_phys_next physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
			     void *p_virt_curr,
			     void *p_virt_next, dma_addr_t p_phys_next)
{
	struct qed_chain_next *p_next;
	u32 size;

	/* The next-pointer element sits right after the usable elements */
	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
	p_next->next_virt = p_virt_next;
}
/**
 * @brief qed_chain_get_last_elem -
 *
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment