Commit 105183b1 authored by Emmanuel Grumbach, committed by John W. Linville

iwlagn: move scd_bc_tbls and scd_base_addr to iwl_trans_pcie

Needed for PCIe only
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 04e1cabe
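
The diff below repeats one mechanical change at every call site: the scheduler byte-count tables (scd_bc_tbls) and the scheduler SRAM base address (scd_base_addr) leave the bus-agnostic struct iwl_priv, and each function now first resolves the PCIe-private struct via IWL_TRANS_GET_PCIE_TRANS(trans(priv)) before touching them. For readers outside the iwlwifi tree, here is a minimal standalone sketch of that pattern; the stub types, field layouts, and the trans_specific member are simplifying assumptions standing in for the real definitions, not the driver's actual code.

/*
 * Illustrative sketch only -- not part of this commit. Bus-specific
 * state lives in a private struct hung off the generic transport,
 * and call sites recover it with an accessor macro.
 */
#include <stdint.h>

struct iwl_dma_ptr {                    /* stub: CPU address + DMA address pair */
	void *addr;
	uint64_t dma;
};

struct iwl_trans {                      /* generic, bus-agnostic transport */
	void *trans_specific;           /* bus-private area hangs off here */
};

struct iwl_trans_pcie {                 /* PCIe-only state, opaque to the core */
	uint32_t scd_base_addr;         /* scheduler SRAM base address */
	struct iwl_dma_ptr scd_bc_tbls; /* scheduler byte-count tables */
};

/* Resolve the PCIe-private struct from the generic transport. */
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *)((_iwl_trans)->trans_specific))
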
@@ -1242,9 +1242,6 @@ struct iwl_priv {
 	struct iwl_tx_queue *txq;
 	unsigned long txq_ctx_active_msk;
 	struct iwl_dma_ptr kw;	/* keep warm address */
-	struct iwl_dma_ptr scd_bc_tbls;
-	u32 scd_base_addr;	/* scheduler sram base address */
-
 	/* counts mgmt, ctl, and data packets */
 	struct traffic_stats tx_stats;
...
@@ -91,6 +91,8 @@ struct iwl_rx_queue {
  * @rxq: all the RX queue data
  * @rx_replenish: work that will be called when buffers need to be allocated
  * @trans: pointer to the generic transport area
+ * @scd_base_addr: scheduler sram base address in SRAM
+ * @scd_bc_tbls: pointer to the byte count table of the scheduler
  */
 struct iwl_trans_pcie {
 	struct iwl_rx_queue rxq;
@@ -109,6 +111,8 @@ struct iwl_trans_pcie {
 	struct isr_statistics isr_stats;
 
 	u32 inta_mask;
+	u32 scd_base_addr;
+	struct iwl_dma_ptr scd_bc_tbls;
 };

 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
...
@@ -45,7 +45,10 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 					   struct iwl_tx_queue *txq,
 					   u16 byte_cnt)
 {
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
 	int write_ptr = txq->q.write_ptr;
 	int txq_id = txq->q.id;
 	u8 sec_ctl = 0;
@@ -53,6 +56,8 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;

+	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

 	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
@@ -335,12 +340,17 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 					   struct iwl_tx_queue *txq)
 {
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
 	int txq_id = txq->q.id;
 	int read_ptr = txq->q.read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;

+	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

 	if (txq_id != priv->shrd->cmd_queue)
@@ -361,9 +371,13 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
 	u32 tbl_dw;
 	u16 scd_q2ratid;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

-	tbl_dw_addr = priv->scd_base_addr +
+	tbl_dw_addr = trans_pcie->scd_base_addr +
 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

 	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
@@ -424,6 +438,10 @@ void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 	unsigned long flags;
 	struct iwl_tid_data *tid_data;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	if (WARN_ON(sta_id == IWL_INVALID_STATION))
 		return;
 	if (WARN_ON(tid >= MAX_TID_COUNT))
@@ -459,7 +477,7 @@ void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);

 	/* Set up Tx window size and frame limit for this queue */
-	iwl_write_targ_mem(priv, priv->scd_base_addr +
+	iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
 			sizeof(u32),
 			((frame_limit <<
...
@@ -469,6 +469,9 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
 {
 	int txq_id;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);

 	/* Tx queues */
 	if (priv->txq) {
@@ -482,7 +485,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
 	iwlagn_free_dma_ptr(priv, &priv->kw);

-	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+	iwlagn_free_dma_ptr(priv, &trans_pcie->scd_bc_tbls);
 }

 /**
@@ -496,6 +499,9 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
 {
 	int ret;
 	int txq_id, slots_num;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);

 	/*It is not allowed to alloc twice, so warn when this happens.
 	 * We cannot rely on the previous allocation, so free and fail */
@@ -504,7 +510,7 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
 		goto error;
 	}

-	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+	ret = iwlagn_alloc_dma_ptr(priv, &trans_pcie->scd_bc_tbls,
 				hw_params(priv).scd_bc_tbls_size);
 	if (ret) {
 		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
@@ -785,30 +791,33 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
 {
 	const struct queue_to_fifo_ac *queue_to_fifo;
 	struct iwl_rxon_context *ctx;
+	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 a;
 	unsigned long flags;
 	int i, chan;
 	u32 reg_val;

-	spin_lock_irqsave(&priv->shrd->lock, flags);
+	spin_lock_irqsave(&trans->shrd->lock, flags);

-	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
-	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+	trans_pcie->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
+	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
 	/* reset conext data memory */
-	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
 		a += 4)
 		iwl_write_targ_mem(priv, a, 0);
 	/* reset tx status memory */
-	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
 		a += 4)
 		iwl_write_targ_mem(priv, a, 0);
-	for (; a < priv->scd_base_addr +
+	for (; a < trans_pcie->scd_base_addr +
 		SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
 		a += 4)
 		iwl_write_targ_mem(priv, a, 0);

 	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
-		       priv->scd_bc_tbls.dma >> 10);
+		       trans_pcie->scd_bc_tbls.dma >> 10);

 	/* Enable DMA channel */
 	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
@@ -829,9 +838,9 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
 	for (i = 0; i < hw_params(priv).max_txq_num; i++) {
 		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
 		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-		iwl_write_targ_mem(priv, priv->scd_base_addr +
+		iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
 				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
-		iwl_write_targ_mem(priv, priv->scd_base_addr +
+		iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
 				SCD_CONTEXT_QUEUE_OFFSET(i) +
 				sizeof(u32),
 				((SCD_WIN_SIZE <<
@@ -843,7 +852,7 @@ static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
 	}

 	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
-			IWL_MASK(0, hw_params(priv).max_txq_num));
+			IWL_MASK(0, hw_params(trans).max_txq_num));

 	/* Activate all Tx DMA/FIFO channels */
 	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
...
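
Taken together, the hunks above enforce the layering the commit message summarizes as "Needed for PCIe only": nothing outside the PCIe transport can reach the scheduler state anymore. Continuing the hypothetical sketch from above (same assumed types and macro, not the driver's real code), a call site after this change looks like:

/* Hypothetical call site, continuing the earlier sketch: the moved
 * fields are reached only through the resolved PCIe-private struct. */
static uint32_t example_queue_ctx_addr(struct iwl_trans *trans,
				       uint32_t queue_offset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Base of the scheduler context in SRAM, plus a per-queue offset. */
	return trans_pcie->scd_base_addr + queue_offset;
}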