Commit eb801f4f authored by Johannes Berg

wifi: iwlwifi: mvm: clean up reorder buffer data

We really don't need to maintain the buffer size per
queue buffer, it's the same for the whole BA session.
Also, we no longer use the mvm pointer inside each
queue's data structure. Clean that up.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20240703125541.64ea1ba75379.I2a25af040061efaf82379e96a84a76c5fb65c677@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 187accaa
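For illustration, a minimal sketch of the layout after this cleanup (this is not code from the patch: most members and kernel-doc are omitted, and the reorder_slot() helper is hypothetical). The per-queue reorder buffer loses its own buf_size and mvm copies, and callers read the size from the single per-BA-session structure instead:

#include <linux/types.h>
#include <linux/spinlock.h>

/* Sketch of the per-queue state after the cleanup: no buf_size, no mvm. */
struct iwl_mvm_reorder_buffer {
	u16 head_sn;		/* reorder window head SN */
	u16 num_stored;		/* MPDUs currently buffered */
	int queue;		/* RX queue this buffer serves */
	u16 last_amsdu;		/* duplicate-detection state */
	u8 last_sub_index;
	bool valid;		/* reordering valid for this queue */
	spinlock_t lock;	/* protects this buffer's state */
};

/* Sketch of the per-BA-session data, which now owns the single buf_size. */
struct iwl_mvm_baid_data {
	u8 tid;
	u8 baid;
	u16 timeout;
	u16 buf_size;		/* from the last ADDBA request */
	u16 entries_per_queue;	/* rounded up to avoid cache-line sharing */
	/* ... per-queue reorder buffers and frame entries follow ... */
};

/* Hypothetical helper (not in the patch): the reorder slot for a sequence
 * number is derived from the session-wide size, as the hunks below now do
 * inline via baid_data->buf_size. */
static inline u16 reorder_slot(const struct iwl_mvm_baid_data *ba, u16 sn)
{
	return sn % ba->buf_size;
}

Since buf_size is identical for every RX queue of a BA session, keeping one copy in the session data removes redundant state from each cacheline-aligned per-queue buffer.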
@@ -739,24 +739,20 @@ struct iwl_mvm_tcm {
  * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
  * @head_sn: reorder window head sn
  * @num_stored: number of mpdus stored in the buffer
- * @buf_size: the reorder buffer size as set by the last addba request
  * @queue: queue of this reorder buffer
  * @last_amsdu: track last ASMDU SN for duplication detection
  * @last_sub_index: track ASMDU sub frame index for duplication detection
  * @valid: reordering is valid for this queue
  * @lock: protect reorder buffer internal state
- * @mvm: mvm pointer, needed for frame timer context
  */
 struct iwl_mvm_reorder_buffer {
 	u16 head_sn;
 	u16 num_stored;
-	u16 buf_size;
 	int queue;
 	u16 last_amsdu;
 	u8 last_sub_index;
 	bool valid;
 	spinlock_t lock;
-	struct iwl_mvm *mvm;
 } ____cacheline_aligned_in_smp;
 
 /**
@@ -778,6 +774,7 @@ __aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
  * @tid: tid of the session
  * @baid: baid of the session
  * @timeout: the timeout set in the addba request
+ * @buf_size: the reorder buffer size as set by the last addba request
  * @entries_per_queue: # of buffers per queue, this actually gets
  *	aligned up to avoid cache line sharing between queues
  * @last_rx: last rx jiffies, updated only if timeout passed from last update
@@ -794,6 +791,7 @@ struct iwl_mvm_baid_data {
 	u8 tid;
 	u8 baid;
 	u16 timeout;
+	u16 buf_size;
 	u16 entries_per_queue;
 	unsigned long last_rx;
 	struct timer_list session_timer;
@@ -566,7 +566,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
 	lockdep_assert_held(&reorder_buf->lock);
 
 	while (ieee80211_sn_less(ssn, nssn)) {
-		int index = ssn % reorder_buf->buf_size;
+		int index = ssn % baid_data->buf_size;
 		struct sk_buff_head *skb_list = &entries[index].frames;
 		struct sk_buff *skb;
 
@@ -617,7 +617,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
 	spin_lock_bh(&reorder_buf->lock);
 	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
 			       ieee80211_sn_add(reorder_buf->head_sn,
-						reorder_buf->buf_size));
+						ba_data->buf_size));
 	spin_unlock_bh(&reorder_buf->lock);
 
 out:
@@ -839,7 +839,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	}
 
 	/* put in reorder buffer */
-	index = sn % buffer->buf_size;
+	index = sn % baid_data->buf_size;
 	__skb_queue_tail(&entries[index].frames, skb);
 	buffer->num_stored++;
 
@@ -2743,7 +2743,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
 		 */
 		WARN_ON(1);
 
-		for (j = 0; j < reorder_buf->buf_size; j++)
+		for (j = 0; j < data->buf_size; j++)
 			__skb_queue_purge(&entries[j].frames);
 
 		spin_unlock_bh(&reorder_buf->lock);
@@ -2752,7 +2752,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
 
 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
 					struct iwl_mvm_baid_data *data,
-					u16 ssn, u16 buf_size)
+					u16 ssn)
 {
 	int i;
 
@@ -2765,12 +2765,10 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
 
 		reorder_buf->num_stored = 0;
 		reorder_buf->head_sn = ssn;
-		reorder_buf->buf_size = buf_size;
 		spin_lock_init(&reorder_buf->lock);
-		reorder_buf->mvm = mvm;
 		reorder_buf->queue = i;
 		reorder_buf->valid = false;
-		for (j = 0; j < reorder_buf->buf_size; j++)
+		for (j = 0; j < data->buf_size; j++)
 			__skb_queue_head_init(&entries[j].frames);
 	}
 }
@@ -2979,13 +2977,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	baid_data->mvm = mvm;
 	baid_data->tid = tid;
 	baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+	baid_data->buf_size = buf_size;
 
 	mvm_sta->tid_to_baid[tid] = baid;
 	if (timeout)
 		mod_timer(&baid_data->session_timer,
 			  TU_TO_EXP_TIME(timeout * 2));
 
-	iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+	iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
 	/*
 	 * protect the BA data with RCU to cover a case where our
 	 * internal RX sync mechanism will timeout (not that it's