Commit d3d3e001 authored by John W. Linville
parents 7a0a260a 48849a41
......@@ -81,7 +81,7 @@
#define IWL8000_NVM_VERSION 0x0a1d
#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL8000_FW_PRE "iwlwifi-8000-"
#define IWL8000_FW_PRE "iwlwifi-8000"
#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
......
......@@ -69,6 +69,7 @@
#include <linux/vmalloc.h>
#include "iwl-drv.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
......@@ -244,6 +245,23 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
name_pre, tag);
/*
* Starting from the 8000 B-step, the FW name format has changed. This
* overwrites the previously built name and uses the new format.
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
char rev_step[2] = {
'A' + CSR_HW_REV_STEP(drv->trans->hw_rev), 0
};
/* A-step doesn't have an indication */
if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
rev_step[0] = 0;
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
"%s%s-%s.ucode", name_pre, rev_step, tag);
}
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
? "EXPERIMENTAL " : "",
......
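For illustration only (not part of the patch): a minimal standalone sketch of the firmware file name this hunk produces for 8000-family devices, assuming a B-step part and ucode API version 10 purely as example values.

#include <stdio.h>

int main(void)
{
	/* Example values only - not read from real hardware or firmware. */
	const char *name_pre = "iwlwifi-8000";	/* IWL8000_FW_PRE after this change */
	unsigned int hw_step = 1;		/* 0 = A-step, 1 = B-step, ... */
	const char *tag = "10";			/* ucode API version */
	char rev_step[2] = { 'A' + hw_step, 0 };
	char fw_name[64];

	if (hw_step == 0)			/* A-step parts carry no step letter */
		rev_step[0] = 0;

	snprintf(fw_name, sizeof(fw_name), "%s%s-%s.ucode", name_pre, rev_step, tag);
	printf("%s\n", fw_name);		/* prints "iwlwifi-8000B-10.ucode" */
	return 0;
}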
......@@ -145,9 +145,24 @@ enum iwl_ucode_tlv_api {
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
* action frame
* @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports adding DS params
* element in probe requests.
* @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
* probe requests.
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
* @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
* which also implies support for the scheduler configuration command
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
};
/* The default calibrate table size if not specified by firmware file */
......
......@@ -90,9 +90,10 @@
#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62
#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#define IWL_MVM_BT_COEX_CORUNNING 1
#define IWL_MVM_BT_COEX_CORUNNING 0
#define IWL_MVM_BT_COEX_MPLUT 1
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
#define IWL_MVM_QUOTA_THRESHOLD 8
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
#endif /* __MVM_CONSTANTS_H */
......@@ -326,6 +326,29 @@ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
return count;
}
static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
char buf[16];
int pos, temp;
if (!mvm->ucode_loaded)
return -EIO;
mutex_lock(&mvm->mutex);
temp = iwl_mvm_get_temp(mvm);
mutex_unlock(&mvm->mutex);
if (temp < 0)
return temp;
pos = scnprintf(buf, sizeof(buf), "%d\n", temp);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
......@@ -1378,6 +1401,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
......@@ -1420,6 +1444,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
......
......@@ -670,6 +670,8 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
* @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
......@@ -678,6 +680,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3),
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
};
enum iwl_scan_priority {
......
......@@ -66,6 +66,7 @@
/**
* enum iwl_tx_flags - bitmasks for tx_flags in TX command
* @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
* @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame
* @TX_CMD_FLG_ACK: expect ACK from receiving station
* @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
* Otherwise, use rate_n_flags from the TX command
......@@ -97,6 +98,7 @@
*/
enum iwl_tx_flags {
TX_CMD_FLG_PROT_REQUIRE = BIT(0),
TX_CMD_FLG_WRITE_TX_POWER = BIT(1),
TX_CMD_FLG_ACK = BIT(3),
TX_CMD_FLG_STA_RATE = BIT(4),
TX_CMD_FLG_BAR = BIT(6),
......
......@@ -116,6 +116,9 @@ enum {
TXPATH_FLUSH = 0x1e,
MGMT_MCAST_KEY = 0x1f,
/* scheduler config */
SCD_QUEUE_CFG = 0x1d,
/* global key */
WEP_KEY = 0x20,
......@@ -1650,4 +1653,61 @@ struct iwl_dts_measurement_notif {
__le32 voltage;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
/**
* enum iwl_scd_control - scheduler config command control flags
* @IWL_SCD_CONTROL_RM_TID: remove TID from this queue
* @IWL_SCD_CONTROL_SET_SSN: use the SSN and program it into HW
*/
enum iwl_scd_control {
IWL_SCD_CONTROL_RM_TID = BIT(4),
IWL_SCD_CONTROL_SET_SSN = BIT(5),
};
/**
* enum iwl_scd_flags - scheduler config command flags
* @IWL_SCD_FLAGS_SHARE_TID: multiple TIDs map to this queue
* @IWL_SCD_FLAGS_SHARE_RA: multiple RAs map to this queue
* @IWL_SCD_FLAGS_DQA_ENABLED: DQA is enabled
*/
enum iwl_scd_flags {
IWL_SCD_FLAGS_SHARE_TID = BIT(0),
IWL_SCD_FLAGS_SHARE_RA = BIT(1),
IWL_SCD_FLAGS_DQA_ENABLED = BIT(2),
};
#define IWL_SCDQ_INVALID_STA 0xff
/**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
* @token: dialog token addba - unused legacy
* @sta_id: station id 4-bit
* @tid: TID 0..7
* @scd_queue: TFD queue num 0 .. 31
* @enable: 1 queue enable, 0 queue disable
* @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: tx fifo num 0..7
* @window: aggregation window size, up to 64 frames
* @ssn: starting seq num 12-bit
* @control: command control flags
* @flags: flags - see &enum iwl_scd_flags
*
* Note that every time the command is sent, all parameters must
* be filled with the exception of
* - the SSN, which is only used with @IWL_SCD_CONTROL_SET_SSN
* - the window, which is only relevant when starting aggregation
*/
struct iwl_scd_txq_cfg_cmd {
u8 token;
u8 sta_id;
u8 tid;
u8 scd_queue;
u8 enable;
u8 aggregate;
u8 tx_fifo;
u8 window;
__le16 ssn;
u8 control;
u8 flags;
} __packed;
#endif /* __fw_api_h__ */
......@@ -427,16 +427,16 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_TX_FIFO_VO);
break;
case NL80211_IFTYPE_AP:
iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
IWL_MVM_TX_FIFO_MCAST);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac],
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac]);
break;
}
......@@ -452,16 +452,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
true);
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE);
break;
case NL80211_IFTYPE_AP:
iwl_trans_txq_disable(mvm->trans, vif->cab_queue, true);
iwl_mvm_disable_txq(mvm, vif->cab_queue);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac],
true);
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac]);
}
}
......
......@@ -279,14 +279,6 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
}
}
static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
{
/* we create the 802.11 header and SSID element */
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
}
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
......@@ -303,7 +295,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_CHANCTX_STA_CSA;
IEEE80211_HW_CHANCTX_STA_CSA |
IEEE80211_HW_SUPPORTS_CLONED_SKBS;
hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
......@@ -378,7 +371,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
......@@ -411,6 +404,22 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
hw->wiphy->features |= NL80211_FEATURE_QUIET;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
hw->wiphy->features |=
NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
/* currently FW API supports only one optional cipher scheme */
......@@ -2135,7 +2144,13 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (!iwl_mvm_is_idle(mvm)) {
/* Newest FW fixes sched scan while connected on another interface */
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
if (!vif->bss_conf.idle) {
ret = -EBUSY;
goto out;
}
} else if (!iwl_mvm_is_idle(mvm)) {
ret = -EBUSY;
goto out;
}
......
......@@ -779,6 +779,11 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
{
return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT;
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
......@@ -930,6 +935,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
......@@ -984,6 +990,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
struct iwl_mvm_frame_stats *stats,
u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_rx_status *rx_status);
/* power management */
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
......@@ -1141,6 +1150,39 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
return mvmvif->low_latency;
}
/* hw scheduler queue config */
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg);
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue);
static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
u8 fifo)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
iwl_mvm_enable_txq(mvm, queue, 0, &cfg);
}
static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn)
{
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.sta_id = sta_id,
.tid = tid,
.frame_limit = frame_limit,
.aggregate = true,
};
iwl_mvm_enable_txq(mvm, queue, ssn, &cfg);
}
/* Assoc status */
bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
......@@ -1150,6 +1192,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
int iwl_mvm_get_temp(struct iwl_mvm *mvm);
/* smart fifo */
int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
......
......@@ -342,6 +342,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_COEX_UPDATE_REDUCED_TXP),
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION),
CMD(SCD_QUEUE_CFG),
};
#undef CMD
......@@ -421,7 +422,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->first_agg_queue = 12;
}
mvm->sf_state = SF_UNINIT;
mvm->low_latency_agg_frame_limit = 1;
mvm->low_latency_agg_frame_limit = 6;
mutex_init(&mvm->mutex);
mutex_init(&mvm->d0i3_suspend_mutex);
......
......@@ -376,6 +376,10 @@ struct iwl_lq_sta {
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum ieee80211_band band, bool init);
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
*
......
......@@ -246,6 +246,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_info *phy_info;
struct iwl_rx_mpdu_res_start *rx_res;
struct ieee80211_sta *sta;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
......@@ -260,23 +261,6 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
memset(&rx_status, 0, sizeof(rx_status));
/*
* We have tx blocked stations (with CS bit). If we heard frames from
* a blocked station on a new channel we can TX to it again.
*/
if (unlikely(mvm->csa_tx_block_bcn_timeout)) {
struct ieee80211_sta *sta;
rcu_read_lock();
sta = ieee80211_find_sta(
rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2);
if (sta)
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
rcu_read_unlock();
}
/*
* drop the packet if it has failed being decrypted by HW
*/
......@@ -325,6 +309,29 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
rcu_read_lock();
/*
* We have tx blocked stations (with CS bit). If we heard frames from
* a blocked station on a new channel we can TX to it again.
*/
if (unlikely(mvm->csa_tx_block_bcn_timeout)) {
sta = ieee80211_find_sta(
rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2);
if (sta)
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
}
/* This is fine since we don't support multiple AP interfaces */
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
if (sta) {
struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
rs_update_last_rssi(mvm, &mvmsta->lq_sta,
&rx_status);
}
rcu_read_unlock();
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
rx_status.flag |= RX_FLAG_SHORTPRE;
......
......@@ -339,6 +339,55 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
}
}
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
return mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
}
static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
bool is_sched_scan)
{
int max_probe_len;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
else
max_probe_len = mvm->fw->ucode_capa.max_probe_length;
/* we create the 802.11 header and SSID element */
max_probe_len -= 24 + 2;
/* basic ssid is added only for hw_scan with an old api */
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
!is_sched_scan)
max_probe_len -= 32;
return max_probe_len;
}
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
{
int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
return max_ie_len;
/* TODO: [BUG] This function should return the maximum allowed size of
* scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
* in the same command. So the correct implementation of this function
* is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
* command has only 512 bytes and it would leave us with about 240
* bytes for scan IEs, which is clearly not enough. So meanwhile
* we will report an incorrect value. This may result in a failure to
* issue a scan in unified_scan_lmac and unified_sched_scan_lmac
* functions with -ENOBUFS, if a large enough probe is provided.
*/
return max_ie_len;
}
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
......@@ -1153,6 +1202,10 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
}
if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
}
int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
......@@ -1180,13 +1233,12 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
if (WARN_ON(mvm->scan_cmd == NULL))
return -ENOMEM;
if (WARN_ON_ONCE(req->req.n_ssids > PROBE_OPTION_MAX ||
req->ies.common_ie_len + req->ies.len[0] +
req->ies.len[1] + 24 + 2 >
SCAN_OFFLOAD_PROBE_REQ_SIZE ||
req->req.n_channels >
mvm->fw->ucode_capa.n_scan_channels))
return -1;
if (req->req.n_ssids > PROBE_OPTION_MAX ||
req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
req->ies.len[NL80211_BAND_5GHZ] >
iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -ENOBUFS;
mvm->scan_status = IWL_MVM_SCAN_OS;
......@@ -1208,7 +1260,7 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
if (req->req.n_ssids == 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
cmd->scan_flags = cpu_to_le32(flags);
cmd->scan_flags |= cpu_to_le32(flags);
cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
......@@ -1274,10 +1326,11 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (WARN_ON(mvm->scan_cmd == NULL))
return -ENOMEM;
if (WARN_ON_ONCE(req->n_ssids > PROBE_OPTION_MAX ||
ies->common_ie_len + ies->len[0] + ies->len[1] + 24 + 2
> SCAN_OFFLOAD_PROBE_REQ_SIZE ||
req->n_channels > mvm->fw->ucode_capa.n_scan_channels))
if (req->n_ssids > PROBE_OPTION_MAX ||
ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
ies->len[NL80211_BAND_5GHZ] >
iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -ENOBUFS;
iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
......@@ -1305,7 +1358,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (req->n_ssids == 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
cmd->scan_flags = cpu_to_le32(flags);
cmd->scan_flags |= cpu_to_le32(flags);
cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
......
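For reference on the TODO in iwl_mvm_max_scan_ie_len() above, the arithmetic can be checked with a small standalone sketch. It is not part of the patch; the 512-byte SCAN_OFFLOAD_PROBE_REQ_SIZE value is taken from the TODO text, and the two helper macros are illustrative names for the 24 + 2 bytes the driver builds itself.

#include <stdio.h>

/* Assumption: SCAN_OFFLOAD_PROBE_REQ_SIZE is 512 bytes, per the TODO text. */
#define SCAN_OFFLOAD_PROBE_REQ_SIZE	512
#define MGMT_HDR_LEN			24	/* 802.11 header built by the driver */
#define SSID_IE_LEN			2	/* empty SSID element built by the driver */

int main(void)
{
	int room = SCAN_OFFLOAD_PROBE_REQ_SIZE - MGMT_HDR_LEN - SSID_IE_LEN;

	/* What iwl_mvm_max_scan_ie_len() currently reports for the LMAC API... */
	printf("reported max scan IE len: %d\n", room);		/* 486 */
	/* ...and the correct value once the 2 GHz and 5 GHz IEs share the same
	 * buffer, i.e. iwl_mvm_max_scan_ie_fw_cmd_room() / 2 - roughly 240 bytes. */
	printf("correct  max scan IE len: %d\n", room / 2);	/* 243 */
	return 0;
}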
......@@ -247,6 +247,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
mvm_sta->tid_data[i].seq_number = seq;
}
mvm_sta->agg_tids = 0;
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
......@@ -535,7 +536,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */
iwl_trans_ac_txq_enable(mvm->trans, mvm->aux_queue,
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST);
/* Allocate aux station and assign to it the aux queue */
......@@ -872,12 +873,16 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int queue, fifo, ret;
u16 ssn;
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
!= IWL_MAX_TID_COUNT);
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
spin_lock_bh(&mvmsta->lock);
ssn = tid_data->ssn;
queue = tid_data->txq_id;
tid_data->state = IWL_AGG_ON;
mvmsta->agg_tids |= BIT(tid);
tid_data->ssn = 0xffff;
spin_unlock_bh(&mvmsta->lock);
......@@ -887,7 +892,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
return -EIO;
iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid,
iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
buf_size, ssn);
/*
......@@ -932,6 +937,8 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
mvmsta->sta_id, tid, txq_id, tid_data->state);
mvmsta->agg_tids &= ~BIT(tid);
switch (tid_data->state) {
case IWL_AGG_ON:
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
......@@ -956,7 +963,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_trans_txq_disable(mvm->trans, txq_id, true);
iwl_mvm_disable_txq(mvm, txq_id);
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
......@@ -1005,6 +1012,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->sta_id, tid, txq_id, tid_data->state);
old_state = tid_data->state;
tid_data->state = IWL_AGG_OFF;
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
if (old_state >= IWL_AGG_ON) {
......@@ -1013,7 +1021,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true);
iwl_mvm_disable_txq(mvm, tid_data->txq_id);
}
mvm->queue_to_mac80211[tid_data->txq_id] =
......
......@@ -299,6 +299,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
* @tx_protection: reference counter for controlling the Tx protection.
* @tt_tx_protection: is thermal throttling enable Tx protection?
* @disable_tx: is tx to this STA disabled?
* @agg_tids: bitmap of TIDs whose aggregation state is operational (IWL_AGG_ON)
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
......@@ -323,6 +324,7 @@ struct iwl_mvm_sta {
bool tt_tx_protection;
bool disable_tx;
u8 agg_tids;
};
static inline struct iwl_mvm_sta *
......
......@@ -135,7 +135,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
static int iwl_mvm_get_temp(struct iwl_mvm *mvm)
int iwl_mvm_get_temp(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_temp_notif;
static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
......
......@@ -133,6 +133,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
if ((mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
ieee80211_action_contains_tpc(skb))
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
tx_cmd->tx_flags = cpu_to_le32(tx_flags);
/* Total # bytes to be transmitted */
tx_cmd->len = cpu_to_le16((u16)skb->len);
......@@ -488,11 +493,11 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true);
iwl_mvm_disable_txq(mvm, tid_data->txq_id);
tid_data->state = IWL_AGG_OFF;
/*
* we can't hold the mutex - but since we are after a sequence
* point (call to iwl_trans_txq_disable), so we don't even need
* point (call to iwl_mvm_disable_txq()), so we don't even need
* a memory barrier.
*/
mvm->queue_to_mac80211[tid_data->txq_id] =
......@@ -868,6 +873,19 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
struct iwl_mvm_ba_notif *ba_notif,
struct iwl_mvm_tid_data *tid_data)
{
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc;
}
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
......@@ -954,21 +972,37 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
*/
info->flags |= IEEE80211_TX_STAT_ACK;
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc;
}
if (freed == 1)
iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
}
spin_unlock_bh(&mvmsta->lock);
/* We got a BA notif with 0 acked or scd_ssn didn't progress, which is
* possible (i.e. the first MPDU in the aggregation wasn't acked).
* Still it's important to update RS about sent vs. acked.
*/
if (skb_queue_empty(&reclaimed_skbs)) {
struct ieee80211_tx_info ba_info = {};
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
chanctx_conf =
rcu_dereference(mvmsta->vif->chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
ba_info.band = chanctx_conf->def.chan->band;
iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
}
out:
rcu_read_unlock();
while (!skb_queue_empty(&reclaimed_skbs)) {
......
......@@ -530,6 +530,52 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg)
{
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 1,
.window = cfg->frame_limit,
.sta_id = cfg->sta_id,
.ssn = cpu_to_le16(ssn),
.tx_fifo = cfg->fifo,
.aggregate = cfg->aggregate,
.flags = IWL_SCD_FLAGS_DQA_ENABLED,
.tid = cfg->tid,
.control = IWL_SCD_CONTROL_SET_SSN,
};
int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm,
"Failed to configure queue %d on FIFO %d\n",
queue, cfg->fifo);
}
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
iwl_mvm_is_dqa_supported(mvm) ? NULL : cfg);
}
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue)
{
iwl_trans_txq_disable(mvm->trans, queue,
!iwl_mvm_is_dqa_supported(mvm));
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 0,
};
int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, CMD_ASYNC,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
}
}
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
......
......@@ -275,6 +275,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
......@@ -318,6 +320,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
......
......@@ -2190,7 +2190,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
*/
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2));
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
......
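The last hunk corrects where the extracted step bits land in hw_rev for 8000-family devices. Below is a standalone before/after sketch (not part of the patch), assuming CSR_HW_REV_STEP() extracts bits 2..3 of hw_rev as in iwl-csr.h, and using a made-up example hw_rev value whose step sits in bits 0..1.

#include <stdio.h>

/* Assumption: CSR_HW_REV_STEP() reads the step from bits 2..3 of hw_rev,
 * matching its definition in iwl-csr.h. */
#define CSR_HW_REV_STEP(_val)	(((_val) & 0x0000000CU) >> 2)

int main(void)
{
	/* Example only: an 8000-family hw_rev with the step (B = 1) in bits 0..1. */
	unsigned int hw_rev = 0x0141;

	/* Old code: the extracted step ends up back in bits 0..1, so a later
	 * CSR_HW_REV_STEP(hw_rev) read returns 0 (A-step). */
	unsigned int rev_old = (hw_rev & 0xfff0) | CSR_HW_REV_STEP(hw_rev << 2);
	/* New code: shift the step into bits 2..3, where CSR_HW_REV_STEP() looks. */
	unsigned int rev_new = (hw_rev & 0xfff0) | (CSR_HW_REV_STEP(hw_rev << 2) << 2);

	printf("old step: %u, new step: %u\n",
	       CSR_HW_REV_STEP(rev_old), CSR_HW_REV_STEP(rev_new));	/* 0 and 1 */
	return 0;
}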