Commit 41effc2a authored by Johannes Berg

Merge remote-tracking branch 'iwlwifi-fixes/master' into HEAD

This is needed to resolve some conflicts that would otherwise
happen between wireless-next and the code here.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parents 6dbe51c2 2470b36e
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
        sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
     if (!(flags & CMD_ASYNC)) {
-        cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
+        cmd.flags |= CMD_WANT_SKB;
         might_sleep();
     }
...
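For context, CMD_WANT_SKB is now sufficient on its own for a synchronous command whose response the caller inspects. A minimal sketch of that pattern (hypothetical caller; it only uses the iwl_host_cmd fields and the iwl_free_resp contract that appear later in this diff):

/* Hypothetical sketch, not part of this commit: a synchronous host
 * command that keeps the response buffer. CMD_WANT_HCMD is gone; the
 * command queue entry itself (txq->entries[idx].cmd) serves instead. */
static int example_send_sync(struct iwl_trans *trans, u8 cmd_id,
                             const void *payload, u16 payload_len)
{
    struct iwl_host_cmd cmd = {
        .id = cmd_id,
        .flags = CMD_SYNC | CMD_WANT_SKB,
        .data = { payload, },
        .len = { payload_len, },
    };
    int ret = iwl_trans_send_cmd(trans, &cmd);

    if (ret)
        return ret;
    /* ... inspect cmd.resp_pkt here ... */
    iwl_free_resp(&cmd); /* mandatory with CMD_WANT_SKB */
    return 0;
}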
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
 TRACE_EVENT(iwlwifi_dev_hcmd,
     TP_PROTO(const struct device *dev,
              struct iwl_host_cmd *cmd, u16 total_size,
-             const void *hdr, size_t hdr_len),
-    TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
+             struct iwl_cmd_header *hdr),
+    TP_ARGS(dev, cmd, total_size, hdr),
     TP_STRUCT__entry(
         DEV_ENTRY
         __dynamic_array(u8, hcmd, total_size)
         __field(u32, flags)
     ),
     TP_fast_assign(
-        int i, offset = hdr_len;
+        int i, offset = sizeof(*hdr);
         DEV_ASSIGN;
         __entry->flags = cmd->flags;
-        memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
-        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+        memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
             if (!cmd->len[i])
                 continue;
-            if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
-                continue;
             memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
                    cmd->data[i], cmd->len[i]);
             offset += cmd->len[i];
...
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
 /* shared module parameters */
 struct iwl_mod_params iwlwifi_mod_params = {
-    .amsdu_size_8K = 1,
     .restart_fw = 1,
     .plcp_check = true,
     .bt_coex_active = true,
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
     "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
                    int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
 module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
...
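With the default flipped to 0, 8k A-MSDU buffers must now be requested explicitly at module load (e.g. modprobe iwlwifi amsdu_size_8K=1). A minimal sketch of how a consumer of this knob might pick a receive-buffer size (hypothetical helper, not part of this commit):

/* Hypothetical helper, not in this commit: choose the RX buffer size
 * from the amsdu_size_8K module parameter, which now defaults to 0. */
static inline u32 example_rx_buf_size(int amsdu_size_8K)
{
    return amsdu_size_8K ? 8 * 1024 : 4 * 1024;
}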
@@ -91,7 +91,7 @@ enum iwl_power_level {
  * @sw_crypto: using hardware encryption, default = 0
  * @disable_11n: disable 11n capabilities, default = 0,
  *    use IWL_DISABLE_HT_* constants
- * @amsdu_size_8K: enable 8K amsdu size, default = 1
+ * @amsdu_size_8K: enable 8K amsdu size, default = 0
  * @restart_fw: restart firmware, default = 1
  * @plcp_check: enable plcp health check, default = true
  * @wd_disable: enable stuck queue check, default = 0
...
@@ -136,12 +136,6 @@ struct iwl_calib_res_notif_phy_db {
     u8 data[];
 } __packed;
-#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
-static inline void iwl_phy_db_test_pic(__le32 pic)
-{
-    WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
-}
 struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
 {
     struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
@@ -260,11 +254,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
             (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
     }
-    /* Test PIC */
-    if (type != IWL_PHY_DB_CFG)
-        iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
-                              (size / sizeof(__le32)) - 1));
     IWL_DEBUG_INFO(phy_db->trans,
                    "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
                    __func__, __LINE__, type, size);
@@ -372,11 +361,6 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
         *size = entry->size;
     }
-    /* Test PIC */
-    if (type != IWL_PHY_DB_CFG)
-        iwl_phy_db_test_pic(*(((__le32 *)*data) +
-                              (*size / sizeof(__le32)) - 1));
     IWL_DEBUG_INFO(phy_db->trans,
                    "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
                    __func__, __LINE__, type, *size);
...
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
  * @CMD_ASYNC: Return right away and don't wait for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
  *    response. The caller needs to call iwl_free_resp when done.
- * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
- *    response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
- *    copied. The pointer passed to the response handler is in the transport
- *    ownership and don't need to be freed by the op_mode. This also means
- *    that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
     CMD_SYNC = 0,
     CMD_ASYNC = BIT(0),
     CMD_WANT_SKB = BIT(1),
-    CMD_WANT_HCMD = BIT(2),
-    CMD_ON_DEMAND = BIT(3),
+    CMD_ON_DEMAND = BIT(2),
 };
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-#define IWL_MAX_CMD_TFDS 2
+/*
+ * number of transfer buffers (fragments) per transmit frame descriptor;
+ * this is just the driver's idea, the hardware supports 20
+ */
+#define IWL_MAX_CMD_TBS_PER_TFD 2
 /**
  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
  * @id: id of the host command
  */
 struct iwl_host_cmd {
-    const void *data[IWL_MAX_CMD_TFDS];
+    const void *data[IWL_MAX_CMD_TBS_PER_TFD];
     struct iwl_rx_packet *resp_pkt;
     unsigned long _rx_page_addr;
     u32 _rx_page_order;
     int handler_status;
     u32 flags;
-    u16 len[IWL_MAX_CMD_TFDS];
-    u8 dataflags[IWL_MAX_CMD_TFDS];
+    u16 len[IWL_MAX_CMD_TBS_PER_TFD];
+    u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
     u8 id;
 };
...
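As a usage illustration of the renamed arrays (hypothetical command ID and payload names; the NOCOPY semantics are those documented for enum iwl_hcmd_dataflag):

/* Hypothetical sketch, not part of this commit: a command built from two
 * transfer buffers; the large second chunk is mapped directly (NOCOPY)
 * instead of being copied into the command buffer. */
struct iwl_host_cmd cmd = {
    .id = REPLY_EXAMPLE, /* hypothetical ID */
    .flags = CMD_SYNC,
    .data = { &small_hdr, big_payload, },
    .len = { sizeof(small_hdr), big_payload_len, },
    .dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
};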
@@ -61,6 +61,7 @@
  *
  *****************************************************************************/
+#include <linux/etherdevice.h>
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include "iwl-modparams.h"
@@ -192,6 +193,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                                         sizeof(wkc), &wkc);
         data->error = ret != 0;
+        mvm->ptk_ivlen = key->iv_len;
+        mvm->ptk_icvlen = key->icv_len;
+        mvm->gtk_ivlen = key->iv_len;
+        mvm->gtk_icvlen = key->icv_len;
         /* don't upload key again */
         goto out_unlock;
     }
@@ -304,9 +310,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
      */
     if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
         key->hw_key_idx = 0;
+        mvm->ptk_ivlen = key->iv_len;
+        mvm->ptk_icvlen = key->icv_len;
     } else {
         data->gtk_key_idx++;
         key->hw_key_idx = data->gtk_key_idx;
+        mvm->gtk_ivlen = key->iv_len;
+        mvm->gtk_icvlen = key->icv_len;
     }
     ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
@@ -649,6 +659,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
     /* We reprogram keys and shouldn't allocate new key indices */
     memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+    mvm->ptk_ivlen = 0;
+    mvm->ptk_icvlen = 0;
+    mvm->ptk_ivlen = 0;
+    mvm->ptk_icvlen = 0;
     /*
      * The D3 firmware still hardcodes the AP station ID for the
      * BSS we're associated with as 0. As a result, we have to move
@@ -783,7 +798,6 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
     struct iwl_wowlan_status *status;
     u32 reasons;
     int ret, len;
-    bool pkt8023 = false;
     struct sk_buff *pkt = NULL;
     iwl_trans_read_mem_bytes(mvm->trans, base,
@@ -824,7 +838,8 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
     status = (void *)cmd.resp_pkt->data;
     if (len - sizeof(struct iwl_cmd_header) !=
-        sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+        sizeof(*status) +
+        ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
         IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
         goto out;
     }
@@ -836,61 +851,96 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
         goto report;
     }
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+    if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
         wakeup.magic_pkt = true;
-        pkt8023 = true;
-    }
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+    if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
         wakeup.pattern_idx =
             le16_to_cpu(status->pattern_number);
-        pkt8023 = true;
-    }
     if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
                    IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
         wakeup.disconnect = true;
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+    if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
         wakeup.gtk_rekey_failure = true;
-        pkt8023 = true;
-    }
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+    if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
         wakeup.rfkill_release = true;
-        pkt8023 = true;
-    }
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+    if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
         wakeup.eap_identity_req = true;
-        pkt8023 = true;
-    }
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+    if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
         wakeup.four_way_handshake = true;
-        pkt8023 = true;
-    }
     if (status->wake_packet_bufsize) {
-        u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
-        u32 pktlen = le32_to_cpu(status->wake_packet_length);
+        int pktsize = le32_to_cpu(status->wake_packet_bufsize);
+        int pktlen = le32_to_cpu(status->wake_packet_length);
+        const u8 *pktdata = status->wake_packet;
+        struct ieee80211_hdr *hdr = (void *)pktdata;
+        int truncated = pktlen - pktsize;
+        /* this would be a firmware bug */
+        if (WARN_ON_ONCE(truncated < 0))
+            truncated = 0;
-        if (pkt8023) {
+        if (ieee80211_is_data(hdr->frame_control)) {
+            int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+            int ivlen = 0, icvlen = 4; /* also FCS */
             pkt = alloc_skb(pktsize, GFP_KERNEL);
             if (!pkt)
                 goto report;
-            memcpy(skb_put(pkt, pktsize), status->wake_packet,
-                   pktsize);
+            memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
+            pktdata += hdrlen;
+            pktsize -= hdrlen;
+            if (ieee80211_has_protected(hdr->frame_control)) {
+                if (is_multicast_ether_addr(hdr->addr1)) {
+                    ivlen = mvm->gtk_ivlen;
+                    icvlen += mvm->gtk_icvlen;
+                } else {
+                    ivlen = mvm->ptk_ivlen;
+                    icvlen += mvm->ptk_icvlen;
+                }
+            }
+            /* if truncated, FCS/ICV is (partially) gone */
+            if (truncated >= icvlen) {
+                icvlen = 0;
+                truncated -= icvlen;
+            } else {
+                icvlen -= truncated;
+                truncated = 0;
+            }
+            pktsize -= ivlen + icvlen;
+            pktdata += ivlen;
+            memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
             if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
                 goto report;
             wakeup.packet = pkt->data;
             wakeup.packet_present_len = pkt->len;
-            wakeup.packet_len = pkt->len - (pktlen - pktsize);
+            wakeup.packet_len = pkt->len - truncated;
             wakeup.packet_80211 = false;
         } else {
+            int fcslen = 4;
+            if (truncated >= 4) {
+                truncated -= 4;
+                fcslen = 0;
+            } else {
+                fcslen -= truncated;
+                truncated = 0;
+            }
+            pktsize -= fcslen;
             wakeup.packet = status->wake_packet;
             wakeup.packet_present_len = pktsize;
-            wakeup.packet_len = pktlen;
+            wakeup.packet_len = pktlen - truncated;
             wakeup.packet_80211 = true;
         }
     }
...
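To make the truncation bookkeeping above concrete, a worked example with assumed numbers (illustrative only, expressed as a comment):

/* Assume the firmware reports a 130-byte frame (wake_packet_length) but
 * buffered only 80 bytes (wake_packet_bufsize), so truncated = 50.
 * For a protected unicast QoS data frame with a 26-byte 802.11 header
 * and CCMP keys (ptk_ivlen = 8, ptk_icvlen = 8): ivlen = 8 and
 * icvlen = 4 (FCS) + 8 = 12. Since truncated (50) >= icvlen (12), the
 * ICV/FCS bytes are entirely absent from the buffer, icvlen becomes 0,
 * and the copied payload is pktsize = 80 - 26 - 8 - 0 = 46 bytes; the
 * remaining 'truncated' count is then subtracted from the reported
 * wakeup.packet_len so userspace can tell how much is missing. */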
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
 #define IWL_RX_INFO_PHY_CNT 8
 #define IWL_RX_INFO_AGC_IDX 1
 #define IWL_RX_INFO_RSSI_AB_IDX 2
-#define IWL_RX_INFO_RSSI_C_IDX 3
-#define IWL_OFDM_AGC_DB_MSK 0xfe00
-#define IWL_OFDM_AGC_DB_POS 9
+#define IWL_OFDM_AGC_A_MSK 0x0000007f
+#define IWL_OFDM_AGC_A_POS 0
+#define IWL_OFDM_AGC_B_MSK 0x00003f80
+#define IWL_OFDM_AGC_B_POS 7
+#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWL_OFDM_AGC_CODE_POS 20
 #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
-#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
 #define IWL_OFDM_RSSI_A_POS 0
+#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
 #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
-#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
 #define IWL_OFDM_RSSI_B_POS 16
-#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
-#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
-#define IWL_OFDM_RSSI_C_POS 0
+#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
 /**
  * struct iwl_rx_phy_info - phy info
...
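The per-chain energy and gain values are packed into single 32-bit words of the non_cfg_phy array; a short sketch of how these masks are applied (mirroring the rx.c change later in this diff):

/* Illustrative only: unpacking the per-chain AGC values,
 * given a struct iwl_rx_phy_info *phy_info. */
u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
u32 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; /* bits 0-6 */
u32 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; /* bits 7-13 */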
@@ -79,17 +79,8 @@
 #define UCODE_VALID_OK cpu_to_le32(0x1)
 /* Default calibration values for WkP - set to INIT image w/o running */
-static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
-                                                 0x00, 0x18, 0x00 };
-static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
-                                             0x7f, 0x7f, 0x7f };
-static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
-static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
-                                             0x00 };
-static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
 static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
 static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
-static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
 struct iwl_calib_default_data {
     u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
 #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
 static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
-    [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
-    [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
-    [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
-    [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
     [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
-    [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
     [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
 };
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
     return 0;
 }
-#define IWL_HW_REV_ID_RAINBOW 0x2
-#define IWL_PROJ_TYPE_LHP 0x5
-static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
-{
-    struct iwl_nvm_data *data = mvm->nvm_data;
-    /* Temp calls to static definitions, will be changed to CSR calls */
-    u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
-    u8 project_type = IWL_PROJ_TYPE_LHP;
-    return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
-        (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
-        (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
-}
 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 {
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
     enum iwl_ucode_type ucode_type = mvm->cur_ucode;
     /* Set parameters */
-    phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
+    phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
     phy_cfg_cmd.calib_control.event_trigger =
         mvm->fw->default_calib[ucode_type].event_trigger;
     phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
                                 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 }
-/* Starting with the new PHY DB implementation - New calibs are enabled */
-/* Value - 0x405e7 */
-#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
-                                     IWL_CALIB_CFG_TEMPERATURE_IDX |\
-                                     IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                     IWL_CALIB_CFG_DC_IDX |\
-                                     IWL_CALIB_CFG_BB_FILTER_IDX |\
-                                     IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
-                                     IWL_CALIB_CFG_TX_IQ_IDX |\
-                                     IWL_CALIB_CFG_RX_IQ_IDX |\
-                                     IWL_CALIB_CFG_AGC_IDX)
-#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
-/* Value 0x41567 */
-#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
-                                    IWL_CALIB_CFG_TEMPERATURE_IDX |\
-                                    IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                    IWL_CALIB_CFG_BB_FILTER_IDX |\
-                                    IWL_CALIB_CFG_DC_IDX |\
-                                    IWL_CALIB_CFG_TX_IQ_IDX |\
-                                    IWL_CALIB_CFG_RX_IQ_IDX |\
-                                    IWL_CALIB_CFG_SENSITIVITY_IDX |\
-                                    IWL_CALIB_CFG_AGC_IDX)
-#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
-                                     IWL_CALIB_CFG_TEMPERATURE_IDX |\
-                                     IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                     IWL_CALIB_CFG_TX_PWR_IDX |\
-                                     IWL_CALIB_CFG_DC_IDX |\
-                                     IWL_CALIB_CFG_TX_IQ_IDX |\
-                                     IWL_CALIB_CFG_SENSITIVITY_IDX)
-/*
- * Sets the calibrations trigger values that will be sent to the FW for runtime
- * and init calibrations.
- * The ones given in the FW TLV are not correct.
- */
-static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
-{
-    struct iwl_tlv_calib_ctrl default_calib;
-    /*
-     * WkP FW TLV calib bits are wrong, overwrite them.
-     * This defines the dynamic calibrations which are implemented in the
-     * uCode both for init(flow) calculation and event driven calibs.
-     */
-    /* Init Image */
-    default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
-    default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
-    if (default_calib.event_trigger !=
-        mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
-        IWL_ERR(mvm,
-                "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
-                mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
-                default_calib.event_trigger);
-    if (default_calib.flow_trigger !=
-        mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
-        IWL_ERR(mvm,
-                "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
-                mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
-                default_calib.flow_trigger);
-    memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
-           &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
-    IWL_ERR(mvm,
-            "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
-            default_calib.event_trigger,
-            default_calib.flow_trigger);
-    /* Run time image */
-    default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
-    default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
-    if (default_calib.event_trigger !=
-        mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
-        IWL_ERR(mvm,
-                "Updating the event calib for RT image: 0x%x -> 0x%x\n",
-                mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
-                default_calib.event_trigger);
-    if (default_calib.flow_trigger !=
-        mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
-        IWL_ERR(mvm,
-                "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
-                mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
-                default_calib.flow_trigger);
-    memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
-           &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
-    IWL_ERR(mvm,
-            "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
-            default_calib.event_trigger,
-            default_calib.flow_trigger);
-}
 static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
 {
     u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
     ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
     WARN_ON(ret);
-    /* Override the calibrations from TLV and the const of fw */
-    iwl_set_default_calib_trigger(mvm);
+    /* Send TX valid antennas before triggering calibrations */
+    ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+    if (ret)
+        goto error;
     /* WkP doesn't have all calibrations, need to set default values */
     if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
...
@@ -557,11 +557,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
     return ret;
 }
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
-                                         struct ieee80211_vif *vif)
+static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif)
 {
-    struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
     u32 tfd_msk = 0, ac;
     for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -594,12 +592,21 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
          */
         flush_work(&mvm->sta_drained_wk);
     }
+}
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif)
+{
+    struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+    iwl_mvm_prepare_mac_removal(mvm, vif);
     mutex_lock(&mvm->mutex);
     /*
      * For AP/GO interface, the tear down of the resources allocated to the
-     * interface should be handled as part of the bss_info_changed flow.
+     * interface is handled as part of the stop_ap flow.
      */
     if (vif->type == NL80211_IFTYPE_AP) {
         iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
@@ -763,6 +770,8 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
     struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+    iwl_mvm_prepare_mac_removal(mvm, vif);
     mutex_lock(&mvm->mutex);
     mvmvif->ap_active = false;
...
@@ -80,7 +80,8 @@
 #define IWL_INVALID_MAC80211_QUEUE 0xff
 #define IWL_MVM_MAX_ADDRESSES 2
-#define IWL_RSSI_OFFSET 44
+/* RSSI offset for WkP */
+#define IWL_RSSI_OFFSET 50
 enum iwl_mvm_tx_fifo {
     IWL_MVM_TX_FIFO_BK = 0,
@@ -327,6 +328,10 @@ struct iwl_mvm {
     struct led_classdev led;
     struct ieee80211_vif *p2p_device_vif;
+#ifdef CONFIG_PM_SLEEP
+    int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+#endif
 };
 /* Extract MVM priv from op_mode and _hw */
...
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
     ieee80211_free_txskb(mvm->hw, skb);
 }
-static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
 {
-    struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    iwl_mvm_dump_nic_error_log(mvm);
     iwl_abort_notification_waits(&mvm->notif_wait);
     /*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
     }
 }
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+    struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+    iwl_mvm_dump_nic_error_log(mvm);
+    iwl_mvm_nic_restart(mvm);
+}
 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
 {
+    struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
     WARN_ON(1);
+    iwl_mvm_nic_restart(mvm);
 }
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
...
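Both firmware-error paths now converge on the same recovery logic; schematically (illustrative comment matching the handlers above; the restart/abort details live in the elided body of iwl_mvm_nic_restart()):

/* error flow (sketch):
 *   FW error IRQ   -> iwl_mvm_nic_error()       -> dump error log -> restart
 *   cmd queue full -> iwl_mvm_cmd_queue_full()  -> WARN_ON(1)     -> restart
 * where "restart" is iwl_mvm_nic_restart(), which aborts pending
 * notification waits before attempting firmware recovery. */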
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
                              struct iwl_rx_phy_info *phy_info)
 {
-    u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
+    int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
+    int rssi_all_band_a, rssi_all_band_b;
+    u32 agc_a, agc_b, max_agc;
     u32 val;
-    /* Find max rssi among 3 possible receivers.
+    /* Find max rssi among 2 possible receivers.
      * These values are measured by the Digital Signal Processor (DSP).
      * They should stay fairly constant even as the signal strength varies,
      * if the radio's Automatic Gain Control (AGC) is working right.
      * AGC value (see below) will provide the "interesting" info.
      */
+    val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
+    agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
+    agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
+    max_agc = max_t(u32, agc_a, agc_b);
     val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
     rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
     rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-    val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
-    rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
-    val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
-    agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
+    rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
+                        IWL_OFDM_RSSI_ALLBAND_A_POS;
+    rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
+                        IWL_OFDM_RSSI_ALLBAND_B_POS;
-    max_rssi = max_t(u32, rssi_a, rssi_b);
-    max_rssi = max_t(u32, max_rssi, rssi_c);
+    /*
+     * dBm = rssi dB - agc dB - constant.
+     * Higher AGC (higher radio gain) means lower signal.
+     */
+    rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
+    rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
+    max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
-    IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-                    rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
+    IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
+                    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
-    /* dBm = max_rssi dB - agc dB - constant.
-     * Higher AGC (higher radio gain) means lower signal. */
-    return max_rssi - agc_db - IWL_RSSI_OFFSET;
+    return max_rssi_dbm;
 }
 /*
...
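A numeric example of the new per-chain conversion (assumed raw values, illustrative only):

/* Assume rssi_a = 70, agc_a = 40, rssi_b = 65, agc_b = 25 (raw units).
 * With IWL_RSSI_OFFSET = 50:
 *   rssi_a_dbm = 70 - 50 - 40 = -20 dBm
 *   rssi_b_dbm = 65 - 50 - 25 = -10 dBm
 * max_rssi_dbm = -10: chain B hears the frame louder even though its raw
 * in-band energy is lower, because its AGC gain is lower. The old code
 * picked the max raw value first and applied a single AGC term, which
 * could choose the wrong chain. */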
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
     u16 txq_id;
     int err;
+    /*
+     * If mac80211 is cleaning its state, then say that we finished since
+     * our state has been cleared anyway.
+     */
+    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+        return 0;
+    }
     spin_lock_bh(&mvmsta->lock);
     txq_id = tid_data->txq_id;
...
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
         /* Single frame failure in an AMPDU queue => send BAR */
         if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
-            !(info->flags & IEEE80211_TX_STAT_ACK)) {
-            /* there must be only one skb in the skb_list */
-            WARN_ON_ONCE(skb_freed > 1 ||
-                         !skb_queue_empty(&skbs));
+            !(info->flags & IEEE80211_TX_STAT_ACK))
             info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-        }
         /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
         if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
...
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
 struct iwl_cmd_meta {
     /* only for SYNC commands, iff the reply skb is wanted */
     struct iwl_host_cmd *source;
-    DEFINE_DMA_UNMAP_ADDR(mapping);
-    DEFINE_DMA_UNMAP_LEN(len);
     u32 flags;
 };
@@ -182,19 +178,39 @@ struct iwl_queue {
 #define TFD_TX_CMD_SLOTS 256
 #define TFD_CMD_SLOTS 32
+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how big the
+ * first TB must be to include the scratch buffer. Since
+ * the scratch is 4 bytes at offset 12, it's 16 now. If we
+ * make it bigger then allocations will be bigger and copy
+ * slower, so that's probably not useful.
+ */
+#define IWL_HCMD_SCRATCHBUF_SIZE 16
 struct iwl_pcie_txq_entry {
     struct iwl_device_cmd *cmd;
-    struct iwl_device_cmd *copy_cmd;
     struct sk_buff *skb;
     /* buffer to free after command completes */
     const void *free_buf;
     struct iwl_cmd_meta meta;
 };
+struct iwl_pcie_txq_scratch_buf {
+    struct iwl_cmd_header hdr;
+    u8 buf[8];
+    __le32 scratch;
+};
 /**
  * struct iwl_txq - Tx Queue for DMA
  * @q: generic Rx/Tx queue descriptor
  * @tfds: transmit frame descriptors (DMA memory)
+ * @scratchbufs: start of command headers, including scratch buffers, for
+ *    the writeback -- this is DMA memory and an array holding one buffer
+ *    for each command on the queue
+ * @scratchbufs_dma: DMA address for the scratchbufs start
  * @entries: transmit entries (driver state)
  * @lock: queue lock
  * @stuck_timer: timer that fires if queue gets stuck
@@ -208,6 +224,8 @@ struct iwl_pcie_txq_entry {
 struct iwl_txq {
     struct iwl_queue q;
     struct iwl_tfd *tfds;
+    struct iwl_pcie_txq_scratch_buf *scratchbufs;
+    dma_addr_t scratchbufs_dma;
     struct iwl_pcie_txq_entry *entries;
     spinlock_t lock;
     struct timer_list stuck_timer;
@@ -216,6 +234,13 @@ struct iwl_txq {
     u8 active;
 };
+static inline dma_addr_t
+iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
+{
+    return txq->scratchbufs_dma +
+           sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
+}
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
...
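The 16-byte figure is fixed by the struct layout: a 4-byte command header, 8 pad bytes, then the 4-byte scratch word at offset 12. The BUILD_BUG_ONs added to tx.c later in this diff pin this down; expressed directly:

/* Layout arithmetic (assuming a 4-byte struct iwl_cmd_header):
 *   offsetof(struct iwl_pcie_txq_scratch_buf, hdr)     == 0  (4 bytes)
 *   offsetof(struct iwl_pcie_txq_scratch_buf, buf)     == 4  (8 bytes)
 *   offsetof(struct iwl_pcie_txq_scratch_buf, scratch) == 12 (4 bytes)
 *   sizeof(struct iwl_pcie_txq_scratch_buf)            == 16
 * which is exactly IWL_HCMD_SCRATCHBUF_SIZE, and implies the scratch
 * field sits 8 bytes into struct iwl_tx_cmd. */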
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
     index = SEQ_TO_INDEX(sequence);
     cmd_index = get_cmd_index(&txq->q, index);
-    if (reclaim) {
-        struct iwl_pcie_txq_entry *ent;
-        ent = &txq->entries[cmd_index];
-        cmd = ent->copy_cmd;
-        WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
-    } else {
+    if (reclaim)
+        cmd = txq->entries[cmd_index].cmd;
+    else
         cmd = NULL;
-    }
     err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
     if (reclaim) {
-        /* The original command isn't needed any more */
-        kfree(txq->entries[cmd_index].copy_cmd);
-        txq->entries[cmd_index].copy_cmd = NULL;
-        /* nor is the duplicated part of the command */
         kfree(txq->entries[cmd_index].free_buf);
         txq->entries[cmd_index].free_buf = NULL;
     }
...
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
     }
     for (i = q->read_ptr; i != q->write_ptr;
-         i = iwl_queue_inc_wrap(i, q->n_bd)) {
-        struct iwl_tx_cmd *tx_cmd =
-            (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+         i = iwl_queue_inc_wrap(i, q->n_bd))
         IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-            get_unaligned_le32(&tx_cmd->scratch));
-    }
+            le32_to_cpu(txq->scratchbufs[i].scratch));
     iwl_op_mode_nic_error(trans->op_mode);
 }
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 }
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
-                               struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
-                               enum dma_data_direction dma_dir)
+                               struct iwl_cmd_meta *meta,
+                               struct iwl_tfd *tfd)
 {
     int i;
     int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
         return;
     }
-    /* Unmap tx_cmd */
-    if (num_tbs)
-        dma_unmap_single(trans->dev,
-                         dma_unmap_addr(meta, mapping),
-                         dma_unmap_len(meta, len),
-                         DMA_BIDIRECTIONAL);
-    /* Unmap chunks, if any. */
+    /* first TB is never freed - it's the scratchbuf data */
     for (i = 1; i < num_tbs; i++)
         dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-                         iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
+                         iwl_pcie_tfd_tb_get_len(tfd, i),
+                         DMA_TO_DEVICE);
     tfd->num_tbs = 0;
 }
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-                                  enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
     struct iwl_tfd *tfd_tmp = txq->tfds;
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
     lockdep_assert_held(&txq->lock);
     /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-    iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-                       dma_dir);
+    iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
     /* free SKB */
     if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 {
     struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+    size_t scratchbuf_sz;
     int i;
     if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
         IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
         goto error;
     }
+    BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
+    BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
+                 sizeof(struct iwl_cmd_header) +
+                 offsetof(struct iwl_tx_cmd, scratch));
+    scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+    txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
+                                          &txq->scratchbufs_dma,
+                                          GFP_KERNEL);
+    if (!txq->scratchbufs)
+        goto err_free_tfds;
     txq->q.id = txq_id;
     return 0;
+err_free_tfds:
+    dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
 error:
     if (txq->entries && txq_id == trans_pcie->cmd_queue)
         for (i = 0; i < slots_num; i++)
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
     struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_txq *txq = &trans_pcie->txq[txq_id];
     struct iwl_queue *q = &txq->q;
-    enum dma_data_direction dma_dir;
     if (!q->n_bd)
         return;
-    /* In the command queue, all the TBs are mapped as BIDI
-     * so unmap them as such.
-     */
-    if (txq_id == trans_pcie->cmd_queue)
-        dma_dir = DMA_BIDIRECTIONAL;
-    else
-        dma_dir = DMA_TO_DEVICE;
     spin_lock_bh(&txq->lock);
     while (q->write_ptr != q->read_ptr) {
-        iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+        iwl_pcie_txq_free_tfd(trans, txq);
         q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
     }
     spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
     if (txq_id == trans_pcie->cmd_queue)
         for (i = 0; i < txq->q.n_window; i++) {
             kfree(txq->entries[i].cmd);
-            kfree(txq->entries[i].copy_cmd);
             kfree(txq->entries[i].free_buf);
         }
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
         dma_free_coherent(dev, sizeof(struct iwl_tfd) *
                           txq->q.n_bd, txq->tfds, txq->q.dma_addr);
         txq->q.dma_addr = 0;
+        dma_free_coherent(dev,
+                          sizeof(*txq->scratchbufs) * txq->q.n_window,
+                          txq->scratchbufs, txq->scratchbufs_dma);
     }
     kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
         iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
-        iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+        iwl_pcie_txq_free_tfd(trans, txq);
     }
     iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,20 +1153,37 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
     void *dup_buf = NULL;
     dma_addr_t phys_addr;
     int idx;
-    u16 copy_size, cmd_size;
+    u16 copy_size, cmd_size, scratch_size;
     bool had_nocopy = false;
     int i;
     u32 cmd_pos;
+    const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+    u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
     copy_size = sizeof(out_cmd->hdr);
     cmd_size = sizeof(out_cmd->hdr);
     /* need one for the header if the first is NOCOPY */
-    BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+    BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
-    for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+    for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+        cmddata[i] = cmd->data[i];
+        cmdlen[i] = cmd->len[i];
         if (!cmd->len[i])
             continue;
+        /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+        if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+            int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+            if (copy > cmdlen[i])
+                copy = cmdlen[i];
+            cmdlen[i] -= copy;
+            cmddata[i] += copy;
+            copy_size += copy;
+        }
         if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
             had_nocopy = true;
             if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1203,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                 goto free_dup_buf;
             }
-            dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+            dup_buf = kmemdup(cmddata[i], cmdlen[i],
                               GFP_ATOMIC);
             if (!dup_buf)
                 return -ENOMEM;
@@ -1195,7 +1213,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                 idx = -EINVAL;
                 goto free_dup_buf;
             }
-            copy_size += cmd->len[i];
+            copy_size += cmdlen[i];
         }
         cmd_size += cmd->len[i];
     }
@@ -1242,30 +1260,30 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
     /* and copy the data that needs to be copied */
     cmd_pos = offsetof(struct iwl_device_cmd, payload);
-    for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-        if (!cmd->len[i])
+    copy_size = sizeof(out_cmd->hdr);
+    for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+        int copy = 0;
+        if (!cmd->len)
             continue;
-        if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
-                                 IWL_HCMD_DFL_DUP))
-            break;
-        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
-        cmd_pos += cmd->len[i];
-    }
-    WARN_ON_ONCE(txq->entries[idx].copy_cmd);
-    /*
-     * since out_cmd will be the source address of the FH, it will write
-     * the retry count there. So when the user needs to receive the HCMD
-     * that corresponds to the response in the response handler, it needs
-     * to set CMD_WANT_HCMD.
-     */
-    if (cmd->flags & CMD_WANT_HCMD) {
-        txq->entries[idx].copy_cmd =
-            kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
-        if (unlikely(!txq->entries[idx].copy_cmd)) {
-            idx = -ENOMEM;
-            goto out;
+        /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+        if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+            copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+            if (copy > cmd->len[i])
+                copy = cmd->len[i];
+        }
+        /* copy everything if not nocopy/dup */
+        if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+                                   IWL_HCMD_DFL_DUP)))
+            copy = cmd->len[i];
+        if (copy) {
+            memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+            cmd_pos += copy;
+            copy_size += copy;
         }
     }
@@ -1275,22 +1293,35 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
              out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
              cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
-    phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
-                               DMA_BIDIRECTIONAL);
-    if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-        idx = -ENOMEM;
-        goto out;
-    }
-    dma_unmap_addr_set(out_meta, mapping, phys_addr);
-    dma_unmap_len_set(out_meta, len, copy_size);
+    /* start the TFD with the scratchbuf */
+    scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+    memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+    iwl_pcie_txq_build_tfd(trans, txq,
+                           iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+                           scratch_size, 1);
+    /* map first command fragment, if any remains */
+    if (copy_size > scratch_size) {
+        phys_addr = dma_map_single(trans->dev,
+                                   ((u8 *)&out_cmd->hdr) + scratch_size,
+                                   copy_size - scratch_size,
+                                   DMA_TO_DEVICE);
+        if (dma_mapping_error(trans->dev, phys_addr)) {
+            iwl_pcie_tfd_unmap(trans, out_meta,
+                               &txq->tfds[q->write_ptr]);
+            idx = -ENOMEM;
+            goto out;
+        }
-    iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
+        iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+                               copy_size - scratch_size, 0);
+    }
-    for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-        const void *data = cmd->data[i];
-        if (!cmd->len[i])
+    /* map the remaining (adjusted) nocopy/dup fragments */
+    for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+        const void *data = cmddata[i];
+        if (!cmdlen[i])
             continue;
         if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                    IWL_HCMD_DFL_DUP)))
@@ -1298,16 +1329,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
         if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
             data = dup_buf;
         phys_addr = dma_map_single(trans->dev, (void *)data,
-                                   cmd->len[i], DMA_BIDIRECTIONAL);
+                                   cmdlen[i], DMA_TO_DEVICE);
         if (dma_mapping_error(trans->dev, phys_addr)) {
             iwl_pcie_tfd_unmap(trans, out_meta,
-                               &txq->tfds[q->write_ptr],
-                               DMA_BIDIRECTIONAL);
+                               &txq->tfds[q->write_ptr]);
             idx = -ENOMEM;
             goto out;
         }
-        iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
+        iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
     }
     out_meta->flags = cmd->flags;
@@ -1317,8 +1347,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
     txq->need_update = 1;
-    trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
-                           &out_cmd->hdr, copy_size);
+    trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
     /* start timer if queue currently empty */
     if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
@@ -1377,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
     cmd = txq->entries[cmd_index].cmd;
     meta = &txq->entries[cmd_index].meta;
-    iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+    iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
     /* Input error checking is done when commands are added to queue. */
     if (meta->flags & CMD_WANT_SKB) {
@@ -1556,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
     struct iwl_cmd_meta *out_meta;
     struct iwl_txq *txq;
     struct iwl_queue *q;
-    dma_addr_t phys_addr = 0;
-    dma_addr_t txcmd_phys;
-    dma_addr_t scratch_phys;
-    u16 len, firstlen, secondlen;
+    dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+    void *tb1_addr;
+    u16 len, tb1_len, tb2_len;
     u8 wait_write_ptr = 0;
     __le16 fc = hdr->frame_control;
     u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1597,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                     INDEX_TO_SEQ(q->write_ptr)));
+    tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+    scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+                   offsetof(struct iwl_tx_cmd, scratch);
+    tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+    tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
     /* Set up first empty entry in queue's array of Tx/cmd buffers */
     out_meta = &txq->entries[q->write_ptr].meta;
     /*
-     * Use the first empty entry in this queue's command buffer array
-     * to contain the Tx command and MAC header concatenated together
-     * (payload data will be in another buffer).
-     * Size of this varies, due to varying MAC header length.
-     * If end is not dword aligned, we'll have 2 extra bytes at the end
-     * of the MAC header (device reads on dword boundaries).
-     * We'll tell device about this padding later.
+     * The second TB (tb1) points to the remainder of the TX command
+     * and the 802.11 header - dword aligned size
+     * (This calculation modifies the TX command, so do it before the
+     * setup of the first TB)
      */
-    len = sizeof(struct iwl_tx_cmd) +
-        sizeof(struct iwl_cmd_header) + hdr_len;
-    firstlen = (len + 3) & ~3;
+    len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+          hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+    tb1_len = (len + 3) & ~3;
     /* Tell NIC about any 2-byte padding after MAC header */
-    if (firstlen != len)
+    if (tb1_len != len)
         tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-    /* Physical address of this Tx command's header (not MAC header!),
-     * within command buffer array. */
-    txcmd_phys = dma_map_single(trans->dev,
-                                &dev_cmd->hdr, firstlen,
-                                DMA_BIDIRECTIONAL);
-    if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-        goto out_err;
-    dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-    dma_unmap_len_set(out_meta, len, firstlen);
+    /* The first TB points to the scratchbuf data - min_copy bytes */
+    memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+           IWL_HCMD_SCRATCHBUF_SIZE);
+    iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+                           IWL_HCMD_SCRATCHBUF_SIZE, 1);
-    if (!ieee80211_has_morefrags(fc)) {
-        txq->need_update = 1;
-    } else {
-        wait_write_ptr = 1;
-        txq->need_update = 0;
-    }
+    /* there must be data left over for TB1 or this code must be changed */
+    BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
+    /* map the data for TB1 */
+    tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+    tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+    if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+        goto out_err;
+    iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
-    /* Set up TFD's 2nd entry to point directly to remainder of skb,
-     * if any (802.11 null frames have no payload). */
-    secondlen = skb->len - hdr_len;
-    if (secondlen > 0) {
-        phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
-                                   secondlen, DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-            dma_unmap_single(trans->dev,
-                             dma_unmap_addr(out_meta, mapping),
-                             dma_unmap_len(out_meta, len),
-                             DMA_BIDIRECTIONAL);
+    /*
+     * Set up TFD's third entry to point directly to remainder
+     * of skb, if any (802.11 null frames have no payload).
+     */
+    tb2_len = skb->len - hdr_len;
+    if (tb2_len > 0) {
+        dma_addr_t tb2_phys = dma_map_single(trans->dev,
+                                             skb->data + hdr_len,
+                                             tb2_len, DMA_TO_DEVICE);
+        if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+            iwl_pcie_tfd_unmap(trans, out_meta,
+                               &txq->tfds[q->write_ptr]);
             goto out_err;
         }
+        iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
     }
-    /* Attach buffers to TFD */
-    iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
-    if (secondlen > 0)
-        iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
-    scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-                   offsetof(struct iwl_tx_cmd, scratch);
-    /* take back ownership of DMA buffer to enable update */
-    dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-                            DMA_BIDIRECTIONAL);
-    tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-    tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
     /* Set up entry for this TFD in Tx byte-count array */
     iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
-    dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-                               DMA_BIDIRECTIONAL);
     trace_iwlwifi_dev_tx(trans->dev, skb,
                          &txq->tfds[txq->q.write_ptr],
                          sizeof(struct iwl_tfd),
-                         &dev_cmd->hdr, firstlen,
-                         skb->data + hdr_len, secondlen);
+                         &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+                         skb->data + hdr_len, tb2_len);
     trace_iwlwifi_dev_tx_data(trans->dev, skb,
-                              skb->data + hdr_len, secondlen);
+                              skb->data + hdr_len, tb2_len);
+    if (!ieee80211_has_morefrags(fc)) {
+        txq->need_update = 1;
+    } else {
+        wait_write_ptr = 1;
+        txq->need_update = 0;
+    }
     /* start timer if queue currently empty */
     if (txq->need_update && q->read_ptr == q->write_ptr &&
...
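A worked example of the new TB0/TB1 split for a QoS data frame (assumed sizes, illustrative only):

/* Assume sizeof(struct iwl_tx_cmd) == 164, sizeof(struct iwl_cmd_header)
 * == 4, and a 26-byte 802.11 QoS header:
 *   len     = 164 + 4 + 26 - IWL_HCMD_SCRATCHBUF_SIZE(16) = 178
 *   tb1_len = (178 + 3) & ~3 = 180  (not equal to len, so MH_PAD is set)
 * TB0 then carries the first 16 bytes of the command (header plus the
 * start of the TX command, including the scratch field the FH writes
 * back to), TB1 maps the remaining 180 bytes of command + MAC header,
 * and TB2 maps the frame payload directly from the skb. */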