Commit e23535ca authored by John W. Linville

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6 into for-davem

parents 0c1ad04a 51e65257
@@ -1218,10 +1218,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
          * receive commit_rxon request
          * abort any previous channel switch if still in process
          */
-        if (priv->switch_rxon.switch_in_progress &&
-            (priv->switch_rxon.channel != ctx->staging.channel)) {
+        if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+            (priv->switch_channel != ctx->staging.channel)) {
                 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-                        le16_to_cpu(priv->switch_rxon.channel));
+                        le16_to_cpu(priv->switch_channel));
                 iwl_legacy_chswitch_done(priv, false);
         }
@@ -1237,7 +1237,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
                 iwl_legacy_print_rx_config_cmd(priv, ctx);
-                return 0;
+                goto set_tx_power;
         }
         /* If we are currently associated and the new config requires
@@ -1317,6 +1317,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
         iwl4965_init_sensitivity(priv);
+set_tx_power:
         /* If we issue a new RXON command which required a tune then we must
          * send a new TXPOWER command or we won't be able to Tx any frames */
         ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@@ -1403,9 +1404,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
                 return rc;
         }
-        priv->switch_rxon.channel = cmd.channel;
-        priv->switch_rxon.switch_in_progress = true;
-
         return iwl_legacy_send_cmd_pdu(priv,
                            REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 }
...
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                 return;
-        if (priv->switch_rxon.switch_in_progress) {
+        if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                 ieee80211_chswitch_done(ctx->vif, is_success);
-                mutex_lock(&priv->mutex);
-                priv->switch_rxon.switch_in_progress = false;
-                mutex_unlock(&priv->mutex);
-        }
 }
 EXPORT_SYMBOL(iwl_legacy_chswitch_done);
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
         struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
         struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-        if (priv->switch_rxon.switch_in_progress) {
-                if (!le32_to_cpu(csa->status) &&
-                    (csa->channel == priv->switch_rxon.channel)) {
-                        rxon->channel = csa->channel;
-                        ctx->staging.channel = csa->channel;
-                        IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-                              le16_to_cpu(csa->channel));
-                        iwl_legacy_chswitch_done(priv, true);
-                } else {
-                        IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-                              le16_to_cpu(csa->channel));
-                        iwl_legacy_chswitch_done(priv, false);
-                }
+        if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+                return;
+
+        if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+                rxon->channel = csa->channel;
+                ctx->staging.channel = csa->channel;
+                IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+                              le16_to_cpu(csa->channel));
+                iwl_legacy_chswitch_done(priv, true);
+        } else {
+                IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+                              le16_to_cpu(csa->channel));
+                iwl_legacy_chswitch_done(priv, false);
         }
 }
 EXPORT_SYMBOL(iwl_legacy_rx_csa);
...
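The iwlegacy hunks above replace the switch_rxon bookkeeping (a bool plus priv->mutex) with a single STATUS_CHANNEL_SWITCH_PENDING status bit that iwl_legacy_chswitch_done() now reads and clears in one atomic test_and_clear_bit() call. A minimal userspace sketch of the same idea, using C11 atomics instead of the kernel bitops (function names here are illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool switch_pending;

    static void channel_switch_start(void)
    {
            atomic_store(&switch_pending, true);
    }

    static void channel_switch_done(bool success)
    {
            /* Read and clear in one atomic step, as test_and_clear_bit()
             * does above, so no mutex is needed and completion is only
             * ever reported once. */
            if (atomic_exchange(&switch_pending, false))
                    printf("channel switch %s\n", success ? "done" : "aborted");
    }

    int main(void)
    {
            channel_switch_start();
            channel_switch_done(true);      /* reports completion */
            channel_switch_done(true);      /* flag already cleared: silent */
            return 0;
    }

Because the read-and-clear is one atomic operation, the mutex the old code took around the flag is no longer required.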
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
 #define STATUS_SCAN_HW          15
 #define STATUS_POWER_PMI        16
 #define STATUS_FW_ERROR         17
+#define STATUS_CHANNEL_SWITCH_PENDING 18
 static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
 {
...
@@ -854,17 +854,6 @@ struct traffic_stats {
 #endif
 };
-/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
-        bool switch_in_progress;
-        __le16 channel;
-};
 /*
  * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
  * to perform continuous uCode event logging operation if enabled
@@ -1115,7 +1104,7 @@ struct iwl_priv {
         struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-        struct iwl_switch_rxon switch_rxon;
+        __le16 switch_channel;
         /* 1st responses from initialize and runtime uCode images.
          * _4965's initialize alive response contains some calibration data. */
...
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
                 goto out;
         if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-            test_bit(STATUS_SCANNING, &priv->status))
+            test_bit(STATUS_SCANNING, &priv->status) ||
+            test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                 goto out;
         if (!iwl_legacy_is_associated_ctx(ctx))
                 goto out;
-        /* channel switch in progress */
-        if (priv->switch_rxon.switch_in_progress == true)
-                goto out;
-
         if (priv->cfg->ops->lib->set_channel_switch) {
                 ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
                          * at this point, staging_rxon has the
                          * configuration for channel switch
                          */
-                        if (priv->cfg->ops->lib->set_channel_switch(priv,
-                                                                    ch_switch))
-                                priv->switch_rxon.switch_in_progress = false;
+                        set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+                        priv->switch_channel = cpu_to_le16(ch);
+                        if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+                                clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+                                          &priv->status);
+                                priv->switch_channel = 0;
+                                ieee80211_chswitch_done(ctx->vif, false);
+                        }
                 }
         }
 out:
         mutex_unlock(&priv->mutex);
-        if (!priv->switch_rxon.switch_in_progress)
-                ieee80211_chswitch_done(ctx->vif, false);
         IWL_DEBUG_MAC80211(priv, "leave\n");
 }
...
@@ -423,7 +423,6 @@ static struct iwl_base_params iwl5000_base_params = {
 };
 static struct iwl_ht_params iwl5000_ht_params = {
         .ht_greenfield_support = true,
-        .use_rts_for_aggregation = true, /* use rts/cts protection */
 };
 #define IWL_DEVICE_5000 \
...
@@ -163,17 +163,9 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
                                      __le16 fc, __le32 *tx_flags)
 {
         if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
-            info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+            info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+            info->flags & IEEE80211_TX_CTL_AMPDU)
                 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-                return;
-        }
-
-        if (priv->cfg->ht_params &&
-            priv->cfg->ht_params->use_rts_for_aggregation &&
-            info->flags & IEEE80211_TX_CTL_AMPDU) {
-                *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-                return;
-        }
 }
 /* Calc max signal level (dBm) among 3 possible receivers */
...
@@ -325,6 +325,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                 return 0;
         }
+        /*
+         * force CTS-to-self frames protection if RTS-CTS is not preferred
+         * one aggregation protection method
+         */
+        if (!(priv->cfg->ht_params &&
+              priv->cfg->ht_params->use_rts_for_aggregation))
+                ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
         if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
             !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -362,6 +370,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
         }
         memcpy(active, &ctx->staging, sizeof(*active));
+        /*
+         * We do not commit tx power settings while channel changing,
+         * do it now if after settings changed.
+         */
+        iwl_set_tx_power(priv, priv->tx_power_next, false);
         return 0;
 }
...
@@ -250,7 +250,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
         if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
                 rt2x00link_reset_tuner(rt2x00dev, false);
-        if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
+        if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+            test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
             (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
             (conf->flags & IEEE80211_CONF_PS)) {
                 beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
...
@@ -146,6 +146,9 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
         struct rt2x00_dev *rt2x00dev =
             container_of(work, struct rt2x00_dev, autowakeup_work.work);
+        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+                return;
+
         if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
                 ERROR(rt2x00dev, "Device failed to wakeup.\n");
         clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
@@ -1160,6 +1163,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
          * Stop all work.
          */
         cancel_work_sync(&rt2x00dev->intf_work);
+        cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
         if (rt2x00_is_usb(rt2x00dev)) {
                 del_timer_sync(&rt2x00dev->txstatus_timer);
                 cancel_work_sync(&rt2x00dev->rxdone_work);
...
@@ -669,6 +669,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                                          &rx_status,
                                                          (u8 *) pdesc, skb);
+                        new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+                        if (unlikely(!new_skb)) {
+                                RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+                                         DBG_DMESG,
+                                         ("can't alloc skb for rx\n"));
+                                goto done;
+                        }
+
+                        pci_unmap_single(rtlpci->pdev,
+                                         *((dma_addr_t *) skb->cb),
+                                         rtlpci->rxbuffersize,
+                                         PCI_DMA_FROMDEVICE);
+
                         skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
                                                          false,
                                                          HW_DESC_RXPKT_LEN));
@@ -685,22 +698,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                         hdr = rtl_get_hdr(skb);
                         fc = rtl_get_fc(skb);
-                        /* try for new buffer - if allocation fails, drop
-                         * frame and reuse old buffer
-                         */
-                        new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
-                        if (unlikely(!new_skb)) {
-                                RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-                                         DBG_DMESG,
-                                         ("can't alloc skb for rx\n"));
-                                goto done;
-                        }
-
-                        pci_unmap_single(rtlpci->pdev,
-                                         *((dma_addr_t *) skb->cb),
-                                         rtlpci->rxbuffersize,
-                                         PCI_DMA_FROMDEVICE);
-
-                        if (!stats.crc || !stats.hwerror) {
+                        if (!stats.crc && !stats.hwerror) {
                                 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                                        sizeof(rx_status));
...
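The rtlwifi hunk pair above reorders the RX refill path: the replacement skb is allocated before the old buffer is unmapped, so an allocation failure now just drops the frame and reuses the old buffer instead of leaving the ring slot without a mapping, and the delivery check becomes !stats.crc && !stats.hwerror so only frames with neither error are passed up. A simplified, self-contained sketch of that ordering, with malloc/free standing in for skb allocation and DMA mapping (all names illustrative, not from the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_SIZE 2048

    /* Returns the buffer that should occupy the ring slot afterwards. */
    static char *process_rx_slot(char *old_buf, int crc_err, int hw_err)
    {
            char *new_buf = malloc(BUF_SIZE);   /* allocate replacement first */

            if (!new_buf) {
                    /* Drop this frame and keep the old buffer in the ring. */
                    fprintf(stderr, "can't alloc rx buffer, reusing old one\n");
                    return old_buf;
            }

            /* Deliver only frames with neither a CRC nor a hardware error
             * (logical AND, matching the corrected condition above). */
            if (!crc_err && !hw_err)
                    printf("frame delivered: %.16s\n", old_buf);

            free(old_buf);          /* stands in for unmapping/consuming the old skb */
            return new_buf;         /* replacement takes over the ring slot */
    }

    int main(void)
    {
            char *slot = malloc(BUF_SIZE);

            strcpy(slot, "example payload");
            slot = process_rx_slot(slot, 0, 0);
            free(slot);
            return 0;
    }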
@@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
         mutex_lock(&sdata->u.ibss.mtx);
+        sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+        memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+        sdata->u.ibss.ssid_len = 0;
+
         active_ibss = ieee80211_sta_active_ibss(sdata);
         if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
         kfree_skb(skb);
         skb_queue_purge(&sdata->skb_queue);
-        memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
-        sdata->u.ibss.ssid_len = 0;
-
         del_timer_sync(&sdata->u.ibss.timer);
...
@@ -775,9 +775,6 @@ struct ieee80211_local {
         int tx_headroom; /* required headroom for hardware/radiotap */
-        /* count for keys needing tailroom space allocation */
-        int crypto_tx_tailroom_needed_cnt;
-
         /* Tasklet and skb queue to process calls from IRQ mode. All frames
          * added to skb_queue will be processed, but frames in
          * skb_queue_unreliable may be dropped if the total length of these
...
@@ -101,11 +101,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
         if (!ret) {
                 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
-
-                if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-                      (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
-                        key->local->crypto_tx_tailroom_needed_cnt--;
-
                 return 0;
         }
@@ -161,10 +156,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
                           key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
         key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
-        if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-              (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
-                key->local->crypto_tx_tailroom_needed_cnt++;
 }
 void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@@ -403,10 +394,8 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
                 ieee80211_aes_key_free(key->u.ccmp.tfm);
         if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
                 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
-        if (key->local) {
+        if (key->local)
                 ieee80211_debugfs_key_remove(key);
-                key->local->crypto_tx_tailroom_needed_cnt--;
-        }
         kfree(key);
 }
@@ -468,8 +457,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
         ieee80211_debugfs_key_add(key);
-        key->local->crypto_tx_tailroom_needed_cnt++;
-
         ret = ieee80211_key_enable_hw_accel(key);
         mutex_unlock(&sdata->local->key_mtx);
@@ -511,12 +498,8 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
         mutex_lock(&sdata->local->key_mtx);
-        sdata->local->crypto_tx_tailroom_needed_cnt = 0;
-
-        list_for_each_entry(key, &sdata->key_list, list) {
-                sdata->local->crypto_tx_tailroom_needed_cnt++;
+        list_for_each_entry(key, &sdata->key_list, list)
                 ieee80211_key_enable_hw_accel(key);
-        }
         mutex_unlock(&sdata->local->key_mtx);
 }
...
@@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
                 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
         }
-        ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                        IEEE80211_QUEUE_STOP_REASON_CSA);
-
         /* channel_type change automatically detected */
         ieee80211_hw_config(local, 0);
@@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
                 rcu_read_unlock();
         }
-        ieee80211_wake_queues_by_reason(&sdata->local->hw,
-                        IEEE80211_QUEUE_STOP_REASON_CSA);
-
         ht_opmode = le16_to_cpu(hti->operation_mode);
         /* if bss configuration changed store the new one */
...
@@ -1480,7 +1480,12 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
 {
         int tail_need = 0;
-        if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
+        /*
+         * This could be optimised, devices that do full hardware
+         * crypto (including TKIP MMIC) need no tailroom... But we
+         * have no drivers for such devices currently.
+         */
+        if (may_encrypt) {
                 tail_need = IEEE80211_ENCRYPT_TAILROOM;
                 tail_need -= skb_tailroom(skb);
                 tail_need = max_t(int, tail_need, 0);
...
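With the crypto_tx_tailroom_needed_cnt counter gone, ieee80211_skb_resize() above reserves encryption tailroom whenever the frame may be encrypted; the computation it keeps is simply the shortfall clamped at zero. A small sketch of that arithmetic, assuming an illustrative 18-byte tailroom constant (a stand-in, not a value taken from this diff):

    #include <stdio.h>

    #define ENCRYPT_TAILROOM 18     /* e.g. room for MIC + ICV, illustrative */

    /* How many extra tail bytes the frame buffer must grow by. */
    static int tailroom_to_add(int current_tailroom, int may_encrypt)
    {
            int tail_need = 0;

            if (may_encrypt) {
                    tail_need = ENCRYPT_TAILROOM - current_tailroom;
                    if (tail_need < 0)      /* already enough room */
                            tail_need = 0;
            }
            return tail_need;
    }

    int main(void)
    {
            printf("%d\n", tailroom_to_add(4, 1));   /* 14: needs expansion */
            printf("%d\n", tailroom_to_add(64, 1));  /* 0: nothing to do */
            printf("%d\n", tailroom_to_add(0, 0));   /* 0: no encryption */
            return 0;
    }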
@@ -3406,11 +3406,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
         i = 0;
         if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
                 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
-                        request->ssids[i].ssid_len = nla_len(attr);
-                        if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
+                        if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                                 err = -EINVAL;
                                 goto out_free;
                         }
+                        request->ssids[i].ssid_len = nla_len(attr);
                         memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
                         i++;
                 }
@@ -3572,12 +3572,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
         if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
                 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
                                     tmp) {
-                        request->ssids[i].ssid_len = nla_len(attr);
-                        if (request->ssids[i].ssid_len >
-                            IEEE80211_MAX_SSID_LEN) {
+                        if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                                 err = -EINVAL;
                                 goto out_free;
                         }
+                        request->ssids[i].ssid_len = nla_len(attr);
                         memcpy(request->ssids[i].ssid, nla_data(attr),
                                nla_len(attr));
                         i++;
...
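Both nl80211 hunks above make the same fix: the attribute length is validated against IEEE80211_MAX_SSID_LEN before it is stored in request->ssids[i].ssid_len or used as the memcpy() length, whereas the old order stored the untrusted length into the narrow ssid_len field first and only then checked it. A standalone sketch of the validate-before-copy order, with simplified types and an illustrative helper name (not from the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define IEEE80211_MAX_SSID_LEN 32

    struct ssid_entry {
            uint8_t ssid[IEEE80211_MAX_SSID_LEN];
            uint8_t ssid_len;
    };

    /* Reject oversized input before any state is written. */
    static int copy_ssid(struct ssid_entry *dst, const void *data, size_t len)
    {
            if (len > IEEE80211_MAX_SSID_LEN)       /* validate first ... */
                    return -1;                      /* ... -EINVAL in the kernel */

            dst->ssid_len = (uint8_t)len;           /* only now record the length */
            memcpy(dst->ssid, data, len);
            return 0;
    }

    int main(void)
    {
            struct ssid_entry e;
            const char good[] = "home-network";
            char bad[64] = { 0 };

            printf("good: %d\n", copy_ssid(&e, good, sizeof(good) - 1)); /* 0 */
            printf("bad:  %d\n", copy_ssid(&e, bad, sizeof(bad)));       /* -1 */
            return 0;
    }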