Commit 9fe37a80 authored by David S. Miller

Merge tag 'mac80211-for-net-2021-05-11' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211

Johannes Berg says:

====================
pull-request: mac80211 2021-05-11

Exciting times - for the first pull request for fixes I
have a bunch of security things that have been under embargo
for a while; see more details in the tag below and in the
patch posting message I linked to.

I arranged with Kalle to have just a single set of fixes
for mac80211 and ath10k/ath11k; we don't know about any of
the other vendors (the mac80211 fixes plus already released
firmware are sufficient to fix iwlwifi).

Please pull and let me know if there's any problem.

Several security issues in the 802.11 implementations were found by
Mathy Vanhoef (New York University Abu Dhabi); this contains the
fixes developed for mac80211 and, specifically, the Qualcomm drivers.
I'm sending these together (as agreed with Kalle) to have just a
single set of patches for now. We don't know about other vendors though.

More details in the patch posting:
https://lore.kernel.org/r/20210511180259.159598-1-johannes@sipsolutions.net
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 576f9eac 210f563b
@@ -845,6 +845,7 @@ enum htt_security_types {
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
#define ATH10K_TXRX_NON_QOS_TID 16
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
@@ -1746,16 +1746,97 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
u16 offset,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
hdr = (struct ieee80211_hdr *)(skb->data + offset);
ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
pn = ehdr[0];
pn |= (u64)ehdr[1] << 8;
pn |= (u64)ehdr[4] << 16;
pn |= (u64)ehdr[5] << 24;
pn |= (u64)ehdr[6] << 32;
pn |= (u64)ehdr[7] << 40;
}
return pn;
}
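Note: the CCMP header carries PN0 and PN1 in its first two bytes and
PN2..PN5 in bytes 4..7; bytes 2 and 3 hold a reserved octet and the
ExtIV/KeyID octet, which is why the helper above skips them. A minimal
standalone sketch of the same 48-bit reassembly, with a hypothetical
test vector (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild the 48-bit CCMP packet number from an 8-byte CCMP header,
     * skipping bytes 2 (reserved) and 3 (ExtIV/KeyID), exactly like
     * ath10k_htt_rx_h_get_pn() above.
     */
    static uint64_t ccmp_hdr_to_pn(const uint8_t ehdr[8])
    {
        return (uint64_t)ehdr[0] |
               (uint64_t)ehdr[1] << 8 |
               (uint64_t)ehdr[4] << 16 |
               (uint64_t)ehdr[5] << 24 |
               (uint64_t)ehdr[6] << 32 |
               (uint64_t)ehdr[7] << 40;
    }

    int main(void)
    {
        /* hypothetical header for PN 0x010203040506 (0x20 = ExtIV bit) */
        const uint8_t hdr[8] = { 0x06, 0x05, 0x00, 0x20, 0x04, 0x03, 0x02, 0x01 };

        printf("pn = 0x%012llx\n", (unsigned long long)ccmp_hdr_to_pn(hdr));
        return 0;
    }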
static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
struct sk_buff *skb,
u16 offset)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + offset);
return !is_multicast_ether_addr(hdr->addr1);
}
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
struct sk_buff *skb,
u16 peer_id,
u16 offset,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_peer *peer;
union htt_rx_pn_t *last_pn, new_pn = {0};
struct ieee80211_hdr *hdr;
bool more_frags;
u8 tid, frag_number;
u32 seq;
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
return false;
}
hdr = (struct ieee80211_hdr *)(skb->data + offset);
if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr);
else
tid = ATH10K_TXRX_NON_QOS_TID;
last_pn = &peer->frag_tids_last_pn[tid];
new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
more_frags = ieee80211_has_morefrags(hdr->frame_control);
frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
if (frag_number == 0) {
last_pn->pn48 = new_pn.pn48;
peer->frag_tids_seq[tid] = seq;
} else {
if (seq != peer->frag_tids_seq[tid])
return false;
if (new_pn.pn48 != last_pn->pn48 + 1)
return false;
last_pn->pn48 = new_pn.pn48;
}
return true;
}
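Reviewer note: 802.11 requires all fragments of a CCMP-protected MPDU
to carry strictly incrementing PNs, so the check above anchors at
fragment 0 and then insists every later fragment matches the sequence
number and carries last PN + 1. A compact userspace model of that rule
(hypothetical types, not driver code):

    #include <stdbool.h>
    #include <stdint.h>

    struct frag_state {
        uint64_t last_pn; /* PN of the previously accepted fragment */
        uint16_t seq;     /* sequence number of the MPDU under reassembly */
    };

    /* Mirrors ath10k_htt_rx_h_frag_pn_check(): fragment 0 re-arms the
     * state; later fragments must belong to the same MPDU (same seq)
     * and advance the PN by exactly one.
     */
    static bool frag_pn_ok(struct frag_state *st, unsigned int frag_no,
                           uint16_t seq, uint64_t pn)
    {
        if (frag_no == 0) {
            st->last_pn = pn;
            st->seq = seq;
            return true;
        }
        if (seq != st->seq || pn != st->last_pn + 1)
            return false;
        st->last_pn = pn;
        return true;
    }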
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
bool fill_crypt_header,
u8 *rx_hdr,
enum ath10k_pkt_rx_err *err)
enum ath10k_pkt_rx_err *err,
u16 peer_id,
bool frag)
{
struct sk_buff *first;
struct sk_buff *last;
struct sk_buff *msdu;
struct sk_buff *msdu, *temp;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -1768,6 +1849,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool is_decrypted;
bool is_mgmt;
u32 attention;
bool frag_pn_check = true, multicast_check = true;
if (skb_queue_empty(amsdu))
return;
@@ -1866,7 +1948,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
skb_queue_walk(amsdu, msdu) {
if (frag && !fill_crypt_header && is_decrypted &&
enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
msdu,
peer_id,
0,
enctype);
if (frag)
multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
msdu,
0);
if (!frag_pn_check || !multicast_check) {
/* Discard the fragment with invalid PN or multicast DA
*/
temp = msdu->prev;
__skb_unlink(msdu, amsdu);
dev_kfree_skb_any(msdu);
msdu = temp;
frag_pn_check = true;
multicast_check = true;
continue;
}
ath10k_htt_rx_h_csum_offload(msdu);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
status->flag &= ~RX_FLAG_MMIC_STRIPPED;
ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
is_decrypted);
@@ -1884,6 +1996,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
status->flag &= ~RX_FLAG_IV_STRIPPED &
~RX_FLAG_MMIC_STRIPPED;
}
}
@@ -1991,14 +2108,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
struct sk_buff_head *amsdu)
{
u8 *subframe_hdr;
struct sk_buff *first;
bool is_first, is_last;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
enum htt_rx_mpdu_encrypt_type enctype;
int bytes_aligned = ar->hw_params.decap_align_bytes;
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Return in case of non-aggregated msdu */
if (is_first && is_last)
return true;
/* First msdu flag is not set for the first msdu of the list */
if (!is_first)
return false;
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
crypto_len;
/* Validate whether the amsdu has a proper first subframe.
* A single msdu can be received as an amsdu when the unauthenticated
* amsdu flag of a QoS header gets flipped in non-SPP AMSDUs; in such
* cases the first subframe contains an llc/snap header in place of a
* valid da.
* Return false if the da matches the rfc1042 pattern.
*/
if (ether_addr_equal(subframe_hdr, rfc1042_header))
return false;
return true;
}
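Reviewer note: rfc1042_header is the 6-byte LLC/SNAP prefix
AA:AA:03:00:00:00. A real destination MAC in the first A-MSDU subframe
should never equal that pattern, so a match means the frame body is an
ordinary MSDU being misparsed as an A-MSDU. A minimal sketch of the
comparison (the in-kernel rfc1042_header symbol is provided by
cfg80211; this copy is only for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* LLC/SNAP header used for RFC 1042 encapsulation */
    static const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

    /* True if the 6 bytes where the subframe's DA should be actually
     * look like an LLC/SNAP header, i.e. the A-MSDU is not genuine.
     */
    static bool subframe_da_is_llc_snap(const uint8_t *subframe_hdr)
    {
        return memcmp(subframe_hdr, rfc1042_header, sizeof(rfc1042_header)) == 0;
    }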
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
/* FIXME: It might be a good idea to do some fuzzy-testing to drop
* invalid/dangerous frames.
*/
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
@@ -2009,6 +2174,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
return false;
}
return true;
}
@@ -2071,7 +2241,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
false);
msdus_to_queue = skb_queue_len(&amsdu);
ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
@@ -2204,6 +2375,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
if (fw_desc->u.bits.discard) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
goto err;
}
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem to handle that case either, so we introduce the
* same limitation here as well.
@@ -2509,6 +2685,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
rx_desc_info = __le32_to_cpu(rx_desc->info);
hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
if (is_multicast_ether_addr(hdr->addr1)) {
/* Discard the fragment with multicast DA */
goto err;
}
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
@@ -2516,8 +2699,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
HTT_RX_NON_TKIP_MIC);
}
hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
if (ieee80211_has_retry(hdr->frame_control))
goto err;
@@ -3027,7 +3208,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
NULL);
NULL, peer_id, frag);
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
u8 info0;
union {
struct {
u8 discard:1,
forward:1,
any_err:1,
dup_err:1,
reserved:1,
inspect:1,
extension:2;
} bits;
u8 info0;
} u;
u8 version;
u8 len;
u8 flags;
@@ -260,6 +260,16 @@ static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
__le32_to_cpu(attn->info1)));
}
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -852,6 +862,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
__skb_queue_purge(&rx_tid->rx_frags);
}
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
int i;
lockdep_assert_held(&ar->ab->base_lock);
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
spin_unlock_bh(&ar->ab->base_lock);
del_timer_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
ath11k_dp_rx_frags_cleanup(rx_tid, true);
}
}
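Reviewer note: the lock dance above is deliberate. del_timer_sync()
waits for a running timer callback, and the fragment timer callback may
itself take base_lock, so waiting while holding the lock could
deadlock; lockdep_assert_held() documents that callers enter with the
lock held. A minimal pthread sketch of the same deadlock-avoidance
pattern (hypothetical names, not kernel APIs; build with -pthread):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Never synchronously wait for a worker that may need the lock you
     * hold: drop it around the wait and re-take it afterwards, as
     * ath11k_peer_frags_flush() does around del_timer_sync().
     */
    static void flush_one(pthread_t worker)
    {
        pthread_mutex_unlock(&lock);
        pthread_join(worker, NULL); /* analogous to del_timer_sync() */
        pthread_mutex_lock(&lock);
    }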
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
@@ -3450,6 +3478,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
u8 tid;
int ret = 0;
bool more_frags;
bool is_mcbc;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
@@ -3457,6 +3486,11 @@
seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
/* Multicast/Broadcast fragments are not expected */
if (is_mcbc)
return -EINVAL;
if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid);
@@ -2779,6 +2779,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
/* flush the fragments cache during key (re)install to
* ensure all frags in the new frag list belong to the same key.
*/
if (peer && cmd == SET_KEY)
ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
@@ -5760,7 +5760,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
u8 data_offset);
u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -5772,7 +5772,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
@@ -50,12 +50,6 @@ struct ieee80211_local;
#define IEEE80211_ENCRYPT_HEADROOM 8
#define IEEE80211_ENCRYPT_TAILROOM 18
/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
* reception of at least three fragmented frames. This limit can be increased
* by changing this define, at the cost of slower frame reassembly and
* increased memory use (about 2 kB of RAM per entry). */
#define IEEE80211_FRAGMENT_MAX 4
/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
@@ -88,18 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
struct ieee80211_fragment_entry {
struct sk_buff_head skb_list;
unsigned long first_frag_time;
u16 seq;
u16 extra_len;
u16 last_frag;
u8 rx_queue;
bool check_sequential_pn; /* needed for CCMP/GCMP */
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
};
struct ieee80211_bss {
u32 device_ts_beacon, device_ts_presp;
@@ -241,8 +223,15 @@ struct ieee80211_rx_data {
*/
int security_idx;
u32 tkip_iv32;
u16 tkip_iv16;
union {
struct {
u32 iv32;
u16 iv16;
} tkip;
struct {
u8 pn[IEEE80211_CCMP_PN_LEN];
} ccm_gcm;
};
};
struct ieee80211_csa_settings {
@@ -902,9 +891,7 @@ struct ieee80211_sub_if_data {
char name[IFNAMSIZ];
/* Fragment table for host-based reassembly */
struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
unsigned int fragment_next;
struct ieee80211_fragment_cache frags;
/* TID bitmap for NoAck policy */
u16 noack_map;
@@ -2320,4 +2307,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
#define debug_noinline
#endif
void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
#endif /* IEEE80211_I_H */
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -677,16 +677,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
*/
static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
int i;
/* free extra data */
ieee80211_free_keys(sdata, false);
ieee80211_debugfs_remove_netdev(sdata);
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
__skb_queue_purge(&sdata->fragments[i].skb_list);
sdata->fragment_next = 0;
ieee80211_destroy_frag_cache(&sdata->frags);
if (ieee80211_vif_is_mesh(&sdata->vif))
ieee80211_mesh_teardown_sdata(sdata);
@@ -1930,8 +1926,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
skb_queue_head_init(&sdata->fragments[i].skb_list);
ieee80211_init_frag_cache(&sdata->frags);
INIT_LIST_HEAD(&sdata->key_list);
@@ -799,6 +799,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
static atomic_t key_color = ATOMIC_INIT(0);
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -850,6 +851,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
key->sdata = sdata;
key->sta = sta;
/*
* Assign a unique ID to every key so we can easily prevent mixed
* key and fragment cache attacks.
*/
key->color = atomic_inc_return(&key_color);
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
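Reviewer note: the color is nothing more than a monotonically
increasing tag; every key installation gets a value no other key has
had, so a later comparison of colors answers "was this fragment
decrypted with the same key instance?" even across a rekey with the
same keyidx. A standalone sketch of the scheme using C11 atomics (the
kernel uses atomic_t and atomic_inc_return() instead):

    #include <stdatomic.h>

    static atomic_uint key_color;

    /* Returns a process-unique, strictly increasing ID per installed
     * key, equivalent in spirit to atomic_inc_return(&key_color) above.
     */
    static unsigned int assign_key_color(void)
    {
        return atomic_fetch_add(&key_color, 1) + 1;
    }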
@@ -128,6 +128,8 @@ struct ieee80211_key {
} debugfs;
#endif
unsigned int color;
/*
* key config, must be last because it contains key
* material as variable length member
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -2123,19 +2123,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return result;
}
void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
{
int i;
for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
skb_queue_head_init(&cache->entries[i].skb_list);
}
void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
{
int i;
for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
__skb_queue_purge(&cache->entries[i].skb_list);
}
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq, int rx_queue,
struct sk_buff **skb)
{
struct ieee80211_fragment_entry *entry;
entry = &sdata->fragments[sdata->fragment_next++];
if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
sdata->fragment_next = 0;
entry = &cache->entries[cache->next++];
if (cache->next >= IEEE80211_FRAGMENT_MAX)
cache->next = 0;
if (!skb_queue_empty(&entry->skb_list))
__skb_queue_purge(&entry->skb_list);
__skb_queue_purge(&entry->skb_list);
__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
*skb = NULL;
@@ -2150,14 +2165,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
}
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq,
int rx_queue, struct ieee80211_hdr *hdr)
{
struct ieee80211_fragment_entry *entry;
int i, idx;
idx = sdata->fragment_next;
idx = cache->next;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
struct ieee80211_hdr *f_hdr;
struct sk_buff *f_skb;
@@ -2166,7 +2181,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
if (idx < 0)
idx = IEEE80211_FRAGMENT_MAX - 1;
entry = &sdata->fragments[idx];
entry = &cache->entries[idx];
if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
entry->rx_queue != rx_queue ||
entry->last_frag + 1 != frag)
@@ -2194,15 +2209,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
return NULL;
}
static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
{
return rx->key &&
(rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
ieee80211_has_protected(fc);
}
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
struct ieee80211_hdr *hdr;
u16 sc;
__le16 fc;
unsigned int frag, seq;
struct ieee80211_fragment_entry *entry;
struct sk_buff *skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
@@ -2218,6 +2245,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
goto out_no_led;
}
if (rx->sta)
cache = &rx->sta->frags;
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
@@ -2236,20 +2266,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (frag == 0) {
/* This is the first fragment of a new frame. */
entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
entry = ieee80211_reassemble_add(cache, frag, seq,
rx->seqno_idx, &(rx->skb));
if (rx->key &&
(rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
ieee80211_has_protected(fc)) {
if (requires_sequential_pn(rx, fc)) {
int queue = rx->security_idx;
/* Store CCMP/GCMP PN so that we can verify that the
* next fragment has a sequential PN value.
*/
entry->check_sequential_pn = true;
entry->is_protected = true;
entry->key_color = rx->key->color;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
@@ -2261,6 +2288,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sizeof(rx->key->u.gcmp.rx_pn[queue]));
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
IEEE80211_GCMP_PN_LEN);
} else if (rx->key &&
(ieee80211_has_protected(fc) ||
(status->flag & RX_FLAG_DECRYPTED))) {
entry->is_protected = true;
entry->key_color = rx->key->color;
}
return RX_QUEUED;
}
@@ -2268,7 +2300,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is a fragment for a frame that should already be pending in
* fragment cache. Add this fragment to the end of the pending entry.
*/
entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
entry = ieee80211_reassemble_find(cache, frag, seq,
rx->seqno_idx, hdr);
if (!entry) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2283,25 +2315,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (entry->check_sequential_pn) {
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
int queue;
if (!rx->key ||
(rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
if (!requires_sequential_pn(rx, fc))
return RX_DROP_UNUSABLE;
/* Prevent mixed key and fragment cache attacks */
if (entry->key_color != rx->key->color)
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
pn[i]++;
if (pn[i])
break;
}
queue = rx->security_idx;
rpn = rx->key->u.ccmp.rx_pn[queue];
rpn = rx->ccm_gcm.pn;
if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
} else if (entry->is_protected &&
(!rx->key ||
(!ieee80211_has_protected(fc) &&
!(status->flag & RX_FLAG_DECRYPTED)) ||
rx->key->color != entry->key_color)) {
/* Drop this as a mixed key or fragment cache attack, even
* if for TKIP the Michael MIC should protect us, and WEP is a
* lost cause anyway.
*/
return RX_DROP_UNUSABLE;
} else if (entry->is_protected && rx->key &&
entry->key_color != rx->key->color &&
(status->flag & RX_FLAG_DECRYPTED)) {
return RX_DROP_UNUSABLE;
}
skb_pull(rx->skb, ieee80211_hdrlen(fc));
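Reviewer note: the pn[i]++ loop above treats the stored PN as a 6-byte
big-endian counter; it bumps the least significant byte and keeps
carrying while a byte wraps to zero, then compares the result against
the PN the decryption step recorded in rx->ccm_gcm.pn. Standalone
sketch with a worked value (hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>

    #define PN_LEN 6

    /* Increment a big-endian multi-byte counter in place; stop at the
     * first byte that does not wrap, as in ieee80211_rx_h_defragment().
     */
    static void pn_increment(uint8_t pn[PN_LEN])
    {
        int i;

        for (i = PN_LEN - 1; i >= 0; i--) {
            pn[i]++;
            if (pn[i])
                break;
        }
    }

    int main(void)
    {
        uint8_t pn[PN_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0xff };
        int i;

        pn_increment(pn); /* ...01ff -> ...0200 */
        for (i = 0; i < PN_LEN; i++)
            printf("%02x", pn[i]);
        printf("\n");
        return 0;
    }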
@@ -2494,13 +2540,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
/*
* Allow EAPOL frames to us/the PAE group address regardless
* of whether the frame was encrypted or not.
* Allow EAPOL frames to us/the PAE group address regardless of
* whether the frame was encrypted or not, and always disallow
* all other destination addresses for them.
*/
if (ehdr->h_proto == rx->sdata->control_port_protocol &&
(ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
ether_addr_equal(ehdr->h_dest, pae_group_addr)))
return true;
if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
ether_addr_equal(ehdr->h_dest, pae_group_addr);
if (ieee80211_802_1x_port_control(rx) ||
ieee80211_drop_unencrypted(rx, fc))
@@ -2525,8 +2571,28 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
cfg80211_rx_control_port(dev, skb, noencrypt);
dev_kfree_skb(skb);
} else {
struct ethhdr *ehdr = (void *)skb_mac_header(skb);
memset(skb->cb, 0, sizeof(skb->cb));
/*
* 802.1X over 802.11 requires that the authenticator address
* be used for EAPOL frames. However, 802.1X allows the use of
* the PAE group address instead. If the interface is part of
* a bridge and we pass the frame with the PAE group address,
* then the bridge will forward it to the network (even if the
* client was not associated yet), which isn't supposed to
* happen.
* To avoid that, rewrite the destination address to our own
* address, so that the authenticator (e.g. hostapd) will see
the frame, but the bridge won't forward it anywhere else. Note
* that due to earlier filtering, the only other address can
* be the PAE group address.
*/
if (unlikely(skb->protocol == sdata->control_port_protocol &&
!ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
/* deliver to local stack */
if (rx->list)
list_add_tail(&skb->list, rx->list);
@@ -2566,6 +2632,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
ehdr->h_proto != rx->sdata->control_port_protocol &&
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest) &&
ieee80211_vif_get_num_mcast_if(sdata) != 0) {
@@ -2675,7 +2742,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
rx->sdata->vif.addr,
rx->sdata->vif.type,
data_offset))
data_offset, true))
return RX_DROP_UNUSABLE;
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
@@ -2732,6 +2799,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
if (rx->key) {
/*
* We should not receive A-MSDUs on pre-HT connections,
* and HT connections cannot use old ciphers. Thus drop
* them, as in those cases we couldn't even have SPP
* A-MSDUs or such.
*/
switch (rx->key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
return RX_DROP_UNUSABLE;
default:
break;
}
}
return __ieee80211_rx_h_amsdu(rx, 0);
}
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
@@ -392,6 +392,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
u64_stats_init(&sta->rx_stats.syncp);
ieee80211_init_frag_cache(&sta->frags);
sta->sta_state = IEEE80211_STA_NONE;
/* Mark TID as unreserved */
@@ -1102,6 +1104,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
ieee80211_sta_debugfs_remove(sta);
ieee80211_destroy_frag_cache(&sta->frags);
cleanup_single_sta(sta);
}
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* Copyright(c) 2020 Intel Corporation
* Copyright(c) 2020-2021 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -438,6 +438,34 @@ struct ieee80211_sta_rx_stats {
u64 msdu[IEEE80211_NUM_TIDS + 1];
};
/*
* IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
* reception of at least one MSDU per access category per associated STA"
* on APs, or "at least one MSDU per access category" on other interface types.
*
* This limit can be increased by changing this define, at the cost of slower
* frame reassembly and increased memory use while fragments are pending.
*/
#define IEEE80211_FRAGMENT_MAX 4
struct ieee80211_fragment_entry {
struct sk_buff_head skb_list;
unsigned long first_frag_time;
u16 seq;
u16 extra_len;
u16 last_frag;
u8 rx_queue;
u8 check_sequential_pn:1, /* needed for CCMP/GCMP */
is_protected:1;
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
unsigned int key_color;
};
struct ieee80211_fragment_cache {
struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
unsigned int next;
};
/*
* The bandwidth threshold below which the per-station CoDel parameters will be
* scaled to be more lenient (to prevent starvation of slow stations). This
@@ -531,6 +559,7 @@ struct ieee80211_sta_rx_stats {
* @status_stats.last_ack_signal: last ACK signal
* @status_stats.ack_signal_filled: last ACK signal validity
* @status_stats.avg_ack_signal: average ACK signal
* @frags: fragment cache
*/
struct sta_info {
/* General information, mostly static */
@@ -639,6 +668,8 @@ struct sta_info {
struct cfg80211_chan_def tdls_chandef;
struct ieee80211_fragment_cache frags;
/* keep last! */
struct ieee80211_sta sta;
};
@@ -3,6 +3,7 @@
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <j@w1.fi>
* Copyright (C) 2016-2017 Intel Deutschland GmbH
* Copyright (C) 2020-2021 Intel Corporation
*/
#include <linux/netdevice.h>
@@ -167,8 +168,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
update_iv:
/* update IV in key information to be able to detect replays */
rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
return RX_CONTINUE;
@@ -294,8 +295,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
hdr->addr1, hwaccel, rx->security_idx,
&rx->tkip_iv32,
&rx->tkip_iv16);
&rx->tkip.iv32,
&rx->tkip.iv16);
if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
@@ -553,6 +554,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
}
memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
if (unlikely(ieee80211_is_frag(hdr)))
memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove CCMP header and MIC */
@@ -781,6 +784,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
}
memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
if (unlikely(ieee80211_is_frag(hdr)))
memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove GCMP header and MIC */
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
u8 data_offset)
u8 data_offset, bool is_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -629,7 +629,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
tmp.h_proto = payload.proto;
if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
tmp.h_proto != htons(ETH_P_AARP) &&
tmp.h_proto != htons(ETH_P_IPX)) ||
ether_addr_equal(payload.hdr, bridge_tunnel_header)))
@@ -771,6 +771,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
/* mitigate A-MSDU aggregation injection attacks */
if (ether_addr_equal(eth.h_dest, rfc1042_header))
goto purge;
offset += sizeof(struct ethhdr);
last = remaining <= subframe_len + padding;
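Reviewer note: this guards against the A-MSDU injection part of the
FragAttacks set. An attacker can flip the (unauthenticated) "A-MSDU
present" QoS bit on a normal frame, making the receiver reinterpret the
frame body; its first bytes, the LLC/SNAP prefix AA:AA:03:00:00:00,
then land exactly where the first subframe's destination address is
expected. Purging any subframe whose DA equals rfc1042_header discards
precisely such misparsed frames. A hedged sketch of the byte overlay:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

    int main(void)
    {
        /* body of a normal (non-A-MSDU) data frame: LLC/SNAP + ethertype */
        const uint8_t body[8] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00 };

        /* with a forged A-MSDU bit these bytes are parsed as an ethhdr,
         * so the apparent destination address is the LLC/SNAP prefix
         */
        if (memcmp(body, rfc1042_header, sizeof(rfc1042_header)) == 0)
            printf("subframe DA matches rfc1042_header -> purge\n");
        return 0;
    }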