Commit 09552ccd authored by Michael Buesch; committed by David S. Miller

b43: Drop packets that we are not able to encrypt

We must not transmit packets we're not able to encrypt.

This fixes a bug where in a tiny timeframe after machine resume
packets can get sent unencrypted and might leak information.

This also fixes three small resource leaks I spotted while fixing
the security problem: properly deallocate the DMA slots in every DMA
allocation error path.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7be1bb6b
...@@ -1114,7 +1114,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring, ...@@ -1114,7 +1114,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
{ {
const struct b43_dma_ops *ops = ring->ops; const struct b43_dma_ops *ops = ring->ops;
u8 *header; u8 *header;
int slot; int slot, old_top_slot, old_used_slots;
int err; int err;
struct b43_dmadesc_generic *desc; struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta; struct b43_dmadesc_meta *meta;
...@@ -1126,6 +1126,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring, ...@@ -1126,6 +1126,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
#define SLOTS_PER_PACKET 2 #define SLOTS_PER_PACKET 2
B43_WARN_ON(skb_shinfo(skb)->nr_frags); B43_WARN_ON(skb_shinfo(skb)->nr_frags);
old_top_slot = ring->current_slot;
old_used_slots = ring->used_slots;
/* Get a slot for the header. */ /* Get a slot for the header. */
slot = request_slot(ring); slot = request_slot(ring);
desc = ops->idx2desc(ring, slot, &meta_hdr); desc = ops->idx2desc(ring, slot, &meta_hdr);
...@@ -1133,13 +1136,21 @@ static int dma_tx_fragment(struct b43_dmaring *ring, ...@@ -1133,13 +1136,21 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
header = &(ring->txhdr_cache[slot * hdrsize]); header = &(ring->txhdr_cache[slot * hdrsize]);
cookie = generate_cookie(ring, slot); cookie = generate_cookie(ring, slot);
b43_generate_txhdr(ring->dev, header, err = b43_generate_txhdr(ring->dev, header,
skb->data, skb->len, ctl, cookie); skb->data, skb->len, ctl, cookie);
if (unlikely(err)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return err;
}
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
hdrsize, 1); hdrsize, 1);
if (dma_mapping_error(meta_hdr->dmaaddr)) if (dma_mapping_error(meta_hdr->dmaaddr)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return -EIO; return -EIO;
}
ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
hdrsize, 1, 0, 0); hdrsize, 1, 0, 0);
...@@ -1157,6 +1168,8 @@ static int dma_tx_fragment(struct b43_dmaring *ring, ...@@ -1157,6 +1168,8 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
if (dma_mapping_error(meta->dmaaddr)) { if (dma_mapping_error(meta->dmaaddr)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) { if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -ENOMEM; err = -ENOMEM;
goto out_unmap_hdr; goto out_unmap_hdr;
} }
...@@ -1167,6 +1180,8 @@ static int dma_tx_fragment(struct b43_dmaring *ring, ...@@ -1167,6 +1180,8 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta->skb = skb; meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) { if (dma_mapping_error(meta->dmaaddr)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO; err = -EIO;
goto out_free_bounce; goto out_free_bounce;
} }
...@@ -1252,6 +1267,13 @@ int b43_dma_tx(struct b43_wldev *dev, ...@@ -1252,6 +1267,13 @@ int b43_dma_tx(struct b43_wldev *dev,
B43_WARN_ON(ring->stopped); B43_WARN_ON(ring->stopped);
err = dma_tx_fragment(ring, skb, ctl); err = dma_tx_fragment(ring, skb, ctl);
if (unlikely(err == -ENOKEY)) {
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
err = 0;
goto out_unlock;
}
if (unlikely(err)) { if (unlikely(err)) {
b43err(dev->wl, "DMA tx mapping failure\n"); b43err(dev->wl, "DMA tx mapping failure\n");
goto out_unlock; goto out_unlock;
......
...@@ -178,7 +178,7 @@ static u8 b43_calc_fallback_rate(u8 bitrate) ...@@ -178,7 +178,7 @@ static u8 b43_calc_fallback_rate(u8 bitrate)
} }
/* Generate a TX data header. */ /* Generate a TX data header. */
void b43_generate_txhdr(struct b43_wldev *dev, int b43_generate_txhdr(struct b43_wldev *dev,
u8 *_txhdr, u8 *_txhdr,
const unsigned char *fragment_data, const unsigned char *fragment_data,
unsigned int fragment_len, unsigned int fragment_len,
...@@ -238,8 +238,14 @@ void b43_generate_txhdr(struct b43_wldev *dev, ...@@ -238,8 +238,14 @@ void b43_generate_txhdr(struct b43_wldev *dev,
B43_WARN_ON(key_idx >= dev->max_nr_keys); B43_WARN_ON(key_idx >= dev->max_nr_keys);
key = &(dev->key[key_idx]); key = &(dev->key[key_idx]);
if (likely(key->keyconf)) { if (unlikely(!key->keyconf)) {
/* This key is valid. Use it for encryption. */ /* This key is invalid. This might only happen
* in a short timeframe after machine resume before
* we were able to reconfigure keys.
* Drop this packet completely. Do not transmit it
* unencrypted to avoid leaking information. */
return -ENOKEY;
}
/* Hardware appends ICV. */ /* Hardware appends ICV. */
plcp_fragment_len += txctl->icv_len; plcp_fragment_len += txctl->icv_len;
...@@ -254,7 +260,6 @@ void b43_generate_txhdr(struct b43_wldev *dev, ...@@ -254,7 +260,6 @@ void b43_generate_txhdr(struct b43_wldev *dev,
ARRAY_SIZE(txhdr->iv)); ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
} }
}
if (b43_is_old_txhdr_format(dev)) { if (b43_is_old_txhdr_format(dev)) {
b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->old_format.plcp), b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->old_format.plcp),
plcp_fragment_len, rate); plcp_fragment_len, rate);
...@@ -411,6 +416,7 @@ void b43_generate_txhdr(struct b43_wldev *dev, ...@@ -411,6 +416,7 @@ void b43_generate_txhdr(struct b43_wldev *dev,
txhdr->phy_ctl = cpu_to_le16(phy_ctl); txhdr->phy_ctl = cpu_to_le16(phy_ctl);
txhdr->extra_ft = extra_ft; txhdr->extra_ft = extra_ft;
return 0;
} }
static s8 b43_rssi_postprocess(struct b43_wldev *dev, static s8 b43_rssi_postprocess(struct b43_wldev *dev,
......
...@@ -174,7 +174,7 @@ size_t b43_txhdr_size(struct b43_wldev *dev) ...@@ -174,7 +174,7 @@ size_t b43_txhdr_size(struct b43_wldev *dev)
} }
void b43_generate_txhdr(struct b43_wldev *dev, int b43_generate_txhdr(struct b43_wldev *dev,
u8 * txhdr, u8 * txhdr,
const unsigned char *fragment_data, const unsigned char *fragment_data,
unsigned int fragment_len, unsigned int fragment_len,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment