Commit e2530083 authored by Johannes Berg, committed by John W. Linville

mac80211: use multi-queue master netdevice

This patch updates mac80211 and drivers to be multi-queue aware and
use that instead of the internal queue mapping. Also does a number
of cleanups in various pieces of the code that fall out and reduces
internal mac80211 state size.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent eefce91a
...@@ -2657,7 +2657,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -2657,7 +2657,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (list_empty(&sc->txbuf)) { if (list_empty(&sc->txbuf)) {
ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
spin_unlock_irqrestore(&sc->txbuflock, flags); spin_unlock_irqrestore(&sc->txbuflock, flags);
ieee80211_stop_queue(hw, info->queue); ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
return -1; return -1;
} }
bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
......
...@@ -1297,7 +1297,8 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -1297,7 +1297,8 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
} else { } else {
/* Decide by priority where to put this frame. */ /* Decide by priority where to put this frame. */
ring = select_ring_by_priority(dev, info->queue); ring = select_ring_by_priority(
dev, skb_get_queue_mapping(skb));
} }
spin_lock_irqsave(&ring->lock, flags); spin_lock_irqsave(&ring->lock, flags);
...@@ -1315,7 +1316,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -1315,7 +1316,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* Assign the queue number to the ring (if not already done before) /* Assign the queue number to the ring (if not already done before)
* so TX status handling can use it. The queue to ring mapping is * so TX status handling can use it. The queue to ring mapping is
* static, so we don't need to store it per frame. */ * static, so we don't need to store it per frame. */
ring->queue_prio = info->queue; ring->queue_prio = skb_get_queue_mapping(skb);
err = dma_tx_fragment(ring, skb); err = dma_tx_fragment(ring, skb);
if (unlikely(err == -ENOKEY)) { if (unlikely(err == -ENOKEY)) {
...@@ -1333,7 +1334,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -1333,7 +1334,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
if ((free_slots(ring) < SLOTS_PER_PACKET) || if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) { should_inject_overflow(ring)) {
/* This TX ring is full. */ /* This TX ring is full. */
ieee80211_stop_queue(dev->wl->hw, info->queue); ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
ring->stopped = 1; ring->stopped = 1;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
......
...@@ -509,7 +509,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -509,7 +509,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
} else { } else {
/* Decide by priority where to put this frame. */ /* Decide by priority where to put this frame. */
q = select_queue_by_priority(dev, info->queue); q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
} }
spin_lock_irqsave(&q->lock, flags); spin_lock_irqsave(&q->lock, flags);
...@@ -532,7 +532,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -532,7 +532,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) { if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */ /* Not enough memory on the queue. */
err = -EBUSY; err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, info->queue); ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
q->stopped = 1; q->stopped = 1;
goto out_unlock; goto out_unlock;
} }
...@@ -540,7 +540,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -540,7 +540,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* Assign the queue number to the ring (if not already done before) /* Assign the queue number to the ring (if not already done before)
* so TX status handling can use it. The mac80211-queue to b43-queue * so TX status handling can use it. The mac80211-queue to b43-queue
* mapping is static, so we don't need to store it per frame. */ * mapping is static, so we don't need to store it per frame. */
q->queue_prio = info->queue; q->queue_prio = skb_get_queue_mapping(skb);
err = pio_tx_frame(q, skb); err = pio_tx_frame(q, skb);
if (unlikely(err == -ENOKEY)) { if (unlikely(err == -ENOKEY)) {
...@@ -560,7 +560,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -560,7 +560,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) { (q->free_packet_slots == 0)) {
/* The queue is full. */ /* The queue is full. */
ieee80211_stop_queue(dev->wl->hw, info->queue); ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
q->stopped = 1; q->stopped = 1;
} }
......
...@@ -1325,11 +1325,10 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, ...@@ -1325,11 +1325,10 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct b43legacy_dmaring *ring; struct b43legacy_dmaring *ring;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int err = 0; int err = 0;
unsigned long flags; unsigned long flags;
ring = priority_to_txring(dev, info->queue); ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
spin_lock_irqsave(&ring->lock, flags); spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx); B43legacy_WARN_ON(!ring->tx);
if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
......
...@@ -696,7 +696,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) ...@@ -696,7 +696,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_tfd_frame *tfd; struct iwl_tfd_frame *tfd;
u32 *control_flags; u32 *control_flags;
int txq_id = info->queue; int txq_id = skb_get_queue_mapping(skb);
struct iwl_tx_queue *txq = NULL; struct iwl_tx_queue *txq = NULL;
struct iwl_queue *q = NULL; struct iwl_queue *q = NULL;
dma_addr_t phys_addr; dma_addr_t phys_addr;
...@@ -917,7 +917,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) ...@@ -917,7 +917,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
} }
ieee80211_stop_queue(priv->hw, info->queue); ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
} }
return 0; return 0;
......
...@@ -2552,7 +2552,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb) ...@@ -2552,7 +2552,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl3945_tfd_frame *tfd; struct iwl3945_tfd_frame *tfd;
u32 *control_flags; u32 *control_flags;
int txq_id = info->queue; int txq_id = skb_get_queue_mapping(skb);
struct iwl3945_tx_queue *txq = NULL; struct iwl3945_tx_queue *txq = NULL;
struct iwl3945_queue *q = NULL; struct iwl3945_queue *q = NULL;
dma_addr_t phys_addr; dma_addr_t phys_addr;
...@@ -2765,7 +2765,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb) ...@@ -2765,7 +2765,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
} }
ieee80211_stop_queue(priv->hw, info->queue); ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
} }
return 0; return 0;
......
...@@ -407,7 +407,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) ...@@ -407,7 +407,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
last_addr = range->end_addr; last_addr = range->end_addr;
__skb_unlink(entry, &priv->tx_queue); __skb_unlink(entry, &priv->tx_queue);
memset(&info->status, 0, sizeof(info->status)); memset(&info->status, 0, sizeof(info->status));
priv->tx_stats[info->queue].len--; priv->tx_stats[skb_get_queue_mapping(skb)].len--;
entry_hdr = (struct p54_control_hdr *) entry->data; entry_hdr = (struct p54_control_hdr *) entry->data;
entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
...@@ -551,13 +551,13 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) ...@@ -551,13 +551,13 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
size_t padding, len; size_t padding, len;
u8 rate; u8 rate;
current_queue = &priv->tx_stats[info->queue]; current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
if (unlikely(current_queue->len > current_queue->limit)) if (unlikely(current_queue->len > current_queue->limit))
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
current_queue->len++; current_queue->len++;
current_queue->count++; current_queue->count++;
if (current_queue->len == current_queue->limit) if (current_queue->len == current_queue->limit)
ieee80211_stop_queue(dev, info->queue); ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
len = skb->len; len = skb->len;
...@@ -589,7 +589,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) ...@@ -589,7 +589,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
memset(txhdr->rateset, rate, 8); memset(txhdr->rateset, rate, 8);
txhdr->wep_key_present = 0; txhdr->wep_key_present = 0;
txhdr->wep_key_len = 0; txhdr->wep_key_len = 0;
txhdr->frame_type = cpu_to_le32(info->queue + 4); txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4);
txhdr->magic4 = 0; txhdr->magic4 = 0;
txhdr->antenna = (info->antenna_sel_tx == 0) ? txhdr->antenna = (info->antenna_sel_tx == 0) ?
2 : info->antenna_sel_tx - 1; 2 : info->antenna_sel_tx - 1;
......
...@@ -102,7 +102,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -102,7 +102,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
enum data_queue_qid qid = mac80211_queue_to_qid(tx_info->queue); enum data_queue_qid qid = skb_get_queue_mapping(skb);
struct data_queue *queue; struct data_queue *queue;
u16 frame_control; u16 frame_control;
...@@ -149,23 +149,23 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -149,23 +149,23 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
IEEE80211_TX_CTL_USE_CTS_PROTECT)) && IEEE80211_TX_CTL_USE_CTS_PROTECT)) &&
!rt2x00dev->ops->hw->set_rts_threshold) { !rt2x00dev->ops->hw->set_rts_threshold) {
if (rt2x00queue_available(queue) <= 1) { if (rt2x00queue_available(queue) <= 1) {
ieee80211_stop_queue(rt2x00dev->hw, tx_info->queue); ieee80211_stop_queue(rt2x00dev->hw, qid);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) { if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) {
ieee80211_stop_queue(rt2x00dev->hw, tx_info->queue); ieee80211_stop_queue(rt2x00dev->hw, qid);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
} }
if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb)) { if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb)) {
ieee80211_stop_queue(rt2x00dev->hw, tx_info->queue); ieee80211_stop_queue(rt2x00dev->hw, qid);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
if (rt2x00queue_full(queue)) if (rt2x00queue_full(queue))
ieee80211_stop_queue(rt2x00dev->hw, tx_info->queue); ieee80211_stop_queue(rt2x00dev->hw, qid);
if (rt2x00dev->ops->lib->kick_tx_queue) if (rt2x00dev->ops->lib->kick_tx_queue)
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, qid); rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, qid);
......
...@@ -79,19 +79,6 @@ enum data_queue_qid { ...@@ -79,19 +79,6 @@ enum data_queue_qid {
QID_ATIM, QID_ATIM,
}; };
/**
* mac80211_queue_to_qid - Convert mac80211 queue to rt2x00 qid
* @queue: mac80211 queue.
*/
static inline enum data_queue_qid mac80211_queue_to_qid(unsigned int queue)
{
/* Regular TX queues are mapped directly */
if (queue < 4)
return queue;
WARN_ON(1);
return QID_OTHER;
}
/** /**
* enum skb_frame_desc_flags: Flags for &struct skb_frame_desc * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
* *
......
...@@ -246,7 +246,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) ...@@ -246,7 +246,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
u16 plcp_len = 0; u16 plcp_len = 0;
__le16 rts_duration = 0; __le16 rts_duration = 0;
prio = info->queue; prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio]; ring = &priv->tx_ring[prio];
mapping = pci_map_single(priv->pdev, skb->data, mapping = pci_map_single(priv->pdev, skb->data,
...@@ -298,7 +298,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) ...@@ -298,7 +298,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
entry->flags = cpu_to_le32(tx_flags); entry->flags = cpu_to_le32(tx_flags);
__skb_queue_tail(&ring->queue, skb); __skb_queue_tail(&ring->queue, skb);
if (ring->entries - skb_queue_len(&ring->queue) < 2) if (ring->entries - skb_queue_len(&ring->queue) < 2)
ieee80211_stop_queue(dev, info->queue); ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
......
...@@ -293,7 +293,7 @@ struct ieee80211_tx_info { ...@@ -293,7 +293,7 @@ struct ieee80211_tx_info {
s8 tx_rate_idx; s8 tx_rate_idx;
u8 antenna_sel_tx; u8 antenna_sel_tx;
u8 queue; /* use skb_queue_mapping soon */ /* 1 byte hole */
union { union {
struct { struct {
...@@ -802,6 +802,24 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) ...@@ -802,6 +802,24 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
} }
static inline int ieee80211_num_regular_queues(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_QOS
return hw->queues;
#else
return 1;
#endif
}
static inline int ieee80211_num_queues(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_QOS
return hw->queues + hw->ampdu_queues;
#else
return 1;
#endif
}
static inline struct ieee80211_rate * static inline struct ieee80211_rate *
ieee80211_get_tx_rate(const struct ieee80211_hw *hw, ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *c) const struct ieee80211_tx_info *c)
......
...@@ -7,11 +7,23 @@ config MAC80211 ...@@ -7,11 +7,23 @@ config MAC80211
select CRC32 select CRC32
select WIRELESS_EXT select WIRELESS_EXT
select CFG80211 select CFG80211
select NET_SCH_FIFO
---help--- ---help---
This option enables the hardware independent IEEE 802.11 This option enables the hardware independent IEEE 802.11
networking stack. networking stack.
config MAC80211_QOS
def_bool y
depends on MAC80211
depends on NET_SCHED
depends on NETDEVICES_MULTIQUEUE
comment "QoS/HT support disabled"
depends on MAC80211 && !MAC80211_QOS
comment "QoS/HT support needs CONFIG_NET_SCHED"
depends on MAC80211 && !NET_SCHED
comment "QoS/HT support needs CONFIG_NETDEVICES_MULTIQUEUE"
depends on MAC80211 && !NETDEVICES_MULTIQUEUE
menu "Rate control algorithm selection" menu "Rate control algorithm selection"
depends on MAC80211 != n depends on MAC80211 != n
......
...@@ -29,7 +29,7 @@ mac80211-y := \ ...@@ -29,7 +29,7 @@ mac80211-y := \
event.o event.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_NET_SCHED) += wme.o mac80211-$(CONFIG_MAC80211_QOS) += wme.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
debugfs.o \ debugfs.o \
debugfs_sta.o \ debugfs_sta.o \
......
...@@ -594,7 +594,7 @@ struct ieee80211_local { ...@@ -594,7 +594,7 @@ struct ieee80211_local {
struct sta_info *sta_hash[STA_HASH_SIZE]; struct sta_info *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup; struct timer_list sta_cleanup;
unsigned long state[IEEE80211_MAX_QUEUES + IEEE80211_MAX_AMPDU_QUEUES]; unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES]; struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet; struct tasklet_struct tx_pending_tasklet;
...@@ -758,6 +758,15 @@ struct ieee80211_local { ...@@ -758,6 +758,15 @@ struct ieee80211_local {
#endif #endif
}; };
static inline int ieee80211_is_multiqueue(struct ieee80211_local *local)
{
#ifdef CONFIG_MAC80211_QOS
return netif_is_multiqueue(local->mdev);
#else
return 0;
#endif
}
/* this struct represents 802.11n's RA/TID combination */ /* this struct represents 802.11n's RA/TID combination */
struct ieee80211_ra_tid { struct ieee80211_ra_tid {
u8 ra[ETH_ALEN]; u8 ra[ETH_ALEN];
...@@ -827,11 +836,6 @@ static inline struct ieee80211_hw *local_to_hw( ...@@ -827,11 +836,6 @@ static inline struct ieee80211_hw *local_to_hw(
return &local->hw; return &local->hw;
} }
enum ieee80211_link_state_t {
IEEE80211_LINK_STATE_XOFF = 0,
IEEE80211_LINK_STATE_PENDING,
};
struct sta_attribute { struct sta_attribute {
struct attribute attr; struct attribute attr;
ssize_t (*show)(const struct sta_info *, char *buf); ssize_t (*show)(const struct sta_info *, char *buf);
......
...@@ -168,7 +168,7 @@ void ieee80211_if_set_type(struct net_device *dev, int type) ...@@ -168,7 +168,7 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
ifsta->flags |= IEEE80211_STA_CREATE_IBSS | ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_BSSID_SEL |
IEEE80211_STA_AUTO_CHANNEL_SEL; IEEE80211_STA_AUTO_CHANNEL_SEL;
if (sdata->local->hw.queues >= 4) if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
ifsta->flags |= IEEE80211_STA_WMM_ENABLED; ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev);
......
...@@ -1634,12 +1634,32 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) ...@@ -1634,12 +1634,32 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (result < 0) if (result < 0)
return result; return result;
/*
* We use the number of queues for feature tests (QoS, HT) internally
* so restrict them appropriately.
*/
#ifdef CONFIG_MAC80211_QOS
if (hw->queues > IEEE80211_MAX_QUEUES)
hw->queues = IEEE80211_MAX_QUEUES;
if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
if (hw->queues < 4)
hw->ampdu_queues = 0;
#else
hw->queues = 1;
hw->ampdu_queues = 0;
#endif
/* for now, mdev needs sub_if_data :/ */ /* for now, mdev needs sub_if_data :/ */
mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), mdev = alloc_netdev_mq(sizeof(struct ieee80211_sub_if_data),
"wmaster%d", ether_setup); "wmaster%d", ether_setup,
ieee80211_num_queues(hw));
if (!mdev) if (!mdev)
goto fail_mdev_alloc; goto fail_mdev_alloc;
if (ieee80211_num_queues(hw) > 1)
mdev->features |= NETIF_F_MULTI_QUEUE;
sdata = IEEE80211_DEV_TO_SUB_IF(mdev); sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
mdev->ieee80211_ptr = &sdata->wdev; mdev->ieee80211_ptr = &sdata->wdev;
sdata->wdev.wiphy = local->hw.wiphy; sdata->wdev.wiphy = local->hw.wiphy;
...@@ -1728,11 +1748,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) ...@@ -1728,11 +1748,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
goto fail_wep; goto fail_wep;
} }
if (hw->queues > IEEE80211_MAX_QUEUES)
hw->queues = IEEE80211_MAX_QUEUES;
if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
ieee80211_install_qdisc(local->mdev); ieee80211_install_qdisc(local->mdev);
/* add one default STA interface */ /* add one default STA interface */
......
...@@ -255,7 +255,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, ...@@ -255,7 +255,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
* sta_rx_agg_session_timer_expired for useage */ * sta_rx_agg_session_timer_expired for useage */
sta->timer_to_tid[i] = i; sta->timer_to_tid[i] = i;
/* tid to tx queue: initialize according to HW (0 is valid) */ /* tid to tx queue: initialize according to HW (0 is valid) */
sta->tid_to_tx_q[i] = local->hw.queues + local->hw.ampdu_queues; sta->tid_to_tx_q[i] = ieee80211_num_queues(&local->hw);
/* rx */ /* rx */
sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.tid_rx[i] = NULL; sta->ampdu_mlme.tid_rx[i] = NULL;
......
...@@ -213,18 +213,6 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, ...@@ -213,18 +213,6 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
return dur; return dur;
} }
static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local,
int queue)
{
return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]);
}
static inline int __ieee80211_queue_pending(const struct ieee80211_local *local,
int queue)
{
return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]);
}
static int inline is_ieee80211_device(struct net_device *dev, static int inline is_ieee80211_device(struct net_device *dev,
struct net_device *master) struct net_device *master)
{ {
...@@ -680,7 +668,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) ...@@ -680,7 +668,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
* etc. * etc.
*/ */
if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU || if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU ||
IEEE80211_SKB_CB(tx->skb)->queue >= tx->local->hw.queues)) skb_get_queue_mapping(tx->skb) >=
ieee80211_num_regular_queues(&tx->local->hw)))
return TX_DROP; return TX_DROP;
first = tx->skb; first = tx->skb;
...@@ -1098,11 +1087,9 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, ...@@ -1098,11 +1087,9 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int ret, i; int ret, i;
if (!ieee80211_qdisc_installed(local->mdev) && if (netif_subqueue_stopped(local->mdev, skb))
__ieee80211_queue_stopped(local, 0)) {
netif_stop_queue(local->mdev);
return IEEE80211_TX_AGAIN; return IEEE80211_TX_AGAIN;
}
if (skb) { if (skb) {
ieee80211_dump_frame(wiphy_name(local->hw.wiphy), ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
"TX to low-level driver", skb); "TX to low-level driver", skb);
...@@ -1121,7 +1108,8 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, ...@@ -1121,7 +1108,8 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
IEEE80211_TX_CTL_USE_CTS_PROTECT | IEEE80211_TX_CTL_USE_CTS_PROTECT |
IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_FIRST_FRAGMENT); IEEE80211_TX_CTL_FIRST_FRAGMENT);
if (__ieee80211_queue_stopped(local, info->queue)) if (netif_subqueue_stopped(local->mdev,
tx->extra_frag[i]))
return IEEE80211_TX_FRAG_AGAIN; return IEEE80211_TX_FRAG_AGAIN;
if (i == tx->num_extra_frag) { if (i == tx->num_extra_frag) {
info->tx_rate_idx = tx->last_frag_rate_idx; info->tx_rate_idx = tx->last_frag_rate_idx;
...@@ -1160,9 +1148,11 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) ...@@ -1160,9 +1148,11 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
ieee80211_tx_result res = TX_DROP, res_prepare; ieee80211_tx_result res = TX_DROP, res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int ret, i; int ret, i;
int queue = info->queue; u16 queue;
WARN_ON(__ieee80211_queue_pending(local, queue)); queue = skb_get_queue_mapping(skb);
WARN_ON(test_bit(queue, local->queues_pending));
if (unlikely(skb->len < 10)) { if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
...@@ -1233,28 +1223,28 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) ...@@ -1233,28 +1223,28 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
* queues, there's no reason for a driver to reject * queues, there's no reason for a driver to reject
* a frame there, warn and drop it. * a frame there, warn and drop it.
*/ */
if (WARN_ON(queue >= local->hw.queues)) if (WARN_ON(queue >= ieee80211_num_regular_queues(&local->hw)))
goto drop; goto drop;
store = &local->pending_packet[queue]; store = &local->pending_packet[queue];
if (ret == IEEE80211_TX_FRAG_AGAIN) if (ret == IEEE80211_TX_FRAG_AGAIN)
skb = NULL; skb = NULL;
set_bit(IEEE80211_LINK_STATE_PENDING, set_bit(queue, local->queues_pending);
&local->state[queue]);
smp_mb(); smp_mb();
/* When the driver gets out of buffers during sending of /*
* fragments and calls ieee80211_stop_queue, there is * When the driver gets out of buffers during sending of
* a small window between IEEE80211_LINK_STATE_XOFF and * fragments and calls ieee80211_stop_queue, the netif
* IEEE80211_LINK_STATE_PENDING flags are set. If a buffer * subqueue is stopped. There is, however, a small window
* in which the PENDING bit is not yet set. If a buffer
* gets available in that window (i.e. driver calls * gets available in that window (i.e. driver calls
* ieee80211_wake_queue), we would end up with ieee80211_tx * ieee80211_wake_queue), we would end up with ieee80211_tx
* called with IEEE80211_LINK_STATE_PENDING. Prevent this by * called with the PENDING bit still set. Prevent this by
* continuing transmitting here when that situation is * continuing transmitting here when that situation is
* possible to have happened. */ * possible to have happened.
if (!__ieee80211_queue_stopped(local, queue)) { */
clear_bit(IEEE80211_LINK_STATE_PENDING, if (!__netif_subqueue_stopped(local->mdev, queue)) {
&local->state[queue]); clear_bit(queue, local->queues_pending);
goto retry; goto retry;
} }
store->skb = skb; store->skb = skb;
...@@ -1509,7 +1499,8 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, ...@@ -1509,7 +1499,8 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
} }
/* receiver and we are QoS enabled, use a QoS type frame */ /* receiver and we are QoS enabled, use a QoS type frame */
if (sta_flags & WLAN_STA_WME && local->hw.queues >= 4) { if (sta_flags & WLAN_STA_WME &&
ieee80211_num_regular_queues(&local->hw) >= 4) {
fc |= IEEE80211_STYPE_QOS_DATA; fc |= IEEE80211_STYPE_QOS_DATA;
hdrlen += 2; hdrlen += 2;
} }
...@@ -1661,41 +1652,51 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, ...@@ -1661,41 +1652,51 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
return ret; return ret;
} }
/* helper functions for pending packets for when queues are stopped */
/*
* ieee80211_clear_tx_pending may not be called in a context where
* it is possible that it packets could come in again.
*/
void ieee80211_clear_tx_pending(struct ieee80211_local *local) void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{ {
int i, j; int i, j;
struct ieee80211_tx_stored_packet *store; struct ieee80211_tx_stored_packet *store;
for (i = 0; i < local->hw.queues; i++) { for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
if (!__ieee80211_queue_pending(local, i)) if (!test_bit(i, local->queues_pending))
continue; continue;
store = &local->pending_packet[i]; store = &local->pending_packet[i];
kfree_skb(store->skb); kfree_skb(store->skb);
for (j = 0; j < store->num_extra_frag; j++) for (j = 0; j < store->num_extra_frag; j++)
kfree_skb(store->extra_frag[j]); kfree_skb(store->extra_frag[j]);
kfree(store->extra_frag); kfree(store->extra_frag);
clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); clear_bit(i, local->queues_pending);
} }
} }
/*
* Transmit all pending packets. Called from tasklet, locks master device
* TX lock so that no new packets can come in.
*/
void ieee80211_tx_pending(unsigned long data) void ieee80211_tx_pending(unsigned long data)
{ {
struct ieee80211_local *local = (struct ieee80211_local *)data; struct ieee80211_local *local = (struct ieee80211_local *)data;
struct net_device *dev = local->mdev; struct net_device *dev = local->mdev;
struct ieee80211_tx_stored_packet *store; struct ieee80211_tx_stored_packet *store;
struct ieee80211_tx_data tx; struct ieee80211_tx_data tx;
int i, ret, reschedule = 0; int i, ret;
netif_tx_lock_bh(dev); netif_tx_lock_bh(dev);
for (i = 0; i < local->hw.queues; i++) { for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
if (__ieee80211_queue_stopped(local, i)) /* Check that this queue is ok */
if (__netif_subqueue_stopped(local->mdev, i))
continue; continue;
if (!__ieee80211_queue_pending(local, i)) {
reschedule = 1; if (!test_bit(i, local->queues_pending)) {
ieee80211_wake_queue(&local->hw, i);
continue; continue;
} }
store = &local->pending_packet[i]; store = &local->pending_packet[i];
tx.extra_frag = store->extra_frag; tx.extra_frag = store->extra_frag;
tx.num_extra_frag = store->num_extra_frag; tx.num_extra_frag = store->num_extra_frag;
...@@ -1708,19 +1709,11 @@ void ieee80211_tx_pending(unsigned long data) ...@@ -1708,19 +1709,11 @@ void ieee80211_tx_pending(unsigned long data)
if (ret == IEEE80211_TX_FRAG_AGAIN) if (ret == IEEE80211_TX_FRAG_AGAIN)
store->skb = NULL; store->skb = NULL;
} else { } else {
clear_bit(IEEE80211_LINK_STATE_PENDING, clear_bit(i, local->queues_pending);
&local->state[i]); ieee80211_wake_queue(&local->hw, i);
reschedule = 1;
} }
} }
netif_tx_unlock_bh(dev); netif_tx_unlock_bh(dev);
if (reschedule) {
if (!ieee80211_qdisc_installed(dev)) {
if (!__ieee80211_queue_stopped(local, 0))
netif_wake_queue(dev);
} else
netif_schedule(dev);
}
} }
/* functions for drivers to get certain frames */ /* functions for drivers to get certain frames */
......
...@@ -331,17 +331,15 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) ...@@ -331,17 +331,15 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
{ {
struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_local *local = hw_to_local(hw);
if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, if (test_bit(queue, local->queues_pending)) {
&local->state[queue])) { tasklet_schedule(&local->tx_pending_tasklet);
if (test_bit(IEEE80211_LINK_STATE_PENDING, } else {
&local->state[queue])) if (ieee80211_is_multiqueue(local)) {
tasklet_schedule(&local->tx_pending_tasklet); netif_wake_subqueue(local->mdev, queue);
else } else {
if (!ieee80211_qdisc_installed(local->mdev)) { WARN_ON(queue != 0);
if (queue == 0) netif_wake_queue(local->mdev);
netif_wake_queue(local->mdev); }
} else
__netif_schedule(local->mdev);
} }
} }
EXPORT_SYMBOL(ieee80211_wake_queue); EXPORT_SYMBOL(ieee80211_wake_queue);
...@@ -350,9 +348,12 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) ...@@ -350,9 +348,12 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
{ {
struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_local *local = hw_to_local(hw);
if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) if (ieee80211_is_multiqueue(local)) {
netif_stop_subqueue(local->mdev, queue);
} else {
WARN_ON(queue != 0);
netif_stop_queue(local->mdev); netif_stop_queue(local->mdev);
set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); }
} }
EXPORT_SYMBOL(ieee80211_stop_queue); EXPORT_SYMBOL(ieee80211_stop_queue);
...@@ -360,7 +361,7 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw) ...@@ -360,7 +361,7 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw)
{ {
int i; int i;
for (i = 0; i < hw->queues + hw->ampdu_queues; i++) for (i = 0; i < ieee80211_num_queues(hw); i++)
ieee80211_stop_queue(hw, i); ieee80211_stop_queue(hw, i);
} }
EXPORT_SYMBOL(ieee80211_stop_queues); EXPORT_SYMBOL(ieee80211_stop_queues);
......
...@@ -158,7 +158,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) ...@@ -158,7 +158,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
u8 tid; u8 tid;
if (info->flags & IEEE80211_TX_CTL_REQUEUE) { if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
queue = info->queue; queue = skb_get_queue_mapping(skb);
rcu_read_lock(); rcu_read_lock();
sta = sta_info_get(local, hdr->addr1); sta = sta_info_get(local, hdr->addr1);
tid = skb->priority & QOS_CONTROL_TAG1D_MASK; tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
...@@ -219,7 +219,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) ...@@ -219,7 +219,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
err = NET_XMIT_DROP; err = NET_XMIT_DROP;
} else { } else {
tid = skb->priority & QOS_CONTROL_TAG1D_MASK; tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
info->queue = (unsigned int) queue; skb_set_queue_mapping(skb, queue);
qdisc = q->queues[queue]; qdisc = q->queues[queue];
err = qdisc->enqueue(skb, qdisc); err = qdisc->enqueue(skb, qdisc);
if (err == NET_XMIT_SUCCESS) { if (err == NET_XMIT_SUCCESS) {
...@@ -240,12 +240,11 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) ...@@ -240,12 +240,11 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd) static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
{ {
struct ieee80211_sched_data *q = qdisc_priv(qd); struct ieee80211_sched_data *q = qdisc_priv(qd);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct Qdisc *qdisc; struct Qdisc *qdisc;
int err; int err;
/* we recorded which queue to use earlier! */ /* we recorded which queue to use earlier! */
qdisc = q->queues[info->queue]; qdisc = q->queues[skb_get_queue_mapping(skb)];
if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) { if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
qd->q.qlen++; qd->q.qlen++;
...@@ -269,11 +268,8 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd) ...@@ -269,11 +268,8 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
/* check all the h/w queues in numeric/priority order */ /* check all the h/w queues in numeric/priority order */
for (queue = 0; queue < QD_NUM(hw); queue++) { for (queue = 0; queue < QD_NUM(hw); queue++) {
/* see if there is room in this hardware queue */ /* see if there is room in this hardware queue */
if ((test_bit(IEEE80211_LINK_STATE_XOFF, if (__netif_subqueue_stopped(local->mdev, queue) ||
&local->state[queue])) || !test_bit(queue, q->qdisc_pool))
(test_bit(IEEE80211_LINK_STATE_PENDING,
&local->state[queue])) ||
(!test_bit(queue, q->qdisc_pool)))
continue; continue;
/* there is space - try and get a frame */ /* there is space - try and get a frame */
......
...@@ -31,7 +31,7 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc) ...@@ -31,7 +31,7 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
return (fc & 0x8C) == 0x88; return (fc & 0x8C) == 0x88;
} }
#ifdef CONFIG_NET_SCHED #ifdef CONFIG_MAC80211_QOS
void ieee80211_install_qdisc(struct net_device *dev); void ieee80211_install_qdisc(struct net_device *dev);
int ieee80211_qdisc_installed(struct net_device *dev); int ieee80211_qdisc_installed(struct net_device *dev);
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment