Commit d4a76f8a authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
  r8169: avoid losing MSI interrupts
  tcp: tcp_vegas ssthresh bugfix
  mac8390: fix regression caused during net_device_ops conversion
  gianfar: fix BUG under load after introduction of skb recycling
  wimax/i2400m: usb: fix device reset on autosuspend while not yet idle
  RxRPC: Error handling for rxrpc_alloc_connection()
  ipv4: Fix oops with FIB_TRIE
  pktgen: do not access flows[] beyond its length
  gigaset: beyond ARRAY_SIZE of iwb->data
  IPv6: set RTPROT_KERNEL to initial route
  net: fix rtable leak in net/ipv4/route.c
  net: fix length computation in rt_check_expire()
  wireless: beyond ARRAY_SIZE of intf->crypto_stats
  iwlwifi: update 5000 ucode support to version 2 of API
  cfg80211: fix race between core hint and driver's custom apply
  airo: fix airo_get_encode{,ext} buffer overflow like I mean it...
  ath5k: fix interpolation with equal power levels
  iwlwifi: do not cancel delayed work inside spin_lock_irqsave
  ath5k: fix exp off-by-one when computing OFDM delta slope
  wext: verify buffer size for SIOCSIWENCODEEXT
  ...
parents 60a0cd52 7682455e
@@ -175,7 +175,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
         return -EINVAL;
     }
     src = iwb->read;
-    if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
+    if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
                  (read < src && limit >= src))) {
         pr_err("isoc write buffer frame reservation violated\n");
         return -EFAULT;
...
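Why the ">=" matters: for a buffer of BAS_OUTBUFSIZE + BAS_OUTBUFPAD bytes, the last valid index is one less than that sum, so a limit equal to the sum already points one element past iwb->data. A standalone sketch of the same off-by-one (the buffer size and names here are made up, not the gigaset driver's):

#include <stdio.h>

#define BUF_SIZE 16   /* stand-in for BAS_OUTBUFSIZE + BAS_OUTBUFPAD */

/* A limit equal to BUF_SIZE already points one element past the end,
 * so it must be rejected: the last valid index is BUF_SIZE - 1.
 * A check using '>' would let limit == BUF_SIZE through. */
static int limit_ok(unsigned int limit)
{
    return limit < BUF_SIZE;
}

int main(void)
{
    printf("limit=15 -> %d (last valid index)\n", limit_ok(15));
    printf("limit=16 -> %d (one past the end, rejected)\n", limit_ok(16));
    return 0;
}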
@@ -102,7 +102,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
 obj-$(CONFIG_NET) += Space.o loopback.o
 obj-$(CONFIG_SEEQ8005) += seeq8005.o
 obj-$(CONFIG_NET_SB1000) += sb1000.o
-obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+obj-$(CONFIG_MAC8390) += mac8390.o
 obj-$(CONFIG_APNE) += apne.o 8390.o
 obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
 obj-$(CONFIG_HP100) += hp100.o
...
@@ -1885,8 +1885,17 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
             if (unlikely(!newskb))
                 newskb = skb;
-            else if (skb)
+            else if (skb) {
+                /*
+                 * We need to reset ->data to what it
+                 * was before gfar_new_skb() re-aligned
+                 * it to an RXBUF_ALIGNMENT boundary
+                 * before we put the skb back on the
+                 * recycle list.
+                 */
+                skb->data = skb->head + NET_SKB_PAD;
                 __skb_queue_head(&priv->rx_recycle, skb);
+            }
         } else {
             /* Increment the number of packets */
             dev->stats.rx_packets++;
...
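The added reset matters because, per the comment above, gfar_new_skb() shifts ->data forward to hit an RXBUF_ALIGNMENT boundary; if the skb is put back on the recycle list with that shift still applied, a recycled buffer no longer starts at its canonical head + NET_SKB_PAD position. A rough userspace sketch of the idea with a plain buffer structure (field names and constants here are invented, not the skb API):

#include <stdio.h>

#define HEADROOM   64   /* plays the role of NET_SKB_PAD */
#define ALIGNMENT  64   /* plays the role of RXBUF_ALIGNMENT */

struct buf {
    unsigned char *head;   /* start of the allocation */
    unsigned char *data;   /* current payload pointer */
};

/* What the driver does when handing a buffer to the hardware: push
 * ->data forward so the payload lands on an ALIGNMENT boundary. */
static void align_for_rx(struct buf *b)
{
    unsigned long off = (unsigned long)b->data & (ALIGNMENT - 1);

    if (off)
        b->data += ALIGNMENT - off;
}

/* What the fix does before recycling: drop the alignment shift and go
 * back to the canonical "head + headroom" position. */
static void reset_for_recycle(struct buf *b)
{
    b->data = b->head + HEADROOM;
}

int main(void)
{
    unsigned char storage[2048];
    struct buf b = { storage, storage + HEADROOM };

    align_for_rx(&b);
    printf("after align:   offset from head = %ld\n", (long)(b.data - b.head));
    reset_for_recycle(&b);
    printf("after recycle: offset from head = %ld\n", (long)(b.data - b.head));
    return 0;
}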
@@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
     if (!MACH_IS_MAC)
         return ERR_PTR(-ENODEV);
 
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
         return ERR_PTR(-ENOMEM);
@@ -481,10 +481,10 @@ void cleanup_module(void)
 static const struct net_device_ops mac8390_netdev_ops = {
     .ndo_open               = mac8390_open,
     .ndo_stop               = mac8390_close,
-    .ndo_start_xmit         = ei_start_xmit,
-    .ndo_tx_timeout         = ei_tx_timeout,
-    .ndo_get_stats          = ei_get_stats,
-    .ndo_set_multicast_list = ei_set_multicast_list,
+    .ndo_start_xmit         = __ei_start_xmit,
+    .ndo_tx_timeout         = __ei_tx_timeout,
+    .ndo_get_stats          = __ei_get_stats,
+    .ndo_set_multicast_list = __ei_set_multicast_list,
     .ndo_validate_addr      = eth_validate_addr,
     .ndo_set_mac_address    = eth_mac_addr,
     .ndo_change_mtu         = eth_change_mtu,
...
@@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
     int handled = 0;
     int status;
 
+    /* loop handling interrupts until we have no new ones or
+     * we hit a invalid/hotplug case.
+     */
     status = RTL_R16(IntrStatus);
+    while (status && status != 0xffff) {
+        handled = 1;
 
-    /* hotplug/major error/no more work/shared irq */
-    if ((status == 0xffff) || !status)
-        goto out;
-
-    handled = 1;
-
-    if (unlikely(!netif_running(dev))) {
-        rtl8169_asic_down(ioaddr);
-        goto out;
-    }
-
-    status &= tp->intr_mask;
-    RTL_W16(IntrStatus,
-        (status & RxFIFOOver) ? (status | RxOverflow) : status);
-
-    if (!(status & tp->intr_event))
-        goto out;
-
-    /* Work around for rx fifo overflow */
-    if (unlikely(status & RxFIFOOver) &&
-        (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-        netif_stop_queue(dev);
-        rtl8169_tx_timeout(dev);
-        goto out;
-    }
-
-    if (unlikely(status & SYSErr)) {
-        rtl8169_pcierr_interrupt(dev);
-        goto out;
+        /* Handle all of the error cases first. These will reset
+         * the chip, so just exit the loop.
+         */
+        if (unlikely(!netif_running(dev))) {
+            rtl8169_asic_down(ioaddr);
+            break;
+        }
+
+        /* Work around for rx fifo overflow */
+        if (unlikely(status & RxFIFOOver) &&
+            (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+            netif_stop_queue(dev);
+            rtl8169_tx_timeout(dev);
+            break;
+        }
+
+        if (unlikely(status & SYSErr)) {
+            rtl8169_pcierr_interrupt(dev);
+            break;
+        }
+
+        if (status & LinkChg)
+            rtl8169_check_link_status(dev, tp, ioaddr);
+
+        /* We need to see the lastest version of tp->intr_mask to
+         * avoid ignoring an MSI interrupt and having to wait for
+         * another event which may never come.
+         */
+        smp_rmb();
+        if (status & tp->intr_mask & tp->napi_event) {
+            RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+            tp->intr_mask = ~tp->napi_event;
+
+            if (likely(napi_schedule_prep(&tp->napi)))
+                __napi_schedule(&tp->napi);
+            else if (netif_msg_intr(tp)) {
+                printk(KERN_INFO "%s: interrupt %04x in poll\n",
+                       dev->name, status);
+            }
+        }
+
+        /* We only get a new MSI interrupt when all active irq
+         * sources on the chip have been acknowledged. So, ack
+         * everything we've seen and check if new sources have become
+         * active to avoid blocking all interrupts from the chip.
+         */
+        RTL_W16(IntrStatus,
+            (status & RxFIFOOver) ? (status | RxOverflow) : status);
+        status = RTL_R16(IntrStatus);
     }
 
-    if (status & LinkChg)
-        rtl8169_check_link_status(dev, tp, ioaddr);
-
-    if (status & tp->napi_event) {
-        RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
-        tp->intr_mask = ~tp->napi_event;
-
-        if (likely(napi_schedule_prep(&tp->napi)))
-            __napi_schedule(&tp->napi);
-        else if (netif_msg_intr(tp)) {
-            printk(KERN_INFO "%s: interrupt %04x in poll\n",
-                   dev->name, status);
-        }
-    }
-
-out:
     return IRQ_RETVAL(handled);
 }
@@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
     if (work_done < budget) {
         napi_complete(napi);
-        tp->intr_mask = 0xffff;
-        /*
-         * 20040426: the barrier is not strictly required but the
-         * behavior of the irq handler could be less predictable
-         * without it. Btw, the lack of flush for the posted pci
-         * write is safe - FR
+
+        /* We need for force the visibility of tp->intr_mask
+         * for other CPUs, as we can loose an MSI interrupt
+         * and potentially wait for a retransmit timeout if we don't.
+         * The posted write to IntrMask is safe, as it will
+         * eventually make it to the chip and we won't loose anything
+         * until it does.
          */
+        tp->intr_mask = 0xffff;
         smp_wmb();
 
         RTL_W16(IntrMask, tp->intr_event);
     }
...
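The new handler loops because, as its own comment says, an MSI message is only generated once every previously reported source has been acknowledged: a bit that asserts between the status read and the ack would otherwise never raise another interrupt. A standalone sketch of that loop shape against a fake device (read_status()/ack_status() are stand-ins, not the r8169 register API):

#include <stdint.h>
#include <stdio.h>

/* Fake "hardware": a latched status word the handler must acknowledge. */
static uint16_t hw_status = 0x0005;

static uint16_t read_status(void)         { return hw_status; }
static void     ack_status(uint16_t bits) { hw_status &= ~bits; }

/* MSI-style handler: keep acking and re-reading until no source is
 * pending (0) or the device looks unplugged (0xffff).  Without the
 * re-read, a bit that became pending after the first read would never
 * trigger another interrupt message. */
static int fake_irq_handler(void)
{
    int handled = 0;
    uint16_t status = read_status();

    while (status && status != 0xffff) {
        handled = 1;
        /* ...dispatch work for the bits in 'status' here... */
        ack_status(status);
        status = read_status();
    }
    return handled;
}

int main(void)
{
    printf("handled = %d, status now = 0x%04x\n",
           fake_irq_handler(), read_status());
    return 0;
}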
@@ -505,27 +505,52 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
 #ifdef CONFIG_PM
     struct usb_device *usb_dev = i2400mu->usb_dev;
 #endif
+    unsigned is_autosuspend = 0;
     struct i2400m *i2400m = &i2400mu->i2400m;
 
+#ifdef CONFIG_PM
+    if (usb_dev->auto_pm > 0)
+        is_autosuspend = 1;
+#endif
+
     d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
     if (i2400m->updown == 0)
         goto no_firmware;
-    d_printf(1, dev, "fw up, requesting standby\n");
+    if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
+        /* ugh -- the device is connected and this suspend
+         * request is an autosuspend one (not a system standby
+         * / hibernate).
+         *
+         * The only way the device can go to standby is if the
+         * link with the base station is in IDLE mode; that
+         * were the case, we'd be in status
+         * I2400M_SS_CONNECTED_IDLE. But we are not.
+         *
+         * If we *tell* him to go power save now, it'll reset
+         * as a precautionary measure, so if this is an
+         * autosuspend thing, say no and it'll come back
+         * later, when the link is IDLE
+         */
+        result = -EBADF;
+        d_printf(1, dev, "fw up, link up, not-idle, autosuspend: "
+                 "not entering powersave\n");
+        goto error_not_now;
+    }
+    d_printf(1, dev, "fw up: entering powersave\n");
     atomic_dec(&i2400mu->do_autopm);
     result = i2400m_cmd_enter_powersave(i2400m);
     atomic_inc(&i2400mu->do_autopm);
-#ifdef CONFIG_PM
-    if (result < 0 && usb_dev->auto_pm == 0) {
+    if (result < 0 && !is_autosuspend) {
         /* System suspend, can't fail */
         dev_err(dev, "failed to suspend, will reset on resume\n");
         result = 0;
     }
-#endif
     if (result < 0)
         goto error_enter_powersave;
     i2400mu_notification_release(i2400mu);
-    d_printf(1, dev, "fw up, got standby\n");
+    d_printf(1, dev, "powersave requested\n");
 error_enter_powersave:
+error_not_now:
 no_firmware:
     d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
             iface, pm_msg.event, result);
...
@@ -6467,6 +6467,7 @@ static int airo_get_encode(struct net_device *dev,
 {
     struct airo_info *local = dev->ml_priv;
     int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+    int wep_key_len;
     u8 buf[16];
 
     if (!local->wep_capable)
@@ -6500,11 +6501,13 @@ static int airo_get_encode(struct net_device *dev,
     dwrq->flags |= index + 1;
 
     /* Copy the key to the user buffer */
-    dwrq->length = get_wep_key(local, index, &buf[0], sizeof(buf));
-    if (dwrq->length != -1)
-        memcpy(extra, buf, dwrq->length);
-    else
+    wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf));
+    if (wep_key_len < 0) {
         dwrq->length = 0;
+    } else {
+        dwrq->length = wep_key_len;
+        memcpy(extra, buf, dwrq->length);
+    }
 
     return 0;
 }
@@ -6617,7 +6620,7 @@ static int airo_get_encodeext(struct net_device *dev,
     struct airo_info *local = dev->ml_priv;
     struct iw_point *encoding = &wrqu->encoding;
     struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
-    int idx, max_key_len;
+    int idx, max_key_len, wep_key_len;
     u8 buf[16];
 
     if (!local->wep_capable)
@@ -6661,11 +6664,13 @@ static int airo_get_encodeext(struct net_device *dev,
     memset(extra, 0, 16);
 
     /* Copy the key to the user buffer */
-    ext->key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
-    if (ext->key_len != -1)
-        memcpy(extra, buf, ext->key_len);
-    else
+    wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
+    if (wep_key_len < 0) {
         ext->key_len = 0;
+    } else {
+        ext->key_len = wep_key_len;
+        memcpy(extra, buf, ext->key_len);
+    }
 
     return 0;
 }
...
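The reason the old code overflowed: get_wep_key() returns -1 on error, but dwrq->length and ext->key_len are 16-bit unsigned fields, so after the assignment the stored value is 65535 and the "!= -1" test still passes (the u16 promotes to int 65535, which is not -1), sending memcpy() far past the 16-byte stack buffer. A tiny sketch of the type trap (the struct here is a simplification, not the real struct iw_point):

#include <stdio.h>
#include <stdint.h>

struct fake_point {
    uint16_t length;        /* like iw_point.length / iw_encode_ext.key_len */
};

static int get_key(int fail)
{
    return fail ? -1 : 13;  /* like get_wep_key(): key length, or -1 on error */
}

int main(void)
{
    struct fake_point p;

    p.length = get_key(1);                  /* -1 is stored as 65535 */
    if (p.length != -1)                     /* 65535 != -1 -> true! */
        printf("would memcpy %u bytes out of a 16-byte buffer\n",
               (unsigned)p.length);

    /* The fix: test the signed return value before storing it. */
    int len = get_key(1);
    if (len < 0)
        printf("error detected, copy skipped\n");
    return 0;
}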
@@ -1487,28 +1487,35 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
 {
     s8 tmp;
     s16 min_pwrL, min_pwrR;
-    s16 pwr_i = pwrL[0];
-
-    do {
-        pwr_i--;
-        tmp = (s8) ath5k_get_interpolated_value(pwr_i,
-                        pwrL[0], pwrL[1],
-                        stepL[0], stepL[1]);
-    } while (tmp > 1);
-
-    min_pwrL = pwr_i;
-
-    pwr_i = pwrR[0];
-    do {
-        pwr_i--;
-        tmp = (s8) ath5k_get_interpolated_value(pwr_i,
-                        pwrR[0], pwrR[1],
-                        stepR[0], stepR[1]);
-    } while (tmp > 1);
-
-    min_pwrR = pwr_i;
+    s16 pwr_i;
+
+    if (pwrL[0] == pwrL[1])
+        min_pwrL = pwrL[0];
+    else {
+        pwr_i = pwrL[0];
+        do {
+            pwr_i--;
+            tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+                            pwrL[0], pwrL[1],
+                            stepL[0], stepL[1]);
+        } while (tmp > 1);
+
+        min_pwrL = pwr_i;
+    }
+
+    if (pwrR[0] == pwrR[1])
+        min_pwrR = pwrR[0];
+    else {
+        pwr_i = pwrR[0];
+        do {
+            pwr_i--;
+            tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+                            pwrR[0], pwrR[1],
+                            stepR[0], stepR[1]);
+        } while (tmp > 1);
+
+        min_pwrR = pwr_i;
+    }
 
     /* Keep the right boundary so that it works for both curves */
     return max(min_pwrL, min_pwrR);
...
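The new guards skip the search entirely when the two calibration points have equal power: interpolating between identical x-coordinates has no defined slope, so stepping pwr_i downward can never converge the way the do/while expects. A small sketch of linear interpolation with the same degenerate-endpoint guard (the helper name is invented, not the ath5k function):

#include <stdio.h>

/* Linear interpolation of y at target_x between (x0,y0) and (x1,y1).
 * With x0 == x1 the slope is undefined, so return the left value
 * instead of dividing by zero -- and the caller should not keep
 * searching along a "curve" that is really a single point. */
static int interpolate(int target_x, int x0, int x1, int y0, int y1)
{
    if (x0 == x1)
        return y0;
    return y0 + (target_x - x0) * (y1 - y0) / (x1 - x0);
}

int main(void)
{
    printf("normal:      %d\n", interpolate(5, 0, 10, 0, 100));
    printf("degenerate:  %d\n", interpolate(5, 7, 7, 42, 42));
    return 0;
}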
@@ -26,7 +26,7 @@
 \*****************************/
 
 #include <linux/pci.h>          /* To determine if a card is pci-e */
-#include <linux/bitops.h>       /* For get_bitmask_order */
+#include <linux/log2.h>
 #include "ath5k.h"
 #include "reg.h"
 #include "base.h"
@@ -69,10 +69,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
     /* Get exponent
      * ALGO: coef_exp = 14 - highest set bit position */
-    coef_exp = get_bitmask_order(coef_scaled);
+    coef_exp = ilog2(coef_scaled);
 
     /* Doesn't make sense if it's zero*/
-    if (!coef_exp)
+    if (!coef_scaled || !coef_exp)
         return -EINVAL;
 
     /* Note: we've shifted coef_scaled by 24 */
@@ -359,7 +359,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
         mode |= AR5K_PHY_MODE_FREQ_5GHZ;
 
         if (ah->ah_radio == AR5K_RF5413)
-            clock |= AR5K_PHY_PLL_40MHZ_5413;
+            clock = AR5K_PHY_PLL_40MHZ_5413;
         else
             clock |= AR5K_PHY_PLL_40MHZ;
...
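The "highest set bit position" the algorithm wants is what ilog2() returns (counting from 0), whereas get_bitmask_order() returns fls(), i.e. one more than that, which is where the off-by-one in the exponent came from; and since ilog2(0) is undefined, the extra !coef_scaled test is needed. A sketch contrasting the two conventions with portable reimplementations (my own helpers, not the kernel ones; the example value is arbitrary):

#include <stdio.h>

/* floor(log2(x)) for x > 0: highest set bit position, counting from 0.
 * This is the convention of the kernel's ilog2(). */
static int my_ilog2(unsigned int x)
{
    int pos = -1;

    while (x) {
        x >>= 1;
        pos++;
    }
    return pos;
}

/* Highest set bit position counting from 1 (0 for x == 0), i.e.
 * fls()/get_bitmask_order(): always my_ilog2(x) + 1 for x > 0. */
static int my_bitmask_order(unsigned int x)
{
    return my_ilog2(x) + 1;
}

int main(void)
{
    unsigned int coef_scaled = 0x1a00;  /* arbitrary example value */

    printf("fls/get_bitmask_order style: %d\n", my_bitmask_order(coef_scaled));
    printf("ilog2 style:                 %d\n", my_ilog2(coef_scaled));
    return 0;
}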
@@ -46,7 +46,7 @@
 #include "iwl-6000-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 1
+#define IWL5000_UCODE_API_MAX 2
 #define IWL5150_UCODE_API_MAX 2
 
 /* Lowest firmware API version supported */
...
@@ -669,13 +669,6 @@ static int iwl_set_mode(struct iwl_priv *priv, int mode)
     if (!iwl_is_ready_rf(priv))
         return -EAGAIN;
 
-    cancel_delayed_work(&priv->scan_check);
-    if (iwl_scan_cancel_timeout(priv, 100)) {
-        IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
-        IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
-        return -EAGAIN;
-    }
-
     iwl_commit_rxon(priv);
 
     return 0;
...
@@ -227,9 +227,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
     /* The HW is no longer scanning */
     clear_bit(STATUS_SCAN_HW, &priv->status);
 
-    /* The scan completion notification came in, so kill that timer... */
-    cancel_delayed_work(&priv->scan_check);
-
     IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
                    (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
                                         "2.4" : "5.2",
@@ -712,6 +709,8 @@ static void iwl_bg_request_scan(struct work_struct *data)
 
     mutex_lock(&priv->mutex);
 
+    cancel_delayed_work(&priv->scan_check);
+
     if (!iwl_is_ready(priv)) {
         IWL_WARN(priv, "request scan called when driver not ready.\n");
         goto done;
@@ -925,6 +924,8 @@ void iwl_bg_scan_completed(struct work_struct *work)
 
     IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
 
+    cancel_delayed_work(&priv->scan_check);
+
     ieee80211_scan_completed(priv->hw, false);
 
     if (test_bit(STATUS_EXIT_PENDING, &priv->status))
...
@@ -782,13 +782,6 @@ static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
     if (!iwl_is_ready_rf(priv))
         return -EAGAIN;
 
-    cancel_delayed_work(&priv->scan_check);
-    if (iwl_scan_cancel_timeout(priv, 100)) {
-        IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
-        IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
-        return -EAGAIN;
-    }
-
     iwl3945_commit_rxon(priv);
 
     return 0;
@@ -3298,6 +3291,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
 
     mutex_lock(&priv->mutex);
 
+    cancel_delayed_work(&priv->scan_check);
+
     if (!iwl_is_ready(priv)) {
         IWL_WARN(priv, "request scan called when driver not ready.\n");
         goto done;
...
@@ -138,7 +138,7 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
     if (cipher == CIPHER_TKIP_NO_MIC)
         cipher = CIPHER_TKIP;
-    if (cipher == CIPHER_NONE || cipher > CIPHER_MAX)
+    if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX)
         return;
 
     /* Remove CIPHER_NONE index */
...
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
     if (pkt_dev->cflows) {
         /* let go of the SAs if we have them */
         int i = 0;
-        for (; i < pkt_dev->nflows; i++){
+        for (; i < pkt_dev->cflows; i++) {
             struct xfrm_state *x = pkt_dev->flows[i].x;
             if (x) {
                 xfrm_state_put(x);
...
@@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key)
 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 {
     int wasfull;
-    t_key cindex, key = tn->key;
+    t_key cindex, key;
     struct tnode *tp;
 
+    preempt_disable();
+    key = tn->key;
+
     while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
         cindex = tkey_extract_bits(key, tp->pos, tp->bits);
         wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
@@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
     if (IS_TNODE(tn))
         tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
+    preempt_enable();
     return (struct node *)tn;
 }
...
@@ -784,8 +784,8 @@ static void rt_check_expire(void)
 {
     static unsigned int rover;
     unsigned int i = rover, goal;
-    struct rtable *rth, **rthp;
-    unsigned long length = 0, samples = 0;
+    struct rtable *rth, *aux, **rthp;
+    unsigned long samples = 0;
     unsigned long sum = 0, sum2 = 0;
     u64 mult;
@@ -795,9 +795,9 @@ static void rt_check_expire(void)
     goal = (unsigned int)mult;
     if (goal > rt_hash_mask)
         goal = rt_hash_mask + 1;
-    length = 0;
     for (; goal > 0; goal--) {
         unsigned long tmo = ip_rt_gc_timeout;
+        unsigned long length;
 
         i = (i + 1) & rt_hash_mask;
         rthp = &rt_hash_table[i].chain;
@@ -809,8 +809,10 @@ static void rt_check_expire(void)
         if (*rthp == NULL)
             continue;
 
+        length = 0;
         spin_lock_bh(rt_hash_lock_addr(i));
         while ((rth = *rthp) != NULL) {
+            prefetch(rth->u.dst.rt_next);
             if (rt_is_expired(rth)) {
                 *rthp = rth->u.dst.rt_next;
                 rt_free(rth);
@@ -819,33 +821,30 @@ static void rt_check_expire(void)
             if (rth->u.dst.expires) {
                 /* Entry is expired even if it is in use */
                 if (time_before_eq(jiffies, rth->u.dst.expires)) {
+nofree:
                     tmo >>= 1;
                     rthp = &rth->u.dst.rt_next;
                     /*
-                     * Only bump our length if the hash
-                     * inputs on entries n and n+1 are not
-                     * the same, we only count entries on
+                     * We only count entries on
                      * a chain with equal hash inputs once
                      * so that entries for different QOS
                      * levels, and other non-hash input
                      * attributes don't unfairly skew
                      * the length computation
                      */
-                    if ((*rthp == NULL) ||
-                        !compare_hash_inputs(&(*rthp)->fl,
-                                             &rth->fl))
-                        length += ONE;
+                    for (aux = rt_hash_table[i].chain;;) {
+                        if (aux == rth) {
+                            length += ONE;
+                            break;
+                        }
+                        if (compare_hash_inputs(&aux->fl, &rth->fl))
+                            break;
+                        aux = aux->u.dst.rt_next;
+                    }
                     continue;
                 }
-            } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
-                tmo >>= 1;
-                rthp = &rth->u.dst.rt_next;
-                if ((*rthp == NULL) ||
-                    !compare_hash_inputs(&(*rthp)->fl,
-                                         &rth->fl))
-                    length += ONE;
-                continue;
-            }
+            } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                goto nofree;
 
             /* Cleanup aged off entries. */
             *rthp = rth->u.dst.rt_next;
@@ -1068,7 +1067,6 @@ out:	return 0;
 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 {
     struct rtable *rth, **rthp;
-    struct rtable *rthi;
     unsigned long now;
     struct rtable *cand, **candp;
     u32 min_score;
@@ -1088,7 +1086,6 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
     }
 
     rthp = &rt_hash_table[hash].chain;
-    rthi = NULL;
 
     spin_lock_bh(rt_hash_lock_addr(hash));
     while ((rth = *rthp) != NULL) {
@@ -1134,17 +1131,6 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
         chain_length++;
 
         rthp = &rth->u.dst.rt_next;
-
-        /*
-         * check to see if the next entry in the chain
-         * contains the same hash input values as rt. If it does
-         * This is where we will insert into the list, instead of
-         * at the head. This groups entries that differ by aspects not
-         * relvant to the hash function together, which we use to adjust
-         * our chain length
-         */
-        if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
-            rthi = rth;
     }
 
     if (cand) {
@@ -1205,10 +1191,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
         }
     }
 
-    if (rthi)
-        rt->u.dst.rt_next = rthi->u.dst.rt_next;
-    else
-        rt->u.dst.rt_next = rt_hash_table[hash].chain;
+    rt->u.dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
     if (rt->u.dst.rt_next) {
@@ -1224,10 +1207,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
      * previous writes to rt are comitted to memory
      * before making rt visible to other CPUS.
      */
-    if (rthi)
-        rcu_assign_pointer(rthi->u.dst.rt_next, rt);
-    else
-        rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+    rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
     spin_unlock_bh(rt_hash_lock_addr(hash));
     *rp = rt;
...
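The rewritten length accounting walks the chain from its head and credits an entry only if it is the first one carrying its particular hash inputs, so duplicates (same flow, differing only in QOS/TOS-style attributes) are counted once wherever they sit; the old code only compared an entry with its immediate successor. A compact userspace sketch of that "count first occurrence only" scan over a singly linked list (the types and key field are invented for the sketch):

#include <stdio.h>

struct entry {
    int key;                /* stands in for the flow hash inputs */
    struct entry *next;
};

/* Count how many distinct keys appear in the chain by crediting an
 * entry only if no earlier entry carries the same key -- the same rule
 * rt_check_expire() now applies with compare_hash_inputs(). */
static int chain_length(struct entry *head)
{
    int length = 0;

    for (struct entry *e = head; e; e = e->next) {
        struct entry *aux = head;

        for (;;) {
            if (aux == e) {          /* first occurrence of this key */
                length++;
                break;
            }
            if (aux->key == e->key)  /* key seen earlier in the chain */
                break;
            aux = aux->next;
        }
    }
    return length;
}

int main(void)
{
    struct entry c = { 7, NULL }, b = { 3, &c }, a = { 7, &b };

    printf("distinct keys = %d\n", chain_length(&a));  /* prints 2 */
    return 0;
}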
@@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
+static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
+{
+    return min(tp->snd_ssthresh, tp->snd_cwnd-1);
+}
+
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 {
     struct tcp_sock *tp = tcp_sk(sk);
@@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
          */
         diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-        if (diff > gamma && tp->snd_ssthresh > 2 ) {
+        if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
             /* Going too fast. Time to slow down
              * and switch to congestion avoidance.
              */
-            tp->snd_ssthresh = 2;
 
             /* Set cwnd to match the actual rate
              * exactly:
@@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
              * utilization.
              */
             tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+            tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
         } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
             /* Slow start. */
@@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                  * we slow down.
                  */
                 tp->snd_cwnd--;
+                tp->snd_ssthresh
+                    = tcp_vegas_ssthresh(tp);
             } else if (diff < alpha) {
                 /* We don't have enough extra packets
                  * in the network, so speed up.
...
@@ -137,6 +137,7 @@ static struct rt6_info ip6_null_entry_template = {
         }
     },
     .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
+    .rt6i_protocol = RTPROT_KERNEL,
     .rt6i_metric = ~(u32) 0,
     .rt6i_ref = ATOMIC_INIT(1),
 };
@@ -159,6 +160,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
         }
     },
     .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
+    .rt6i_protocol = RTPROT_KERNEL,
     .rt6i_metric = ~(u32) 0,
     .rt6i_ref = ATOMIC_INIT(1),
 };
@@ -176,6 +178,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
         }
     },
     .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
+    .rt6i_protocol = RTPROT_KERNEL,
     .rt6i_metric = ~(u32) 0,
     .rt6i_ref = ATOMIC_INIT(1),
 };
...
@@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
     /* not yet present - create a candidate for a new connection
      * and then redo the check */
     conn = rxrpc_alloc_connection(gfp);
-    if (IS_ERR(conn)) {
-        _leave(" = %ld", PTR_ERR(conn));
-        return PTR_ERR(conn);
+    if (!conn) {
+        _leave(" = -ENOMEM");
+        return -ENOMEM;
     }
 
     conn->trans = trans;
@@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
     /* not yet present - create a candidate for a new connection and then
      * redo the check */
     candidate = rxrpc_alloc_connection(gfp);
-    if (IS_ERR(candidate)) {
-        _leave(" = %ld", PTR_ERR(candidate));
-        return PTR_ERR(candidate);
+    if (!candidate) {
+        _leave(" = -ENOMEM");
+        return -ENOMEM;
     }
 
     candidate->trans = trans;
...
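The change matters because rxrpc_alloc_connection() evidently reports failure by returning NULL rather than an ERR_PTR() value (hence the new "!conn" test): IS_ERR(NULL) is false, so the old checks treated a failed allocation as success and went on to dereference the NULL pointer. A standalone sketch of the two return conventions (ERR_PTR/IS_ERR are reimplemented here in simplified form for illustration; the real ones live in <linux/err.h>):

#include <stdio.h>

/* Simplified versions of the kernel's <linux/err.h> helpers: a small
 * range of "pointer" values just below the top of the address space
 * encodes a negative errno.  NULL is NOT in that range. */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Allocator in the style of rxrpc_alloc_connection(): NULL on failure. */
static void *alloc_conn(int fail)
{
    static int dummy;

    return fail ? NULL : &dummy;
}

int main(void)
{
    void *conn = alloc_conn(1);

    printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(ERR_PTR(-12)));  /* 1 */

    /* Old-style check: IS_ERR(NULL) is false, so the failure slips
     * through and the caller would dereference a NULL pointer. */
    printf("IS_ERR(conn)         = %d\n", IS_ERR(conn));          /* 0 */

    /* Correct check for a NULL-returning allocator. */
    if (!conn)
        printf("allocation failed, return -ENOMEM\n");
    return 0;
}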
@@ -1551,6 +1551,13 @@ static int regulatory_hint_core(const char *alpha2)
 
     queue_regulatory_request(request);
 
+    /*
+     * This ensures last_request is populated once modules
+     * come swinging in and calling regulatory hints and
+     * wiphy_apply_custom_regulatory().
+     */
+    flush_scheduled_work();
+
     return 0;
 }
...
......
...@@ -786,6 +786,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, ...@@ -786,6 +786,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
err = -EFAULT; err = -EFAULT;
goto out; goto out;
} }
if (cmd == SIOCSIWENCODEEXT) {
struct iw_encode_ext *ee = (void *) extra;
if (iwp->length < sizeof(*ee) + ee->key_len)
return -EFAULT;
}
} }
err = handler(dev, info, (union iwreq_data *) iwp, extra); err = handler(dev, info, (union iwreq_data *) iwp, extra);
......
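The added check guards a variable-length structure supplied by userspace: iw_encode_ext carries a key_len field that is followed by key_len bytes of key material, so the handler must confirm the user actually provided sizeof(struct) + key_len bytes before any driver reads the key. A generic sketch of validating such a self-describing buffer (the struct and names are illustrative, not the wext ABI):

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

struct ext_key {
    unsigned short key_len;   /* claims how many key bytes follow */
    unsigned char  key[];     /* flexible array member */
};

/* Reject the request unless the caller really supplied enough bytes to
 * cover the header plus the key length it claims -- the same shape as
 * the SIOCSIWENCODEEXT check added above. */
static int validate(const void *buf, size_t buf_len)
{
    const struct ext_key *ee = buf;

    if (buf_len < sizeof(*ee))
        return -1;
    if (buf_len < sizeof(*ee) + ee->key_len)
        return -1;
    return 0;
}

int main(void)
{
    unsigned char *raw = malloc(64);
    struct ext_key *ee = (struct ext_key *)raw;

    ee->key_len = 13;
    printf("claimed 13, gave %zu bytes -> %d\n",
           sizeof(*ee) + 13, validate(raw, sizeof(*ee) + 13));  /* accepted */
    printf("claimed 13, gave %zu bytes -> %d\n",
           sizeof(*ee) + 4, validate(raw, sizeof(*ee) + 4));    /* rejected */
    free(raw);
    return 0;
}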