Commit d122179a authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	net/core/ethtool.c
parents 419c2046 b00916b1
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
 {
         struct atm_dev *dev;
         IADEV *iadev;
-        unsigned long flags;
         int ret;

         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
         ia_dev[iadev_count] = iadev;
         _ia_dev[iadev_count] = dev;
         iadev_count++;
-        spin_lock_init(&iadev->misc_lock);
-        /* First fixes first. I don't want to think about this now. */
-        spin_lock_irqsave(&iadev->misc_lock, flags);
         if (ia_init(dev) || ia_start(dev)) {
                 IF_INIT(printk("IA register failed!\n");)
                 iadev_count--;
                 ia_dev[iadev_count] = NULL;
                 _ia_dev[iadev_count] = NULL;
-                spin_unlock_irqrestore(&iadev->misc_lock, flags);
                 ret = -EINVAL;
                 goto err_out_deregister_dev;
         }
-        spin_unlock_irqrestore(&iadev->misc_lock, flags);
         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

         iadev->next_board = ia_boards;
...
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
         struct dle_q rx_dle_q;
         struct free_desc_q *rx_free_desc_qhead;
         struct sk_buff_head rx_dma_q;
-        spinlock_t rx_lock, misc_lock;
+        spinlock_t rx_lock;
         struct atm_vcc **rx_open;       /* list of all open VCs */
         u16 num_rx_desc, rx_buf_sz, rxing;
         u32 rx_pkt_ram, rx_tmp_cnt;
...
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
         struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
         struct solos_card *card = atmdev->dev_data;
         struct sk_buff *skb;
+        unsigned int len;

         spin_lock(&card->cli_queue_lock);
         skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
         if(skb == NULL)
                 return sprintf(buf, "No data.\n");

-        memcpy(buf, skb->data, skb->len);
-        dev_dbg(&card->dev->dev, "len: %d\n", skb->len);
+        len = skb->len;
+        memcpy(buf, skb->data, len);
+        dev_dbg(&card->dev->dev, "len: %d\n", len);
         kfree_skb(skb);
-        return skb->len;
+        return len;
 }

 static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
...
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
                 }
                 else if(callid>=0x0000 && callid<=0x7FFF)
                 {
+                        int len;
+
                         pr_debug("%s: Got Incoming Call\n",
                                         sc_adapter[card]->devicename);
-                        strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
-                        strcpy(setup.eazmsn,
-                                sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
+                        len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
+                                        sizeof(setup.phone));
+                        if (len >= sizeof(setup.phone))
+                                continue;
+                        len = strlcpy(setup.eazmsn,
+                                        sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+                                        sizeof(setup.eazmsn));
+                        if (len >= sizeof(setup.eazmsn))
+                                continue;
                         setup.si1 = 7;
                         setup.si2 = 0;
                         setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
                          * Handle a GetMyNumber Rsp
                          */
                         if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
-                                strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
+                                strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+                                        rcvmsg.msg_data.byte_array,
+                                        sizeof(rcvmsg.msg_data.byte_array));
                                 continue;
                         }
...
@@ -1311,6 +1311,9 @@ fec_probe(struct platform_device *pdev)
         if (ret)
                 goto failed_mii_init;

+        /* Carrier starts down, phylib will bring it up */
+        netif_carrier_off(ndev);
+
         ret = register_netdev(ndev);
         if (ret)
                 goto failed_register;
...
@@ -1217,7 +1217,8 @@ static void rtl8169_update_counters(struct net_device *dev)
         if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
                 return;

-        counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
+        counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
+                                      &paddr, GFP_KERNEL);
         if (!counters)
                 return;
@@ -1238,7 +1239,8 @@ static void rtl8169_update_counters(struct net_device *dev)
         RTL_W32(CounterAddrLow, 0);
         RTL_W32(CounterAddrHigh, 0);

-        pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
+        dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
+                          paddr);
 }

 static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -3298,15 +3300,15 @@ static int rtl8169_open(struct net_device *dev)
         /*
          * Rx and Tx desscriptors needs 256 bytes alignment.
-         * pci_alloc_consistent provides more.
+         * dma_alloc_coherent provides more.
          */
-        tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
-                                               &tp->TxPhyAddr);
+        tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+                                             &tp->TxPhyAddr, GFP_KERNEL);
         if (!tp->TxDescArray)
                 goto err_pm_runtime_put;

-        tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
-                                               &tp->RxPhyAddr);
+        tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+                                             &tp->RxPhyAddr, GFP_KERNEL);
         if (!tp->RxDescArray)
                 goto err_free_tx_0;
@@ -3340,11 +3342,11 @@ static int rtl8169_open(struct net_device *dev)
 err_release_ring_2:
         rtl8169_rx_clear(tp);
 err_free_rx_1:
-        pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
+        dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
                           tp->RxPhyAddr);
         tp->RxDescArray = NULL;
 err_free_tx_0:
-        pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
+        dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
                           tp->TxPhyAddr);
         tp->TxDescArray = NULL;
 err_pm_runtime_put:
@@ -3981,7 +3983,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
 {
         struct pci_dev *pdev = tp->pci_dev;

-        pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+        dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
                          PCI_DMA_FROMDEVICE);
         dev_kfree_skb(*sk_buff);
         *sk_buff = NULL;
@@ -4006,7 +4008,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
 static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
                                             struct net_device *dev,
                                             struct RxDesc *desc, int rx_buf_sz,
-                                            unsigned int align)
+                                            unsigned int align, gfp_t gfp)
 {
         struct sk_buff *skb;
         dma_addr_t mapping;
@@ -4014,13 +4016,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
         pad = align ? align : NET_IP_ALIGN;

-        skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+        skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
         if (!skb)
                 goto err_out;

         skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);

-        mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+        mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
                                  PCI_DMA_FROMDEVICE);
         rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4045,7 +4047,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
 }

 static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
-                           u32 start, u32 end)
+                           u32 start, u32 end, gfp_t gfp)
 {
         u32 cur;
@@ -4060,7 +4062,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
                 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
                                            tp->RxDescArray + i,
-                                           tp->rx_buf_sz, tp->align);
+                                           tp->rx_buf_sz, tp->align, gfp);
                 if (!skb)
                         break;
@@ -4088,7 +4090,7 @@ static int rtl8169_init_ring(struct net_device *dev)
         memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
         memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

-        if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+        if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
                 goto err_out;

         rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4105,7 +4107,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
 {
         unsigned int len = tx_skb->len;

-        pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+        dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+                         PCI_DMA_TODEVICE);
         desc->opts1 = 0x00;
         desc->opts2 = 0x00;
         desc->addr = 0x00;
@@ -4249,7 +4252,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
                 txd = tp->TxDescArray + entry;
                 len = frag->size;
                 addr = ((void *) page_address(frag->page)) + frag->page_offset;
-                mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+                mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
+                                         PCI_DMA_TODEVICE);

                 /* anti gcc 2.95.3 bugware (sic) */
                 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4319,7 +4323,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
                 tp->tx_skb[entry].skb = skb;
         }

-        mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+        mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+                                 PCI_DMA_TODEVICE);
         tp->tx_skb[entry].len = len;
         txd->addr = cpu_to_le64(mapping);
@@ -4482,7 +4487,7 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
         if (!skb)
                 goto out;

-        pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
+        dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
                                     PCI_DMA_FROMDEVICE);
         skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
         *sk_buff = skb;
@@ -4552,11 +4557,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                 }

                 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
-                        pci_dma_sync_single_for_device(pdev, addr,
+                        dma_sync_single_for_device(&pdev->dev, addr,
                                 pkt_size, PCI_DMA_FROMDEVICE);
                         rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
                 } else {
-                        pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+                        dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
                                          PCI_DMA_FROMDEVICE);
                         tp->Rx_skbuff[entry] = NULL;
                 }
@@ -4587,7 +4592,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
         count = cur_rx - tp->cur_rx;
         tp->cur_rx = cur_rx;

-        delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+        delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
         if (!delta && count)
                 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
         tp->dirty_rx += delta;
@@ -4773,9 +4778,9 @@ static int rtl8169_close(struct net_device *dev)
         free_irq(dev->irq, dev);

-        pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
+        dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
                             tp->RxPhyAddr);
-        pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
+        dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
                             tp->TxPhyAddr);
         tp->TxDescArray = NULL;
         tp->RxDescArray = NULL;
...
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
         int i, result;
         struct device *dev = i2400m_dev(i2400m);
         const struct i2400m_msg_hdr *msg_hdr;
-        size_t pl_itr, pl_size, skb_len;
+        size_t pl_itr, pl_size;
         unsigned long flags;
-        unsigned num_pls, single_last;
+        unsigned num_pls, single_last, skb_len;

         skb_len = skb->len;
-        d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+        d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
                   i2400m, skb, skb_len);
         result = -EIO;
         msg_hdr = (void *) skb->data;
-        result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+        result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
         if (result < 0)
                 goto error_msg_hdr_check;
         result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
         pl_itr = sizeof(*msg_hdr) +     /* Check payload descriptor(s) */
                 num_pls * sizeof(msg_hdr->pld[0]);
         pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
-        if (pl_itr > skb->len) {        /* got all the payload descriptors? */
+        if (pl_itr > skb_len) { /* got all the payload descriptors? */
                 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
                         "%u payload descriptors (%zu each, total %zu)\n",
-                        skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+                        skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
                 goto error_pl_descr_short;
         }
         /* Walk each payload payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
                 /* work around old gcc warnings */
                 pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
                 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
-                                                  pl_itr, skb->len);
+                                                  pl_itr, skb_len);
                 if (result < 0)
                         goto error_pl_descr_check;
                 single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
         if (i < i2400m->rx_pl_min)
                 i2400m->rx_pl_min = i;
         i2400m->rx_num++;
-        i2400m->rx_size_acc += skb->len;
-        if (skb->len < i2400m->rx_size_min)
-                i2400m->rx_size_min = skb->len;
-        if (skb->len > i2400m->rx_size_max)
-                i2400m->rx_size_max = skb->len;
+        i2400m->rx_size_acc += skb_len;
+        if (skb_len < i2400m->rx_size_min)
+                i2400m->rx_size_min = skb_len;
+        if (skb_len > i2400m->rx_size_max)
+                i2400m->rx_size_max = skb_len;
         spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 error_pl_descr_check:
 error_pl_descr_short:
 error_msg_hdr_check:
-        d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+        d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
                 i2400m, skb, skb_len, result);
         return result;
 }
...
@@ -161,12 +161,30 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long l
 {
         struct sk_buff *skb;

+        release_sock(sk);
         if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
                 skb_reserve(skb, BT_SKB_RESERVE);
                 bt_cb(skb)->incoming = 0;
         }
+        lock_sock(sk);
+
+        if (!skb && *err)
+                return NULL;
+
+        *err = sock_error(sk);
+        if (*err)
+                goto out;
+
+        if (sk->sk_shutdown) {
+                *err = -ECONNRESET;
+                goto out;
+        }

         return skb;
+
+out:
+        kfree_skb(skb);
+        return NULL;
 }

 int bt_err(__u16 code);
...
@@ -778,7 +778,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
         eg->packets_rcvd++;
         mpc->eg_ops->put(eg);

-        memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
+        memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
         netif_rx(new_skb);
 }
...
@@ -1441,33 +1441,23 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
 static void l2cap_streaming_send(struct sock *sk)
 {
-        struct sk_buff *skb, *tx_skb;
+        struct sk_buff *skb;
         struct l2cap_pinfo *pi = l2cap_pi(sk);
         u16 control, fcs;

-        while ((skb = sk->sk_send_head)) {
-                tx_skb = skb_clone(skb, GFP_ATOMIC);
-
-                control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+        while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+                control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
                 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-                put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+                put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

                 if (pi->fcs == L2CAP_FCS_CRC16) {
-                        fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-                        put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+                        fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
+                        put_unaligned_le16(fcs, skb->data + skb->len - 2);
                 }

-                l2cap_do_send(sk, tx_skb);
+                l2cap_do_send(sk, skb);

                 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
-                if (skb_queue_is_last(TX_QUEUE(sk), skb))
-                        sk->sk_send_head = NULL;
-                else
-                        sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
-                skb = skb_dequeue(TX_QUEUE(sk));
-                kfree_skb(skb);
         }
 }
@@ -1960,6 +1950,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
         switch (optname) {
         case L2CAP_OPTIONS:
+                if (sk->sk_state == BT_CONNECTED) {
+                        err = -EINVAL;
+                        break;
+                }
+
                 opts.imtu = l2cap_pi(sk)->imtu;
                 opts.omtu = l2cap_pi(sk)->omtu;
                 opts.flush_to = l2cap_pi(sk)->flush_to;
@@ -2771,10 +2766,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
                 case L2CAP_CONF_MTU:
                         if (val < L2CAP_DEFAULT_MIN_MTU) {
                                 *result = L2CAP_CONF_UNACCEPT;
-                                pi->omtu = L2CAP_DEFAULT_MIN_MTU;
+                                pi->imtu = L2CAP_DEFAULT_MIN_MTU;
                         } else
-                                pi->omtu = val;
-                        l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+                                pi->imtu = val;
+                        l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
                         break;

                 case L2CAP_CONF_FLUSH_TO:
@@ -3071,6 +3066,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
         return 0;
 }

+static inline void set_default_fcs(struct l2cap_pinfo *pi)
+{
+        /* FCS is enabled only in ERTM or streaming mode, if one or both
+         * sides request it.
+         */
+        if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+                pi->fcs = L2CAP_FCS_NONE;
+        else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+                pi->fcs = L2CAP_FCS_CRC16;
+}
+
 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
 {
         struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
@@ -3088,14 +3094,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
         if (!sk)
                 return -ENOENT;

-        if (sk->sk_state != BT_CONFIG) {
-                struct l2cap_cmd_rej rej;
-
-                rej.reason = cpu_to_le16(0x0002);
-                l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-                                sizeof(rej), &rej);
+        if (sk->sk_state == BT_DISCONN)
                 goto unlock;
-        }

         /* Reject if config buffer is too small. */
         len = cmd_len - sizeof(*req);
@@ -3135,9 +3135,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
                 goto unlock;

         if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-                if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
-                    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
-                        l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+                set_default_fcs(l2cap_pi(sk));
                 sk->sk_state = BT_CONNECTED;
@@ -3225,9 +3223,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
         l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

         if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-                if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
-                    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
-                        l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+                set_default_fcs(l2cap_pi(sk));
                 sk->sk_state = BT_CONNECTED;
                 l2cap_pi(sk)->next_tx_seq = 0;
...
@@ -82,11 +82,14 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
 static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 {
         struct sock *sk = d->owner, *parent;
+        unsigned long flags;

         if (!sk)
                 return;

         BT_DBG("dlc %p state %ld err %d", d, d->state, err);

+        local_irq_save(flags);
         bh_lock_sock(sk);

         if (err)
@@ -108,6 +111,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
         }

         bh_unlock_sock(sk);
+        local_irq_restore(flags);

         if (parent && sock_flag(sk, SOCK_ZAPPED)) {
                 /* We have to drop DLC lock here, otherwise
...
@@ -355,7 +355,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
         if (info.cmd == ETHTOOL_GRXCLSRLALL) {
                 if (info.rule_cnt > 0) {
                         if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
-                                rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+                                rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
                                                    GFP_USER);
                 if (!rule_buf)
                         return -ENOMEM;
@@ -404,7 +404,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
             (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
                 return -ENOMEM;
         full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-        indir = kmalloc(full_size, GFP_USER);
+        indir = kzalloc(full_size, GFP_USER);
         if (!indir)
                 return -ENOMEM;
@@ -579,7 +579,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
         gstrings.len = ret;

-        data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+        data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
         if (!data)
                 return -ENOMEM;
...