Commit 78b962bd authored by Linus Torvalds

Merge bk://kernel.bkbits.net/jgarzik/net-drivers-2.6
into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 18955165 69e93c72
@@ -1297,6 +1297,13 @@ static int __devinit vortex_probe1(struct device *gendev,
for (i = 0; i < 6; i++)
printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
}
+/* Unfortunately an all zero eeprom passes the checksum and this
+   gets found in the wild in failure cases. Crypto is hard 8) */
+if (!is_valid_ether_addr(dev->dev_addr)) {
+retval = -EINVAL;
+printk(KERN_ERR "*** EEPROM MAC address is invalid.\n");
+goto free_ring; /* With every pack */
+}
EL3WINDOW(2);
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
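
The new block rejects an EEPROM whose MAC address survives the checksum but is not actually usable: is_valid_ether_addr() treats the all-zero address (and any multicast or broadcast address) as invalid, which is exactly the failure mode the comment describes. A small stand-alone sketch of that test, with helper names made up for illustration rather than taken from the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's is_zero_ether_addr() /
     * is_multicast_ether_addr() / is_valid_ether_addr() helpers. */
    static int mac_is_zero(const uint8_t *a)
    {
            return !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
    }

    static int mac_is_multicast(const uint8_t *a)
    {
            return a[0] & 0x01;     /* I/G bit set => group address */
    }

    static int mac_is_valid(const uint8_t *a)
    {
            return !mac_is_zero(a) && !mac_is_multicast(a);
    }

    int main(void)
    {
            uint8_t blank[6] = { 0 };       /* what a dead EEPROM reads back */
            uint8_t good[6]  = { 0x00, 0x10, 0x4b, 0x12, 0x34, 0x56 };

            printf("blank eeprom valid: %d\n", mac_is_valid(blank)); /* 0 */
            printf("real address valid: %d\n", mac_is_valid(good));  /* 1 */
            return 0;
    }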
@@ -84,7 +84,8 @@ config 3C359
config TMS380TR
tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
-depends on TR && (PCI || ISA)
+depends on TR && (PCI || ISA) && HOTPLUG
+select FW_LOADER
---help---
This driver provides generic support for token ring adapters
based on the Texas Instruments TMS380 series chipsets. This
@@ -221,6 +221,8 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
olympic_priv = dev->priv ;
+spin_lock_init(&olympic_priv->olympic_lock) ;
init_waitqueue_head(&olympic_priv->srb_wait);
+init_waitqueue_head(&olympic_priv->trb_wait);
#if OLYMPIC_DEBUG
@@ -311,7 +313,6 @@ static int __devinit olympic_init(struct net_device *dev)
}
}
-spin_lock_init(&olympic_priv->olympic_lock) ;
/* Needed for cardbus */
if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
@@ -442,6 +443,8 @@ static int olympic_open(struct net_device *dev)
DECLARE_WAITQUEUE(wait,current) ;
+olympic_init(dev);
if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
return -EAGAIN;
}
@@ -898,7 +901,10 @@ static void olympic_freemem(struct net_device *dev)
int i;
for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
-dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
+if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
+dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
+olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
+}
if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
pci_unmap_single(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
@@ -944,9 +950,6 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs
/* Hotswap gives us this on removal */
if (sisr == 0xffffffff) {
printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
-olympic_freemem(dev) ;
-free_irq(dev->irq, dev) ;
-dev->stop = NULL ;
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_NONE;
}
@@ -961,9 +964,7 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs
printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
printk(KERN_ERR "or the linux-tr mailing list.\n") ;
-olympic_freemem(dev) ;
-free_irq(dev->irq, dev) ;
-dev->stop = NULL ;
+wake_up_interruptible(&olympic_priv->srb_wait);
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_HANDLED;
} /* SISR_ERR */
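
These deletions (here and in the hotswap and adapter-check paths) move teardown out of the interrupt handler: instead of unmapping buffers, calling free_irq() and clearing dev->stop while still in interrupt context, the handler now only records the failure and, in this error path, wakes any sleeper on srb_wait, leaving the actual cleanup to olympic_close() in process context. A rough user-space model of that hand-off, using pthreads purely for illustration:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model: the "interrupt" only flags the error and wakes the waiter;
     * all freeing happens in the "close" path running in process context. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  srb_wait = PTHREAD_COND_INITIALIZER;
    static int adapter_failed;
    static void *rx_buffer;

    static void *fake_interrupt(void *unused)
    {
            (void)unused;
            pthread_mutex_lock(&lock);
            adapter_failed = 1;                     /* record the condition only */
            pthread_cond_signal(&srb_wait);         /* ~ wake_up_interruptible() */
            pthread_mutex_unlock(&lock);
            return NULL;                            /* no freeing here */
    }

    static void fake_close(void)
    {
            pthread_mutex_lock(&lock);
            while (!adapter_failed)
                    pthread_cond_wait(&srb_wait, &lock);
            pthread_mutex_unlock(&lock);

            free(rx_buffer);                        /* teardown in process context */
            rx_buffer = NULL;
            printf("closed after adapter failure\n");
    }

    int main(void)
    {
            pthread_t irq;

            rx_buffer = malloc(4096);
            pthread_create(&irq, NULL, fake_interrupt, NULL);
            fake_close();
            pthread_join(irq, NULL);
            return 0;
    }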
@@ -1006,9 +1007,6 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs
writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
-olympic_freemem(dev) ;
-free_irq(dev->irq, dev) ;
-dev->stop = NULL ;
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_HANDLED;
} /* SISR_ADAPTER_CHECK */
@@ -1094,34 +1092,32 @@ static int olympic_close(struct net_device *dev)
writeb(0,srb+1);
writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
-add_wait_queue(&olympic_priv->srb_wait,&wait) ;
-set_current_state(TASK_INTERRUPTIBLE) ;
spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
olympic_priv->srb_queued=1;
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
t = jiffies ;
+add_wait_queue(&olympic_priv->srb_wait,&wait) ;
+set_current_state(TASK_INTERRUPTIBLE) ;
while(olympic_priv->srb_queued) {
-schedule() ;
+t = schedule_timeout(60*HZ);
if(signal_pending(current)) {
printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
olympic_priv->srb_queued=0;
break;
}
-if ((jiffies-t) > 60*HZ) {
+if (t == 0) {
printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
olympic_priv->srb_queued=0;
break ;
}
-set_current_state(TASK_INTERRUPTIBLE) ;
+olympic_priv->srb_queued=0;
}
remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
set_current_state(TASK_RUNNING) ;
olympic_priv->rx_status_last_received++;
olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
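
The rewritten wait queues the task on srb_wait and marks it TASK_INTERRUPTIBLE just before the loop, and replaces the hand-rolled jiffies comparison with schedule_timeout(60*HZ), whose return value t is the number of ticks left when the task woke, so t == 0 means the full 60 seconds elapsed without the SRB completing. A condensed sketch of that pattern in isolation; the function name and the queued flag are placeholders, not the driver's own code:

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Shape of the new SRB wait: sleep up to 60 seconds, bail out early on a
     * signal, and treat a return value of 0 as "the timeout fully expired". */
    static void wait_for_srb(wait_queue_head_t *wq, volatile int *queued)
    {
            DECLARE_WAITQUEUE(wait, current);
            signed long t = 0;

            add_wait_queue(wq, &wait);
            set_current_state(TASK_INTERRUPTIBLE);
            while (*queued) {
                    t = schedule_timeout(60 * HZ);  /* ticks still remaining */
                    if (signal_pending(current))
                            break;                  /* interrupted by a signal */
                    if (t == 0)
                            break;                  /* full 60s elapsed */
                    set_current_state(TASK_INTERRUPTIBLE);
            }
            remove_wait_queue(wq, &wait);
            set_current_state(TASK_RUNNING);
    }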
@@ -1513,29 +1509,6 @@ static void olympic_arb_cmd(struct net_device *dev)
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
netif_stop_queue(dev);
olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
-for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
-dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
-if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
-pci_unmap_single(olympic_priv->pdev,
-le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
-olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
-}
-olympic_priv->rx_status_last_received++;
-olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
-}
-/* unmap rings */
-pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
-sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
-pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
-sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
-pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
-sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
-pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
-sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
-free_irq(dev->irq,dev);
-dev->stop=NULL;
printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
} /* If serious error */
@@ -359,6 +359,8 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
+velocity_nics--;
}
/**
@@ -462,7 +464,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
{
struct mac_regs * regs = vptr->mac_regs;
-/* T urn on MCFG_PQEN, turn off MCFG_RTGOPT */
+/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
@@ -490,12 +492,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
}
}
-static inline void velocity_give_rx_desc(struct rx_desc *rd)
-{
-*(u32 *)&rd->rdesc0 = 0;
-rd->rdesc0.owner = cpu_to_le32(OWNED_BY_NIC);
-}
/**
* velocity_rx_reset - handle a receive reset
* @vptr: velocity we are resetting
@@ -516,7 +512,7 @@ static void velocity_rx_reset(struct velocity_info *vptr)
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
-velocity_give_rx_desc(vptr->rd_ring + i);
+vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo);
@@ -591,10 +587,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
/*
-* Bback off algorithm use original IEEE standard
+* Back off algorithm use original IEEE standard
*/
BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
+/*
+ * Init CAM filter
+ */
+velocity_init_cam_filter(vptr);
/*
* Set packet filter: Receive directed and broadcast address
*/
@@ -619,8 +620,6 @@ static void velocity_init_registers(struct velocity_info *vptr,
mac_tx_queue_run(regs, i);
}
-velocity_init_cam_filter(vptr);
init_flow_control_register(vptr);
writel(CR0_STOP, &regs->CR0Clr);
@@ -628,7 +627,6 @@ static void velocity_init_registers(struct velocity_info *vptr,
mii_status = velocity_get_opt_media_mode(vptr);
netif_stop_queue(vptr->dev);
-mac_clear_isr(regs);
mii_init(vptr, mii_status);
@@ -695,7 +693,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
struct mac_regs * regs;
int ret = -ENOMEM;
-if (velocity_nics++ >= MAX_UNITS) {
+if (velocity_nics >= MAX_UNITS) {
printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n",
velocity_nics);
return -ENODEV;
@@ -727,7 +725,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
vptr->dev = dev;
-dev->priv = vptr;
dev->irq = pdev->irq;
ret = pci_enable_device(pdev);
@@ -762,7 +759,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
dev->dev_addr[i] = readb(&regs->PAR[i]);
-velocity_get_options(&vptr->options, velocity_nics - 1, dev->name);
+velocity_get_options(&vptr->options, velocity_nics, dev->name);
/*
* Mask out the options cannot be set to the chip
@@ -817,6 +814,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
}
#endif
+velocity_nics++;
out:
return ret;
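
The counter change pairs with the velocity_nics-- added to velocity_remove1() above: the unit count now grows only once a probe has fully succeeded, so a probe that fails half-way no longer consumes a slot, and velocity_get_options() is passed the index of the unit currently being set up. In miniature, with made-up names and limits:

    #include <stdio.h>

    #define MAX_UNITS 8
    static int nics;                        /* counts successfully probed units */

    static int probe_one(int should_fail)
    {
            if (nics >= MAX_UNITS)
                    return -1;              /* no free unit slot */
            if (should_fail)
                    return -1;              /* failure path leaves the count alone */
            nics++;                         /* only completed probes are counted */
            return 0;
    }

    static void remove_one(void)
    {
            nics--;                         /* give the slot back on removal */
    }

    int main(void)
    {
            probe_one(1);                   /* a probe that fails half-way */
            probe_one(0);                   /* a probe that succeeds */
            printf("nics = %d\n", nics);    /* 1, not 2 */
            remove_one();
            printf("nics = %d\n", nics);    /* 0 */
            return 0;
    }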
@@ -869,10 +867,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_i
vptr->io_size = info->io_size;
vptr->num_txq = info->txqueue;
vptr->multicast_limit = MCAM_SIZE;
spin_lock_init(&vptr->lock);
-spin_lock_init(&vptr->xmit_lock);
INIT_LIST_HEAD(&vptr->list);
}
@@ -1024,11 +1019,11 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
wmb();
-unusable = vptr->rd_filled | 0x0003;
-dirty = vptr->rd_dirty - unusable + 1;
+unusable = vptr->rd_filled & 0x0003;
+dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-velocity_give_rx_desc(vptr->rd_ring + dirty);
+vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
}
writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
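
The corrected arithmetic: rd_filled counts freshly refilled descriptors, and the hardware takes them back in blocks of four, so the low two bits (rd_filled & 0x0003) stay back as "unusable" and the walk starts at rd_dirty - unusable. A small worked example with invented ring values:

    #include <stdio.h>

    /* Worked example of the corrected bookkeeping.  The NIC takes receive
     * descriptors back in blocks of four, so the low two bits of rd_filled
     * stay "unusable" until the next batch.  All values are invented. */
    int main(void)
    {
            int numrx = 64;                 /* ring size */
            int rd_dirty = 10;              /* next slot to be refilled */
            int rd_filled = 10;             /* refilled but not yet returned */
            int unusable, dirty, avail;

            unusable = rd_filled & 0x0003;  /* 2 descriptors held back */
            dirty = rd_dirty - unusable;    /* 8: end of the aligned block */

            for (avail = rd_filled & 0xfffc; avail; avail--) {
                    dirty = (dirty > 0) ? dirty - 1 : numrx - 1;
                    printf("return descriptor %d to the NIC\n", dirty); /* 7..0 */
            }
            /* RBRDU is then told rd_filled & 0xfffc == 8 descriptors.  With the
             * old "| 0x0003" and "+ 1" arithmetic these numbers would have made
             * dirty start at 0 and wrap to slot 63, returning ring entries that
             * were never refilled. */
            return 0;
    }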
@@ -1043,7 +1038,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
-if (rd->rdesc0.owner == cpu_to_le32(OWNED_BY_NIC))
+if (rd->rdesc0.owner == OWNED_BY_NIC)
break;
if (!vptr->rd_info[dirty].skb) {
@@ -1096,7 +1091,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
}
/**
-* velocity_free_rd_ring - set up receive ring
+* velocity_free_rd_ring - free receive ring
* @vptr: velocity to clean up
*
* Free the receive buffers for each ring slot and any
@@ -1161,8 +1156,10 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
td = &(vptr->td_rings[j][i]);
td_info = &(vptr->td_infos[j][i]);
-td_info->buf = vptr->tx_bufs + (i + j) * PKT_BUF_SZ;
-td_info->buf_dma = vptr->tx_bufs_dma + (i + j) * PKT_BUF_SZ;
+td_info->buf = vptr->tx_bufs +
+(j * vptr->options.numtx + i) * PKT_BUF_SZ;
+td_info->buf_dma = vptr->tx_bufs_dma +
+(j * vptr->options.numtx + i) * PKT_BUF_SZ;
}
vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
}
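
The new offset formula gives each transmit queue its own contiguous slice of the bounce-buffer area: with the old (i + j) term, queue j reused almost all of the previous queue's buffers, while (j * numtx + i) keeps the slices disjoint. A tiny worked example with invented queue sizes:

    #include <stdio.h>

    /* Why the offset changed: "(i + j)" makes queue 1 reuse most of queue 0's
     * buffers, while "(j * numtx + i)" gives every descriptor of every queue
     * its own PKT_BUF_SZ slice.  Queue counts below are invented. */
    int main(void)
    {
            int num_txq = 2, numtx = 4;
            int i, j;

            for (j = 0; j < num_txq; j++)
                    for (i = 0; i < numtx; i++)
                            printf("queue %d desc %d: old slot %d, new slot %d\n",
                                   j, i, i + j, j * numtx + i);
            /* old slots: q0 -> 0..3, q1 -> 1..4 (overlapping q0);
             * new slots: q0 -> 0..3, q1 -> 4..7 (disjoint) */
            return 0;
    }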
@@ -1238,15 +1235,17 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
int rd_curr = vptr->rd_curr;
int works = 0;
-while (1) {
+do {
struct rx_desc *rd = vptr->rd_ring + rd_curr;
-if (!vptr->rd_info[rd_curr].skb || (works++ > 15))
+if (!vptr->rd_info[rd_curr].skb)
break;
+if (rd->rdesc0.owner == OWNED_BY_NIC)
+break;
rmb();
/*
* Don't drop CE or RL error frame although RXOK is off
*/
@@ -1269,14 +1268,15 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
rd_curr++;
if (rd_curr >= vptr->options.numrx)
rd_curr = 0;
-}
+} while (++works <= 15);
-if (velocity_rx_refill(vptr) < 0) {
+vptr->rd_curr = rd_curr;
+if (works > 0 && velocity_rx_refill(vptr) < 0) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
"%s: rx buf allocation failure\n", vptr->dev->name);
}
-vptr->rd_curr = rd_curr;
VAR_USED(stats);
return works;
}
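
The receive-service loop now stops as soon as it reaches a descriptor the NIC still owns, caps the work done per call at 16 descriptors, only attempts a refill when something was actually processed, and stores rd_curr back before the refill call (which presumably consults it) rather than after. A stripped-down model of such a bounded service loop; the ring layout and sizes are invented:

    #include <stdio.h>

    #define RING_SIZE    64
    #define OWNED_BY_NIC 1
    #define RX_WORK_MAX  15         /* mirrors the "++works <= 15" cap */

    struct rx_desc { int owner; int len; };

    static struct rx_desc ring[RING_SIZE];
    static int rd_curr;

    /* Handle at most 16 completed descriptors per call and stop early when
     * the next one still belongs to the NIC.  Returns the number handled. */
    static int rx_srv(void)
    {
            int works = 0;

            do {
                    struct rx_desc *rd = &ring[rd_curr];

                    if (rd->owner == OWNED_BY_NIC)
                            break;                  /* NIC not finished with it */

                    printf("frame of %d bytes in slot %d\n", rd->len, rd_curr);
                    rd->owner = OWNED_BY_NIC;       /* hand the slot straight back */

                    if (++rd_curr >= RING_SIZE)
                            rd_curr = 0;
            } while (++works <= RX_WORK_MAX);

            return works;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < RING_SIZE; i++) {
                    ring[i].owner = (i < 20) ? 0 : OWNED_BY_NIC;
                    ring[i].len = 64 + i;
            }
            printf("handled %d descriptors\n", rx_srv());   /* 16, not 20 */
            return 0;
    }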
@@ -1319,7 +1319,7 @@ static inline void mac_set_cam_mask(struct mac_regs * regs, u8 * mask, enum velo
/* disable CAMEN */
writeb(0, &regs->CAMADDR);
-/* Select CAM mask */
+/* Select mar */
BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
@@ -1360,7 +1360,7 @@ static inline void mac_set_cam(struct mac_regs * regs, int idx, u8 *addr, enum v
writeb(0, &regs->CAMADDR);
-/* Select CAM mask */
+/* Select mar */
BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
@@ -1401,7 +1401,7 @@ static inline void mac_get_cam(struct mac_regs * regs, int idx, u8 *addr, enum v
writeb(0, &regs->CAMADDR);
-/* Select CAM mask */
+/* Select mar */
BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
@@ -1792,7 +1792,6 @@ struct velocity_info {
u8 mCAMmask[(MCAM_SIZE / 8)];
spinlock_t lock;
-spinlock_t xmit_lock;
int wol_opts;
u8 wol_passwd[6];