Commit 50441791 authored by Linus Torvalds's avatar Linus Torvalds

Merge bk://kernel.bkbits.net/gregkh/linux/linus-2.5

into penguin.transmeta.com:/home/torvalds/v2.5/linux
parents 66731c77 6bd4301a
......@@ -3027,8 +3027,8 @@ E: torvalds@transmeta.com
W: http://www.cs.helsinki.fi/Linus.Torvalds
P: 1024/A86B35C5 96 54 50 29 EC 11 44 7A BE 67 3C 24 03 13 62 C8
D: Original kernel hacker
S: 1050 Woodduck Avenue
S: Santa Clara, California 95051
S: 3990 Freedom Circle
S: Santa Clara, California 95054
S: USA
N: Marcelo W. Tosatti
......
......@@ -440,5 +440,19 @@ config ATM_FORE200E
default m if ATM_FORE200E_MAYBE!=y
default y if ATM_FORE200E_MAYBE=y
config ATM_HE
tristate "ForeRunner HE Series"
depends on PCI && ATM
help
This is a driver for the Marconi ForeRunner HE-series ATM adapter
cards. It simultaneously supports the 155 and 622 versions.
config ATM_HE_USE_SUNI
bool "Use S/UNI PHY driver"
depends on ATM_HE
help
Support for the S/UNI-Ultra and S/UNI-622 found in the ForeRunner
HE cards. This driver provides carrier detection and some statistics.
endmenu
......@@ -49,6 +49,10 @@ ifeq ($(CONFIG_ATM_FORE200E_SBA),y)
CONFIG_ATM_FORE200E_SBA_FW := $(obj)/sba200e_ecd.bin2
endif
endif
obj-$(CONFIG_ATM_HE) += he.o
ifeq ($(CONFIG_ATM_HE_USE_SUNI),y)
obj-$(CONFIG_ATM_HE) += suni.o
endif
# FORE Systems 200E-series firmware magic
$(obj)/fore200e_pca_fw.c: $(patsubst "%", %, $(CONFIG_ATM_FORE200E_PCA_FW)) \
......
/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
/*
he.c
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001 Naval Research Laboratory
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
he.c
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001 Naval Research Laboratory
Permission to use, copy, modify and distribute this software and its
documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the software,
derivative works or modified versions, and any portions thereof, and
that both notices appear in supporting documentation.
NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
RESULTING FROM THE USE OF THIS SOFTWARE.
This driver was written using the "Programmer's Reference Manual for
ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
AUTHORS:
chas williams <chas@cmf.nrl.navy.mil>
eric kinzie <ekinzie@cmf.nrl.navy.mil>
NOTES:
4096 supported 'connections'
group 0 is used for all traffic
interrupt queue 0 is used for all interrupts
aal0 support for receive only
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>
#ifndef ATM_OC12_PCR
#define ATM_OC12_PCR (622080000/1080*1040/8/53)
#endif
#ifdef BUS_INT_WAR
void sn_add_polled_interrupt(int irq, int interval);
void sn_delete_polled_interrupt(int irq);
#endif
#define USE_TASKLET
#define USE_HE_FIND_VCC
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW /* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL /* if memory is tight try this */
#undef USE_RBPL_POOL /* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* 2.2 kernel support */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43)
#define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
#define dev_kfree_skb_any(skb) dev_kfree_skb(skb)
#undef USE_TASKLET
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,18)
#define set_current_state(x) current->state = (x);
#endif
#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>
/* device-prefixed printk; both expect a local variable named he_dev in
   scope.  hprintk1 is the no-varargs form (gcc named-varargs macros
   cannot take an empty argument list) */
#define hprintk(fmt,args...)	printk(DEV_LABEL "%d: " fmt, he_dev->number, args)
#define hprintk1(fmt)		printk(DEV_LABEL "%d: " fmt, he_dev->number)

#undef DEBUG
#ifdef DEBUG
#define HPRINTK(fmt,args...)	hprintk(fmt,args)
#define HPRINTK1(fmt)		hprintk1(fmt)
#else
/* debug printks compile away unless DEBUG is defined above */
#define HPRINTK(fmt,args...)
#define HPRINTK1(fmt,args...)
#endif /* DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* defines */

/* round addr up to the next multiple of alignment (a power of two) */
#define ALIGN_ADDRESS(addr, alignment) \
	((((unsigned long) (addr)) + (((unsigned long) (alignment)) - 1)) & ~(((unsigned long) (alignment)) - 1))
/* declarations */
static int he_open(struct atm_vcc *vcc, short vpi, int vci);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_sg_send(struct atm_vcc *vcc, unsigned long start, unsigned long size);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
#else
static void he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
#endif
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);
static u8 read_prom_byte(struct he_dev *he_dev, int addr);
/* globals */

/* head of the list of all probed adapters (new devices pushed on front) */
struct he_dev *he_devs = NULL;

/* module tunables */
static short disable64 = -1;		/* 1 = force 32-bit pci transfers (see he_start) */
static short nvpibits = -1;		/* requested vpi bits; -1 = default -- not used in this chunk, TODO confirm */
static short nvcibits = -1;		/* requested vci bits; -1 = default -- not used in this chunk, TODO confirm */
static short rx_skb_reserve = 16;	/* presumably rx skb headroom; verify against receive path */
static short irq_coalesce = 1;		/* nonzero = coalesce rx-buffer-ready interrupts (see he_init_group) */
static short sdh = 1;			/* presumably sdh vs sonet framing for the phy; verify against he_start */

/* atm device operations table (old gcc-style designated initializers) */
static struct atmdev_ops he_ops =
{
	open:		he_open,
	close:		he_close,
	ioctl:		he_ioctl,
	send:		he_send,
	sg_send:	he_sg_send,
	phy_put:	he_phy_put,
	phy_get:	he_phy_get,
	proc_read:	he_proc_read,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,1)
	owner:		THIS_MODULE
#endif
};
/* see the comments in he.h about global_lock */
#define HE_SPIN_LOCK(dev, flags) spin_lock_irqsave(&(dev)->global_lock, flags)
#define HE_SPIN_UNLOCK(dev, flags) spin_unlock_irqrestore(&(dev)->global_lock, flags)
#define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while(0)
#define he_readl(dev, reg) readl((dev)->membase + (reg))
/* section 2.12 connection memory access */
/*
 * he_writel_internal - write one word into connection memory
 *
 * Loads CON_DAT with the value, then starts the transfer by writing
 * CON_CTL with the target address plus the flags selecting which memory
 * to hit (CON_CTL_TCM / CON_CTL_RCM / CON_CTL_MBOX, possibly with byte
 * disables), and finally spins until the controller clears CON_CTL_BUSY.
 */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
		unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
#ifdef CONFIG_IA64_SGI_SN2
	/* read back to flush the posted write before kicking the transfer */
	(void) he_readl(he_dev, CON_DAT);
#endif
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY)
		;
}
/* convenience wrappers selecting the target connection memory */
#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

/*
 * he_readl_internal - read one word from connection memory
 *
 * Writes the address and selection flags into CON_CTL, spins until
 * CON_CTL_BUSY clears, then picks the result out of CON_DAT.
 */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY)
		;
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
/* figure 2.2 connection id */

/* cid = vpi in the upper bits, vcibits of vci below, masked to 13 bits */
#define he_mkcid(dev, vpi, vci)		(((vpi<<(dev)->vcibits) | vci) & 0x1fff)

/* 2.5.1 per connection transmit state registers */

/* tsr0-7 live in TSRA (8 words per cid), tsr8-11 in TSRB (4 words per
   cid), tsr12-13 in TSRC (2 words per cid) and tsr14 in TSRD (1 word
   per cid) -- hence the different cid shifts below */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid<<3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *	of this register must not be written by the host.  Byte
	 *	enables should be used during normal operation when writing
	 *	the most significant byte.
	 */

/* write only the most significant byte of tsr4: byte-disable flags
   mask off bytes 0-2 so the active low bits are left untouched */
#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid<<3) | 4, \
							CON_CTL_TCM \
								| CON_BYTE_DISABLE_2 \
								| CON_BYTE_DISABLE_1 \
								| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid<<3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 7)

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 3)

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid<<1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid<<1) | 1)

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

/* write only the most significant byte of tsr14 (same byte-enable
   trick as tsr4 above) */
#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
								| CON_BYTE_DISABLE_2 \
								| CON_BYTE_DISABLE_1 \
								| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

/* rsr0-7: 8 words per cid at the base of receive connection memory */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid<<3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 7)
/*
 * he_find_vcc - map a connection id back to its open vcc
 *
 * Splits the cid into vpi/vci (the inverse of he_mkcid) and walks the
 * device's vcc list for a matching connection with a receive traffic
 * class.  Returns NULL when no such vcc exists.
 */
static __inline__ struct atm_vcc *
he_find_vcc(struct he_dev *he_dev, unsigned cid)
{
	short vpi = cid >> he_dev->vcibits;
	int vci = cid & ((1 << he_dev->vcibits) - 1);
	struct atm_vcc *walk = he_dev->atm_dev->vccs;

	while (walk != NULL) {
		if (walk->vpi == vpi && walk->vci == vci
		    && walk->qos.rxtp.traffic_class != ATM_NONE)
			return walk;
		walk = walk->next;
	}
	return NULL;
}
/*
 * he_init_one - pci probe: register the atm device and bring the card up
 *
 * Registers a new atm device, allocates the per-card he_dev state,
 * links the two together, and starts the hardware.  On failure every
 * resource acquired so far is released and a negative errno returned.
 */
static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	printk(KERN_INFO "he: %s\n", version);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,43)
	if (pci_enable_device(pci_dev))
		return -EIO;
#endif
	if (pci_set_dma_mask(pci_dev, HE_DMA_MASK) != 0)
	{
		printk(KERN_WARNING "he: no suitable dma available\n");
		return -EIO;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, 0);
	if (!atm_dev)
		return -ENODEV;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,3)
	pci_set_drvdata(pci_dev, atm_dev);
#else
	pci_dev->driver_data = atm_dev;
#endif

	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev)
	{
		/* fix: previously returned without deregistering the
		   atm device registered above, leaking it */
		atm_dev_deregister(atm_dev);
		return -ENOMEM;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	HE_DEV(atm_dev) = he_dev;
	he_dev->number = atm_dev->number;	/* was devs */

	if (he_start(atm_dev)) {
		atm_dev_deregister(atm_dev);
		he_stop(he_dev);
		kfree(he_dev);
		return -ENODEV;
	}

	/* push onto the global adapter list */
	he_dev->next = NULL;
	if (he_devs) he_dev->next = he_devs;
	he_devs = he_dev;

	return 0;
}
/*
 * he_remove_one - pci remove: stop the card and tear down the atm device
 *
 * Reverses he_init_one(): stops the hardware, deregisters the atm
 * device, and frees the per-card state.  Note the existing TODO below
 * about unlinking the device from the he_devs list.
 */
static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,3)
	atm_dev = pci_get_drvdata(pci_dev);
#else
	atm_dev = pci_dev->driver_data;
#endif
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,3)
	pci_set_drvdata(pci_dev, NULL);
#else
	pci_dev->driver_data = NULL;
#endif
}
/*
 * rate_to_atmf - convert a cell rate into atm forum format
 *
 * Encoding: bit 14 = nonzero flag, bits 13-9 = exponent, bits 8-0 =
 * mantissa.  A rate of zero maps to zero.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1<<14)
	unsigned exp;

	if (rate == 0)
		return 0;

	/* normalize rate * 512 down until the mantissa fits in 10 bits */
	for (exp = 0, rate <<= 9; rate > 0x3ff; rate >>= 1)
		exp++;

	return NONZERO | (exp << 9) | (rate & 0x1ff);
}
/*
 * he_init_rx_lbfp0 - build the local-buffer free pool for rx group 0
 *
 * Writes a two-word descriptor (buffer address / next index) for every
 * receive buffer into local buffer memory (via RCM) and programs the
 * pool 0 head, tail and count registers.  Pool 0 uses the even
 * descriptor indices; buffers start at r0_startrow.
 */
static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned buf, bufs_this_row;
	unsigned row_bufs = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_addr = he_dev->r0_startrow * he_dev->bytes_per_row;
	unsigned descr_index = 0;
	unsigned descr_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, descr_index, RLBF0_H);

	for (buf = 0, bufs_this_row = 0; buf < he_dev->r0_numbuffs; ++buf) {
		descr_index += 2;

		/* buffer address in 32-byte units, then the next index */
		he_writel_rcm(he_dev,
			(row_addr + (bufs_this_row * bufsize)) / 32,
			descr_offset);
		he_writel_rcm(he_dev, descr_index, descr_offset + 1);

		if (++bufs_this_row == row_bufs) {
			bufs_this_row = 0;
			row_addr += he_dev->bytes_per_row;
		}
		descr_offset += 4;
	}

	he_writel(he_dev, descr_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
/*
 * he_init_rx_lbfp1 - build the local-buffer free pool for rx group 1
 *
 * Same scheme as he_init_rx_lbfp0, but the descriptor chain starts at
 * index 1 (odd indices, interleaved with pool 0 -- hence the +4 stride
 * and the +2 starting offset) and the buffers live in the rows starting
 * at r1_startrow.
 */
static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);	/* pool head */

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i)
	{
		lbufd_index += 2;
		/* buffer address in local buffer memory, 32-byte units */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		/* two-word descriptor: buffer address, then next index */
		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row)
		{
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);	/* pool tail */
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);	/* pool count */
}
/*
 * he_init_tx_lbfp - build the local-buffer free pool for transmit
 *
 * Tx descriptors start right after the two rx pools' descriptors
 * (index r0_numbuffs + r1_numbuffs) and are contiguous: single-step
 * index, two-word stride.  Buffers occupy the rows starting at
 * tx_startrow.
 */
static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);	/* pool head */

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i)
	{
		lbufd_index += 1;
		/* buffer address in local buffer memory, 32-byte units */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		/* two-word descriptor: buffer address, then next index */
		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row)
		{
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);	/* pool tail */
}
/*
 * he_init_tpdrq - allocate and program the transmit packet descriptor
 * ready queue
 *
 * Allocates the queue in coherent dma memory, zeroes it, and tells the
 * adapter its base, tail and size.  Returns 0 or -ENOMEM.
 */
static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
	unsigned tpdrq_bytes = CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq);

	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
					tpdrq_bytes, &he_dev->tpdrq_phys);
	if (!he_dev->tpdrq_base) {
		hprintk1("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0, tpdrq_bytes);

	/* queue starts out empty: head == tail == base */
	he_dev->tpdrq_head = he_dev->tpdrq_base;
	he_dev->tpdrq_tail = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
/*
 * he_init_cs_block - 5.1.7 cell scheduler block initialization
 *
 * Clears the scheduler timers, loads the first row of the rate grid
 * with timer periods derived from the link rate, and writes the
 * rate-control constants from the manual's tables (one set for 622
 * cards, one for 155).
 */
static void __init
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg)
	{
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev))
	{
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	}
	else
	{
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}
/*
 * he_init_cs_block_rcm - build the rate-to-group table in rate
 * controller memory
 *
 * Reconstructs a host-side copy of the scheduler's 16x16 rate grid and
 * uses it to map every 5-bit exponent / 5-bit mantissa atm forum rate
 * onto a grid index plus a buffer limit.  Two 16-bit entries are packed
 * per 32-bit word written to RCM.
 */
static void __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned rategrid[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	/* row 0 steps down linearly from the link rate */
	for (j = 0; j < 16; j++)
	{
		rategrid[0][j] = rate;
		rate -= delta;
	}

	/* each later row halves the one above (rows > 14 quarter it) */
	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				rategrid[i][j] = rategrid[i - 1][j] / 4;
			else
				rategrid[i][j] = rategrid[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400)
	{
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10) rate_cps = 10;
			/* 2.2.1 minimum payload rate is 10 cps */

		/* scan down to the smallest grid entry still >= rate_cps */
		for (i = 255; i > 0; i--)
			if (rategrid[i/16][i%16] >= rate_cps) break;
				 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		buf = 0;
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (68 * mult)) buf = 1;
		if (rate_cps > (136 * mult)) buf = 2;
		if (rate_cps > (204 * mult)) buf = 3;
		if (rate_cps > (272 * mult)) buf = 4;
#endif
		if (buf > buf_limit) buf = buf_limit;

		/* reg carries the previous (even-index) entry in its upper
		   half until the odd index triggers the write below */
		reg = (reg<<16) | ((i<<8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf>>1));

		++rate_atmf;
	}
}
/*
 * he_init_group - set up receive buffer pools and ready queues for a group
 *
 * Allocates (depending on the USE_RBPS / USE_RBP*_POOL configuration)
 * the small and large receive buffer pools, the rx buffer ready queue
 * and the tx buffer ready queue, then programs the group's registers.
 * Returns 0 or -ENOMEM; partial allocations are left for the caller's
 * teardown path.
 */
static int __init
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0, SLAB_KERNEL);
#else
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
#endif
	if (he_dev->rbps_pool == NULL)
	{
		hprintk1("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk1("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL)
	{
		hprintk1("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL)
	{
		/* fix: was dereferenced unchecked in the loop below */
		hprintk1("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i)
	{
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE-1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
				RBP_QSIZE(CONFIG_RBPS_SIZE-1) |
						RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0, SLAB_KERNEL);
#else
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
#endif
	if (he_dev->rbpl_pool == NULL)
	{
		hprintk1("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL)
	{
		hprintk1("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL)
	{
		hprintk1("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL)
	{
		/* fix: was dereferenced unchecked in the loop below */
		hprintk1("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i)
	{
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE-1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
				RBP_QSIZE(CONFIG_RBPL_SIZE-1) |
							RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL)
	{
		hprintk1("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE-1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce)
	{
		hprintk1("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	}
	else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL)
	{
		hprintk1("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}
/*
 * he_init_irq - allocate interrupt queue 0 and hook up the pci interrupt
 *
 * Only interrupt queue 0 is used (see the NOTES at the top of the
 * file); queues 1-3 and the group mapping registers are explicitly
 * zeroed.  Returns 0, -ENOMEM, or -EINVAL if the irq is unavailable.
 */
static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5 tail offset for each interrupt queue is located after the
	   end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL)
	{
		hprintk1("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark all slots invalid so new entries can be recognized */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt queues 1-3 are unused */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev))
	{
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

#ifdef BUS_INT_WAR
	HPRINTK("sn_add_polled_interrupt(irq %d, 1)\n", he_dev->irq);
	sn_add_polled_interrupt(he_dev->irq, 1);
#endif

	return 0;
}
/*
 * he_start() - one-time hardware bring-up for a ForeRunnerHE adapter.
 *
 * Walks the initialization sequence from the adapter documentation (the
 * "4.x" / "5.x" section numbers in the comments below): pci bus
 * controller setup, card reset, bus-width and endianness configuration,
 * eeprom (prom) read, sdram and connection-memory initialization, local
 * buffer free pools, interrupt queues, host status page, and finally
 * enabling the transmit and receive engines.
 *
 * @dev: the registered atm device; its driver-private pointer must
 *       already refer to a struct he_dev with pci_dev filled in.
 *
 * Returns 0 on success, a negative errno on failure.  Allocations made
 * before a failure are left in place; presumably the caller runs
 * he_stop() to release them -- TODO confirm against the probe path.
 */
static int __init
he_start(struct atm_dev *dev)
{
struct he_dev *he_dev;
struct pci_dev *pci_dev;
u16 command;
u32 gen_cntl_0, host_cntl, lb_swap;
u8 cache_size, timer;
unsigned err;
unsigned int status, reg;
int i, group;
he_dev = HE_DEV(dev);
pci_dev = he_dev->pci_dev;
/* membase initially holds the *bus* address of BAR 0; it is replaced
   with the ioremap()ed virtual address further down */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,3)
he_dev->membase = pci_dev->resource[0].start;
#else
he_dev->membase = pci_dev->base_address[0] & PCI_BASE_ADDRESS_MEM_MASK;
#endif
HPRINTK("membase = 0x%lx irq = %d.\n", he_dev->membase, pci_dev->irq);
/*
 * pci bus controller initialization
 */
/* 4.3 pci bus controller-specific initialization */
if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0)
{
hprintk1("can't read GEN_CNTL_0\n");
return -EINVAL;
}
gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0)
{
hprintk1("can't write GEN_CNTL_0.\n");
return -EINVAL;
}
if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0)
{
hprintk1("can't read PCI_COMMAND.\n");
return -EINVAL;
}
/* enable memory-space decode, bus mastering and mem-write-invalidate */
command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0)
{
hprintk1("can't enable memory.\n");
return -EINVAL;
}
if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size))
{
hprintk1("can't read cache line size?\n");
return -EINVAL;
}
/* the adapter wants at least a 16-dword cache line; failure to set it
   is only logged, not fatal */
if (cache_size < 16)
{
cache_size = 16;
if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
hprintk("can't set cache line size to %d\n", cache_size);
}
if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer))
{
hprintk1("can't read latency timer?\n");
return -EINVAL;
}
/* from table 3.9
 *
 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
 *
 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
 *
 */
#define LAT_TIMER 209
if (timer < LAT_TIMER)
{
HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
timer = LAT_TIMER;
if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
hprintk("can't set latency timer to %d\n", timer);
}
/* from here on membase is the ioremap()ed register window */
if (!(he_dev->membase = (unsigned long) ioremap(he_dev->membase, HE_REGMAP_SIZE))) {
hprintk1("can't set up page mapping\n");
return -EINVAL;
}
/* 4.4 card reset */
he_writel(he_dev, 0x0, RESET_CNTL);
he_writel(he_dev, 0xff, RESET_CNTL);
/* NOTE(review): a 16 ms busy-wait via udelay() is long; mdelay()/msleep()
   is the usual choice -- confirm whether this context may sleep */
udelay(16*1000); /* 16 ms */
status = he_readl(he_dev, RESET_CNTL);
if ((status & BOARD_RST_STATUS) == 0)
{
hprintk1("reset failed\n");
return -EINVAL;
}
/* 4.5 set bus width */
host_cntl = he_readl(he_dev, HOST_CNTL);
if (host_cntl & PCI_BUS_SIZE64)
gen_cntl_0 |= ENBL_64;
else
gen_cntl_0 &= ~ENBL_64;
/* module parameter override: force 32-bit transfers */
if (disable64 == 1)
{
hprintk1("disabling 64-bit pci bus transfers\n");
gen_cntl_0 &= ~ENBL_64;
}
if (gen_cntl_0 & ENBL_64) hprintk1("64-bit transfers enabled\n");
pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
/* 4.7 read prom contents */
for(i=0; i<PROD_ID_LEN; ++i)
he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
he_dev->media = read_prom_byte(he_dev, MEDIA);
for(i=0; i<6; ++i)
dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
he_dev->prod_id,
he_dev->media & 0x40 ? "SM" : "MM",
dev->esi[0],
dev->esi[1],
dev->esi[2],
dev->esi[3],
dev->esi[4],
dev->esi[5]);
he_dev->atm_dev->link_rate = he_is622(he_dev) ?
ATM_OC12_PCR : ATM_OC3_PCR;
/* 4.6 set host endianess */
lb_swap = he_readl(he_dev, LB_SWAP);
if (he_is622(he_dev))
lb_swap &= ~XFER_SIZE; /* 4 cells */
else
lb_swap |= XFER_SIZE; /* 8 cells */
#ifdef __BIG_ENDIAN
lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
he_writel(he_dev, lb_swap, LB_SWAP);
/* 4.8 sdram controller initialization */
he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
/* 4.9 initialize rnum value */
lb_swap |= SWAP_RNUM_MAX(0xf);
he_writel(he_dev, lb_swap, LB_SWAP);
/* 4.10 initialize the interrupt queues */
if ((err = he_init_irq(he_dev)) != 0) return err;
#ifdef USE_TASKLET
tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
spin_lock_init(&he_dev->global_lock);
/* 4.11 enable pci bus controller state machines */
host_cntl |= (OUTFF_ENB | CMDFF_ENB |
QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
he_writel(he_dev, host_cntl, HOST_CNTL);
gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
/*
 * atm network controller initialization
 */
/* 5.1.1 generic configuration state */
/*
 * local (cell) buffer memory map
 *
 *             HE155                          HE622
 *
 *        0 ____________1023 bytes  0 _______________________2047 bytes
 *         |            |            |                   |   |
 *         |  utility   |            |        rx0        |   |
 *        5|____________|         255|___________________| u |
 *        6|            |         256|                   | t |
 *         |            |            |                   | i |
 *         |    rx0     |     row    |        tx         | l |
 *         |            |            |                   | i |
 *         |            |         767|___________________| t |
 *      517|____________|         768|                   | y |
 * row  518|            |            |        rx1        |   |
 *         |            |        1023|___________________|___|
 *         |            |
 *         |    tx      |
 *         |            |
 *         |            |
 *     1535|____________|
 *     1536|            |
 *         |    rx1     |
 *     2047|____________|
 *
 */
/* total 4096 connections */
he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
/* module parameters nvpibits/nvcibits may override the split, but the
   total connection-id width is fixed at HE_MAXCIDBITS */
if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS)
{
hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
return -ENODEV;
}
if (nvpibits != -1)
{
he_dev->vpibits = nvpibits;
he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
}
if (nvcibits != -1)
{
he_dev->vcibits = nvcibits;
he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
}
/* cell-buffer geometry per the memory map above */
if (he_is622(he_dev))
{
he_dev->cells_per_row = 40;
he_dev->bytes_per_row = 2048;
he_dev->r0_numrows = 256;
he_dev->tx_numrows = 512;
he_dev->r1_numrows = 256;
he_dev->r0_startrow = 0;
he_dev->tx_startrow = 256;
he_dev->r1_startrow = 768;
}
else
{
he_dev->cells_per_row = 20;
he_dev->bytes_per_row = 1024;
he_dev->r0_numrows = 512;
he_dev->tx_numrows = 1018;
he_dev->r1_numrows = 512;
he_dev->r0_startrow = 6;
he_dev->tx_startrow = 518;
he_dev->r1_startrow = 1536;
}
he_dev->cells_per_lbuf = 4;
he_dev->buffer_limit = 4;
he_dev->r0_numbuffs = he_dev->r0_numrows *
he_dev->cells_per_row / he_dev->cells_per_lbuf;
if (he_dev->r0_numbuffs > 2560) he_dev->r0_numbuffs = 2560;
he_dev->r1_numbuffs = he_dev->r1_numrows *
he_dev->cells_per_row / he_dev->cells_per_lbuf;
if (he_dev->r1_numbuffs > 2560) he_dev->r1_numbuffs = 2560;
he_dev->tx_numbuffs = he_dev->tx_numrows *
he_dev->cells_per_row / he_dev->cells_per_lbuf;
if (he_dev->tx_numbuffs > 5120) he_dev->tx_numbuffs = 5120;
/* 5.1.2 configure hardware dependent registers */
he_writel(he_dev,
SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
LBARB);
he_writel(he_dev, BANK_ON |
(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
SDRAMCON);
he_writel(he_dev,
(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
RM_RW_WAIT(1), RCMCONFIG);
he_writel(he_dev,
(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
TM_RW_WAIT(1), TCMCONFIG);
he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
he_writel(he_dev,
(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
RX_VALVP(he_dev->vpibits) |
RX_VALVC(he_dev->vcibits), RC_CONFIG);
he_writel(he_dev, DRF_THRESH(0x20) |
(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
TX_VCI_MASK(he_dev->vcibits) |
LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
he_writel(he_dev, 0x0, TXAAL5_PROTO);
he_writel(he_dev, PHY_INT_ENB |
(he_is622(he_dev) ? PTMR_PRE(67-1) : PTMR_PRE(50-1)),
RH_CONFIG);
/* 5.1.3 initialize connection memory (zero every word) */
for(i=0; i < TCM_MEM_SIZE; ++i)
he_writel_tcm(he_dev, 0, i);
for(i=0; i < RCM_MEM_SIZE; ++i)
he_writel_rcm(he_dev, 0, i);
/*
 * transmit connection memory map
 *
 *                  tx memory
 *          0x0 ___________________
 *             |                   |
 *             |                   |
 *             |       TSRa        |
 *             |                   |
 *             |                   |
 *       0x8000|___________________|
 *             |                   |
 *             |       TSRb        |
 *       0xc000|___________________|
 *             |                   |
 *             |       TSRc        |
 *       0xe000|___________________|
 *             |       TSRd        |
 *       0xf000|___________________|
 *             |       tmABR       |
 *      0x10000|___________________|
 *             |                   |
 *             |       tmTPD       |
 *             |___________________|
 *             |                   |
 *                      ....
 *      0x1ffff|___________________|
 *
 *
 */
he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
/*
 * receive connection memory map
 *
 *          0x0 ___________________
 *             |                   |
 *             |                   |
 *             |       RSRa        |
 *             |                   |
 *             |                   |
 *       0x8000|___________________|
 *             |                   |
 *             |      rx0/1        |
 *             |       LBM         |   link lists of local
 *             |     tx            |   buffer memory
 *             |                   |
 *       0xd000|___________________|
 *             |                   |
 *             |      rmABR        |
 *       0xe000|___________________|
 *             |                   |
 *             |       RSRb        |
 *             |___________________|
 *             |                   |
 *                      ....
 *       0xffff|___________________|
 */
he_writel(he_dev, 0x08000, RCMLBM_BA);
he_writel(he_dev, 0x0e000, RCMRSRB_BA);
he_writel(he_dev, 0x0d800, RCMABR_BA);
/* 5.1.4 initialize local buffer free pools linked lists */
he_init_rx_lbfp0(he_dev);
he_init_rx_lbfp1(he_dev);
he_writel(he_dev, 0x0, RLBC_H);
he_writel(he_dev, 0x0, RLBC_T);
he_writel(he_dev, 0x0, RLBC_H2);
he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
he_init_tx_lbfp(he_dev);
he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
/* 5.1.5 initialize intermediate receive queues */
if (he_is622(he_dev))
{
he_writel(he_dev, 0x000f, G0_INMQ_S);
he_writel(he_dev, 0x200f, G0_INMQ_L);
he_writel(he_dev, 0x001f, G1_INMQ_S);
he_writel(he_dev, 0x201f, G1_INMQ_L);
he_writel(he_dev, 0x002f, G2_INMQ_S);
he_writel(he_dev, 0x202f, G2_INMQ_L);
he_writel(he_dev, 0x003f, G3_INMQ_S);
he_writel(he_dev, 0x203f, G3_INMQ_L);
he_writel(he_dev, 0x004f, G4_INMQ_S);
he_writel(he_dev, 0x204f, G4_INMQ_L);
he_writel(he_dev, 0x005f, G5_INMQ_S);
he_writel(he_dev, 0x205f, G5_INMQ_L);
he_writel(he_dev, 0x006f, G6_INMQ_S);
he_writel(he_dev, 0x206f, G6_INMQ_L);
he_writel(he_dev, 0x007f, G7_INMQ_S);
he_writel(he_dev, 0x207f, G7_INMQ_L);
}
else
{
he_writel(he_dev, 0x0000, G0_INMQ_S);
he_writel(he_dev, 0x0008, G0_INMQ_L);
he_writel(he_dev, 0x0001, G1_INMQ_S);
he_writel(he_dev, 0x0009, G1_INMQ_L);
he_writel(he_dev, 0x0002, G2_INMQ_S);
he_writel(he_dev, 0x000a, G2_INMQ_L);
he_writel(he_dev, 0x0003, G3_INMQ_S);
he_writel(he_dev, 0x000b, G3_INMQ_L);
he_writel(he_dev, 0x0004, G4_INMQ_S);
he_writel(he_dev, 0x000c, G4_INMQ_L);
he_writel(he_dev, 0x0005, G5_INMQ_S);
he_writel(he_dev, 0x000d, G5_INMQ_L);
he_writel(he_dev, 0x0006, G6_INMQ_S);
he_writel(he_dev, 0x000e, G6_INMQ_L);
he_writel(he_dev, 0x0007, G7_INMQ_S);
he_writel(he_dev, 0x000f, G7_INMQ_L);
}
/* 5.1.6 application tunable parameters */
he_writel(he_dev, 0x0, MCC);
he_writel(he_dev, 0x0, OEC);
he_writel(he_dev, 0x0, DCC);
he_writel(he_dev, 0x0, CEC);
/* 5.1.7 cs block initialization */
he_init_cs_block(he_dev);
/* 5.1.8 cs block connection memory initialization */
he_init_cs_block_rcm(he_dev);
/* 5.1.10 initialize host structures */
he_init_tpdrq(he_dev);
#ifdef USE_TPD_POOL
/* pci_pool_create() lost its flags argument in 2.5.44 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
sizeof(struct he_tpd), TPD_ALIGNMENT, 0, SLAB_KERNEL);
#else
he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
#endif
if (he_dev->tpd_pool == NULL)
{
hprintk1("unable to create tpd pci_pool\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
/* static tpd array: the array index is encoded in the status word so
   __alloc_tpd()/he_service_tbrq() can map dma addresses back to slots */
he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
if (!he_dev->tpd_base)
return -ENOMEM;
for(i = 0; i < CONFIG_NUMTPDS; ++i)
{
he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
he_dev->tpd_base[i].inuse = 0;
}
he_dev->tpd_head = he_dev->tpd_base;
he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS-1];
#endif
/* group 0 carries all traffic; the remaining groups get zeroed queues */
if (he_init_group(he_dev, 0) != 0)
return -ENOMEM;
for (group = 1; group < HE_NUM_GROUPS; ++group)
{
he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
G0_RBPS_BS + (group * 32));
he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
G0_RBPL_QI + (group * 32));
he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
G0_RBRQ_Q + (group * 16));
he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
he_writel(he_dev, TBRQ_THRESH(0x1),
G0_TBRQ_THRESH + (group * 16));
he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
}
/* host status page */
he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
sizeof(struct he_hsp), &he_dev->hsp_phys);
if (he_dev->hsp == NULL)
{
hprintk1("failed to allocate host status page\n");
return -ENOMEM;
}
memset(he_dev->hsp, 0, sizeof(struct he_hsp));
he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
/* initialize framer */
#ifdef CONFIG_ATM_HE_USE_SUNI
suni_init(he_dev->atm_dev);
if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */
/* module parameter: switch framing to SDH */
if (sdh)
{
/* this really should be in suni.c but for now... */
int val;
val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
val = (val & ~SUNI_TPOP_APM_S) | ( 0x2 << SUNI_TPOP_APM_S_SHIFT);
he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
}
/* 5.1.12 enable transmit and receive */
reg = he_readl_mbox(he_dev, CS_ERCTL0);
reg |= TX_ENABLE|ER_ENABLE;
he_writel_mbox(he_dev, reg, CS_ERCTL0);
reg = he_readl(he_dev, RC_CONFIG);
reg |= RX_ENABLE;
he_writel(he_dev, reg, RC_CONFIG);
/* cid -> vcc lookup table, one slot per possible connection id */
#ifndef USE_HE_FIND_VCC
he_dev->he_vcc_table = kmalloc(sizeof(struct he_vcc_table) *
(1 << (he_dev->vcibits + he_dev->vpibits)), GFP_KERNEL);
if (he_dev->he_vcc_table == NULL)
{
hprintk1("failed to alloc he_vcc_table\n");
return -ENOMEM;
}
memset(he_dev->he_vcc_table, 0, sizeof(struct he_vcc_table) *
(1 << (he_dev->vcibits + he_dev->vpibits)));
#endif
/* cbr rate-register bookkeeping starts out unused */
for (i = 0; i < HE_NUM_CS_STPER; ++i)
{
he_dev->cs_stper[i].inuse = 0;
he_dev->cs_stper[i].pcr = -1;
}
he_dev->total_bw = 0;
/* atm linux initialization */
he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
he_dev->irq_peak = 0;
he_dev->rbrq_peak = 0;
he_dev->rbpl_peak = 0;
he_dev->tbrq_peak = 0;
HPRINTK("hell bent for leather!\n");
return 0;
}
static void
he_stop(struct he_dev *he_dev)
{
u16 command;
u32 gen_cntl_0, reg;
struct pci_dev *pci_dev;
pci_dev = he_dev->pci_dev;
/* disable interrupts */
if (he_dev->membase)
{
pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
#ifdef USE_TASKLET
tasklet_disable(&he_dev->tasklet);
#endif
/* disable recv and transmit */
reg = he_readl_mbox(he_dev, CS_ERCTL0);
reg &= ~(TX_ENABLE|ER_ENABLE);
he_writel_mbox(he_dev, reg, CS_ERCTL0);
reg = he_readl(he_dev, RC_CONFIG);
reg &= ~(RX_ENABLE);
he_writel(he_dev, reg, RC_CONFIG);
}
#ifdef CONFIG_ATM_HE_USE_SUNI
if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */
if (he_dev->irq)
{
#ifdef BUS_INT_WAR
sn_delete_polled_interrupt(he_dev->irq);
#endif
free_irq(he_dev->irq, he_dev);
}
if (he_dev->irq_base)
pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
if (he_dev->hsp)
pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
he_dev->hsp, he_dev->hsp_phys);
if (he_dev->rbpl_base)
{
#ifdef USE_RBPL_POOL
for (i=0; i<CONFIG_RBPL_SIZE; ++i)
{
void *cpuaddr = he_dev->rbpl_virt[i].virt;
dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
}
#else
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
}
#ifdef USE_RBPL_POOL
if (he_dev->rbpl_pool)
pci_pool_destroy(he_dev->rbpl_pool);
#endif
#ifdef USE_RBPS
if (he_dev->rbps_base)
{
#ifdef USE_RBPS_POOL
for (i=0; i<CONFIG_RBPS_SIZE; ++i)
{
void *cpuaddr = he_dev->rbps_virt[i].virt;
dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
}
#else
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
}
#ifdef USE_RBPS_POOL
if (he_dev->rbps_pool)
pci_pool_destroy(he_dev->rbps_pool);
#endif
#endif /* USE_RBPS */
if (he_dev->rbrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
he_dev->rbrq_base, he_dev->rbrq_phys);
if (he_dev->tbrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tbrq_base, he_dev->tbrq_phys);
if (he_dev->tpdrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tpdrq_base, he_dev->tpdrq_phys);
#ifdef USE_TPD_POOL
if (he_dev->tpd_pool)
pci_pool_destroy(he_dev->tpd_pool);
#else
if (he_dev->tpd_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
he_dev->tpd_base, he_dev->tpd_base_phys);
#endif
#ifndef USE_HE_FIND_VCC
if (he_dev->he_vcc_table)
kfree(he_dev->he_vcc_table);
#endif
if (he_dev->pci_dev)
{
pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
}
if (he_dev->membase) iounmap((void *) he_dev->membase);
}
/*
 * __alloc_tpd() - obtain a free transmit packet descriptor.
 *
 * Two implementations, selected at compile time:
 *  - USE_TPD_POOL: allocate from a pci_pool; the dma handle is encoded
 *    into tpd->status via TPD_ADDR() so it can be recovered later.
 *  - static array: scan forward from tpd_head (wrapping at tpd_end) for
 *    a descriptor whose inuse flag is clear; at most CONFIG_NUMTPDS
 *    slots are examined before giving up.
 *
 * In both cases the three iovec slots are zeroed.  Returns NULL when no
 * descriptor is available.  May be called from atomic context
 * (SLAB_ATOMIC in the pool path).
 */
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
struct he_tpd *tpd;
dma_addr_t dma_handle;
tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
if (tpd == NULL)
return NULL;
/* stash the dma address in the status word; TPD_MASK bits stay clear */
tpd->status = TPD_ADDR(dma_handle);
tpd->reserved = 0;
tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
return tpd;
#else
int i;
for(i = 0; i < CONFIG_NUMTPDS; ++i)
{
/* advance first, so consecutive calls hand out consecutive slots */
++he_dev->tpd_head;
if (he_dev->tpd_head > he_dev->tpd_end) {
he_dev->tpd_head = he_dev->tpd_base;
}
if (!he_dev->tpd_head->inuse) {
he_dev->tpd_head->inuse = 1;
/* keep only the encoded slot address; clear the flag bits */
he_dev->tpd_head->status &= TPD_MASK;
he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
return he_dev->tpd_head;
}
}
hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
return NULL;
#endif
}
#define AAL5_LEN(buf,len) \
((((unsigned char *)(buf))[(len)-6]<<8) | \
(((unsigned char *)(buf))[(len)-5]))
/* 2.10.1.2 receive
*
* aal5 packets can optionally return the tcp checksum in the lower
* 16 bits of the crc (RSR0_TCP_CKSUM)
*/
#define TCP_CKSUM(buf,len) \
((((unsigned char *)(buf))[(len)-2]<<8) | \
(((unsigned char *)(buf))[(len-1)]))
/*
 * he_service_rbrq() - drain the receive buffer return queue for one
 * interrupt group and reassemble completed pdus.
 *
 * The hardware advances the queue tail (read from the host status
 * page); this routine walks rbrq_head up to that tail.  Each entry
 * names a loaned local buffer; its address and length are appended to
 * the owning vcc's iovec list.  When an entry marked END_PDU arrives
 * with no error flags set, an skb is charged/allocated, the iovec
 * fragments are copied in, the aal-specific trailer handling is applied
 * and the skb is pushed up the stack.  On any of the error/close paths
 * the accumulated buffers are simply returned to the free pools by
 * clearing RBP_LOANED.
 *
 * Returns the number of pdus assembled (used by the caller to decide
 * whether the free pools need replenishing).
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
struct he_rbrq *rbrq_tail = (struct he_rbrq *)
((unsigned long)he_dev->rbrq_base |
he_dev->hsp->group[group].rbrq_tail);
struct he_rbp *rbp = NULL;
unsigned cid, lastcid = -1;
unsigned buf_len = 0;
struct sk_buff *skb;
struct atm_vcc *vcc = NULL;
struct he_vcc *he_vcc;
struct iovec *iov;
int pdus_assembled = 0;
int updated = 0;
while (he_dev->rbrq_head != rbrq_tail)
{
++updated;
HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
he_dev->rbrq_head, group,
RBRQ_ADDR(he_dev->rbrq_head),
RBRQ_BUFLEN(he_dev->rbrq_head),
RBRQ_CID(he_dev->rbrq_head),
RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
/* the RBP_SMALLBUF bit in the buffer address selects which free
   pool (small/large) the buffer descriptor lives in */
#ifdef USE_RBPS
if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
else
#endif
rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
/* buffer length is reported in 32-bit words */
buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
cid = RBRQ_CID(he_dev->rbrq_head);
#ifdef USE_HE_FIND_VCC
/* cache the last lookup -- consecutive entries usually share a cid */
if (cid != lastcid)
vcc = he_find_vcc(he_dev, cid);
lastcid = cid;
#else
vcc = HE_LOOKUP_VCC(he_dev, cid);
#endif
if (vcc == NULL)
{
hprintk("vcc == NULL (cid 0x%x)\n", cid);
/* orphaned buffer: return it to the pool unless the hardware
   already reported a host-buffer error for it */
if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
rbp->status &= ~RBP_LOANED;
goto next_rbrq_entry;
}
he_vcc = HE_VCC(vcc);
if (he_vcc == NULL)
{
hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
rbp->status &= ~RBP_LOANED;
goto next_rbrq_entry;
}
if (RBRQ_HBUF_ERR(he_dev->rbrq_head))
{
hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->rx_drop;
#else
atomic_inc(&vcc->stats->rx_drop);
#endif
goto return_host_buffers;
}
/* accumulate this fragment on the vcc's iovec list */
he_vcc->iov_tail->iov_base = (void *) RBRQ_ADDR(he_dev->rbrq_head);
he_vcc->iov_tail->iov_len = buf_len;
he_vcc->pdu_len += buf_len;
++he_vcc->iov_tail;
if (RBRQ_CON_CLOSED(he_dev->rbrq_head))
{
lastcid = -1;
HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
/* he_close() is waiting for this notification */
wake_up(&he_vcc->rx_waitq);
goto return_host_buffers;
}
#ifdef notdef
if (he_vcc->iov_tail - he_vcc->iov_head > 32)
{
hprintk("iovec full!  cid 0x%x\n", cid);
goto return_host_buffers;
}
#endif
/* keep collecting fragments until the pdu is complete */
if (!RBRQ_END_PDU(he_dev->rbrq_head)) goto next_rbrq_entry;
if (RBRQ_LEN_ERR(he_dev->rbrq_head)
|| RBRQ_CRC_ERR(he_dev->rbrq_head))
{
HPRINTK("%s%s (%d.%d)\n",
RBRQ_CRC_ERR(he_dev->rbrq_head)
? "CRC_ERR " : "",
RBRQ_LEN_ERR(he_dev->rbrq_head)
? "LEN_ERR" : "",
vcc->vpi, vcc->vci);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->rx_err;
#else
atomic_inc(&vcc->stats->rx_err);
#endif
goto return_host_buffers;
}
/* complete, error-free pdu: charge the vcc and build the skb */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,15)
skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
GFP_ATOMIC);
#else
if (!atm_charge(vcc, atm_pdu2truesize(he_vcc->pdu_len + rx_skb_reserve)))
skb = NULL;
else
{
skb = alloc_skb(he_vcc->pdu_len + rx_skb_reserve, GFP_ATOMIC);
if (!skb) atm_return(vcc,
atm_pdu2truesize(he_vcc->pdu_len + rx_skb_reserve));
}
#endif
if (!skb)
{
HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
goto return_host_buffers;
}
if (rx_skb_reserve > 0) skb_reserve(skb, rx_skb_reserve);
do_gettimeofday(&skb->stamp);
/* copy each loaned buffer fragment into the linear skb */
for(iov = he_vcc->iov_head;
iov < he_vcc->iov_tail; ++iov)
{
#ifdef USE_RBPS
if ((u32)iov->iov_base & RBP_SMALLBUF)
memcpy(skb_put(skb, iov->iov_len),
he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
else
#endif
memcpy(skb_put(skb, iov->iov_len),
he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
}
switch(vcc->qos.aal)
{
case ATM_AAL0:
/* 2.10.1.5 raw cell receive */
skb->len = ATM_AAL0_SDU;
skb->tail = skb->data + skb->len;
break;
case ATM_AAL5:
/* 2.10.1.2 aal5 receive -- trim to the length in the trailer */
skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI)
{
skb->ip_summed = CHECKSUM_HW;
skb->csum = TCP_CKSUM(skb->data,
he_vcc->pdu_len);
}
#endif
break;
}
#ifdef should_never_happen
if (skb->len > vcc->qos.rxtp.max_sdu)
hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif
#ifdef notdef
ATM_SKB(skb)->vcc = vcc;
#endif
vcc->push(vcc, skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->rx;
#else
atomic_inc(&vcc->stats->rx);
#endif
return_host_buffers:
/* hand every accumulated buffer back to its free pool and reset
   the per-vcc reassembly state */
++pdus_assembled;
for(iov = he_vcc->iov_head;
iov < he_vcc->iov_tail; ++iov)
{
#ifdef USE_RBPS
if ((u32)iov->iov_base & RBP_SMALLBUF)
rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
else
#endif
rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
rbp->status &= ~RBP_LOANED;
}
he_vcc->iov_tail = he_vcc->iov_head;
he_vcc->pdu_len = 0;
next_rbrq_entry:
he_dev->rbrq_head = (struct he_rbrq *)
((unsigned long) he_dev->rbrq_base |
RBRQ_MASK(++he_dev->rbrq_head));
}
if (updated)
{
if (updated > he_dev->rbrq_peak) he_dev->rbrq_peak = updated;
/* tell the hardware how far we have consumed */
he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
G0_RBRQ_H + (group * 16));
#ifdef CONFIG_IA64_SGI_SN2
(void) he_readl(he_dev, G0_RBRQ_H + (group * 16));
#endif
}
return pdus_assembled;
}
/*
 * he_service_tbrq() - drain the transmit buffer return queue for one
 * interrupt group.
 *
 * Each returned entry identifies a completed tpd: the tpd is looked up
 * (by dma address on the outstanding list in the pool configuration, or
 * by index into the static array otherwise), its mapped iovec segments
 * are unmapped, and its skb is popped/freed.  An EOS entry instead
 * wakes whoever is sleeping on the vcc's tx_waitq (he_close()).
 * Finally the queue head register is advanced.
 *
 * Fix in this revision: the EOS debug print dereferenced tpd->vcc
 * (vpi/vci) *before* the NULL check that guards the wake_up, which
 * could oops with debug printing enabled; both now sit inside the
 * check.
 */
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
		((unsigned long)he_dev->tbrq_base |
			he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct list_head *p;
#endif

	/* 2.1.6 transmit buffer return queue */
	while (he_dev->tbrq_head != tbrq_tail)
	{
		++updated;
		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		/* find the outstanding tpd whose dma address matches */
		tpd = NULL;
		p = &he_dev->outstanding_tpds;
		while ((p = p->next) != &he_dev->outstanding_tpds)
		{
			struct he_tpd *__tpd = list_entry(p, struct he_tpd, entry);
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head))
			{
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}
		if (tpd == NULL)
		{
			hprintk("unable to locate tpd for dma buffer %x\n",
				TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif
		if (TBRQ_EOS(he_dev->tbrq_head))
		{
			/* fix: only dereference tpd->vcc after the NULL check */
			if (tpd->vcc)
			{
				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
			}
			goto next_tbrq_entry;
		}
		/* unmap every dma segment up to and including the last one */
		for(slot = 0; slot < TPD_MAXIOV; ++slot)
		{
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
					PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST) break;
		}
		if (tpd->skb) /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
		{
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}
next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd) pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
			((unsigned long) he_dev->tbrq_base |
				TBRQ_MASK(++he_dev->tbrq_head));
	}
	if (updated)
	{
		if (updated > he_dev->tbrq_peak) he_dev->tbrq_peak = updated;
		/* advance the hardware's queue head */
		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
			G0_TBRQ_H + (group * 16));
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl(he_dev, G0_TBRQ_H + (group * 16));
#endif
	}
}
/*
 * he_service_rbpl() - replenish the large-buffer free pool ring.
 *
 * Advances rbpl_tail over every descriptor that can be re-posted to
 * the adapter, marking each RBP_LOANED, then publishes the new tail
 * in G0_RBPL_T.  Stops when the next slot is the hardware head
 * (table 3.42: tail must never equal head) or the buffer is still on
 * loan to the host.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *hw_head;
	struct he_rbp *candidate;
	int refilled = 0;

	/* current hardware head of the large-buffer pool */
	hw_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
		RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	while (1)
	{
		candidate = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
			RBPL_MASK(he_dev->rbpl_tail+1));
		/* table 3.42 -- rbpl_tail should never be set to rbpl_head,
		   and a buffer still loaned out cannot be re-posted */
		if (candidate == hw_head || (candidate->status & RBP_LOANED))
			break;
		candidate->status |= RBP_LOANED;
		he_dev->rbpl_tail = candidate;
		++refilled;
	}

	if (!refilled)
		return;
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
#ifdef CONFIG_IA64_SGI_SN2
	(void) he_readl(he_dev, G0_RBPL_T);	/* flush posted write */
#endif
}
#ifdef USE_RBPS
/*
 * he_service_rbps() - replenish the small-buffer free pool ring.
 *
 * Mirror image of he_service_rbpl() for the small-buffer pool:
 * advances rbps_tail over every re-postable descriptor, marking each
 * RBP_LOANED, then publishes the new tail in G0_RBPS_T.
 */
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *hw_head;
	struct he_rbp *candidate;
	int refilled = 0;

	/* current hardware head of the small-buffer pool */
	hw_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
		RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	while (1)
	{
		candidate = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
			RBPS_MASK(he_dev->rbps_tail+1));
		/* table 3.42 -- rbps_tail should never be set to rbps_head,
		   and a buffer still loaned out cannot be re-posted */
		if (candidate == hw_head || (candidate->status & RBP_LOANED))
			break;
		candidate->status |= RBP_LOANED;
		he_dev->rbps_tail = candidate;
		++refilled;
	}

	if (!refilled)
		return;
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
#ifdef CONFIG_IA64_SGI_SN2
	(void) he_readl(he_dev, G0_RBPS_T);	/* flush posted write */
#endif
}
#endif /* USE_RBPS */
/*
 * he_tasklet() - bottom half: dispatch queued interrupt events.
 *
 * Walks the irq event ring from irq_head to irq_tail, servicing the
 * receive/transmit return queues and free pools according to each
 * event's type, then acknowledges consumption by rewriting the IRQ0
 * head register.  Runs either as a real tasklet (USE_TASKLET, under
 * the device spinlock) or synchronously from the irq handler.
 */
static void
he_tasklet(unsigned long data)
{
unsigned long flags;
struct he_dev *he_dev = (struct he_dev *) data;
int group, type;
int updated = 0;
HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
HE_SPIN_LOCK(he_dev, flags);
#endif
while(he_dev->irq_head != he_dev->irq_tail)
{
++updated;
type = ITYPE_TYPE(he_dev->irq_head->isw);
group = ITYPE_GROUP(he_dev->irq_head->isw);
switch (type)
{
case ITYPE_RBRQ_THRESH:
hprintk("rbrq%d threshold\n", group);
/* fallthrough -- threshold events are serviced like timer events */
case ITYPE_RBRQ_TIMER:
if (he_service_rbrq(he_dev, group))
{
/* pdus were assembled, so buffers were consumed: refill pools */
he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
}
break;
case ITYPE_TBRQ_THRESH:
hprintk("tbrq%d threshold\n", group);
/* fallthrough -- drain the transmit return queue either way */
case ITYPE_TPD_COMPLETE:
he_service_tbrq(he_dev, group);
break;
case ITYPE_RBPL_THRESH:
he_service_rbpl(he_dev, group);
break;
case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
break;
case ITYPE_PHY:
#ifdef CONFIG_ATM_HE_USE_SUNI
/* drop the lock around the PHY callback -- it may take its own */
HE_SPIN_UNLOCK(he_dev, flags);
if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
HE_SPIN_LOCK(he_dev, flags);
#endif
HPRINTK1("phy interrupt\n");
break;
case ITYPE_OTHER:
switch (type|group)
{
case ITYPE_PARITY:
hprintk1("parity error\n");
break;
case ITYPE_ABORT:
hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
break;
}
break;
default:
if (he_dev->irq_head->isw == ITYPE_INVALID)
{
/* see 8.1.1 -- check all queues */
HPRINTK("isw not updated 0x%x\n",
he_dev->irq_head->isw);
he_service_rbrq(he_dev, 0);
he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
he_service_tbrq(he_dev, 0);
}
else
hprintk("bad isw = 0x%x?\n",
he_dev->irq_head->isw);
}
/* mark the slot consumed and advance around the ring */
he_dev->irq_head->isw = ITYPE_INVALID;
he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
}
if (updated)
{
if (updated > he_dev->irq_peak) he_dev->irq_peak = updated;
he_writel(he_dev,
IRQ_SIZE(CONFIG_IRQ_SIZE) |
IRQ_THRESH(CONFIG_IRQ_THRESH) |
IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
}
#ifdef USE_TASKLET
HE_SPIN_UNLOCK(he_dev, flags);
#endif
}
/*
 * he_irq_handler() - hardware interrupt entry point.
 *
 * Reads the adapter's current irq-queue tail (with a fallback read of
 * IRQ0_BASE per the controller errata when the DMA'ed tail offset has
 * not arrived yet), and if the queue is non-empty either schedules the
 * tasklet or runs he_tasklet() inline, then clears the interrupt.
 * The return type is irqreturn_t from 2.5.69 onward, void before.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
static irqreturn_t
#else
static void
#endif
he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
struct he_dev *he_dev = (struct he_dev * )dev_id;
int handled = 0;
if (he_dev == NULL)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
return IRQ_NONE;
#else
return;
#endif
HE_SPIN_LOCK(he_dev, flags);
/* tail offset is DMA'ed into host memory by the adapter (in words) */
he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
(*he_dev->irq_tailoffset << 2));
if (he_dev->irq_tail == he_dev->irq_head)
{
HPRINTK1("tailoffset not updated?\n");
/* fall back to reading the tail straight from the register */
he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
}
#ifdef DEBUG
if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
hprintk1("spurious (or shared) interrupt?\n");
#endif
if (he_dev->irq_head != he_dev->irq_tail)
{
handled = 1;
#ifdef USE_TASKLET
tasklet_schedule(&he_dev->tasklet);
#else
he_tasklet((unsigned long) he_dev);
#endif
he_writel(he_dev, INT_CLEAR_A, INT_FIFO);
/* clear interrupt */
#ifdef CONFIG_IA64_SGI_SN2
(void) he_readl(he_dev, INT_FIFO);
#endif
}
HE_SPIN_UNLOCK(he_dev, flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
return IRQ_RETVAL(handled);
#else
return;
#endif
}
/*
 * __enqueue_tpd() - post a transmit packet descriptor on the tpdrq
 * ring for connection @cid.
 *
 * If the ring looks full the head pointer is re-read from the adapter
 * before giving up; on a genuinely full ring the pdu is dropped (see
 * the FIXME below) and the tpd released.  Otherwise the tpd's dma
 * address and cid are written into the tail slot and the new tail is
 * published in TPDRQ_T.  Caller is expected to hold the device lock --
 * TODO confirm at the call sites.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
struct he_tpdrq *new_tail;
HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
tpd, cid, he_dev->tpdrq_tail);
/* new_tail = he_dev->tpdrq_tail; */
new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
TPDRQ_MASK(he_dev->tpdrq_tail+1));
/*
 * check to see if we are about to set the tail == head
 * if true, update the head pointer from the adapter
 * to see if this is really the case (reading the queue
 * head for every enqueue would be unnecessarily slow)
 */
if (new_tail == he_dev->tpdrq_head)
{
he_dev->tpdrq_head = (struct he_tpdrq *)
(((unsigned long)he_dev->tpdrq_base) |
TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
if (new_tail == he_dev->tpdrq_head)
{
hprintk("tpdrq full (cid 0x%x)\n", cid);
/*
 * FIXME
 * push tpd onto a transmit backlog queue
 * after service_tbrq, service the backlog
 * for now, we just drop the pdu
 */
if (tpd->skb)
{
if (tpd->vcc->pop)
tpd->vcc->pop(tpd->vcc, tpd->skb);
else
dev_kfree_skb_any(tpd->skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++tpd->vcc->stats->tx_err;
#else
atomic_inc(&tpd->vcc->stats->tx_err);
#endif
}
/* release the descriptor itself */
#ifdef USE_TPD_POOL
pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
tpd->inuse = 0;
#endif
return;
}
}
/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
/* remember the tpd so he_service_tbrq() can find it by dma address */
list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
he_dev->tpdrq_tail->cid = cid;
/* make sure the slot contents are visible before publishing the tail */
wmb();
he_dev->tpdrq_tail = new_tail;
he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
#ifdef CONFIG_IA64_SGI_SN2
(void) he_readl(he_dev, TPDRQ_T);
#endif
}
/*
 * he_open -- atmdev_ops open callback: set up a virtual circuit.
 *
 * Resolves the vpi/vci pair via the ATM core, allocates the per-VCC
 * driver state (struct he_vcc) and, depending on vcc->qos, programs
 * the adapter's transmit connection state registers (TSR0..TSR14) and/or
 * receive connection state registers (RSR0/1/4).
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EINVAL, -EBUSY).
 */
static int
he_open(struct atm_vcc *vcc, short vpi, int vci)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	/* let the ATM core validate or pick the circuit identifiers */
	if ((err = atm_find_ci(vcc, &vpi, &vci)))
	{
		HPRINTK("atm_find_ci err = %d\n", err);
		return err;
	}
	/* nothing to program yet for a still-unspecified address */
	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC) return 0;
	vcc->vpi = vpi;
	vcc->vci = vci;
	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	vcc->flags |= ATM_VF_ADDR;
#else
	set_bit(ATM_VF_ADDR, &vcc->flags);
#endif
	cid = he_mkcid(he_dev, vpi, vci);
	/* GFP_ATOMIC: this path must not sleep */
	he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL)
	{
		hprintk1("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}
	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;	/* -1 = no CBR rate controller assigned */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	init_waitqueue(&he_vcc->rx_waitq);
	init_waitqueue(&he_vcc->tx_waitq);
#else
	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);
#endif
	HE_VCC(vcc) = he_vcc;
	/* --- transmit side --- */
	if (vcc->qos.txtp.traffic_class != ATM_NONE)
	{
		int pcr_goal;
		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;
		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
		switch (vcc->qos.aal)
		{
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}
		/* the connection must be idle before it can be (re)opened */
		HE_SPIN_LOCK(he_dev, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		HE_SPIN_UNLOCK(he_dev, flags);
		if (TSR0_CONN_STATE(tsr0) != 0)
		{
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}
		switch(vcc->qos.txtp.traffic_class)
		{
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */
				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;
			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */
				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}
				HE_SPIN_LOCK(he_dev, flags);	/* also protects he_dev->cs_stper[] */
				/* find an unused cs_stper register
				   (or one already running at this pcr) */
				for(reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;
				if (reg == HE_NUM_CS_STPER)
				{
					err = -EBUSY;
					HE_SPIN_UNLOCK(he_dev, flags);
					goto open_failed;
				}
				he_dev->total_bw += pcr_goal;
				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;
				/* cell clock differs between 622 and 155 boards */
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;
				HPRINTK("rc_index = %d period = %d\n",
								reg, period);
				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				HE_SPIN_UNLOCK(he_dev, flags);
				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}
		HE_SPIN_LOCK(he_dev, flags);
		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl_tsr0(he_dev, cid);
#endif
		HE_SPIN_UNLOCK(he_dev, flags);
	}
	/* --- receive side --- */
	if (vcc->qos.rxtp.traffic_class != ATM_NONE)
	{
		unsigned aal;
		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);
		switch (vcc->qos.aal)
		{
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}
		HE_SPIN_LOCK(he_dev, flags);
		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN)
		{
			HE_SPIN_UNLOCK(he_dev, flags);
			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}
#ifdef USE_RBPS
		rsr1 = RSR1_GROUP(0);
		rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
		/* UBR receive gets early/partial packet discard */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI) rsr0 |= RSR0_TCP_CKSUM;
#endif
		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl_rsr0(he_dev, cid);
#endif
		HE_SPIN_UNLOCK(he_dev, flags);
#ifndef USE_HE_FIND_VCC
		HE_LOOKUP_VCC(he_dev, cid) = vcc;
#endif
	}
open_failed:
	/* NOTE(review): if the tx (CBR) side was opened above and the rx
	   side then fails, total_bw/cs_stper accounting and the opened tx
	   connection are not rolled back here -- verify intent */
	if (err)
	{
		if (he_vcc) kfree(he_vcc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
		vcc->flags &= ~ATM_VF_ADDR;
#else
		clear_bit(ATM_VF_ADDR, &vcc->flags);
#endif
	}
	else
	{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
		vcc->flags |= ATM_VF_READY;
#else
		set_bit(ATM_VF_READY, &vcc->flags);
#endif
	}
	return err;
}
/*
 * he_close -- atmdev_ops close callback: tear down a virtual circuit.
 *
 * Receive side: issue RSR0_CLOSE_CONN plus an RXCON_CLOSE mailbox
 * command, then sleep on he_vcc->rx_waitq (up to 30s) until the
 * interrupt path signals completion.
 *
 * Transmit side: drain queued tx buffers (exponential backoff, bounded
 * by MAX_RETRY), flush the connection via TSR4_FLUSH_CONN, enqueue a
 * final end-of-session TPD, and sleep on he_vcc->tx_waitq until it
 * completes; CBR connections also release their cs_stper slot and
 * bandwidth accounting.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,1)
	DECLARE_WAITQUEUE(wait, current);
#else
	struct wait_queue wait = { current, NULL };
#endif
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;
	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	vcc->flags &= ~ATM_VF_READY;
#else
	clear_bit(ATM_VF_READY, &vcc->flags);
#endif
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	if (vcc->qos.rxtp.traffic_class != ATM_NONE)
	{
		int timeout;
		HPRINTK("close rx cid 0x%x\n", cid);
		/* 2.7.2.2 close receive operation */
		/* wait for previous close (if any) to finish */
		HE_SPIN_LOCK(he_dev, flags);
		/* NOTE(review): busy-waits with the device lock held */
		while(he_readl(he_dev, RCC_STAT) & RCC_BUSY)
		{
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}
		/* queue ourselves BEFORE issuing the close so the wakeup
		   from the interrupt handler cannot be missed */
		add_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl_rsr0(he_dev, cid);
#endif
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		HE_SPIN_UNLOCK(he_dev, flags);
		timeout = schedule_timeout(30*HZ);
		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);
		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);
#ifndef USE_HE_FIND_VCC
		HE_LOOKUP_VCC(he_dev, cid) = NULL;
#endif
		HPRINTK("close rx cid 0x%x complete\n", cid);
	}
	if (vcc->qos.txtp.traffic_class != ATM_NONE)
	{
		volatile unsigned tsr4, tsr0;
		int timeout;
		HPRINTK("close tx cid 0x%x\n", cid);
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ.  When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */
		/* drain: back off 1, 2, 4 ... jiffies (capped at HZ),
		   giving up after MAX_RETRY iterations */
		while (((tx_inuse = atomic_read(&vcc->sk->wmem_alloc)) > 0)
							&& (retry < MAX_RETRY))
		{
			set_current_state(TASK_UNINTERRUPTIBLE);
			(void) schedule_timeout(sleep);
			set_current_state(TASK_RUNNING);
			if (sleep < HZ) sleep = sleep * 2;
			++retry;
		}
		if (tx_inuse) hprintk("close tx cid 0x%x tx_inuse = %d\n",
							cid, tx_inuse);
		/* 2.3.1.1 generic close operations with flush */
		HE_SPIN_LOCK(he_dev, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl_tsr4(he_dev, cid);
#endif
		switch(vcc->qos.txtp.traffic_class)
		{
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		/* send a final end-of-session tpd; its completion interrupt
		   wakes us on tx_waitq */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL)
		{
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		__enqueue_tpd(he_dev, tpd, cid);
		HE_SPIN_UNLOCK(he_dev, flags);
		timeout = schedule_timeout(30*HZ);
		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);
		if (timeout == 0)
		{
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		/* poll until the adapter confirms session end and the
		   connection state machine returns to idle */
		HE_SPIN_LOCK(he_dev, flags);
		while (!((tsr4 = he_readl_tsr4(he_dev, cid))
							& TSR4_SESSION_ENDED))
		{
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}
		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0)
		{
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}
close_tx_incomplete:
		/* CBR: give back the rate-controller slot and bandwidth */
		if (vcc->qos.txtp.traffic_class == ATM_CBR)
		{
			int reg = he_vcc->rc_index;
			HPRINTK("cs_stper reg = %d\n", reg);
			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;
			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		HE_SPIN_UNLOCK(he_dev, flags);
		HPRINTK("close tx cid 0x%x complete\n", cid);
	}
	kfree(he_vcc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	vcc->flags &= ~ATM_VF_ADDR;
#else
	clear_bit(ATM_VF_ADDR, &vcc->flags);
#endif
}
/*
 * he_sg_send -- atmdev_ops sg_send callback: report whether this build
 * of the driver accepts scatter/gather (fragmented) transmit buffers.
 * The arguments are ignored; the answer is a compile-time property.
 */
static int
he_sg_send(struct atm_vcc *vcc, unsigned long start, unsigned long size)
{
#ifdef USE_SCATTERGATHER
	const int sg_capable = 1;
#else
	const int sg_capable = 0;
#endif
	return sg_capable;
}
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
unsigned long flags;
struct he_dev *he_dev = HE_DEV(vcc->dev);
unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
int i, slot = 0;
#endif
#define HE_TPD_BUFSIZE 0xffff
HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
if ((skb->len > HE_TPD_BUFSIZE) ||
((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU)))
{
hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->tx_err;
#else
atomic_inc(&vcc->stats->tx_err);
#endif
return -EINVAL;
}
#ifndef USE_SCATTERGATHER
if (skb_shinfo(skb)->nr_frags)
{
hprintk1("no scatter/gather support\n");
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->tx_err;
#else
atomic_inc(&vcc->stats->tx_err);
#endif
return -EINVAL;
}
#endif
HE_SPIN_LOCK(he_dev, flags);
tpd = __alloc_tpd(he_dev);
if (tpd == NULL)
{
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->tx_err;
#else
atomic_inc(&vcc->stats->tx_err);
#endif
HE_SPIN_UNLOCK(he_dev, flags);
return -ENOMEM;
}
if (vcc->qos.aal == ATM_AAL5)
tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
else
{
char *pti_clp = (void *) (skb->data + 3);
int clp, pti;
pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
clp = (*pti_clp & ATM_HDR_CLP);
tpd->status |= TPD_CELLTYPE(pti);
if (clp) tpd->status |= TPD_CLP;
skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
}
#ifdef USE_SCATTERGATHER
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
skb->len - skb->data_len, PCI_DMA_TODEVICE);
tpd->iovec[slot].len = skb->len - skb->data_len;
++slot;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (slot == TPD_MAXIOV) /* send tpd; start new tpd */
{
tpd->vcc = vcc;
tpd->skb = NULL; /* not the last fragment
so dont ->push() yet */
wmb();
__enqueue_tpd(he_dev, tpd, cid);
tpd = __alloc_tpd(he_dev);
if (tpd == NULL)
{
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->tx_err;
#else
atomic_inc(&vcc->stats->tx_err);
HE_SPIN_UNLOCK(he_dev, flags);
#endif
return -ENOMEM;
}
tpd->status |= TPD_USERCELL;
slot = 0;
}
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
(void *) page_address(frag->page) + frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
tpd->iovec[slot].len = frag->size;
++slot;
}
tpd->iovec[slot-1].len |= TPD_LST;
#else
tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
tpd->length0 = skb->len | TPD_LST;
#endif
tpd->status |= TPD_INT;
tpd->vcc = vcc;
tpd->skb = skb;
wmb();
ATM_SKB(skb)->vcc = vcc;
__enqueue_tpd(he_dev, tpd, cid);
HE_SPIN_UNLOCK(he_dev, flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++vcc->stats->tx;
#else
atomic_inc(&vcc->stats->tx);
#endif
return 0;
}
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void *arg)
{
unsigned long flags;
struct he_dev *he_dev = HE_DEV(atm_dev);
struct he_ioctl_reg reg;
int err = 0;
switch (cmd)
{
case HE_GET_REG:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
copy_from_user(&reg, (struct he_ioctl_reg *) arg,
sizeof(struct he_ioctl_reg));
HE_SPIN_LOCK(he_dev, flags);
switch (reg.type)
{
case HE_REGTYPE_PCI:
reg.val = he_readl(he_dev, reg.addr);
break;
case HE_REGTYPE_RCM:
reg.val =
he_readl_rcm(he_dev, reg.addr);
break;
case HE_REGTYPE_TCM:
reg.val =
he_readl_tcm(he_dev, reg.addr);
break;
case HE_REGTYPE_MBOX:
reg.val =
he_readl_mbox(he_dev, reg.addr);
break;
default:
err = -EINVAL;
break;
}
HE_SPIN_UNLOCK(he_dev, flags);
if (err == 0) copy_to_user((struct he_ioctl_reg *) arg, &reg,
sizeof(struct he_ioctl_reg));
break;
default:
#ifdef CONFIG_ATM_HE_USE_SUNI
if (atm_dev->phy && atm_dev->phy->ioctl)
err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
return -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
break;
}
return err;
}
/*
 * he_phy_put -- write one byte to a PHY (framer) register.  Framer
 * registers live in the adapter's register window at FRAMER, spaced
 * one 32-bit word apart.
 */
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	struct he_dev *dev = HE_DEV(atm_dev);
	unsigned long reg_off = FRAMER + (addr*4);
	unsigned long flags;
	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
	HE_SPIN_LOCK(dev, flags);
	he_writel(dev, val, reg_off);
#ifdef CONFIG_IA64_SGI_SN2
	/* read back on SN2 builds -- presumably to force PCI write
	   ordering (see the 8.1.5 note on global_lock); confirm */
	(void) he_readl(dev, reg_off);
#endif
	HE_SPIN_UNLOCK(dev, flags);
}
/*
 * he_phy_get -- read one byte from a PHY (framer) register; the
 * counterpart of he_phy_put above.
 */
static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	struct he_dev *dev = HE_DEV(atm_dev);
	unsigned long flags;
	unsigned value;
	HE_SPIN_LOCK(dev, flags);
	value = he_readl(dev, FRAMER + (addr*4));
	HE_SPIN_UNLOCK(dev, flags);
	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, value);
	return value;
}
/*
 * he_proc_read -- /proc read handler: emits one line of status per
 * call, selected by *pos (the ATM proc core calls repeatedly with
 * increasing *pos until 0 is returned).
 *
 * NOTE(review): mcc/oec/dcc/cec are function-static, so they are
 * shared by ALL adapters and accumulate the (clear-on-read?) hardware
 * counters every time any line of this file is read -- verify that
 * the registers are clear-on-read and that multi-adapter mixing is
 * acceptable.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* running totals of the adapter's error/drop counters */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
	left = *pos;
	if (!left--)
		return sprintf(page, "%s\n", version);
	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
	HE_SPIN_LOCK(he_dev, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	HE_SPIN_UNLOCK(he_dev, flags);
	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);
	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);
	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);
	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0) inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);
	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif
	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
	/* one line per rate-controller slot */
	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);
	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
	return 0;
}
/* eeprom routines -- see 4.7 */
/*
 * read_prom_byte -- bit-bang one byte out of the serial EEPROM through
 * the ID_* bits of HOST_CNTL.
 *
 * Sequence: enable the write line (bit 0x800 = ID_WREN), clock out the
 * READ opcode from readtab[], clock out the 8 address bits MSB first
 * on ID_DIN (bit 9), drop write enable, then clock in 8 data bits MSB
 * first, sampling ID_DOUT (bit 10) after each rising edge.  Each
 * half-clock is paced with udelay(EEPROM_DELAY).
 */
u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;
	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;	/* clear the EEPROM control bits (8..12) */
	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);
	/* Send READ instruction */
	for (i=0; i<sizeof(readtab)/sizeof(readtab[0]); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
	/* Next, we need to send the byte address to read from */
	for (i=7; i>=0; i--) {
		/* two writes per bit: clocktab alternates the clock level */
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
	j=0;
	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);
	/* Now, we can read data from the EEPROM by clocking it in */
	for (i=7; i>=0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		/* sample the data-out bit and merge it into position i */
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT)
				>> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
	/* deselect the chip */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);
	return (byte_read);
}
/* module metadata and load-time parameters */
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
MODULE_PARM(disable64, "h");
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
MODULE_PARM(nvpibits, "i");
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
MODULE_PARM(nvcibits, "i");
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
MODULE_PARM(rx_skb_reserve, "i");
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
MODULE_PARM(irq_coalesce, "i");
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
MODULE_PARM(sdh, "i");
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,1)
/* 2.4+ style: register a pci_driver and let the PCI core probe */
static struct pci_device_id he_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};
static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,9)
	.remove =	__devexit_p(he_remove_one),
#else
	.remove =	he_remove_one,
#endif
	.id_table =	he_pci_tbl,
};
static int __init he_init(void)
{
	return pci_module_init(&he_driver);
}
static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}
module_init(he_init);
module_exit(he_cleanup);
#else
/* pre-2.3.1 style: walk the PCI bus by hand and probe each match */
static int __init
he_init()
{
	if (!pci_present())
		return -EIO;
#ifdef CONFIG_ATM_HE_USE_SUNI_MODULE
	/* request_module("suni"); */
#endif
	pci_dev = NULL;
	while ((pci_dev = pci_find_device(PCI_VENDOR_ID_FORE,
			PCI_DEVICE_ID_FORE_HE, pci_dev)) != NULL)
		if (he_init_one(pci_dev, NULL) == 0)
			++ndevs;
	/* succeed only if at least one adapter came up */
	return (ndevs ? 0 : -ENODEV);
}
static void __devexit
he_cleanup(void)
{
	/* stop, deregister and free every probed adapter */
	while (he_devs)
	{
		struct he_dev *next = he_devs->next;
		he_stop(he_devs);
		atm_dev_deregister(he_devs->atm_dev);
		kfree(he_devs);
		he_devs = next;
	}
}
int init_module(void)
{
	return he_init();
}
void cleanup_module(void)
{
	he_cleanup();
}
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,7)
MODULE_LICENSE("GPL");
#endif
/* $Id: he.h,v 1.4 2003/05/06 22:48:00 chas Exp $ */
/*
he.h
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001 Naval Research Laboratory
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
he.h
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2000 Naval Research Laboratory
Permission to use, copy, modify and distribute this software and its
documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the software,
derivative works or modified versions, and any portions thereof, and
that both notices appear in supporting documentation.
NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
RESULTING FROM THE USE OF THIS SOFTWARE.
*/
#ifndef _HE_H_
#define _HE_H_
#define DEV_LABEL			"he"
#define CONFIG_DEFAULT_VCIBITS	12
#define CONFIG_DEFAULT_VPIBITS	0
/* queue sizes are entry counts; the *_MASK macros mask a BYTE offset,
   hence the <<3 (8-byte entries) or <<2 (4-byte entries) shifts */
#define CONFIG_IRQ_SIZE		128
#define CONFIG_IRQ_THRESH	(CONFIG_IRQ_SIZE/2)
#define CONFIG_NUMTPDS		256
#define CONFIG_TPDRQ_SIZE	512
#define TPDRQ_MASK(x)		(((unsigned long)(x))&((CONFIG_TPDRQ_SIZE<<3)-1))
#define CONFIG_RBRQ_SIZE	512
#define CONFIG_RBRQ_THRESH	400
#define RBRQ_MASK(x)		(((unsigned long)(x))&((CONFIG_RBRQ_SIZE<<3)-1))
#define CONFIG_TBRQ_SIZE	512
#define CONFIG_TBRQ_THRESH	400
#define TBRQ_MASK(x)		(((unsigned long)(x))&((CONFIG_TBRQ_SIZE<<2)-1))
/* receive buffer pools: large (rbpl) and small (rbps) */
#define CONFIG_RBPL_SIZE	512
#define CONFIG_RBPL_THRESH	64
#define CONFIG_RBPL_BUFSIZE	4096
#define RBPL_MASK(x)		(((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1))
#define CONFIG_RBPS_SIZE	1024
#define CONFIG_RBPS_THRESH	64
#define CONFIG_RBPS_BUFSIZE	128
#define RBPS_MASK(x)		(((unsigned long)(x))&((CONFIG_RBPS_SIZE<<3)-1))
/* 5.1.3 initialize connection memory */
#define CONFIG_RSRA		0x00000
#define CONFIG_RCMLBM		0x08000
#define CONFIG_RCMABR		0x0d800
#define CONFIG_RSRB		0x0e000
#define CONFIG_TSRA		0x00000
#define CONFIG_TSRB		0x08000
#define CONFIG_TSRC		0x0c000
#define CONFIG_TSRD		0x0e000
#define CONFIG_TMABR		0x0f000
#define CONFIG_TPDBA		0x10000
#define HE_MAXCIDBITS		12
/* 2.9.3.3 interrupt encodings */
/* one interrupt status word per irq queue entry */
struct he_irq {
	volatile u32 isw;
};
#define IRQ_ALIGNMENT		0x1000
#define NEXT_ENTRY(base, tail, mask) \
				(((unsigned long)base)|(((unsigned long)(tail+1))&mask))
#define ITYPE_INVALID		0xffffffff
#define ITYPE_TBRQ_THRESH	(0<<3)
#define ITYPE_TPD_COMPLETE	(1<<3)
#define ITYPE_RBPS_THRESH	(2<<3)
#define ITYPE_RBPL_THRESH	(3<<3)
#define ITYPE_RBRQ_THRESH	(4<<3)
#define ITYPE_RBRQ_TIMER	(5<<3)
#define ITYPE_PHY		(6<<3)
#define ITYPE_OTHER		0x80
#define ITYPE_PARITY		0x81
#define ITYPE_ABORT		0x82
/* low 3 bits of an isw select the group, upper bits the type */
#define ITYPE_GROUP(x)		(x & 0x7)
#define ITYPE_TYPE(x)		(x & 0xf8)
#define HE_NUM_GROUPS 8
/* 2.1.4 transmit packet descriptor */
struct he_tpd {
	/* read by the adapter */
	volatile u32 status;
	volatile u32 reserved;
#define TPD_MAXIOV	3
	struct {
		u32 addr, len;
	} iovec[TPD_MAXIOV];
/* NOTE(review): unhygienic macros -- any member named address0/length0
   anywhere after this point silently expands to iovec[0] */
#define address0 iovec[0].addr
#define length0 iovec[0].len
	/* linux-atm extensions */
	struct sk_buff *skb;
	struct atm_vcc *vcc;
#ifdef USE_TPD_POOL
	struct list_head entry;
#else
	u32 inuse;
	char padding[32 - sizeof(u32) - (2*sizeof(void*))];
#endif
};
/* a tpd's dma address is 64-byte aligned, so its low 6 bits are free
   to carry status; TPD_ADDR/TPD_INDEX recover the address/slot */
#define TPD_ALIGNMENT	64
#define TPD_LEN_MASK	0xffff
#define TPD_ADDR_SHIFT	6
#define TPD_MASK	0xffffffc0
#define TPD_ADDR(x)	((x) & TPD_MASK)
#define TPD_INDEX(x)	(TPD_ADDR(x) >> TPD_ADDR_SHIFT)
/* table 2.3 transmit buffer return elements */
struct he_tbrq {
	volatile u32 tbre;
};
#define TBRQ_ALIGNMENT	CONFIG_TBRQ_SIZE
#define TBRQ_TPD(tbrq)		((tbrq)->tbre & 0xffffffc0)
#define TBRQ_EOS(tbrq)		((tbrq)->tbre & (1<<3))
#define TBRQ_MULTIPLE(tbrq)	((tbrq)->tbre & (1))
/* table 2.21 receive buffer return queue element field organization */
struct he_rbrq {
	volatile u32 addr;
	volatile u32 cidlen;
};
#define RBRQ_ALIGNMENT	CONFIG_RBRQ_SIZE
/* status flags ride in the low bits of the (64-byte aligned) address */
#define RBRQ_ADDR(rbrq)		((rbrq)->addr & 0xffffffc0)
#define RBRQ_CRC_ERR(rbrq)	((rbrq)->addr & (1<<5))
#define RBRQ_LEN_ERR(rbrq)	((rbrq)->addr & (1<<4))
#define RBRQ_END_PDU(rbrq)	((rbrq)->addr & (1<<3))
#define RBRQ_AAL5_PROT(rbrq)	((rbrq)->addr & (1<<2))
#define RBRQ_CON_CLOSED(rbrq)	((rbrq)->addr & (1<<1))
#define RBRQ_HBUF_ERR(rbrq)	((rbrq)->addr & 1)
#define RBRQ_CID(rbrq)		(((rbrq)->cidlen >> 16) & 0x1fff)
#define RBRQ_BUFLEN(rbrq)	((rbrq)->cidlen & 0xffff)
/* figure 2.3 transmit packet descriptor ready queue */
struct he_tpdrq {
	volatile u32 tpd;
	volatile u32 cid;
};
#define TPDRQ_ALIGNMENT CONFIG_TPDRQ_SIZE
/* table 2.30 host status page detail */
#define HSP_ALIGNMENT	0x400		/* must align on 1k boundary */
struct he_hsp {
	struct he_hsp_entry {
		volatile u32 tbrq_tail;
		volatile u32 reserved1[15];
		volatile u32 rbrq_tail;
		volatile u32 reserved2[15];
	} group[HE_NUM_GROUPS];
};
/* figure 2.9 receive buffer pools */
struct he_rbp {
	volatile u32 phys;
	volatile u32 status;
};
/* NOTE: it is suggested that virt be the virtual address of the host
   buffer.  on a 64-bit machine, this would not work.  Instead, we
   store the real virtual address in another list, and store an index
   (and buffer status) in the virt member.
*/
#define RBP_INDEX_OFF	6
#define RBP_INDEX(x)	(((long)(x) >> RBP_INDEX_OFF) & 0xffff)
#define RBP_LOANED	0x80000000
#define RBP_SMALLBUF	0x40000000
/* side table mapping an rbp index back to the host virtual address */
struct he_virt {
	void *virt;
};
#define RBPL_ALIGNMENT CONFIG_RBPL_SIZE
#define RBPS_ALIGNMENT CONFIG_RBPS_SIZE
#ifdef notyet
struct he_group {
	u32 rpbs_size, rpbs_qsize;
	struct he_rbp rbps_ba;
	u32 rpbl_size, rpbl_qsize;
	struct he_rpb_entry *rbpl_ba;
};
#endif
/* cid -> open vcc lookup (used unless USE_HE_FIND_VCC) */
#define HE_LOOKUP_VCC(dev, cid) ((dev)->he_vcc_table[(cid)].vcc)
struct he_vcc_table
{
	struct atm_vcc *vcc;
};
/* one CBR rate-controller (CS_STPER) slot: its period and how many
   open connections share it */
struct he_cs_stper
{
	long pcr;
	int inuse;
};
#define HE_NUM_CS_STPER	16
/* per-adapter driver state, hung off atm_dev->dev_data (see HE_DEV) */
struct he_dev {
	unsigned int number;
	unsigned int irq;
	unsigned long membase;		/* mapped PCI register window */
	char prod_id[30];
	char mac_addr[6];
	int media;			/*
					 * 0x26 = HE155 MM
					 * 0x27 = HE622 MM
					 * 0x46 = HE155 SM
					 * 0x47 = HE622 SM
					 */
	unsigned int vcibits, vpibits;
	/* local buffer memory geometry */
	unsigned int cells_per_row;
	unsigned int bytes_per_row;
	unsigned int cells_per_lbuf;
	unsigned int r0_numrows, r0_startrow, r0_numbuffs;
	unsigned int r1_numrows, r1_startrow, r1_numbuffs;
	unsigned int tx_numrows, tx_startrow, tx_numbuffs;
	unsigned int buffer_limit;
	struct he_vcc_table *he_vcc_table;	/* cid -> vcc lookup */
#ifdef notyet
	struct he_group group[HE_NUM_GROUPS];
#endif
	/* CBR rate-controller slots and total reserved bandwidth */
	struct he_cs_stper cs_stper[HE_NUM_CS_STPER];
	unsigned total_bw;
	/* interrupt queue */
	dma_addr_t irq_phys;
	struct he_irq *irq_base, *irq_head, *irq_tail;
	volatile unsigned *irq_tailoffset;
	int irq_peak;
#ifdef USE_TASKLET
	struct tasklet_struct tasklet;
#endif
	/* transmit packet descriptors */
#ifdef USE_TPD_POOL
	struct pci_pool *tpd_pool;
	struct list_head outstanding_tpds;
#else
	struct he_tpd *tpd_head, *tpd_base, *tpd_end;
	dma_addr_t tpd_base_phys;
#endif
	/* transmit packet descriptor ready queue */
	dma_addr_t tpdrq_phys;
	struct he_tpdrq *tpdrq_base, *tpdrq_tail, *tpdrq_head;
	spinlock_t global_lock;		/* 8.1.5 pci transaction ordering
					   error problem */
	/* receive buffer return queue */
	dma_addr_t rbrq_phys;
	struct he_rbrq *rbrq_base, *rbrq_head;
	int rbrq_peak;
	/* large receive buffer pool */
#ifdef USE_RBPL_POOL
	struct pci_pool *rbpl_pool;
#else
	void *rbpl_pages;
	dma_addr_t rbpl_pages_phys;
#endif
	dma_addr_t rbpl_phys;
	struct he_rbp *rbpl_base, *rbpl_tail;
	struct he_virt *rbpl_virt;
	int rbpl_peak;
	/* small receive buffer pool (optional) */
#ifdef USE_RBPS
#ifdef USE_RBPS_POOL
	struct pci_pool *rbps_pool;
#else
	void *rbps_pages;
	dma_addr_t rbps_pages_phys;
#endif
#endif
	dma_addr_t rbps_phys;
	struct he_rbp *rbps_base, *rbps_tail;
	struct he_virt *rbps_virt;
	int rbps_peak;
	/* transmit buffer return queue */
	dma_addr_t tbrq_phys;
	struct he_tbrq *tbrq_base, *tbrq_head;
	int tbrq_peak;
	/* host status page */
	dma_addr_t hsp_phys;
	struct he_hsp *hsp;
	struct pci_dev *pci_dev;
	struct atm_dev *atm_dev;
	struct he_dev *next;		/* singly-linked list of adapters */
};
/*
 * Per-connection (VCC) driver private state, hung off vcc->dev_data
 * (see HE_VCC below).
 *
 * BUG FIX: the pre-2.3.1 tx_waitq declaration read "atruct wait_queue"
 * -- a typo that made this branch fail to compile; corrected to
 * "struct".
 */
struct he_vcc
{
	struct iovec iov_head[32];	/* rx reassembly scatter list */
	struct iovec *iov_tail;
	int pdu_len;			/* bytes gathered for the current rx pdu */
	int rc_index;			/* CBR cs_stper slot, -1 = none */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	struct wait_queue *rx_waitq;
	struct wait_queue *tx_waitq;
#else
	wait_queue_head_t rx_waitq;	/* woken when rx close completes */
	wait_queue_head_t tx_waitq;	/* woken when tx close completes */
#endif
};
#define HE_VCC(vcc)	((struct he_vcc *)(vcc->dev_data))
#define PCI_VENDOR_ID_FORE	0x1127
#define PCI_DEVICE_ID_FORE_HE	0x400
#define HE_DMA_MASK	0xffffffff
/* pci configuration-space general control register and its bits */
#define GEN_CNTL_0	0x40
#define  INT_PROC_ENBL		(1<<25)
#define  SLAVE_ENDIAN_MODE	(1<<16)
#define  MRL_ENB		(1<<5)
#define  MRM_ENB		(1<<4)
#define  INIT_ENB		(1<<2)
#define  IGNORE_TIMEOUT		(1<<1)
#define  ENBL_64		(1<<0)
#define MIN_PCI_LATENCY	32	/* errata 8.1.3 */
#define HE_DEV(dev)	((struct he_dev *) (dev)->dev_data)
#define he_is622(dev)	((dev)->media & 0x1)
/* memory-mapped register window (offsets from membase) */
#define HE_REGMAP_SIZE	0x100000
#define RESET_CNTL	0x80000
#define  BOARD_RST_STATUS	(1<<6)
/* HOST_CNTL: bits 8..12 also drive the serial EEPROM interface
   (see read_prom_byte in he.c) */
#define HOST_CNTL	0x80004
#define  PCI_BUS_SIZE64		(1<<27)
#define  DESC_RD_STATIC_64	(1<<26)
#define  DATA_RD_STATIC_64	(1<<25)
#define  DATA_WR_STATIC_64	(1<<24)
#define  ID_CS			(1<<12)
#define  ID_WREN		(1<<11)
#define  ID_DOUT		(1<<10)
#define   ID_DOFFSET		10
#define  ID_DIN			(1<<9)
#define  ID_CLOCK		(1<<8)
#define  QUICK_RD_RETRY		(1<<7)
#define  QUICK_WR_RETRY		(1<<6)
#define  OUTFF_ENB		(1<<5)
#define  CMDFF_ENB		(1<<4)
#define  PERR_INT_ENB		(1<<2)
#define  IGNORE_INTR		(1<<0)
#define LB_SWAP		0x80008
#define  SWAP_RNUM_MAX(x)	(x<<27)
#define  DATA_WR_SWAP		(1<<20)
#define  DESC_RD_SWAP		(1<<19)
#define  DATA_RD_SWAP		(1<<18)
#define  INTR_SWAP		(1<<17)
#define  DESC_WR_SWAP		(1<<16)
#define  SDRAM_INIT		(1<<15)
#define  BIG_ENDIAN_HOST	(1<<14)
#define  XFER_SIZE		(1<<7)
/* local (on-board) memory access window */
#define LB_MEM_ADDR	0x8000c
#define LB_MEM_DATA	0x80010
#define LB_MEM_ACCESS	0x80014
#define  LB_MEM_HNDSHK		(1<<30)
#define  LM_MEM_WRITE		(0x7)
#define  LM_MEM_READ		(0x3)
#define SDRAM_CTL	0x80018
#define  LB_64_ENB		(1<<3)
#define  LB_TWR			(1<<2)
#define  LB_TRP			(1<<1)
#define  LB_TRAS		(1<<0)
#define INT_FIFO	0x8001c
#define  INT_MASK_D		(1<<15)
#define  INT_MASK_C		(1<<14)
#define  INT_MASK_B		(1<<13)
#define  INT_MASK_A		(1<<12)
#define  INT_CLEAR_D		(1<<11)
#define  INT_CLEAR_C		(1<<10)
#define  INT_CLEAR_B		(1<<9)
#define  INT_CLEAR_A		(1<<8)
#define ABORT_ADDR	0x80020
/* interrupt queue 0..3 registers */
#define IRQ0_BASE	0x80080
#define  IRQ_BASE(x)		(x<<12)
#define  IRQ_MASK		((CONFIG_IRQ_SIZE<<2)-1)	/* was 0x3ff */
#define  IRQ_TAIL(x)		(((unsigned long)(x)) & IRQ_MASK)
#define IRQ0_HEAD	0x80084
#define  IRQ_SIZE(x)		(x<<22)
#define  IRQ_THRESH(x)		(x<<12)
#define  IRQ_HEAD(x)		(x<<2)
/* #define IRQ_PENDING		(1)  conflict with linux/irq.h */
#define IRQ0_CNTL	0x80088
#define  IRQ_ADDRSEL(x)		(x<<2)
#define  IRQ_INT_A		(0<<2)
#define  IRQ_INT_B		(1<<2)
#define  IRQ_INT_C		(2<<2)
#define  IRQ_INT_D		(3<<2)
#define  IRQ_TYPE_ADDR		0x1
#define  IRQ_TYPE_LINE		0x0
#define IRQ0_DATA	0x8008c
#define IRQ1_BASE	0x80090
#define IRQ1_HEAD	0x80094
#define IRQ1_CNTL	0x80098
#define IRQ1_DATA	0x8009c
#define IRQ2_BASE	0x800a0
#define IRQ2_HEAD	0x800a4
#define IRQ2_CNTL	0x800a8
#define IRQ2_DATA	0x800ac
#define IRQ3_BASE	0x800b0
#define IRQ3_HEAD	0x800b4
#define IRQ3_CNTL	0x800b8
#define IRQ3_DATA	0x800bc
#define GRP_10_MAP	0x800c0
#define GRP_32_MAP	0x800c4
#define GRP_54_MAP	0x800c8
#define GRP_76_MAP	0x800cc
/* receive buffer pool registers, small (RBPS) and large (RBPL),
   one set per group 0..7 */
#define G0_RBPS_S	0x80400
#define G0_RBPS_T	0x80404
#define  RBP_TAIL(x)		((x)<<3)
#define  RBP_MASK(x)		((x)|0x1fff)
#define G0_RBPS_QI	0x80408
#define  RBP_QSIZE(x)		((x)<<14)
#define  RBP_INT_ENB		(1<<13)
#define  RBP_THRESH(x)		(x)
#define G0_RBPS_BS	0x8040c
#define G0_RBPL_S	0x80410
#define G0_RBPL_T	0x80414
#define G0_RBPL_QI	0x80418
#define G0_RBPL_BS	0x8041c
#define G1_RBPS_S	0x80420
#define G1_RBPS_T	0x80424
#define G1_RBPS_QI	0x80428
#define G1_RBPS_BS	0x8042c
#define G1_RBPL_S	0x80430
#define G1_RBPL_T	0x80434
#define G1_RBPL_QI	0x80438
#define G1_RBPL_BS	0x8043c
#define G2_RBPS_S	0x80440
#define G2_RBPS_T	0x80444
#define G2_RBPS_QI	0x80448
#define G2_RBPS_BS	0x8044c
#define G2_RBPL_S	0x80450
#define G2_RBPL_T	0x80454
#define G2_RBPL_QI	0x80458
#define G2_RBPL_BS	0x8045c
#define G3_RBPS_S	0x80460
#define G3_RBPS_T	0x80464
#define G3_RBPS_QI	0x80468
#define G3_RBPS_BS	0x8046c
#define G3_RBPL_S	0x80470
#define G3_RBPL_T	0x80474
#define G3_RBPL_QI	0x80478
#define G3_RBPL_BS	0x8047c
#define G4_RBPS_S	0x80480
#define G4_RBPS_T	0x80484
#define G4_RBPS_QI	0x80488
#define G4_RBPS_BS	0x8048c
#define G4_RBPL_S	0x80490
#define G4_RBPL_T	0x80494
#define G4_RBPL_QI	0x80498
#define G4_RBPL_BS	0x8049c
#define G5_RBPS_S	0x804a0
#define G5_RBPS_T	0x804a4
#define G5_RBPS_QI	0x804a8
#define G5_RBPS_BS	0x804ac
#define G5_RBPL_S	0x804b0
#define G5_RBPL_T	0x804b4
#define G5_RBPL_QI	0x804b8
#define G5_RBPL_BS	0x804bc
#define G6_RBPS_S	0x804c0
#define G6_RBPS_T	0x804c4
#define G6_RBPS_QI	0x804c8
#define G6_RBPS_BS	0x804cc
#define G6_RBPL_S	0x804d0
#define G6_RBPL_T	0x804d4
#define G6_RBPL_QI	0x804d8
#define G6_RBPL_BS	0x804dc
#define G7_RBPS_S	0x804e0
#define G7_RBPS_T	0x804e4
#define G7_RBPS_QI	0x804e8
#define G7_RBPS_BS	0x804ec
#define G7_RBPL_S	0x804f0
#define G7_RBPL_T	0x804f4
#define G7_RBPL_QI	0x804f8
#define G7_RBPL_BS	0x804fc
/* receive buffer return queue registers (group 0) */
#define G0_RBRQ_ST	0x80500
#define G0_RBRQ_H	0x80504
#define G0_RBRQ_Q	0x80508
#define  RBRQ_THRESH(x)		((x)<<13)
#define  RBRQ_SIZE(x)		(x)
#define G0_RBRQ_I	0x8050c
#define  RBRQ_TIME(x)		((x)<<8)
#define  RBRQ_COUNT(x)		(x)
/* fill in 1 ... 7 later */
/* transmit buffer return queue registers (group 0) */
#define G0_TBRQ_B_T	0x80600
#define G0_TBRQ_H	0x80604
#define G0_TBRQ_S	0x80608
#define G0_TBRQ_THRESH	0x8060c
#define  TBRQ_THRESH(x)		(x)
/* fill in 1 ... 7 later */
#define RH_CONFIG	0x805c0
#define  PHY_INT_ENB	(1<<10)
#define  OAM_GID(x)	(x<<7)
#define  PTMR_PRE(x)	(x)
/* interrupt message queues, one start/limit pair per group */
#define G0_INMQ_S	0x80580
#define G0_INMQ_L	0x80584
#define G1_INMQ_S	0x80588
#define G1_INMQ_L	0x8058c
#define G2_INMQ_S	0x80590
#define G2_INMQ_L	0x80594
#define G3_INMQ_S	0x80598
#define G3_INMQ_L	0x8059c
#define G4_INMQ_S	0x805a0
#define G4_INMQ_L	0x805a4
#define G5_INMQ_S	0x805a8
#define G5_INMQ_L	0x805ac
#define G6_INMQ_S	0x805b0
#define G6_INMQ_L	0x805b4
#define G7_INMQ_S	0x805b8
#define G7_INMQ_L	0x805bc
/* transmit packet descriptor ready queue registers */
#define TPDRQ_B_H	0x80680
#define TPDRQ_T		0x80684
#define TPDRQ_S		0x80688
#define UBUFF_BA	0x8068c
/* local buffer free-list head/tail registers */
#define RLBF0_H		0x806c0
#define RLBF0_T		0x806c4
#define RLBF1_H		0x806c8
#define RLBF1_T		0x806cc
#define RLBC_H		0x806d0
#define RLBC_T		0x806d4
#define RLBC_H2		0x806d8
#define TLBF_H		0x806e0
#define TLBF_T		0x806e4
#define RLBF0_C		0x806e8
#define RLBF1_C		0x806ec
#define RXTHRSH		0x806f0
#define LITHRSH		0x806f4
/* local bus arbitration */
#define LBARB		0x80700
#define  SLICE_X(x)		(x<<28)
#define  ARB_RNUM_MAX(x)	(x<<23)
#define  TH_PRTY(x)		(x<<21)
#define  RH_PRTY(x)		(x<<19)
#define  TL_PRTY(x)		(x<<17)
#define  RL_PRTY(x)		(x<<15)
#define  BUS_MULTI(x)		(x<<8)
#define  NET_PREF(x)		(x)
#define SDRAMCON	0x80704
#define  BANK_ON		(1<<14)
#define  WIDE_DATA		(1<<13)
#define  TWR_WAIT		(1<<12)
#define  TRP_WAIT		(1<<11)
#define  TRAS_WAIT		(1<<10)
#define  REF_RATE(x)		(x)
#define LBSTAT		0x80708
#define RCC_STAT 0x8070c
#define RCC_BUSY (1)
#define TCMCONFIG 0x80740
#define TM_DESL2 (1<<10)
#define TM_BANK_WAIT(x) (x<<6)
#define TM_ADD_BANK4(x) (x<<4)
#define TM_PAR_CHECK(x) (x<<3)
#define TM_RW_WAIT(x) (x<<2)
#define TM_SRAM_TYPE(x) (x)
#define TSRB_BA 0x80744
#define TSRC_BA 0x80748
#define TMABR_BA 0x8074c
#define TPD_BA 0x80750
#define TSRD_BA 0x80758
#define TX_CONFIG 0x80760
#define DRF_THRESH(x) (x<<22)
#define TX_UT_MODE(x) (x<<21)
#define TX_VCI_MASK(x) (x<<17)
#define LBFREE_CNT(x) (x)
#define TXAAL5_PROTO 0x80764
#define CPCS_UU(x) (x<<8)
#define CPI(x) (x)
#define RCMCONFIG 0x80780
#define RM_DESL2(x) (x<<10)
#define RM_BANK_WAIT(x) (x<<6)
#define RM_ADD_BANK(x) (x<<4)
#define RM_PAR_CHECK(x) (x<<3)
#define RM_RW_WAIT(x) (x<<2)
#define RM_SRAM_TYPE(x) (x)
#define RCMRSRB_BA 0x80784
#define RCMLBM_BA 0x80788
#define RCMABR_BA 0x8078c
#define RC_CONFIG 0x807c0
#define UT_RD_DELAY(x) (x<<11)
#define WRAP_MODE(x) (x<<10)
#define RC_UT_MODE(x) (x<<9)
#define RX_ENABLE (1<<8)
#define RX_VALVP(x) (x<<4)
#define RX_VALVC(x) (x)
#define MCC 0x807c4
#define OEC 0x807c8
#define DCC 0x807cc
#define CEC 0x807d0
#define HSP_BA 0x807f0
#define LB_CONFIG 0x807f4
#define LB_SIZE(x) (x)
#define CON_DAT 0x807f8
#define CON_CTL 0x807fc
#define CON_CTL_MBOX (2<<30)
#define CON_CTL_TCM (1<<30)
#define CON_CTL_RCM (0<<30)
#define CON_CTL_WRITE (1<<29)
#define CON_CTL_READ (0<<29)
#define CON_CTL_BUSY (1<<28)
#define CON_BYTE_DISABLE_3 (1<<22) /* 24..31 */
#define CON_BYTE_DISABLE_2 (1<<21) /* 16..23 */
#define CON_BYTE_DISABLE_1 (1<<20) /* 8..15 */
#define CON_BYTE_DISABLE_0 (1<<19) /* 0..7 */
#define CON_CTL_ADDR(x) (x)
#define FRAMER 0x80800 /* to 0x80bfc */
/* 3.3 network controller (internal) mailbox registers */
#define CS_STPER0 0x0
/* ... */
#define CS_STPER31 0x01f
#define CS_STTIM0 0x020
/* ... */
#define CS_STTIM31 0x03f
#define CS_TGRLD0 0x040
/* ... */
#define CS_TGRLD15 0x04f
#define CS_ERTHR0 0x050
#define CS_ERTHR1 0x051
#define CS_ERTHR2 0x052
#define CS_ERTHR3 0x053
#define CS_ERTHR4 0x054
#define CS_ERCTL0 0x055
#define TX_ENABLE (1<<28)
#define ER_ENABLE (1<<27)
#define CS_ERCTL1 0x056
#define CS_ERCTL2 0x057
#define CS_ERSTAT0 0x058
#define CS_ERSTAT1 0x059
#define CS_RTCCT 0x060
#define CS_RTFWC 0x061
#define CS_RTFWR 0x062
#define CS_RTFTC 0x063
#define CS_RTATR 0x064
#define CS_TFBSET 0x070
#define CS_TFBADD 0x071
#define CS_TFBSUB 0x072
#define CS_WCRMAX 0x073
#define CS_WCRMIN 0x074
#define CS_WCRINC 0x075
#define CS_WCRDEC 0x076
#define CS_WCRCEIL 0x077
#define CS_BWDCNT 0x078
#define CS_OTPPER 0x080
#define CS_OTWPER 0x081
#define CS_OTTLIM 0x082
#define CS_OTTCNT 0x083
#define CS_HGRRT0 0x090
/* ... */
#define CS_HGRRT7 0x097
#define CS_ORPTRS 0x0a0
#define RXCON_CLOSE 0x100
#define RCM_MEM_SIZE 0x10000 /* 1M of 32-bit registers */
#define TCM_MEM_SIZE 0x20000 /* 2M of 32-bit registers */
/* 2.5 transmit connection memory registers */
#define TSR0_CONN_STATE(x) ((x>>28) & 0x7)
#define TSR0_USE_WMIN (1<<23)
#define TSR0_GROUP(x) ((x & 0x7)<<18)
#define TSR0_ABR (2<<16)
#define TSR0_UBR (1<<16)
#define TSR0_CBR (0<<16)
#define TSR0_PROT (1<<15)
#define TSR0_AAL0_SDU (2<<12)
#define TSR0_AAL0 (1<<12)
#define TSR0_AAL5 (0<<12)
#define TSR0_HALT_ER (1<<11)
#define TSR0_MARK_CI (1<<10)
#define TSR0_MARK_ER (1<<9)
#define TSR0_UPDATE_GER (1<<8)
#define TSR0_RC_INDEX(x) (x & 0x1F)
#define TSR1_PCR(x) ((x & 0x7FFF)<<16)
#define TSR1_MCR(x) (x & 0x7FFF)
#define TSR2_ACR(x) ((x & 0x7FFF)<<16)
#define TSR3_NRM_CNT(x) ((x & 0xFF)<<24)
#define TSR3_CRM_CNT(x) (x & 0xFFFF)
#define TSR4_FLUSH_CONN (1<<31)
#define TSR4_SESSION_ENDED (1<<30)
#define TSR4_CRC10 (1<<28)
#define TSR4_NULL_CRC10 (1<<27)
#define TSR4_PROT (1<<26)
#define TSR4_AAL0_SDU (2<<23)
#define TSR4_AAL0 (1<<23)
#define TSR4_AAL5 (0<<23)
#define TSR9_OPEN_CONN (1<<20)
#define TSR11_ICR(x) ((x & 0x7FFF)<<16)
#define TSR11_TRM(x) ((x & 0x7)<<13)
#define TSR11_NRM(x) ((x & 0x7)<<10)
#define TSR11_ADTF(x) (x & 0x3FF)
#define TSR13_RDF(x) ((x & 0xF)<<23)
#define TSR13_RIF(x) ((x & 0xF)<<19)
#define TSR13_CDF(x) ((x & 0x7)<<16)
#define TSR13_CRM(x) (x & 0xFFFF)
#define TSR14_DELETE (1<<31)
#define TSR14_ABR_CLOSE (1<<16)
/* 2.7.1 per connection receive state registers */
#define RSR0_START_PDU (1<<10)
#define RSR0_OPEN_CONN (1<<6)
#define RSR0_CLOSE_CONN (0<<6)
#define RSR0_PPD_ENABLE (1<<5)
#define RSR0_EPD_ENABLE (1<<4)
#define RSR0_TCP_CKSUM (1<<3)
#define RSR0_AAL5 (0)
#define RSR0_AAL0 (1)
#define RSR0_AAL0_SDU (2)
#define RSR0_RAWCELL (3)
#define RSR0_RAWCELL_CRC10 (4)
#define RSR1_AQI_ENABLE (1<<20)
#define RSR1_RBPL_ONLY (1<<19)
#define RSR1_GROUP(x) ((x)<<16)
#define RSR4_AQI_ENABLE (1<<30)
#define RSR4_GROUP(x) ((x)<<27)
#define RSR4_RBPL_ONLY (1<<26)
/* 2.1.4 transmit packet descriptor */
#define TPD_USERCELL 0x0
#define TPD_SEGMENT_OAMF5 0x4
#define TPD_END2END_OAMF5 0x5
#define TPD_RMCELL 0x6
#define TPD_CELLTYPE(x) (x<<3)
#define TPD_EOS (1<<2)
#define TPD_CLP (1<<1)
#define TPD_INT (1<<0)
#define TPD_LST (1<<31)
/* table 4.3 serial eeprom information */
#define PROD_ID 0x08 /* char[] */
#define PROD_ID_LEN 30
#define HW_REV 0x26 /* char[] */
#define M_SN 0x3a /* integer */
#define MEDIA 0x3e /* integer */
#define HE155MM 0x26
#define HE155SM 0x27
#define HE622MM 0x46
#define HE622SM 0x47
#define MAC_ADDR 0x42 /* char[] */
#define CS_LOW 0x0
#define CS_HIGH ID_CS /* HOST_CNTL_ID_PROM_SEL */
#define CLK_LOW 0x0
#define CLK_HIGH ID_CLOCK /* HOST_CNTL_ID_PROM_CLOCK */
#define SI_HIGH ID_DIN /* HOST_CNTL_ID_PROM_DATA_IN */
#define EEPROM_DELAY 400 /* microseconds */
/* Read from EEPROM = 0000 0011b */
/* Bit-bang sequence that clocks the READ opcode (0000 0011b) into the
 * serial EEPROM: each CLK_LOW/CLK_HIGH pair is one bit cell, with
 * SI_HIGH driving the data-in line for the '1' bits. */
unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,	/* raise select and clock */
	CS_LOW | CLK_LOW,	/* drop both to begin the opcode */
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
/* Clock to read from/write to the EEPROM */
/* One entry per clock edge; stepping through the table toggles the
 * EEPROM clock line eight full cycles, ending with the clock low. */
unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
#endif /* _HE_H_ */
/* atm_he.h */
#ifndef LINUX_ATM_HE_H
#define LINUX_ATM_HE_H
#include <linux/atmioc.h>
#define HE_GET_REG _IOW('a', ATMIOC_SARPRV, struct atmif_sioc)
#define HE_REGTYPE_PCI 1
#define HE_REGTYPE_RCM 2
#define HE_REGTYPE_TCM 3
#define HE_REGTYPE_MBOX 4
/* Argument block for the HE_GET_REG ioctl: select a register space with
 * 'type' (one of HE_REGTYPE_*), pass the register offset in 'addr' and
 * receive the register contents back in 'val'. */
struct he_ioctl_reg {
	unsigned addr, val;	/* register offset in; value out */
	char type;		/* HE_REGTYPE_PCI / _RCM / _TCM / _MBOX */
};
#endif /* LINUX_ATM_HE_H */
#ifndef _LINUX_ATMBR2684_H
#define _LINUX_ATMBR2684_H
#include <linux/atm.h>
#include <linux/if.h> /* For IFNAMSIZ */
/*
* Type of media we're bridging (ethernet, token ring, etc) Currently only
* ethernet is supported
*/
#define BR2684_MEDIA_ETHERNET (0) /* 802.3 */
#define BR2684_MEDIA_802_4 (1) /* 802.4 */
#define BR2684_MEDIA_TR (2) /* 802.5 - token ring */
#define BR2684_MEDIA_FDDI (3)
#define BR2684_MEDIA_802_6 (4) /* 802.6 */
/*
* Is there FCS inbound on this VC? This currently isn't supported.
*/
#define BR2684_FCSIN_NO (0)
#define BR2684_FCSIN_IGNORE (1)
#define BR2684_FCSIN_VERIFY (2)
/*
* Is there FCS outbound on this VC? This currently isn't supported.
*/
#define BR2684_FCSOUT_NO (0)
#define BR2684_FCSOUT_SENDZERO (1)
#define BR2684_FCSOUT_GENERATE (2)
/*
* Does this VC include LLC encapsulation?
*/
#define BR2684_ENCAPS_VC (0) /* VC-mux */
#define BR2684_ENCAPS_LLC (1)
#define BR2684_ENCAPS_AUTODETECT (2) /* Unsupported */
/*
* This is for the ATM_NEWBACKENDIF call - these are like socket families:
* the first element of the structure is the backend number and the rest
* is per-backend specific
*/
struct atm_newif_br2684 {
	atm_backend_t backend_num;	/* ATM_BACKEND_BR2684 */
	int media;		/* BR2684_MEDIA_* (only ETHERNET accepted) */
	char ifname[IFNAMSIZ];	/* requested name; "" means auto "nas%d" */
	int mtu;		/* currently must be 1500 */
};
/*
* This structure is used to specify a br2684 interface - either by a
* positive integer (returned by ATM_NEWBACKENDIF) or the interfaces name
*/
#define BR2684_FIND_BYNOTHING (0)
#define BR2684_FIND_BYNUM (1)
#define BR2684_FIND_BYIFNAME (2)
struct br2684_if_spec {
	int method;		/* BR2684_FIND_* */
	union {
		char ifname[IFNAMSIZ];	/* for BR2684_FIND_BYIFNAME */
		int devnum;		/* for BR2684_FIND_BYNUM */
	} spec;
};
/*
* This is for the ATM_SETBACKEND call - these are like socket families:
* the first element of the structure is the backend number and the rest
* is per-backend specific
*/
struct atm_backend_br2684 {
	atm_backend_t backend_num;	/* ATM_BACKEND_BR2684 */
	struct br2684_if_spec ifspec;	/* which device to attach this vcc to */
	int fcs_in;	/* BR2684_FCSIN_*; only _NO is accepted today */
	int fcs_out;	/* BR2684_FCSOUT_*; only _NO is accepted today */
	int fcs_auto;	/* 1: fcs_{in,out} disabled if no FCS rx'ed */
	int encaps;	/* BR2684_ENCAPS_* (AUTODETECT is rejected) */
	int has_vpiid;	/* 1: use vpn_id - Unsupported */
	__u8 vpn_id[7];
	int send_padding;	/* unsupported */
	int min_size;	/* we will pad smaller packets than this */
};
/*
* The BR2684_SETFILT ioctl is an experimental mechanism for folks
* terminating a large number of IP-only vcc's. When netfilter allows
* efficient per-if in/out filters, this support will be removed
*/
/* A single IP prefix filter: packets whose destination does not match
 * prefix/netmask are dropped on receive (netmask 0 disables it). */
struct br2684_filter {
	__u32 prefix;	/* network byte order */
	__u32 netmask;	/* 0 = disable filter */
};

/* Argument block for the BR2684_SETFILT ioctl. */
struct br2684_filter_set {
	struct br2684_if_spec ifspec;	/* which vcc/device to filter */
	struct br2684_filter filter;
};
#define BR2684_SETFILT _IOW( 'a', ATMIOC_BACKEND + 0, \
struct br2684_filter_set)
#endif /* _LINUX_ATMBR2684_H */
......@@ -96,6 +96,8 @@ struct atm_dev_stats {
/* enable or disable single-copy */
#define ATM_SETBACKEND _IOW('a',ATMIOC_SPECIAL+2,atm_backend_t)
/* set backend handler */
#define ATM_NEWBACKENDIF _IOW('a',ATMIOC_SPECIAL+3,atm_backend_t)
/* use backend to make new if */
/*
* These are backend handlers that can be set via the ATM_SETBACKEND call
......@@ -104,7 +106,7 @@ struct atm_dev_stats {
*/
#define ATM_BACKEND_RAW 0
#define ATM_BACKEND_PPP 1 /* PPPoATM - RFC2364 */
#define ATM_BACKEND_BR_2684 2 /* Bridged RFC1483/2684 */
#define ATM_BACKEND_BR2684 2 /* Bridged RFC1483/2684 */
/* for ATM_GETTYPE */
#define ATM_ITFTYP_LEN 8 /* maximum length of interface type name */
......@@ -304,9 +306,6 @@ struct atm_vcc {
struct sockaddr_atmsvc local;
struct sockaddr_atmsvc remote;
void (*callback)(struct atm_vcc *vcc);
struct sk_buff_head listenq;
int backlog_quota; /* number of connection requests we */
/* can still accept */
int reply; /* also used by ATMTCP */
/* Multipoint part ------------------------------------------------- */
struct atm_vcc *session; /* session VCC descriptor */
......
......@@ -78,7 +78,7 @@ struct rtattr
/******************************************************************************
* Definitions used in routing table administation.
* Definitions used in routing table administration.
****/
struct rtmsg
......@@ -129,14 +129,14 @@ enum
#define RTPROT_STATIC 4 /* Route installed by administrator */
/* Values of protocol >= RTPROT_STATIC are not interpreted by kernel;
they just passed from user and back as is.
they are just passed from user and back as is.
It will be used by hypothetical multiple routing daemons.
Note that protocol values should be standardized in order to
avoid conflicts.
*/
#define RTPROT_GATED 8 /* Apparently, GateD */
#define RTPROT_RA 9 /* RDISC/ND router advertisments */
#define RTPROT_RA 9 /* RDISC/ND router advertisements */
#define RTPROT_MRT 10 /* Merit MRT */
#define RTPROT_ZEBRA 11 /* Zebra */
#define RTPROT_BIRD 12 /* BIRD */
......@@ -210,8 +210,8 @@ enum rtattr_type_t
/* RTM_MULTIPATH --- array of struct rtnexthop.
*
* "struct rtnexthop" describres all necessary nexthop information,
* i.e. parameters of path to a destination via this nextop.
* "struct rtnexthop" describes all necessary nexthop information,
* i.e. parameters of path to a destination via this nexthop.
*
* At the moment it is impossible to set different prefsrc, mtu, window
* and rtt for different paths from multipath.
......@@ -485,7 +485,7 @@ enum
Comments:
- Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid
- If neiher of these three flags are set;
- If neither of these three flags are set;
the interface is NBMA.
- IFF_MULTICAST does not mean anything special:
......
......@@ -266,6 +266,23 @@ config ATM_MPOA
subnetwork boundaries. These shortcut connections bypass routers
enhancing overall network performance.
config ATM_BR2684
tristate "RFC1483/2684 Bridged protocols"
depends on ATM && INET
help
ATM PVCs can carry ethernet PDUs according to rfc2684 (formerly 1483)
This device will act like an ethernet from the kernels point of view,
with the traffic being carried by ATM PVCs (currently 1 PVC/device).
This is sometimes used over DSL lines. If in doubt, say N.
config ATM_BR2684_IPFILTER
bool "Per-VC IP filter kludge"
depends on ATM_BR2684
help
This is an experimental mechanism for users who need to terminate a
large number of IP-only vcc's. Do not enable this unless you are sure
you know what you are doing.
config VLAN_8021Q
tristate "802.1Q VLAN Support"
......
......@@ -7,6 +7,7 @@ mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o
obj-$(CONFIG_ATM) := addr.o pvc.o signaling.o svc.o common.o atm_misc.o raw.o resources.o
obj-$(CONFIG_ATM_CLIP) += clip.o ipcommon.o
obj-$(CONFIG_ATM_BR2684) += br2684.o ipcommon.o
obj-$(CONFIG_NET_SCH_ATM) += ipcommon.o
obj-$(CONFIG_PROC_FS) += proc.o
......
/*
Experimental ethernet netdevice using ATM AAL5 as underlying carrier
(RFC1483 obsoleted by RFC2684) for Linux 2.4
Author: Marcell GAL, 2000, XDSL Ltd, Hungary
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <asm/uaccess.h>
#include <net/arp.h>
#include <linux/atmbr2684.h>
#include "ipcommon.h"
/*
* Define this to use a version of the code which interacts with the higher
 * layers in a more intelligent way, by always reserving enough space for
 * our header at the beginning of the packet. However, there may still be
* some problems with programs like tcpdump. In 2.5 we'll sort out what
* we need to do to get this perfect. For now we just will copy the packet
* if we need space for the header
*/
/* #define FASTER_VERSION */
#ifdef DEBUG
#define DPRINTK(format, args...) printk(KERN_DEBUG "br2684: " format, ##args)
#else
#define DPRINTK(format, args...)
#endif
#ifdef SKB_DEBUG
/* Dump up to NUM2PRINT leading bytes of the skb payload as hex
 * (debug-only; compiled in when SKB_DEBUG is defined). */
static void skb_debug(const struct sk_buff *skb)
{
#define NUM2PRINT 50
	char buf[NUM2PRINT * 3 + 1];	/* 3 chars per byte */
	int i = 0;
	/* terminate up front so a zero-length skb does not print an
	 * uninitialized buffer */
	buf[0] = '\0';
	for (i = 0; i < skb->len && i < NUM2PRINT; i++) {
		sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
	}
	printk(KERN_DEBUG "br2684: skb: %s\n", buf);
}
#else
#define skb_debug(skb) do {} while (0)
#endif
/* RFC2684 LLC/SNAP encapsulation prepended to bridged ethernet PDUs:
 * LLC AA-AA-03, OUI 00-80-C2, PID 00-07, then the 2-byte pad that sits
 * in front of the MAC header.  On receive only the first 7 bytes are
 * compared, so frames carrying an FCS are accepted too. */
static unsigned char llc_oui_pid_pad[] =
	{ 0xAA, 0xAA, 0x03, 0x00, 0x80, 0xC2, 0x00, 0x07, 0x00, 0x00 };
#define PADLEN (2)	/* pad bytes before the MAC header */
/* Wire encapsulation of a vcc; values mirror the BR2684_ENCAPS_* uapi. */
enum br2684_encaps {
	e_vc = BR2684_ENCAPS_VC,	/* VC-mux: 2 zero pad bytes only */
	e_llc = BR2684_ENCAPS_LLC,	/* full LLC/SNAP header + pad */
};
/* Per-vcc state, hung off atmvcc->user_back. */
struct br2684_vcc {
	struct atm_vcc *atmvcc;		/* the underlying ATM vcc */
	struct br2684_dev *brdev;	/* device this vcc is attached to */
	/* keep old push,pop functions for chaining */
	void (*old_push)(struct atm_vcc *vcc,struct sk_buff *skb);
	/* void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb); */
	enum br2684_encaps encaps;	/* e_vc or e_llc */
	struct list_head brvccs;	/* linkage in brdev->brvccs */
#ifdef CONFIG_ATM_BR2684_IPFILTER
	struct br2684_filter filter;	/* per-vcc IP prefix filter */
#endif /* CONFIG_ATM_BR2684_IPFILTER */
#ifndef FASTER_VERSION
	unsigned copies_needed, copies_failed;	/* headroom-copy stats */
#endif /* FASTER_VERSION */
};
/* One bridged interface; the net_device is embedded, and BRPRIV() maps
 * a net_device pointer back to this structure. */
struct br2684_dev {
	struct net_device net_dev;	/* the ethernet-like device we register */
	struct list_head br2684_devs;	/* linkage in the global device list */
	int number;			/* device number, used for "nas%d" naming */
	struct list_head brvccs;	/* one device <=> one vcc (before xmas) */
	struct net_device_stats stats;	/* counters reported via get_stats */
	int mac_was_set;		/* 1 once userspace set a MAC; don't clobber with ESI */
};
/*
* This lock should be held for writing any time the list of devices or
* their attached vcc's could be altered. It should be held for reading
* any time these are being queried. Note that we sometimes need to
* do read-locking under interrupt context, so write locking must block
* the current CPU's interrupts
*/
static rwlock_t devs_lock = RW_LOCK_UNLOCKED;	/* guards br2684_devs and each device's brvccs list */
static LIST_HEAD(br2684_devs);	/* all registered br2684 devices */
/* Map an embedded net_device back to its containing br2684_dev
 * (open-coded container_of). */
static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
{
	return (struct br2684_dev *) ((char *) (net_dev) -
		(unsigned long) (&((struct br2684_dev *) 0)->net_dev));
}

/* list node in the global device list -> containing br2684_dev */
static inline struct br2684_dev *list_entry_brdev(const struct list_head *le)
{
	return list_entry(le, struct br2684_dev, br2684_devs);
}

/* Our per-vcc state lives in the vcc's user_back pointer. */
static inline struct br2684_vcc *BR2684_VCC(const struct atm_vcc *atmvcc)
{
	return (struct br2684_vcc *) (atmvcc->user_back);
}

/* list node in a device's vcc list -> containing br2684_vcc */
static inline struct br2684_vcc *list_entry_brvcc(const struct list_head *le)
{
	return list_entry(le, struct br2684_vcc, brvccs);
}
/* Caller should hold read_lock(&devs_lock) */
/* Look up a br2684 device either by number or by interface name.
 * Returns NULL when nothing matches (or the method is unknown).
 * Caller should hold read_lock(&devs_lock). */
static struct br2684_dev *br2684_find_dev(const struct br2684_if_spec *s)
{
	struct list_head *pos;

	list_for_each(pos, &br2684_devs) {
		struct br2684_dev *candidate = list_entry_brdev(pos);

		if (s->method == BR2684_FIND_BYNUM) {
			if (candidate->number == s->spec.devnum)
				return candidate;
		} else if (s->method == BR2684_FIND_BYIFNAME) {
			if (strncmp(candidate->net_dev.name, s->spec.ifname,
			    sizeof candidate->net_dev.name) == 0)
				return candidate;
		} else {
			break;	/* unknown method: nothing can match */
		}
	}
	return NULL;
}
/*
 * Send a packet out a particular vcc. Not too useful right now, but paves
* the way for multiple vcc's per itf. Returns true if we can send,
* otherwise false
*/
static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
	struct br2684_vcc *brvcc)
{
	struct atm_vcc *atmvcc;
#ifdef FASTER_VERSION
	/* header space was reserved up front by br2684_header(); only the
	   first 8 LLC bytes still need filling in here */
	if (brvcc->encaps == e_llc)
		memcpy(skb_push(skb, 8), llc_oui_pid_pad, 8);
	/* last 2 bytes of llc_oui_pid_pad are managed by header routines;
	   yes, you got it: 8 + 2 = sizeof(llc_oui_pid_pad)
	*/
#else
	/* LLC needs the full 10-byte header, VC-mux just the 2-byte pad */
	int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2;
	if (skb_headroom(skb) < minheadroom) {
		/* not enough room for our encapsulation: copy the skb */
		struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
		brvcc->copies_needed++;
		dev_kfree_skb(skb);
		if (skb2 == NULL) {
			brvcc->copies_failed++;
			return 0;
		}
		skb = skb2;
	}
	skb_push(skb, minheadroom);
	if (brvcc->encaps == e_llc)
		memcpy(skb->data, llc_oui_pid_pad, 10);
	else
		memset(skb->data, 0, 2);	/* VC-mux: two zero pad bytes */
#endif /* FASTER_VERSION */
	skb_debug(skb);
	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
	DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
	if (!atm_may_send(atmvcc, skb->truesize)) {
		/* we free this here for now, because we cannot know in a higher
			layer whether the skb point it supplied wasn't freed yet.
			now, it always is.
		*/
		dev_kfree_skb(skb);
		return 0;
	}
	/* charge the vcc's socket send buffer for the queued data */
	atomic_add(skb->truesize, &atmvcc->sk->wmem_alloc);
	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
	brdev->stats.tx_packets++;
	brdev->stats.tx_bytes += skb->len;
	atmvcc->send(atmvcc, skb);
	return 1;
}
/* Choose the vcc to transmit on.  Only one vcc per device exists right
 * now, so just take the head of the list (or NULL if none attached). */
static inline struct br2684_vcc *pick_outgoing_vcc(struct sk_buff *skb,
	struct br2684_dev *brdev)
{
	if (list_empty(&brdev->brvccs))
		return NULL;
	return list_entry_brvcc(brdev->brvccs.next);
}
/* hard_start_xmit handler: pick a vcc and hand the frame to it. */
static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct br2684_dev *brdev = BRPRIV(dev);
	struct br2684_vcc *brvcc;
	DPRINTK("br2684_start_xmit, skb->dst=%p\n", skb->dst);
	read_lock(&devs_lock);
	brvcc = pick_outgoing_vcc(skb, brdev);
	if (brvcc == NULL) {
		DPRINTK("no vcc attached to dev %s\n", dev->name);
		brdev->stats.tx_errors++;
		brdev->stats.tx_carrier_errors++;
		/* netif_stop_queue(dev); */
		dev_kfree_skb(skb);
		read_unlock(&devs_lock);
		/*
		 * We consumed (freed) the skb, so we must report success:
		 * a non-zero return would make the queueing layer requeue
		 * the already-freed skb.  (Previously -EUNATCH was
		 * returned here, a use-after-free waiting to happen.)
		 */
		return 0;
	}
	if (!br2684_xmit_vcc(skb, brdev, brvcc)) {
		/*
		 * We should probably use netif_*_queue() here, but that
		 * involves added complication. We need to walk before
		 * we can run
		 */
		/* don't free here! this pointer might be no longer valid!
		dev_kfree_skb(skb);
		*/
		brdev->stats.tx_errors++;
		brdev->stats.tx_fifo_errors++;
	}
	read_unlock(&devs_lock);
	return 0;
}
static struct net_device_stats *br2684_get_stats(struct net_device *dev)
{
DPRINTK("br2684_get_stats\n");
return &BRPRIV(dev)->stats;
}
#ifdef FASTER_VERSION
/*
* These mirror eth_header and eth_header_cache. They are not usually
* exported for use in modules, so we grab them from net_device
* after ether_setup() is done with it. Bit of a hack.
*/
static int (*my_eth_header)(struct sk_buff *, struct net_device *,
unsigned short, void *, void *, unsigned);
static int (*my_eth_header_cache)(struct neighbour *, struct hh_cache *);
static int
br2684_header(struct sk_buff *skb, struct net_device *dev,
	unsigned short type, void *daddr, void *saddr, unsigned len)
{
	u16 *pad_before_eth;
	/* build the ordinary ethernet header first ... */
	int t = my_eth_header(skb, dev, type, daddr, saddr, len);
	if (t > 0) {
		/* ... then prepend the 2-byte zero pad; xmit fills in the
		   remaining 8 LLC bytes when the vcc uses LLC encaps */
		pad_before_eth = (u16 *) skb_push(skb, 2);
		*pad_before_eth = 0;
		return dev->hard_header_len;	/* or return 16; ? */
	} else
		return t;
}
static int
br2684_header_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	/* hh_data is 16 bytes long. if encaps is ether-llc we need 24, so
	   xmit will add the additional header part in that case */
	u16 *pad_before_eth = (u16 *)(hh->hh_data);
	int t = my_eth_header_cache(neigh, hh);
	DPRINTK("br2684_header_cache, neigh=%p, hh_cache=%p\n", neigh, hh);
	if (t < 0)
		return t;
	else {
		/* zero the 2-byte pad that precedes the cached header */
		*pad_before_eth = 0;
		hh->hh_len = PADLEN + ETH_HLEN;
	}
	return 0;
}
/*
* This is similar to eth_type_trans, which cannot be used because of
* our dev->hard_header_len
*/
static inline unsigned short br_type_trans(struct sk_buff *skb,
	struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;
	eth = skb->mac.ethernet;
	/* classify by destination MAC, like eth_type_trans() */
	if (*eth->h_dest & 1) {	/* group bit: broadcast or multicast */
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
	else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
		skb->pkt_type = PACKET_OTHERHOST;
	/* h_proto values >= 1536 are ethertypes (returned in network
	   byte order); smaller values are 802.3 length fields */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;
	rawp = skb->data;
	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *) rawp == 0xFFFF)
		return htons(ETH_P_802_3);
	/*
	 * Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
#endif /* FASTER_VERSION */
/*
* We remember when the MAC gets set, so we don't override it later with
* the ESI of the ATM card of the first VC
*/
static int (*my_eth_mac_addr)(struct net_device *, void *);
/* Chain to the stock ethernet set_mac_address handler; on success
 * remember that the MAC was set explicitly so it won't later be
 * overwritten with an ATM card's ESI. */
static int br2684_mac_addr(struct net_device *dev, void *p)
{
	int rc = my_eth_mac_addr(dev, p);

	if (rc == 0)
		BRPRIV(dev)->mac_was_set = 1;
	return rc;
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
/* this IOCTL is experimental. */
static int br2684_setfilt(struct atm_vcc *atmvcc, unsigned long arg)
{
	struct br2684_vcc *brvcc;
	struct br2684_filter_set fs;
	if (copy_from_user(&fs, (void *) arg, sizeof fs))
		return -EFAULT;
	if (fs.ifspec.method != BR2684_FIND_BYNOTHING) {
		/*
		 * This is really a per-vcc thing, but we can also search
		 * by device
		 */
		struct br2684_dev *brdev;
		read_lock(&devs_lock);
		brdev = br2684_find_dev(&fs.ifspec);
		/* only usable when the device has exactly one vcc */
		if (brdev == NULL || list_empty(&brdev->brvccs) ||
			brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
			brvcc = NULL;
		else
			brvcc = list_entry_brvcc(brdev->brvccs.next);
		read_unlock(&devs_lock);
		if (brvcc == NULL)
			return -ESRCH;
	} else
		/* BYNOTHING: apply to the vcc the ioctl arrived on */
		brvcc = BR2684_VCC(atmvcc);
	memcpy(&brvcc->filter, &fs.filter, sizeof(brvcc->filter));
	return 0;
}
/* Returns 1 if packet should be dropped */
/* Returns 1 if packet should be dropped */
static inline int
packet_fails_filter(u16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
{
	const struct br2684_filter *filt = &brvcc->filter;

	if (filt->netmask == 0)
		return 0;	/* no filter in place */
	if (type == __constant_htons(ETH_P_ARP))
		return 0;	/* always let ARP through */
	if (type == __constant_htons(ETH_P_IP)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;

		if ((iph->daddr & filt->netmask) == filt->prefix)
			return 0;	/* destination matches the prefix */
	}
	/* TODO: we should probably filter ARPs too.. don't want to have
	 * them returning values that don't make sense, or is that ok?
	 */
	return 1;	/* drop */
}
#endif /* CONFIG_ATM_BR2684_IPFILTER */
/* Detach a vcc from its device: unlink it from the device's vcc list,
 * notify the previous push handler that the vcc is going away and free
 * our per-vcc state.  Called from br2684_push() on vcc destruction. */
static void br2684_close_vcc(struct br2684_vcc *brvcc)
{
	DPRINTK("removing VCC %p from dev %p\n", brvcc, brvcc->brdev);
	write_lock_irq(&devs_lock);
	list_del(&brvcc->brvccs);
	write_unlock_irq(&devs_lock);
	brvcc->atmvcc->user_back = NULL;	/* what about vcc->recvq ??? */
	brvcc->old_push(brvcc->atmvcc, NULL);	/* pass on the bad news */
	kfree(brvcc);
	MOD_DEC_USE_COUNT;
}
/* when AAL5 PDU comes in: */
/* Receive handler installed on the vcc: strip the RFC2684 encapsulation
 * and feed the ethernet frame into the network stack.  A NULL skb means
 * the vcc is being destroyed. */
static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
	struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
	struct br2684_dev *brdev = brvcc->brdev;
	int plen = sizeof(llc_oui_pid_pad) + ETH_HLEN;
	DPRINTK("br2684_push\n");
	if (skb == NULL) {	/* skb==NULL means VCC is being destroyed */
		br2684_close_vcc(brvcc);
		if (list_empty(&brdev->brvccs)) {
			/*
			 * Last vcc gone: tear the device down too.  We are
			 * modifying the global device list, so take the
			 * write lock (with interrupts blocked, like every
			 * other writer of this list).  The previous code
			 * used read_lock() here, which does not exclude
			 * concurrent list manipulation.
			 */
			write_lock_irq(&devs_lock);
			list_del(&brdev->br2684_devs);
			write_unlock_irq(&devs_lock);
			unregister_netdev(&brdev->net_dev);
			kfree(brdev);
		}
		return;
	}
	skb_debug(skb);
	atm_return(atmvcc, skb->truesize);	/* uncharge the vcc's rx buffer */
	DPRINTK("skb from brdev %p\n", brdev);
	if (brvcc->encaps == e_llc) {
		/* let us waste some time for checking the encapsulation.
		   Note, that only 7 char is checked so frames with a valid FCS
		   are also accepted (but FCS is not checked of course) */
		if (memcmp(skb->data, llc_oui_pid_pad, 7)) {
			brdev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return;
		}
	} else {
		plen = PADLEN + ETH_HLEN;	/* pad, dstmac,srcmac, ethtype */
		/* first 2 chars should be 0 */
		if (*((u16 *) (skb->data)) != 0) {
			brdev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return;
		}
	}
	if (skb->len < plen) {	/* too short for encapsulation + MAC header */
		brdev->stats.rx_errors++;
		dev_kfree_skb(skb);	/* dev_ not needed? */
		return;
	}
#ifdef FASTER_VERSION
	/* FIXME: tcpdump shows that pointer to mac header is 2 bytes earlier,
	   than should be. What else should I set? */
	skb_pull(skb, plen);
	skb->mac.raw = ((char *) (skb->data)) - ETH_HLEN;
	skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_BR2684_FAST_TRANS
	skb->protocol = ((u16 *) skb->data)[-1];
#else /* some protocols might require this: */
	skb->protocol = br_type_trans(skb, &brdev->net_dev);
#endif /* CONFIG_BR2684_FAST_TRANS */
#else
	skb_pull(skb, plen - ETH_HLEN);
	skb->protocol = eth_type_trans(skb, &brdev->net_dev);
#endif /* FASTER_VERSION */
#ifdef CONFIG_ATM_BR2684_IPFILTER
	if (packet_fails_filter(skb->protocol, brvcc, skb)) {
		brdev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
#endif /* CONFIG_ATM_BR2684_IPFILTER */
	skb->dev = &brdev->net_dev;
	ATM_SKB(skb)->vcc = atmvcc;	/* needed ? */
	DPRINTK("received packet's protocol: %x\n", ntohs(skb->protocol));
	skb_debug(skb);
	if (!(brdev->net_dev.flags & IFF_UP)) {	/* sigh, interface is down */
		brdev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	brdev->stats.rx_packets++;
	brdev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}
/*
 * Attach an ATM vcc to an existing br2684 device (ATM_SETBACKEND path).
 * Note: we do not have explicit unassign, but look at _push()
 */
static int br2684_regvcc(struct atm_vcc *atmvcc, unsigned long arg)
{
	int err;
	struct br2684_vcc *brvcc;
	struct sk_buff_head copy;
	struct sk_buff *skb;
	struct br2684_dev *brdev;
	struct atm_backend_br2684 be;
	MOD_INC_USE_COUNT;
	if (copy_from_user(&be, (void *) arg, sizeof be)) {
		MOD_DEC_USE_COUNT;
		return -EFAULT;
	}
	write_lock_irq(&devs_lock);
	brdev = br2684_find_dev(&be.ifspec);
	if (brdev == NULL) {
		/* (typo "non-existant" fixed) */
		printk(KERN_ERR
			"br2684: tried to attach to non-existent device\n");
		err = -ENXIO;
		goto error;
	}
	if (atmvcc->push == NULL) {	/* vcc is not in a usable state */
		err = -EBADFD;
		goto error;
	}
	if (!list_empty(&brdev->brvccs)) { /* Only 1 VCC/dev right now */
		err = -EEXIST;
		goto error;
	}
	/* reject every option we do not implement yet */
	if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO ||
		be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps !=
		BR2684_ENCAPS_VC && be.encaps != BR2684_ENCAPS_LLC) ||
		be.min_size != 0) {
		err = -EINVAL;
		goto error;
	}
	brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
	if (!brvcc) {
		err = -ENOMEM;
		goto error;
	}
	memset(brvcc, 0, sizeof(struct br2684_vcc));
	DPRINTK("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps,
		brvcc);
	if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
		/* first vcc on the device: derive a MAC from the ATM card's
		   ESI unless userspace already set one explicitly */
		unsigned char *esi = atmvcc->dev->esi;
		if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
			memcpy(brdev->net_dev.dev_addr, esi,
				brdev->net_dev.addr_len);
		else
			brdev->net_dev.dev_addr[2] = 1;
	}
	list_add(&brvcc->brvccs, &brdev->brvccs);
	write_unlock_irq(&devs_lock);
	brvcc->brdev = brdev;
	brvcc->atmvcc = atmvcc;
	atmvcc->user_back = brvcc;
	brvcc->encaps = (enum br2684_encaps) be.encaps;
	brvcc->old_push = atmvcc->push;
	barrier();	/* our state must be visible before we take over push */
	atmvcc->push = br2684_push;
	/* replay any PDUs queued on the vcc before we attached through our
	   own receive path */
	skb_queue_head_init(&copy);
	skb_migrate(&atmvcc->sk->receive_queue, &copy);
	while ((skb = skb_dequeue(&copy))) {
		BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
		BRPRIV(skb->dev)->stats.rx_packets--;
		br2684_push(atmvcc, skb);
	}
	return 0;
    error:
	write_unlock_irq(&devs_lock);
	MOD_DEC_USE_COUNT;
	return err;
}
/* ATM_NEWBACKENDIF path: create and register a new "nasN" net device. */
static int br2684_create(unsigned long arg)
{
	int err;
	struct br2684_dev *brdev;
	struct atm_newif_br2684 ni;
	DPRINTK("br2684_create\n");
	/*
	 * We track module use by vcc's NOT the devices they're on. We're
	 * protected here against module death by the kernel_lock, but if
	 * we need to sleep we should make sure that the module doesn't
	 * disappear under us.
	 */
	MOD_INC_USE_COUNT;
	if (copy_from_user(&ni, (void *) arg, sizeof ni)) {
		MOD_DEC_USE_COUNT;
		return -EFAULT;
	}
	/* only plain ethernet with the standard MTU is supported for now */
	if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) {
		MOD_DEC_USE_COUNT;
		return -EINVAL;
	}
	if ((brdev = kmalloc(sizeof(struct br2684_dev), GFP_KERNEL)) == NULL) {
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	memset(brdev, 0, sizeof(struct br2684_dev));
	INIT_LIST_HEAD(&brdev->brvccs);
	write_lock_irq(&devs_lock);
	/* device numbers grow monotonically: one past the last in the list */
	brdev->number = list_empty(&br2684_devs) ? 1 :
		list_entry_brdev(br2684_devs.prev)->number + 1;
	list_add_tail(&brdev->br2684_devs, &br2684_devs);
	write_unlock_irq(&devs_lock);
	if (ni.ifname[0] != '\0') {
		memcpy(brdev->net_dev.name, ni.ifname,
			sizeof(brdev->net_dev.name));
		/* guarantee NUL termination after the bounded copy */
		brdev->net_dev.name[sizeof(brdev->net_dev.name) - 1] = '\0';
	} else
		sprintf(brdev->net_dev.name, "nas%d", brdev->number);
	DPRINTK("registered netdev %s\n", brdev->net_dev.name);
	ether_setup(&brdev->net_dev);
	brdev->mac_was_set = 0;
#ifdef FASTER_VERSION
	/* steal the stock ethernet header ops so we can wrap them */
	my_eth_header = brdev->net_dev.hard_header;
	brdev->net_dev.hard_header = br2684_header;
	my_eth_header_cache = brdev->net_dev.hard_header_cache;
	brdev->net_dev.hard_header_cache = br2684_header_cache;
	brdev->net_dev.hard_header_len = sizeof(llc_oui_pid_pad) + ETH_HLEN;	/* 10 + 14 */
#endif
	my_eth_mac_addr = brdev->net_dev.set_mac_address;
	brdev->net_dev.set_mac_address = br2684_mac_addr;
	brdev->net_dev.hard_start_xmit = br2684_start_xmit;
	brdev->net_dev.get_stats = br2684_get_stats;
	/* open, stop, do_ioctl ? */
	err = register_netdev(&brdev->net_dev);
	MOD_DEC_USE_COUNT;
	if (err < 0) {
		printk(KERN_ERR "br2684_create: register_netdev failed\n");
		write_lock_irq(&devs_lock);
		list_del(&brdev->br2684_devs);
		write_unlock_irq(&devs_lock);
		kfree(brdev);
		return err;
	}
	return 0;
}
/*
 * Handle ioctls performed directly on one of our vccs.  Anything we do
 * not recognize must return -ENOIOCTLCMD so the generic ATM ioctl code
 * can try the next handler.
 */
static int br2684_ioctl(struct atm_vcc *atmvcc, unsigned int cmd,
	unsigned long arg)
{
	atm_backend_t b;
	int err;

	switch (cmd) {
	case ATM_SETBACKEND:
	case ATM_NEWBACKENDIF:
		/* get_user may fault in userspace; pin the module meanwhile */
		MOD_INC_USE_COUNT;
		err = get_user(b, (atm_backend_t *) arg);
		MOD_DEC_USE_COUNT;
		if (err)
			return -EFAULT;
		if (b != ATM_BACKEND_BR2684)
			return -ENOIOCTLCMD;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return (cmd == ATM_SETBACKEND) ?
			br2684_regvcc(atmvcc, arg) : br2684_create(arg);
#ifdef CONFIG_ATM_BR2684_IPFILTER
	case BR2684_SETFILT:
		/* Only meaningful on a vcc already attached to br2684. */
		if (atmvcc->push != br2684_push)
			return -ENOIOCTLCMD;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		MOD_INC_USE_COUNT;
		err = br2684_setfilt(atmvcc, arg);
		MOD_DEC_USE_COUNT;
		return err;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
	}
	return -ENOIOCTLCMD;
}
/* Never put more than 256 bytes in at once */
/*
 * Format the single /proc output line selected by `pos` into `buf`.
 * The virtual line sequence is: one line per device, followed by one
 * line per vcc on that device (and, with CONFIG_ATM_BR2684_IPFILTER,
 * an extra line per vcc whose filter netmask is set).  Returns the
 * number of bytes written, or 0 once `pos` is past the last line.
 * NOTE(review): callers hold devs_lock for reading (see
 * br2684_proc_read); buf needs >= 256 bytes of room per the comment
 * above.
 */
static int br2684_proc_engine(loff_t pos, char *buf)
{
struct list_head *lhd, *lhc;
struct br2684_dev *brdev;
struct br2684_vcc *brvcc;
list_for_each(lhd, &br2684_devs) {
brdev = list_entry_brdev(lhd);
/* pos-- hits zero exactly on the pos'th line of the whole walk */
if (pos-- == 0)
return sprintf(buf, "dev %.16s: num=%d, mac=%02X:%02X:"
"%02X:%02X:%02X:%02X (%s)\n", brdev->net_dev.name,
brdev->number,
brdev->net_dev.dev_addr[0],
brdev->net_dev.dev_addr[1],
brdev->net_dev.dev_addr[2],
brdev->net_dev.dev_addr[3],
brdev->net_dev.dev_addr[4],
brdev->net_dev.dev_addr[5],
brdev->mac_was_set ? "set" : "auto");
list_for_each(lhc, &brdev->brvccs) {
brvcc = list_entry_brvcc(lhc);
if (pos-- == 0)
return sprintf(buf, " vcc %d.%d.%d: encaps=%s"
#ifndef FASTER_VERSION
", failed copies %u/%u"
#endif /* FASTER_VERSION */
"\n", brvcc->atmvcc->dev->number,
brvcc->atmvcc->vpi, brvcc->atmvcc->vci,
(brvcc->encaps == e_llc) ? "LLC" : "VC"
#ifndef FASTER_VERSION
, brvcc->copies_failed
, brvcc->copies_needed
#endif /* FASTER_VERSION */
);
#ifdef CONFIG_ATM_BR2684_IPFILTER
/* b1/bs pick apart the filter words into dotted-quad bytes */
#define b1(var, byte) ((u8 *) &brvcc->filter.var)[byte]
#define bs(var) b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
if (brvcc->filter.netmask != 0 && pos-- == 0)
return sprintf(buf, " filter=%d.%d.%d.%d/"
"%d.%d.%d.%d\n", bs(prefix), bs(netmask));
#undef bs
#undef b1
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
}
return 0;
}
/*
 * /proc read: pull successive lines (one per *pos step) from
 * br2684_proc_engine into a scratch page, then copy the whole batch to
 * user space.  The loop keeps at least 256 bytes of slack, since the
 * engine may emit up to that much for a single line.
 */
static ssize_t br2684_proc_read(struct file *file, char *buf, size_t count,
	loff_t *pos)
{
	unsigned long pg;
	char *kbuf;
	int written = 0, chunk, space;

	pg = get_zeroed_page(GFP_KERNEL);
	if (!pg)
		return -ENOMEM;
	kbuf = (char *) pg;

	/* Leave one line's worth of slack, capped by the caller's count. */
	space = PAGE_SIZE - 256;
	if (count < space)
		space = count;

	read_lock(&devs_lock);
	for (;;) {
		chunk = br2684_proc_engine(*pos, kbuf + written);
		if (chunk == 0)		/* no more lines */
			break;
		if (chunk > space)
			/*
			 * This should only happen if the user passed in
			 * a "count" too small for even one line
			 */
			chunk = -EINVAL;
		if (chunk < 0) {
			written = chunk;
			break;
		}
		written += chunk;
		space -= chunk;
		(*pos)++;
		if (space < 256)	/* next line might not fit */
			break;
	}
	read_unlock(&devs_lock);

	if (written > 0 && copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	free_page(pg);
	return written;
}
/* /proc/net/atm/br2684 is read-only; all other ops use the defaults. */
static struct file_operations br2684_proc_operations = {
	.read = br2684_proc_read,
};
extern struct proc_dir_entry *atm_proc_root; /* from proc.c */
/* hook by which net/atm/common.c routes br2684 ioctls to this module */
extern int (*br2684_ioctl_hook)(struct atm_vcc *, unsigned int, unsigned long);
/* the following avoids some spurious warnings from the compiler */
#define UNUSED __attribute__((unused))
/*
 * Module init: publish the /proc entry and install the ioctl hook so
 * common.c starts forwarding br2684 ioctls to us.
 */
static int __init UNUSED br2684_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("br2684", 0, atm_proc_root);
	if (entry == NULL)
		return -ENOMEM;
	entry->proc_fops = &br2684_proc_operations;
	br2684_ioctl_hook = br2684_ioctl;
	return 0;
}
/*
 * Module exit: detach the ioctl hook and /proc entry first so no new
 * devices can be created, then unregister and free every remaining
 * interface on the br2684_devs list.
 */
static void __exit UNUSED br2684_exit(void)
{
	struct br2684_dev *brdev;

	br2684_ioctl_hook = NULL;
	remove_proc_entry("br2684", atm_proc_root);

	for (;;) {
		if (list_empty(&br2684_devs))
			break;
		brdev = list_entry_brdev(br2684_devs.next);
		unregister_netdev(&brdev->net_dev);
		list_del(&brdev->br2684_devs);
		kfree(brdev);
	}
}
/* module entry/exit points and metadata */
module_init(br2684_init);
module_exit(br2684_exit);
MODULE_AUTHOR("Marcell GAL");
MODULE_DESCRIPTION("RFC2684 bridged protocols over ATM/AAL5");
MODULE_LICENSE("GPL");
......@@ -127,6 +127,8 @@ static void idle_timer_check(unsigned long dummy)
struct atmarp_entry *entry = NEIGH2ENTRY(n);
struct clip_vcc *clip_vcc;
write_lock(&n->lock);
for (clip_vcc = entry->vccs; clip_vcc;
clip_vcc = clip_vcc->next)
if (clip_vcc->idle_timeout &&
......@@ -141,6 +143,7 @@ static void idle_timer_check(unsigned long dummy)
if (entry->vccs ||
time_before(jiffies, entry->expires)) {
np = &n->next;
write_unlock(&n->lock);
continue;
}
if (atomic_read(&n->refcnt) > 1) {
......@@ -152,11 +155,13 @@ static void idle_timer_check(unsigned long dummy)
NULL)
dev_kfree_skb(skb);
np = &n->next;
write_unlock(&n->lock);
continue;
}
*np = n->next;
DPRINTK("expired neigh %p\n",n);
n->dead = 1;
write_unlock(&n->lock);
neigh_release(n);
}
}
......
......@@ -62,6 +62,13 @@ int (*pppoatm_ioctl_hook)(struct atm_vcc *, unsigned int, unsigned long);
EXPORT_SYMBOL(pppoatm_ioctl_hook);
#endif
#if defined(CONFIG_ATM_BR2684) || defined(CONFIG_ATM_BR2684_MODULE)
int (*br2684_ioctl_hook)(struct atm_vcc *, unsigned int, unsigned long);
#ifdef CONFIG_ATM_BR2684_MODULE
EXPORT_SYMBOL(br2684_ioctl_hook);
#endif
#endif
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
#include "protocols.h" /* atm_init_<transport> */
......@@ -120,7 +127,6 @@ int atm_create(struct socket *sock,int protocol,int family)
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
init_waitqueue_head(&vcc->sleep);
skb_queue_head_init(&vcc->listenq);
sk->sleep = &vcc->sleep;
sock->sk = sk;
return 0;
......@@ -489,7 +495,7 @@ unsigned int atm_poll(struct file *file,struct socket *sock,poll_table *wait)
vcc = ATM_SD(sock);
poll_wait(file,&vcc->sleep,wait);
mask = 0;
if (skb_peek(&vcc->sk->receive_queue) || skb_peek(&vcc->listenq))
if (skb_peek(&vcc->sk->receive_queue))
mask |= POLLIN | POLLRDNORM;
if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
test_bit(ATM_VF_CLOSE,&vcc->flags))
......@@ -784,6 +790,13 @@ int atm_ioctl(struct socket *sock,unsigned int cmd,unsigned long arg)
if (ret_val != -ENOIOCTLCMD)
goto done;
}
#endif
#if defined(CONFIG_ATM_BR2684) || defined(CONFIG_ATM_BR2684_MODULE)
if (br2684_ioctl_hook) {
ret_val = br2684_ioctl_hook(vcc, cmd, arg);
if (ret_val != -ENOIOCTLCMD)
goto done;
}
#endif
if (get_user(buf,&((struct atmif_sioc *) arg)->arg)) {
ret_val = -EFAULT;
......
......@@ -302,7 +302,7 @@ lec_send_packet(struct sk_buff *skb, struct net_device *dev)
#endif
min_frame_size = LEC_MINIMUM_8023_SIZE;
if (skb->len < min_frame_size) {
if (skb->truesize < min_frame_size) {
if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
skb2 = skb_copy_expand(skb, 0,
min_frame_size - skb->truesize, GFP_ATOMIC);
dev_kfree_skb(skb);
......
......@@ -129,12 +129,12 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
case as_indicate:
vcc = *(struct atm_vcc **) &msg->listen_vcc;
DPRINTK("as_indicate!!!\n");
if (!vcc->backlog_quota) {
if (vcc->sk->ack_backlog == vcc->sk->max_ack_backlog) {
sigd_enq(0,as_reject,vcc,NULL,NULL);
return 0;
}
vcc->backlog_quota--;
skb_queue_tail(&vcc->listenq,skb);
vcc->sk->ack_backlog++;
skb_queue_tail(&vcc->sk->receive_queue,skb);
if (vcc->callback) {
DPRINTK("waking vcc->sleep 0x%p\n",
&vcc->sleep);
......
......@@ -74,7 +74,7 @@ static void svc_disconnect(struct atm_vcc *vcc)
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
while ((skb = skb_dequeue(&vcc->listenq))) {
while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
DPRINTK("LISTEN REL\n");
sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0);
dev_kfree_skb(skb);
......@@ -253,7 +253,7 @@ static int svc_listen(struct socket *sock,int backlog)
remove_wait_queue(&vcc->sleep,&wait);
if (!sigd) return -EUNATCH;
set_bit(ATM_VF_LISTEN,&vcc->flags);
vcc->backlog_quota = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
vcc->sk->max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
return vcc->reply;
}
......@@ -277,7 +277,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
DECLARE_WAITQUEUE(wait,current);
add_wait_queue(&old_vcc->sleep,&wait);
while (!(skb = skb_dequeue(&old_vcc->listenq)) && sigd) {
while (!(skb = skb_dequeue(&old_vcc->sk->receive_queue)) && sigd) {
if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
error = old_vcc->reply;
......@@ -306,7 +306,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
error = atm_connect(newsock,msg->pvc.sap_addr.itf,
msg->pvc.sap_addr.vpi,msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
old_vcc->backlog_quota++;
old_vcc->sk->ack_backlog--;
if (error) {
sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
&old_vcc->qos,error);
......
......@@ -143,6 +143,9 @@ int br_handle_frame(struct sk_buff *skb)
return -1;
}
if (!memcmp(p->br->dev.dev_addr, dest, ETH_ALEN))
skb->pkt_type = PACKET_HOST;
NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish);
rcu_read_unlock();
......
......@@ -126,9 +126,6 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
struct sk_buff *skb;
u8 *data;
if (gfp_mask & __GFP_WAIT)
might_sleep();
/* Get the HEAD */
skb = kmem_cache_alloc(skbuff_head_cache,
gfp_mask & ~__GFP_DMA);
......
......@@ -926,6 +926,7 @@ struct proto_ops inet_dgram_ops = {
struct net_proto_family inet_family_ops = {
.family = PF_INET,
.create = inet_create,
.owner = THIS_MODULE,
};
......
......@@ -115,9 +115,9 @@ struct net_device * ip_dev_find(u32 addr)
if (res.type != RTN_LOCAL)
goto out;
dev = FIB_RES_DEV(res);
if (dev)
atomic_inc(&dev->refcnt);
if (dev)
dev_hold(dev);
out:
fib_res_put(&res);
return dev;
......
......@@ -406,7 +406,7 @@ static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_n
if (!(dev->flags&IFF_UP))
return -ENETDOWN;
nh->nh_dev = dev;
atomic_inc(&dev->refcnt);
dev_hold(dev);
nh->nh_scope = RT_SCOPE_LINK;
return 0;
}
......@@ -429,7 +429,7 @@ static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_n
nh->nh_oif = FIB_RES_OIF(res);
if ((nh->nh_dev = FIB_RES_DEV(res)) == NULL)
goto out;
atomic_inc(&nh->nh_dev->refcnt);
dev_hold(nh->nh_dev);
err = -ENETDOWN;
if (!(nh->nh_dev->flags & IFF_UP))
goto out;
......@@ -451,7 +451,7 @@ static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_n
return -ENETDOWN;
}
nh->nh_dev = in_dev->dev;
atomic_inc(&nh->nh_dev->refcnt);
dev_hold(nh->nh_dev);
nh->nh_scope = RT_SCOPE_HOST;
in_dev_put(in_dev);
}
......
......@@ -444,6 +444,8 @@ static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
while((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
if (tw->next_death)
tw->next_death->pprev_death = tw->pprev_death;
tw->pprev_death = NULL;
spin_unlock(&tw_death_lock);
......
......@@ -535,6 +535,7 @@ struct proto_ops inet6_dgram_ops = {
struct net_proto_family inet6_family_ops = {
.family = PF_INET6,
.create = inet6_create,
.owner = THIS_MODULE,
};
#ifdef MODULE
......
......@@ -593,7 +593,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nlmsghdr *nlh)
#ifdef CONFIG_IPV6_SUBTREES
/* Subtree creation failed, probably main tree node
is orphan. If it is, shot it.
is orphan. If it is, shoot it.
*/
st_failure:
if (fn && !(fn->fn_flags&RTN_RTINFO|RTN_ROOT))
......@@ -953,7 +953,7 @@ int fib6_del(struct rt6_info *rt, struct nlmsghdr *nlh)
#if RT6_DEBUG >= 2
if (rt->u.dst.obsolete>0) {
BUG_TRAP(fn==NULL || rt->u.dst.obsolete<=0);
BUG_TRAP(fn==NULL);
return -ENOENT;
}
#endif
......@@ -979,7 +979,7 @@ int fib6_del(struct rt6_info *rt, struct nlmsghdr *nlh)
}
/*
* Tree transversal function.
* Tree traversal function.
*
* Certainly, it is not interrupt safe.
* However, it is internally reenterable wrt itself and fib6_add/fib6_del.
......@@ -1179,14 +1179,14 @@ static int fib6_age(struct rt6_info *rt, void *arg)
*/
if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
if ((long)(now - rt->rt6i_expires) > 0) {
if (time_after(now, rt->rt6i_expires)) {
RT6_TRACE("expiring %p\n", rt);
return -1;
}
gc_args.more++;
} else if (rt->rt6i_flags & RTF_CACHE) {
if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
(long)(now - rt->u.dst.lastuse) >= gc_args.timeout) {
time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
RT6_TRACE("aging clone %p\n", rt);
return -1;
}
......
......@@ -39,6 +39,7 @@
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
#include <net/snmp.h>
......@@ -564,7 +565,7 @@ static int ip6_dst_gc()
static unsigned long last_gc;
unsigned long now = jiffies;
if ((long)(now - last_gc) < ip6_rt_gc_min_interval &&
if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
goto out;
......@@ -1751,27 +1752,28 @@ static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
extern struct rt6_statistics rt6_stats;
static int rt6_proc_stats(char *buffer, char **start, off_t offset, int length)
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
int len;
len = sprintf(buffer, "%04x %04x %04x %04x %04x %04x\n",
seq_printf(seq, "%04x %04x %04x %04x %04x %04x\n",
rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
rt6_stats.fib_rt_cache,
atomic_read(&ip6_dst_ops.entries));
len -= offset;
if (len > length)
len = length;
if(len < 0)
len = 0;
*start = buffer + offset;
return 0;
}
return len;
static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, rt6_stats_seq_show, NULL);
}
static struct file_operations rt6_stats_seq_fops = {
.open = rt6_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL
......@@ -1877,6 +1879,8 @@ ctl_table ipv6_route_table[] = {
void __init ip6_route_init(void)
{
struct proc_dir_entry *p;
ip6_dst_ops.kmem_cachep = kmem_cache_create("ip6_dst_cache",
sizeof(struct rt6_info),
0, SLAB_HWCACHE_ALIGN,
......@@ -1884,7 +1888,9 @@ void __init ip6_route_init(void)
fib6_init();
#ifdef CONFIG_PROC_FS
proc_net_create("ipv6_route", 0, rt6_proc_info);
proc_net_create("rt6_stats", 0, rt6_proc_stats);
p = create_proc_entry("rt6_stats", S_IRUGO, proc_net);
if (p)
p->proc_fops = &rt6_stats_seq_fops;
#endif
xfrm6_init();
}
......@@ -1894,7 +1900,7 @@ void ip6_route_cleanup(void)
{
#ifdef CONFIG_PROC_FS
proc_net_remove("ipv6_route");
proc_net_remove("rt6_stats");
remove_proc_entry("rt6_stats", proc_net);
#endif
xfrm6_fini();
rt6_ifdown(NULL);
......
......@@ -1280,26 +1280,26 @@ asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr, int *upeer_a
* We don't need try_module_get here, as the listening socket (sock)
* has the protocol module (sock->ops->owner) held.
*/
__module_get(sock->ops->owner);
__module_get(newsock->ops->owner);
err = sock->ops->accept(sock, newsock, sock->file->f_flags);
if (err < 0)
goto out_module_put;
goto out_release;
if (upeer_sockaddr) {
if(newsock->ops->getname(newsock, (struct sockaddr *)address, &len, 2)<0) {
err = -ECONNABORTED;
goto out_module_put;
goto out_release;
}
err = move_addr_to_user(address, len, upeer_sockaddr, upeer_addrlen);
if (err < 0)
goto out_module_put;
goto out_release;
}
/* File flags are not inherited via accept() unlike another OSes. */
if ((err = sock_map_fd(newsock)) < 0)
goto out_module_put;
goto out_release;
security_socket_post_accept(sock, newsock);
......@@ -1307,8 +1307,6 @@ asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr, int *upeer_a
sockfd_put(sock);
out:
return err;
out_module_put:
module_put(sock->ops->owner);
out_release:
sock_release(newsock);
goto out_put;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment