Commit e6cc458d authored by Linus Torvalds

Import 2.3.4

parent 34cc16a7
......@@ -62,6 +62,7 @@ CONFIG_SUN_OPENPROMIO=m
CONFIG_SUN_MOSTEK_RTC=y
# CONFIG_SUN_BPP is not set
# CONFIG_SUN_VIDEOPIX is not set
CONFIG_SUN_AURORA=m
#
# Linux/SPARC audio subsystem (EXPERIMENTAL)
......
......@@ -68,6 +68,7 @@ CONFIG_SAB82532=y
CONFIG_OBP_FLASH=m
# CONFIG_SUN_BPP is not set
# CONFIG_SUN_VIDEOPIX is not set
CONFIG_SUN_AURORA=m
#
# Linux/SPARC audio subsystem (EXPERIMENTAL)
......
......@@ -11,4 +11,5 @@ fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'Bidirectional parallel port support (EXPERIMENTAL)' CONFIG_SUN_BPP
tristate 'Videopix Frame Grabber (EXPERIMENTAL)' CONFIG_SUN_VIDEOPIX
tristate 'Aurora Multiboard 1600se (EXPERIMENTAL)' CONFIG_SUN_AURORA
fi
......@@ -87,6 +87,14 @@ else
endif
endif
# Aurora multiport serial board driver: link into the kernel when
# configured 'y', build as a module when configured 'm'.
ifeq ($(CONFIG_SUN_AURORA),y)
O_OBJS += aurora.o
else
ifeq ($(CONFIG_SUN_AURORA),m)
M_OBJS += aurora.o
endif
endif
include $(TOPDIR)/Rules.make
sunkbdmap.o: sunkeymap.c
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/* $Id: sbus.c,v 1.76 1998/12/17 11:11:26 davem Exp $
/* $Id: sbus.c,v 1.77 1999/05/29 06:25:57 davem Exp $
* sbus.c: SBus support routines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -207,6 +207,9 @@ extern void auxio_probe(void);
#ifdef CONFIG_OBP_FLASH
extern int flash_init(void);
#endif
#ifdef CONFIG_SUN_AURORA
extern int aurora_init(void);
#endif
__initfunc(static void
sbus_do_child_siblings(int start_node, struct linux_sbus_device *child,
......@@ -441,6 +444,9 @@ __initfunc(void sbus_init(void))
#ifdef CONFIG_OBP_FLASH
flash_init();
#endif
#ifdef CONFIG_SUN_AURORA
aurora_init();
#endif
#ifdef __sparc_v9__
if (sparc_cpu_model == sun4u) {
extern void clock_probe(void);
......
......@@ -16,6 +16,9 @@ extern unsigned int local_bh_count;
#define local_bh_count (cpu_data[smp_processor_id()].bh_count)
#endif
#define local_bh_disable() (local_bh_count++)
#define local_bh_enable() (local_bh_count--)
/* The locking mechanism for base handlers, to prevent re-entrancy,
* is entirely private to an implementation, it should not be
* referenced at all outside of this file.
......
......@@ -35,6 +35,7 @@ enum {
SERIAL_BH,
RISCOM8_BH,
SPECIALIX_BH,
AURORA_BH,
ESP_BH,
NET_BH,
SCSI_BH,
......
......@@ -98,6 +98,8 @@
#define IDE6_MAJOR 88
#define IDE7_MAJOR 89
#define AURORA_MAJOR 79
#define UNIX98_PTY_MASTER_MAJOR 128
#define UNIX98_PTY_MAJOR_COUNT 8
#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
......
......@@ -32,15 +32,19 @@
#define CHECKSUM_UNNECESSARY 2
struct sk_buff_head {
/* These two members must be first. */
struct sk_buff * next;
struct sk_buff * prev;
__u32 qlen; /* Must be same length as a pointer
for using debugging */
__u32 qlen;
spinlock_t lock;
};
struct sk_buff {
/* These two members must be first. */
struct sk_buff * next; /* Next buffer in list */
struct sk_buff * prev; /* Previous buffer in list */
struct sk_buff_head * list; /* List we are on */
struct sock *sk; /* Socket we are owned by */
struct timeval stamp; /* Time we arrived */
......@@ -247,6 +251,7 @@ extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
list->prev = (struct sk_buff *)list;
list->next = (struct sk_buff *)list;
list->qlen = 0;
......@@ -273,15 +278,13 @@ extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buf
prev->next = newsk;
}
extern spinlock_t skb_queue_lock;
extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&list->lock, flags);
__skb_queue_head(list, newsk);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&list->lock, flags);
}
/*
......@@ -306,9 +309,9 @@ extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff
{
unsigned long flags;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&list->lock, flags);
__skb_queue_tail(list, newsk);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&list->lock, flags);
}
/*
......@@ -340,9 +343,9 @@ extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
long flags;
struct sk_buff *result;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue(list);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&list->lock, flags);
return result;
}
......@@ -369,9 +372,9 @@ extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&old->list->lock, flags);
__skb_insert(newsk, old->prev, old, old->list);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
......@@ -387,9 +390,9 @@ extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&old->list->lock, flags);
__skb_append(old, newsk);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
......@@ -419,12 +422,16 @@ extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *li
extern __inline__ void skb_unlink(struct sk_buff *skb)
{
unsigned long flags;
struct sk_buff_head *list = skb->list;
if(list) {
unsigned long flags;
spin_lock_irqsave(&skb_queue_lock, flags);
if(skb->list)
__skb_unlink(skb, skb->list);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_lock_irqsave(&list->lock, flags);
if(skb->list == list)
__skb_unlink(skb, skb->list);
spin_unlock_irqrestore(&list->lock, flags);
}
}
/* XXX: more streamlined implementation */
......@@ -441,9 +448,9 @@ extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
long flags;
struct sk_buff *result;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue_tail(list);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&list->lock, flags);
return result;
}
......
......@@ -83,7 +83,6 @@ extern int ip_mc_procinfo(char *, char **, off_t, int, int);
* Functions provided by ip.c
*/
extern int ip_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern void ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
u32 saddr, u32 daddr,
struct ip_options *opt);
......
......@@ -151,15 +151,15 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
is reentearble (it is not) or this function
is called by interrupts.
Protect it with global skb spinlock,
Protect it with skb queue spinlock,
though for now even this is overkill.
--ANK (980728)
*/
spin_lock_irqsave(&skb_queue_lock, cpu_flags);
spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags);
skb = skb_peek(&sk->receive_queue);
if(skb!=NULL)
atomic_inc(&skb->users);
spin_unlock_irqrestore(&skb_queue_lock, cpu_flags);
spin_unlock_irqrestore(&sk->receive_queue.lock, cpu_flags);
} else
skb = skb_dequeue(&sk->receive_queue);
......
......@@ -753,9 +753,9 @@ static void dev_clear_backlog(struct device *dev)
curr=curr->next;
if ( curr->prev->dev == dev ) {
prev = curr->prev;
spin_lock_irqsave(&skb_queue_lock, flags);
spin_lock_irqsave(&backlog.lock, flags);
__skb_unlink(prev, &backlog);
spin_unlock_irqrestore(&skb_queue_lock, flags);
spin_unlock_irqrestore(&backlog.lock, flags);
kfree_skb(prev);
}
}
......
......@@ -4,7 +4,7 @@
* Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
* Version: $Id: skbuff.c,v 1.55 1999/02/23 08:12:27 davem Exp $
* Version: $Id: skbuff.c,v 1.56 1999/05/29 23:20:42 davem Exp $
*
* Fixes:
* Alan Cox : Fixed the worst of the load balancer bugs.
......@@ -61,11 +61,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
/*
* Skb list spinlock
*/
spinlock_t skb_queue_lock = SPIN_LOCK_UNLOCKED;
/*
* Resource tracking variables
*/
......
......@@ -5,7 +5,7 @@
*
* PF_INET protocol family socket handler.
*
* Version: $Id: af_inet.c,v 1.89 1999/05/27 00:37:42 davem Exp $
* Version: $Id: af_inet.c,v 1.90 1999/05/29 04:30:38 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
......@@ -147,14 +147,8 @@ static __inline__ void kill_sk_queues(struct sock *sk)
struct sk_buff *skb;
/* First the read buffer. */
while((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
/* This will take care of closing sockets that were
* listening and didn't accept everything.
*/
if (skb->sk != NULL && skb->sk != sk)
skb->sk->prot->close(skb->sk, 0);
while((skb = skb_dequeue(&sk->receive_queue)) != NULL)
kfree_skb(skb);
}
/* Next, the error queue. */
while((skb = skb_dequeue(&sk->error_queue)) != NULL)
......
......@@ -3,7 +3,7 @@
*
* Alan Cox, <alan@cymru.net>
*
* Version: $Id: icmp.c,v 1.53 1999/05/12 11:24:32 davem Exp $
* Version: $Id: icmp.c,v 1.54 1999/05/30 01:16:22 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
......@@ -1152,6 +1152,11 @@ __initfunc(void icmp_init(struct net_proto_family *ops))
if ((err=ops->create(icmp_socket, IPPROTO_ICMP))<0)
panic("Failed to create the ICMP control socket.\n");
icmp_socket->sk->allocation=GFP_ATOMIC;
icmp_socket->sk->num = 256; /* Don't receive any data */
icmp_socket->sk->ip_ttl = MAXTTL;
/* Unhash it so that IP input processing does not even
* see it, we do not wish this socket to see incoming
* packets.
*/
icmp_socket->sk->prot->unhash(icmp_socket->sk);
}
......@@ -5,7 +5,7 @@
*
* The Internet Protocol (IP) module.
*
* Version: $Id: ip_input.c,v 1.37 1999/04/22 10:38:36 davem Exp $
* Version: $Id: ip_input.c,v 1.39 1999/05/30 01:16:25 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
......@@ -154,43 +154,10 @@
struct ip_mib ip_statistics={2,IPDEFTTL,}; /* Forwarding=No, Default TTL=64 */
/*
* Handle the issuing of an ioctl() request
* for the ip device. This is scheduled to
* disappear
*/
int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch(cmd)
{
default:
return(-EINVAL);
}
}
#if defined(CONFIG_IP_TRANSPARENT_PROXY) && !defined(CONFIG_IP_ALWAYS_DEFRAG)
#define CONFIG_IP_ALWAYS_DEFRAG 1
#endif
/*
* 0 - deliver
* 1 - block
*/
static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
{
int type;
type = skb->h.icmph->type;
if (type < 32)
return test_bit(type, &sk->tp_pinfo.tp_raw4.filter);
/* Do not block unknown ICMP types */
return 0;
}
/*
* Process Router Attention IP option
*/
......@@ -224,16 +191,37 @@ int ip_call_ra_chain(struct sk_buff *skb)
return 0;
}
/* Handle this out of line, it is rare. */
/* Walk the inet protocol chain and hand the datagram to every handler
 * registered for iph->protocol.  Kept out of line because multiple
 * handlers per protocol is the rare case; the caller must pass a
 * non-NULL ipprot (the chain head).
 *
 * Returns 1 if at least one handler received the packet, else 0.
 */
static int ip_run_ipprot(struct sk_buff *skb, struct iphdr *iph,
			 struct inet_protocol *ipprot, int force_copy)
{
	int delivered = 0;

	for (;;) {
		if (ipprot->protocol == iph->protocol) {
			struct sk_buff *pass = skb;

			/* Clone when this handler wants its own copy, or
			 * when the caller needs the original preserved
			 * (force_copy, e.g. a raw socket still pending).
			 */
			if (ipprot->copy || force_copy)
				pass = skb_clone(skb, GFP_ATOMIC);

			/* A failed clone silently skips this handler. */
			if (pass != NULL) {
				delivered = 1;
				ipprot->handler(pass,
						ntohs(iph->tot_len) - (iph->ihl * 4));
			}
		}
		ipprot = (struct inet_protocol *) ipprot->next;
		if (ipprot == NULL)
			break;
	}

	return delivered;
}
extern struct sock *raw_v4_input(struct sk_buff *, struct iphdr *, int);
/*
* Deliver IP Packets to the higher protocol layers.
*/
int ip_local_deliver(struct sk_buff *skb)
{
struct iphdr *iph = skb->nh.iph;
struct inet_protocol *ipprot;
struct sock *raw_sk=NULL;
unsigned char hash;
int flag = 0;
#ifndef CONFIG_IP_ALWAYS_DEFRAG
/*
......@@ -249,34 +237,29 @@ int ip_local_deliver(struct sk_buff *skb)
#endif
#ifdef CONFIG_IP_MASQUERADE
/*
* Do we need to de-masquerade this packet?
*/
{
int ret;
/*
* Some masq modules can re-inject packets if
* bad configured.
/* Do we need to de-masquerade this packet? */
if((IPCB(skb)->flags&IPSKB_MASQUERADED)) {
/* Some masq modules can re-inject packets if
* bad configured.
*/
printk(KERN_DEBUG "ip_input(): demasq recursion detected. "
"Check masq modules configuration\n");
kfree_skb(skb);
return 0;
} else {
int ret = ip_fw_demasquerade(&skb);
if((IPCB(skb)->flags&IPSKB_MASQUERADED)) {
printk(KERN_DEBUG "ip_input(): demasq recursion detected. Check masq modules configuration\n");
kfree_skb(skb);
return 0;
}
ret = ip_fw_demasquerade(&skb);
if (ret < 0) {
kfree_skb(skb);
return 0;
}
if (ret) {
iph=skb->nh.iph;
iph = skb->nh.iph;
IPCB(skb)->flags |= IPSKB_MASQUERADED;
dst_release(skb->dst);
skb->dst = NULL;
if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, skb->dev)) {
if (ip_route_input(skb, iph->daddr, iph->saddr,
iph->tos, skb->dev)) {
kfree_skb(skb);
return 0;
}
......@@ -285,112 +268,50 @@ int ip_local_deliver(struct sk_buff *skb)
}
#endif
/*
* Point into the IP datagram, just past the header.
*/
/* Point into the IP datagram, just past the header. */
skb->h.raw = skb->nh.raw + iph->ihl*4;
/*
* Deliver to raw sockets. This is fun as to avoid copies we want to make no
* surplus copies.
*
* RFC 1122: SHOULD pass TOS value up to the transport layer.
* -> It does. And not only TOS, but all IP header.
*/
/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
hash = iph->protocol & (MAX_INET_PROTOS - 1);
/*
* If there maybe a raw socket we must check - if not we don't care less
*/
if((raw_sk = raw_v4_htable[hash]) != NULL) {
struct sock *sknext = NULL;
struct sk_buff *skb1;
raw_sk = raw_v4_lookup(raw_sk, iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex);
if(raw_sk) { /* Any raw sockets */
do {
/* Find the next */
sknext = raw_v4_lookup(raw_sk->next, iph->protocol,
iph->saddr, iph->daddr, skb->dev->ifindex);
if (iph->protocol != IPPROTO_ICMP || !icmp_filter(raw_sk, skb)) {
if (sknext == NULL)
break;
skb1 = skb_clone(skb, GFP_ATOMIC);
if(skb1)
{
raw_rcv(raw_sk, skb1);
}
}
raw_sk = sknext;
} while(raw_sk!=NULL);
/* Here either raw_sk is the last raw socket, or NULL if
* none. We deliver to the last raw socket AFTER the
* protocol checks as it avoids a surplus copy.
*/
}
}
/*
* skb->h.raw now points at the protocol beyond the IP header.
*/
for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
{
struct sk_buff *skb2;
if (ipprot->protocol != iph->protocol)
continue;
/*
* See if we need to make a copy of it. This will
* only be set if more than one protocol wants it.
* and then not for the last one. If there is a pending
* raw delivery wait for that
/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
int hash = iph->protocol & (MAX_INET_PROTOS - 1);
struct sock *raw_sk = raw_v4_htable[hash];
struct inet_protocol *ipprot;
int flag;
/* If there maybe a raw socket we must check - if not we
* don't care less
*/
if (ipprot->copy || raw_sk)
{
skb2 = skb_clone(skb, GFP_ATOMIC);
if(skb2==NULL)
continue;
}
else
{
skb2 = skb;
}
flag = 1;
if(raw_sk != NULL)
raw_sk = raw_v4_input(skb, iph, hash);
ipprot = (struct inet_protocol *) inet_protos[hash];
flag = 0;
if(ipprot != NULL) {
if(raw_sk == NULL &&
ipprot->next == NULL &&
ipprot->protocol == iph->protocol) {
/* Fast path... */
return ipprot->handler(skb, (ntohs(iph->tot_len) -
(iph->ihl * 4)));
} else {
flag = ip_run_ipprot(skb, iph, ipprot, (raw_sk != NULL));
}
}
/*
* Pass on the datagram to each protocol that wants it,
* based on the datagram protocol. We should really
* check the protocol handler's return values here...
/* All protocols checked.
* If this packet was a broadcast, we may *not* reply to it, since that
* causes (proven, grin) ARP storms and a leakage of memory (i.e. all
* ICMP reply messages get queued up for transmission...)
*/
ipprot->handler(skb2, ntohs(iph->tot_len) - (iph->ihl * 4));
}
/*
* All protocols checked.
* If this packet was a broadcast, we may *not* reply to it, since that
* causes (proven, grin) ARP storms and a leakage of memory (i.e. all
* ICMP reply messages get queued up for transmission...)
*/
if(raw_sk!=NULL) /* Shift to last raw user */
{
raw_rcv(raw_sk, skb);
}
else if (!flag) /* Free and report errors */
{
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
kfree_skb(skb);
if(raw_sk != NULL) { /* Shift to last raw user */
raw_rcv(raw_sk, skb);
} else if (!flag) { /* Free and report errors */
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
kfree_skb(skb);
}
}
return(0);
return 0;
}
/*
......@@ -404,9 +325,8 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
u16 rport;
#endif /* CONFIG_FIREWALL */
/*
* When the interface is in promisc. mode, drop all the crap
* that it receives, do not try to analyse it.
/* When the interface is in promisc. mode, drop all the crap
* that it receives, do not try to analyse it.
*/
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
......@@ -430,17 +350,15 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
goto inhdr_error;
{
__u32 len = ntohs(iph->tot_len);
if (skb->len < len)
goto inhdr_error;
__u32 len = ntohs(iph->tot_len);
if (skb->len < len)
goto inhdr_error;
/*
* Our transport medium may have padded the buffer out. Now we know it
* is IP we can trim to the true length of the frame.
* Note this now means skb->len holds ntohs(iph->tot_len).
*/
__skb_trim(skb, len);
/* Our transport medium may have padded the buffer out. Now we know it
* is IP we can trim to the true length of the frame.
* Note this now means skb->len holds ntohs(iph->tot_len).
*/
__skb_trim(skb, len);
}
#ifdef CONFIG_IP_ALWAYS_DEFRAG
......
......@@ -5,7 +5,7 @@
*
* RAW - implementation of IP "raw" sockets.
*
* Version: $Id: raw.c,v 1.40 1999/05/27 00:37:48 davem Exp $
* Version: $Id: raw.c,v 1.41 1999/05/30 01:16:19 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
......@@ -124,13 +124,12 @@ static void raw_v4_rehash(struct sock *sk)
SOCKHASH_UNLOCK_WRITE();
}
/* Grumble... icmp and ip_input want to get at this... */
struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
unsigned long raddr, unsigned long laddr, int dif)
static __inline__ struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
unsigned long raddr, unsigned long laddr,
int dif)
{
struct sock *s = sk;
SOCKHASH_LOCK_READ();
for(s = sk; s; s = s->next) {
if((s->num == num) &&
!(s->dead && (s->state == TCP_CLOSE)) &&
......@@ -139,10 +138,79 @@ struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
!(s->bound_dev_if && s->bound_dev_if != dif))
break; /* gotcha */
}
SOCKHASH_UNLOCK_READ();
return s;
}
/* Public raw-socket lookup: same match rules as __raw_v4_lookup(),
 * but takes the socket hash reader lock around the chain walk so it
 * is safe for callers outside the input path (e.g. icmp).
 */
struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
			   unsigned long raddr, unsigned long laddr,
			   int dif)
{
	struct sock *match;

	SOCKHASH_LOCK_READ();
	match = __raw_v4_lookup(sk, num, raddr, laddr, dif);
	SOCKHASH_UNLOCK_READ();

	return match;
}
/*
* 0 - deliver
* 1 - block
*/
/* Per-socket ICMP type filter for raw sockets.
 * Returns 0 to deliver the packet, non-zero to block it.
 */
static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
{
	int icmp_type = skb->h.icmph->type;

	/* Types outside the filter bitmap (>= 32) are never blocked. */
	if (icmp_type >= 32)
		return 0;

	return test_bit(icmp_type, &sk->tp_pinfo.tp_raw4.filter);
}
/* IP input processing comes here for RAW socket delivery.
* This is fun as to avoid copies we want to make no surplus
* copies.
*
* RFC 1122: SHOULD pass TOS value up to the transport layer.
* -> It does. And not only TOS, but all IP header.
*/
/* Deliver an incoming IP datagram to every matching raw socket in the
 * given hash bucket.  Each intermediate match gets a clone of skb; the
 * LAST matching socket is returned to the caller instead, so the caller
 * can hand it the original skb and save one clone.
 * Returns the last matching socket, or NULL if none matched.
 */
struct sock *raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
{
	struct sock *sk;

	/* Reader-side, BH-safe hash lock held while walking the chain. */
	SOCKHASH_LOCK_READ_BH();
	if ((sk = raw_v4_htable[hash]) == NULL)
		goto out;
	sk = __raw_v4_lookup(sk, iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);
	while(sk != NULL) {
		/* Look up the NEXT match before delivering, so we know
		 * whether the current socket is the last one (which is
		 * returned rather than given a clone).
		 */
		struct sock *sknext = __raw_v4_lookup(sk->next, iph->protocol,
						      iph->saddr, iph->daddr,
						      skb->dev->ifindex);

		/* ICMP packets are subject to the per-socket type filter. */
		if (iph->protocol != IPPROTO_ICMP ||
		    ! icmp_filter(sk, skb)) {
			struct sk_buff *clone;

			if(sknext == NULL)
				break;	/* last match: return it to the caller */
			clone = skb_clone(skb, GFP_ATOMIC);
			/* Clone failure (memory pressure) silently skips
			 * delivery to this socket.
			 */
			if(clone) {
				/* Drop the hash lock across raw_rcv() —
				 * NOTE(review): presumably raw_rcv may not be
				 * called with it held; confirm lock rules.
				 */
				SOCKHASH_UNLOCK_READ_BH();
				raw_rcv(sk, clone);
				SOCKHASH_LOCK_READ_BH();
			}
		}
		sk = sknext;
	}
out:
	SOCKHASH_UNLOCK_READ_BH();
	return sk;
}
void raw_err (struct sock *sk, struct sk_buff *skb)
{
int type = skb->h.icmph->type;
......
......@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Version: $Id: tcp_input.c,v 1.166 1999/05/27 00:37:22 davem Exp $
* Version: $Id: tcp_input.c,v 1.167 1999/05/29 22:37:54 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
......@@ -1838,7 +1838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}
}
flg = *(((u32 *)th) + 3) & ~htonl(0x8 << 16);
flg = *(((u32 *)th) + 3) & ~htonl(0xFC8 << 16);
/* pred_flags is 0xS?10 << 16 + snd_wnd
* if header_predition is to be made
......
......@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Version: $Id: tcp_ipv4.c,v 1.177 1999/05/27 00:37:27 davem Exp $
* Version: $Id: tcp_ipv4.c,v 1.178 1999/05/30 01:16:27 davem Exp $
*
* IPv4 specific functions
*
......@@ -2011,6 +2011,11 @@ __initfunc(void tcp_v4_init(struct net_proto_family *ops))
if ((err=ops->create(tcp_socket, IPPROTO_TCP))<0)
panic("Failed to create the TCP control socket.\n");
tcp_socket->sk->allocation=GFP_ATOMIC;
tcp_socket->sk->num = 256; /* Don't receive any data */
tcp_socket->sk->ip_ttl = MAXTTL;
/* Unhash it so that IP input processing does not even
* see it, we do not wish this socket to see incoming
* packets.
*/
tcp_socket->sk->prot->unhash(tcp_socket->sk);
}
......@@ -163,7 +163,6 @@ EXPORT_SYMBOL(put_cmsg);
EXPORT_SYMBOL(net_families);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(skb_queue_lock);
#ifdef CONFIG_FILTER
EXPORT_SYMBOL(sk_run_filter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment