Commit 00dbd26b authored by David S. Miller

Merge bk://kernel.bkbits.net/acme/net-2.5

into nuts.ninka.net:/home/davem/src/BK/net-2.5
parents 62bfb89d a7e96d27
......@@ -72,8 +72,6 @@
#include "legacy/pdc4030.h"
static int driver_blocked;
static inline u32 idedisk_read_24 (ide_drive_t *drive)
{
u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG);
......@@ -132,8 +130,24 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
return 0; /* lba_capacity value may be bad */
}
static int idedisk_start_tag(ide_drive_t *drive, struct request *rq)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&ide_lock, flags);
if (ata_pending_commands(drive) < drive->queue_depth)
ret = blk_queue_start_tag(&drive->queue, rq);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;
}
#ifndef CONFIG_IDE_TASKFILE_IO
static int driver_blocked;
/*
* read_intr() is the handler for disk read/multread interrupts
*/
......@@ -344,20 +358,6 @@ static ide_startstop_t multwrite_intr (ide_drive_t *drive)
return DRIVER(drive)->error(drive, "multwrite_intr", stat);
}
static int idedisk_start_tag(ide_drive_t *drive, struct request *rq)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&ide_lock, flags);
if (ata_pending_commands(drive) < drive->queue_depth)
ret = blk_queue_start_tag(&drive->queue, rq);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;
}
/*
* do_rw_disk() issues READ and WRITE commands to a disk,
* using LBA if supported, or CHS otherwise, to address sectors.
......@@ -744,7 +744,7 @@ static ide_startstop_t lba_48_rw_disk (ide_drive_t *drive, struct request *rq, u
args.tfRegister[IDE_FEATURE_OFFSET] = sectors;
args.tfRegister[IDE_NSECTOR_OFFSET] = rq->tag << 3;
args.hobRegister[IDE_FEATURE_OFFSET_HOB] = sectors >> 8;
args.hobRegister[IDE_NSECT_OFFSET_HOB] = 0;
args.hobRegister[IDE_NSECTOR_OFFSET_HOB] = 0;
} else {
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.hobRegister[IDE_NSECTOR_OFFSET_HOB] = sectors >> 8;
......
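In the queued branch of lba_48_rw_disk() above, the transfer length moves into the FEATURE/FEATURE_HOB pair and the block tag is written to the sector-count register; the hunk also corrects the hob define name from IDE_NSECT_OFFSET_HOB to IDE_NSECTOR_OFFSET_HOB. A brief annotated restatement of that branch, assuming the READ/WRITE DMA QUEUED EXT register layout (tag in bits 7:3 of the sector count) and using blk_rq_tagged() as a stand-in for however the driver detects a tagged request:

        if (blk_rq_tagged(rq)) {
                /* queued command: length rides in FEATURE/FEATURE_HOB ... */
                args.tfRegister[IDE_FEATURE_OFFSET]      = sectors;
                args.hobRegister[IDE_FEATURE_OFFSET_HOB] = sectors >> 8;
                /* ... and the tag occupies bits 7:3 of the sector count */
                args.tfRegister[IDE_NSECTOR_OFFSET]      = rq->tag << 3;
                args.hobRegister[IDE_NSECTOR_OFFSET_HOB] = 0;
        } else {
                /* untagged command: plain 16-bit sector count */
                args.tfRegister[IDE_NSECTOR_OFFSET]      = sectors;
                args.hobRegister[IDE_NSECTOR_OFFSET_HOB] = sectors >> 8;
        }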
......@@ -189,15 +189,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
case WIN_WRITEDMA_ONCE:
case WIN_WRITEDMA:
case WIN_WRITEDMA_EXT:
if (hwif->ide_dma_write(drive))
return ide_stopped;
if (!hwif->ide_dma_write(drive))
return ide_started;
break;
case WIN_READDMA_ONCE:
case WIN_READDMA:
case WIN_READDMA_EXT:
case WIN_IDENTIFY_DMA:
if (hwif->ide_dma_read(drive))
return ide_stopped;
if (!hwif->ide_dma_read(drive))
return ide_started;
break;
case WIN_READDMA_QUEUED:
case WIN_READDMA_QUEUED_EXT:
......
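The do_rw_taskfile() hunk above inverts the test on the DMA hooks: a zero return from ide_dma_read()/ide_dma_write() means the transfer has been handed to the engine, so the routine reports ide_started; a non-zero return now breaks out of the switch to whatever fallback handling follows, instead of immediately claiming ide_stopped. One branch restated with comments (the read and identify cases follow the same shape):

        case WIN_WRITEDMA:
        case WIN_WRITEDMA_EXT:
                if (!hwif->ide_dma_write(drive))
                        return ide_started;     /* 0: DMA set up, engine owns the request */
                break;                          /* non-zero: setup failed, fall through */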
......@@ -63,7 +63,7 @@ static spinlock_t macio_lock = SPIN_LOCK_UNLOCKED;
static int macio_probe(void);
static int macio_init(void);
static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs);
static irqreturn_t macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs);
static int macio_send_request(struct adb_request *req, int sync);
static int macio_adb_autopoll(int devs);
static void macio_adb_poll(void);
......@@ -198,7 +198,8 @@ static int macio_send_request(struct adb_request *req, int sync)
return 0;
}
static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
static irqreturn_t macio_adb_interrupt(int irq, void *arg,
struct pt_regs *regs)
{
int i, n, err;
struct adb_request *req;
......@@ -206,9 +207,11 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
int ibuf_len = 0;
int complete = 0;
int autopoll = 0;
int handled = 0;
spin_lock(&macio_lock);
if (in_8(&adb->intr.r) & TAG) {
handled = 1;
if ((req = current_req) != 0) {
/* put the current request in */
for (i = 0; i < req->nbytes; ++i)
......@@ -229,6 +232,7 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
}
if (in_8(&adb->intr.r) & DFB) {
handled = 1;
err = in_8(&adb->error.r);
if (current_req && current_req->sent) {
/* this is the response to a command */
......@@ -266,6 +270,8 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
}
if (ibuf_len)
adb_input(ibuf, ibuf_len, regs, autopoll);
return IRQ_RETVAL(handled);
}
static void macio_adb_poll(void)
......
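The macio-adb hunk above, and the macserial, via-cuda and via-pmu hunks that follow, convert their interrupt handlers to the 2.5 irqreturn_t convention: each handler reports whether it actually serviced its device so the core can detect screaming or misrouted interrupt lines. A minimal sketch of the pattern the converted handlers use; irqreturn_t, IRQ_NONE, IRQ_HANDLED and IRQ_RETVAL() come from <linux/interrupt.h>, while struct example_dev and device_raised_irq() are hypothetical placeholders:

        #include <linux/interrupt.h>    /* irqreturn_t, IRQ_NONE, IRQ_HANDLED, IRQ_RETVAL */

        static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        {
                struct example_dev *dev = dev_id;       /* hypothetical device type */
                int handled = 0;

                if (device_raised_irq(dev)) {           /* hypothetical status check */
                        handled = 1;
                        /* ... service the device ... */
                }
                /* IRQ_RETVAL(x) is IRQ_HANDLED when x is non-zero, IRQ_NONE otherwise */
                return IRQ_RETVAL(handled);
        }

Handlers that can tell up front that they were not the source (for example rs_rxdma_irq() with DMA not initialised) return IRQ_NONE directly, and trivial ones such as the unused rs_txdma_irq() simply return IRQ_HANDLED.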
......@@ -154,8 +154,8 @@ static int set_scc_power(struct mac_serial * info, int state);
static int setup_scc(struct mac_serial * info);
static void dbdma_reset(volatile struct dbdma_regs *dma);
static void dbdma_flush(volatile struct dbdma_regs *dma);
static void rs_txdma_irq(int irq, void *dev_id, struct pt_regs *regs);
static void rs_rxdma_irq(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t rs_txdma_irq(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t rs_rxdma_irq(int irq, void *dev_id, struct pt_regs *regs);
static void dma_init(struct mac_serial * info);
static void rxdma_start(struct mac_serial * info, int current);
static void rxdma_to_tty(struct mac_serial * info);
......@@ -558,18 +558,19 @@ static _INLINE_ void receive_special_dma(struct mac_serial *info)
/*
* This is the serial driver's generic interrupt routine
*/
static void rs_interrupt(int irq, void *dev_id, struct pt_regs * regs)
static irqreturn_t rs_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
struct mac_serial *info = (struct mac_serial *) dev_id;
unsigned char zs_intreg;
int shift;
unsigned long flags;
int handled = 0;
if (!(info->flags & ZILOG_INITIALIZED)) {
printk(KERN_WARNING "rs_interrupt: irq %d, port not "
"initialized\n", irq);
disable_irq(irq);
return;
return IRQ_NONE;
}
/* NOTE: The read register 3, which holds the irq status,
......@@ -595,6 +596,7 @@ static void rs_interrupt(int irq, void *dev_id, struct pt_regs * regs)
if ((zs_intreg & CHAN_IRQMASK) == 0)
break;
handled = 1;
if (zs_intreg & CHBRxIP) {
/* If we are doing DMA, we only ask for interrupts
......@@ -610,30 +612,32 @@ static void rs_interrupt(int irq, void *dev_id, struct pt_regs * regs)
status_handle(info);
}
spin_unlock_irqrestore(&info->lock, flags);
return IRQ_RETVAL(handled);
}
/* Transmit DMA interrupt - not used at present */
static void rs_txdma_irq(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t rs_txdma_irq(int irq, void *dev_id, struct pt_regs *regs)
{
return IRQ_HANDLED;
}
/*
* Receive DMA interrupt.
*/
static void rs_rxdma_irq(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t rs_rxdma_irq(int irq, void *dev_id, struct pt_regs *regs)
{
struct mac_serial *info = (struct mac_serial *) dev_id;
volatile struct dbdma_cmd *cd;
if (!info->dma_initted)
return;
return IRQ_NONE;
spin_lock(&info->rx_dma_lock);
/* First, confirm that this interrupt is, indeed, coming */
/* from Rx DMA */
cd = info->rx_cmds[info->rx_cbuf] + 2;
if ((in_le16(&cd->xfer_status) & (RUN | ACTIVE)) != (RUN | ACTIVE)) {
spin_unlock(&info->rx_dma_lock);
return;
return IRQ_NONE;
}
if (info->rx_fbuf != RX_NO_FBUF) {
info->rx_cbuf = info->rx_fbuf;
......@@ -643,6 +647,7 @@ static void rs_rxdma_irq(int irq, void *dev_id, struct pt_regs *regs)
info->rx_fbuf = RX_NO_FBUF;
}
spin_unlock(&info->rx_dma_lock);
return IRQ_HANDLED;
}
/*
......@@ -2660,9 +2665,9 @@ int macserial_init(void)
callout_driver.proc_entry = 0;
if (tty_register_driver(&serial_driver))
panic("Couldn't register serial driver\n");
printk(KERN_ERR "Error: couldn't register serial driver\n");
if (tty_register_driver(&callout_driver))
panic("Couldn't register callout driver\n");
printk(KERN_ERR "Error: couldn't register callout driver\n");
for (channel = 0; channel < zs_channels_found; ++channel) {
#ifdef CONFIG_KGDB
......
......@@ -107,7 +107,7 @@ static int cuda_reset_adb_bus(void);
static int cuda_init_via(void);
static void cuda_start(void);
static void cuda_interrupt(int irq, void *arg, struct pt_regs *regs);
static irqreturn_t cuda_interrupt(int irq, void *arg, struct pt_regs *regs);
static void cuda_input(unsigned char *buf, int nb, struct pt_regs *regs);
void cuda_poll(void);
static int cuda_write(struct adb_request *req);
......@@ -441,7 +441,7 @@ cuda_poll()
local_irq_restore(flags);
}
static void
static irqreturn_t
cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
{
int status;
......@@ -457,7 +457,7 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
out_8(&via[IFR], virq);
if ((virq & SR_INT) == 0) {
spin_unlock(&cuda_lock);
return;
return IRQ_NONE;
}
status = (~in_8(&via[B]) & (TIP|TREQ)) | (in_8(&via[ACR]) & SR_OUT);
......@@ -595,6 +595,7 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
}
if (ibuf_len)
cuda_input(ibuf, ibuf_len, regs);
return IRQ_HANDLED;
}
static void
......
......@@ -180,8 +180,8 @@ static int pmu_adb_reset_bus(void);
static int init_pmu(void);
static int pmu_queue_request(struct adb_request *req);
static void pmu_start(void);
static void via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs);
static void gpio1_interrupt(int irq, void *arg, struct pt_regs *regs);
static irqreturn_t via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs);
static irqreturn_t gpio1_interrupt(int irq, void *arg, struct pt_regs *regs);
static int proc_get_info(char *page, char **start, off_t off,
int count, int *eof, void *data);
#ifdef CONFIG_PMAC_BACKLIGHT
......@@ -1393,7 +1393,7 @@ pmu_sr_intr(struct pt_regs *regs)
return NULL;
}
static void __pmac
static irqreturn_t __pmac
via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
{
unsigned long flags;
......@@ -1401,6 +1401,7 @@ via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
int nloop = 0;
int int_data = -1;
struct adb_request *req = NULL;
int handled = 0;
/* This is a bit brutal, we can probably do better */
spin_lock_irqsave(&pmu_lock, flags);
......@@ -1410,6 +1411,7 @@ via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
intr = in_8(&via[IFR]) & (SR_INT | CB1_INT);
if (intr == 0)
break;
handled = 1;
if (++nloop > 1000) {
printk(KERN_DEBUG "PMU: stuck in intr loop, "
"intr=%x, ier=%x pmu_state=%d\n",
......@@ -1473,15 +1475,19 @@ via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
int_data = -1;
goto recheck;
}
return IRQ_RETVAL(handled);
}
static void __pmac
static irqreturn_t __pmac
gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
{
if ((in_8(gpio_reg + 0x9) & 0x02) == 0) {
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
#ifdef CONFIG_PMAC_BACKLIGHT
......
......@@ -227,25 +227,6 @@ typedef unsigned char byte; /* used everywhere */
#define PRD_BYTES 8
#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES))
/*
* Our Physical Region Descriptor (PRD) table should be large enough
* to handle the biggest I/O request we are likely to see. Since requests
* can have no more than 256 sectors, and since the typical blocksize is
* two or more sectors, we could get by with a limit of 128 entries here for
* the usual worst case. Most requests seem to include some contiguous blocks,
* further reducing the number of table entries required.
*
* The driver reverts to PIO mode for individual requests that exceed
* this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
* 100% of all crazy scenarios here is not necessary.
*
* As it turns out though, we must allocate a full 4KB page for this,
* so the two PRD tables (ide0 & ide1) will each get half of that,
* allowing each to have about 256 entries (8 bytes each) from this.
*/
#define PRD_BYTES 8
#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES))
/*
* Some more useful definitions
*/
......
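The ide.h hunk removes a duplicated PRD_BYTES/PRD_ENTRIES definition together with the sizing comment that sat between the two copies. For reference, the arithmetic the surviving defines encode, assuming the usual 4 KB PAGE_SIZE:

        PRD_ENTRIES = PAGE_SIZE / (2 * PRD_BYTES) = 4096 / 16 = 256 entries of 8 bytes

i.e. half a page of descriptors each for ide0 and ide1, comfortably covering a worst-case 256-sector request.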
......@@ -30,6 +30,7 @@
#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
#define BRNF_DONT_TAKE_PARENT 0x04
#define BRNF_BRIDGED 0x08
enum nf_br_hook_priorities {
NF_BR_PRI_FIRST = INT_MIN,
......
......@@ -5,11 +5,16 @@
#include <linux/if.h>
#endif
#define IPT_PHYSDEV_OP_MATCH_IN 0x01
#define IPT_PHYSDEV_OP_MATCH_OUT 0x02
#define IPT_PHYSDEV_OP_IN 0x01
#define IPT_PHYSDEV_OP_OUT 0x02
#define IPT_PHYSDEV_OP_BRIDGED 0x04
#define IPT_PHYSDEV_OP_ISIN 0x08
#define IPT_PHYSDEV_OP_ISOUT 0x10
#define IPT_PHYSDEV_OP_MASK (0x20 - 1)
struct ipt_physdev_info {
u_int8_t invert;
u_int8_t bitmask;
char physindev[IFNAMSIZ];
char in_mask[IFNAMSIZ];
char physoutdev[IFNAMSIZ];
......
......@@ -348,6 +348,8 @@ static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb,
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
}
nf_bridge->physoutdev = skb->dev;
......
......@@ -300,8 +300,9 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
write_lock_bh(&queue_lock);
if (!peer_pid)
goto err_out_unlock;
goto err_out_free_nskb;
/* netlink_unicast will either free the nskb or attach it to a socket */
status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
if (status < 0)
goto err_out_unlock;
......@@ -313,6 +314,9 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
write_unlock_bh(&queue_lock);
return status;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
write_unlock_bh(&queue_lock);
......
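The ip_queue hunks make ipq_enqueue_packet() free the freshly built netlink message when no userspace listener is registered, instead of jumping to the unlock label without freeing it; once netlink_unicast() is called it consumes nskb on both success and failure. A minimal sketch of the corrected error ordering, with the message-building and queueing steps elided:

        write_lock_bh(&queue_lock);

        if (!peer_pid)
                goto err_out_free_nskb;         /* nobody listening: drop our copy */

        /* netlink_unicast() either delivers nskb or frees it itself */
        status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
        if (status < 0)
                goto err_out_unlock;

        /* ... queue the original packet ... */
        write_unlock_bh(&queue_lock);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        write_unlock_bh(&queue_lock);
        /* ... shared error exit ... */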
......@@ -11,7 +11,6 @@
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>
struct in_device;
#include <net/route.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_REJECT.h>
......@@ -71,8 +70,7 @@ static void send_reset(struct sk_buff *oldskb, int local)
.saddr = (local ?
oldskb->nh.iph->daddr :
0),
.tos = (RT_TOS(oldskb->nh.iph->tos) |
RTO_CONN) } } };
.tos = RT_TOS(oldskb->nh.iph->tos) } } };
/* Routing: if not headed for us, route won't like source */
if (ip_route_output_key(&rt, &fl))
......@@ -88,8 +86,10 @@ static void send_reset(struct sk_buff *oldskb, int local)
hh_len of incoming interface < hh_len of outgoing interface */
nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
GFP_ATOMIC);
if (!nskb)
if (!nskb) {
dst_release(&rt->u.dst);
return;
}
dst_release(nskb->dst);
nskb->dst = &rt->u.dst;
......
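The ipt_REJECT hunks drop the RTO_CONN bit from the route lookup key and, more importantly, release the route when skb_copy_expand() fails: ip_route_output_key() returns a referenced route, and every early exit after it has to give that reference back or the entry leaks. The failure path restated with comments:

        if (ip_route_output_key(&rt, &fl))
                return;                         /* no route, nothing held */

        nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
                               GFP_ATOMIC);
        if (!nskb) {
                dst_release(&rt->u.dst);        /* the fix: drop the route reference */
                return;
        }

        dst_release(nskb->dst);                 /* replace the copied dst ...         */
        nskb->dst = &rt->u.dst;                 /* ... with the route looked up above */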
......@@ -4,6 +4,9 @@
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ipt_physdev.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_bridge.h>
#define MATCH 1
#define NOMATCH 0
static int
match(const struct sk_buff *skb,
......@@ -25,29 +28,62 @@ match(const struct sk_buff *skb,
/* Not a bridged IP packet or no info available yet:
* LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
* the destination device will be a bridge. */
if (!(nf_bridge = skb->nf_bridge))
return 1;
if (!(nf_bridge = skb->nf_bridge)) {
/* Return MATCH if the invert flags of the used options are on */
if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
!(info->invert & IPT_PHYSDEV_OP_BRIDGED))
return NOMATCH;
if ((info->bitmask & IPT_PHYSDEV_OP_ISIN) &&
!(info->invert & IPT_PHYSDEV_OP_ISIN))
return NOMATCH;
if ((info->bitmask & IPT_PHYSDEV_OP_ISOUT) &&
!(info->invert & IPT_PHYSDEV_OP_ISOUT))
return NOMATCH;
if ((info->bitmask & IPT_PHYSDEV_OP_IN) &&
!(info->invert & IPT_PHYSDEV_OP_IN))
return NOMATCH;
if ((info->bitmask & IPT_PHYSDEV_OP_OUT) &&
!(info->invert & IPT_PHYSDEV_OP_OUT))
return NOMATCH;
return MATCH;
}
indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
outdev = nf_bridge->physoutdev ?
nf_bridge->physoutdev->name : nulldevname;
/* This only makes sense in the FORWARD and POSTROUTING chains */
if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
(!!(nf_bridge->mask & BRNF_BRIDGED) ^
!(info->invert & IPT_PHYSDEV_OP_BRIDGED)))
return NOMATCH;
if ((info->bitmask & IPT_PHYSDEV_OP_ISIN &&
(!nf_bridge->physindev ^ !!(info->invert & IPT_PHYSDEV_OP_ISIN))) ||
(info->bitmask & IPT_PHYSDEV_OP_ISOUT &&
(!nf_bridge->physoutdev ^ !!(info->invert & IPT_PHYSDEV_OP_ISOUT))))
return NOMATCH;
if (!(info->bitmask & IPT_PHYSDEV_OP_IN))
goto match_outdev;
indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
ret |= (((const unsigned long *)indev)[i]
^ ((const unsigned long *)info->physindev)[i])
& ((const unsigned long *)info->in_mask)[i];
}
if ((ret == 0) ^ !(info->invert & IPT_PHYSDEV_OP_MATCH_IN))
return 0;
if ((ret == 0) ^ !(info->invert & IPT_PHYSDEV_OP_IN))
return NOMATCH;
match_outdev:
if (!(info->bitmask & IPT_PHYSDEV_OP_OUT))
return MATCH;
outdev = nf_bridge->physoutdev ?
nf_bridge->physoutdev->name : nulldevname;
for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
ret |= (((const unsigned long *)outdev)[i]
^ ((const unsigned long *)info->physoutdev)[i])
& ((const unsigned long *)info->out_mask)[i];
}
return (ret != 0) ^ !(info->invert & IPT_PHYSDEV_OP_MATCH_OUT);
return (ret != 0) ^ !(info->invert & IPT_PHYSDEV_OP_OUT);
}
static int
......@@ -57,9 +93,13 @@ checkentry(const char *tablename,
unsigned int matchsize,
unsigned int hook_mask)
{
const struct ipt_physdev_info *info = matchinfo;
if (matchsize != IPT_ALIGN(sizeof(struct ipt_physdev_info)))
return 0;
if (!(info->bitmask & IPT_PHYSDEV_OP_MASK) ||
info->bitmask & ~IPT_PHYSDEV_OP_MASK)
return 0;
return 1;
}
......
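The reworked physdev match splits every test into a selector bit and an invert bit: IPT_PHYSDEV_OP_IN and IPT_PHYSDEV_OP_OUT request masked name comparison against physindev/physoutdev (via in_mask/out_mask), IPT_PHYSDEV_OP_ISIN and IPT_PHYSDEV_OP_ISOUT only test whether a bridge port exists on that side, and IPT_PHYSDEV_OP_BRIDGED tests the new BRNF_BRIDGED flag that br_nf_forward sets, which only makes sense in the FORWARD and POSTROUTING chains. checkentry() now also insists that at least one bit inside IPT_PHYSDEV_OP_MASK is set and none outside it. Purely as an illustration of the structure (in practice the iptables physdev extension fills it in), a rule matching "bridged packet whose bridge ingress port is eth0" could be described as:

        struct ipt_physdev_info info;

        memset(&info, 0, sizeof(info));
        info.bitmask = IPT_PHYSDEV_OP_BRIDGED | IPT_PHYSDEV_OP_IN;
        info.invert  = 0;                       /* no tests inverted */
        strncpy(info.physindev, "eth0", IFNAMSIZ);
        memset(info.in_mask, 0xff, IFNAMSIZ);   /* simplification: compare the whole
                                                   zero-padded name buffer */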
......@@ -174,6 +174,8 @@ static void ipip_err(struct sk_buff *skb, u32 info)
static int ipip_init_state(struct xfrm_state *x, void *args)
{
if (!x->props.mode)
return -EINVAL;
x->props.header_len = sizeof(struct iphdr);
return 0;
......
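ipip_init_state() now rejects xfrm states configured for transport mode (props.mode == 0): IP-in-IP is inherently a tunnel, and the header_len it advertises is the cost of exactly one outer IPv4 header, sizeof(struct iphdr) = 20 bytes when no options are present.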
......@@ -304,8 +304,9 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
write_lock_bh(&queue_lock);
if (!peer_pid)
goto err_out_unlock;
goto err_out_free_nskb;
/* netlink_unicast will either free the nskb or attach it to a socket */
status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
if (status < 0)
goto err_out_unlock;
......@@ -317,6 +318,9 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
write_unlock_bh(&queue_lock);
return status;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
write_unlock_bh(&queue_lock);
......