Commit d378aca6 authored by Jeff Garzik

Merge branch 'master'

parents abc71c46 7705a879
......@@ -120,7 +120,6 @@ D: Author of lil (Linux Interrupt Latency benchmark)
D: Fixed the shm swap deallocation at swapoff time (try_to_unuse message)
D: VM hacker
D: Various other kernel hacks
S: Via Cicalini 26
S: Imola 40026
S: Italy
......
......@@ -1752,7 +1752,8 @@ P: Ralf Baechle
M: ralf@linux-mips.org
W: http://www.linux-mips.org/
L: linux-mips@linux-mips.org
S: Maintained
T: git www.linux-mips.org:/pub/scm/linux.git
S: Supported
MISCELLANEOUS MCA-SUPPORT
P: James Bottomley
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 16
EXTRAVERSION =-rc6
EXTRAVERSION =
NAME=Sliding Snow Leopard
# *DOCUMENTATION*
......
......@@ -1029,6 +1029,16 @@ int __devinit smp_prepare_cpu(int cpu)
int apicid, ret;
lock_cpu_hotplug();
/*
* On x86, CPU0 is never offlined. Trying to bring up an
* already-booted CPU will hang. So check for that case.
*/
if (cpu_online(cpu)) {
ret = -EINVAL;
goto exit;
}
apicid = x86_cpu_to_apicid[cpu];
if (apicid == BAD_APICID) {
ret = -ENODEV;
......
......@@ -108,7 +108,8 @@ MODFLAGS += -mlong-calls
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL)
cflags-$(CONFIG_SB1XXX_CORELIS) += -mno-sched-prolog -fno-omit-frame-pointer
cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
-fno-omit-frame-pointer
#
# Use: $(call set_gccflags,<cpu0>,<isa0>,<cpu1>,<isa1>,<isa2>)
......
......@@ -9,7 +9,6 @@
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
......@@ -20,17 +19,11 @@
#include <asm/bootinfo.h>
#include <asm/system.h>
extern void *sgiwd93_host;
extern void reset_wd33c93(void *instance);
VOID
ArcHalt(VOID)
{
bc_disable();
local_irq_disable();
#ifdef CONFIG_SCSI_SGIWD93
reset_wd33c93(sgiwd93_host);
#endif
ARC_CALL0(halt);
never: goto never;
}
......@@ -40,9 +33,6 @@ ArcPowerDown(VOID)
{
bc_disable();
local_irq_disable();
#ifdef CONFIG_SCSI_SGIWD93
reset_wd33c93(sgiwd93_host);
#endif
ARC_CALL0(pdown);
never: goto never;
}
......@@ -53,9 +43,6 @@ ArcRestart(VOID)
{
bc_disable();
local_irq_disable();
#ifdef CONFIG_SCSI_SGIWD93
reset_wd33c93(sgiwd93_host);
#endif
ARC_CALL0(restart);
never: goto never;
}
......@@ -65,9 +52,6 @@ ArcReboot(VOID)
{
bc_disable();
local_irq_disable();
#ifdef CONFIG_SCSI_SGIWD93
reset_wd33c93(sgiwd93_host);
#endif
ARC_CALL0(reboot);
never: goto never;
}
......@@ -77,9 +61,6 @@ ArcEnterInteractiveMode(VOID)
{
bc_disable();
local_irq_disable();
#ifdef CONFIG_SCSI_SGIWD93
reset_wd33c93(sgiwd93_host);
#endif
ARC_CALL0(imode);
never: goto never;
}
......
......@@ -164,17 +164,20 @@ void au1000_restart(char *command)
void au1000_halt(void)
{
#if defined(CONFIG_MIPS_PB1550)
#if defined(CONFIG_MIPS_PB1550) || defined(CONFIG_MIPS_DB1550)
/* power off system */
printk("\n** Powering off Pb1550\n");
printk("\n** Powering off...\n");
au_writew(au_readw(0xAF00001C) | (3<<14), 0xAF00001C);
au_sync();
while(1); /* should not get here */
#endif
#else
printk(KERN_NOTICE "\n** You can safely turn off the power\n");
#ifdef CONFIG_MIPS_MIRAGE
au_writel((1 << 26) | (1 << 10), GPIO2_OUTPUT);
#endif
#ifdef CONFIG_MIPS_DB1200
au_writew(au_readw(0xB980001C) | (1<<14), 0xB980001C);
#endif
#ifdef CONFIG_PM
au_sleep();
......@@ -187,6 +190,7 @@ void au1000_halt(void)
"wait\n\t"
".set\tmips0");
#endif
#endif /* defined(CONFIG_MIPS_PB1550) || defined(CONFIG_MIPS_DB1550) */
}
void au1000_power_off(void)
......
......@@ -424,6 +424,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
unsigned long j;
unsigned int count;
write_seqlock(&xtime_lock);
count = mips_hpt_read();
mips_timer_ack();
......@@ -441,7 +443,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
* called as close as possible to 500 ms before the new second starts.
*/
write_seqlock(&xtime_lock);
if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
......@@ -453,7 +454,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
last_rtc_update = xtime.tv_sec - 600;
}
}
write_sequnlock(&xtime_lock);
/*
* If jiffies has overflown in this timer_interrupt, we must
......@@ -496,6 +496,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
}
write_sequnlock(&xtime_lock);
/*
* In UP mode, we call local_timer_interrupt() to do profiling
* and process accounting.
......
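The two hunks above move write_seqlock() to the top of timer_interrupt() and push write_sequnlock() down past the jiffies bookkeeping, so the high-precision counter read, the timer ack and the RTC-update logic all sit inside a single xtime_lock writer section. A rough, self-contained sketch of the seqlock idea the code relies on (simplified: no writer spinlock, no memory barriers; every name here is invented for illustration):

#include <stdio.h>

static volatile unsigned int seq;       /* even: stable, odd: writer active */
static unsigned long fake_xtime;        /* stands in for xtime              */

static void writer_tick(void)
{
        seq++;                          /* like write_seqlock(): go odd     */
        fake_xtime++;                   /* every timekeeping update in here */
        seq++;                          /* like write_sequnlock(): go even  */
}

static unsigned long reader(void)
{
        unsigned int start;
        unsigned long v;

        do {
                start = seq;            /* like read_seqbegin()             */
                v = fake_xtime;
        } while ((start & 1) || seq != start);  /* retry on a torn read     */

        return v;
}

int main(void)
{
        writer_tick();
        printf("time = %lu\n", reader());
        return 0;
}

Anything a reader could observe half-updated has to happen between the two sequence increments; the hunks above appear to widen the writer section precisely so the counter read and ack are covered as well.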
......@@ -375,6 +375,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
struct flush_cache_page_args {
struct vm_area_struct *vma;
unsigned long addr;
unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
......@@ -382,6 +383,7 @@ static inline void local_r4k_flush_cache_page(void *args)
struct flush_cache_page_args *fcp_args = args;
struct vm_area_struct *vma = fcp_args->vma;
unsigned long addr = fcp_args->addr;
unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
......@@ -431,11 +433,12 @@ static inline void local_r4k_flush_cache_page(void *args)
* Do indexed flush, too much work to get the (possible) TLB refills
* to work correctly.
*/
addr = INDEX_BASE + (addr & (dcache_size - 1));
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
r4k_blast_dcache_page_indexed(addr);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page_indexed(addr);
r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
paddr : addr);
if (exec && !cpu_icache_snoops_remote_store) {
r4k_blast_scache_page_indexed(paddr);
}
}
if (exec) {
if (cpu_has_vtag_icache) {
......@@ -455,6 +458,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.vma = vma;
args.addr = addr;
args.pfn = pfn;
on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
......@@ -956,6 +960,7 @@ static void __init probe_pcache(void)
switch (c->cputype) {
case CPU_20KC:
case CPU_25KF:
c->dcache.flags |= MIPS_CACHE_PINDEX;
case CPU_R10000:
case CPU_R12000:
case CPU_SB1:
......
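The new pfn field lets local_r4k_flush_cache_page() index the D-cache by physical address on CPUs whose D-cache is flagged MIPS_CACHE_PINDEX (20Kc/25KF, per the probe_pcache() hunk). Indexing by virtual address is only safe when every index bit comes from the page offset; once a cache way is larger than a page, the vaddr- and paddr-derived indexes can diverge. A standalone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned long waysize = 0x8000;         /* 32 KB way > 4 KB page     */
        unsigned long vaddr   = 0x4051000UL;    /* page offset 0x000         */
        unsigned long paddr   = 0xd000UL;       /* same offset, other frame  */

        /* the index bits above PAGE_SHIFT differ, so a flush indexed by the
         * virtual address would hit the wrong sets of a physically indexed
         * D-cache */
        printf("index from vaddr: %#lx\n", vaddr & (waysize - 1));
        printf("index from paddr: %#lx\n", paddr & (waysize - 1));
        return 0;
}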
......@@ -210,7 +210,6 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
* Do indexed flush, too much work to get the (possible) TLB refills
* to work correctly.
*/
page = (KSEG0 + (page & (dcache_size - 1)));
if (cpu_has_dc_aliases || exec)
tx39_blast_dcache_page_indexed(page);
if (exec)
......
......@@ -47,23 +47,51 @@
#define IMR_IP3_VAL K_INT_MAP_I1
#define IMR_IP4_VAL K_INT_MAP_I2
#define SB1250_HPT_NUM 3
#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
#define SB1250_HPT_SHIFT ((sizeof(unsigned int)*8)-V_SCD_TIMER_WIDTH)
extern int sb1250_steal_irq(int irq);
static unsigned int sb1250_hpt_read(void);
static void sb1250_hpt_init(unsigned int);
static unsigned int hpt_offset;
void __init sb1250_hpt_setup(void)
{
int cpu = smp_processor_id();
if (!cpu) {
/* Setup hpt using timer #3 but do not enable irq for it */
__raw_writeq(0, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
__raw_writeq(SB1250_HPT_VALUE,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_INIT)));
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
/*
* we need to fill 32 bits, so just use the upper 23 bits and pretend
* the timer is going 512Mhz instead of 1Mhz
*/
mips_hpt_frequency = V_SCD_TIMER_FREQ << SB1250_HPT_SHIFT;
mips_hpt_init = sb1250_hpt_init;
mips_hpt_read = sb1250_hpt_read;
}
}
void sb1250_time_init(void)
{
int cpu = smp_processor_id();
int irq = K_INT_TIMER_0+cpu;
/* Only have 4 general purpose timers */
if (cpu > 3) {
/* Only have 4 general purpose timers, and we use last one as hpt */
if (cpu > 2) {
BUG();
}
if (!cpu) {
/* Use our own gettimeoffset() routine */
do_gettimeoffset = sb1250_gettimeoffset;
}
sb1250_mask_irq(cpu, irq);
/* Map the timer interrupt to ip[4] of this cpu */
......@@ -75,10 +103,10 @@ void sb1250_time_init(void)
/* Disable the timer and set up the count */
__raw_writeq(0, IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
#ifdef CONFIG_SIMULATION
__raw_writeq(50000 / HZ,
__raw_writeq((50000 / HZ) - 1,
IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)));
#else
__raw_writeq(1000000 / HZ,
__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1,
IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)));
#endif
......@@ -103,7 +131,7 @@ void sb1250_timer_interrupt(struct pt_regs *regs)
int cpu = smp_processor_id();
int irq = K_INT_TIMER_0 + cpu;
/* Reset the timer */
/* ACK interrupt */
____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
......@@ -122,15 +150,26 @@ void sb1250_timer_interrupt(struct pt_regs *regs)
}
/*
* We use our own do_gettimeoffset() instead of the generic one,
* because the generic one does not work for SMP case.
* In addition, since we use general timer 0 for system time,
* we can get accurate intra-jiffy offset without calibration.
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again. There's no easy way to set to a specific value so store init value
* in hpt_offset and subtract each time.
*
* Note: Timer isn't full 32bits so shift it into the upper part making
* it appear to run at a higher frequency.
*/
unsigned long sb1250_gettimeoffset(void)
static unsigned int sb1250_hpt_read(void)
{
unsigned long count =
__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(0, R_SCD_TIMER_CNT)));
unsigned int count;
return 1000000/HZ - count;
}
count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));
count = (SB1250_HPT_VALUE - count) << SB1250_HPT_SHIFT;
return count - hpt_offset;
}
static void sb1250_hpt_init(unsigned int count)
{
hpt_offset = count;
return;
}
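A quick check of the scaling used in sb1250_hpt_setup() above, using the values visible in the patch (V_SCD_TIMER_FREQ = 1000000, V_SCD_TIMER_WIDTH = 23): the 23-bit counter is shifted into the top of a 32-bit value, so the reported frequency scales by 2^9.

#include <stdio.h>

int main(void)
{
        unsigned int width = 23;                                /* V_SCD_TIMER_WIDTH */
        unsigned int shift = sizeof(unsigned int) * 8 - width;  /* SB1250_HPT_SHIFT  */
        unsigned int freq  = 1000000;                           /* V_SCD_TIMER_FREQ  */

        printf("SB1250_HPT_SHIFT   = %u\n", shift);              /* 9                 */
        printf("mips_hpt_frequency = %u Hz (512 MHz)\n", freq << shift);
        return 0;
}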
......@@ -70,6 +70,12 @@ const char *get_system_type(void)
return "SiByte " SIBYTE_BOARD_NAME;
}
void __init swarm_time_init(void)
{
/* Setup HPT */
sb1250_hpt_setup();
}
void __init swarm_timer_setup(struct irqaction *irq)
{
/*
......@@ -109,6 +115,7 @@ void __init plat_setup(void)
panic_timeout = 5; /* For debug. */
board_time_init = swarm_time_init;
board_timer_setup = swarm_timer_setup;
board_be_handler = swarm_be_handler;
......
......@@ -103,9 +103,15 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
if (((uint32_t)ti->len) & (chunk_size - 1)) {
ti->error = "dm-stripe: Target length not divisible by "
"chunk size";
return -EINVAL;
}
width = ti->len;
if (sector_div(width, stripes)) {
ti->error = "dm-stripe: Target length not divisable by "
ti->error = "dm-stripe: Target length not divisible by "
"number of stripes";
return -EINVAL;
}
......
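The new check in stripe_ctr() uses a mask rather than a division: ti->len & (chunk_size - 1) equals the remainder of ti->len modulo chunk_size whenever chunk_size is a power of two, which dm-stripe enforces earlier in the constructor. A minimal standalone illustration (values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t chunk_size = 64;       /* sectors, power of two       */
        uint64_t len = 1000;            /* target length in sectors    */

        if ((uint32_t)len & (chunk_size - 1))
                printf("not divisible: remainder %llu\n",
                       (unsigned long long)(len % chunk_size));
        else
                printf("divisible\n");
        return 0;
}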
......@@ -15,20 +15,6 @@ config VIDEO_CX88
To compile this driver as a module, choose M here: the
module will be called cx8800
config VIDEO_CX88_DVB
tristate "DVB/ATSC Support for cx2388x based TV cards"
depends on VIDEO_CX88 && DVB_CORE
select VIDEO_BUF_DVB
---help---
This adds support for DVB/ATSC cards based on the
Conexant 2388x chip.
To compile this driver as a module, choose M here: the
module will be called cx88-dvb.
You must also select one or more DVB/ATSC demodulators.
If you are unsure which you need, choose all of them.
config VIDEO_CX88_ALSA
tristate "ALSA DMA audio support"
depends on VIDEO_CX88 && SND && EXPERIMENTAL
......@@ -44,6 +30,20 @@ config VIDEO_CX88_ALSA
To compile this driver as a module, choose M here: the
module will be called cx88-alsa.
config VIDEO_CX88_DVB
tristate "DVB/ATSC Support for cx2388x based TV cards"
depends on VIDEO_CX88 && DVB_CORE
select VIDEO_BUF_DVB
---help---
This adds support for DVB/ATSC cards based on the
Conexant 2388x chip.
To compile this driver as a module, choose M here: the
module will be called cx88-dvb.
You must also select one or more DVB/ATSC demodulators.
If you are unsure which you need, choose all of them.
config VIDEO_CX88_DVB_ALL_FRONTENDS
bool "Build all supported frontends for cx2388x based TV cards"
default y
......
......@@ -367,6 +367,9 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
em28xx_capture_start(dev, 1);
em28xx_resolution_set(dev);
/* device needs to be initialized before isoc transfer */
video_mux(dev, 0);
/* start the transfer */
errCode = em28xx_init_isoc(dev);
if (errCode)
......
......@@ -9552,12 +9552,36 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
}
/* Find msi capability. */
/* The EPB bridge inside 5714, 5715, and 5780 cannot support
* DMA addresses > 40-bit. This bridge may have other additional
* 57xx devices behind it in some 4-port NIC designs for example.
* Any tg3 device found behind the bridge will also need the 40-bit
* DMA workaround.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
}
else {
struct pci_dev *bridge = NULL;
do {
bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_EPB,
bridge);
if (bridge && bridge->subordinate &&
(bridge->subordinate->number <=
tp->pdev->bus->number) &&
(bridge->subordinate->subordinate >=
tp->pdev->bus->number)) {
tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
pci_dev_put(bridge);
break;
}
} while (bridge);
}
/* Initialize misc host control in PCI block. */
tp->misc_host_ctrl |= (misc_ctrl_reg &
......@@ -10303,7 +10327,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
if (ccval == 0x6 || ccval == 0x7)
/* If the 5704 is behind the EPB bridge, we can
* do the less restrictive ONE_DMA workaround for
* better performance.
*/
if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
tp->dma_rwctrl |= 0x8000;
else if (ccval == 0x6 || ccval == 0x7)
tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
/* Set bit 23 to enable PCIX hw bug fix */
......@@ -10759,19 +10790,20 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
/* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
/* The EPB bridge inside 5714, 5715, and 5780 and any
* device behind the EPB cannot support DMA addresses > 40-bit.
* On 64-bit systems with IOMMU, use 40-bit dma_mask.
* On 64-bit systems without IOMMU, use 64-bit dma_mask and
* do DMA address check in tg3_start_xmit().
*/
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
persist_dma_mask = dma_mask = DMA_32BIT_MASK;
else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
dma_mask = DMA_64BIT_MASK;
#endif
} else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
persist_dma_mask = dma_mask = DMA_32BIT_MASK;
else
} else
persist_dma_mask = dma_mask = DMA_64BIT_MASK;
/* Configure DMA attributes. */
......@@ -10908,8 +10940,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
(tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
(tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
dev->name, tp->dma_rwctrl);
printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
dev->name, tp->dma_rwctrl,
(pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
(((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
return 0;
......
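The EPB detection added to tg3_get_invariants() walks every ServerWorks EPB bridge and asks whether the NIC's bus number lies inside the bus range the bridge forwards (bridge->subordinate->number up to bridge->subordinate->subordinate). The containment test in isolation, with invented numbers:

#include <stdio.h>

static int behind_bridge(int secondary, int subordinate, int dev_bus)
{
        /* a device is behind the bridge if its bus number falls inside the
         * [secondary, subordinate] window the bridge forwards */
        return secondary <= dev_bus && dev_bus <= subordinate;
}

int main(void)
{
        /* bridge forwards buses 2..4, tg3 device sits on bus 3 */
        printf("%s\n", behind_bridge(2, 4, 3) ?
               "apply 40-bit DMA workaround" : "no workaround needed");
        return 0;
}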
......@@ -2163,6 +2163,7 @@ struct tg3 {
#define TG3_FLAG_10_100_ONLY 0x01000000
#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
#define TG3_FLAG_IN_RESET_TASK 0x04000000
#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
#define TG3_FLAG_GOT_SERDES_FLOWCTL 0x20000000
#define TG3_FLAG_SPLIT_MODE 0x40000000
......
......@@ -202,7 +202,6 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
filp->private_data = NULL;
}
d_drop(filp->f_dentry);
return 0;
}
......
......@@ -98,10 +98,8 @@ asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg __user *arg, void __user *r
if (copy_from_user(&version, &arg->ca_version, sizeof(int)))
return -EFAULT;
if (version != NFSCTL_VERSION) {
printk(KERN_WARNING "nfsd: incompatible version in syscall.\n");
if (version != NFSCTL_VERSION)
return -EINVAL;
}
if (cmd < 0 || cmd >= sizeof(map)/sizeof(map[0]) || !map[cmd].name)
return -EINVAL;
......
......@@ -96,6 +96,9 @@
#ifndef cpu_has_ic_fills_f_dc
#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
#endif
#ifndef cpu_has_pindexed_dcache
#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
#endif
/*
* I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
......
......@@ -39,6 +39,7 @@ struct cache_desc {
#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */
#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */
#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */
#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
struct cpuinfo_mips {
unsigned long udelay_val;
......
......@@ -3,7 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 2004 Ralf Baechle
* Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) MIPS Technologies, Inc.
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H
......@@ -74,8 +76,7 @@
#define irq_disable_hazard
_ehb
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
defined(CONFIG_CPU_SB1)
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
......@@ -99,13 +100,13 @@
#else /* __ASSEMBLY__ */
__asm__(
" .macro _ssnop \n\t"
" sll $0, $0, 1 \n\t"
" .endm \n\t"
" \n\t"
" .macro _ehb \n\t"
" sll $0, $0, 3 \n\t"
" .endm \n\t");
" .macro _ssnop \n"
" sll $0, $0, 1 \n"
" .endm \n"
" \n"
" .macro _ehb \n"
" sll $0, $0, 3 \n"
" .endm \n");
#ifdef CONFIG_CPU_RM9000
......@@ -117,17 +118,21 @@ __asm__(
#define mtc0_tlbw_hazard() \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
".set\tmips0")
" .set mips32 \n" \
" _ssnop \n" \
" _ssnop \n" \
" _ssnop \n" \
" _ssnop \n" \
" .set mips0 \n")
#define tlbw_use_hazard() \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
".set\tmips0")
#define back_to_back_c0_hazard() do { } while (0)
" .set mips32 \n" \
" _ssnop \n" \
" _ssnop \n" \
" _ssnop \n" \
" _ssnop \n" \
" .set mips0 \n")
#else
......@@ -136,15 +141,25 @@ __asm__(
*/
#define mtc0_tlbw_hazard() \
__asm__ __volatile__( \
".set noreorder\n\t" \
"nop; nop; nop; nop; nop; nop;\n\t" \
".set reorder\n\t")
" .set noreorder \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" .set reorder \n")
#define tlbw_use_hazard() \
__asm__ __volatile__( \
".set noreorder\n\t" \
"nop; nop; nop; nop; nop; nop;\n\t" \
".set reorder\n\t")
" .set noreorder \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" nop \n" \
" .set reorder \n")
#endif
......@@ -156,49 +171,26 @@ __asm__(
#ifdef CONFIG_CPU_MIPSR2
__asm__(
" .macro\tirq_enable_hazard \n\t"
" _ehb \n\t"
" .endm \n\t"
" \n\t"
" .macro\tirq_disable_hazard \n\t"
" _ehb \n\t"
" .endm \n\t"
" \n\t"
" .macro\tback_to_back_c0_hazard \n\t"
" _ehb \n\t"
" .endm");
#define irq_enable_hazard() \
__asm__ __volatile__( \
"irq_enable_hazard")
__asm__(" .macro irq_enable_hazard \n"
" _ehb \n"
" .endm \n"
" \n"
" .macro irq_disable_hazard \n"
" _ehb \n"
" .endm \n");
#define irq_disable_hazard() \
__asm__ __volatile__( \
"irq_disable_hazard")
#define back_to_back_c0_hazard() \
__asm__ __volatile__( \
"back_to_back_c0_hazard")
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
defined(CONFIG_CPU_SB1)
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
*/
__asm__(
" .macro\tirq_enable_hazard \n\t"
" .endm \n\t"
" \n\t"
" .macro\tirq_disable_hazard \n\t"
" .endm");
#define irq_enable_hazard() do { } while (0)
#define irq_disable_hazard() do { } while (0)
#define back_to_back_c0_hazard() do { } while (0)
" .macro irq_enable_hazard \n"
" .endm \n"
" \n"
" .macro irq_disable_hazard \n"
" .endm \n");
#else
......@@ -209,29 +201,63 @@ __asm__(
*/
__asm__(
" # \n\t"
" # There is a hazard but we do not care \n\t"
" # \n\t"
" .macro\tirq_enable_hazard \n\t"
" .endm \n\t"
" \n\t"
" .macro\tirq_disable_hazard \n\t"
" _ssnop; _ssnop; _ssnop \n\t"
" .endm");
" # \n"
" # There is a hazard but we do not care \n"
" # \n"
" .macro\tirq_enable_hazard \n"
" .endm \n"
" \n"
" .macro\tirq_disable_hazard \n"
" _ssnop \n"
" _ssnop \n"
" _ssnop \n"
" .endm \n");
#define irq_enable_hazard() do { } while (0)
#endif
#define irq_enable_hazard() \
__asm__ __volatile__("irq_enable_hazard")
#define irq_disable_hazard() \
__asm__ __volatile__( \
"irq_disable_hazard")
__asm__ __volatile__("irq_disable_hazard")
#define back_to_back_c0_hazard() \
__asm__ __volatile__( \
" .set noreorder \n" \
" nop; nop; nop \n" \
" .set reorder \n")
/*
* Back-to-back hazards -
*
* What is needed to separate a move to cp0 from a subsequent read from the
* same cp0 register?
*/
#ifdef CONFIG_CPU_MIPSR2
__asm__(" .macro back_to_back_c0_hazard \n"
" _ehb \n"
" .endm \n");
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
defined(CONFIG_CPU_SB1)
__asm__(" .macro back_to_back_c0_hazard \n"
" .endm \n");
#else
__asm__(" .macro back_to_back_c0_hazard \n"
" .set noreorder \n"
" _ssnop \n"
" _ssnop \n"
" _ssnop \n"
" .set reorder \n"
" .endm");
#endif
#define back_to_back_c0_hazard() \
__asm__ __volatile__("back_to_back_c0_hazard")
/*
* Instruction execution hazard
*/
#ifdef CONFIG_CPU_MIPSR2
/*
* gcc has a tradition of miscompiling the previous construct using the
......
......@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf GmbH
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1994 - 2000, 06 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
......@@ -103,8 +103,20 @@
*/
extern const unsigned long mips_io_port_base;
#define set_io_port_base(base) \
do { * (unsigned long *) &mips_io_port_base = (base); } while (0)
/*
* Gcc will generate code to load the value of mips_io_port_base after each
* function call which may be fairly wasteful in some cases. So we don't
* play quite by the book. We tell gcc mips_io_port_base is a long variable
* which solves the code generation issue. Now we need to violate the
* aliasing rules a little to make initialization possible and finally we
* will need the barrier() to fight side effects of the aliasing chat.
* This trickery will eventually collapse under gcc's optimizer. Oh well.
*/
static inline void set_io_port_base(unsigned long base)
{
* (unsigned long *) &mips_io_port_base = base;
barrier();
}
/*
* Thanks to James van Artsdalen for a better timing-fix than
......
......@@ -257,7 +257,8 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
unsigned long start = page; \
unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
unsigned long start = INDEX_BASE + (page & indexmask); \
unsigned long end = start + PAGE_SIZE; \
unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
unsigned long ws_end = current_cpu_data.desc.ways << \
......
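With the change above, blast_*cache_page_indexed() masks the address with waysize - 1 instead of using it raw, then walks each cache way by adding the way stride. A rough model of the loop structure (constants invented; the real macro issues indexed cache ops instead of printing):

#include <stdio.h>

#define INDEX_BASE 0x80000000UL         /* stand-in for the MIPS KSEG0 base */

static void blast_page_indexed(unsigned long page, unsigned long waysize,
                               unsigned long ways, unsigned long linesize,
                               unsigned long pagesize)
{
        unsigned long indexmask = waysize - 1;          /* waysize is 2^n      */
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + pagesize;
        unsigned long ws_inc = waysize;                 /* stride between ways */
        unsigned long ws, addr, lines;

        for (ws = 0; ws < ways * waysize; ws += ws_inc) {
                lines = 0;
                for (addr = start; addr < end; addr += linesize)
                        lines++;   /* cache_op(Index_Writeback_Inv_D, addr + ws) */
                printf("way at +%#lx: %lu lines starting at %#lx\n",
                       ws, lines, start + ws);
        }
}

int main(void)
{
        blast_page_indexed(0x1234f000UL, 16384, 4, 32, 4096);
        return 0;
}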
......@@ -45,8 +45,8 @@ extern unsigned int soc_type;
extern unsigned int periph_rev;
extern unsigned int zbbus_mhz;
extern void sb1250_hpt_setup(void);
extern void sb1250_time_init(void);
extern unsigned long sb1250_gettimeoffset(void);
extern void sb1250_mask_irq(int cpu, int irq);
extern void sb1250_unmask_irq(int cpu, int irq);
extern void sb1250_smp_finish(void);
......
......@@ -359,14 +359,15 @@
*/
#define V_SCD_TIMER_FREQ 1000000
#define V_SCD_TIMER_WIDTH 23
#define S_SCD_TIMER_INIT 0
#define M_SCD_TIMER_INIT _SB_MAKEMASK(20,S_SCD_TIMER_INIT)
#define M_SCD_TIMER_INIT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_INIT)
#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_INIT)
#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT)
#define S_SCD_TIMER_CNT 0
#define M_SCD_TIMER_CNT _SB_MAKEMASK(20,S_SCD_TIMER_CNT)
#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_CNT)
#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_CNT)
#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x,S_SCD_TIMER_CNT,M_SCD_TIMER_CNT)
......
......@@ -1365,6 +1365,7 @@
#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
......
......@@ -110,8 +110,15 @@ enum {
enum {
AX25_PROTO_STD_SIMPLEX,
AX25_PROTO_STD_DUPLEX,
#ifdef CONFIG_AX25_DAMA_SLAVE
AX25_PROTO_DAMA_SLAVE,
AX25_PROTO_DAMA_MASTER
#ifdef CONFIG_AX25_DAMA_MASTER
AX25_PROTO_DAMA_MASTER,
#define AX25_PROTO_MAX AX25_PROTO_DAMA_MASTER
#endif
#endif
__AX25_PROTO_MAX,
AX25_PROTO_MAX = __AX25_PROTO_MAX -1
};
enum {
......
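The reworked enum above uses a trailing __AX25_PROTO_MAX sentinel so AX25_PROTO_MAX is always the highest member that was actually compiled in, even when the DAMA entries are configured out. The pattern in isolation (names invented):

#include <stdio.h>

#define HAVE_OPTIONAL_MODE 1            /* stands in for CONFIG_AX25_DAMA_* */

enum {
        MODE_SIMPLEX,
        MODE_DUPLEX,
#if HAVE_OPTIONAL_MODE
        MODE_OPTIONAL,
#endif
        __MODE_MAX,
        MODE_MAX = __MODE_MAX - 1       /* always the last real member      */
};

int main(void)
{
        printf("highest valid mode = %d\n", MODE_MAX);
        return 0;
}

This is also why the ax25 sysctl hunk further down can use AX25_PROTO_MAX for max_proto instead of a hard-coded 3.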
......@@ -478,9 +478,9 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
rc = __condition; \
if (!rc) { \
*(__timeo) = schedule_timeout(*(__timeo)); \
rc = __condition; \
} \
lock_sock(__sk); \
rc = __condition; \
rc; \
})
......
......@@ -1478,9 +1478,7 @@ static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
if ((unshare_flags & CLONE_VM) &&
(mm && atomic_read(&mm->mm_users) > 1)) {
*new_mmp = dup_mm(current);
if (!*new_mmp)
return -ENOMEM;
return -EINVAL;
}
return 0;
......@@ -1569,7 +1567,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
if (new_sigh) {
sigh = current->sighand;
current->sighand = new_sigh;
rcu_assign_pointer(current->sighand, new_sigh);
new_sigh = sigh;
}
......
......@@ -353,6 +353,7 @@ static int posix_timer_fn(void *data)
hrtimer_forward(&timr->it.real.timer,
timr->it.real.interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
}
}
......
......@@ -1354,8 +1354,8 @@ void __init init_timers(void)
#ifdef CONFIG_TIME_INTERPOLATION
struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);
static inline u64 time_interpolator_get_cycles(unsigned int src)
......
......@@ -623,11 +623,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
(*zap_work)--;
continue;
}
(*zap_work) -= PAGE_SIZE;
if (pte_present(ptent)) {
struct page *page;
(*zap_work) -= PAGE_SIZE;
page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
......
......@@ -330,9 +330,19 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
int err;
struct vm_area_struct *first, *vma, *prev;
/* Clear the LRU lists so pages can be isolated */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
/* Must have swap device for migration */
if (nr_swap_pages <= 0)
return ERR_PTR(-ENODEV);
/*
* Clear the LRU lists so pages can be isolated.
* Note that pages may be moved off the LRU after we have
* drained them. Those pages will fail to migrate like other
* pages that may be busy.
*/
lru_add_drain_all();
}
first = find_vma(mm, start);
if (!first)
......
......@@ -393,7 +393,8 @@ void pagevec_strip(struct pagevec *pvec)
struct page *page = pvec->pages[i];
if (PagePrivate(page) && !TestSetPageLocked(page)) {
try_to_release_page(page, 0);
if (PagePrivate(page))
try_to_release_page(page, 0);
unlock_page(page);
}
}
......
......@@ -24,7 +24,7 @@ static int min_t3[1], max_t3[] = {3600 * HZ};
static int min_idle[1], max_idle[] = {65535 * HZ};
static int min_n2[] = {1}, max_n2[] = {31};
static int min_paclen[] = {1}, max_paclen[] = {512};
static int min_proto[1], max_proto[] = {3};
static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
static int min_ds_timeout[1], max_ds_timeout[] = {65535 * HZ};
static struct ctl_table_header *ax25_table_header;
......
......@@ -508,12 +508,7 @@ buf_printf(struct buffer *buf, const char *fmt, ...)
va_start(ap, fmt);
len = vsnprintf(tmp, SZ, fmt, ap);
if (buf->size - buf->pos < len + 1) {
buf->size += 128;
buf->p = realloc(buf->p, buf->size);
}
strncpy(buf->p + buf->pos, tmp, len + 1);
buf->pos += len;
buf_write(buf, tmp, len);
va_end(ap);
}
......@@ -521,7 +516,7 @@ void
buf_write(struct buffer *buf, const char *s, int len)
{
if (buf->size - buf->pos < len) {
buf->size += len;
buf->size += len + SZ;
buf->p = realloc(buf->p, buf->size);
}
strncpy(buf->p + buf->pos, s, len);
......
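The buf_write() fix above grows the buffer by len + SZ instead of just len, so a burst of appends does not trigger a realloc on every call, and buf_printf() now funnels through buf_write() instead of duplicating the copy logic. A standalone sketch of the same growth policy (SZ mirrors modpost's constant; error handling omitted, as in the original):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SZ 128

struct buffer {
        char *p;
        int pos;
        int size;
};

static void buf_write(struct buffer *buf, const char *s, int len)
{
        if (buf->size - buf->pos < len) {
                buf->size += len + SZ;          /* room for this write plus slack */
                buf->p = realloc(buf->p, buf->size);
        }
        memcpy(buf->p + buf->pos, s, len);
        buf->pos += len;
}

int main(void)
{
        struct buffer b = { NULL, 0, 0 };

        buf_write(&b, "hello ", 6);
        buf_write(&b, "world", 5);
        printf("%.*s\n", b.pos, b.p);
        free(b.p);
        return 0;
}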