Commit 312b2eaf authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/sfr
parents 3c87bf0a 2b009dcf
@@ -460,6 +460,13 @@ M: henrique@cyclades.com
 W:	http://www.cyclades.com/
 S:	Supported
 
+DAC960 RAID CONTROLLER DRIVER
+P:	Dave Olien
+M:	dmo@osdl.org
+W:	http://www.osdl.org/archive/dmo/DAC960
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+
 DAMA SLAVE for AX.25
 P:	Joerg Reuter
 M:	jreuter@yaina.de
...
@@ -52,7 +52,7 @@ int using_apic_timer = 0;
 int prof_multiplier[NR_CPUS] = { 1, };
 int prof_old_multiplier[NR_CPUS] = { 1, };
-int prof_counter[NR_CPUS] = { 1, };
+DEFINE_PER_CPU(int, prof_counter) = 1;
 
 int get_maxlvt(void)
 {
@@ -997,7 +997,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
 	x86_do_profile(regs);
 
-	if (--prof_counter[cpu] <= 0) {
+	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
 		 * to this point as a result of the user writing to
@@ -1006,10 +1006,12 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
 		 *
 		 * Interrupts are already masked off at this point.
 		 */
-		prof_counter[cpu] = prof_multiplier[cpu];
-		if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
-			__setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
-			prof_old_multiplier[cpu] = prof_counter[cpu];
+		per_cpu(prof_counter, cpu) = prof_multiplier[cpu];
+		if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
+			__setup_APIC_LVTT(
+				calibration_result/
+				per_cpu(prof_counter, cpu));
+			prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
 		}
 
 #ifdef CONFIG_SMP
...
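Note: this hunk (and the matching voyager_smp.c hunk further down) moves the profiling counter from an NR_CPUS-sized array onto the per-CPU variable infrastructure. A minimal sketch of the idiom, with illustrative names that are not part of the patch:

```c
#include <linux/percpu.h>
#include <linux/smp.h>

/* One private copy per CPU, each statically initialised to 1;
 * replaces an int counter[NR_CPUS] array. */
static DEFINE_PER_CPU(int, demo_counter) = 1;

static void demo_tick(void)
{
	int cpu = smp_processor_id();

	/* per_cpu(var, cpu) names the given CPU's copy of the variable. */
	if (--per_cpu(demo_counter, cpu) <= 0)
		per_cpu(demo_counter, cpu) = 1;	/* reload the divisor */
}
```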
@@ -1440,7 +1440,8 @@ void disable_IO_APIC(void)
  * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
  */
 
-static void __init setup_ioapic_ids_from_mpc (void)
+#ifndef CONFIG_X86_NUMAQ
+static void __init setup_ioapic_ids_from_mpc(void)
 {
 	struct IO_APIC_reg_00 reg_00;
 	unsigned long phys_id_present_map;
@@ -1533,6 +1534,9 @@ static void __init setup_ioapic_ids_from_mpc (void)
 			printk(" ok.\n");
 	}
 }
+#else
+static void __init setup_ioapic_ids_from_mpc(void) { }
+#endif
 
 /*
  * There is a nasty bug in some older SMP boards, their mptable lies
...
@@ -935,10 +935,6 @@ static void smp_tune_scheduling (void)
  * Cycle through the processors sending APIC IPIs to boot each.
  */
 
-extern int prof_multiplier[NR_CPUS];
-extern int prof_old_multiplier[NR_CPUS];
-extern int prof_counter[NR_CPUS];
-
 static int boot_cpu_logical_apicid;
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
 void *xquad_portio;
@@ -949,17 +945,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 {
 	int apicid, cpu, bit;
 
-	/*
-	 * Initialize the logical to physical CPU number mapping
-	 * and the per-CPU profiling counter/multiplier
-	 */
-
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		prof_counter[cpu] = 1;
-		prof_old_multiplier[cpu] = 1;
-		prof_multiplier[cpu] = 1;
-	}
-
 	/*
 	 * Setup boot CPU information
 	 */
...
@@ -236,7 +236,7 @@ static __u32 trampoline_base;
 /* The per cpu profile stuff - used in smp_local_timer_interrupt */
 static unsigned int prof_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
 static unsigned int prof_old_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
-static unsigned int prof_counter[NR_CPUS] __cacheline_aligned = { 1, };
+static DEFINE_PER_CPU(unsigned int, prof_counter) = 1;
 
 /* the map used to check if a CPU has booted */
 static __u32 cpu_booted_map;
@@ -393,9 +393,6 @@ find_smp_config(void)
 	/* initialize the CPU structures (moved from smp_boot_cpus) */
 	for(i=0; i<NR_CPUS; i++) {
-		prof_counter[i] = 1;
-		prof_old_multiplier[i] = 1;
-		prof_multiplier[i] = 1;
 		cpu_irq_affinity[i] = ~0;
 	}
 	cpu_online_map = (1<<boot_cpu_id);
@@ -1312,7 +1309,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
 	x86_do_profile(regs);
 
-	if (--prof_counter[cpu] <= 0) {
+	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
 		 * to this point as a result of the user writing to
@@ -1321,10 +1318,10 @@ smp_local_timer_interrupt(struct pt_regs * regs)
 		 *
 		 * Interrupts are already masked off at this point.
 		 */
-		prof_counter[cpu] = prof_multiplier[cpu];
-		if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
+		per_cpu(prof_counter,cpu) = prof_multiplier[cpu];
+		if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
 			/* FIXME: need to update the vic timer tick here */
-			prof_old_multiplier[cpu] = prof_counter[cpu];
+			prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
 		}
 
 		update_process_times(user_mode(regs));
...
@@ -88,6 +88,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
 	set_pte(page_table, entry);
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
 {
...
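The same helper is duplicated into each architecture's hugetlbpage.c below, with a declaration and a no-op stub added to hugetlb.h near the end of this diff. The mask arithmetic is easy to sanity-check in user space; a standalone sketch, assuming 4 MB huge pages purely for illustration (HPAGE_SIZE is architecture-specific):

```c
#include <stdio.h>

#define HPAGE_SIZE (4UL * 1024 * 1024)	/* assumed; per-arch in reality */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define EINVAL 22

/* Nonzero low-order bits in addr or len mean misalignment. */
static int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", is_aligned_hugepage_range(0x00800000, 0x00400000)); /* 0 */
	printf("%d\n", is_aligned_hugepage_range(0x00801000, 0x00400000)); /* -22 */
	return 0;
}
```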
@@ -96,6 +96,18 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
 	return;
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
 {
...
@@ -232,6 +232,18 @@ make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
 	return -1;
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
 {
...
@@ -86,6 +86,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
 	set_pte(page_table, entry);
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
 int
 copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
...
@@ -1731,12 +1731,17 @@ static boolean DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
       if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
 	break;
       LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
-      if (LogicalDeviceNumber > DAC960_MaxLogicalDrives)
-	panic("DAC960: Logical Drive Number %d not supported\n",
-	      LogicalDeviceNumber);
-      if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize)
-	panic("DAC960: Logical Drive Block Size %d not supported\n",
-	      NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
+      if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
+	DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
+		     Controller, LogicalDeviceNumber);
+	break;
+      }
+      if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
+	DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
+		     Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
+	LogicalDeviceNumber++;
+	continue;
+      }
       PhysicalDevice.Controller = 0;
       PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
       PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
...
@@ -60,7 +60,8 @@ static struct amd_ide_chip {
 	{ PCI_DEVICE_ID_AMD_OPUS_7441,	0x00, 0x40, AMD_UDMA_100 },	/* AMD-768 Opus */
 	{ PCI_DEVICE_ID_AMD_8111_IDE,	0x00, 0x40, AMD_UDMA_100 },	/* AMD-8111 */
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, 0x00, 0x50, AMD_UDMA_100 },	/* nVidia nForce */
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, 0x00, 0x50, AMD_UDMA_100 },	/* nVidia nForce 2 */
 	{ 0 }
 };
@@ -446,6 +446,7 @@ static struct pci_device_id amd74xx_pci_tbl[] __devinitdata = {
 	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
 	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
 	{ 0, },
 };
...
@@ -110,6 +110,20 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
 		.bootable	= ON_BOARD,
 		.extra		= 0,
 	},
+	{	/* 6 */
+		.vendor		= PCI_VENDOR_ID_NVIDIA,
+		.device		= PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE,
+		.name		= "NFORCE2",
+		.init_chipset	= init_chipset_amd74xx,
+		.init_iops	= NULL,
+		.init_hwif	= init_hwif_amd74xx,
+		.init_dma	= init_dma_amd74xx,
+		.channels	= 2,
+		.autodma	= AUTODMA,
+		.enablebits	= {{0x50,0x01,0x01}, {0x50,0x02,0x02}},
+		.bootable	= ON_BOARD,
+		.extra		= 0,
+	},
 	{
 		.vendor		= 0,
 		.device		= 0,
...
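Reviewer's note: the `driver_data` value 6 in the new `amd74xx_pci_tbl` entry is an index into `amd74xx_chipsets[]`, so the two additions must stay in the same relative slot. A sketch of the lookup pattern the driver relies on (`demo_probe` is an illustrative name, not the driver's real probe function):

```c
/* The PCI core passes back the pci_device_id that matched; its
 * driver_data field selects the chipset description to program. */
static int demo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	ide_pci_device_t *d = &amd74xx_chipsets[id->driver_data];

	/* ... set up the interface according to *d ... */
	return 0;
}
```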
@@ -338,16 +338,6 @@ static int __init el3_common_init (struct net_device *dev)
 	dev->watchdog_timeo = TX_TIMEOUT;
 	dev->do_ioctl = netdev_ioctl;
 
-#ifdef CONFIG_PM
-	/* register power management */
-	lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
-	if (lp->pmdev) {
-		struct pm_dev *p;
-		p = lp->pmdev;
-		p->data = (struct net_device *)dev;
-	}
-#endif
-
 	return 0;
 }
@@ -417,6 +407,13 @@ static int __init el3_probe(int card_idx)
 			phys_addr[j] =
 				htons(read_eeprom(ioaddr, j));
 		if_port = read_eeprom(ioaddr, 8) >> 14;
+		if (!(dev = init_etherdev(NULL, sizeof(struct el3_private)))) {
+			release_region(ioaddr, EL3_IO_EXTENT);
+			pnp_device_detach(idev);
+			return -ENOMEM;
+		}
+		SET_MODULE_OWNER(dev);
+
 		pnp_cards++;
 		goto found;
 	}
@@ -497,24 +494,29 @@ static int __init el3_probe(int card_idx)
 	}
 	irq = id_read_eeprom(9) >> 12;
 
-#if 0 /* Huh ?
-	 Can someone explain what is this for ? */
-	if (dev) {			/* Set passed-in IRQ or I/O Addr. */
-		if (dev->irq > 1  &&  dev->irq < 16)
-			irq = dev->irq;
-
-		if (dev->base_addr) {
-			if (dev->mem_end == 0x3c509	/* Magic key */
-			    && dev->base_addr >= 0x200  &&  dev->base_addr <= 0x3e0)
-				ioaddr = dev->base_addr & 0x3f0;
-			else if (dev->base_addr != ioaddr)
-				return -ENODEV;
-		}
-	}
-#endif
-
-	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509"))
-		return -EBUSY;
+	if (!(dev = init_etherdev(NULL, sizeof(struct el3_private))))
+		return -ENOMEM;
+	SET_MODULE_OWNER(dev);
+
+	/* Set passed-in IRQ or I/O Addr. */
+	if (dev->irq > 1  &&  dev->irq < 16)
+		irq = dev->irq;
+
+	if (dev->base_addr) {
+		if (dev->mem_end == 0x3c509	/* Magic key */
+		    && dev->base_addr >= 0x200  &&  dev->base_addr <= 0x3e0)
+			ioaddr = dev->base_addr & 0x3f0;
+		else if (dev->base_addr != ioaddr) {
+			unregister_netdev (dev);
+			return -ENODEV;
+		}
+	}
+
+	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) {
+		unregister_netdev (dev);
+		return -EBUSY;
+	}
 
 	/* Set the adaptor tag so that the next card can be found. */
 	outb(0xd0 + ++current_tag, id_port);
@@ -524,6 +526,7 @@ static int __init el3_probe(int card_idx)
 	EL3WINDOW(0);
 	if (inw(ioaddr) != 0x6d50) {
+		unregister_netdev (dev);
 		release_region(ioaddr, EL3_IO_EXTENT);
 		return -ENODEV;
 	}
@@ -531,12 +534,9 @@ static int __init el3_probe(int card_idx)
 	/* Free the interrupt so that some other card can use it. */
 	outw(0x0f00, ioaddr + WN0_IRQ);
 
-	dev = init_etherdev(NULL, sizeof(struct el3_private));
-	if (dev == NULL) {
-		release_region(ioaddr, EL3_IO_EXTENT);
-		return -ENOMEM;
-	}
-	SET_MODULE_OWNER(dev);
+#ifdef __ISAPNP__
+found:							/* PNP jumps here... */
+#endif /* __ISAPNP__ */
 
 	memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
 	dev->base_addr = ioaddr;
@@ -547,6 +547,16 @@ static int __init el3_probe(int card_idx)
 	lp->dev = &idev->dev;
 #endif
 
+#ifdef CONFIG_PM
+	/* register power management */
+	lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
+	if (lp->pmdev) {
+		struct pm_dev *p;
+		p = lp->pmdev;
+		p->data = (struct net_device *)dev;
+	}
+#endif
+
 	return el3_common_init (dev);
 }
@@ -667,6 +677,7 @@ static int __init el3_eisa_probe (struct device *device)
 }
 #endif
 
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 /* This remove works for all device types.
  *
  * The net dev must be stored in the driver_data field */
@@ -679,6 +690,7 @@ static int __devexit el3_device_remove (struct device *device)
 	el3_common_remove (dev);
 	return 0;
 }
+#endif
 
 /* Read a word from the EEPROM using the regular EEPROM access register.
    Assume that we are in register window zero.
...
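The net effect of these 3c509 hunks is that the `net_device` is now allocated before any resource that might fail, so every later error path can unwind through `unregister_netdev()`. A condensed sketch of the resulting ordering (the function name and label are illustrative):

```c
static int demo_probe(unsigned long ioaddr)
{
	struct net_device *dev;
	int err;

	dev = init_etherdev(NULL, sizeof(struct el3_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);

	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) {
		err = -EBUSY;
		goto out_unregister;	/* undo in reverse order */
	}
	return 0;

out_unregister:
	unregister_netdev(dev);
	return err;
}
```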
@@ -181,7 +181,7 @@
     - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
     - Also see Documentation/networking/vortex.txt
 
-    LK1.1.19 10Nov09 Marc Zyngier <maz@wild-wind.fr.eu.org>
+    LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
     - EISA sysfs integration.
 */
@@ -817,7 +817,11 @@ struct vortex_private {
 	u32 power_state[16];
 };
 
+#ifdef CONFIG_PCI
 #define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#else
+#define DEVICE_PCI(dev) NULL
+#endif
 
 #define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
...
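With the `#else` branch defining `DEVICE_PCI()` as the constant `NULL`, callers keep compiling unchanged on !CONFIG_PCI builds and the PCI-only branches become dead code. An illustrative caller, not taken from the driver:

```c
static void demo_power_up(struct vortex_private *vp)
{
	struct pci_dev *pdev = VORTEX_PCI(vp);

	if (pdev)			/* constant NULL without CONFIG_PCI */
		pci_enable_device(pdev);
}
```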
@@ -224,9 +224,6 @@ static struct devprobe isa_probes[] __initdata = {
 #ifdef CONFIG_EL2		/* 3c503 */
 	{el2_probe, 0},
 #endif
-#ifdef CONFIG_EL3
-	{el3_probe, 0},
-#endif
 #ifdef CONFIG_HPLAN
 	{hp_probe, 0},
 #endif
...
@@ -31,7 +31,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/writeback.h>
-#include <linux/mempool.h>
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>
@@ -2791,7 +2790,6 @@ asmlinkage long sys_bdflush(int func, long data)
  * Buffer-head allocation
  */
 static kmem_cache_t *bh_cachep;
-static mempool_t *bh_mempool;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2825,7 +2823,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(void)
 {
-	struct buffer_head *ret = mempool_alloc(bh_mempool, GFP_NOFS);
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, GFP_NOFS);
 	if (ret) {
 		preempt_disable();
 		__get_cpu_var(bh_accounting).nr++;
@@ -2839,7 +2837,7 @@ EXPORT_SYMBOL(alloc_buffer_head);
 void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
-	mempool_free(bh, bh_mempool);
+	kmem_cache_free(bh_cachep, bh);
 	preempt_disable();
 	__get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
@@ -2847,7 +2845,8 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void
+init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 			SLAB_CTOR_CONSTRUCTOR) {
@@ -2858,19 +2857,6 @@ static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long fla
 	}
 }
 
-static void *bh_mempool_alloc(int gfp_mask, void *pool_data)
-{
-	return kmem_cache_alloc(bh_cachep, gfp_mask);
-}
-
-static void bh_mempool_free(void *element, void *pool_data)
-{
-	return kmem_cache_free(bh_cachep, element);
-}
-
-#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
-#define MAX_UNUSED_BUFFERS NR_RESERVED+20
-
 static void buffer_init_cpu(int cpu)
 {
 	struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
@@ -2907,8 +2893,6 @@ void __init buffer_init(void)
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
 			0, init_buffer_head, NULL);
-	bh_mempool = mempool_create(MAX_UNUSED_BUFFERS, bh_mempool_alloc,
-				bh_mempool_free, NULL);
 	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
 		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
...
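Dropping the mempool means `alloc_buffer_head()` can now return NULL under memory pressure instead of dipping into a reserve, and callers are expected to cope. The remaining allocation pairing, sketched with illustrative names:

```c
/* bh allocations come straight from the slab cache; GFP_NOFS keeps
 * the allocator from recursing back into the filesystem. */
static kmem_cache_t *demo_cachep;

static struct buffer_head *demo_alloc(void)
{
	return kmem_cache_alloc(demo_cachep, GFP_NOFS);	/* may be NULL */
}

static void demo_free(struct buffer_head *bh)
{
	kmem_cache_free(demo_cachep, bh);
}
```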
@@ -1317,10 +1317,7 @@ static int ext3_writepage(struct page *page, struct writeback_control *wbc)
 		goto out_fail;
 
 	needed = ext3_writepage_trans_blocks(inode);
-	if (wbc->for_reclaim)
-		handle = ext3_journal_try_start(inode, needed);
-	else
-		handle = ext3_journal_start(inode, needed);
+	handle = ext3_journal_start(inode, needed);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
...
@@ -1343,9 +1343,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 		 * superblock lock.
 		 */
 		EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
-		unlock_super(sb);	/* akpm: sigh */
 		ext3_orphan_cleanup(sb, es);
-		lock_super(sb);
 		EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
 		if (needs_recovery)
 			printk (KERN_INFO "EXT3-fs: recovery complete.\n");
...
@@ -38,7 +38,6 @@
 #include <linux/proc_fs.h>
 
 EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_try_start);
 EXPORT_SYMBOL(journal_restart);
 EXPORT_SYMBOL(journal_extend);
 EXPORT_SYMBOL(journal_stop);
...
@@ -212,16 +212,14 @@ do { \
  *
  * The primary function for recovering the log contents when mounting a
  * journaled device.
- */
-int journal_recover(journal_t *journal)
-{
-	/*
-	 * Recovery is done in three passes.  In the first pass, we look for the
-	 * end of the log.  In the second, we assemble the list of revoke
-	 * blocks.  In the third and final pass, we replay any un-revoked blocks
-	 * in the log.
-	 */
+ *
+ * Recovery is done in three passes.  In the first pass, we look for the
+ * end of the log.  In the second, we assemble the list of revoke
+ * blocks.  In the third and final pass, we replay any un-revoked blocks
+ * in the log.
+ */
+int journal_recover(journal_t *journal)
+{
 	int err;
 	journal_superblock_t *	sb;
@@ -273,15 +271,13 @@ int journal_recover(journal_t *journal)
  * journal structures in memory to ignore it (presumably because the
  * caller has evidence that it is out of date).
  * This function doesn't appear to be exported..
- */
-int journal_skip_recovery(journal_t *journal)
-{
-	/*
-	 * We perform one pass over the journal to allow us to tell the user how
-	 * much recovery information is being erased, and to let us initialise
-	 * the journal transaction sequence numbers to the next unused ID.
-	 */
+ *
+ * We perform one pass over the journal to allow us to tell the user how
+ * much recovery information is being erased, and to let us initialise
+ * the journal transaction sequence numbers to the next unused ID.
+ */
+int journal_skip_recovery(journal_t *journal)
+{
 	int err;
 	journal_superblock_t *	sb;
...
@@ -266,113 +266,6 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 	return handle;
 }
 
-/*
- * Return zero on success
- */
-static int try_start_this_handle(journal_t *journal, handle_t *handle)
-{
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	int ret = 0;
-
-	jbd_debug(3, "New handle %p maybe going live.\n", handle);
-
-	lock_journal(journal);
-
-	if (is_journal_aborted(journal) ||
-	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
-		ret = -EROFS;
-		goto fail_unlock;
-	}
-
-	if (journal->j_barrier_count)
-		goto fail_unlock;
-
-	if (!journal->j_running_transaction && get_transaction(journal, 1) == 0)
-		goto fail_unlock;
-
-	transaction = journal->j_running_transaction;
-	if (transaction->t_state == T_LOCKED)
-		goto fail_unlock;
-
-	needed = transaction->t_outstanding_credits + nblocks;
-	/* We could run log_start_commit here */
-	if (needed > journal->j_max_transaction_buffers)
-		goto fail_unlock;
-
-	needed = journal->j_max_transaction_buffers;
-	if (journal->j_committing_transaction)
-		needed += journal->j_committing_transaction->
-					t_outstanding_credits;
-
-	if (log_space_left(journal) < needed)
-		goto fail_unlock;
-
-	handle->h_transaction = transaction;
-	transaction->t_outstanding_credits += nblocks;
-	transaction->t_updates++;
-	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-		  handle, nblocks, transaction->t_outstanding_credits,
-		  log_space_left(journal));
-	unlock_journal(journal);
-	return 0;
-
-fail_unlock:
-	unlock_journal(journal);
-	if (ret >= 0)
-		ret = -1;
-	return ret;
-}
-
-/**
- * handle_t *journal_try_start() - Don't block, but try and get a handle
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffer we might modify
- *
- * Try to start a handle, but non-blockingly.  If we weren't able
- * to, return an ERR_PTR value.
- */
-handle_t *journal_try_start(journal_t *journal, int nblocks)
-{
-	handle_t *handle = journal_current_handle();
-	int err;
-
-	if (!journal)
-		return ERR_PTR(-EROFS);
-
-	if (handle) {
-		jbd_debug(4, "h_ref %d -> %d\n",
-				handle->h_ref,
-				handle->h_ref + 1);
-		J_ASSERT(handle->h_transaction->t_journal == journal);
-		if (is_handle_aborted(handle))
-			return ERR_PTR(-EIO);
-		handle->h_ref++;
-		return handle;
-	} else {
-		jbd_debug(4, "no current transaction\n");
-	}
-
-	if (is_journal_aborted(journal))
-		return ERR_PTR(-EIO);
-
-	handle = new_handle(nblocks);
-	if (!handle)
-		return ERR_PTR(-ENOMEM);
-
-	current->journal_info = handle;
-	err = try_start_this_handle(journal, handle);
-	if (err < 0) {
-		kfree(handle);
-		current->journal_info = NULL;
-		return ERR_PTR(err);
-	}
-	return handle;
-}
-
 /**
  * int journal_extend() - extend buffer credits.
  * @handle:  handle to 'extend'
@@ -969,22 +862,23 @@ int journal_get_undo_access (handle_t *handle, struct buffer_head *bh)
 }
 
 /**
- * int journal_dirty_data() - mark a buffer as containing dirty data which needs to be flushed before we can commit the current transaction.
+ * int journal_dirty_data() - mark a buffer as containing dirty data which
+ * needs to be flushed before we can commit the
+ * current transaction.
  * @handle: transaction
 * @bh: bufferhead to mark
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
 * Returns error number or 0 on success.
- */
-int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
-{
-	/*
-	 * journal_dirty_data() can be called via page_launder->ext3_writepage
-	 * by kswapd.  So it cannot block.  Happily, there's nothing here
-	 * which needs lock_journal if `async' is set.
-	 */
+ *
+ * journal_dirty_data() can be called via page_launder->ext3_writepage
+ * by kswapd.  So it cannot block.  Happily, there's nothing here
+ * which needs lock_journal if `async' is set.
+ */
+int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
+{
 	journal_t *journal = handle->h_transaction->t_journal;
 	int need_brelse = 0;
 	struct journal_head *jh;
@@ -1129,23 +1023,22 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
  * @handle: transaction to add buffer to.
  * @bh: buffer to mark
  *
- * mark dirty metadata which needs to be journaled as part of the current transaction.
+ * mark dirty metadata which needs to be journaled as part of the current
+ * transaction.
  *
  * The buffer is placed on the transaction's metadata list and is marked
  * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
- */
-int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
-{
-	/*
-	 * Special care needs to be taken if the buffer already belongs to the
-	 * current committing transaction (in which case we should have frozen
-	 * data present for that commit).  In that case, we don't relink the
-	 * buffer: that only gets done when the old transaction finally
-	 * completes its commit.
-	 *
-	 */
+ *
+ * Special care needs to be taken if the buffer already belongs to the
+ * current committing transaction (in which case we should have frozen
+ * data present for that commit).  In that case, we don't relink the
+ * buffer: that only gets done when the old transaction finally
+ * completes its commit.
+ */
+int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
+{
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
 	struct journal_head *jh = bh2jh(bh);
@@ -1726,13 +1619,6 @@ static inline int __journal_try_to_free_buffer(struct buffer_head *bh)
  * to be called. We do this if the page is releasable by try_to_free_buffers().
  * We also do it if the page has locked or dirty buffers and the caller wants
  * us to perform sync or async writeout.
- */
-int journal_try_to_free_buffers(journal_t *journal,
-				struct page *page, int unused_gfp_mask)
-{
-	/*
-	 * journal_try_to_free_buffers().  Try to remove all this page's buffers
-	 * from the journal.
-	 *
+ *
  * This complicates JBD locking somewhat. We aren't protected by the
  * BKL here. We wish to remove the buffer from its committing or
@@ -1752,6 +1638,9 @@ int journal_try_to_free_buffers(journal_t *journal,
  * cannot happen because we never reallocate freed data as metadata
  * while the data is part of a transaction.  Yes?
  */
+int journal_try_to_free_buffers(journal_t *journal,
+		struct page *page, int unused_gfp_mask)
+{
 	struct buffer_head *head;
 	struct buffer_head *bh;
 	int ret = 0;
...
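With the non-blocking `journal_try_start()` path gone, all callers funnel through `journal_start()` and must tolerate sleeping. The surviving caller pattern, sketched as a hypothetical update helper:

```c
static int demo_update(journal_t *journal, int nblocks)
{
	handle_t *handle;

	handle = journal_start(journal, nblocks);	/* may sleep */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... journal_get_write_access() / journal_dirty_metadata() ... */

	return journal_stop(handle);
}
```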
@@ -210,14 +210,6 @@ static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
 	return journal_start(journal, nblocks);
 }
 
-static inline handle_t *
-ext3_journal_try_start(struct inode *inode, int nblocks)
-{
-	if (inode->i_sb->s_flags & MS_RDONLY)
-		return ERR_PTR(-EROFS);
-	return journal_try_start(EXT3_JOURNAL(inode), nblocks);
-}
-
 /*
  * The only special thing we need to do here is to make sure that all
  * journal_stop calls result in the superblock being marked dirty, so
...
@@ -26,6 +26,7 @@ struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
 					unsigned long address);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 				pmd_t *pmd, int write);
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
 
 extern int htlbpage_max;
@@ -56,6 +57,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #define hugepage_vma(mm, addr)			0
 #define mark_mm_hugetlb(mm, vma)		do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)	0
+#define is_aligned_hugepage_range(addr, len)	0
 #define pmd_huge(x)				0
 
 #ifndef HPAGE_MASK
...
@@ -726,7 +726,6 @@ static inline handle_t *journal_current_handle(void)
  */
 
 extern handle_t *journal_start(journal_t *, int nblocks);
-extern handle_t *journal_try_start(journal_t *, int nblocks);
 extern int	 journal_restart (handle_t *, int nblocks);
 extern int	 journal_extend (handle_t *, int nblocks);
 extern int	 journal_get_write_access (handle_t *, struct buffer_head *);
...
@@ -15,24 +15,14 @@
 extern u64 jiffies_64;
 extern unsigned long volatile jiffies;
 
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void);
+#else
 static inline u64 get_jiffies_64(void)
 {
-#if BITS_PER_LONG < 64
-	extern seqlock_t xtime_lock;
-	unsigned long seq;
-	u64 tmp;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		tmp = jiffies_64;
-	} while (read_seqretry(&xtime_lock, seq));
-	return tmp;
-#else
 	return (u64)jiffies;
-#endif
 }
+#endif
 
 /*
  *	These inlines deal with timer wrapping correctly. You are
...
@@ -509,6 +509,7 @@ extern void __set_special_pids(pid_t session, pid_t pgrp);
 /* per-UID process charging. */
 extern struct user_struct * alloc_uid(uid_t);
 extern void free_uid(struct user_struct *);
+extern void switch_uid(struct user_struct *);
 
 #include <asm/current.h>
...
@@ -249,7 +249,7 @@ void reparent_to_init(void)
 	/* signals? */
 	security_task_reparent_to_init(current);
 	memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
-	current->user = INIT_USER;
+	switch_uid(INIT_USER);
 
 	write_unlock_irq(&tasklist_lock);
 }
...
@@ -121,15 +121,7 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
 		if (curtask->files->fd[i]) close(i);
 	}
 
-	/* Drop the "current user" thing */
-	{
-		struct user_struct *user = curtask->user;
-		curtask->user = INIT_USER;
-		atomic_inc(&INIT_USER->__count);
-		atomic_inc(&INIT_USER->processes);
-		atomic_dec(&user->processes);
-		free_uid(user);
-	}
+	switch_uid(INIT_USER);
 
 	/* Give kmod all effective privileges.. */
 	curtask->euid = curtask->fsuid = 0;
...
@@ -490,6 +490,9 @@ EXPORT_SYMBOL(xtime);
 EXPORT_SYMBOL(xtime_lock);
 EXPORT_SYMBOL(do_gettimeofday);
 EXPORT_SYMBOL(do_settimeofday);
+#if (BITS_PER_LONG < 64)
+EXPORT_SYMBOL(get_jiffies_64);
+#endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 EXPORT_SYMBOL(__might_sleep);
 #endif
...
@@ -561,19 +561,12 @@ asmlinkage long sys_setgid(gid_t gid)
 
 static int set_user(uid_t new_ruid, int dumpclear)
 {
-	struct user_struct *new_user, *old_user;
-
-	/* What if a process setreuid()'s and this brings the
-	 * new uid over his NPROC rlimit?  We can check this now
-	 * cheaply with the new uid cache, so if it matters
-	 * we should be checking for it.  -DaveM
-	 */
+	struct user_struct *new_user;
 
 	new_user = alloc_uid(new_ruid);
 	if (!new_user)
 		return -EAGAIN;
 
-	old_user = current->user;
-	atomic_dec(&old_user->processes);
-	atomic_inc(&new_user->processes);
+	switch_uid(new_user);
 
 	if(dumpclear)
 	{
@@ -581,8 +574,6 @@ static int set_user(uid_t new_ruid, int dumpclear)
 		wmb();
 	}
 	current->uid = new_ruid;
-	current->user = new_user;
-	free_uid(old_user);
 	return 0;
 }
...
@@ -27,7 +27,6 @@
 #include <linux/timex.h>
 #include <linux/errno.h>
 #include <linux/smp_lock.h>
-
 #include <asm/uaccess.h>
 
 /*
@@ -416,3 +415,17 @@ struct timespec current_kernel_time(void)
 	return now;
 }
+
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void)
+{
+	unsigned long seq;
+	u64 ret;
+
+	do {
+		seq = read_seqbegin(&xtime_lock);
+		ret = jiffies_64;
+	} while (read_seqretry(&xtime_lock, seq));
+	return ret;
+}
+#endif
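On 32-bit platforms the 64-bit jiffies counter cannot be read atomically, hence the seqlock retry loop above; 64-bit platforms keep the inline cast in jiffies.h. A hypothetical caller only needs the one entry point:

```c
/* Wrap-safe elapsed-ticks helper; illustrative, not from the patch. */
static u64 demo_elapsed(u64 start)
{
	return get_jiffies_64() - start;
}
```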
@@ -116,6 +116,23 @@ struct user_struct * alloc_uid(uid_t uid)
 	return up;
 }
 
+void switch_uid(struct user_struct *new_user)
+{
+	struct user_struct *old_user;
+
+	/* What if a process setreuid()'s and this brings the
+	 * new uid over his NPROC rlimit?  We can check this now
+	 * cheaply with the new uid cache, so if it matters
+	 * we should be checking for it.  -DaveM
+	 */
+	old_user = current->user;
+	atomic_inc(&new_user->__count);
+	atomic_inc(&new_user->processes);
+	atomic_dec(&old_user->processes);
+	current->user = new_user;
+	free_uid(old_user);
+}
+
 static int __init uid_cache_init(void)
 {
...
@@ -154,8 +154,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 
 		/* Increase the height.  */
 		node->slots[0] = root->rnode;
-		if (root->rnode)
-			node->count = 1;
+		node->count = 1;
 		root->rnode = node;
 		root->height++;
 	} while (height > root->height);
...
@@ -801,6 +801,13 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 			return -ENOMEM;
 		if (addr & ~PAGE_MASK)
 			return -EINVAL;
+		if (is_file_hugepages(file)) {
+			unsigned long ret;
+
+			ret = is_aligned_hugepage_range(addr, len);
+			if (ret)
+				return ret;
+		}
 		return addr;
 	}
@@ -1224,8 +1231,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* we have start < mpnt->vm_end  */
 
 	if (is_vm_hugetlb_page(mpnt)) {
-		if ((start & ~HPAGE_MASK) || (len & ~HPAGE_MASK))
-			return -EINVAL;
+		int ret = is_aligned_hugepage_range(start, len);
+
+		if (ret)
+			return ret;
 	}
 
 	/* if it doesn't overlap, we have nothing.. */
...