Commit 4024c058 authored by Steve French

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs
parents 720ecfed 44cb023a
......@@ -187,6 +187,9 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
struct sigframe *frame;
sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
* then 'sp' should be word aligned here. If it's
......@@ -232,6 +235,9 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe *frame;
sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
* then 'sp' should be word aligned here. If it's
......@@ -462,8 +468,6 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
if (syscall) {
switch (regs->ARM_r0) {
case -ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn =
do_no_restart_syscall;
case -ERESTARTNOHAND:
regs->ARM_r0 = -EINTR;
break;
......
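The hunks above (and the matching ppc64 and sparc hunks below) all apply one pattern: invalidating a pending syscall restart moves from the -ERESTART_RESTARTBLOCK case of signal delivery to the top of every sigreturn path, so the restart is cancelled whenever a handler returns, not only when that particular error code is inspected. A minimal userspace model of the mechanism, with all types and names as illustrative stubs:

#include <stdio.h>

#define EINTR 4

/* stand-ins for the kernel's restart_block machinery */
struct restart_block { long (*fn)(void); };
static struct restart_block restart;

static long do_no_restart_syscall(void) { return -EINTR; }
static long restart_some_syscall(void)  { return 0; /* would re-issue the call */ }

static void sys_sigreturn(void)
{
    /* Always make any pending restarted system calls return -EINTR */
    restart.fn = do_no_restart_syscall;
}

int main(void)
{
    restart.fn = restart_some_syscall;  /* a syscall asked to be restarted */
    sys_sigreturn();                    /* then a signal handler returned */
    printf("restart now yields %ld\n", restart.fn());  /* -4, i.e. -EINTR */
    return 0;
}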
......@@ -277,8 +277,6 @@ static void __init init_intel(struct cpuinfo_x86 *c)
extern int phys_proc_id[NR_CPUS];
u32 eax, ebx, ecx, edx;
int index_lsb, index_msb, tmp;
int initial_apic_id;
int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
......@@ -287,8 +285,6 @@ static void __init init_intel(struct cpuinfo_x86 *c)
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1 ) {
index_lsb = 0;
index_msb = 31;
/*
* At this point we only support two siblings per
* processor package.
......@@ -299,20 +295,13 @@ static void __init init_intel(struct cpuinfo_x86 *c)
smp_num_siblings = 1;
goto too_many_siblings;
}
tmp = smp_num_siblings;
while ((tmp & 1) == 0) {
tmp >>=1 ;
index_lsb++;
}
tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
if (index_lsb != index_msb )
index_msb++;
initial_apic_id = ebx >> 24 & 0xff;
phys_proc_id[cpu] = initial_apic_id >> index_msb;
/* cpuid returns the value latched in the HW at reset,
* not the APIC ID register's value. For any box
* whose BIOS changes APIC IDs, like clustered APIC
* systems, we must use hard_smp_processor_id.
* See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
*/
phys_proc_id[cpu] = hard_smp_processor_id() & ~(smp_num_siblings - 1);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
......
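The replacement derives the package ID by masking the APIC ID rather than shifting by a computed index width. A worked example of the masking (values are made up; it assumes smp_num_siblings is a power of two, which the surrounding code enforces by only supporting two siblings):

#include <stdio.h>

int main(void)
{
    int smp_num_siblings = 2;          /* two HT siblings per package */
    int apic_ids[] = { 0, 1, 2, 3 };   /* hypothetical hard_smp_processor_id() values */

    for (int i = 0; i < 4; i++)
        printf("APIC ID %d -> physical package %d\n",
               apic_ids[i], apic_ids[i] & ~(smp_num_siblings - 1));
    /* prints 0,0,2,2: CPUs 0/1 share package 0, CPUs 2/3 share package 2 */
    return 0;
}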
......@@ -361,15 +361,12 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
unsigned char *mpt=((unsigned char *)mpc)+count;
if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
printk("SMP mptable: bad signature [%c%c%c%c]!\n",
mpc->mpc_signature[0],
mpc->mpc_signature[1],
mpc->mpc_signature[2],
mpc->mpc_signature[3]);
printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
*(u32 *)mpc->mpc_signature);
return 0;
}
if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
printk("SMP mptable: checksum error!\n");
printk(KERN_ERR "SMP mptable: checksum error!\n");
return 0;
}
if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
......
......@@ -94,6 +94,7 @@ void do_gettimeofday(struct timeval *tv)
{
unsigned long seq;
unsigned long usec, sec;
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
unsigned long lost;
......@@ -102,16 +103,20 @@ void do_gettimeofday(struct timeval *tv)
usec = cur_timer->get_offset();
lost = jiffies - wall_jiffies;
if (lost)
usec += lost * (1000000 / HZ);
/*
* If time_adjust is negative then NTP is slowing the clock
* so make sure not to go into the next possible interval.
* Better to lose some accuracy than have time go backwards.
*/
if (unlikely(time_adjust < 0) && usec > tickadj)
usec = tickadj;
if (unlikely(time_adjust < 0)) {
usec = min(usec, max_ntp_tick);
if (lost)
usec += lost * max_ntp_tick;
}
else if (unlikely(lost))
usec += lost * tick_usec;
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
......
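The rewritten clamp accounts lost ticks at the shortened NTP tick length as well, instead of clamping the current offset alone. A sketch of the logic with made-up numbers (HZ=100-style values, not the kernel's actual timekeeping state):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned long tick_usec = 10000, tickadj = 500;
    unsigned long max_ntp_tick = tick_usec - tickadj;  /* shortened tick */
    long time_adjust = -100;             /* NTP is slowing the clock */
    unsigned long usec = 9800, lost = 2; /* raw offset, missed ticks */

    if (time_adjust < 0) {
        usec = min(usec, max_ntp_tick);  /* don't run into the next interval */
        usec += lost * max_ntp_tick;     /* lost ticks at the shortened length */
    } else if (lost) {
        usec += lost * tick_usec;
    }
    printf("reported offset: %lu usec\n", usec);  /* 9500 + 2*9500 = 28500 */
    return 0;
}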
......@@ -139,7 +139,7 @@ unsigned long long sched_clock(void)
* synchronized across all CPUs.
*/
#ifndef CONFIG_NUMA
if (unlikely(!cpu_has_tsc))
if (!use_tsc)
#endif
return (unsigned long long)jiffies * (1000000000 / HZ);
......
......@@ -59,7 +59,7 @@ static struct page *alloc_fresh_huge_page(void)
return page;
}
void free_huge_page(struct page *page);
static void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void)
{
......@@ -275,7 +275,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
}
#endif
void free_huge_page(struct page *page)
static void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
BUG_ON(page->mapping);
......@@ -381,7 +381,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
return ret;
}
void update_and_free_page(struct page *page)
static void update_and_free_page(struct page *page)
{
int j;
struct page *map;
......@@ -399,7 +399,7 @@ void update_and_free_page(struct page *page)
__free_pages(page, HUGETLB_PAGE_ORDER);
}
int try_to_free_low(int count)
static int try_to_free_low(int count)
{
struct list_head *p;
struct page *page, *map;
......@@ -430,7 +430,7 @@ int try_to_free_low(int count)
return count;
}
int set_hugetlb_mem_size(int count)
static int set_hugetlb_mem_size(int count)
{
int lcount;
struct page *page;
......@@ -471,6 +471,8 @@ int set_hugetlb_mem_size(int count)
int hugetlb_sysctl_handler(ctl_table *table, int write,
struct file *file, void *buffer, size_t *length)
{
if (!cpu_has_pse)
return -ENODEV;
proc_dointvec(table, write, file, buffer, length);
htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
return 0;
......@@ -489,6 +491,9 @@ static int __init hugetlb_init(void)
int i;
struct page *page;
if (!cpu_has_pse)
return -ENODEV;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
......
......@@ -220,6 +220,9 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
sigset_t set;
stack_t st;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
if (verify_area(VERIFY_READ, uc, sizeof(*uc)))
goto badframe;
......@@ -354,8 +357,6 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
switch ((int)regs->result) {
case -ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* fallthrough */
case -ERESTARTNOHAND:
/* ERESTARTNOHAND means that the syscall should only be
* restarted if there was no handler for the signal, and since
......
......@@ -300,6 +300,9 @@ static void setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
struct sigcontext32 *sc = (struct sigcontext32 *)(u64)newsp;
int i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
goto badframe;
if (regs->msr & MSR_FP)
......@@ -420,6 +423,9 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
int i;
mm_segment_t old_fs;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* Adjust the inputted reg1 to point to the first rt signal frame */
rt_sf = (struct rt_sigframe_32 *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
/* Copy the information from the user stack */
......
......@@ -284,6 +284,9 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
sigset_t set;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
if (current->thread.new_signal)
......@@ -1085,12 +1088,6 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
ka = &current->sighand->action[signr-1];
/* Always make any pending restarted system
* calls return -EINTR.
*/
current_thread_info()->restart_block.fn =
do_no_restart_syscall;
if (cookie.restart_syscall)
syscall_restart(cookie.orig_i0, regs, &ka->sa);
handle_signal(signr, ka, &info, oldset, regs, svr4_signal);
......
......@@ -382,6 +382,9 @@ void do_rt_sigreturn(struct pt_regs *regs)
stack_t st;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack ();
sf = (struct rt_signal_frame __user *)
(regs->u_regs [UREG_FP] + STACK_BIAS);
......@@ -627,12 +630,6 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
ka = &current->sighand->action[signr-1];
/* Always make any pending restarted system
* calls return -EINTR.
*/
current_thread_info()->restart_block.fn =
do_no_restart_syscall;
if (cookie.restart_syscall)
syscall_restart(orig_i0, regs, &ka->sa);
handle_signal(signr, ka, &info, oldset, regs);
......
......@@ -331,6 +331,9 @@ asmlinkage void do_sigreturn32(struct pt_regs *regs)
unsigned int seta[_COMPAT_NSIG_WORDS];
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
if (test_thread_flag(TIF_NEWSIGNALS))
return do_new_sigreturn32(regs);
......@@ -398,6 +401,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
stack_t st;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
......@@ -1258,12 +1264,6 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
ka = &current->sighand->action[signr-1];
/* Always make any pending restarted system
* calls return -EINTR.
*/
current_thread_info()->restart_block.fn =
do_no_restart_syscall;
if (cookie.restart_syscall)
syscall_restart32(orig_i0, regs, &ka->sa);
handle_signal32(signr, ka, &info, oldset, regs, svr4_signal);
......
......@@ -65,8 +65,8 @@ sys_call_table32:
.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys_getpgid
.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
/*230*/ .word sys32_select, sys_time, sys_nis_syscall, sys_stime, sys_statfs64
.word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*230*/ .word sys32_select, sys_time, sys_nis_syscall, sys_stime, compat_statfs64
.word compat_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
/*250*/ .word sys32_mremap, sys32_sysctl, sys_getsid, sys_fdatasync, sys32_nfsservctl
......
......@@ -249,12 +249,17 @@ config BLK_DEV_IDESCSI
tristate "SCSI emulation support"
depends on SCSI
---help---
WARNING: ide-scsi is no longer needed for CD writing applications!
The 2.6 kernel supports direct writing to ide-cd, which eliminates
the need for ide-scsi + the entire SCSI stack just for writing a
CD. The new method is more efficient in every way.
This will provide SCSI host adapter emulation for IDE ATAPI devices,
and will allow you to use a SCSI device driver instead of a native
ATAPI driver.
This is useful if you have an ATAPI device for which no native
driver has been written (for example, an ATAPI PD-CD or CDR drive);
driver has been written (for example, an ATAPI PD-CD drive);
you can then use this emulation together with an appropriate SCSI
device driver. In order to do this, say Y here and to "SCSI support"
and "SCSI generic support", below. You must then provide the kernel
......@@ -262,8 +267,7 @@ config BLK_DEV_IDESCSI
documentation of your boot loader (lilo or loadlin) about how to
pass options to the kernel at boot time) for devices if you want the
native EIDE sub-drivers to skip over the native support, so that
this SCSI emulation can be used instead. This is required for use of
CD-RW's.
this SCSI emulation can be used instead.
Note that this option does NOT allow you to attach SCSI devices to a
box that doesn't have a SCSI host adapter installed.
......
......@@ -320,14 +320,16 @@ int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&vb->done, &wait);
while (vb->state == STATE_ACTIVE ||
vb->state == STATE_QUEUED) {
while (vb->state == STATE_ACTIVE || vb->state == STATE_QUEUED) {
if (non_blocking) {
retval = -EAGAIN;
break;
}
current->state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
schedule();
set_current_state(intr ? TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (vb->state == STATE_ACTIVE || vb->state == STATE_QUEUED)
schedule();
set_current_state(TASK_RUNNING);
if (intr && signal_pending(current)) {
dprintk(1,"buffer waiton: -EINTR\n");
retval = -EINTR;
......
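The fix re-tests the buffer state after marking the task as sleeping, so a wakeup landing between the test and schedule() is not lost. The userspace analogue of the same discipline is re-checking the predicate under the lock before every wait (a sketch, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int state_active = 1;             /* models vb->state == STATE_ACTIVE */

static void *completer(void *arg)
{
    pthread_mutex_lock(&lock);
    state_active = 0;                    /* the buffer finished */
    pthread_cond_signal(&done);
    pthread_mutex_unlock(&lock);
    return arg;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, completer, NULL);
    pthread_mutex_lock(&lock);
    while (state_active)                 /* re-check before every sleep */
        pthread_cond_wait(&done, &lock);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    puts("buffer done");
    return 0;
}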
......@@ -998,12 +998,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
set_bit_le(index, hash_table);
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
*setup_frm++ = hash_table[i];
}
setup_frm = &tp->setup_frame[13*6];
}
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
*setup_frm++ = hash_table[i];
}
setup_frm = &tp->setup_frame[13*6];
/* Fill the final entry with our physical address. */
eaddrs = (u16 *)dev->dev_addr;
......@@ -1103,11 +1103,13 @@ static void set_rx_mode(struct net_device *dev)
}
} else {
unsigned long flags;
u32 tx_flags = 0x08000000 | 192;
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
if (dev->mc_count > 14) { /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
build_setup_frame_perfect(tp->setup_frame, dev);
}
......@@ -1117,7 +1119,6 @@ static void set_rx_mode(struct net_device *dev)
if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
/* Same setup recently queued, we need not add it. */
} else {
u32 tx_flags = 0x08000000 | 192;
unsigned int entry;
int dummy = -1;
......
......@@ -950,8 +950,14 @@ static int idescsi_attach(ide_drive_t *drive)
{
idescsi_scsi_t *idescsi;
struct Scsi_Host *host;
static int warned;
int err;
if (!warned && drive->media == ide_cdrom) {
printk(KERN_WARNING "ide-scsi is deprecated for cd burning! Use ide-cd and give dev=/dev/hdX as device\n");
warned = 1;
}
if (!strstr("ide-scsi", drive->driver_req) ||
!drive->present ||
drive->media == ide_disk ||
......
......@@ -899,7 +899,7 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
unsigned int retries;
struct scsi_lun *lunp, *lun_data;
struct scsi_request *sreq;
char *data;
u8 *data;
/*
* Only support SCSI-3 and up devices.
......@@ -990,7 +990,7 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
/*
* Get the length from the first four bytes of lun_data.
*/
data = (char *) lun_data->scsi_lun;
data = (u8 *) lun_data->scsi_lun;
length = ((data[0] << 24) | (data[1] << 16) |
(data[2] << 8) | (data[3] << 0));
......
......@@ -6,7 +6,7 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EMBEDDED || !X86
depends on !ARCH_ACORN && !ARCH_EBSA110 || !4xx && !8xx
depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC32 && !SPARC64
default y
help
Saying Y here will allow you to use Linux in text mode through a
......
config BINFMT_ELF
tristate "Kernel support for ELF binaries"
bool "Kernel support for ELF binaries"
depends on MMU
default y
---help---
......
......@@ -169,7 +169,6 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs *buf)
static int put_compat_statfs64(struct compat_statfs64 *ubuf, struct kstatfs *kbuf)
{
if (sizeof ubuf->f_blocks == 4) {
if ((kbuf->f_blocks | kbuf->f_bfree |
kbuf->f_bavail | kbuf->f_files | kbuf->f_ffree) &
......@@ -192,11 +191,14 @@ static int put_compat_statfs64(struct compat_statfs64 *ubuf, struct kstatfs *kbu
return 0;
}
asmlinkage long compat_statfs64(const char *path, struct compat_statfs64 *buf)
asmlinkage long compat_statfs64(const char *path, compat_size_t sz, struct compat_statfs64 *buf)
{
struct nameidata nd;
int error;
if (sz != sizeof(*buf))
return -EINVAL;
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
......@@ -208,12 +210,15 @@ asmlinkage long compat_statfs64(const char *path, struct compat_statfs64 *buf)
return error;
}
asmlinkage long compat_fstatfs64(unsigned int fd, struct compat_statfs64 *buf)
asmlinkage long compat_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 *buf)
{
struct file * file;
struct kstatfs tmp;
int error;
if (sz != sizeof(*buf))
return -EINVAL;
error = -EBADF;
file = fget(fd);
if (!file)
......
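Passing the caller's structure size and rejecting a mismatch makes ABI drift fail loudly with -EINVAL instead of silently copying a wrong-sized struct. A minimal model of the guard (the struct layout here is illustrative, not the real compat_statfs64):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct compat_statfs64 { uint32_t f_type, f_bsize; uint64_t f_blocks; };

static long statfs64_checked(const char *path, size_t sz,
                             struct compat_statfs64 *buf)
{
    if (sz != sizeof(*buf))
        return -EINVAL;          /* caller built against a different ABI */
    buf->f_type = 0; buf->f_bsize = 4096; buf->f_blocks = 1UL << 20;
    (void)path;
    return 0;
}

int main(void)
{
    struct compat_statfs64 st;
    printf("matching size -> %ld\n", statfs64_checked("/", sizeof st, &st));
    printf("wrong size    -> %ld\n", statfs64_checked("/", sizeof st - 4, &st));
    return 0;
}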
......@@ -331,7 +331,7 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
struct ext2_group_desc *desc;
int group_no; /* i */
int ret_block; /* j */
int bit; /* k */
int group_idx; /* k */
int target_block; /* tmp */
int block = 0;
struct super_block *sb = inode->i_sb;
......@@ -340,6 +340,7 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
unsigned group_size = EXT2_BLOCKS_PER_GROUP(sb);
unsigned prealloc_goal = es->s_prealloc_blocks;
unsigned group_alloc = 0, es_alloc, dq_alloc;
int nr_scanned_groups;
if (!prealloc_goal--)
prealloc_goal = EXT2_DEFAULT_PREALLOC_BLOCKS - 1;
......@@ -402,9 +403,10 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
* Now search the rest of the groups. We assume that
* i and desc correctly point to the last group visited.
*/
nr_scanned_groups = 0;
retry:
for (bit = 0; !group_alloc &&
bit < sbi->s_groups_count; bit++) {
for (group_idx = 0; !group_alloc &&
group_idx < sbi->s_groups_count; group_idx++) {
group_no++;
if (group_no >= sbi->s_groups_count)
group_no = 0;
......@@ -426,10 +428,21 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
ret_block = grab_block(sb_bgl_lock(sbi, group_no), bitmap_bh->b_data,
group_size, 0);
if (ret_block < 0) {
/*
* If a free block counter is corrupted we can loop infinitely.
* Detect that here.
*/
nr_scanned_groups++;
if (nr_scanned_groups > 2 * sbi->s_groups_count) {
ext2_error(sb, "ext2_new_block",
"corrupted free blocks counters");
goto io_error;
}
/*
* Someone else grabbed the last free block in this blockgroup
* before us. Retry the scan.
*/
group_release_blocks(sb, group_no, desc, gdp_bh, group_alloc);
group_alloc = 0;
goto retry;
}
......
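The new counter bounds the retry loop: if the per-group free-block counters are corrupt, every grab fails and the old loop would spin forever. A compact model of the bound (the always-failing grab and the group count are illustrative):

#include <stdio.h>

int main(void)
{
    int s_groups_count = 8, nr_scanned_groups = 0;

    for (;;) {
        int ret_block = -1;      /* models grab_block() always losing the race */
        if (ret_block < 0) {
            if (++nr_scanned_groups > 2 * s_groups_count) {
                puts("ext2_new_block: corrupted free blocks counters");
                break;           /* before the fix, this looped forever */
            }
            continue;            /* retry the scan */
        }
        break;                   /* got a block */
    }
    return 0;
}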
......@@ -63,41 +63,6 @@ read_inode_bitmap(struct super_block * sb, unsigned long block_group)
return bh;
}
/*
* Speculatively reserve an inode in a blockgroup which used to have some
* spare ones. Later, when we come to actually claim the inode in the bitmap
* it may be that it was taken. In that case the allocator will undo this
* reservation and try again.
*
* The inode allocator does not physically alter the superblock. But we still
* set sb->s_dirt, because the superblock was "logically" altered - we need to
* go and add up the free inodes counts again and flush out the superblock.
*/
static void ext2_reserve_inode(struct super_block *sb, int group, int dir)
{
struct ext2_group_desc * desc;
struct buffer_head *bh;
desc = ext2_get_group_desc(sb, group, &bh);
if (!desc) {
ext2_error(sb, "ext2_reserve_inode",
"can't get descriptor for group %d", group);
return;
}
spin_lock(sb_bgl_lock(EXT2_SB(sb), group));
desc->bg_free_inodes_count =
cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) - 1);
if (dir)
desc->bg_used_dirs_count =
cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) + 1);
spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
if (dir)
percpu_counter_inc(&EXT2_SB(sb)->s_dirs_counter);
sb->s_dirt = 1;
mark_buffer_dirty(bh);
}
static void ext2_release_inode(struct super_block *sb, int group, int dir)
{
struct ext2_group_desc * desc;
......@@ -273,8 +238,6 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
if (!best_desc)
return -1;
ext2_reserve_inode(sb, best_group, 1);
return best_group;
}
......@@ -419,7 +382,6 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
return -1;
found:
ext2_reserve_inode(sb, group, 1);
return group;
}
......@@ -481,8 +443,6 @@ static int find_group_other(struct super_block *sb, struct inode *parent)
return -1;
found:
ext2_reserve_inode(sb, group, 0);
return group;
}
......@@ -508,7 +468,6 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)
ei = EXT2_I(inode);
sbi = EXT2_SB(sb);
es = sbi->s_es;
repeat:
if (S_ISDIR(mode)) {
if (test_opt(sb, OLDALLOC))
group = find_group_dir(sb, dir);
......@@ -528,12 +487,14 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)
bitmap_bh = read_inode_bitmap(sb, group);
if (!bitmap_bh) {
err = -EIO;
goto fail2;
goto fail;
}
ino = 0;
i = ext2_find_first_zero_bit((unsigned long *)bitmap_bh->b_data,
EXT2_INODES_PER_GROUP(sb));
if (i >= EXT2_INODES_PER_GROUP(sb)) {
repeat_in_this_group:
ino = ext2_find_next_zero_bit((unsigned long *)bitmap_bh->b_data,
EXT2_INODES_PER_GROUP(sb), ino);
if (ino >= EXT2_INODES_PER_GROUP(sb)) {
/*
* Rare race: find_group_xx() decided that there were
* free inodes in this group, but by the time we tried
......@@ -547,11 +508,16 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)
continue;
}
if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
i, bitmap_bh->b_data)) {
brelse(bitmap_bh);
bitmap_bh = NULL;
ext2_release_inode(sb, group, S_ISDIR(mode));
goto repeat;
ino, bitmap_bh->b_data)) {
/* we lost this inode */
if (++ino >= EXT2_INODES_PER_GROUP(sb)) {
/* this group is exhausted, try next group */
if (++group == sbi->s_groups_count)
group = 0;
continue;
}
/* try to find free inode in the same group */
goto repeat_in_this_group;
}
goto got;
}
......@@ -560,29 +526,35 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)
* Scanned all blockgroups.
*/
err = -ENOSPC;
goto fail2;
goto fail;
got:
mark_buffer_dirty(bitmap_bh);
if (sb->s_flags & MS_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
ino += group * EXT2_INODES_PER_GROUP(sb) + 1;
if (ino < EXT2_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
ext2_error (sb, "ext2_new_inode",
"reserved inode or inode > inodes count - "
"block_group = %d,inode=%lu", group,
(unsigned long) ino);
err = -EIO;
goto fail2;
goto fail;
}
percpu_counter_mod(&EXT2_SB(sb)->s_freeinodes_counter, -1);
if (S_ISDIR(mode))
percpu_counter_inc(&EXT2_SB(sb)->s_dirs_counter);
spin_lock(sb_bgl_lock(EXT2_SB(sb), group));
gdp->bg_free_inodes_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
if (S_ISDIR(mode)) {
if (EXT2_SB(sb)->s_debts[group] < 255)
EXT2_SB(sb)->s_debts[group]++;
gdp->bg_used_dirs_count =
cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
} else {
if (EXT2_SB(sb)->s_debts[group])
EXT2_SB(sb)->s_debts[group]--;
......@@ -590,6 +562,7 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)
spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
sb->s_dirt = 1;
mark_buffer_dirty(bh2);
inode->i_uid = current->fsuid;
if (test_opt (sb, GRPID))
inode->i_gid = dir->i_gid;
......@@ -632,26 +605,24 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)
if (DQUOT_ALLOC_INODE(inode)) {
DQUOT_DROP(inode);
err = -ENOSPC;
goto fail3;
goto fail2;
}
err = ext2_init_acl(inode, dir);
if (err) {
DQUOT_FREE_INODE(inode);
goto fail3;
goto fail2;
}
mark_inode_dirty(inode);
ext2_debug("allocating inode %lu\n", inode->i_ino);
ext2_preread_inode(inode);
return inode;
fail3:
fail2:
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
return ERR_PTR(err);
fail2:
ext2_release_inode(sb, group, S_ISDIR(mode));
fail:
make_bad_inode(inode);
iput(inode);
......
......@@ -617,9 +617,11 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
goto cleanup;
memcpy(header, HDR(bh), bh->b_size);
header->h_refcount = cpu_to_le32(1);
offset = (char *)header - bh->b_data;
here = ENTRY((char *)here + offset);
last = ENTRY((char *)last + offset);
offset = (char *)here - bh->b_data;
here = ENTRY((char *)header + offset);
offset = (char *)last - bh->b_data;
last = ENTRY((char *)header + offset);
}
} else {
/* Allocate a buffer where we construct the new block. */
......
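Algebraically the old and new forms agree (here + (header - b_data) equals header + (here - b_data)), but the old form stores the distance between two unrelated allocations in `offset`, which need not fit its integer type, while the per-pointer in-block offsets computed by the new form are always small. A sketch of the safe rebase (buffers and offsets are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *b_data = calloc(1, 4096);      /* old xattr block */
    char *header = calloc(1, 4096);      /* freshly allocated copy */
    char *here   = b_data + 24;          /* interior pointer into the old block */

    memcpy(header, b_data, 4096);
    long offset = here - b_data;         /* small in-block offset: always safe */
    here = header + offset;              /* rebased onto the new copy */
    printf("here now at offset %ld in the new block\n", (long)(here - header));
    free(b_data); free(header);
    return 0;
}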
......@@ -469,8 +469,11 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
if (!bitmap_bh)
goto fail;
ino = ext3_find_first_zero_bit((unsigned long *)
bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb));
ino = 0;
repeat_in_this_group:
ino = ext3_find_next_zero_bit((unsigned long *)
bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
if (ino < EXT3_INODES_PER_GROUP(sb)) {
int credits = 0;
......@@ -493,6 +496,9 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
}
/* we lost it */
journal_release_buffer(handle, bitmap_bh, credits);
if (++ino < EXT3_INODES_PER_GROUP(sb))
goto repeat_in_this_group;
}
/*
......
......@@ -629,9 +629,10 @@ bad_block: ext3_error(sb, "ext3_xattr_set",
goto cleanup;
memcpy(header, HDR(bh), bh->b_size);
header->h_refcount = cpu_to_le32(1);
offset = (char *)header - bh->b_data;
here = ENTRY((char *)here + offset);
last = ENTRY((char *)last + offset);
offset = (char *)here - bh->b_data;
here = ENTRY((char *)header + offset);
offset = (char *)last - bh->b_data;
last = ENTRY((char *)header + offset);
}
} else {
/* Allocate a buffer where we construct the new block. */
......
......@@ -188,7 +188,7 @@ nlmclnt_recovery(struct nlm_host *host, u32 newstate)
nlmclnt_prepare_reclaim(host, newstate);
nlm_get_host(host);
__module_get(THIS_MODULE);
if (kernel_thread(reclaimer, host, CLONE_KERNEL))
if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
module_put(THIS_MODULE);
}
}
......
......@@ -942,6 +942,7 @@ static int read_super_block (struct super_block * s, int offset)
{
struct buffer_head * bh;
struct reiserfs_super_block * rs;
int fs_blocksize;
bh = sb_bread (s, offset / s->s_blocksize);
......@@ -961,8 +962,9 @@ static int read_super_block (struct super_block * s, int offset)
//
// ok, reiserfs signature (old or new) found in at the given offset
//
sb_set_blocksize (s, sb_blocksize(rs));
fs_blocksize = sb_blocksize(rs);
brelse (bh);
sb_set_blocksize (s, fs_blocksize);
bh = sb_bread (s, offset / s->s_blocksize);
if (!bh) {
......
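The reordering copies the value out of the buffer before sb_set_blocksize(), which invalidates the device's buffers, and only then re-reads the block. The same copy-before-invalidate rule in a userspace sketch, with realloc() standing in as the invalidating call:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *buf = malloc(16);
    strcpy(buf, "4096");

    int fs_blocksize = atoi(buf);   /* read the value out first... */
    buf = realloc(buf, 8192);       /* ...then invalidate/replace the buffer */

    printf("blocksize %d\n", fs_blocksize);
    free(buf);
    return 0;
}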
......@@ -239,9 +239,9 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr)
static int test_bit(int nr, const volatile void * addr);
#endif
static __inline__ int constant_test_bit(int nr, const volatile unsigned long * addr)
static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}
static __inline__ int variable_test_bit(int nr, const volatile unsigned long * addr)
......
......@@ -9,6 +9,7 @@
# define __kernel
#endif
#ifndef __ASSEMBLY__
#if __GNUC__ > 3
# include <linux/compiler-gcc+.h> /* catch-all for GCC 4, 5, etc. */
#elif __GNUC__ == 3
......@@ -18,6 +19,7 @@
#else
# error Sorry, your compiler is too old/not recognized.
#endif
#endif
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
......
......@@ -2,6 +2,7 @@
#define _LINUX_INIT_H
#include <linux/config.h>
#include <linux/compiler.h>
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
......
......@@ -244,9 +244,17 @@ static int find_resource(struct resource *root, struct resource *new,
struct resource *this = root->child;
new->start = root->start;
/*
* Skip past an allocated resource that starts at 0, since the assignment
* of this->start - 1 to new->end below would cause an underflow.
*/
if (this && this->start == 0) {
new->start = this->end + 1;
this = this->sibling;
}
for(;;) {
if (this)
new->end = this->start;
new->end = this->start - 1;
else
new->end = root->end;
if (new->start < min)
......
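Resource ranges are inclusive, so the gap before a child ends at this->start - 1; when an allocated resource starts at 0, that expression wraps around. A short demonstration of the wrap the new guard avoids:

#include <stdio.h>

int main(void)
{
    unsigned long this_start = 0, this_end = 0x9fff;

    /* old code: new->end = this->start - 1 wraps when this->start == 0 */
    printf("unguarded end: %#lx\n", this_start - 1);  /* 0xffffffffffffffff on 64-bit */

    /* fixed code: skip the child at 0 and search after it instead */
    printf("guarded start: %#lx\n", this_end + 1);
    return 0;
}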
......@@ -9,7 +9,6 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_SMP) += percpu_counter.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
......
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/sched.h>
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
int cpu = get_cpu();
long count = fbc->counters[cpu].count;
count += amount;
if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock);
fbc->count += count;
spin_unlock(&fbc->lock);
count = 0;
}
fbc->counters[cpu].count = count;
put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
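percpu_counter_mod() batches updates: deltas accumulate in a CPU-local slot and are folded into the shared total, under the lock, only once they reach +/-FBC_BATCH, trading exactness of reads for far fewer lock acquisitions. A single-threaded model of the batching (one local slot stands in for the per-CPU array; the FBC_BATCH value is assumed):

#include <stdio.h>

#define FBC_BATCH 32

struct pc { long count; long local; };

static void pc_mod(struct pc *c, long amount)
{
    long count = c->local + amount;
    if (count >= FBC_BATCH || count <= -FBC_BATCH) {
        c->count += count;       /* the kernel takes fbc->lock here */
        count = 0;
    }
    c->local = count;
}

int main(void)
{
    struct pc c = { 0, 0 };
    for (int i = 0; i < 100; i++)
        pc_mod(&c, 1);
    printf("global %ld, local %ld\n", c.count, c.local);  /* 96 and 4 */
    return 0;
}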
......@@ -48,24 +48,8 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
bootmem_data_t *bdata = pgdat->bdata;
unsigned long mapsize = ((end - start)+7)/8;
/*
* sort pgdat_list so that the lowest one comes first,
* which makes alloc_bootmem_low_pages work as desired.
*/
if (!pgdat_list || pgdat_list->node_start_pfn > pgdat->node_start_pfn) {
pgdat->pgdat_next = pgdat_list;
pgdat_list = pgdat;
} else {
pg_data_t *tmp = pgdat_list;
while (tmp->pgdat_next) {
if (tmp->pgdat_next->node_start_pfn > pgdat->node_start_pfn)
break;
tmp = tmp->pgdat_next;
}
pgdat->pgdat_next = tmp->pgdat_next;
tmp->pgdat_next = pgdat;
}
pgdat->pgdat_next = pgdat_list;
pgdat_list = pgdat;
mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
......
......@@ -14,6 +14,7 @@
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
......@@ -23,6 +24,8 @@
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h> /* for try_to_release_page() */
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
/* How many pages do we try to swap or page in/out together? */
......@@ -380,6 +383,24 @@ void vm_acct_memory(long pages)
EXPORT_SYMBOL(vm_acct_memory);
#endif
#ifdef CONFIG_SMP
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
int cpu = get_cpu();
long count = fbc->counters[cpu].count;
count += amount;
if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock);
fbc->count += count;
spin_unlock(&fbc->lock);
count = 0;
}
fbc->counters[cpu].count = count;
put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
#endif
/*
* Perform any setup for the swap system
......
......@@ -1399,9 +1399,6 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
rt->rt_gateway = FIB_RES_GW(*res);
memcpy(rt->u.dst.metrics, fi->fib_metrics,
sizeof(rt->u.dst.metrics));
if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
rt->u.dst.metrics[RTAX_HOPLIMIT-1] =
sysctl_ip_default_ttl;
if (fi->fib_mtu == 0) {
rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
......@@ -1415,6 +1412,8 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
} else
rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
......
......@@ -664,7 +664,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
goto done;
spin_unlock_bh(&mc->mca_lock);
if (dev->flags&IFF_UP)
if (!mc->idev->dead)
igmp6_leave_group(mc);
spin_lock_bh(&mc->mca_lock);
......@@ -1040,7 +1040,7 @@ int igmp6_event_query(struct sk_buff *skb)
/* MLDv1 router present */
/* Translate milliseconds to jiffies */
max_delay = ntohs(hdr->icmp6_maxdelay)*(HZ/10);
max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000;
switchback = (idev->mc_qrv + 1) * max_delay;
idev->mc_v1_seen = jiffies + switchback;
......@@ -1052,7 +1052,7 @@ int igmp6_event_query(struct sk_buff *skb)
/* clear deleted report items */
mld_clear_delrec(idev);
} else if (len >= 28) {
max_delay = MLDV2_MRC(ntohs(mlh2->mrc))*(HZ/10);
max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000;
if (!max_delay)
max_delay = 1;
idev->mc_maxdelay = max_delay;
......
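Worked numbers for the conversion fixed above: the MLD maximum-response field is in milliseconds, while the old *(HZ/10) form matches IGMP's tenth-of-a-second units, so it overshot by a factor of 100:

#include <stdio.h>

int main(void)
{
    unsigned int hz = 100, maxdelay_ms = 1000;  /* HZ=100, 1 second max delay */

    printf("old: %u jiffies\n", maxdelay_ms * (hz / 10));  /* 10000 = 100 s, wrong */
    printf("new: %u jiffies\n", maxdelay_ms * hz / 1000);  /* 100 = 1 s, correct */
    return 0;
}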
......@@ -1558,13 +1558,13 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
static int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
struct rtmsg *rtm;
int prefix;
rtm = NLMSG_DATA(arg->cb->nlh);
if (rtm)
if (arg->cb->nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(struct rtmsg))) {
struct rtmsg *rtm = NLMSG_DATA(arg->cb->nlh);
prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
else prefix = 0;
} else
prefix = 0;
return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
......
......@@ -551,6 +551,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
if (!ipv6_addr_any(&np->rcv_saddr)) {
if (!ipv6_addr_cmp(&np->rcv_saddr, loc_addr))
return s;
continue;
}
if(!inet6_mc_check(s, loc_addr, rmt_addr))
continue;
......
......@@ -1670,6 +1670,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch_tree_lock(sch);
*old = cl->q;
cl->q = new;
sch->q.qlen -= (*old)->q.qlen;
qdisc_reset(*old);
sch_tree_unlock(sch);
......
......@@ -77,6 +77,7 @@ static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
*old = xchg(&p->q,new);
if (*old)
qdisc_reset(*old);
sch->q.qlen = 0;
sch_tree_unlock(sch); /* @@@ move up ? */
return 0;
}
......
......@@ -47,7 +47,7 @@ bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
if (sch->stats.backlog <= q->limit) {
if (sch->stats.backlog + skb->len <= q->limit) {
__skb_queue_tail(&sch->q, skb);
sch->stats.backlog += skb->len;
sch->stats.bytes += skb->len;
......@@ -108,7 +108,7 @@ pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
if (sch->q.qlen <= q->limit) {
if (sch->q.qlen < q->limit) {
__skb_queue_tail(&sch->q, skb);
sch->stats.bytes += skb->len;
sch->stats.packets++;
......
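The two FIFO fixes above correct off-by-one admission tests: checking the queue state before adding the new packet lets the queue overshoot its limit by one packet, or by up to one packet's worth of bytes. Worked numbers:

#include <stdio.h>

int main(void)
{
    unsigned backlog = 900, limit = 1000, skb_len = 500;

    /* old bfifo test: passes, backlog then overshoots to 1400 bytes */
    printf("old test admits: %d\n", backlog <= limit);
    /* new bfifo test: accounts for the packet being added */
    printf("new test admits: %d\n", backlog + skb_len <= limit);
    /* pfifo similarly: qlen < limit caps at exactly `limit` packets */
    return 0;
}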
......@@ -275,7 +275,7 @@ pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
list = ((struct sk_buff_head*)qdisc->data) +
prio2band[skb->priority&TC_PRIO_MAX];
if (list->qlen <= qdisc->dev->tx_queue_len) {
if (list->qlen < qdisc->dev->tx_queue_len) {
__skb_queue_tail(list, skb);
qdisc->q.qlen++;
qdisc->stats.bytes += skb->len;
......
......@@ -110,7 +110,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
unsigned long qave=0;
int i=0;
if (!t->initd && skb_queue_len(&sch->q) <= sch->dev->tx_queue_len) {
if (!t->initd && skb_queue_len(&sch->q) < sch->dev->tx_queue_len) {
D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
goto do_enqueue;
}
......@@ -175,7 +175,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if ((q->qave+qave) < q->qth_min) {
q->qcount = -1;
enqueue:
if (q->backlog <= q->limit) {
if (q->backlog + skb->len <= q->limit) {
q->backlog += skb->len;
do_enqueue:
__skb_queue_tail(&sch->q, skb);
......
......@@ -266,6 +266,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch_tree_lock(sch);
*old = q->queues[band];
q->queues[band] = new;
sch->q.qlen -= (*old)->q.qlen;
qdisc_reset(*old);
sch_tree_unlock(sch);
......
......@@ -257,7 +257,7 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (q->qave < q->qth_min) {
q->qcount = -1;
enqueue:
if (sch->stats.backlog <= q->limit) {
if (sch->stats.backlog + skb->len <= q->limit) {
__skb_queue_tail(&sch->q, skb);
sch->stats.backlog += skb->len;
sch->stats.bytes += skb->len;
......
......@@ -268,7 +268,7 @@ static void tbf_reset(struct Qdisc* sch)
struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
qdisc_reset(q->qdisc);
skb_queue_purge(&sch->q);
sch->q.qlen = 0;
sch->stats.backlog = 0;
PSCHED_GET_TIME(q->t_c);
q->tokens = q->buffer;
......@@ -455,6 +455,8 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
sch_tree_lock(sch);
*old = xchg(&q->qdisc, new);
qdisc_reset(*old);
sch->q.qlen = 0;
sch->stats.backlog = 0;
sch_tree_unlock(sch);
return 0;
......