Commit 27706c90 authored by Linus Torvalds

Import 2.1.42pre2

parent 8ed6b77e
......@@ -58,24 +58,23 @@ struct mmap_arg_struct {
asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
int error = -EFAULT;
struct file * file = NULL;
struct mmap_arg_struct a;
lock_kernel();
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
goto out;
if (!(a.flags & MAP_ANONYMOUS)) {
error = -EBADF;
if (a.fd >= NR_OPEN || !(file = current->files->fd[a.fd]))
return -EBADF;
goto out;
}
a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
{
unsigned long retval;
struct semaphore *sem = &current->mm->mmap_sem;
down(sem);
retval = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
up(sem);
return retval;
}
error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
out:
unlock_kernel();
return error;
}
extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
......
......@@ -83,10 +83,6 @@ int __verify_write(const void * addr, unsigned long size)
* bit 0 == 0 means no page found, 1 means protection fault
* bit 1 == 0 means read, 1 means write
* bit 2 == 0 means kernel, 1 means user-mode
*
* NOTE! This all needs to be SMP-safe. Happily, we're only really touching
* per-thread data that we can know is valid (except for the "mm" structure
* that is shared - which is protected by the mm->mmap_sem semaphore).
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
......@@ -98,6 +94,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
unsigned long fixup;
int write;
lock_kernel();
/* get the address */
__asm__("movl %%cr2,%0":"=r" (address));
down(&mm->mmap_sem);
......@@ -154,7 +152,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (bit < 32)
tsk->tss.screen_bitmap |= 1 << bit;
}
return;
goto out;
/*
* Something tried to access memory that isn't in our memory map..
......@@ -170,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
regs->eip,
fixup);
regs->eip = fixup;
return;
goto out;
}
if (error_code & 4) {
......@@ -178,7 +176,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
tsk->tss.error_code = error_code;
tsk->tss.trap_no = 14;
force_sig(SIGSEGV, tsk);
return;
goto out;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
......@@ -190,7 +188,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
wp_works_ok = 1;
pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
flush_tlb();
return;
goto out;
}
if (address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
......@@ -211,4 +209,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
}
die_if_kernel("Oops", regs, error_code);
do_exit(SIGKILL);
out:
unlock_kernel();
}
This diff is collapsed.
/* $Id: advansys.h,v 1.5 1997/01/19 23:07:10 davem Exp $ */
/* $Id: advansys.h,v 1997/05/28 00:23:06 bobf Exp bobf $ */
/*
* advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
*
* Copyright (c) 1995-1996 Advanced System Products, Inc.
*
* Copyright (c) 1995-1997 Advanced System Products, Inc.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistributions of source
* code retain the above copyright notice and this comment without
* modification.
*
* The latest version of this driver is available at the AdvanSys
* FTP and BBS sites listed below.
* There is an AdvanSys Linux WWW page at:
* http://www.advansys.com/linux.html
*
* The latest version of the AdvanSys driver is available at:
* ftp://ftp.advansys.com/pub/linux
*
* Please send questions, comments, and bug reports to:
* Please send questions, comments, bug reports to:
* bobf@advansys.com (Bob Frey)
*/
......@@ -20,7 +25,7 @@
#define _ADVANSYS_H
/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
#ifndef LINUX_VERSION_CODE
#include <linux/version.h>
......@@ -56,84 +61,86 @@ void advansys_setup(char *, int *);
*/
#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
#define ADVANSYS { \
NULL, /* struct SHT *next */ \
NULL, /* struct module *module */ \
"advansys", /* char *name */ \
advansys_detect, /* int (*detect)(struct SHT *) */ \
advansys_release, /* int (*release)(struct Scsi_Host *) */ \
advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
advansys_queuecommand, \
/* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
advansys_reset, /* int (*reset)(Scsi_Cmnd *) */ \
NULL, /* int (*slave_attach)(int, int) */ \
advansys_biosparam, /* int (* bios_param)(Disk *, int, int []) */ \
/* \
* The following fields are set per adapter in advansys_detect(). \
*/ \
0, /* int can_queue */ \
0, /* int this_id */ \
0, /* short unsigned int sg_tablesize */ \
0, /* short cmd_per_lun */ \
0, /* unsigned char present */ \
/* \
* Because the driver may control an ISA adapter 'unchecked_isa_dma' \
* must be set. The flag will be cleared in advansys_detect for non-ISA \
* adapters. Refer to the comment in scsi_module.c for more information. \
*/ \
1, /* unsigned unchecked_isa_dma:1 */ \
/* \
* All adapters controlled by this driver are capable of large \
* scatter-gather lists. According to the mid-level SCSI documentation \
* this obviates any performance gain provided by setting \
* 'use_clustering'. But empirically while CPU utilization is increased \
* by enabling clustering, I/O throughput increases as well. \
*/ \
ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
NULL, /* struct SHT *next */ \
NULL, /* int *usage_count */ \
"advansys", /* char *name */ \
advansys_detect, /* int (*detect)(struct SHT *) */ \
advansys_release, /* int (*release)(struct Scsi_Host *) */ \
advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
advansys_queuecommand, \
/* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
advansys_reset, /* int (*reset)(Scsi_Cmnd *) */ \
NULL, /* int (*slave_attach)(int, int) */ \
advansys_biosparam, /* int (* bios_param)(Disk *, int, int []) */ \
/* \
* The following fields are set per adapter in advansys_detect(). \
*/ \
0, /* int can_queue */ \
0, /* int this_id */ \
0, /* short unsigned int sg_tablesize */ \
0, /* short cmd_per_lun */ \
0, /* unsigned char present */ \
/* \
* Because the driver may control an ISA adapter 'unchecked_isa_dma' \
* must be set. The flag will be cleared in advansys_detect for non-ISA \
* adapters. Refer to the comment in scsi_module.c for more information. \
*/ \
1, /* unsigned unchecked_isa_dma:1 */ \
/* \
* All adapters controlled by this driver are capable of large \
* scatter-gather lists. According to the mid-level SCSI documentation \
* this obviates any performance gain provided by setting \
* 'use_clustering'. But empirically while CPU utilization is increased \
* by enabling clustering, I/O throughput increases as well. \
*/ \
ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
}
#else /* version >= v1.3.0 */
#define ADVANSYS { \
NULL, /* struct SHT *next */ \
NULL, /* struct module *module */ \
&proc_scsi_advansys, /* struct proc_dir_entry *proc_dir */ \
advansys_proc_info, \
/* int (*proc_info)(char *, char **, off_t, int, int, int) */ \
"advansys", /* const char *name */ \
advansys_detect, /* int (*detect)(struct SHT *) */ \
advansys_release, /* int (*release)(struct Scsi_Host *) */ \
advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
advansys_queuecommand, \
/* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
advansys_reset, \
/* version < v1.3.89 int (*reset)(Scsi_Cmnd *) */ \
/* version >= v1.3.89 int (*reset)(Scsi_Cmnd *, unsigned int) */ \
NULL, /* int (*slave_attach)(int, int) */ \
advansys_biosparam, /* int (* bios_param)(Disk *, kdev_t, int []) */ \
/* \
* The following fields are set per adapter in advansys_detect(). \
*/ \
0, /* int can_queue */ \
0, /* int this_id */ \
0, /* short unsigned int sg_tablesize */ \
0, /* short cmd_per_lun */ \
0, /* unsigned char present */ \
/* \
* Because the driver may control an ISA adapter 'unchecked_isa_dma' \
* must be set. The flag will be cleared in advansys_detect for non-ISA \
* adapters. Refer to the comment in scsi_module.c for more information. \
*/ \
1, /* unsigned unchecked_isa_dma:1 */ \
/* \
* All adapters controlled by this driver are capable of large \
* scatter-gather lists. According to the mid-level SCSI documentation \
* this obviates any performance gain provided by setting \
* 'use_clustering'. But empirically while CPU utilization is increased \
* by enabling clustering, I/O throughput increases as well. \
*/ \
ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
NULL, /* struct SHT *next */ \
NULL, \
/* version < v2.1.23 long *usage_count */ \
/* version >= v2.1.23 struct module * */ \
&proc_scsi_advansys, /* struct proc_dir_entry *proc_dir */ \
advansys_proc_info, \
/* int (*proc_info)(char *, char **, off_t, int, int, int) */ \
"advansys", /* const char *name */ \
advansys_detect, /* int (*detect)(struct SHT *) */ \
advansys_release, /* int (*release)(struct Scsi_Host *) */ \
advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
advansys_queuecommand, \
/* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
advansys_reset, \
/* version < v1.3.89 int (*reset)(Scsi_Cmnd *) */ \
/* version >= v1.3.89 int (*reset)(Scsi_Cmnd *, unsigned int) */ \
NULL, /* int (*slave_attach)(int, int) */ \
advansys_biosparam, /* int (* bios_param)(Disk *, kdev_t, int []) */ \
/* \
* The following fields are set per adapter in advansys_detect(). \
*/ \
0, /* int can_queue */ \
0, /* int this_id */ \
0, /* short unsigned int sg_tablesize */ \
0, /* short cmd_per_lun */ \
0, /* unsigned char present */ \
/* \
* Because the driver may control an ISA adapter 'unchecked_isa_dma' \
* must be set. The flag will be cleared in advansys_detect for non-ISA \
* adapters. Refer to the comment in scsi_module.c for more information. \
*/ \
1, /* unsigned unchecked_isa_dma:1 */ \
/* \
* All adapters controlled by this driver are capable of large \
* scatter-gather lists. According to the mid-level SCSI documentation \
* this obviates any performance gain provided by setting \
* 'use_clustering'. But empirically while CPU utilization is increased \
* by enabling clustering, I/O throughput increases as well. \
*/ \
ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
}
#endif /* version >= v1.3.0 */
#endif /* _ADVANSYS_H */
......@@ -275,14 +275,13 @@ extern void si_meminfo(struct sysinfo * val);
/* mmap.c */
extern void vma_init(void);
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long off);
extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);
extern unsigned long do_mmap(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);
/* filemap.c */
extern unsigned long page_unuse(unsigned long);
......
......@@ -136,7 +136,7 @@ asmlinkage int sys_shmget (key_t key, int size, int shmflg)
struct shmid_ds *shp;
int err, id = 0;
down(&current->mm->mmap_sem);
lock_kernel();
if (size < 0 || size > SHMMAX) {
err = -EINVAL;
} else if (key == IPC_PRIVATE) {
......@@ -159,7 +159,7 @@ asmlinkage int sys_shmget (key_t key, int size, int shmflg)
else
err = (int) shp->shm_perm.seq * SHMMNI + id;
}
up(&current->mm->mmap_sem);
unlock_kernel();
return err;
}
......@@ -482,7 +482,7 @@ asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
unsigned long addr;
unsigned long len;
down(&current->mm->mmap_sem);
lock_kernel();
if (shmid < 0) {
/* printk("shmat() -> EINVAL because shmid = %d < 0\n",shmid); */
goto out;
......@@ -575,7 +575,7 @@ asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
*raddr = addr;
err = 0;
out:
up(&current->mm->mmap_sem);
unlock_kernel();
return err;
}
......@@ -626,14 +626,12 @@ asmlinkage int sys_shmdt (char *shmaddr)
{
struct vm_area_struct *shmd, *shmdnext;
down(&current->mm->mmap_sem);
for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
shmdnext = shmd->vm_next;
if (shmd->vm_ops == &shm_vm_ops
&& shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
}
up(&current->mm->mmap_sem);
return 0;
}
......
......@@ -1210,9 +1210,7 @@ static int msync_interval(struct vm_area_struct * vma,
return 0;
if (vma->vm_ops->sync) {
int error;
lock_kernel(); /* Horrible */
error = vma->vm_ops->sync(vma, start, end-start, flags);
unlock_kernel(); /* Horrible */
if (error)
return error;
if (flags & MS_SYNC)
......@@ -1228,7 +1226,7 @@ asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
struct vm_area_struct * vma;
int unmapped_error, error = -EINVAL;
down(&current->mm->mmap_sem);
lock_kernel();
if (start & ~PAGE_MASK)
goto out;
len = (len + ~PAGE_MASK) & PAGE_MASK;
......@@ -1274,7 +1272,7 @@ asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
vma = vma->vm_next;
}
out:
up(&current->mm->mmap_sem);
unlock_kernel();
return error;
}
......
......@@ -44,8 +44,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......@@ -865,9 +863,6 @@ static inline void handle_pte_fault(struct task_struct *tsk,
do_wp_page(tsk, vma, address, write_access, pte);
}
/*
* By the time we get here, we already have the mm semaphore.
*/
void handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
unsigned long address, int write_access)
{
......@@ -882,10 +877,8 @@ void handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
pte = pte_alloc(pmd, address);
if (!pte)
goto no_memory;
lock_kernel(); /* Horrible */
handle_pte_fault(tsk, vma, address, write_access, pte);
update_mmu_cache(vma, address, *pte);
unlock_kernel(); /* Horrible */
return;
no_memory:
oom(tsk);
......
......@@ -191,7 +191,7 @@ asmlinkage int sys_mlock(unsigned long start, size_t len)
unsigned long lock_limit;
int error = -ENOMEM;
down(&current->mm->mmap_sem);
lock_kernel();
len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
start &= PAGE_MASK;
......@@ -212,7 +212,7 @@ asmlinkage int sys_mlock(unsigned long start, size_t len)
error = do_mlock(start, len, 1);
out:
up(&current->mm->mmap_sem);
unlock_kernel();
return error;
}
......@@ -220,11 +220,11 @@ asmlinkage int sys_munlock(unsigned long start, size_t len)
{
int ret;
down(&current->mm->mmap_sem);
lock_kernel();
len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
start &= PAGE_MASK;
ret = do_mlock(start, len, 0);
up(&current->mm->mmap_sem);
unlock_kernel();
return ret;
}
......@@ -262,7 +262,7 @@ asmlinkage int sys_mlockall(int flags)
unsigned long lock_limit;
int ret = -EINVAL;
down(&current->mm->mmap_sem);
lock_kernel();
if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
goto out;
......@@ -280,7 +280,7 @@ asmlinkage int sys_mlockall(int flags)
ret = do_mlockall(flags);
out:
up(&current->mm->mmap_sem);
unlock_kernel();
return ret;
}
......@@ -288,8 +288,8 @@ asmlinkage int sys_munlockall(void)
{
int ret;
down(&current->mm->mmap_sem);
lock_kernel();
ret = do_mlockall(0);
up(&current->mm->mmap_sem);
unlock_kernel();
return ret;
}
......@@ -91,7 +91,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
down(&mm->mmap_sem);
lock_kernel();
retval = mm->brk;
if (brk < mm->end_code)
goto out;
......@@ -126,13 +126,13 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
goto out;
/* Ok, looks good - let it rip. */
if (do_mmap(NULL, oldbrk, newbrk-oldbrk,
if(do_mmap(NULL, oldbrk, newbrk-oldbrk,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_FIXED|MAP_PRIVATE, 0) == oldbrk)
mm->brk = brk;
retval = mm->brk;
out:
up(&mm->mmap_sem);
unlock_kernel();
return retval;
}
......@@ -158,8 +158,7 @@ static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
#undef _trans
}
unsigned long do_mmap(struct file * file,
unsigned long addr, unsigned long len,
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long off)
{
struct mm_struct * mm = current->mm;
......@@ -318,15 +317,6 @@ unsigned long do_mmap(struct file * file,
if ((flags & VM_LOCKED) && !(flags & VM_IO)) {
unsigned long start = addr;
mm->locked_vm += len >> PAGE_SHIFT;
/*
* This used to be just ugly, now it's downright broken - we can't do
* this when we're holding the mm semaphore (because the page fault
* will also try to get the semaphore - quite correctly). Besides, this
* never worked correctly anyway (we may not have read permission to
* the area in the first place).
*/
#if 0
do {
char c;
get_user(c,(char *) start);
......@@ -334,7 +324,6 @@ unsigned long do_mmap(struct file * file,
start += PAGE_SIZE;
__asm__ __volatile__("": :"r" (c));
} while (len > 0);
#endif
}
return addr;
}
......@@ -441,6 +430,16 @@ static void unmap_fixup(struct vm_area_struct *area,
insert_vm_struct(current->mm, mpnt);
}
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
int ret;
lock_kernel();
ret = do_munmap(addr, len);
unlock_kernel();
return ret;
}
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
......@@ -519,16 +518,6 @@ int do_munmap(unsigned long addr, size_t len)
return 0;
}
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
int ret;
down(&current->mm->mmap_sem);
ret = do_munmap(addr, len);
up(&current->mm->mmap_sem);
return ret;
}
/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
......@@ -601,6 +590,8 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
{
struct vm_area_struct *prev, *mpnt, *next;
down(&mm->mmap_sem);
prev = NULL;
mpnt = mm->mmap;
while(mpnt && mpnt->vm_end <= start_addr) {
......@@ -608,7 +599,7 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
mpnt = mpnt->vm_next;
}
if (!mpnt)
return;
goto no_vma;
next = mpnt->vm_next;
......@@ -660,6 +651,8 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
mpnt = prev;
}
mm->mmap_cache = NULL; /* Kill the cache. */
no_vma:
up(&mm->mmap_sem);
}
__initfunc(void vma_init(void))
......
......@@ -206,20 +206,20 @@ asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
unsigned long nstart, end, tmp;
struct vm_area_struct * vma, * next;
int error;
int error = -EINVAL;
lock_kernel();
if (start & ~PAGE_MASK)
return -EINVAL;
goto out;
len = (len + ~PAGE_MASK) & PAGE_MASK;
end = start + len;
if (end < start)
return -EINVAL;
goto out;
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return -EINVAL;
goto out;
error = 0;
if (end == start)
return 0;
down(&current->mm->mmap_sem);
goto out;
vma = find_vma(current->mm, start);
error = -EFAULT;
if (!vma || vma->vm_start > start)
......@@ -255,6 +255,6 @@ asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
}
merge_segments(current->mm, start, end);
out:
up(&current->mm->mmap_sem);
unlock_kernel();
return error;
}
......@@ -21,8 +21,6 @@
#include <asm/system.h>
#include <asm/pgtable.h>
extern int do_munmap(unsigned long addr, size_t len);
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t * pgd;
......@@ -168,7 +166,7 @@ asmlinkage unsigned long sys_mremap(unsigned long addr,
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
down(&current->mm->mmap_sem);
lock_kernel();
if (addr & ~PAGE_MASK)
goto out;
old_len = PAGE_ALIGN(old_len);
......@@ -233,6 +231,6 @@ asmlinkage unsigned long sys_mremap(unsigned long addr,
else
ret = -ENOMEM;
out:
up(&current->mm->mmap_sem);
unlock_kernel();
return ret;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment