Commit d2c585d3 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] pagefault accounting fix

From: William Lee Irwin III <wli@holomorphy.com>

Our accounting of minor faults versus major faults is currently quite wrong.

To fix it up we need to propagate the actual fault type back to the
higher-level code.  Repurpose the currently-unused third arg to ->nopage
for this.
parent 282ed003
......@@ -420,7 +420,7 @@ transfer: no
prototypes:
void (*open)(struct vm_area_struct*);
void (*close)(struct vm_area_struct*);
struct page *(*nopage)(struct vm_area_struct*, unsigned long, int);
struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);
locking rules:
BKL mmap_sem
......
......@@ -534,7 +534,7 @@ int is_hugepage_mem_enough(size_t size)
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
unsigned long address, int *unused)
{
BUG();
return NULL;
......
......@@ -60,10 +60,12 @@ extern struct page *ia32_shared_page[];
extern unsigned long *ia32_gdt;
struct page *
ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int no_share)
ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
{
struct page *pg = ia32_shared_page[smp_processor_id()];
get_page(pg);
if (type)
*type = VM_FAULT_MINOR;
return pg;
}
......
......@@ -518,7 +518,7 @@ int is_hugepage_mem_enough(size_t size)
return 1;
}
static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int unused)
static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int *unused)
{
BUG();
return NULL;
......
......@@ -921,7 +921,7 @@ int is_hugepage_mem_enough(size_t size)
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
unsigned long address, int *unused)
{
BUG();
return NULL;
......
......@@ -504,7 +504,7 @@ int is_hugepage_mem_enough(size_t size)
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
unsigned long address, int *unused)
{
BUG();
return NULL;
......
......@@ -13,7 +13,7 @@
static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int write_access)
int *type)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
......@@ -30,6 +30,8 @@ static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
*/
page = virt_to_page(__va(pa));
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......
......@@ -760,16 +760,16 @@ extern int DRM(fasync)(int fd, struct file *filp, int on);
/* Mapping support (drm_vm.h) */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
int *type);
extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
int *type);
extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
int *type);
extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
int *type);
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
......
......@@ -76,7 +76,7 @@ struct vm_operations_struct DRM(vm_sg_ops) = {
*/
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
int *type)
{
#if __REALLY_HAVE_AGP
drm_file_t *priv = vma->vm_file->private_data;
......@@ -133,6 +133,8 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
baddr, __va(agpmem->memory->memory[offset]), offset,
atomic_read(&page->count));
if (type)
*type = VM_FAULT_MINOR;
return page;
}
vm_nopage_error:
......@@ -154,7 +156,7 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
*/
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
int *type)
{
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
unsigned long offset;
......@@ -170,6 +172,8 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
if (!page)
return NOPAGE_OOM;
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
DRM_DEBUG("shm_nopage 0x%lx\n", address);
return page;
......@@ -268,7 +272,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
*/
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
int *type)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
......@@ -287,6 +291,8 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
(offset & (~PAGE_MASK))));
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
return page;
......@@ -304,7 +310,7 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
*/
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
int *type)
{
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
drm_file_t *priv = vma->vm_file->private_data;
......@@ -325,6 +331,8 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......
......@@ -187,7 +187,7 @@ void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long
/* nopage() handler for mmap access */
static struct page*
dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int write_access)
dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
{
unsigned long offset;
unsigned long kernel_virt_addr;
......@@ -202,6 +202,8 @@ dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int wri
(address > (unsigned long) area->vm_start + (PAGE_SIZE * dma->n_pages)) )
goto out;
if (type)
*type = VM_FAULT_MINOR;
offset = address - area->vm_start;
kernel_virt_addr = (unsigned long) dma->kvirt + offset;
ret = vmalloc_to_page((void*) kernel_virt_addr);
......
......@@ -1078,7 +1078,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
*/
static struct page*
videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr,
int write_access)
int *type)
{
struct page *page;
......@@ -1090,6 +1090,8 @@ videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr,
if (!page)
return NOPAGE_OOM;
clear_user_page(page_address(page), vaddr, page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......
......@@ -1118,7 +1118,7 @@ sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
}
static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int unused)
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
Sg_fd *sfp;
struct page *page = NOPAGE_SIGBUS;
......@@ -1158,6 +1158,8 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int unused)
page = virt_to_page(page_ptr);
get_page(page); /* increment page count */
}
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......
......@@ -26,7 +26,7 @@
* Fill in the supplied page for mmap
*/
static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
unsigned long address, int write_access)
unsigned long address, int *type)
{
struct file *file = area->vm_file;
struct dentry *dentry = file->f_dentry;
......@@ -85,6 +85,15 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
flush_dcache_page(page);
kunmap(page);
/*
* If I understand ncp_read_kernel() properly, the above always
* fetches from the network, here the analogue of disk.
* -- wli
*/
if (type)
*type = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
return page;
}
......
......@@ -143,7 +143,7 @@ extern pgprot_t protection_map[16];
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
};
......@@ -407,7 +407,7 @@ static inline int page_mapped(struct page *page)
extern void show_free_areas(void);
struct page *shmem_nopage(struct vm_area_struct * vma,
unsigned long address, int unused);
unsigned long address, int *type);
struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
void shmem_lock(struct file * file, int lock);
int shmem_zero_setup(struct vm_area_struct *);
......@@ -565,7 +565,7 @@ extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
......
......@@ -1323,8 +1323,6 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
* either stopped or zombied. In the zombied case the task won't get
* reaped till shortly after the call to getrusage(), in both cases the
* task being examined is in a frozen state so the counters won't change.
*
* FIXME! Get the fault counts properly!
*/
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
......
......@@ -990,7 +990,7 @@ static int page_cache_read(struct file * file, unsigned long offset)
* it in the page cache, and handles the special cases reasonably without
* having a lot of duplicated code.
*/
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int *type)
{
int error;
struct file *file = area->vm_file;
......@@ -999,7 +999,7 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
struct inode *inode = mapping->host;
struct page *page;
unsigned long size, pgoff, endoff;
int did_readaround = 0;
int did_readaround = 0, majmin = VM_FAULT_MINOR;
pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
......@@ -1048,6 +1048,14 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
goto no_cached_page;
/*
* To keep the pgmajfault counter straight, we need to
* check did_readaround, as this is an inner loop.
*/
if (!did_readaround) {
majmin = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
}
did_readaround = 1;
do_page_cache_readahead(mapping, file,
pgoff & ~(MMAP_READAROUND-1), MMAP_READAROUND);
......@@ -1069,6 +1077,8 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
* Found the page and have a reference on it.
*/
mark_page_accessed(page);
if (type)
*type = majmin;
return page;
outside_data_content:
......@@ -1104,7 +1114,10 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
return NULL;
page_not_uptodate:
if (!did_readaround) {
majmin = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
}
lock_page(page);
/* Did it get unhashed while we waited for it? */
......
......@@ -1400,7 +1400,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t entry;
struct pte_chain *pte_chain;
int sequence = 0;
int ret;
int ret = VM_FAULT_MINOR;
if (!vma->vm_ops || !vma->vm_ops->nopage)
return do_anonymous_page(mm, vma, page_table,
......@@ -1414,7 +1414,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
retry:
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
/* no page was available -- either SIGBUS or OOM */
if (new_page == NOPAGE_SIGBUS)
......@@ -1483,14 +1483,12 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap(page_table);
page_cache_release(new_page);
spin_unlock(&mm->page_table_lock);
ret = VM_FAULT_MINOR;
goto out;
}
/* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry);
spin_unlock(&mm->page_table_lock);
ret = VM_FAULT_MAJOR;
goto out;
oom:
ret = VM_FAULT_OOM;
......
......@@ -71,7 +71,7 @@ enum sgp_type {
};
static int shmem_getpage(struct inode *inode, unsigned long idx,
struct page **pagep, enum sgp_type sgp);
struct page **pagep, enum sgp_type sgp, int *type);
static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
{
......@@ -540,7 +540,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
(void) shmem_getpage(inode,
attr->ia_size>>PAGE_CACHE_SHIFT,
&page, SGP_READ);
&page, SGP_READ, NULL);
}
/*
* Reset SHMEM_PAGEIN flag so that shmem_truncate can
......@@ -765,7 +765,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache
*/
static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp)
static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
......@@ -774,7 +774,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
struct page *swappage;
swp_entry_t *entry;
swp_entry_t swap;
int error;
int error, majmin = VM_FAULT_MINOR;
if (idx >= SHMEM_MAX_INDEX)
return -EFBIG;
......@@ -811,6 +811,10 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
if (!swappage) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
/* here we actually do the io */
if (majmin == VM_FAULT_MINOR && type)
inc_page_state(pgmajfault);
majmin = VM_FAULT_MAJOR;
swapin_readahead(swap);
swappage = read_swap_cache_async(swap);
if (!swappage) {
......@@ -959,6 +963,8 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
} else
*pagep = ZERO_PAGE(0);
}
if (type)
*type = majmin;
return 0;
failed:
......@@ -969,7 +975,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
return error;
}
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct page *page = NULL;
......@@ -980,7 +986,7 @@ struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int
idx += vma->vm_pgoff;
idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
error = shmem_getpage(inode, idx, &page, SGP_CACHE);
error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
if (error)
return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
......@@ -1007,7 +1013,7 @@ static int shmem_populate(struct vm_area_struct *vma,
/*
* Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
*/
err = shmem_getpage(inode, pgoff, &page, sgp);
err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
if (err)
return err;
if (page) {
......@@ -1157,7 +1163,7 @@ static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
struct inode *inode = page->mapping->host;
return shmem_getpage(inode, page->index, &page, SGP_WRITE);
return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}
static ssize_t
......@@ -1214,7 +1220,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
* But it still may be a good idea to prefault below.
*/
err = shmem_getpage(inode, index, &page, SGP_WRITE);
err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
if (err)
break;
......@@ -1296,7 +1302,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
break;
}
desc->error = shmem_getpage(inode, index, &page, SGP_READ);
desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
if (desc->error) {
if (desc->error == -EINVAL)
desc->error = 0;
......@@ -1552,7 +1558,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
iput(inode);
return -ENOMEM;
}
error = shmem_getpage(inode, 0, &page, SGP_WRITE);
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
vm_unacct_memory(VM_ACCT(1));
iput(inode);
......@@ -1590,7 +1596,7 @@ static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
static int shmem_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct page *page = NULL;
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ);
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
if (res)
return res;
res = vfs_readlink(dentry, buffer, buflen, kmap(page));
......@@ -1603,7 +1609,7 @@ static int shmem_readlink(struct dentry *dentry, char __user *buffer, int buflen
static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ);
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
if (res)
return res;
res = vfs_follow_link(nd, kmap(page));
......
......@@ -2779,7 +2779,7 @@ unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
return mask;
}
static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area, unsigned long address, int no_share)
static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area, unsigned long address, int *type)
{
snd_pcm_substream_t *substream = (snd_pcm_substream_t *)area->vm_private_data;
snd_pcm_runtime_t *runtime;
......@@ -2791,6 +2791,8 @@ static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area, uns
page = virt_to_page(runtime->status);
if (!PageReserved(page))
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......@@ -2817,7 +2819,7 @@ int snd_pcm_mmap_status(snd_pcm_substream_t *substream, struct file *file,
return 0;
}
static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area, unsigned long address, int no_share)
static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area, unsigned long address, int *type)
{
snd_pcm_substream_t *substream = (snd_pcm_substream_t *)area->vm_private_data;
snd_pcm_runtime_t *runtime;
......@@ -2829,6 +2831,8 @@ static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area, un
page = virt_to_page(runtime->control);
if (!PageReserved(page))
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......@@ -2867,7 +2871,7 @@ static void snd_pcm_mmap_data_close(struct vm_area_struct *area)
atomic_dec(&substream->runtime->mmap_count);
}
static struct page * snd_pcm_mmap_data_nopage(struct vm_area_struct *area, unsigned long address, int no_share)
static struct page * snd_pcm_mmap_data_nopage(struct vm_area_struct *area, unsigned long address, int *type)
{
snd_pcm_substream_t *substream = (snd_pcm_substream_t *)area->vm_private_data;
snd_pcm_runtime_t *runtime;
......@@ -2895,6 +2899,8 @@ static struct page * snd_pcm_mmap_data_nopage(struct vm_area_struct *area, unsig
}
if (!PageReserved(page))
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
......
......@@ -989,7 +989,7 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
return 0;
}
static struct page *emu10k1_mm_nopage (struct vm_area_struct * vma, unsigned long address, int write_access)
static struct page *emu10k1_mm_nopage (struct vm_area_struct * vma, unsigned long address, int *type)
{
struct emu10k1_wavedevice *wave_dev = vma->vm_private_data;
struct woinst *woinst = wave_dev->woinst;
......@@ -1032,6 +1032,8 @@ static struct page *emu10k1_mm_nopage (struct vm_area_struct * vma, unsigned lon
get_page (dmapage);
DPD(3, "page: %#lx\n", (unsigned long) dmapage);
if (type)
*type = VM_FAULT_MINOR;
return dmapage;
}
......
......@@ -2116,7 +2116,7 @@ static void via_dsp_cleanup (struct via_info *card)
static struct page * via_mm_nopage (struct vm_area_struct * vma,
unsigned long address, int write_access)
unsigned long address, int *type)
{
struct via_info *card = vma->vm_private_data;
struct via_channel *chan = &card->ch_out;
......@@ -2124,12 +2124,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
unsigned long pgoff;
int rd, wr;
DPRINTK ("ENTER, start %lXh, ofs %lXh, pgoff %ld, addr %lXh, wr %d\n",
DPRINTK ("ENTER, start %lXh, ofs %lXh, pgoff %ld, addr %lXh\n",
vma->vm_start,
address - vma->vm_start,
(address - vma->vm_start) >> PAGE_SHIFT,
address,
write_access);
address);
if (address > vma->vm_end) {
DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
......@@ -2167,6 +2166,8 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
DPRINTK ("EXIT, returning page %p for cpuaddr %lXh\n",
dmapage, (unsigned long) chan->pgtbl[pgoff].cpuaddr);
get_page (dmapage);
if (type)
*type = VM_FAULT_MINOR;
return dmapage;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment