Commit bfa5bf6d authored by Rolf Eike Beer, committed by Linus Torvalds

[PATCH] Add kerneldocs for some functions in mm/memory.c

These functions are already documented quite well with long comments.  Now
add kerneldoc-style headers to make them turn up in everyone's favorite doc
format.
Signed-off-by: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7ff6f082
@@ -1227,7 +1227,12 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
 	return retval;
 }
 
-/*
+/**
+ * vm_insert_page - insert single page into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @page: source kernel page
+ *
  * This allows drivers to insert individual pages they've allocated
  * into a user vma.
  *
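For context, vm_insert_page() is typically called from a driver's ->mmap
handler. A minimal sketch, assuming a hypothetical device that exposes a
single kernel-allocated page (the mydev structure and its page field are
illustrative, not part of this patch):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: one page allocated at open time. */
struct mydev {
	struct page *page;	/* from alloc_page(GFP_KERNEL) */
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *dev = file->private_data;

	/* Insert the single kernel page at the start of the user vma. */
	return vm_insert_page(vma, vma->vm_start, dev->page);
}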
@@ -1319,7 +1324,16 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	return 0;
 }
 
-/* Note: this is only safe if the mm semaphore is held when called. */
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
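remap_pfn_range() is likewise usually called from an ->mmap handler, where
the mm semaphore is already held, satisfying the note above. A hedged
sketch, with a made-up physical base address for the device memory:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical physical base of a device's memory-mapped region. */
#define MYDEV_PHYS_BASE 0xfd000000UL

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * ->mmap runs with the mm semaphore held, so calling
	 * remap_pfn_range() here is safe.  The pfn argument is a page
	 * frame number, i.e. a physical address >> PAGE_SHIFT.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}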
@@ -1801,9 +1815,10 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
+/**
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
  *
  * NOTE! We have to be ready to update the memory sharing
  * between the file and the memory map for a potential last
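The usual caller of vmtruncate() is a filesystem's setattr path when a
truncate() shrinks a file: user mappings past the new size must be unmapped
before the pages go away. A sketch under that assumption (the myfs_shrink
helper is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical helper on the truncate path of a simple filesystem. */
static int myfs_shrink(struct inode *inode, loff_t new_size)
{
	/*
	 * Unmap every user mapping of the file beyond new_size and
	 * update i_size; the filesystem then frees the disk blocks.
	 */
	return vmtruncate(inode, new_size);
}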
@@ -1872,11 +1887,16 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 }
 EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /* June 2006 */
 
-/*
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @addr: address to start
+ * @vma: user vma this address belongs to
+ *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
  * because it doesn't cost us any seek time. We also make sure to queue
  * the 'original' request together with the readahead ones...
  *
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
...
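The aligned block the comment refers to comes from rounding the faulting
swap offset down to a (1 << page_cluster) boundary, so all entries of the
block sit next to each other on the swap device. A sketch of that
arithmetic, not the exact kernel code (page_cluster is the existing global
tunable; the helper name is made up):

#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * With page_cluster = 3, readahead covers aligned blocks of
 * 1 << 3 = 8 swap entries, read in one sequential sweep.
 */
static unsigned long readahead_start(swp_entry_t entry)
{
	unsigned long cluster = 1UL << page_cluster;

	/* Round the faulting offset down to a cluster boundary. */
	return swp_offset(entry) & ~(cluster - 1);
}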