Linux Kernel
3.7.1
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include "internal.h"
Macros
    #define inc_mm_counter_fast(mm, member)   inc_mm_counter(mm, member)
    #define dec_mm_counter_fast(mm, member)   dec_mm_counter(mm, member)
    #define HAVE_PTE_SPECIAL                  0

Variables
    unsigned long max_mapnr
    struct page *mem_map
    unsigned long num_physpages
    void *high_memory
    int randomize_va_space __read_mostly
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
struct page *get_dump_page(unsigned long addr)

get_dump_page() - pin user page in memory while writing it to core dump
@addr: user address

Returns struct page pointer of user page pinned for dump, to be freed afterwards by page_cache_release() or put_page().
Returns NULL on any kind of failure - a hole must then be inserted into the corefile, to preserve alignment with its headers; and also returns NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - allowing a hole to be left in the corefile to save disk space.
Called without mmap_sem, but after all other threads have been killed.
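A minimal sketch of how a coredump writer might drive get_dump_page(), loosely modelled on the ELF coredump path; write_page() and skip_page() are hypothetical callbacks standing in for whatever actually emits data or a hole in the corefile:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int dump_vma_pages(struct vm_area_struct *vma,
                          int (*write_page)(void *kaddr),
                          int (*skip_page)(void))
{
        unsigned long addr;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);

                if (page) {
                        void *kaddr = kmap(page);       /* page may be highmem */
                        int ok = write_page(kaddr);

                        kunmap(page);
                        page_cache_release(page);       /* drop the dump reference */
                        if (!ok)
                                return -EIO;
                } else {
                        /* hole, ZERO_PAGE or anonymous pte_none: leave a gap */
                        if (!skip_page())
                                return -EIO;
                }
        }
        return 0;
}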
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long start, int nr_pages, unsigned int gup_flags,
                     struct page **pages, struct vm_area_struct **vmas,
                     int *nonblocking)
__get_user_pages() - pin user pages in memory
@tsk: task_struct of target task
@mm: mm_struct of target mm
@start: starting user address
@nr_pages: number of pages from start to pin
@gup_flags: flags modifying pin behaviour
@pages: array that receives pointers to the pages pinned. Should be at least nr_pages long. Or NULL, if caller only intends to ensure the pages are faulted in.
@vmas: array of pointers to vmas corresponding to each page. Or NULL if the caller does not require them.
@nonblocking: whether waiting for disk IO or mmap_sem contention
Returns number of pages pinned. This may be fewer than the number requested. If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns -errno. Each page returned must be released with a put_page() call when it is finished with. vmas will only remain valid while mmap_sem is held.
Must be called with mmap_sem held for read or write.
__get_user_pages walks a process's page tables and takes a reference to each struct page that each user address corresponds to at a given instant. That is, it takes the page that would be accessed if a user thread accesses the given user virtual address at that instant.
This does not guarantee that the page exists in the user mappings when __get_user_pages returns, and there may even be a completely different page there in some cases (e.g. if mmapped pagecache has been invalidated and subsequently re-faulted). However, it does guarantee that the page won't be freed completely. Mostly, callers simply care that the page contains data that was valid at some point in time. Typically, an IO or similar operation cannot guarantee anything stronger anyway because locks can't be held over the syscall boundary.
If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must be called after the page is finished with, and before put_page is called.
If @nonblocking != NULL, __get_user_pages will not wait for disk IO or mmap_sem contention, and if waiting is needed to pin all pages, *@nonblocking will be set to 0.
In most cases, get_user_pages or get_user_pages_fast should be used instead of __get_user_pages. __get_user_pages should be used only if you need some special @gup_flags.
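For reference, a hedged sketch of the common wrapper usage: pin a single page through get_user_pages() (using this kernel series' task/mm/write/force signature) with mmap_sem held for read, then dirty and release it. use_page() is a hypothetical placeholder for the caller's real work:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

static int pin_and_use(unsigned long uaddr, void (*use_page)(struct page *))
{
        struct page *page;
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             1,         /* nr_pages */
                             1,         /* write */
                             0,         /* force */
                             &page, NULL);
        up_read(&current->mm->mmap_sem);
        if (ret < 1)
                return ret < 0 ? ret : -EFAULT;

        use_page(page);                 /* the page is written to here... */

        set_page_dirty_lock(page);      /* ...so mark it dirty before release */
        page_cache_release(page);       /* equivalent to put_page() */
        return 0;
}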
__setup("norandmaps", disable_randmaps)
core_initcall(init_zero_pfn)
EXPORT_SYMBOL(max_mapnr)
EXPORT_SYMBOL(mem_map)
EXPORT_SYMBOL(num_physpages)
EXPORT_SYMBOL(high_memory)
EXPORT_SYMBOL(__get_user_pages)
EXPORT_SYMBOL(get_user_pages)
EXPORT_SYMBOL(vm_insert_page)
EXPORT_SYMBOL(vm_insert_pfn)
EXPORT_SYMBOL(vm_insert_mixed)
EXPORT_SYMBOL(remap_pfn_range)
EXPORT_SYMBOL(unmap_mapping_range)
EXPORT_SYMBOL(follow_pfn)
EXPORT_SYMBOL_GPL(zap_vma_ptes)
EXPORT_SYMBOL_GPL(apply_to_page_range)
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int flags)
follow_page - look up a page descriptor from a user-virtual address
@vma: vm_area_struct mapping @address
@address: virtual address to look up
@flags: flags modifying lookup behaviour

@flags can have FOLL_ flags set, defined in <linux/mm.h>
Returns the mapped (struct page *), NULL if no mapping exists, or an error pointer if there is a mapping to something not represented by a page descriptor (see also vm_normal_page()).
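A sketch of a typical lookup, assuming the caller already holds mm->mmap_sem for read; passing FOLL_GET pins the returned page, so the caller must drop the reference with put_page() when finished:

#include <linux/err.h>
#include <linux/mm.h>

static struct page *lookup_user_page(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        struct page *page;

        vma = find_vma(mm, addr);
        if (!vma || addr < vma->vm_start)
                return NULL;            /* address not mapped at all */

        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                return NULL;            /* no page, or not backed by a page descriptor */

        return page;                    /* caller calls put_page() when done */
}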
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
               unsigned long *pfn)
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                   unsigned long floor, unsigned long ceiling)
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                    unsigned long pfn, unsigned long size, pgprot_t prot)
remap_pfn_range - remap kernel memory to userspace
@vma: user vma to map to
@addr: target user address to start at
@pfn: physical address of kernel memory
@size: size of map area
@prot: page protection flags for this mapping
Note: this is only safe if the mm semaphore is held when called.
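A sketch of the classic caller, a driver ->mmap method handing a physically contiguous buffer to userspace; mydrv_buf_pfn and mydrv_buf_size are hypothetical driver state. The mmap() path already holds the mm semaphore for write when the method runs:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long mydrv_buf_pfn;     /* first pfn of the buffer (hypothetical) */
static unsigned long mydrv_buf_size;    /* buffer size in bytes (hypothetical) */

static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long vsize = vma->vm_end - vma->vm_start;

        if (vsize > mydrv_buf_size)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, mydrv_buf_pfn,
                               vsize, vma->vm_page_prot);
}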
void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin,
                         loff_t const holelen, int even_cows)
unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
@mapping: the address space containing mmaps to be unmapped.
@holebegin: byte in first page to unmap, relative to the start of the underlying file. This will be rounded down to a PAGE_SIZE boundary. Note that this is different from truncate_pagecache(), which must keep the partial page. In contrast, we must get rid of partial pages.
@holelen: size of prospective hole in bytes. This will be rounded up to a PAGE_SIZE boundary. A holelen of zero truncates to the end of the file.
@even_cows: 1 when truncating a file, unmap even private COWed pages; but 0 when invalidating pagecache, don't throw away private data.
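A sketch of the truncation-style caller: before throwing away a range of a file's pagecache, every user mapping of that range is torn down, including private COWed copies (even_cows = 1); invalidation-only callers would pass 0 instead. Real filesystems do more careful offset and locking work than this:

#include <linux/fs.h>
#include <linux/mm.h>

static void drop_range_mappings(struct inode *inode, loff_t offset, loff_t len)
{
        /* truncation semantics: private COWed pages go too */
        unmap_mapping_range(inode->i_mapping, offset, len, 1);

        /* then drop the pagecache itself for the same range */
        truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
}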
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long start_addr, unsigned long end_addr)
unmap_vmas - unmap a range of memory covered by a list of vma's
@tlb: address of the caller's struct mmu_gather
@vma: the starting vma
@start_addr: virtual address at which to start unmapping
@end_addr: virtual address at which to end unmapping
Unmap all pages in the vma list.
Only addresses between `start' and `end' will be unmapped.
The VMA list must be sorted in ascending virtual address order.
unmap_vmas() assumes that the caller will flush the whole unmapped address range after unmap_vmas() returns. So the only responsibility here is to ensure that any thus-far unmapped pages are flushed before unmap_vmas() drops the lock and schedules.
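A condensed sketch of how this path is usually driven, modelled on exit_mmap() in this kernel version (mmu_notifier and accounting steps omitted): set up the gather, unmap every vma, free the page tables, then flush once when the gather is finished:

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

static void teardown_address_space(struct mm_struct *mm)
{
        struct vm_area_struct *vma = mm->mmap;
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, 1);            /* 1 = full-mm flush */
        unmap_vmas(&tlb, vma, 0, -1);           /* whole address space */
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(&tlb, 0, -1);            /* single flush afterwards */
}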
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                    unsigned long pfn)
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn)
vm_insert_pfn - insert single pfn into user vma
@vma: user vma to map to
@addr: target user address of this page
@pfn: source kernel pfn
Similar to vm_insert_page, this allows drivers to insert individual pages they've allocated into a user vma. Same comments apply.
This function should only be called from a vm_ops->fault handler, and in that case the handler should return NULL.
vma cannot be a COW mapping.
As this is called only for pages that do not currently exist, we do not need to flush old virtual caches or the TLB.
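A sketch of a ->fault handler built around vm_insert_pfn(); mydrv_base_pfn is hypothetical driver state and no bounds checking is shown. Drivers of this era conventionally return VM_FAULT_NOPAGE once the pte has been installed (the "return NULL" remark above appears to predate the current fault API):

#include <linux/errno.h>
#include <linux/mm.h>

static unsigned long mydrv_base_pfn;    /* first pfn of device memory (hypothetical) */

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long addr = (unsigned long)vmf->virtual_address;
        unsigned long pfn = mydrv_base_pfn + vmf->pgoff;
        int err;

        err = vm_insert_pfn(vma, addr, pfn);
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)   /* -EBUSY: raced, pte already present */
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;         /* pte installed, no struct page returned */
}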
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                    unsigned long size, struct zap_details *details)
zap_page_range - remove user pages in a given range
@vma: vm_area_struct holding the applicable pages
@start: starting address of pages to zap
@size: number of bytes to zap
@details: details of nonlinear truncation or shared cache invalidation
Caller must protect the VMA list
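A sketch of MADV_DONTNEED-style use: discard whatever currently backs part of a single vma, with the caller assumed to hold mmap_sem so the VMA list is protected:

#include <linux/mm.h>

static void discard_vma_range(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * NULL details: plain zap, no nonlinear truncation or shared
         * cache invalidation involved.
         */
        zap_page_range(vma, start, end - start, NULL);
}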
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                 unsigned long size)
zap_vma_ptes - remove ptes mapping the vma
@vma: vm_area_struct holding ptes to be zapped
@address: starting address of pages to zap
@size: number of bytes to zap
This function only unmaps ptes assigned to VM_PFNMAP vmas.
The entire address range must be fully contained within the vma.
Returns 0 if successful.
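A sketch of a driver revoking a VM_PFNMAP mapping it established earlier (for instance with remap_pfn_range()), so the ptes are gone before the underlying device resource disappears:

#include <linux/mm.h>

static int revoke_mapping(struct vm_area_struct *vma)
{
        /* the range must lie fully inside the vma; here, the whole vma */
        return zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}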
unsigned long highest_memmap_pfn __read_mostly