Linux Kernel 3.7.1
Macros | Functions | Variables
memory.c File Reference
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include "internal.h"


Macros

#define inc_mm_counter_fast(mm, member)   inc_mm_counter(mm, member)
 
#define dec_mm_counter_fast(mm, member)   dec_mm_counter(mm, member)
 
#define HAVE_PTE_SPECIAL   0
 

Functions

 EXPORT_SYMBOL (max_mapnr)
 
 EXPORT_SYMBOL (mem_map)
 
 EXPORT_SYMBOL (num_physpages)
 
 EXPORT_SYMBOL (high_memory)
 
 __setup ("norandmaps", disable_randmaps)
 
 core_initcall (init_zero_pfn)
 
void pgd_clear_bad (pgd_t *pgd)
 
void pud_clear_bad (pud_t *pud)
 
void pmd_clear_bad (pmd_t *pmd)
 
void free_pgd_range (struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling)
 
void free_pgtables (struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling)
 
int __pte_alloc (struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address)
 
int __pte_alloc_kernel (pmd_t *pmd, unsigned long address)
 
struct page * vm_normal_page (struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 
int copy_pte_range (struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end)
 
int copy_page_range (struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma)
 
void unmap_vmas (struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr)
 
void zap_page_range (struct vm_area_struct *vma, unsigned long start, unsigned long size, struct zap_details *details)
 
int zap_vma_ptes (struct vm_area_struct *vma, unsigned long address, unsigned long size)
 
 EXPORT_SYMBOL_GPL (zap_vma_ptes)
 
struct page * follow_page (struct vm_area_struct *vma, unsigned long address, unsigned int flags)
 
int __get_user_pages (struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)
 
 EXPORT_SYMBOL (__get_user_pages)
 
int fixup_user_fault (struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags)
 
int get_user_pages (struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)
 
 EXPORT_SYMBOL (get_user_pages)
 
pte_t * __get_locked_pte (struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
 
int vm_insert_page (struct vm_area_struct *vma, unsigned long addr, struct page *page)
 
 EXPORT_SYMBOL (vm_insert_page)
 
int vm_insert_pfn (struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 
 EXPORT_SYMBOL (vm_insert_pfn)
 
int vm_insert_mixed (struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 
 EXPORT_SYMBOL (vm_insert_mixed)
 
int remap_pfn_range (struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot)
 
 EXPORT_SYMBOL (remap_pfn_range)
 
int apply_to_page_range (struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data)
 
 EXPORT_SYMBOL_GPL (apply_to_page_range)
 
void unmap_mapping_range (struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows)
 
 EXPORT_SYMBOL (unmap_mapping_range)
 
int handle_pte_fault (struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags)
 
int handle_mm_fault (struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)
 
int __pud_alloc (struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 
int __pmd_alloc (struct mm_struct *mm, pud_t *pud, unsigned long address)
 
int make_pages_present (unsigned long addr, unsigned long end)
 
struct vm_area_struct * get_gate_vma (struct mm_struct *mm)
 
int in_gate_area_no_mm (unsigned long addr)
 
int follow_pfn (struct vm_area_struct *vma, unsigned long address, unsigned long *pfn)
 
 EXPORT_SYMBOL (follow_pfn)
 
int access_remote_vm (struct mm_struct *mm, unsigned long addr, void *buf, int len, int write)
 
int access_process_vm (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 
void print_vma_addr (char *prefix, unsigned long ip)
 

Variables

unsigned long max_mapnr
 
struct page * mem_map
 
unsigned long num_physpages
 
void * high_memory
 
int randomize_va_space __read_mostly
 

Macro Definition Documentation

#define dec_mm_counter_fast(mm, member)   dec_mm_counter(mm, member)

Definition at line 165 of file memory.c.

#define HAVE_PTE_SPECIAL   0

Definition at line 779 of file memory.c.

#define inc_mm_counter_fast(mm, member)   inc_mm_counter(mm, member)

Definition at line 164 of file memory.c.

Function Documentation

pte_t * __get_locked_pte (struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)

get_dump_page() - pin user page in memory while writing it to core dump
@addr: user address

Returns struct page pointer of user page pinned for dump, to be freed afterwards by page_cache_release() or put_page().

Returns NULL on any kind of failure - a hole must then be inserted into the corefile, to preserve alignment with its headers; and also returns NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - allowing a hole to be left in the corefile to save diskspace.

Called without mmap_sem, but after all other threads have been killed.

Definition at line 2017 of file memory.c.

int __get_user_pages (struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)

__get_user_pages() - pin user pages in memory
@tsk: task_struct of target task
@mm: mm_struct of target mm
@start: starting user address
@nr_pages: number of pages from start to pin
@gup_flags: flags modifying pin behaviour
@pages: array that receives pointers to the pages pinned. Should be at least nr_pages long. Or NULL, if caller only intends to ensure the pages are faulted in.
@vmas: array of pointers to vmas corresponding to each page. Or NULL if the caller does not require them.
@nonblocking: whether waiting for disk IO or mmap_sem contention

Returns number of pages pinned. This may be fewer than the number requested. If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns -errno. Each page returned must be released with a put_page() call when it is finished with. vmas will only remain valid while mmap_sem is held.

Must be called with mmap_sem held for read or write.

__get_user_pages walks a process's page tables and takes a reference to each struct page that each user address corresponds to at a given instant. That is, it takes the page that would be accessed if a user thread accesses the given user virtual address at that instant.

This does not guarantee that the page exists in the user mappings when __get_user_pages returns, and there may even be a completely different page there in some cases (eg. if mmapped pagecache has been invalidated and subsequently re faulted). However it does guarantee that the page won't be freed completely. And mostly callers simply care that the page contains data that was valid at some point in time. Typically, an IO or similar operation cannot guarantee anything stronger anyway because locks can't be held over the syscall boundary.

If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must be called after the page is finished with, and before put_page is called.

If @nonblocking != NULL, __get_user_pages will not wait for disk IO or mmap_sem contention, and if waiting is needed to pin all pages, *@nonblocking will be set to 0.

In most cases, get_user_pages or get_user_pages_fast should be used instead of __get_user_pages. __get_user_pages should be used only if you need some special @gup_flags.

Definition at line 1679 of file memory.c.
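
A minimal sketch of the pin/use/release cycle described above (the helper name and single-page usage are illustrative, not part of memory.c):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative helper: pin one page of a target task for reading.
 * FOLL_GET is required whenever a pages array is supplied. */
static int pin_one_page(struct task_struct *tsk, struct mm_struct *mm,
                        unsigned long uaddr)
{
        struct page *page;
        int ret;

        down_read(&mm->mmap_sem);               /* mmap_sem held for read */
        ret = __get_user_pages(tsk, mm, uaddr, 1, FOLL_GET | FOLL_TOUCH,
                               &page, NULL, NULL);
        up_read(&mm->mmap_sem);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... access the page contents, e.g. via kmap() ... */

        put_page(page);                         /* release the pin */
        return 0;
}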

int __pmd_alloc (struct mm_struct *mm, pud_t *pud, unsigned long address)

Definition at line 3607 of file memory.c.

int __pte_alloc (struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address)

Definition at line 576 of file memory.c.

int __pte_alloc_kernel (pmd_t *pmd, unsigned long address)

Definition at line 615 of file memory.c.

int __pud_alloc (struct mm_struct *mm, pgd_t *pgd, unsigned long address)

Definition at line 3584 of file memory.c.

__setup ( "norandmaps"  ,
disable_randmaps   
)
int access_process_vm ( struct task_struct tsk,
unsigned long  addr,
void buf,
int  len,
int  write 
)

Definition at line 3906 of file memory.c.

int access_remote_vm (struct mm_struct *mm, unsigned long addr, void *buf, int len, int write)

access_remote_vm - access another process' address space
@mm: the mm_struct of the target address space
@addr: start address to access
@buf: source or destination buffer
@len: number of bytes to transfer
@write: whether the access is a write

The caller must hold a reference on @mm.

Definition at line 3895 of file memory.c.
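
A sketch of the usual call pattern (peek_task is a hypothetical name; get_task_mm()/mmput() supply and drop the mm reference required above):

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative: read len bytes from another task's address space. */
static int peek_task(struct task_struct *tsk, unsigned long addr,
                     void *buf, int len)
{
        struct mm_struct *mm = get_task_mm(tsk);
        int copied;

        if (!mm)
                return -EINVAL;         /* kernel thread, or task exiting */
        copied = access_remote_vm(mm, addr, buf, len, 0);       /* 0 == read */
        mmput(mm);
        return copied;                  /* bytes actually transferred */
}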

int apply_to_page_range (struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data)

Definition at line 2430 of file memory.c.
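
A hedged sketch of how a pte_fn_t callback plugs in. The counting callback is invented for illustration; the pgtable_t token parameter follows the 3.7-era pte_fn_t typedef in <linux/mm.h>:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative callback: count ptes that are currently present. A
 * non-zero return value would abort the walk. */
static int count_present_pte(pte_t *pte, pgtable_t token,
                             unsigned long addr, void *data)
{
        unsigned long *count = data;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

/* apply_to_page_range() creates page tables for the range as needed and
 * calls the callback once per pte slot in [addr, addr + size). */
static unsigned long count_present(struct mm_struct *mm,
                                   unsigned long addr, unsigned long size)
{
        unsigned long count = 0;

        apply_to_page_range(mm, addr, size, count_present_pte, &count);
        return count;
}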

int copy_page_range (struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma)

Definition at line 1035 of file memory.c.

int copy_pte_range (struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end)

Definition at line 917 of file memory.c.

core_initcall (init_zero_pfn)
EXPORT_SYMBOL (max_mapnr)
EXPORT_SYMBOL (mem_map)
EXPORT_SYMBOL (num_physpages)
EXPORT_SYMBOL (high_memory)
EXPORT_SYMBOL (__get_user_pages)
EXPORT_SYMBOL (get_user_pages)
EXPORT_SYMBOL (vm_insert_page)
EXPORT_SYMBOL (vm_insert_pfn)
EXPORT_SYMBOL (vm_insert_mixed)
EXPORT_SYMBOL (remap_pfn_range)
EXPORT_SYMBOL (unmap_mapping_range)
EXPORT_SYMBOL (follow_pfn)
EXPORT_SYMBOL_GPL (zap_vma_ptes)
EXPORT_SYMBOL_GPL (apply_to_page_range)

int fixup_user_fault (struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags)

Definition at line 1890 of file memory.c.

struct page * follow_page (struct vm_area_struct *vma, unsigned long address, unsigned int flags)

follow_page - look up a page descriptor from a user-virtual address
@vma: vm_area_struct mapping @address
@address: virtual address to look up
@flags: flags modifying lookup behaviour

@flags can have FOLL_ flags set, defined in <linux/mm.h>

Returns the mapped (struct page *), NULL if no mapping exists, or an error pointer if there is a mapping to something not represented by a page descriptor (see also vm_normal_page()).

Definition at line 1479 of file memory.c.
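
A minimal usage sketch, assuming the caller already holds mm->mmap_sem and has located the vma covering the address (the helper name is illustrative):

#include <linux/err.h>
#include <linux/mm.h>

/* Illustrative: look up the page behind a user address. FOLL_GET takes
 * a reference that the caller must drop with put_page() when done. */
static struct page *lookup_user_page(struct vm_area_struct *vma,
                                     unsigned long addr)
{
        struct page *page = follow_page(vma, addr, FOLL_GET);

        if (!page || IS_ERR(page))      /* hole, or no struct page behind it */
                return NULL;
        return page;
}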

int follow_pfn (struct vm_area_struct *vma, unsigned long address, unsigned long *pfn)

follow_pfn - look up PFN at a user virtual address
@vma: memory mapping
@address: user virtual address
@pfn: location to store found PFN

Only IO mappings and raw PFN mappings are allowed.

Returns zero and the pfn at @pfn on success, -ve otherwise.

Definition at line 3753 of file memory.c.
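
A short sketch of a lookup under mmap_sem (the wrapper is hypothetical; follow_pfn itself rejects vmas that are not IO/PFN mappings):

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative: translate a user virtual address to a page frame number.
 * Fails with -EINVAL unless the vma is a VM_IO/VM_PFNMAP mapping. */
static int vaddr_to_pfn(struct mm_struct *mm, unsigned long addr,
                        unsigned long *pfn)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        if (vma && vma->vm_start <= addr)
                ret = follow_pfn(vma, addr, pfn);
        up_read(&mm->mmap_sem);
        return ret;
}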

void free_pgd_range (struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling)

Definition at line 483 of file memory.c.

void free_pgtables (struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling)

Definition at line 541 of file memory.c.

struct vm_area_struct * get_gate_vma (struct mm_struct *mm)

Definition at line 3674 of file memory.c.

int get_user_pages (struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)

Definition at line 1970 of file memory.c.
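
A sketch of the common pattern for the current task, following the dirty-then-release rule spelled out for __get_user_pages() above (names are illustrative):

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative: pin nr pages of current for writing, then dirty and
 * release them. */
static int pin_current_pages(unsigned long start, int nr, struct page **pages)
{
        int i, got;

        down_read(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, start, nr,
                             1 /* write */, 0 /* force */, pages, NULL);
        up_read(&current->mm->mmap_sem);

        for (i = 0; i < got; i++) {
                set_page_dirty_lock(pages[i]);  /* contents were changed */
                put_page(pages[i]);             /* drop the pin */
        }
        return got;                             /* may be fewer than nr */
}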

int handle_mm_fault (struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)

Definition at line 3503 of file memory.c.

int handle_pte_fault (struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags)

Definition at line 3447 of file memory.c.

int in_gate_area_no_mm (unsigned long addr)

Definition at line 3683 of file memory.c.

int make_pages_present (unsigned long addr, unsigned long end)

Definition at line 3632 of file memory.c.

void pgd_clear_bad (pgd_t *pgd)

Definition at line 381 of file memory.c.

void pmd_clear_bad (pmd_t *pmd)

Definition at line 393 of file memory.c.

void print_vma_addr (char *prefix, unsigned long ip)

Definition at line 3925 of file memory.c.

void pud_clear_bad (pud_t *pud)

Definition at line 387 of file memory.c.

int remap_pfn_range (struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot)

remap_pfn_range - remap kernel memory to userspace
@vma: user vma to map to
@addr: target user address to start at
@pfn: physical address of kernel memory
@size: size of map area
@prot: page protection flags for this mapping

Note: this is only safe if the mm semaphore is held when called.

Definition at line 2292 of file memory.c.
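
The classic consumer is a driver's mmap file operation; a minimal sketch (mydrv_phys is an assumed physical buffer base, not something defined in memory.c):

#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t mydrv_phys;          /* assumed device/buffer base */

/* Illustrative ->mmap handler: map the whole buffer into the caller.
 * The mm semaphore is held by the mmap() path, satisfying the note
 * above. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        return remap_pfn_range(vma, vma->vm_start,
                               mydrv_phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}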

void unmap_mapping_range (struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows)

unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
@mapping: the address space containing mmaps to be unmapped.
@holebegin: byte in first page to unmap, relative to the start of the underlying file. This will be rounded down to a PAGE_SIZE boundary. Note that this is different from truncate_pagecache(), which must keep the partial page. In contrast, we must get rid of partial pages.
@holelen: size of prospective hole in bytes. This will be rounded up to a PAGE_SIZE boundary. A holelen of zero truncates to the end of the file.
@even_cows: 1 when truncating a file, unmap even private COWed pages; but 0 when invalidating pagecache, don't throw away private data.

Definition at line 2875 of file memory.c.
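
As an example of the truncation case, a sketch loosely modeled on truncate_pagecache(): round the new size up so the partial page is kept, then zap every mapping beyond it, including private COW copies:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Illustrative: drop all user mappings beyond the new end of file. */
static void drop_tail_mappings(struct inode *inode, loff_t newsize)
{
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /* holelen == 0 extends the hole to the end of the file;
         * even_cows == 1 because this is a truncate. */
        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}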

void unmap_vmas (struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr)

unmap_vmas - unmap a range of memory covered by a list of vma's
@tlb: address of the caller's struct mmu_gather
@vma: the starting vma
@start_addr: virtual address at which to start unmapping
@end_addr: virtual address at which to end unmapping

Unmap all pages in the vma list.

Only addresses between `start' and `end' will be unmapped.

The VMA list must be sorted in ascending virtual address order.

unmap_vmas() assumes that the caller will flush the whole unmapped address range after unmap_vmas() returns. So the only responsibility here is to ensure that any thus-far unmapped pages are flushed before unmap_vmas() drops the lock and schedules.

Definition at line 1381 of file memory.c.

int vm_insert_mixed (struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)

Definition at line 2190 of file memory.c.

int vm_insert_page (struct vm_area_struct *vma, unsigned long addr, struct page *page)

Definition at line 2101 of file memory.c.
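
A hedged sketch of the intended use: a driver sharing a page it allocated itself with userspace through its mmap handler (one page only, for brevity; the handler name is invented):

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative ->mmap handler backing the first page of the vma with a
 * freshly allocated, zeroed kernel page. */
static int onepage_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        int ret;

        if (!page)
                return -ENOMEM;
        ret = vm_insert_page(vma, vma->vm_start, page);
        if (ret)
                __free_page(page);
        /* on success the driver would remember 'page' and free it when
         * the mapping goes away (e.g. from its release hook) */
        return ret;
}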

int vm_insert_pfn (struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)

vm_insert_pfn - insert single pfn into user vma
@vma: user vma to map to
@addr: target user address of this page
@pfn: source kernel pfn

Similar to vm_insert_page, this allows drivers to insert individual pages they've allocated into a user vma. Same comments apply.

This function should only be called from a vm_ops->fault handler, and in that case the handler should return NULL.

vma cannot be a COW mapping.

As this is called only for pages that do not currently exist, we do not need to flush old virtual caches or the TLB.

Definition at line 2162 of file memory.c.
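
A sketch of a ->fault handler for a pfn-backed (VM_PFNMAP) region; mydev_phys is assumed. Drivers of this era conventionally return VM_FAULT_NOPAGE after a successful vm_insert_pfn(), since the pte is already installed and no struct page is handed back to the fault path:

#include <linux/mm.h>

static phys_addr_t mydev_phys;          /* assumed device memory base */

/* Illustrative fault handler: install the pfn for the faulting address. */
static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long uaddr = (unsigned long)vmf->virtual_address;
        unsigned long pfn = (mydev_phys >> PAGE_SHIFT) +
                            ((uaddr - vma->vm_start) >> PAGE_SHIFT);

        if (vm_insert_pfn(vma, uaddr, pfn))
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;         /* pte already installed */
}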

struct page * vm_normal_page (struct vm_area_struct *vma, unsigned long addr, pte_t pte)

Definition at line 781 of file memory.c.

void zap_page_range (struct vm_area_struct *vma, unsigned long start, unsigned long size, struct zap_details *details)

zap_page_range - remove user pages in a given range
@vma: vm_area_struct holding the applicable pages
@start: starting address of pages to zap
@size: number of bytes to zap
@details: details of nonlinear truncation or shared cache invalidation

Caller must protect the VMA list

Definition at line 1402 of file memory.c.

int zap_vma_ptes (struct vm_area_struct *vma, unsigned long address, unsigned long size)

zap_vma_ptes - remove ptes mapping the vma
@vma: vm_area_struct holding ptes to be zapped
@address: starting address of pages to zap
@size: number of bytes to zap

This function only unmaps ptes assigned to VM_PFNMAP vmas.

The entire address range must be fully contained within the vma.

Returns 0 if successful.

Definition at line 1456 of file memory.c.
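
For instance, a driver revoking a mapping it previously established with remap_pfn_range() might do the following (sketch; the function name is invented):

#include <linux/mm.h>

/* Illustrative: tear down every pte in a VM_PFNMAP vma the driver owns.
 * Later user accesses will fault and can then be refused or remapped. */
static int revoke_user_mapping(struct vm_area_struct *vma)
{
        return zap_vma_ptes(vma, vma->vm_start,
                            vma->vm_end - vma->vm_start);
}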

Variable Documentation

int randomize_va_space __read_mostly
Initial value:
= 2

Definition at line 98 of file memory.c.

void* high_memory

Definition at line 87 of file memory.c.

unsigned long max_mapnr

Definition at line 72 of file memory.c.

struct page* mem_map

Definition at line 73 of file memory.c.

unsigned long num_physpages

Definition at line 79 of file memory.c.