Linux Kernel
3.7.1
|
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
Go to the source code of this file.
Data Structures | |
struct | vmap_area |
struct | vmap_block_queue |
struct | vmap_block |
Macros | |
#define | VM_LAZY_FREE 0x01 |
#define | VM_LAZY_FREEING 0x02 |
#define | VM_VM_AREA 0x04 |
#define | VMALLOC_SPACE (128UL*1024*1024*1024) |
#define | VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) |
#define | VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ |
#define | VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ |
#define | VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) |
#define | VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ |
#define | VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ |
#define | VMAP_BBMAP_BITS |
#define | VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) |
#define | PAGE_KERNEL_EXEC PAGE_KERNEL |
#define | GFP_VMALLOC32 GFP_KERNEL |
Variables | |
struct vm_struct * | vmlist |
#define GFP_VMALLOC32 GFP_KERNEL |
#define PAGE_KERNEL_EXEC PAGE_KERNEL |
#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) |
#define VMAP_BBMAP_BITS |
#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) |
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) |
#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ |
|
read |
read_persistent_clock - Return time from the persistent clock.
Weak dummy function for arches that do not yet support it. Reads the time from the battery backed persistent clock. Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
XXX - Do be sure to remove it once all arches implement it.
read_boot_clock - Return time of the system start.
Weak dummy function for arches that do not yet support it. Function to read the exact time the system has been started. Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
XXX - Do be sure to remove it once all arches implement it.
__iowrite32_copy - copy data to MMIO space, in 32-bit units : destination, in MMIO space (must be 32-bit aligned) : source (must be 32-bit aligned) : number of 32-bit quantities to copy
Copy data from kernel space to MMIO space, in units of 32 bits at a time. Order of access is not guaranteed, nor is a memory barrier performed afterwards.
__iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units : destination, in MMIO space (must be 64-bit aligned) : source (must be 64-bit aligned) : number of 64-bit quantities to copy
Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a time. Order of access is not guaranteed, nor is a memory barrier performed afterwards.
get_user_pages_fast() - pin user pages in memory : starting user address : number of pages from start to pin : whether pages will be written to : array that receives pointers to the pages pinned. Should be at least nr_pages long.
Returns number of pages pinned. This may be fewer than the number requested. If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns -errno.
get_user_pages_fast provides equivalent functionality to get_user_pages, operating on current and current->mm, with force=0 and vma=NULL. However unlike get_user_pages, it must be called without mmap_sem held.
get_user_pages_fast may take mmap_sem and page table locks, so no assumptions can be made about lack of locking. get_user_pages_fast is to be implemented in a way that is advantageous (vs get_user_pages()) when the user memory area is already faulted in and present in ptes. However if the pages have to be faulted in, it may turn out to be slightly slower so callers need to carefully consider what to use. On many architectures, get_user_pages_fast simply falls back to get_user_pages.
void* __vmalloc_node_range | ( | unsigned long | size, |
unsigned long | align, | ||
unsigned long | start, | ||
unsigned long | end, | ||
gfp_t | gfp_mask, | ||
pgprot_t | prot, | ||
int | node, | ||
const void * | caller | ||
) |
__vmalloc_node_range - allocate virtually contiguous memory : allocation size : desired alignment : vm area range start : vm area range end : flags for the page level allocator : protection mask for the allocated pages : node to use for allocation or -1 : caller's return address
Allocate enough pages to cover @size from the page level allocator with @gfp_mask flags. Map them into contiguous kernel virtual space, using a pagetable protection of @prot.
alloc_vm_area - allocate a range of kernel address space : size of the area : returns the PTEs for the address space
Returns: NULL on failure, vm_struct on success
This function reserves a range of kernel address space, and allocates pagetables to map that range. No actual mappings are created.
If @ptes is non-NULL, pointers to the PTEs (in init_mm) allocated for the VM area are returned.
DEFINE_RWLOCK | ( | vmlist_lock | ) |
EXPORT_SYMBOL | ( | vmalloc_to_page | ) |
EXPORT_SYMBOL | ( | vmalloc_to_pfn | ) |
EXPORT_SYMBOL | ( | vm_unmap_ram | ) |
EXPORT_SYMBOL | ( | vm_map_ram | ) |
EXPORT_SYMBOL | ( | vfree | ) |
EXPORT_SYMBOL | ( | vunmap | ) |
EXPORT_SYMBOL | ( | vmap | ) |
EXPORT_SYMBOL | ( | __vmalloc | ) |
EXPORT_SYMBOL | ( | vmalloc | ) |
EXPORT_SYMBOL | ( | vzalloc | ) |
EXPORT_SYMBOL | ( | vmalloc_user | ) |
EXPORT_SYMBOL | ( | vmalloc_node | ) |
EXPORT_SYMBOL | ( | vzalloc_node | ) |
EXPORT_SYMBOL | ( | vmalloc_32 | ) |
EXPORT_SYMBOL | ( | vmalloc_32_user | ) |
EXPORT_SYMBOL | ( | remap_vmalloc_range | ) |
EXPORT_SYMBOL_GPL | ( | vm_unmap_aliases | ) |
EXPORT_SYMBOL_GPL | ( | unmap_kernel_range_noflush | ) |
EXPORT_SYMBOL_GPL | ( | map_vm_area | ) |
EXPORT_SYMBOL_GPL | ( | __get_vm_area | ) |
EXPORT_SYMBOL_GPL | ( | alloc_vm_area | ) |
EXPORT_SYMBOL_GPL | ( | free_vm_area | ) |
int map_kernel_range_noflush | ( | unsigned long | addr, |
unsigned long | size, | ||
pgprot_t | prot, | ||
struct page ** | pages | ||
) |
map_kernel_range_noflush - map kernel VM area with the specified pages : start of the VM area to map : size of the VM area to map : page protection flags to use : pages to map
Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should have been allocated using get_vm_area() and its friends.
NOTE: This function does NOT do any cache flushing. The caller is responsible for calling flush_cache_vmap() on to-be-mapped areas before calling this function.
RETURNS: The number of pages mapped on success, -errno on failure.
int remap_vmalloc_range | ( | struct vm_area_struct * | vma, |
void * | addr, | ||
unsigned long | pgoff | ||
) |
remap_vmalloc_range - map vmalloc pages to userspace : vma to cover (map full range of vma) : vmalloc memory : number of pages into addr before first page to map
Returns: 0 for success, -Exxx on failure
This function checks that addr is a valid vmalloc'ed area, and that it is big enough to cover the vma. Will return failure if that criteria isn't met.
Similar to remap_pfn_range() (see mm/memory.c)
unmap_kernel_range_noflush - unmap kernel VM area : start of the VM area to unmap : size of the VM area to unmap
Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should have been allocated using get_vm_area() and its friends.
NOTE: This function does NOT do any cache flushing. The caller is responsible for calling flush_cache_vunmap() on to-be-mapped areas before calling this function and flush_tlb_kernel_range() after.
vfree - release memory allocated by vmalloc() : memory base address
Free the virtually continuous memory area starting at @addr, as obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is NULL, no operation is performed.
Must not be called in interrupt context.
vm_area_add_early - add vmap area early during boot : vm_struct to add
This function is used to add fixed kernel vm area to vmlist before vmalloc_init() is called. ->addr, ->size, and ->flags should contain proper values and the other fields should be zero.
DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
vm_area_register_early - register vmap area early during boot : vm_struct to register : requested alignment
This function is used to register kernel vm area before vmalloc_init() is called. ->size and ->flags should contain proper values on entry and other fields should be zero. On return, vm->addr contains the allocated address.
DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) : an array of pointers to the pages to be mapped : number of pages : prefer to allocate data structures on this node : memory protection to use. PAGE_KERNEL for regular RAM
Returns: a pointer to the address that has been mapped, or NULL on failure
vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily to amortize TLB flushing overheads. What this means is that any page you have now, may, in a former life, have been mapped into kernel virtual address by the vmap layer and so there might be some CPUs with TLB entries still referencing that page (additional to the regular 1:1 kernel mapping).
vm_unmap_aliases flushes all such lazy mappings. After it returns, we can be sure that none of the pages we have control over will have any aliases from the vmap layer.
vmalloc - allocate virtually contiguous memory : allocation size Allocate enough pages to cover from the page level allocator and map them into contiguous kernel virtual space.
For tight control over page level allocator and protection flags use __vmalloc() instead.
vmalloc_exec - allocate virtually contiguous, executable memory : allocation size
Kernel-internal function to allocate enough pages to cover the page level allocator and map them into contiguous and executable kernel virtual space.
For tight control over page level allocator and protection flags use __vmalloc() instead.
vmalloc_node - allocate memory on a specific node : allocation size : numa node
Allocate enough pages to cover from the page level allocator and map them into contiguous kernel virtual space.
For tight control over page level allocator and protection flags use __vmalloc() instead.
vread() - read vmalloc area in a safe way. : buffer for reading data : vm address. : number of bytes to be read.
Returns the number of bytes by which addr and buf should be increased (the same number as @count). Returns 0 if [addr...addr+count) does not intersect any live vmalloc area.
This function checks that addr is a valid vmalloc'ed area, and copies data from that area to the given buffer. If the given memory range of [addr...addr+count) includes some valid address, data is copied to the proper area of @buf. If there are memory holes, they'll be zero-filled. IOREMAP areas are treated as memory holes and no copy is done.
If [addr...addr+count) does not intersect any live vm_struct area, returns 0. @buf should be a kernel buffer.
Note: In usual operation, vread() is never necessary because the caller should know the vmalloc() area is valid and can use memcpy(). This is for routines which have to access the vmalloc area without any information, such as /dev/kmem.
vwrite() - write vmalloc area in a safe way. : buffer for source data : vm address. : number of bytes to be read.
Returns the number of bytes by which addr and buf should be increased (the same number as @count). If [addr...addr+count) does not intersect any valid vmalloc area, returns 0.
This function checks that addr is a valid vmalloc'ed area, and copies data from a buffer to the given addr. If the specified range of [addr...addr+count) includes some valid address, data is copied from the proper area of @buf. If there are memory holes, no copy to the hole is done. IOREMAP areas are treated as memory holes and no copy is done.
If [addr...addr+count) does not intersect any live vm_struct area, returns 0. @buf should be a kernel buffer.
Note: In usual operation, vwrite() is never necessary because the caller should know the vmalloc() area is valid and can use memcpy(). This is for routines which have to access the vmalloc area without any information, such as /dev/kmem.
vzalloc - allocate virtually contiguous memory with zero fill : allocation size Allocate enough pages to cover from the page level allocator and map them into contiguous kernel virtual space. The memory allocated is set to zero.
For tight control over page level allocator and protection flags use __vmalloc() instead.
vzalloc_node - allocate memory on a specific node with zero fill : allocation size : numa node
Allocate enough pages to cover from the page level allocator and map them into contiguous kernel virtual space. The memory allocated is set to zero.
For tight control over page level allocator and protection flags use __vmalloc_node() instead.