Linux Kernel 3.7.1
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include "percpu-vm.c"
Data Structures

struct pcpu_chunk

Macros

#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)
#define pcpu_for_each_pop_region(chunk, rs, re, start, end)
#define PCPU_SETUP_BUG_ON(cond)
Functions

EXPORT_SYMBOL_GPL(pcpu_base_addr)
void __percpu *__alloc_percpu(size_t size, size_t align)
EXPORT_SYMBOL_GPL(__alloc_percpu)
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
void free_percpu(void __percpu *ptr)
EXPORT_SYMBOL_GPL(free_percpu)
bool is_kernel_percpu_address(unsigned long addr)
phys_addr_t per_cpu_ptr_to_phys(void *addr)
struct pcpu_alloc_info *__init pcpu_alloc_alloc_info(int nr_groups, int nr_units)
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr)
void __init setup_per_cpu_areas(void)
void __init percpu_init_late(void)
#define PCPU_SETUP_BUG_ON(cond)
__alloc_percpu - allocate dynamic percpu area
@size: size of area to allocate in bytes
@align: alignment of area (max PAGE_SIZE)

Allocate zero-filled percpu area of @size bytes aligned at @align. Might sleep. Might trigger writeouts.
CONTEXT: Does GFP_KERNEL allocation.
RETURNS: Percpu pointer to the allocated area on success, NULL on failure.
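As an illustration, a minimal usage sketch (the counter name and helper functions are hypothetical; alloc_percpu() is the usual type-safe wrapper that expands to __alloc_percpu(sizeof(type), __alignof__(type))):

#include <linux/percpu.h>

static u64 __percpu *hit_count;

static int __init counter_init(void)
{
	/* The returned area is zero-filled on every CPU. */
	hit_count = alloc_percpu(u64);
	if (!hit_count)
		return -ENOMEM;
	return 0;
}

static void counter_hit(void)
{
	/* get_cpu_ptr() disables preemption while this CPU's copy
	 * is being modified. */
	u64 *c = get_cpu_ptr(hit_count);
	(*c)++;
	put_cpu_ptr(hit_count);
}

static void counter_exit(void)
{
	free_percpu(hit_count);
}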
__alloc_reserved_percpu - allocate reserved percpu area
@size: size of area to allocate in bytes
@align: alignment of area (max PAGE_SIZE)

Allocate zero-filled percpu area of @size bytes aligned at @align from the reserved percpu area if the arch has set it up; otherwise, the allocation is served from the same dynamic area. Might sleep. Might trigger writeouts.
CONTEXT: Does GFP_KERNEL allocation.
RETURNS: Percpu pointer to the allocated area on success, NULL on failure.
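The canonical in-tree caller is the module loader; a simplified sketch of that pattern (condensed from what kernel/module.c's percpu allocation path does in this era, helper name abbreviated):

/* Module static percpu areas must come from the reserved region so
 * that their offsets stay within the architecture's relocation
 * range. Alignment is capped at PAGE_SIZE, the allocator's limit. */
static void __percpu *mod_percpu_alloc(unsigned long size,
				       unsigned long align)
{
	if (align > PAGE_SIZE) {
		printk(KERN_WARNING "percpu alignment %lu > PAGE_SIZE\n",
		       align);
		align = PAGE_SIZE;
	}
	return __alloc_reserved_percpu(size, align);
}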
EXPORT_SYMBOL_GPL(pcpu_base_addr)
EXPORT_SYMBOL_GPL(__alloc_percpu)
EXPORT_SYMBOL_GPL(free_percpu)
is_kernel_percpu_address - test whether address is from static percpu area
@addr: address to test

Test whether @addr belongs to the in-kernel static percpu area. Module static percpu areas are not considered. For those, use is_module_percpu_address().

RETURNS: true if @addr is from the in-kernel static percpu area, false otherwise.
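A hedged sketch of the usual pattern, modeled on lockdep's static_obj() check (the helper name is hypothetical):

#include <linux/module.h>
#include <asm/sections.h>

/* Is the object statically allocated, either in the kernel image or
 * in a static percpu area (core kernel or module)? */
static bool object_is_static(const void *obj)
{
	unsigned long addr = (unsigned long)obj;

	if (addr >= (unsigned long)_stext && addr < (unsigned long)_end)
		return true;			/* kernel text/data */

	if (is_kernel_percpu_address(addr))
		return true;			/* core static percpu */

	/* Module static percpu areas need the module-aware check. */
	return is_module_percpu_address(addr) || is_module_address(addr);
}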
pcpu_alloc_alloc_info - allocate percpu allocation info
@nr_groups: the number of groups
@nr_units: the number of units

Allocate an ai which is large enough for @nr_groups groups containing @nr_units units. The returned ai's groups[0].cpu_map points to the cpu_map array, which is long enough for @nr_units and filled with NR_CPUS. It's the caller's responsibility to initialize the cpu_map pointers of the other groups.

RETURNS: Pointer to the allocated pcpu_alloc_info on success, NULL on failure.
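A hedged sketch of the cpu_map hand-off for a multi-group layout (the two-group split is hypothetical; pcpu_build_alloc_info() in this file does the real version):

/* Slice the single backing cpu_map array across two groups; only
 * groups[0].cpu_map is set up by pcpu_alloc_alloc_info() itself. */
static struct pcpu_alloc_info * __init build_two_group_ai(void)
{
	struct pcpu_alloc_info *ai;
	int group, unit = 0;

	ai = pcpu_alloc_alloc_info(2, num_possible_cpus());
	if (!ai)
		return NULL;

	for (group = 0; group < 2; group++) {
		ai->groups[group].cpu_map = &ai->groups[0].cpu_map[unit];
		/* ... assign CPUs and set ->nr_units for this group ... */
		unit += ai->groups[group].nr_units;
	}
	return ai;		/* release with pcpu_free_alloc_info() */
}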
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
pcpu_free_alloc_info - free percpu allocation info
@ai: pcpu_alloc_info to free

Free @ai which was allocated by pcpu_alloc_alloc_info().
pcpu_setup_first_chunk - initialize the first percpu chunk
@ai: pcpu_alloc_info describing how the percpu area is shaped
@base_addr: mapped address

Initialize the first percpu chunk which contains the kernel static percpu area. This function is to be called from the arch percpu area setup path.

@ai contains all information necessary to initialize the first chunk and prime the dynamic percpu allocator.

@ai->static_size is the size of the static percpu area.

@ai->reserved_size, if non-zero, specifies the number of bytes to reserve after the static area in the first chunk. This reserves part of the first chunk so that it's available only through reserved percpu allocation. This is primarily used to serve module static percpu areas on architectures where the addressing model has a limited offset range for symbol relocations, to guarantee that module percpu symbols fall inside the relocatable range.

@ai->dyn_size determines the number of bytes available for dynamic allocation in the first chunk. The area between @ai->static_size + @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.

@ai->unit_size specifies the unit size and must be aligned to PAGE_SIZE and be equal to or larger than @ai->static_size + @ai->reserved_size + @ai->dyn_size.

@ai->atom_size is the allocation atom size and is used as the alignment for vm areas.

@ai->alloc_size is the allocation size and always a multiple of @ai->atom_size. This is larger than @ai->atom_size if @ai->unit_size is larger than @ai->atom_size.

@ai->nr_groups and @ai->groups describe the virtual memory layout of the percpu areas. Units which should be colocated are put into the same group. Dynamic VM areas will be allocated according to these groupings. If @ai->nr_groups is zero, a single group containing all units is assumed.

The caller should have mapped the first chunk at @base_addr and copied the static data to each unit.

If the first chunk ends up with both reserved and dynamic areas, it is served by two chunks - one to serve the core static and reserved areas and the other for the dynamic area. They share the same vm and page map but use separate area allocation maps to stay away from each other. The latter chunk is circulated in the chunk slots and available for dynamic allocation like any other chunk.
RETURNS: 0 on success, -errno on failure.
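A heavily simplified, hypothetical arch setup sketch; real architectures normally go through the pcpu_embed_first_chunk() or pcpu_page_first_chunk() helpers rather than open-coding this, and the sizes here are illustrative:

/* Hypothetical single-group, single-unit setup with the first chunk
 * embedded in the linear address space. */
static void __init example_setup_per_cpu_areas(void)
{
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	struct pcpu_alloc_info *ai;
	void *base;

	ai = pcpu_alloc_alloc_info(1, 1);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");

	ai->static_size = static_size;
	ai->reserved_size = 0;
	ai->dyn_size = PERCPU_DYNAMIC_RESERVE;
	ai->unit_size = PFN_ALIGN(static_size + ai->dyn_size);
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = ai->unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;	/* unit 0 -> cpu 0 */

	/* Back the chunk and copy the static percpu data into it. */
	base = alloc_bootmem(ai->unit_size);
	memcpy(base, __per_cpu_start, static_size);

	if (pcpu_setup_first_chunk(ai, base) < 0)
		panic("failed to initialize first percpu chunk");

	pcpu_free_alloc_info(ai);
}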
phys_addr_t per_cpu_ptr_to_phys(void *addr)
per_cpu_ptr_to_phys - convert translated percpu address to physical address
@addr: the address to be converted to physical address

Given @addr which is a dereferenceable address obtained via one of the percpu access macros, this function translates it into its physical address. The caller is responsible for ensuring @addr stays valid until this function finishes.

The percpu allocator has a special setup for the first chunk, which currently supports either embedding in the linear address space or vmalloc mapping, and, from the second chunk on, the backing allocator (currently either vm or km) provides translation.

The address could be translated directly without checking whether it falls into the first chunk, but the current code better reflects how the percpu allocator actually works, and the verification can discover bugs both in the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So the current code is kept.

RETURNS: The physical address for @addr.
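A hedged usage sketch (the helper is hypothetical; in-tree callers are mostly arch and crash-dump code):

#include <linux/percpu.h>
#include <linux/printk.h>

/* Print the physical address backing one CPU's copy of a percpu
 * variable. per_cpu_ptr() yields the dereferenceable address that
 * per_cpu_ptr_to_phys() expects. */
static void show_percpu_phys(u64 __percpu *var, unsigned int cpu)
{
	void *addr = per_cpu_ptr(var, cpu);
	phys_addr_t phys = per_cpu_ptr_to_phys(addr);

	pr_info("cpu%u: virt %p phys 0x%llx\n",
		cpu, addr, (unsigned long long)phys);
}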