Linux Kernel 3.7.1
memblock.h
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS 128

struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        int nid;
#endif
};

struct memblock_type {
        unsigned long cnt;      /* number of regions */
        unsigned long max;      /* size of the allocated array */
        phys_addr_t total_size; /* size of all regions */
        struct memblock_region *regions;
};

struct memblock {
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
};
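
/*
 * Reader's sketch (not part of the original header): everything hangs off the
 * single global "memblock" instance declared below. Assuming at least one RAM
 * region has already been registered by the early boot code:
 *
 *      phys_addr_t first_base = memblock.memory.regions[0].base;
 *      phys_addr_t first_size = memblock.memory.regions[0].size;
 *      unsigned long nr_mem   = memblock.memory.cnt;
 */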

extern struct memblock memblock;
extern int memblock_debug;

#define memblock_dbg(fmt, ...) \
        if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
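
/*
 * Usage sketch (hypothetical message, not from this file): memblock_dbg()
 * only produces output once memblock_debug is non-zero, typically after the
 * "memblock=debug" boot option has been given:
 *
 *      memblock_dbg("reserving [%#llx-%#llx]\n",
 *                   (unsigned long long)base,
 *                   (unsigned long long)(base + size - 1));
 */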

phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
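
/*
 * Usage sketch (hypothetical caller): walk every registered memory range in
 * page-frame units; passing MAX_NUMNODES as the node id is assumed here to
 * mean "match any node":
 *
 *      unsigned long start_pfn, end_pfn;
 *      int i, nid;
 *
 *      for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *              pr_info("node %d: pfn %lu-%lu\n", nid, start_pfn, end_pfn);
 */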

void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
                           phys_addr_t *out_end, int *out_nid);

#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
        for (i = 0, \
             __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \
             i != (u64)ULLONG_MAX; \
             __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
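
/*
 * Usage sketch (hypothetical caller): iterate over memory that memblock knows
 * about but has not reserved, lowest range first. Passing NULL for an output
 * pointer that is not needed is assumed to be accepted by the iterator:
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *              pr_info("free: %#llx-%#llx\n",
 *                      (unsigned long long)start, (unsigned long long)end);
 */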

void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
                               phys_addr_t *out_end, int *out_nid);

#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
        for (i = (u64)ULLONG_MAX, \
             __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \
             i != (u64)ULLONG_MAX; \
             __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
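
/*
 * Usage sketch: same iteration as above but from the highest free range
 * downwards, e.g. to locate the topmost free range of at least 1 MiB:
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_free_mem_range_reverse(i, MAX_NUMNODES, &start, &end, NULL)
 *              if (end - start >= (1UL << 20))
 *                      break;
 */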

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
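
/*
 * Usage sketch (hypothetical early NUMA setup code): record that the first
 * 16 MiB of physical memory belongs to node 0, then read the node id back
 * through a region pointer; without CONFIG_HAVE_MEMBLOCK_NODE_MAP the getter
 * above simply reports node 0:
 *
 *      memblock_set_node(0, 16UL << 20, 0);
 *      nid = memblock_get_region_node(&memblock.memory.regions[0]);
 */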

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0

phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
                                phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
                                  phys_addr_t max_addr);
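
/*
 * Usage sketch (hypothetical early-boot caller): carve out one page-aligned
 * megabyte from anywhere below the currently accessible limit. As far as the
 * callers of this era suggest, memblock_alloc_base() panics on failure while
 * __memblock_alloc_base() returns 0 instead:
 *
 *      phys_addr_t buf = memblock_alloc_base(1UL << 20, PAGE_SIZE,
 *                                            MEMBLOCK_ALLOC_ACCESSIBLE);
 */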

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
        if (memblock_debug)
                __memblock_dump_all();
}


/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}
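
/*
 * Worked example (assuming PAGE_SIZE == 4096): a reserved region with
 * base = 0x1800 and size = 0x1000 ends at 0x2800, so
 *
 *      memblock_region_reserved_base_pfn() -> PFN_DOWN(0x1800) = 1
 *      memblock_region_reserved_end_pfn()  -> PFN_UP(0x2800)   = 3
 *      memblock_region_memory_base_pfn()   -> PFN_UP(0x1800)   = 2
 *      memblock_region_memory_end_pfn()    -> PFN_DOWN(0x2800) = 2
 *
 * i.e. the reserved accessors round outwards to cover the whole region, while
 * the memory accessors round inwards to complete pages only.
 */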

#define for_each_memblock(memblock_type, region) \
        for (region = memblock.memblock_type.regions; \
             region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
             region++)
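
/*
 * Usage sketch (hypothetical caller): the first macro argument names a
 * memblock_type member of the global memblock (memory or reserved), e.g. to
 * dump every reserved region:
 *
 *      struct memblock_region *reg;
 *
 *      for_each_memblock(reserved, reg)
 *              pr_info("reserved: %#llx-%#llx\n",
 *                      (unsigned long long)reg->base,
 *                      (unsigned long long)(reg->base + reg->size));
 */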


#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif
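
/*
 * Usage sketch (hypothetical function and variable names): memblock routines
 * and data carry these annotations so that, when the architecture selects
 * CONFIG_ARCH_DISCARD_MEMBLOCK, they land in the meminit sections and may be
 * discarded once initialisation is over:
 *
 *      static int __init_memblock example_resize(void);
 *      static phys_addr_t __initdata_memblock example_limit;
 */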

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */