#include <linux/hash.h>
#include <linux/slab.h>

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)
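/*
 * FLATMEM/DISCONTIGMEM case: each node carries one contiguous
 * page_cgroup array, reached through its pglist_data.
 */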
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long offset;
        struct page_cgroup *base;

        base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
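        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_cgroup arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */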
        if (unlikely(!base))
                return NULL;
#endif
        offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
        return base + offset;
}
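/* Allocate one table covering the node's entire spanned pfn range. */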
static int __init alloc_node_page_cgroup(int nid)
{
        struct page_cgroup *base;
        unsigned long table_size;
        unsigned long nr_pages;

        nr_pages = NODE_DATA(nid)->node_spanned_pages;

        table_size = sizeof(struct page_cgroup) * nr_pages;

        total_usage += table_size;
        return 0;
}
void __init page_cgroup_init_flatmem(void)
{
        int nid, fail;

        if (mem_cgroup_disabled())
                return;

        fail = alloc_node_page_cgroup(nid);

        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
                         " don't want memory cgroups\n");
        panic("Out of memory");
}
#else   /* CONFIG_SPARSEMEM */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
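        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_cgroup arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */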
        if (!section->page_cgroup)
                return NULL;
#endif
        return section->page_cgroup + pfn;
}
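/*
 * Prefer a node-local, physically contiguous allocation for the
 * section's table; fall back to vmalloc when memory is fragmented.
 */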
static void *__meminit alloc_page_cgroup(size_t size, int nid)
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
        struct mem_section *section;
        struct page_cgroup *base;
        unsigned long table_size;

        section = __pfn_to_section(pfn);
        if (section->page_cgroup)
                return 0;
        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
        base = alloc_page_cgroup(table_size, nid);
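        /*
         * The passed "pfn" may not be aligned to SECTION.  Mask it so
         * that "base - pfn" below is relative to the section start.
         */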
        pfn &= PAGE_SECTION_MASK;
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
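/* Undo alloc_page_cgroup(): the table may be vmalloc'd or page-backed. */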
static void free_page_cgroup(void *addr)
{
        if (is_vmalloc_addr(addr)) {
                vfree(addr);
        } else {
                struct page *page = virt_to_page(addr);
                size_t table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;

                BUG_ON(PageReserved(page));
                free_pages_exact(addr, table_size);
        }
}
void __free_page_cgroup(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_cgroup *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_cgroup)
                return;
        base = ms->page_cgroup + pfn;
        free_page_cgroup(base);
        ms->page_cgroup = NULL;
}
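/*
 * Memory hotplug: make sure every present section in the range being
 * onlined has a page_cgroup table before its pages become visible.
 */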
int __meminit online_page_cgroup(unsigned long start_pfn,
                                 unsigned long nr_pages,
                                 int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;
        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);
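        /*
         * A nid of -1 means the range lies within an already-online
         * node, so the node can be derived from start_pfn.
         */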
        nid = pfn_to_nid(start_pfn);
        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn, nid);
        }
        if (!fail)
                return 0;
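        /* rollback: free whatever was initialized before the failure */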
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);

        return -ENOMEM;
}
int __meminit offline_page_cgroup(unsigned long start_pfn,
                                  unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;
        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);
        return 0;
}
        return notifier_from_errno(ret);
}

#endif  /* CONFIG_MEMORY_HOTPLUG */
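/* Boot-time initialization of the per-section tables (SPARSEMEM case). */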
void __init page_cgroup_init(void)
{
        unsigned long pfn;
        int nid;

        if (mem_cgroup_disabled())
                return;
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
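                /*
                 * Node pfn ranges can overlap on some architectures,
                 * e.g. N0 | N1 | N2 | N0 | N1 | N2 | ... along the pfn
                 * axis, so skip pfns that belong to another node.
                 */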
                if (pfn_to_nid(pfn) != nid)
                        continue;
                if (init_section_page_cgroup(pfn, nid))
                        goto oom;
        }

        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
                         "don't want memory cgroups\n");
        return;
oom:
        panic("Out of memory");
}

#endif
#ifdef CONFIG_MEMCG_SWAP
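/*
 * Swap accounting: one swap_cgroup record per swap slot remembers the
 * id of the mem_cgroup that owned the swapped-out page.
 */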
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
        spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))
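/*
 * Allocate one zeroed page for every slot of ctrl->map; each page
 * holds SC_PER_PAGE swap_cgroup records.
 */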
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                if (!page)
                        goto not_enough_page;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);
        return -ENOMEM;
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
                                              struct swap_cgroup_ctrl **ctrlp)
{
        pgoff_t offset = swp_offset(ent);
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;

        ctrl = &swap_cgroup_ctrl[swp_type(ent)];
        if (ctrlp)
                *ctrlp = ctrl;

        mappage = ctrl->map[offset / SC_PER_PAGE];
        sc = page_address(mappage);
        return sc + offset % SC_PER_PAGE;
}
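/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */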
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                   unsigned short old, unsigned short new)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned long flags;
        unsigned short retval;

        sc = lookup_swap_cgroup(ent, &ctrl);
        spin_unlock_irqrestore(&ctrl->lock, flags);
        return retval;
}
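/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup to be recorded
 *
 * Returns old value at success, 0 at failure.
 * (Of course, old value can be 0.)
 */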
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned short old;
        unsigned long flags;

        sc = lookup_swap_cgroup(ent, &ctrl);
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return old;
}
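/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */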
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
        return lookup_swap_cgroup(ent, NULL)->id;
}
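/* swapon: size and allocate the page-pointer array for this swap file. */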
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;
        array_size = length * sizeof(void *);

        ctrl = &swap_cgroup_ctrl[type];

        if (swap_cgroup_prepare(type)) {
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
        printk(KERN_INFO
               "swap_cgroup can be disabled by swapaccount=0 boot option\n");
        return -ENOMEM;
}
void swap_cgroup_swapoff(int type)
{
        struct page **map;
        unsigned long i, length;
        struct swap_cgroup_ctrl *ctrl;

        ctrl = &swap_cgroup_ctrl[type];
        map = ctrl->map;
        length = ctrl->length;

        for (i = 0; i < length; i++) {
                struct page *page = map[i];
                if (page)
                        __free_page(page);
        }
        vfree(map);
}