13 #include <linux/module.h>
16 #include <linux/capability.h>
17 #include <linux/device.h>
23 #include <linux/stat.h>
24 #include <linux/slab.h>
27 #include <asm/uaccess.h>
/* Name of the sysfs subsystem/class under which memory block devices appear. */
31 #define MEMORY_CLASS_NAME "memory"
/*
 * Number of mem_sections grouped into one memory block; used as the
 * divisor when mapping a section number to its block id and as the
 * iteration bound when walking a block's sections.
 */
33 static int sections_per_block;
35 static inline int base_memory_block_id(
int section_nr)
37 return section_nr / sections_per_block;
40 static struct bus_type memory_subsys = {
81 memory->
dev.bus = &memory_subsys;
91 BUG_ON(memory->
dev.bus != &memory_subsys);
103 static unsigned long get_memory_block_size(
void)
105 unsigned long block_sz;
128 unsigned long phys_index;
131 return sprintf(buf,
"%08lx\n", phys_index);
139 unsigned long phys_index;
142 return sprintf(buf,
"%08lx\n", phys_index);
151 unsigned long i, pfn;
156 for (i = 0; i < sections_per_block; i++) {
158 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
161 return sprintf(buf,
"%d\n", ret);
178 switch (mem->
state) {
180 len =
sprintf(buf,
"online\n");
183 len =
sprintf(buf,
"offline\n");
186 len =
sprintf(buf,
"going-offline\n");
189 len =
sprintf(buf,
"ERROR-UNKNOWN-%ld\n",
212 static bool pages_correctly_reserved(
unsigned long start_pfn,
213 unsigned long nr_pages)
217 unsigned long pfn = start_pfn;
224 for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
229 for (j = 0; j < PAGES_PER_SECTION; j++) {
230 if (PageReserved(page + j))
234 "not reserved, was it already online?\n",
235 pfn_to_section_nr(pfn), j);
249 memory_block_action(
unsigned long phys_index,
unsigned long action)
251 unsigned long start_pfn;
252 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
256 first_page =
pfn_to_page(phys_index << PFN_SECTION_SHIFT);
261 if (!pages_correctly_reserved(start_pfn, nr_pages))
264 ret = online_pages(start_pfn, nr_pages);
271 "%ld\n", __func__, phys_index, action, action);
278 static int __memory_block_change_state(
struct memory_block *mem,
279 unsigned long to_state,
unsigned long from_state_req)
283 if (mem->
state != from_state_req) {
294 mem->
state = from_state_req;
298 mem->
state = to_state;
299 switch (mem->
state) {
313 static int memory_block_change_state(
struct memory_block *mem,
314 unsigned long to_state,
unsigned long from_state_req)
319 ret = __memory_block_change_state(mem, to_state, from_state_req);
325 store_mem_state(
struct device *dev,
333 if (!
strncmp(buf,
"online",
min((
int)count, 6)))
335 else if(!
strncmp(buf,
"offline",
min((
int)count, 7)))
/*
 * Read-only (0444) sysfs attribute backed by show_mem_end_phys_index;
 * no store handler (NULL), so the file cannot be written.
 */
361 static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index,
NULL);
/*
 * Create the sysfs file for dev_attr_<attr_name> under this memory
 * block's device; token-pastes the attribute name onto the dev_attr_
 * prefix generated by DEVICE_ATTR().
 */
366 #define mem_create_simple_file(mem, attr_name) \
367 device_create_file(&mem->dev, &dev_attr_##attr_name)
/*
 * Remove the sysfs file for dev_attr_<attr_name> from this memory
 * block's device; counterpart of mem_create_simple_file().
 */
368 #define mem_remove_simple_file(mem, attr_name) \
369 device_remove_file(&mem->dev, &dev_attr_##attr_name)
378 return sprintf(buf,
"%lx\n", get_memory_block_size());
383 static int block_size_init(
void)
386 &dev_attr_block_size_bytes);
395 #ifdef CONFIG_ARCH_MEMORY_PROBE
398 const char *buf,
size_t count)
403 unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
407 if (phys_addr & ((pages_per_block <<
PAGE_SHIFT) - 1))
410 for (i = 0; i < sections_per_block; i++) {
411 nid = memory_add_physaddr_to_nid(phys_addr);
426 static int memory_probe_init(
void)
431 static inline int memory_probe_init(
void)
437 #ifdef CONFIG_MEMORY_FAILURE
444 store_soft_offline_page(
struct device *dev,
446 const char *buf,
size_t count)
458 return ret == 0 ? count :
ret;
463 store_hard_offline_page(
struct device *dev,
465 const char *buf,
size_t count)
475 return ret ? ret :
count;
/*
 * 0644 sysfs attribute with no show handler (NULL); writes are routed
 * to store_hard_offline_page.
 */
479 static DEVICE_ATTR(hard_offline_page, 0644,
NULL, store_hard_offline_page);
481 static __init int memory_fail_init(
void)
486 &dev_attr_soft_offline_page);
489 &dev_attr_hard_offline_page);
493 static inline int memory_fail_init(
void)
516 int block_id = base_memory_block_id(
__section_nr(section));
545 unsigned long start_pfn;
555 base_memory_block_id(scn_nr) * sections_per_block;
563 ret = register_memory(mem);
579 static int add_memory_section(
int nid,
struct mem_section *section,
581 unsigned long state,
enum mem_add_context
context)
589 if (context ==
BOOT) {
592 if (scn_nr >= (*mem_p)->start_section_nr &&
593 scn_nr <= (*mem_p)->end_section_nr) {
604 ret = init_memory_block(&mem, section, state);
606 if (!ret && context ==
BOOT)
612 if (context == HOTPLUG &&
614 ret = register_mem_sect_under_node(mem, nid);
628 unregister_mem_sect_under_nodes(mem,
__section_nr(section));
637 unregister_memory(mem);
657 if (!present_section(section))
686 unsigned long block_sz;
693 block_sz = get_memory_block_size();
700 for (i = 0; i < NR_MEM_SECTIONS; i++) {
701 if (!present_section_nr(i))
704 err = add_memory_section(0, __nr_to_section(i),
705 (sections_per_block == 1) ?
NULL : &mem,
712 err = memory_probe_init();
715 err = memory_fail_init();
718 err = block_size_init();