#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>

#define CMM_DRIVER_VERSION      "1.0.0"
#define CMM_DEFAULT_DELAY       1       /* seconds between polls of the hypervisor */
#define CMM_HOTPLUG_DELAY       5       /* seconds to pause loaning after memory hotplug */
#define CMM_DEBUG               0
#define CMM_DISABLE             0
#define CMM_OOM_KB              1024    /* kilobytes handed back per OOM notification */
#define CMM_MIN_MEM_MB          256     /* never balloon below this much memory */
#define KB2PAGES(_p)            ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p)            ((_p)<<(PAGE_SHIFT-10))

/* Notifier priorities for the memory hotplug and isolate callbacks below. */
#define CMM_MEM_HOTPLUG_PRI     1
#define CMM_MEM_ISOLATE_PRI     15

static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
static struct device cmm_dev;
80 "before loaning resumes. "
86 MODULE_PARM_DESC(min_mem_mb,
"Minimum amount of memory (in MB) to not balloon. "

#define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))

#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
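
/*
 * Loaned pages are tracked in singly linked, page-sized arrays. The layout
 * below is reconstructed from how the code in this excerpt dereferences
 * pa->next, pa->index and pa->page[], and from the CMM_NR_PAGES formula
 * above; the exact field order in the original driver is an assumption.
 */
struct cmm_page_array {
        struct cmm_page_array *next;
        unsigned long index;
        unsigned long page[CMM_NR_PAGES];
};

static struct cmm_page_array *cmm_page_list;
static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;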

static unsigned long loaned_pages;
static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages;

static int hotplug_occurred;
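
/*
 * cmm_alloc_pages - allocate pages and mark them as loaned to the hypervisor.
 * Pages are recorded on cmm_page_list under cmm_lock and handed to firmware
 * with plpar_page_set_loaned(); allocation stops early if a memory hotplug
 * operation has occurred. Returns the number of pages left unfulfilled.
 */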
static long cmm_alloc_pages(long nr)
        cmm_dbg("Begin request for %ld pages\n", nr);
        if (hotplug_occurred) {
        spin_lock(&cmm_lock);
        spin_unlock(&cmm_lock);
        pr_info("%s: Can not allocate new page list\n", __func__);
        spin_lock(&cmm_lock);
        if ((rc = plpar_page_set_loaned(__pa(addr)))) {
                pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
                spin_unlock(&cmm_lock);
        spin_unlock(&cmm_lock);
        cmm_dbg("End request with %ld pages unfulfilled\n", nr);
static long cmm_free_pages(long nr)
        cmm_dbg("Begin free of %ld pages.\n", nr);
        spin_lock(&cmm_lock);
        if (!pa || pa->index <= 0)
        if (pa->index == 0) {
                free_page((unsigned long) cmm_page_list);
        plpar_page_set_active(__pa(addr));
        spin_unlock(&cmm_lock);
        cmm_dbg("End request with %ld pages unfulfilled\n", nr);
        unsigned long *freed = parm;

        cmm_dbg("OOM processing started\n");
        nr = cmm_free_pages(nr);
        loaned_pages_target = loaned_pages;
        cmm_dbg("OOM processing complete\n");
static void cmm_get_mpp(void)
        struct hvcall_mpp_data mpp_data;
        signed long active_pages_target, page_loan_request, target;
        signed long total_pages = totalram_pages + loaned_pages;
        signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

        page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
        target = page_loan_request + (signed long)loaned_pages;

        if (target < 0 || total_pages < min_mem_pages)
                target = 0;

        if (target > oom_freed_pages)
                target -= oom_freed_pages;

        active_pages_target = total_pages - target;

        if (min_mem_pages > active_pages_target)
                target = total_pages - min_mem_pages;

        loaned_pages_target = target;

        cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
                page_loan_request, loaned_pages, loaned_pages_target,
                oom_freed_pages, totalram_pages);

        .notifier_call = cmm_oom_notify
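
/*
 * cmm_thread - kernel thread that periodically polls the hypervisor via
 * cmm_get_mpp() and then loans or returns pages until loaned_pages matches
 * loaned_pages_target. Loaning is suspended while a memory hotplug remove
 * is in progress or has just occurred.
 */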
static int cmm_thread(void *dummy)
        unsigned long timeleft;

        if (hotplug_occurred) {
                hotplug_occurred = 0;
                cmm_dbg("Hotplug operation has occurred, "
                        "loaning activity suspended "
        cmm_dbg("Hotplug operation in progress, activity "

        if (loaned_pages_target > loaned_pages) {
                if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
                        loaned_pages_target = loaned_pages;
        } else if (loaned_pages_target < loaned_pages)
                cmm_free_pages(loaned_pages - loaned_pages_target);
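
/*
 * CMM_SHOW - helper macro that generates a read-only sysfs "show" routine
 * and the matching DEVICE_ATTR for a counter exported in kilobytes.
 */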
#define CMM_SHOW(name, format, args...)                                 \
        static ssize_t show_##name(struct device *dev,                  \
                                   struct device_attribute *attr,       \
                                   char *buf)                           \
        {                                                               \
                return sprintf(buf, format, ##args);                    \
        }                                                               \
        static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
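
/*
 * The loaned_kb and loaned_target_kb attributes referenced in the attribute
 * list below are presumably generated with CMM_SHOW; the exact invocations
 * shown here are an inference from that list.
 */
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));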

static ssize_t store_oom_pages(struct device *dev, struct device_attribute *attr,
                               const char *buf, size_t count)

static DEVICE_ATTR(oom_freed_kb, S_IWUSR | S_IRUGO,
                   show_oom_pages, store_oom_pages);

        &dev_attr_loaned_target_kb,
        &dev_attr_oom_freed_kb,

static struct bus_type cmm_subsys = {
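
/*
 * cmm_sysfs_register - register the cmm subsystem and device so the loaning
 * counters above are exported under sysfs.
 */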
static int cmm_sysfs_register(struct device *dev)
        dev->bus = &cmm_subsys;
        goto subsys_unregister;

static void cmm_unregister_sysfs(struct device *dev)

static int cmm_reboot_notifier(struct notifier_block *nb,
                               unsigned long action, void *unused)
                cmm_thread_ptr = NULL;
                cmm_free_pages(loaned_pages);

        .notifier_call = cmm_reboot_notifier,
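
/*
 * cmm_count_pages - walk cmm_page_list and count how many loaned pages (and
 * list pages themselves) fall inside the address range the memory isolation
 * notifier is asking about.
 */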
static unsigned long cmm_count_pages(void *arg)
        spin_lock(&cmm_lock);
        if ((unsigned long)pa >= start && (unsigned long)pa < end)
        for (idx = 0; idx < pa->index; idx++)
                if (pa->page[idx] >= start && pa->page[idx] < end)
        spin_unlock(&cmm_lock);
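
/*
 * Memory isolation notifier callback: when asked about a range being
 * isolated, report the number of CMM-owned pages in it via cmm_count_pages().
 */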
static int cmm_memory_isolate_cb(struct notifier_block *self,
                                 unsigned long action, void *arg)
        ret = cmm_count_pages(arg);
        return notifier_from_errno(ret);

        .notifier_call = cmm_memory_isolate_cb,
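
/*
 * cmm_mem_going_offline - called when a memory range is about to go offline.
 * Loaned pages in the affected range are made active again, removed from
 * cmm_page_list and released; list pages that themselves live in the range
 * are dealt with as well.
 */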
static int cmm_mem_going_offline(void *arg)
        unsigned long freed = 0;

        cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
        spin_lock(&cmm_lock);
        pa_last = pa_curr = cmm_page_list;

        for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
                if ((pa_curr->page[idx] < start_page) ||
                    (pa_curr->page[idx] >= end_page))

                plpar_page_set_active(__pa(pa_curr->page[idx]));

                if (pa_last->index == 0) {
                        if (pa_curr == pa_last)
                                pa_curr = pa_last->next;
                        pa_last = pa_last->next;
                        cmm_page_list = pa_last;

        pa_curr = pa_curr->next;

        pa_curr = cmm_page_list;
        if (((unsigned long)pa_curr >= start_page) &&
            ((unsigned long)pa_curr < end_page)) {
                spin_unlock(&cmm_lock);
                cmm_dbg("Failed to allocate memory for list "
                        "management. Memory hotplug "

        if (pa_curr == cmm_page_list)

        pa_curr = pa_curr->next;

        spin_unlock(&cmm_lock);
        cmm_dbg("Released %ld pages in the search range.\n", freed);
static int cmm_memory_cb(struct notifier_block *self,
                         unsigned long action, void *arg)
        hotplug_occurred = 1;
        ret = cmm_mem_going_offline(arg);

        cmm_dbg("Memory offline operation complete.\n");

        return notifier_from_errno(ret);

        .notifier_call = cmm_memory_cb,
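
/*
 * cmm_init - module entry point: bail out unless the firmware supports
 * cooperative memory overcommitment (FW_FEATURE_CMO), then register the OOM,
 * reboot and memory notifiers, set up sysfs and start cmm_thread.
 */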
static int cmm_init(void)
        if (!firmware_has_feature(FW_FEATURE_CMO))

        goto out_oom_notifier;

        if ((rc = cmm_sysfs_register(&cmm_dev)))
                goto out_reboot_notifier;

        goto out_unregister_notifier;

        if (IS_ERR(cmm_thread_ptr)) {
                rc = PTR_ERR(cmm_thread_ptr);
                goto out_unregister_notifier;

out_unregister_notifier:
        cmm_unregister_sysfs(&cmm_dev);
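
/*
 * cmm_exit - module exit: stop the thread, return all loaned pages and tear
 * down the sysfs entries.
 */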
static void cmm_exit(void)
        cmm_free_pages(loaned_pages);
        cmm_unregister_sysfs(&cmm_dev);
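
/*
 * cmm_set_disable - handler for the "disable" module parameter. Writing 1
 * stops cmm_thread and returns all loaned pages; writing 0 restarts the
 * thread. Any other value is rejected.
 */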
static int cmm_set_disable(const char *val, struct kernel_param *kp)
        if (disable != 0 && disable != 1)

        if (disable && !cmm_disabled) {
                cmm_thread_ptr = NULL;
                cmm_free_pages(loaned_pages);
        } else if (!disable && cmm_disabled) {
                if (IS_ERR(cmm_thread_ptr))
                        return PTR_ERR(cmm_thread_ptr);