10 #include <linux/kernel.h>
15 #include <linux/slab.h>
17 #include <linux/sched.h>
19 #include <linux/export.h>
/*
 * NOTE(review): incomplete macro fragment — interior continuation lines are
 * missing from this extraction and each remaining line carries a fused
 * source line number, so the definition does not preprocess as-is.  From
 * the visible lines it appears to invoke a per-device callback of the given
 * return `type`, trying genpd->dev_ops.callback first and falling back to
 * dev_gpd_data(dev)->ops.callback — TODO: recover the complete macro
 * (including the surrounding statement-expression and the if/else lines)
 * before editing.
 */
21 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
23 type (*__routine)(struct device *__d); \
24 type __ret = (type)0; \
26 __routine = genpd->dev_ops.callback; \
28 __ret = __routine(dev); \
30 __routine = dev_gpd_data(dev)->ops.callback; \
32 __ret = __routine(dev); \
/*
 * NOTE(review): incomplete macro fragment (interior lines elided, source
 * line numbers fused into the text).  The visible lines show a timed
 * wrapper around GENPD_DEV_CALLBACK(): it samples ktime before/after the
 * call and, when the call succeeded and the elapsed time exceeds the stored
 * worst case (__td->field), records the new maximum, warns via dev_warn(),
 * and sets genpd->max_off_time_changed / __td->constraint_changed.
 * Recover the full macro before editing.
 */
37 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
39 ktime_t __start = ktime_get(); \
40 type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
41 s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
42 struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
43 if (!__retval && __elapsed > __td->field) { \
44 __td->field = __elapsed; \
45 dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
47 genpd->max_off_time_changed = true; \
48 __td->constraint_changed = true; \
/*
 * NOTE(review): sparse fragments of several helpers — interior lines are
 * elided and statements are split mid-token, so nothing below compiles
 * as-is.  The visible lines appear to belong to a by-name domain lookup
 * (strcmp against gpd->name), the "stop"/"start" timed device callbacks,
 * a CPU exit-latency recalculation (saved_exit_latency feeding
 * idle_state->exit_latency), and the core power-on path: bump subdomain
 * counters on masters, power masters on first, time the power_on callback,
 * warn when the measured latency grows, and roll the counters back on
 * failure — TODO: recover the complete source before attempting any code
 * change here.
 */
60 if (IS_ERR_OR_NULL(domain_name))
65 if (!
strcmp(gpd->name, domain_name)) {
87 stop_latency_ns,
"stop");
93 start_latency_ns,
"start");
156 usecs64 += genpd->
cpu_data->saved_exit_latency;
157 genpd->
cpu_data->idle_state->exit_latency = usecs64;
193 genpd_set_active(genpd);
199 genpd->
cpu_data->idle_state->disabled =
true;
210 genpd_sd_counter_inc(link->
master);
215 ret = pm_genpd_poweron(link->
master);
226 genpd_sd_counter_dec(link->
master);
239 elapsed_ns = ktime_to_ns(ktime_sub(
ktime_get(), time_start));
243 genpd_recalc_cpu_exit_latency(genpd);
246 "new value %lld ns\n", genpd->
name,
252 genpd_set_active(genpd);
258 genpd_sd_counter_dec(link->
master);
272 ret = __pm_genpd_poweron(genpd);
/**
 * pm_genpd_name_poweron - Restore power to a PM domain looked up by name.
 * @domain_name: Name of the PM domain to power up.
 *
 * Resolves @domain_name to a generic PM domain and powers it on.
 * Returns -EINVAL when no domain with that name exists, otherwise the
 * result of pm_genpd_poweron().
 *
 * NOTE(review): reconstructed from a garbled extraction (fused line numbers
 * and mid-statement breaks removed).  The signature, the lookup call and
 * the ternary return are taken verbatim from the visible fragment; only the
 * braces and the local declaration are restored.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
/*
 * NOTE(review): sparse fragments of the CONFIG_PM_RUNTIME section —
 * interior lines are elided, so nothing below compiles as-is.  The visible
 * lines appear to span: the "state save"/"state restore" timed callbacks,
 * a PM QoS notifier that walks domain_data under dev->power.lock, the
 * save/restore-device helpers (start, save/restore, stop), the power-off
 * path (not_suspended accounting, governor power_down_ok check, -EBUSY on
 * busy devices, abort checks), the power-off worker, and the runtime
 * suspend/resume callbacks (governor stop_ok check, irq_safe short-circuit).
 * TODO: recover the complete source before attempting any code change here.
 */
291 #ifdef CONFIG_PM_RUNTIME
302 save_state_latency_ns,
"state save");
308 restore_state_latency_ns,
313 unsigned long val,
void *
ptr)
321 dev = gpd_data->
base.dev;
332 spin_lock_irq(&dev->
power.lock);
334 pdd = dev->
power.subsys_data ?
335 dev->
power.subsys_data->domain_data :
NULL;
336 if (pdd && pdd->
dev) {
337 to_gpd_data(pdd)->td.constraint_changed =
true;
338 genpd = dev_to_genpd(dev);
343 spin_unlock_irq(&dev->
power.lock);
345 if (!IS_ERR(genpd)) {
352 if (!dev || dev->
power.ignore_children)
377 genpd_start_dev(genpd, dev);
378 ret = genpd_save_dev(genpd, dev);
379 genpd_stop_dev(genpd, dev);
405 genpd_start_dev(genpd, dev);
407 genpd_restore_dev(genpd, dev);
453 unsigned int not_suspended;
474 if (pdd->dev->
driver && (!pm_runtime_suspended(pdd->dev)
475 || pdd->dev->power.irq_safe))
478 if (not_suspended > genpd->in_progress)
481 if (genpd->poweroff_task) {
490 if (genpd->
gov && genpd->
gov->power_down_ok) {
491 if (!genpd->
gov->power_down_ok(&genpd->
domain))
500 __pm_genpd_save_device(pdd, genpd) : -
EBUSY;
502 if (genpd_abort_poweroff(genpd))
506 genpd_set_active(genpd);
526 genpd->
cpu_data->idle_state->disabled =
false;
552 genpd_set_active(genpd);
556 elapsed_ns = ktime_to_ns(ktime_sub(
ktime_get(), time_start));
562 "new value %lld ns\n", genpd->
name,
570 genpd_sd_counter_dec(link->
master);
571 genpd_queue_power_off_work(link->
master);
590 genpd_acquire_lock(genpd);
591 pm_genpd_poweroff(genpd);
592 genpd_release_lock(genpd);
609 dev_dbg(dev,
"%s()\n", __func__);
611 genpd = dev_to_genpd(dev);
617 stop_ok = genpd->
gov ? genpd->
gov->stop_ok :
NULL;
618 if (stop_ok && !stop_ok(dev))
621 ret = genpd_stop_dev(genpd, dev);
629 if (dev->
power.irq_safe)
634 pm_genpd_poweroff(genpd);
655 dev_dbg(dev,
"%s()\n", __func__);
657 genpd = dev_to_genpd(dev);
664 if (dev->
power.irq_safe)
665 return genpd_start_dev_no_timing(genpd, dev);
668 ret = __pm_genpd_poweron(genpd);
692 __pm_genpd_restore_device(dev->
power.subsys_data->domain_data, genpd);
694 genpd_set_active(genpd);
/* Fragment of pm_genpd_poweroff_unused(): queues power-off work per domain. */
704 void pm_genpd_poweroff_unused(
void)
711 genpd_queue_power_off_work(genpd);
/* Fragment of the !CONFIG_PM_RUNTIME notifier stub (body elided). */
718 static inline int genpd_dev_pm_qos_notifier(
struct notifier_block *nb,
719 unsigned long val,
void *ptr)
/*
 * NOTE(review): extraction artifacts removed (fused source line number
 * "724" and forced mid-token line breaks).  This looks like the stub used
 * when runtime PM is not configured — the surrounding #else is elided from
 * this extraction, so confirm against the full file.  The body is
 * intentionally empty: presumably power-off work is never queued in that
 * configuration.
 */
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
/*
 * No runtime PM configured: export no runtime callbacks from genpd.
 * (Extraction artifacts — fused source line numbers — removed.)
 */
#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL
/*
 * NOTE(review): sparse fragments of the CONFIG_PM_SLEEP section — interior
 * lines are elided, so nothing below compiles as-is.  The visible lines
 * appear to span: sync poweroff/poweron propagation to masters (subdomain
 * counter bookkeeping), an active-wakeup decision helper built on
 * device_can_wakeup()/device_may_wakeup(), pm_genpd_prepare() (runtime PM
 * get/put balancing around genpd locking), and the long run of
 * suspend/resume/freeze/thaw/restore callbacks that each start with the
 * same dev_dbg + dev_to_genpd() prologue, plus pm_genpd_syscore_switch().
 * TODO: recover the complete source before attempting any code change here.
 */
731 #ifdef CONFIG_PM_SLEEP
741 if (IS_ERR_OR_NULL(genpd))
826 genpd_sd_counter_dec(link->
master);
827 pm_genpd_sync_poweroff(link->
master);
848 pm_genpd_sync_poweron(link->
master);
849 genpd_sd_counter_inc(link->
master);
878 if (!device_can_wakeup(dev))
881 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
882 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
899 dev_dbg(dev,
"%s()\n", __func__);
901 genpd = dev_to_genpd(dev);
910 pm_runtime_get_noresume(dev);
915 pm_runtime_put_sync(dev);
919 if (resume_needed(dev, genpd))
920 pm_runtime_resume(dev);
922 genpd_acquire_lock(genpd);
929 genpd_release_lock(genpd);
932 pm_runtime_put_noidle(dev);
942 pm_runtime_resume(dev);
956 pm_runtime_put_sync(dev);
972 dev_dbg(dev,
"%s()\n", __func__);
974 genpd = dev_to_genpd(dev);
993 dev_dbg(dev,
"%s()\n", __func__);
995 genpd = dev_to_genpd(dev);
1013 dev_dbg(dev,
"%s()\n", __func__);
1015 genpd = dev_to_genpd(dev);
1020 || (dev->
power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
1023 genpd_stop_dev(genpd, dev);
1031 pm_genpd_sync_poweroff(genpd);
1046 dev_dbg(dev,
"%s()\n", __func__);
1048 genpd = dev_to_genpd(dev);
1053 || (dev->
power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
1061 pm_genpd_sync_poweron(genpd);
1064 return genpd_start_dev(genpd, dev);
1080 dev_dbg(dev,
"%s()\n", __func__);
1082 genpd = dev_to_genpd(dev);
1101 dev_dbg(dev,
"%s()\n", __func__);
1103 genpd = dev_to_genpd(dev);
1122 dev_dbg(dev,
"%s()\n", __func__);
1124 genpd = dev_to_genpd(dev);
1144 dev_dbg(dev,
"%s()\n", __func__);
1146 genpd = dev_to_genpd(dev);
1166 dev_dbg(dev,
"%s()\n", __func__);
1168 genpd = dev_to_genpd(dev);
1186 dev_dbg(dev,
"%s()\n", __func__);
1188 genpd = dev_to_genpd(dev);
1208 dev_dbg(dev,
"%s()\n", __func__);
1210 genpd = dev_to_genpd(dev);
1229 dev_dbg(dev,
"%s()\n", __func__);
1231 genpd = dev_to_genpd(dev);
1249 dev_dbg(dev,
"%s()\n", __func__);
1251 genpd = dev_to_genpd(dev);
1285 pm_genpd_sync_poweron(genpd);
1287 return genpd_start_dev(genpd, dev);
1304 dev_dbg(dev,
"%s()\n", __func__);
1306 genpd = dev_to_genpd(dev);
1320 pm_runtime_set_active(dev);
1322 pm_runtime_idle(dev);
/* Fragment of pm_genpd_syscore_switch(): sync poweroff/poweron by flag. */
1333 void pm_genpd_syscore_switch(
struct device *dev,
bool suspend)
1337 genpd = dev_to_genpd(dev);
1338 if (!pm_genpd_present(genpd))
1343 pm_genpd_sync_poweroff(genpd);
1345 pm_genpd_sync_poweron(genpd);
/*
 * No system sleep support configured: genpd contributes no sleep
 * callbacks.  (Extraction artifacts — fused source line numbers —
 * removed.)
 */
#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
#define pm_genpd_suspend_late NULL
#define pm_genpd_suspend_noirq NULL
#define pm_genpd_resume_early NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
#define pm_genpd_freeze_late NULL
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_early NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_complete NULL
/*
 * NOTE(review): sparse fragments of the device/subdomain administration
 * helpers — interior lines are elided, so nothing below compiles as-is.
 * The visible lines appear to span: __pm_genpd_alloc_dev_data() (kzalloc +
 * QoS notifier hookup), __pm_genpd_add_device() (duplicate check, install
 * of domain_data under dev->power.lock, constraint init, freeing the
 * unused allocation), an of_node-based add variant, device removal
 * (detaching domain_data under the lock), the need_restore setter, and
 * subdomain add/remove with master/slave link walking and subdomain
 * counter bookkeeping.  TODO: recover the complete source before
 * attempting any code change here.
 */
1375 gpd_data = kzalloc(
sizeof(*gpd_data),
GFP_KERNEL);
1380 gpd_data->
nb.notifier_call = genpd_dev_pm_qos_notifier;
/* Fragment of the dev-data free helper (body elided). */
1385 static void __pm_genpd_free_dev_data(
struct device *dev,
1405 dev_dbg(dev,
"%s()\n", __func__);
1407 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1410 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1414 genpd_acquire_lock(genpd);
1422 if (pdd->
dev == dev) {
1434 spin_lock_irq(&dev->
power.lock);
1437 if (dev->
power.subsys_data->domain_data) {
1438 gpd_data = to_gpd_data(dev->
power.subsys_data->domain_data);
1440 gpd_data = gpd_data_new;
1441 dev->
power.subsys_data->domain_data = &gpd_data->
base;
1447 spin_unlock_irq(&dev->
power.lock);
1453 gpd_data->
td.constraint_changed =
true;
1454 gpd_data->
td.effective_constraint_ns = -1;
1458 genpd_release_lock(genpd);
1460 if (gpd_data != gpd_data_new)
1461 __pm_genpd_free_dev_data(dev, gpd_data_new);
1478 dev_dbg(dev,
"%s()\n", __func__);
1480 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1485 if (gpd->of_node == genpd_node) {
1521 bool remove =
false;
1524 dev_dbg(dev,
"%s()\n", __func__);
1526 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1528 || pd_to_genpd(dev->
pm_domain) != genpd)
1531 genpd_acquire_lock(genpd);
1541 spin_lock_irq(&dev->
power.lock);
1544 pdd = dev->
power.subsys_data->domain_data;
1546 gpd_data = to_gpd_data(pdd);
1548 dev->
power.subsys_data->domain_data =
NULL;
1552 spin_unlock_irq(&dev->
power.lock);
1558 genpd_release_lock(genpd);
1562 __pm_genpd_free_dev_data(dev, gpd_data);
1567 genpd_release_lock(genpd);
1580 unsigned long flags;
1584 psd = dev_to_psd(dev);
1585 if (psd && psd->domain_data)
1586 to_gpd_data(psd->domain_data)->need_restore =
val;
1588 spin_unlock_irqrestore(&dev->
power.lock, flags);
1603 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1604 || genpd == subdomain)
1608 genpd_acquire_lock(genpd);
1614 genpd_release_lock(genpd);
1625 if (link->
slave == subdomain && link->
master == genpd) {
1638 link->
slave = subdomain;
1641 genpd_sd_counter_inc(genpd);
1645 genpd_release_lock(genpd);
1656 const char *subdomain_name)
1660 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1665 if (!master && !
strcmp(gpd->name, master_name))
1668 if (!subdomain && !
strcmp(gpd->name, subdomain_name))
1671 if (master && subdomain)
1690 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1694 genpd_acquire_lock(genpd);
1697 if (link->
slave != subdomain)
1705 genpd_release_lock(genpd);
1713 genpd_sd_counter_dec(genpd);
1721 genpd_release_lock(genpd);
/*
 * NOTE(review): sparse fragments — interior lines are elided, so nothing
 * below compiles as-is.  The visible lines appear to span: the add/remove
 * device-callbacks helpers (runtime PM disabled around the update,
 * domain_data installed or cleared under dev->power.lock, unused
 * allocation freed), and the cpuidle attach/detach helpers (kzalloc of
 * cpu_data, exit-latency recalculation, all under the genpd lock).
 * TODO: recover the complete source before attempting any code change
 * here.
 */
1744 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1748 pm_runtime_disable(dev);
1755 spin_lock_irq(&dev->
power.lock);
1757 if (dev->
power.subsys_data->domain_data) {
1758 gpd_data = to_gpd_data(dev->
power.subsys_data->domain_data);
1760 gpd_data = gpd_data_new;
1761 dev->
power.subsys_data->domain_data = &gpd_data->
base;
1768 spin_unlock_irq(&dev->
power.lock);
1774 if (gpd_data != gpd_data_new)
1775 __pm_genpd_free_dev_data(dev, gpd_data_new);
1791 bool remove =
false;
1794 if (!(dev && dev->
power.subsys_data))
1797 pm_runtime_disable(dev);
1800 spin_lock_irq(&dev->
power.lock);
1802 if (dev->
power.subsys_data->domain_data) {
1803 gpd_data = to_gpd_data(dev->
power.subsys_data->domain_data);
1809 dev->
power.subsys_data->domain_data =
NULL;
1816 spin_unlock_irq(&dev->
power.lock);
1826 __pm_genpd_free_dev_data(dev, gpd_data);
1848 if (IS_ERR_OR_NULL(genpd) || state < 0)
1851 genpd_acquire_lock(genpd);
1857 cpu_data = kzalloc(
sizeof(*cpu_data),
GFP_KERNEL);
1879 genpd_recalc_cpu_exit_latency(genpd);
1882 genpd_release_lock(genpd);
1916 if (IS_ERR_OR_NULL(genpd))
1919 genpd_acquire_lock(genpd);
1937 genpd_release_lock(genpd);
/*
 * NOTE(review): sparse fragments of the default device-callback helpers —
 * interior lines are elided, so nothing below compiles as-is.  The visible
 * lines of pm_genpd_default_save_state()/pm_genpd_default_restore_state()
 * show a callback-resolution cascade: the per-device ops override first,
 * then dev->type->pm, dev->class->pm, dev->bus->pm, and finally
 * dev->driver->pm, returning 0 when no callback is found.  The later
 * fragments are the CONFIG_PM_SLEEP default suspend/resume/freeze/thaw
 * helpers, each falling back to the matching pm_generic_*() routine.
 * TODO: recover the complete source before attempting any code change
 * here.
 */
1956 static int pm_genpd_default_save_state(
struct device *dev)
1960 cb = dev_gpd_data(dev)->ops.save_state;
1965 cb = dev->
type->pm->runtime_suspend;
1967 cb = dev->
class->pm->runtime_suspend;
1968 else if (dev->
bus && dev->
bus->pm)
1969 cb = dev->
bus->pm->runtime_suspend;
1974 cb = dev->
driver->pm->runtime_suspend;
1976 return cb ?
cb(dev) : 0;
/* Fragment of the mirror-image restore-state resolver. */
1983 static int pm_genpd_default_restore_state(
struct device *dev)
1987 cb = dev_gpd_data(dev)->ops.restore_state;
1992 cb = dev->
type->pm->runtime_resume;
1994 cb = dev->
class->pm->runtime_resume;
1995 else if (dev->
bus && dev->
bus->pm)
1996 cb = dev->
bus->pm->runtime_resume;
2001 cb = dev->
driver->pm->runtime_resume;
2003 return cb ?
cb(dev) : 0;
2006 #ifdef CONFIG_PM_SLEEP
2014 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
2025 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
2027 return cb ?
cb(dev) : pm_generic_suspend_late(dev);
2036 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
2038 return cb ?
cb(dev) : pm_generic_resume_early(dev);
2047 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.resume;
2058 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
2069 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
2071 return cb ?
cb(dev) : pm_generic_freeze_late(dev);
2080 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
2082 return cb ?
cb(dev) : pm_generic_thaw_early(dev);
2091 int (*
cb)(
struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
/*
 * No system sleep support configured: no default sleep ops are installed.
 * (Extraction artifacts — fused source line numbers — removed.)
 */
#define pm_genpd_default_suspend NULL
#define pm_genpd_default_suspend_late NULL
#define pm_genpd_default_resume_early NULL
#define pm_genpd_default_resume NULL
#define pm_genpd_default_freeze NULL
#define pm_genpd_default_freeze_late NULL
#define pm_genpd_default_thaw_early NULL
#define pm_genpd_default_thaw NULL
/*
 * NOTE(review): fragment of the domain-initialization routine (interior
 * lines elided).  The visible lines show the NULL/ERR guard and the wiring
 * of the generic runtime_idle op plus the default save/restore state
 * callbacks into the new domain — TODO: recover the complete source before
 * attempting any code change here.
 */
2118 if (IS_ERR_OR_NULL(genpd))
2138 genpd->
domain.ops.runtime_idle = pm_generic_runtime_idle;
2159 genpd->
dev_ops.save_state = pm_genpd_default_save_state;
2160 genpd->
dev_ops.restore_state = pm_genpd_default_restore_state;