#include <linux/sched.h>
#include <linux/export.h>
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
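/*
 * Runtime-PM accounting fragment (likely from update_pm_runtime_accounting()):
 * credit the time elapsed since the last timestamp, unless runtime PM is
 * currently disabled for the device.
 */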
        delta = now - dev->power.accounting_timestamp;
        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;
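/* Stop the autosuspend timer and forget its expiration time. */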
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}
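/* Cancel the suspend timer along with any pending asynchronous request. */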
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        dev->power.request = RPM_REQ_NONE;
}
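/*
 * Fragment likely from pm_runtime_autosuspend_expiration(): compute the
 * jiffies value at which the autosuspend timer should expire, or 0 if the
 * delay has already expired or autosuspend is not in use.
 */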
        int autosuspend_delay;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        if (autosuspend_delay < 0)
                goto out;

        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */
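/* Check whether runtime suspend is allowed for the device at this point. */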
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;

        return retval;
}
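/*
 * Run the given runtime-PM callback with dev->power.lock released; irq-safe
 * devices use the plain spinlock calls so this may run with interrupts off.
 */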
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}
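/*
 * rpm_idle - Run an idle notification for the device, or suspend it directly
 * if it has no callbacks (power.no_callbacks set).  The notification is
 * carried out synchronously or, with RPM_ASYNC, queued on pm_wq.
 */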
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (!dev->power.request_pending) {
                dev->power.request_pending = true;
                queue_work(pm_wq, &dev->power.work);
        }

        dev->power.idle_notification = true;

        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;

        dev->power.idle_notification = false;

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
}
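/*
 * Run a callback through __rpm_callback() and normalize its result; -EACCES
 * is reserved for "runtime PM disabled", so a callback returning it is
 * reported as -EIO.
 */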
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        retval = __rpm_callback(cb, dev);

        return retval != -EACCES ? retval : -EIO;
}
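/*
 * rpm_suspend - Carry out runtime suspend of a device: check whether the
 * suspend is allowed, honor a still-running autosuspend delay, and invoke
 * the ->runtime_suspend() callback directly or queue a request (RPM_ASYNC).
 * Called with dev->power.lock held.
 */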
static int rpm_suspend(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /*
                         * Optimization: leave a timer already set to expire
                         * at or before the new deadline alone.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                cpu_relax();

                spin_lock(&dev->power.lock);
                goto repeat;
        }

        /* Wait for the other suspend running in parallel with us. */
        spin_unlock_irq(&dev->power.lock);

        schedule();

        spin_lock_irq(&dev->power.lock);

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;

        retval = rpm_callback(callback, dev);
        if (retval)
                goto fail;

 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;

 fail:
        dev->power.deferred_resume = false;
        if (retval == -EAGAIN || retval == -EBUSY) {
                dev->power.runtime_error = 0;

                /*
                 * If the callback failed an autosuspend and a new expiration
                 * time is available, reschedule another autosuspend.
                 */
                if ((rpmflags & RPM_AUTO) &&
                    pm_runtime_autosuspend_expiration(dev) != 0)
                        goto repeat;
        } else {
                pm_runtime_cancel_pending(dev);
        }
}
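/*
 * rpm_resume - Carry out runtime resume of a device: check whether resuming
 * is allowed, wake up the parent first if necessary, and invoke the
 * ->runtime_resume() callback directly or queue a request (RPM_ASYNC).
 * Called with dev->power.lock held.
 */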
static int rpm_resume(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth == 1 && dev->power.is_suspended
            && dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;

        /*
         * Optimization: leave a running autosuspend timer alone rather than
         * cancelling it now only to restart it again shortly.
         */
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        dev->power.deferred_resume = true;

        if (dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                cpu_relax();

                spin_lock(&dev->power.lock);
                goto repeat;
        }

        /* Wait for the operation carried out in parallel with us. */
        spin_unlock_irq(&dev->power.lock);

        schedule();

        spin_lock_irq(&dev->power.lock);

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        retval = 1;
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.
                 */
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * Resume the parent if it has runtime PM enabled and has not
                 * been set to ignore its children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        }

        rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);
}
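/*
 * Fragment likely from pm_runtime_work(), the pm_wq work function: take the
 * pending request under dev->power.lock, clear it, and carry it out.
 */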
        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

 out:
        spin_unlock_irq(&dev->power.lock);
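/*
 * Expiration handler for the autosuspend timer; 'data' is the device
 * pointer that was packed into the timer at setup time.
 */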
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
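/*
 * Fragment likely from pm_schedule_suspend(): with no delay, submit an
 * asynchronous suspend request right away; otherwise (re)arm the suspend
 * timer.
 */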
        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);
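/*
 * Locked cores of what appear to be the __pm_runtime_idle(),
 * __pm_runtime_suspend() and __pm_runtime_resume() wrappers: each calls the
 * corresponding rpm_*() helper under dev->power.lock.
 */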
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
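/*
 * Fragment likely from __pm_runtime_set_status(): force the runtime-PM
 * status of a device whose runtime PM is disabled, fixing up the parent's
 * child count and notifying it if appropriate.
 */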
        bool notify_parent = false;

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                retval = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (parent) {
                atomic_add_unless(&parent->power.child_count, -1, 0);
                notify_parent = !parent->power.ignore_children;
        }

        /*
         * It is invalid to put an active child under a parent that is
         * not active, has runtime PM enabled and the 'power.ignore_children'
         * flag unset.
         */
        if (!parent->power.disable_depth
            && !parent->power.ignore_children
            && parent->power.runtime_status != RPM_ACTIVE)
                retval = -EBUSY;

        spin_unlock(&parent->power.lock);

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);
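/*
 * Flush all pending requests for the device and wait for any runtime-PM
 * operation in progress to complete.  Called with dev->power.lock held.
 */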
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}
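/*
 * Fragment likely from pm_runtime_barrier(): resume the device if a resume
 * request is pending, then flush everything else with __pm_runtime_barrier().
 */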
        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);
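/*
 * Fragment likely from __pm_runtime_disable(): increment disable_depth and,
 * on the 0 -> 1 transition, optionally carry out a pending resume request
 * before flushing all other runtime-PM activity.
 */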
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because
         * that means there probably is some I/O to process.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /* Prevent suspends and idle notifications after resuming. */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
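/* Fragment likely from pm_runtime_enable(): decrement disable_depth. */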
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
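/*
 * Fragment likely from pm_runtime_forbid(): block runtime PM of the device
 * by taking a usage count and resuming it.
 */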
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
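/*
 * Fragment likely from pm_runtime_allow(): re-allow runtime PM and try an
 * autosuspend-style idle notification once the usage count drops to zero.
 */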
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
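/*
 * Fragment likely from pm_runtime_no_callbacks(): mark the device as having
 * no runtime-PM callbacks, so idle/suspend/resume can skip invoking them.
 */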
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
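/*
 * Fragment likely from pm_runtime_irq_safe(): permanently resume the parent
 * and mark the device's callbacks as safe to run with interrupts disabled.
 */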
        pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
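/*
 * Apply a change of the autosuspend delay or use_autosuspend flag; called
 * under dev->power.lock by the two setters below.
 */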
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {
                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        } else {
                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);
                rpm_idle(dev, RPM_AUTO);
        }
}
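/*
 * Fragment likely from pm_runtime_set_autosuspend_delay(): update the delay
 * under the lock and let update_autosuspend() act on the change.
 */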
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
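/*
 * Fragment likely from __pm_runtime_use_autosuspend(): switch the
 * use_autosuspend flag under the lock and act on the change.
 */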
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
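/*
 * Fragment likely from pm_runtime_init(): set up the initial runtime-PM
 * state of a device, with runtime PM disabled (disable_depth = 1) and the
 * autosuspend timer prepared but not armed.
 */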
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;

        dev->power.runtime_error = 0;

        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;

        dev->power.deferred_resume = false;

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);
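/*
 * Fragment likely from pm_runtime_remove(): a device going away is treated
 * as suspended; drop the parent reference taken by pm_runtime_irq_safe().
 */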
        pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);