Linux Kernel 3.7.1
drivers/base/power/main.c
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
19 
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/resume-trace.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/async.h>
30 #include <linux/suspend.h>
31 #include <linux/cpuidle.h>
32 #include "../base.h"
33 #include "power.h"
34 
typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of active devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
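
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver's dev_pm_ops showing which members the three dispatchers above
 * select for the CONFIG_SUSPEND events.  pm_op() picks .suspend/.resume,
 * pm_late_early_op() picks .suspend_late/.resume_early, and pm_noirq_op()
 * picks .suspend_noirq/.resume_noirq.  All foo_* names are made up.
 */
static int foo_suspend(struct device *dev)       { return 0; } /* via pm_op() */
static int foo_suspend_late(struct device *dev)  { return 0; } /* via pm_late_early_op() */
static int foo_suspend_noirq(struct device *dev) { return 0; } /* via pm_noirq_op() */
static int foo_resume_noirq(struct device *dev)  { return 0; } /* via pm_noirq_op() */
static int foo_resume_early(struct device *dev)  { return 0; } /* via pm_late_early_op() */
static int foo_resume(struct device *dev)        { return 0; } /* via pm_op() */

static const struct dev_pm_ops foo_pm_ops = {
        .suspend       = foo_suspend,
        .suspend_late  = foo_suspend_late,
        .suspend_noirq = foo_suspend_noirq,
        .resume_noirq  = foo_resume_noirq,
        .resume_early  = foo_resume_early,
        .resume        = foo_resume,
};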

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
               dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put_sync(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted (an illustrative sketch
         * of a wakeup-capable driver follows this function).
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        if (error)
                async_error = error;
        else if (dev->power.is_suspended)
                __pm_runtime_disable(dev, false);

        return error;
}
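
/*
 * Illustrative sketch (not part of the original file): how a wakeup-capable
 * driver feeds the machinery checked by pm_runtime_barrier() and
 * pm_wakeup_pending() above.  bar_probe() declares the device
 * wakeup-capable and bar_irq_handler() reports a wakeup event, which will
 * abort a system suspend that is in progress.  All bar_* names are made up
 * and error handling is elided.
 */
static irqreturn_t bar_irq_handler(int irq, void *data)
{
        struct device *dev = data;

        pm_wakeup_event(dev, 0);        /* counts as a pending wakeup event */
        return IRQ_HANDLED;
}

static int bar_probe(struct device *dev)
{
        device_init_wakeup(dev, true);  /* device may wake up the system */
        return 0;
}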

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                               "for power transition: code %d\n",
                               dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

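/*
 * Illustrative sketch (not part of the original file): a driver whose device
 * must wait for a functionally related device (not an ancestor in dpm_list)
 * to finish its own transition can synchronize on it with
 * device_pm_wait_for_dev() from a PM callback.  baz_data and the baz_* names
 * are made up.
 */
struct baz_data {
        struct device *partner;         /* device we depend on */
};

static int baz_suspend(struct device *dev)
{
        struct baz_data *bd = dev_get_drvdata(dev);
        int error;

        /* Wait for the partner's (possibly asynchronous) PM op to finish. */
        error = device_pm_wait_for_dev(dev, bd->partner);
        if (error)
                return error;

        /* ... quiesce the hardware ... */
        return 0;
}
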
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
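
/*
 * Illustrative sketch (not part of the original file): counting the devices
 * currently on dpm_list with dpm_for_each_dev().  The callback runs with
 * dpm_list_mtx held, so, per the comment near the top of this file, it must
 * not try to acquire any device lock.  count_dev() and count_pm_devices()
 * are made-up names.
 */
static void count_dev(struct device *dev, void *data)
{
        unsigned int *count = data;

        (*count)++;
}

static unsigned int count_pm_devices(void)
{
        unsigned int count = 0;

        dpm_for_each_dev(&count, count_dev);
        return count;
}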