Linux Kernel 3.7.1
domain.c
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_qos.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/suspend.h>
19 #include <linux/export.h>
20 
21 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
22 ({ \
23  type (*__routine)(struct device *__d); \
24  type __ret = (type)0; \
25  \
26  __routine = genpd->dev_ops.callback; \
27  if (__routine) { \
28  __ret = __routine(dev); \
29  } else { \
30  __routine = dev_gpd_data(dev)->ops.callback; \
31  if (__routine) \
32  __ret = __routine(dev); \
33  } \
34  __ret; \
35 })
36 
37 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
38 ({ \
39  ktime_t __start = ktime_get(); \
40  type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
41  s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
42  struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
43  if (!__retval && __elapsed > __td->field) { \
44  __td->field = __elapsed; \
45  dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
46  __elapsed); \
47  genpd->max_off_time_changed = true; \
48  __td->constraint_changed = true; \
49  } \
50  __retval; \
51 })
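
Editor's note: GENPD_DEV_CALLBACK() prefers the domain-wide callback in genpd->dev_ops and falls back to the per-device callback in dev_gpd_data(dev)->ops, evaluating to (type)0 when neither is set. A rough sketch (not literal preprocessor output) of what GENPD_DEV_CALLBACK(genpd, int, stop, dev) amounts to; the helper name is invented for illustration and is not part of domain.c:

static int genpd_dev_callback_stop_sketch(struct generic_pm_domain *genpd,
					   struct device *dev)
{
	int (*routine)(struct device *__d);

	/* The domain-wide callback wins ... */
	routine = genpd->dev_ops.stop;
	/* ... otherwise fall back to the per-device one. */
	if (!routine)
		routine = dev_gpd_data(dev)->ops.stop;

	return routine ? routine(dev) : 0;
}
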
52 
53 static LIST_HEAD(gpd_list);
54 static DEFINE_MUTEX(gpd_list_lock);
55 
56 static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
57 {
58  struct generic_pm_domain *genpd = NULL, *gpd;
59 
60  if (IS_ERR_OR_NULL(domain_name))
61  return NULL;
62 
63  mutex_lock(&gpd_list_lock);
64  list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
65  if (!strcmp(gpd->name, domain_name)) {
66  genpd = gpd;
67  break;
68  }
69  }
70  mutex_unlock(&gpd_list_lock);
71  return genpd;
72 }
73 
74 #ifdef CONFIG_PM
75 
76 struct generic_pm_domain *dev_to_genpd(struct device *dev)
77 {
78  if (IS_ERR_OR_NULL(dev->pm_domain))
79  return ERR_PTR(-EINVAL);
80 
81  return pd_to_genpd(dev->pm_domain);
82 }
83 
84 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
85 {
86  return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
87  stop_latency_ns, "stop");
88 }
89 
90 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
91 {
92  return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
93  start_latency_ns, "start");
94 }
95 
96 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
97 {
98  bool ret = false;
99 
100  if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
101  ret = !!atomic_dec_and_test(&genpd->sd_count);
102 
103  return ret;
104 }
105 
106 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
107 {
108  atomic_inc(&genpd->sd_count);
109  smp_mb__after_atomic_inc();
110 }
111 
112 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
113 {
114  DEFINE_WAIT(wait);
115 
116  mutex_lock(&genpd->lock);
117  /*
118  * Wait for the domain to transition into either the active,
119  * or the power off state.
120  */
121  for (;;) {
122  prepare_to_wait(&genpd->status_wait_queue, &wait,
123  TASK_UNINTERRUPTIBLE);
124  if (genpd->status == GPD_STATE_ACTIVE
125  || genpd->status == GPD_STATE_POWER_OFF)
126  break;
127  mutex_unlock(&genpd->lock);
128 
129  schedule();
130 
131  mutex_lock(&genpd->lock);
132  }
133  finish_wait(&genpd->status_wait_queue, &wait);
134 }
135 
136 static void genpd_release_lock(struct generic_pm_domain *genpd)
137 {
138  mutex_unlock(&genpd->lock);
139 }
140 
141 static void genpd_set_active(struct generic_pm_domain *genpd)
142 {
143  if (genpd->resume_count == 0)
144  genpd->status = GPD_STATE_ACTIVE;
145 }
146 
147 static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
148 {
149  s64 usecs64;
150 
151  if (!genpd->cpu_data)
152  return;
153 
154  usecs64 = genpd->power_on_latency_ns;
155  do_div(usecs64, NSEC_PER_USEC);
156  usecs64 += genpd->cpu_data->saved_exit_latency;
157  genpd->cpu_data->idle_state->exit_latency = usecs64;
158 }
159 
167 static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
168  __releases(&genpd->lock) __acquires(&genpd->lock)
169 {
170  struct gpd_link *link;
171  DEFINE_WAIT(wait);
172  int ret = 0;
173 
174  /* If the domain's master is being waited for, we have to wait too. */
175  for (;;) {
176  prepare_to_wait(&genpd->status_wait_queue, &wait,
177  TASK_UNINTERRUPTIBLE);
178  if (genpd->status != GPD_STATE_WAIT_MASTER)
179  break;
180  mutex_unlock(&genpd->lock);
181 
182  schedule();
183 
184  mutex_lock(&genpd->lock);
185  }
186  finish_wait(&genpd->status_wait_queue, &wait);
187 
188  if (genpd->status == GPD_STATE_ACTIVE
189  || (genpd->prepared_count > 0 && genpd->suspend_power_off))
190  return 0;
191 
192  if (genpd->status != GPD_STATE_POWER_OFF) {
193  genpd_set_active(genpd);
194  return 0;
195  }
196 
197  if (genpd->cpu_data) {
198  cpuidle_pause_and_lock();
199  genpd->cpu_data->idle_state->disabled = true;
200  cpuidle_resume_and_unlock();
201  goto out;
202  }
203 
204  /*
205  * The list is guaranteed not to change while the loop below is being
206  * executed, unless one of the masters' .power_on() callbacks fiddles
207  * with it.
208  */
209  list_for_each_entry(link, &genpd->slave_links, slave_node) {
210  genpd_sd_counter_inc(link->master);
211  genpd->status = GPD_STATE_WAIT_MASTER;
212 
213  mutex_unlock(&genpd->lock);
214 
215  ret = pm_genpd_poweron(link->master);
216 
217  mutex_lock(&genpd->lock);
218 
219  /*
220  * The "wait for parent" status is guaranteed not to change
221  * while the master is powering on.
222  */
223  genpd->status = GPD_STATE_POWER_OFF;
224  wake_up_all(&genpd->status_wait_queue);
225  if (ret) {
226  genpd_sd_counter_dec(link->master);
227  goto err;
228  }
229  }
230 
231  if (genpd->power_on) {
232  ktime_t time_start = ktime_get();
233  s64 elapsed_ns;
234 
235  ret = genpd->power_on(genpd);
236  if (ret)
237  goto err;
238 
239  elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
240  if (elapsed_ns > genpd->power_on_latency_ns) {
241  genpd->power_on_latency_ns = elapsed_ns;
242  genpd->max_off_time_changed = true;
243  genpd_recalc_cpu_exit_latency(genpd);
244  if (genpd->name)
245  pr_warning("%s: Power-on latency exceeded, "
246  "new value %lld ns\n", genpd->name,
247  elapsed_ns);
248  }
249  }
250 
251  out:
252  genpd_set_active(genpd);
253 
254  return 0;
255 
256  err:
257  list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
258  genpd_sd_counter_dec(link->master);
259 
260  return ret;
261 }
262 
267 int pm_genpd_poweron(struct generic_pm_domain *genpd)
268 {
269  int ret;
270 
271  mutex_lock(&genpd->lock);
272  ret = __pm_genpd_poweron(genpd);
273  mutex_unlock(&genpd->lock);
274  return ret;
275 }
276 
281 int pm_genpd_name_poweron(const char *domain_name)
282 {
283  struct generic_pm_domain *genpd;
284 
285  genpd = pm_genpd_lookup_name(domain_name);
286  return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
287 }
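
Editor's note: a minimal sketch of how platform setup code might use pm_genpd_name_poweron(); the wrapper function and the domain name "a4r" are illustrative only, not part of domain.c:

static int example_enable_block(void)
{
	/* Make sure the (hypothetical) "a4r" domain is powered before use. */
	int ret = pm_genpd_name_poweron("a4r");

	if (ret)
		pr_err("example: cannot power on PM domain (%d)\n", ret);

	return ret;
}
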
288 
289 #endif /* CONFIG_PM */
290 
291 #ifdef CONFIG_PM_RUNTIME
292 
293 static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
294  struct device *dev)
295 {
296  return GENPD_DEV_CALLBACK(genpd, int, start, dev);
297 }
298 
299 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
300 {
301  return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
302  save_state_latency_ns, "state save");
303 }
304 
305 static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
306 {
307  return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
308  restore_state_latency_ns,
309  "state restore");
310 }
311 
312 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
313  unsigned long val, void *ptr)
314 {
315  struct generic_pm_domain_data *gpd_data;
316  struct device *dev;
317 
318  gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
319 
320  mutex_lock(&gpd_data->lock);
321  dev = gpd_data->base.dev;
322  if (!dev) {
323  mutex_unlock(&gpd_data->lock);
324  return NOTIFY_DONE;
325  }
326  mutex_unlock(&gpd_data->lock);
327 
328  for (;;) {
329  struct generic_pm_domain *genpd;
330  struct pm_domain_data *pdd;
331 
332  spin_lock_irq(&dev->power.lock);
333 
334  pdd = dev->power.subsys_data ?
335  dev->power.subsys_data->domain_data : NULL;
336  if (pdd && pdd->dev) {
337  to_gpd_data(pdd)->td.constraint_changed = true;
338  genpd = dev_to_genpd(dev);
339  } else {
340  genpd = ERR_PTR(-ENODATA);
341  }
342 
343  spin_unlock_irq(&dev->power.lock);
344 
345  if (!IS_ERR(genpd)) {
346  mutex_lock(&genpd->lock);
347  genpd->max_off_time_changed = true;
348  mutex_unlock(&genpd->lock);
349  }
350 
351  dev = dev->parent;
352  if (!dev || dev->power.ignore_children)
353  break;
354  }
355 
356  return NOTIFY_DONE;
357 }
358 
364 static int __pm_genpd_save_device(struct pm_domain_data *pdd,
365  struct generic_pm_domain *genpd)
366  __releases(&genpd->lock) __acquires(&genpd->lock)
367 {
368  struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
369  struct device *dev = pdd->dev;
370  int ret = 0;
371 
372  if (gpd_data->need_restore)
373  return 0;
374 
375  mutex_unlock(&genpd->lock);
376 
377  genpd_start_dev(genpd, dev);
378  ret = genpd_save_dev(genpd, dev);
379  genpd_stop_dev(genpd, dev);
380 
381  mutex_lock(&genpd->lock);
382 
383  if (!ret)
384  gpd_data->need_restore = true;
385 
386  return ret;
387 }
388 
394 static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
395  struct generic_pm_domain *genpd)
396  __releases(&genpd->lock) __acquires(&genpd->lock)
397 {
398  struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
399  struct device *dev = pdd->dev;
400  bool need_restore = gpd_data->need_restore;
401 
402  gpd_data->need_restore = false;
403  mutex_unlock(&genpd->lock);
404 
405  genpd_start_dev(genpd, dev);
406  if (need_restore)
407  genpd_restore_dev(genpd, dev);
408 
409  mutex_lock(&genpd->lock);
410 }
411 
421 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
422 {
423  return genpd->status == GPD_STATE_WAIT_MASTER
424  || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
425 }
426 
434 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
435 {
436  if (!work_pending(&genpd->power_off_work))
437  queue_work(pm_wq, &genpd->power_off_work);
438 }
439 
448 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
449  __releases(&genpd->lock) __acquires(&genpd->lock)
450 {
451  struct pm_domain_data *pdd;
452  struct gpd_link *link;
453  unsigned int not_suspended;
454  int ret = 0;
455 
456  start:
457  /*
458  * Do not try to power off the domain in the following situations:
459  * (1) The domain is already in the "power off" state.
460  * (2) The domain is waiting for its master to power up.
461  * (3) One of the domain's devices is being resumed right now.
462  * (4) System suspend is in progress.
463  */
464  if (genpd->status == GPD_STATE_POWER_OFF
465  || genpd->status == GPD_STATE_WAIT_MASTER
466  || genpd->resume_count > 0 || genpd->prepared_count > 0)
467  return 0;
468 
469  if (atomic_read(&genpd->sd_count) > 0)
470  return -EBUSY;
471 
472  not_suspended = 0;
473  list_for_each_entry(pdd, &genpd->dev_list, list_node)
474  if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
475  || pdd->dev->power.irq_safe))
476  not_suspended++;
477 
478  if (not_suspended > genpd->in_progress)
479  return -EBUSY;
480 
481  if (genpd->poweroff_task) {
482  /*
483  * Another instance of pm_genpd_poweroff() is executing
484  * callbacks, so tell it to start over and return.
485  */
486  genpd->status = GPD_STATE_REPEAT;
487  return 0;
488  }
489 
490  if (genpd->gov && genpd->gov->power_down_ok) {
491  if (!genpd->gov->power_down_ok(&genpd->domain))
492  return -EAGAIN;
493  }
494 
495  genpd->status = GPD_STATE_BUSY;
496  genpd->poweroff_task = current;
497 
498  list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
499  ret = atomic_read(&genpd->sd_count) == 0 ?
500  __pm_genpd_save_device(pdd, genpd) : -EBUSY;
501 
502  if (genpd_abort_poweroff(genpd))
503  goto out;
504 
505  if (ret) {
506  genpd_set_active(genpd);
507  goto out;
508  }
509 
510  if (genpd->status == GPD_STATE_REPEAT) {
511  genpd->poweroff_task = NULL;
512  goto start;
513  }
514  }
515 
516  if (genpd->cpu_data) {
517  /*
518  * If cpu_data is set, cpuidle should turn the domain off when
519  * the CPU in it is idle. In that case we don't decrement the
520  * subdomain counts of the master domains, so that power is not
521  * removed from the current domain prematurely as a result of
522  * cutting off the masters' power.
523  */
524  genpd->status = GPD_STATE_POWER_OFF;
525  cpuidle_pause_and_lock();
526  genpd->cpu_data->idle_state->disabled = false;
527  cpuidle_resume_and_unlock();
528  goto out;
529  }
530 
531  if (genpd->power_off) {
532  ktime_t time_start;
533  s64 elapsed_ns;
534 
535  if (atomic_read(&genpd->sd_count) > 0) {
536  ret = -EBUSY;
537  goto out;
538  }
539 
540  time_start = ktime_get();
541 
542  /*
543  * If sd_count > 0 at this point, one of the subdomains hasn't
544  * managed to call pm_genpd_poweron() for the master yet after
545  * incrementing it. In that case pm_genpd_poweron() will wait
546  * for us to drop the lock, so we can call .power_off() and let
547  * the pm_genpd_poweron() restore power for us (this shouldn't
548  * happen very often).
549  */
550  ret = genpd->power_off(genpd);
551  if (ret == -EBUSY) {
552  genpd_set_active(genpd);
553  goto out;
554  }
555 
556  elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
557  if (elapsed_ns > genpd->power_off_latency_ns) {
558  genpd->power_off_latency_ns = elapsed_ns;
559  genpd->max_off_time_changed = true;
560  if (genpd->name)
561  pr_warning("%s: Power-off latency exceeded, "
562  "new value %lld ns\n", genpd->name,
563  elapsed_ns);
564  }
565  }
566 
567  genpd->status = GPD_STATE_POWER_OFF;
568 
569  list_for_each_entry(link, &genpd->slave_links, slave_node) {
570  genpd_sd_counter_dec(link->master);
571  genpd_queue_power_off_work(link->master);
572  }
573 
574  out:
575  genpd->poweroff_task = NULL;
576  wake_up_all(&genpd->status_wait_queue);
577  return ret;
578 }
579 
584 static void genpd_power_off_work_fn(struct work_struct *work)
585 {
586  struct generic_pm_domain *genpd;
587 
588  genpd = container_of(work, struct generic_pm_domain, power_off_work);
589 
590  genpd_acquire_lock(genpd);
591  pm_genpd_poweroff(genpd);
592  genpd_release_lock(genpd);
593 }
594 
603 static int pm_genpd_runtime_suspend(struct device *dev)
604 {
605  struct generic_pm_domain *genpd;
606  bool (*stop_ok)(struct device *__dev);
607  int ret;
608 
609  dev_dbg(dev, "%s()\n", __func__);
610 
611  genpd = dev_to_genpd(dev);
612  if (IS_ERR(genpd))
613  return -EINVAL;
614 
615  might_sleep_if(!genpd->dev_irq_safe);
616 
617  stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
618  if (stop_ok && !stop_ok(dev))
619  return -EBUSY;
620 
621  ret = genpd_stop_dev(genpd, dev);
622  if (ret)
623  return ret;
624 
625  /*
626  * If power.irq_safe is set, this routine will be run with interrupts
627  * off, so it can't use mutexes.
628  */
629  if (dev->power.irq_safe)
630  return 0;
631 
632  mutex_lock(&genpd->lock);
633  genpd->in_progress++;
634  pm_genpd_poweroff(genpd);
635  genpd->in_progress--;
636  mutex_unlock(&genpd->lock);
637 
638  return 0;
639 }
640 
649 static int pm_genpd_runtime_resume(struct device *dev)
650 {
651  struct generic_pm_domain *genpd;
652  DEFINE_WAIT(wait);
653  int ret;
654 
655  dev_dbg(dev, "%s()\n", __func__);
656 
657  genpd = dev_to_genpd(dev);
658  if (IS_ERR(genpd))
659  return -EINVAL;
660 
661  might_sleep_if(!genpd->dev_irq_safe);
662 
663  /* If power.irq_safe, the PM domain is never powered off. */
664  if (dev->power.irq_safe)
665  return genpd_start_dev_no_timing(genpd, dev);
666 
667  mutex_lock(&genpd->lock);
668  ret = __pm_genpd_poweron(genpd);
669  if (ret) {
670  mutex_unlock(&genpd->lock);
671  return ret;
672  }
673  genpd->status = GPD_STATE_BUSY;
674  genpd->resume_count++;
675  for (;;) {
676  prepare_to_wait(&genpd->status_wait_queue, &wait,
677  TASK_UNINTERRUPTIBLE);
678  /*
679  * If current is the powering off task, we have been called
680  * reentrantly from one of the device callbacks, so we should
681  * not wait.
682  */
683  if (!genpd->poweroff_task || genpd->poweroff_task == current)
684  break;
685  mutex_unlock(&genpd->lock);
686 
687  schedule();
688 
689  mutex_lock(&genpd->lock);
690  }
691  finish_wait(&genpd->status_wait_queue, &wait);
692  __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
693  genpd->resume_count--;
694  genpd_set_active(genpd);
695  wake_up_all(&genpd->status_wait_queue);
696  mutex_unlock(&genpd->lock);
697 
698  return 0;
699 }
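
Editor's note: drivers never call pm_genpd_runtime_suspend()/_resume() directly; they issue ordinary runtime-PM calls and the PM core dispatches through the dev_pm_domain installed by pm_genpd_init(). A hedged sketch of a driver path that exercises these callbacks; the function and its body are illustrative:

static int example_do_io(struct device *dev)
{
	/* May end up in __pm_genpd_poweron() via pm_genpd_runtime_resume(). */
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware while the domain is guaranteed on ... */

	/* May trigger pm_genpd_runtime_suspend() and a domain power-off. */
	pm_runtime_put_sync(dev);
	return 0;
}
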
700 
704 void pm_genpd_poweroff_unused(void)
705 {
706  struct generic_pm_domain *genpd;
707 
708  mutex_lock(&gpd_list_lock);
709 
710  list_for_each_entry(genpd, &gpd_list, gpd_list_node)
711  genpd_queue_power_off_work(genpd);
712 
713  mutex_unlock(&gpd_list_lock);
714 }
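
Editor's note: platforms usually call pm_genpd_poweroff_unused() once, late in boot, so that domains left without runtime-active devices get switched off. A sketch, assuming an initcall in platform code (names are illustrative):

static int __init example_genpd_late_init(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(example_genpd_late_init);
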
715 
716 #else
717 
718 static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
719  unsigned long val, void *ptr)
720 {
721  return NOTIFY_DONE;
722 }
723 
724 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
725 
726 #define pm_genpd_runtime_suspend NULL
727 #define pm_genpd_runtime_resume NULL
728 
729 #endif /* CONFIG_PM_RUNTIME */
730 
731 #ifdef CONFIG_PM_SLEEP
732 
737 static bool pm_genpd_present(struct generic_pm_domain *genpd)
738 {
739  struct generic_pm_domain *gpd;
740 
741  if (IS_ERR_OR_NULL(genpd))
742  return false;
743 
744  list_for_each_entry(gpd, &gpd_list, gpd_list_node)
745  if (gpd == genpd)
746  return true;
747 
748  return false;
749 }
750 
751 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
752  struct device *dev)
753 {
754  return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
755 }
756 
757 static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
758 {
759  return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
760 }
761 
762 static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
763 {
764  return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
765 }
766 
767 static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
768 {
769  return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
770 }
771 
772 static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
773 {
774  return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
775 }
776 
777 static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
778 {
779  return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
780 }
781 
782 static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
783 {
784  return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
785 }
786 
787 static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
788 {
789  return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
790 }
791 
792 static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
793 {
794  return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
795 }
796 
809 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
810 {
811  struct gpd_link *link;
812 
813  if (genpd->status == GPD_STATE_POWER_OFF)
814  return;
815 
816  if (genpd->suspended_count != genpd->device_count
817  || atomic_read(&genpd->sd_count) > 0)
818  return;
819 
820  if (genpd->power_off)
821  genpd->power_off(genpd);
822 
823  genpd->status = GPD_STATE_POWER_OFF;
824 
825  list_for_each_entry(link, &genpd->slave_links, slave_node) {
826  genpd_sd_counter_dec(link->master);
827  pm_genpd_sync_poweroff(link->master);
828  }
829 }
830 
840 static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
841 {
842  struct gpd_link *link;
843 
844  if (genpd->status != GPD_STATE_POWER_OFF)
845  return;
846 
847  list_for_each_entry(link, &genpd->slave_links, slave_node) {
848  pm_genpd_sync_poweron(link->master);
849  genpd_sd_counter_inc(link->master);
850  }
851 
852  if (genpd->power_on)
853  genpd->power_on(genpd);
854 
855  genpd->status = GPD_STATE_ACTIVE;
856 }
857 
874 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
875 {
876  bool active_wakeup;
877 
878  if (!device_can_wakeup(dev))
879  return false;
880 
881  active_wakeup = genpd_dev_active_wakeup(genpd, dev);
882  return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
883 }
884 
894 static int pm_genpd_prepare(struct device *dev)
895 {
896  struct generic_pm_domain *genpd;
897  int ret;
898 
899  dev_dbg(dev, "%s()\n", __func__);
900 
901  genpd = dev_to_genpd(dev);
902  if (IS_ERR(genpd))
903  return -EINVAL;
904 
905  /*
906  * If a wakeup request is pending for the device, it should be woken up
907  * at this point and a system wakeup event should be reported if it's
908  * set up to wake up the system from sleep states.
909  */
910  pm_runtime_get_noresume(dev);
911  if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
912  pm_wakeup_event(dev, 0);
913 
914  if (pm_wakeup_pending()) {
915  pm_runtime_put_sync(dev);
916  return -EBUSY;
917  }
918 
919  if (resume_needed(dev, genpd))
920  pm_runtime_resume(dev);
921 
922  genpd_acquire_lock(genpd);
923 
924  if (genpd->prepared_count++ == 0) {
925  genpd->suspended_count = 0;
926  genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
927  }
928 
929  genpd_release_lock(genpd);
930 
931  if (genpd->suspend_power_off) {
932  pm_runtime_put_noidle(dev);
933  return 0;
934  }
935 
936  /*
937  * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
938  * so pm_genpd_poweron() will return immediately, but if the device
939  * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
940  * to make it operational.
941  */
942  pm_runtime_resume(dev);
943  __pm_runtime_disable(dev, false);
944 
945  ret = pm_generic_prepare(dev);
946  if (ret) {
947  mutex_lock(&genpd->lock);
948 
949  if (--genpd->prepared_count == 0)
950  genpd->suspend_power_off = false;
951 
952  mutex_unlock(&genpd->lock);
953  pm_runtime_enable(dev);
954  }
955 
956  pm_runtime_put_sync(dev);
957  return ret;
958 }
959 
968 static int pm_genpd_suspend(struct device *dev)
969 {
970  struct generic_pm_domain *genpd;
971 
972  dev_dbg(dev, "%s()\n", __func__);
973 
974  genpd = dev_to_genpd(dev);
975  if (IS_ERR(genpd))
976  return -EINVAL;
977 
978  return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
979 }
980 
989 static int pm_genpd_suspend_late(struct device *dev)
990 {
991  struct generic_pm_domain *genpd;
992 
993  dev_dbg(dev, "%s()\n", __func__);
994 
995  genpd = dev_to_genpd(dev);
996  if (IS_ERR(genpd))
997  return -EINVAL;
998 
999  return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
1000 }
1001 
1009 static int pm_genpd_suspend_noirq(struct device *dev)
1010 {
1011  struct generic_pm_domain *genpd;
1012 
1013  dev_dbg(dev, "%s()\n", __func__);
1014 
1015  genpd = dev_to_genpd(dev);
1016  if (IS_ERR(genpd))
1017  return -EINVAL;
1018 
1019  if (genpd->suspend_power_off
1020  || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
1021  return 0;
1022 
1023  genpd_stop_dev(genpd, dev);
1024 
1025  /*
1026  * Since all of the "noirq" callbacks are executed sequentially, it is
1027  * guaranteed that this function will never run twice in parallel for
1028  * the same PM domain, so it is not necessary to use locking here.
1029  */
1030  genpd->suspended_count++;
1031  pm_genpd_sync_poweroff(genpd);
1032 
1033  return 0;
1034 }
1035 
1042 static int pm_genpd_resume_noirq(struct device *dev)
1043 {
1044  struct generic_pm_domain *genpd;
1045 
1046  dev_dbg(dev, "%s()\n", __func__);
1047 
1048  genpd = dev_to_genpd(dev);
1049  if (IS_ERR(genpd))
1050  return -EINVAL;
1051 
1052  if (genpd->suspend_power_off
1053  || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
1054  return 0;
1055 
1056  /*
1057  * Since all of the "noirq" callbacks are executed sequentially, it is
1058  * guaranteed that this function will never run twice in parallel for
1059  * the same PM domain, so it is not necessary to use locking here.
1060  */
1061  pm_genpd_sync_poweron(genpd);
1062  genpd->suspended_count--;
1063 
1064  return genpd_start_dev(genpd, dev);
1065 }
1066 
1076 static int pm_genpd_resume_early(struct device *dev)
1077 {
1078  struct generic_pm_domain *genpd;
1079 
1080  dev_dbg(dev, "%s()\n", __func__);
1081 
1082  genpd = dev_to_genpd(dev);
1083  if (IS_ERR(genpd))
1084  return -EINVAL;
1085 
1086  return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
1087 }
1088 
1097 static int pm_genpd_resume(struct device *dev)
1098 {
1099  struct generic_pm_domain *genpd;
1100 
1101  dev_dbg(dev, "%s()\n", __func__);
1102 
1103  genpd = dev_to_genpd(dev);
1104  if (IS_ERR(genpd))
1105  return -EINVAL;
1106 
1107  return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
1108 }
1109 
1118 static int pm_genpd_freeze(struct device *dev)
1119 {
1120  struct generic_pm_domain *genpd;
1121 
1122  dev_dbg(dev, "%s()\n", __func__);
1123 
1124  genpd = dev_to_genpd(dev);
1125  if (IS_ERR(genpd))
1126  return -EINVAL;
1127 
1128  return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
1129 }
1130 
1140 static int pm_genpd_freeze_late(struct device *dev)
1141 {
1142  struct generic_pm_domain *genpd;
1143 
1144  dev_dbg(dev, "%s()\n", __func__);
1145 
1146  genpd = dev_to_genpd(dev);
1147  if (IS_ERR(genpd))
1148  return -EINVAL;
1149 
1150  return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1151 }
1152 
1162 static int pm_genpd_freeze_noirq(struct device *dev)
1163 {
1164  struct generic_pm_domain *genpd;
1165 
1166  dev_dbg(dev, "%s()\n", __func__);
1167 
1168  genpd = dev_to_genpd(dev);
1169  if (IS_ERR(genpd))
1170  return -EINVAL;
1171 
1172  return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
1173 }
1174 
1182 static int pm_genpd_thaw_noirq(struct device *dev)
1183 {
1184  struct generic_pm_domain *genpd;
1185 
1186  dev_dbg(dev, "%s()\n", __func__);
1187 
1188  genpd = dev_to_genpd(dev);
1189  if (IS_ERR(genpd))
1190  return -EINVAL;
1191 
1192  return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
1193 }
1194 
1204 static int pm_genpd_thaw_early(struct device *dev)
1205 {
1206  struct generic_pm_domain *genpd;
1207 
1208  dev_dbg(dev, "%s()\n", __func__);
1209 
1210  genpd = dev_to_genpd(dev);
1211  if (IS_ERR(genpd))
1212  return -EINVAL;
1213 
1214  return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
1215 }
1216 
1225 static int pm_genpd_thaw(struct device *dev)
1226 {
1227  struct generic_pm_domain *genpd;
1228 
1229  dev_dbg(dev, "%s()\n", __func__);
1230 
1231  genpd = dev_to_genpd(dev);
1232  if (IS_ERR(genpd))
1233  return -EINVAL;
1234 
1235  return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
1236 }
1237 
1245 static int pm_genpd_restore_noirq(struct device *dev)
1246 {
1247  struct generic_pm_domain *genpd;
1248 
1249  dev_dbg(dev, "%s()\n", __func__);
1250 
1251  genpd = dev_to_genpd(dev);
1252  if (IS_ERR(genpd))
1253  return -EINVAL;
1254 
1255  /*
1256  * Since all of the "noirq" callbacks are executed sequentially, it is
1257  * guaranteed that this function will never run twice in parallel for
1258  * the same PM domain, so it is not necessary to use locking here.
1259  *
1260  * At this point suspended_count == 0 means we are being run for the
1261  * first time for the given domain in the present cycle.
1262  */
1263  if (genpd->suspended_count++ == 0) {
1264  /*
1265  * The boot kernel might put the domain into arbitrary state,
1266  * so make it appear as powered off to pm_genpd_sync_poweron(),
1267  * so that it tries to power it on in case it was really off.
1268  */
1269  genpd->status = GPD_STATE_POWER_OFF;
1270  if (genpd->suspend_power_off) {
1271  /*
1272  * If the domain was off before the hibernation, make
1273  * sure it will be off going forward.
1274  */
1275  if (genpd->power_off)
1276  genpd->power_off(genpd);
1277 
1278  return 0;
1279  }
1280  }
1281 
1282  if (genpd->suspend_power_off)
1283  return 0;
1284 
1285  pm_genpd_sync_poweron(genpd);
1286 
1287  return genpd_start_dev(genpd, dev);
1288 }
1289 
1299 static void pm_genpd_complete(struct device *dev)
1300 {
1301  struct generic_pm_domain *genpd;
1302  bool run_complete;
1303 
1304  dev_dbg(dev, "%s()\n", __func__);
1305 
1306  genpd = dev_to_genpd(dev);
1307  if (IS_ERR(genpd))
1308  return;
1309 
1310  mutex_lock(&genpd->lock);
1311 
1312  run_complete = !genpd->suspend_power_off;
1313  if (--genpd->prepared_count == 0)
1314  genpd->suspend_power_off = false;
1315 
1316  mutex_unlock(&genpd->lock);
1317 
1318  if (run_complete) {
1319  pm_generic_complete(dev);
1320  pm_runtime_set_active(dev);
1321  pm_runtime_enable(dev);
1322  pm_runtime_idle(dev);
1323  }
1324 }
1325 
1333 void pm_genpd_syscore_switch(struct device *dev, bool suspend)
1334 {
1335  struct generic_pm_domain *genpd;
1336 
1337  genpd = dev_to_genpd(dev);
1338  if (!pm_genpd_present(genpd))
1339  return;
1340 
1341  if (suspend) {
1342  genpd->suspended_count++;
1343  pm_genpd_sync_poweroff(genpd);
1344  } else {
1345  pm_genpd_sync_poweron(genpd);
1346  genpd->suspended_count--;
1347  }
1348 }
1349 EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
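
Editor's note: pm_genpd_syscore_switch() is intended for syscore-level devices (e.g. an always-on clock event device) that are stopped and restarted outside the regular "noirq" phases. A hedged sketch of calling it from syscore suspend/resume handlers; the device pointer and handler names are placeholders:

static struct device *example_timer_dev;	/* hypothetical syscore device */

static int example_timer_syscore_suspend(void)
{
	/* Tell genpd the domain is going down behind the PM core's back. */
	pm_genpd_syscore_switch(example_timer_dev, true);
	return 0;
}

static void example_timer_syscore_resume(void)
{
	/* Tell genpd the domain is powered again before the timer restarts. */
	pm_genpd_syscore_switch(example_timer_dev, false);
}
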
1350 
1351 #else
1352 
1353 #define pm_genpd_prepare NULL
1354 #define pm_genpd_suspend NULL
1355 #define pm_genpd_suspend_late NULL
1356 #define pm_genpd_suspend_noirq NULL
1357 #define pm_genpd_resume_early NULL
1358 #define pm_genpd_resume_noirq NULL
1359 #define pm_genpd_resume NULL
1360 #define pm_genpd_freeze NULL
1361 #define pm_genpd_freeze_late NULL
1362 #define pm_genpd_freeze_noirq NULL
1363 #define pm_genpd_thaw_early NULL
1364 #define pm_genpd_thaw_noirq NULL
1365 #define pm_genpd_thaw NULL
1366 #define pm_genpd_restore_noirq NULL
1367 #define pm_genpd_complete NULL
1368 
1369 #endif /* CONFIG_PM_SLEEP */
1370 
1371 static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
1372 {
1373  struct generic_pm_domain_data *gpd_data;
1374 
1375  gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1376  if (!gpd_data)
1377  return NULL;
1378 
1379  mutex_init(&gpd_data->lock);
1380  gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1381  dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1382  return gpd_data;
1383 }
1384 
1385 static void __pm_genpd_free_dev_data(struct device *dev,
1386  struct generic_pm_domain_data *gpd_data)
1387 {
1388  dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1389  kfree(gpd_data);
1390 }
1391 
1398 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1399  struct gpd_timing_data *td)
1400 {
1401  struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1402  struct pm_domain_data *pdd;
1403  int ret = 0;
1404 
1405  dev_dbg(dev, "%s()\n", __func__);
1406 
1407  if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1408  return -EINVAL;
1409 
1410  gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1411  if (!gpd_data_new)
1412  return -ENOMEM;
1413 
1414  genpd_acquire_lock(genpd);
1415 
1416  if (genpd->prepared_count > 0) {
1417  ret = -EAGAIN;
1418  goto out;
1419  }
1420 
1421  list_for_each_entry(pdd, &genpd->dev_list, list_node)
1422  if (pdd->dev == dev) {
1423  ret = -EINVAL;
1424  goto out;
1425  }
1426 
1427  ret = dev_pm_get_subsys_data(dev);
1428  if (ret)
1429  goto out;
1430 
1431  genpd->device_count++;
1432  genpd->max_off_time_changed = true;
1433 
1434  spin_lock_irq(&dev->power.lock);
1435 
1436  dev->pm_domain = &genpd->domain;
1437  if (dev->power.subsys_data->domain_data) {
1438  gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1439  } else {
1440  gpd_data = gpd_data_new;
1441  dev->power.subsys_data->domain_data = &gpd_data->base;
1442  }
1443  gpd_data->refcount++;
1444  if (td)
1445  gpd_data->td = *td;
1446 
1447  spin_unlock_irq(&dev->power.lock);
1448 
1449  mutex_lock(&gpd_data->lock);
1450  gpd_data->base.dev = dev;
1451  list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1452  gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
1453  gpd_data->td.constraint_changed = true;
1454  gpd_data->td.effective_constraint_ns = -1;
1455  mutex_unlock(&gpd_data->lock);
1456 
1457  out:
1458  genpd_release_lock(genpd);
1459 
1460  if (gpd_data != gpd_data_new)
1461  __pm_genpd_free_dev_data(dev, gpd_data_new);
1462 
1463  return ret;
1464 }
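
Editor's note: a minimal sketch of registering a device with a domain through __pm_genpd_add_device(), seeding gpd_timing_data with placeholder latencies; the helper name and the numbers are illustrative only:

static void example_add_to_domain(struct generic_pm_domain *genpd,
				  struct device *dev)
{
	struct gpd_timing_data td = {
		.stop_latency_ns  = 20000,	/* placeholder values */
		.start_latency_ns = 50000,
	};

	if (__pm_genpd_add_device(genpd, dev, &td))
		dev_warn(dev, "failed to add device to PM domain %s\n",
			 genpd->name);
}
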
1465 
1473 int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1474  struct gpd_timing_data *td)
1475 {
1476  struct generic_pm_domain *genpd = NULL, *gpd;
1477 
1478  dev_dbg(dev, "%s()\n", __func__);
1479 
1480  if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1481  return -EINVAL;
1482 
1483  mutex_lock(&gpd_list_lock);
1484  list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1485  if (gpd->of_node == genpd_node) {
1486  genpd = gpd;
1487  break;
1488  }
1489  }
1490  mutex_unlock(&gpd_list_lock);
1491 
1492  if (!genpd)
1493  return -EINVAL;
1494 
1495  return __pm_genpd_add_device(genpd, dev, td);
1496 }
1497 
1498 
1505 int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1506  struct gpd_timing_data *td)
1507 {
1508  return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1509 }
1510 
1516 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1517  struct device *dev)
1518 {
1519  struct generic_pm_domain_data *gpd_data;
1520  struct pm_domain_data *pdd;
1521  bool remove = false;
1522  int ret = 0;
1523 
1524  dev_dbg(dev, "%s()\n", __func__);
1525 
1526  if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1527  || IS_ERR_OR_NULL(dev->pm_domain)
1528  || pd_to_genpd(dev->pm_domain) != genpd)
1529  return -EINVAL;
1530 
1531  genpd_acquire_lock(genpd);
1532 
1533  if (genpd->prepared_count > 0) {
1534  ret = -EAGAIN;
1535  goto out;
1536  }
1537 
1538  genpd->device_count--;
1539  genpd->max_off_time_changed = true;
1540 
1541  spin_lock_irq(&dev->power.lock);
1542 
1543  dev->pm_domain = NULL;
1544  pdd = dev->power.subsys_data->domain_data;
1545  list_del_init(&pdd->list_node);
1546  gpd_data = to_gpd_data(pdd);
1547  if (--gpd_data->refcount == 0) {
1548  dev->power.subsys_data->domain_data = NULL;
1549  remove = true;
1550  }
1551 
1552  spin_unlock_irq(&dev->power.lock);
1553 
1554  mutex_lock(&gpd_data->lock);
1555  pdd->dev = NULL;
1556  mutex_unlock(&gpd_data->lock);
1557 
1558  genpd_release_lock(genpd);
1559 
1560  dev_pm_put_subsys_data(dev);
1561  if (remove)
1562  __pm_genpd_free_dev_data(dev, gpd_data);
1563 
1564  return 0;
1565 
1566  out:
1567  genpd_release_lock(genpd);
1568 
1569  return ret;
1570 }
1571 
1577 void pm_genpd_dev_need_restore(struct device *dev, bool val)
1578 {
1579  struct pm_subsys_data *psd;
1580  unsigned long flags;
1581 
1582  spin_lock_irqsave(&dev->power.lock, flags);
1583 
1584  psd = dev_to_psd(dev);
1585  if (psd && psd->domain_data)
1586  to_gpd_data(psd->domain_data)->need_restore = val;
1587 
1588  spin_unlock_irqrestore(&dev->power.lock, flags);
1589 }
1591 
1597 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1598  struct generic_pm_domain *subdomain)
1599 {
1600  struct gpd_link *link;
1601  int ret = 0;
1602 
1603  if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1604  || genpd == subdomain)
1605  return -EINVAL;
1606 
1607  start:
1608  genpd_acquire_lock(genpd);
1609  mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1610 
1611  if (subdomain->status != GPD_STATE_POWER_OFF
1612  && subdomain->status != GPD_STATE_ACTIVE) {
1613  mutex_unlock(&subdomain->lock);
1614  genpd_release_lock(genpd);
1615  goto start;
1616  }
1617 
1618  if (genpd->status == GPD_STATE_POWER_OFF
1619  && subdomain->status != GPD_STATE_POWER_OFF) {
1620  ret = -EINVAL;
1621  goto out;
1622  }
1623 
1624  list_for_each_entry(link, &genpd->master_links, master_node) {
1625  if (link->slave == subdomain && link->master == genpd) {
1626  ret = -EINVAL;
1627  goto out;
1628  }
1629  }
1630 
1631  link = kzalloc(sizeof(*link), GFP_KERNEL);
1632  if (!link) {
1633  ret = -ENOMEM;
1634  goto out;
1635  }
1636  link->master = genpd;
1637  list_add_tail(&link->master_node, &genpd->master_links);
1638  link->slave = subdomain;
1639  list_add_tail(&link->slave_node, &subdomain->slave_links);
1640  if (subdomain->status != GPD_STATE_POWER_OFF)
1641  genpd_sd_counter_inc(genpd);
1642 
1643  out:
1644  mutex_unlock(&subdomain->lock);
1645  genpd_release_lock(genpd);
1646 
1647  return ret;
1648 }
1649 
1655 int pm_genpd_add_subdomain_names(const char *master_name,
1656  const char *subdomain_name)
1657 {
1658  struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1659 
1660  if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1661  return -EINVAL;
1662 
1663  mutex_lock(&gpd_list_lock);
1664  list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1665  if (!master && !strcmp(gpd->name, master_name))
1666  master = gpd;
1667 
1668  if (!subdomain && !strcmp(gpd->name, subdomain_name))
1669  subdomain = gpd;
1670 
1671  if (master && subdomain)
1672  break;
1673  }
1674  mutex_unlock(&gpd_list_lock);
1675 
1676  return pm_genpd_add_subdomain(master, subdomain);
1677 }
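
Editor's note: a sketch of building a domain hierarchy by name with pm_genpd_add_subdomain_names(); the domain names are examples, not part of domain.c:

static void example_link_domains(void)
{
	/* Make the (hypothetical) "a3rv" domain a subdomain of "a4r". */
	if (pm_genpd_add_subdomain_names("a4r", "a3rv"))
		pr_warn("example: could not link PM domains\n");
}
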
1678 
1684 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1685  struct generic_pm_domain *subdomain)
1686 {
1687  struct gpd_link *link;
1688  int ret = -EINVAL;
1689 
1690  if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1691  return -EINVAL;
1692 
1693  start:
1694  genpd_acquire_lock(genpd);
1695 
1696  list_for_each_entry(link, &genpd->master_links, master_node) {
1697  if (link->slave != subdomain)
1698  continue;
1699 
1700  mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1701 
1702  if (subdomain->status != GPD_STATE_POWER_OFF
1703  && subdomain->status != GPD_STATE_ACTIVE) {
1704  mutex_unlock(&subdomain->lock);
1705  genpd_release_lock(genpd);
1706  goto start;
1707  }
1708 
1709  list_del(&link->master_node);
1710  list_del(&link->slave_node);
1711  kfree(link);
1712  if (subdomain->status != GPD_STATE_POWER_OFF)
1713  genpd_sd_counter_dec(genpd);
1714 
1715  mutex_unlock(&subdomain->lock);
1716 
1717  ret = 0;
1718  break;
1719  }
1720 
1721  genpd_release_lock(genpd);
1722 
1723  return ret;
1724 }
1725 
1735 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1736  struct gpd_timing_data *td)
1737 {
1738  struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1739  int ret = 0;
1740 
1741  if (!(dev && ops))
1742  return -EINVAL;
1743 
1744  gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1745  if (!gpd_data_new)
1746  return -ENOMEM;
1747 
1748  pm_runtime_disable(dev);
1749  device_pm_lock();
1750 
1751  ret = dev_pm_get_subsys_data(dev);
1752  if (ret)
1753  goto out;
1754 
1755  spin_lock_irq(&dev->power.lock);
1756 
1757  if (dev->power.subsys_data->domain_data) {
1758  gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1759  } else {
1760  gpd_data = gpd_data_new;
1761  dev->power.subsys_data->domain_data = &gpd_data->base;
1762  }
1763  gpd_data->refcount++;
1764  gpd_data->ops = *ops;
1765  if (td)
1766  gpd_data->td = *td;
1767 
1768  spin_unlock_irq(&dev->power.lock);
1769 
1770  out:
1771  device_pm_unlock();
1772  pm_runtime_enable(dev);
1773 
1774  if (gpd_data != gpd_data_new)
1775  __pm_genpd_free_dev_data(dev, gpd_data_new);
1776 
1777  return ret;
1778 }
1779 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1780 
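
Editor's note: a hedged sketch of supplying per-device callbacks through pm_genpd_add_callbacks(); only the save/restore hooks are overridden here, and all names are illustrative:

static int example_save_ctx(struct device *dev)
{
	/* save device registers before the domain is powered off */
	return 0;
}

static int example_restore_ctx(struct device *dev)
{
	/* restore device registers after the domain is powered on */
	return 0;
}

static struct gpd_dev_ops example_dev_ops = {
	.save_state	= example_save_ctx,
	.restore_state	= example_restore_ctx,
};

static void example_register_callbacks(struct device *dev)
{
	if (pm_genpd_add_callbacks(dev, &example_dev_ops, NULL))
		dev_warn(dev, "failed to register PM domain callbacks\n");
}
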
1788 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1789 {
1790  struct generic_pm_domain_data *gpd_data = NULL;
1791  bool remove = false;
1792  int ret = 0;
1793 
1794  if (!(dev && dev->power.subsys_data))
1795  return -EINVAL;
1796 
1797  pm_runtime_disable(dev);
1798  device_pm_lock();
1799 
1800  spin_lock_irq(&dev->power.lock);
1801 
1802  if (dev->power.subsys_data->domain_data) {
1803  gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1804  gpd_data->ops = (struct gpd_dev_ops){ NULL };
1805  if (clear_td)
1806  gpd_data->td = (struct gpd_timing_data){ 0 };
1807 
1808  if (--gpd_data->refcount == 0) {
1809  dev->power.subsys_data->domain_data = NULL;
1810  remove = true;
1811  }
1812  } else {
1813  ret = -EINVAL;
1814  }
1815 
1816  spin_unlock_irq(&dev->power.lock);
1817 
1818  device_pm_unlock();
1819  pm_runtime_enable(dev);
1820 
1821  if (ret)
1822  return ret;
1823 
1824  dev_pm_put_subsys_data(dev);
1825  if (remove)
1826  __pm_genpd_free_dev_data(dev, gpd_data);
1827 
1828  return 0;
1829 }
1830 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1831 
1841 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1842 {
1843  struct cpuidle_driver *cpuidle_drv;
1844  struct gpd_cpu_data *cpu_data;
1845  struct cpuidle_state *idle_state;
1846  int ret = 0;
1847 
1848  if (IS_ERR_OR_NULL(genpd) || state < 0)
1849  return -EINVAL;
1850 
1851  genpd_acquire_lock(genpd);
1852 
1853  if (genpd->cpu_data) {
1854  ret = -EEXIST;
1855  goto out;
1856  }
1857  cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
1858  if (!cpu_data) {
1859  ret = -ENOMEM;
1860  goto out;
1861  }
1862  cpuidle_drv = cpuidle_driver_ref();
1863  if (!cpuidle_drv) {
1864  ret = -ENODEV;
1865  goto err_drv;
1866  }
1867  if (cpuidle_drv->state_count <= state) {
1868  ret = -EINVAL;
1869  goto err;
1870  }
1871  idle_state = &cpuidle_drv->states[state];
1872  if (!idle_state->disabled) {
1873  ret = -EAGAIN;
1874  goto err;
1875  }
1876  cpu_data->idle_state = idle_state;
1877  cpu_data->saved_exit_latency = idle_state->exit_latency;
1878  genpd->cpu_data = cpu_data;
1879  genpd_recalc_cpu_exit_latency(genpd);
1880 
1881  out:
1882  genpd_release_lock(genpd);
1883  return ret;
1884 
1885  err:
1886  cpuidle_driver_unref();
1887 
1888  err_drv:
1889  kfree(cpu_data);
1890  goto out;
1891 }
1892 
1898 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1899 {
1900  return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1901 }
1902 
1910 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1911 {
1912  struct gpd_cpu_data *cpu_data;
1913  struct cpuidle_state *idle_state;
1914  int ret = 0;
1915 
1916  if (IS_ERR_OR_NULL(genpd))
1917  return -EINVAL;
1918 
1919  genpd_acquire_lock(genpd);
1920 
1921  cpu_data = genpd->cpu_data;
1922  if (!cpu_data) {
1923  ret = -ENODEV;
1924  goto out;
1925  }
1926  idle_state = cpu_data->idle_state;
1927  if (!idle_state->disabled) {
1928  ret = -EAGAIN;
1929  goto out;
1930  }
1931  idle_state->exit_latency = cpu_data->saved_exit_latency;
1932  cpuidle_driver_unref();
1933  genpd->cpu_data = NULL;
1934  kfree(cpu_data);
1935 
1936  out:
1937  genpd_release_lock(genpd);
1938  return ret;
1939 }
1940 
1945 int pm_genpd_name_detach_cpuidle(const char *name)
1946 {
1947  return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1948 }
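
Editor's note: a hedged sketch of coupling a domain to a cpuidle C-state with pm_genpd_attach_cpuidle(); the helper name and the state index are illustrative. After the call, the chosen idle state is disabled whenever the domain must stay powered (see __pm_genpd_poweron() above) and re-enabled once the domain may be off:

static void example_couple_cpuidle(struct generic_pm_domain *genpd)
{
	int ret = pm_genpd_attach_cpuidle(genpd, 1);	/* C-state index 1 */

	if (ret)
		pr_warn("%s: cannot attach cpuidle state (%d)\n",
			genpd->name, ret);
}
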
1949 
1950 /* Default device callbacks for generic PM domains. */
1951 
1956 static int pm_genpd_default_save_state(struct device *dev)
1957 {
1958  int (*cb)(struct device *__dev);
1959 
1960  cb = dev_gpd_data(dev)->ops.save_state;
1961  if (cb)
1962  return cb(dev);
1963 
1964  if (dev->type && dev->type->pm)
1965  cb = dev->type->pm->runtime_suspend;
1966  else if (dev->class && dev->class->pm)
1967  cb = dev->class->pm->runtime_suspend;
1968  else if (dev->bus && dev->bus->pm)
1969  cb = dev->bus->pm->runtime_suspend;
1970  else
1971  cb = NULL;
1972 
1973  if (!cb && dev->driver && dev->driver->pm)
1974  cb = dev->driver->pm->runtime_suspend;
1975 
1976  return cb ? cb(dev) : 0;
1977 }
1978 
1983 static int pm_genpd_default_restore_state(struct device *dev)
1984 {
1985  int (*cb)(struct device *__dev);
1986 
1987  cb = dev_gpd_data(dev)->ops.restore_state;
1988  if (cb)
1989  return cb(dev);
1990 
1991  if (dev->type && dev->type->pm)
1992  cb = dev->type->pm->runtime_resume;
1993  else if (dev->class && dev->class->pm)
1994  cb = dev->class->pm->runtime_resume;
1995  else if (dev->bus && dev->bus->pm)
1996  cb = dev->bus->pm->runtime_resume;
1997  else
1998  cb = NULL;
1999 
2000  if (!cb && dev->driver && dev->driver->pm)
2001  cb = dev->driver->pm->runtime_resume;
2002 
2003  return cb ? cb(dev) : 0;
2004 }
2005 
2006 #ifdef CONFIG_PM_SLEEP
2007 
2012 static int pm_genpd_default_suspend(struct device *dev)
2013 {
2014  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
2015 
2016  return cb ? cb(dev) : pm_generic_suspend(dev);
2017 }
2018 
2023 static int pm_genpd_default_suspend_late(struct device *dev)
2024 {
2025  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
2026 
2027  return cb ? cb(dev) : pm_generic_suspend_late(dev);
2028 }
2029 
2034 static int pm_genpd_default_resume_early(struct device *dev)
2035 {
2036  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
2037 
2038  return cb ? cb(dev) : pm_generic_resume_early(dev);
2039 }
2040 
2045 static int pm_genpd_default_resume(struct device *dev)
2046 {
2047  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
2048 
2049  return cb ? cb(dev) : pm_generic_resume(dev);
2050 }
2051 
2056 static int pm_genpd_default_freeze(struct device *dev)
2057 {
2058  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
2059 
2060  return cb ? cb(dev) : pm_generic_freeze(dev);
2061 }
2062 
2067 static int pm_genpd_default_freeze_late(struct device *dev)
2068 {
2069  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
2070 
2071  return cb ? cb(dev) : pm_generic_freeze_late(dev);
2072 }
2073 
2078 static int pm_genpd_default_thaw_early(struct device *dev)
2079 {
2080  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
2081 
2082  return cb ? cb(dev) : pm_generic_thaw_early(dev);
2083 }
2084 
2089 static int pm_genpd_default_thaw(struct device *dev)
2090 {
2091  int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
2092 
2093  return cb ? cb(dev) : pm_generic_thaw(dev);
2094 }
2095 
2096 #else /* !CONFIG_PM_SLEEP */
2097 
2098 #define pm_genpd_default_suspend NULL
2099 #define pm_genpd_default_suspend_late NULL
2100 #define pm_genpd_default_resume_early NULL
2101 #define pm_genpd_default_resume NULL
2102 #define pm_genpd_default_freeze NULL
2103 #define pm_genpd_default_freeze_late NULL
2104 #define pm_genpd_default_thaw_early NULL
2105 #define pm_genpd_default_thaw NULL
2106 
2107 #endif /* !CONFIG_PM_SLEEP */
2108 
2115 void pm_genpd_init(struct generic_pm_domain *genpd,
2116  struct dev_power_governor *gov, bool is_off)
2117 {
2118  if (IS_ERR_OR_NULL(genpd))
2119  return;
2120 
2121  INIT_LIST_HEAD(&genpd->master_links);
2122  INIT_LIST_HEAD(&genpd->slave_links);
2123  INIT_LIST_HEAD(&genpd->dev_list);
2124  mutex_init(&genpd->lock);
2125  genpd->gov = gov;
2126  INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2127  genpd->in_progress = 0;
2128  atomic_set(&genpd->sd_count, 0);
2129  genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
2130  init_waitqueue_head(&genpd->status_wait_queue);
2131  genpd->poweroff_task = NULL;
2132  genpd->resume_count = 0;
2133  genpd->device_count = 0;
2134  genpd->max_off_time_ns = -1;
2135  genpd->max_off_time_changed = true;
2136  genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
2137  genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
2138  genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
2139  genpd->domain.ops.prepare = pm_genpd_prepare;
2140  genpd->domain.ops.suspend = pm_genpd_suspend;
2141  genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
2142  genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
2143  genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
2144  genpd->domain.ops.resume_early = pm_genpd_resume_early;
2145  genpd->domain.ops.resume = pm_genpd_resume;
2146  genpd->domain.ops.freeze = pm_genpd_freeze;
2147  genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
2148  genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
2149  genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
2150  genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
2151  genpd->domain.ops.thaw = pm_genpd_thaw;
2152  genpd->domain.ops.poweroff = pm_genpd_suspend;
2153  genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
2154  genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
2155  genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
2156  genpd->domain.ops.restore_early = pm_genpd_resume_early;
2157  genpd->domain.ops.restore = pm_genpd_resume;
2158  genpd->domain.ops.complete = pm_genpd_complete;
2159  genpd->dev_ops.save_state = pm_genpd_default_save_state;
2160  genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
2161  genpd->dev_ops.suspend = pm_genpd_default_suspend;
2162  genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2163  genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2164  genpd->dev_ops.resume = pm_genpd_default_resume;
2165  genpd->dev_ops.freeze = pm_genpd_default_freeze;
2166  genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2167  genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2168  genpd->dev_ops.thaw = pm_genpd_default_thaw;
2169  mutex_lock(&gpd_list_lock);
2170  list_add(&genpd->gpd_list_node, &gpd_list);
2171  mutex_unlock(&gpd_list_lock);
2172 }
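
Editor's note: an end-to-end sketch tying the pieces above together: a platform defines a domain, initializes it as powered off with pm_genpd_init(), and adds a device to it. The domain name, the power_on/power_off handlers and the device pointer are placeholders, not part of domain.c:

static int example_pd_power_on(struct generic_pm_domain *genpd)
{
	/* enable the power rail / clocks feeding the domain */
	return 0;
}

static int example_pd_power_off(struct generic_pm_domain *genpd)
{
	/* cut power to the domain */
	return 0;
}

static struct generic_pm_domain example_pd = {
	.name		= "example-pd",
	.power_on	= example_pd_power_on,
	.power_off	= example_pd_power_off,
};

static void __init example_pd_setup(struct device *example_dev)
{
	/* No governor, domain starts in the powered-off state. */
	pm_genpd_init(&example_pd, NULL, true);

	if (__pm_genpd_add_device(&example_pd, example_dev, NULL))
		dev_warn(example_dev, "cannot add device to %s\n",
			 example_pd.name);
}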