Linux Kernel 3.7.1
wakeup.c
/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <[email protected]>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <trace/events/power.h>

#include "power.h"

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}
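
/*
 * Illustrative sketch, not part of the original file: with a 32-bit int,
 * IN_PROGRESS_BITS is 16, so the combined counter packs as
 * (cnt << 16) | inpr.  Activating a source does atomic_inc_return(),
 * i.e. inpr += 1; deactivating adds MAX_IN_PROGRESS (0xffff, which is
 * 0x10000 - 1), incrementing cnt and decrementing inpr in one atomic step.
 */
#if 0
static void combined_counter_demo(void)
{
	unsigned int cnt, inpr;

	atomic_set(&combined_event_count, 0);
	atomic_inc(&combined_event_count);	/* activate: cnt = 0, inpr = 1 */
	atomic_add(MAX_IN_PROGRESS, &combined_event_count); /* deactivate */
	split_counters(&cnt, &inpr);		/* now cnt = 1, inpr = 0 */
}
#endif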

/* A preserved old value of the events counter. */
static unsigned int saved_count;

static DEFINE_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(unsigned long data);

static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

/**
 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
 * @ws: Wakeup source to prepare.
 * @name: Pointer to the name of the new wakeup source.
 *
 * Callers must ensure that the @name string won't be freed when @ws is still in
 * use.
 */
void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
{
	if (ws) {
		memset(ws, 0, sizeof(*ws));
		ws->name = name;
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_prepare);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;

	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return NULL;

	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/**
 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
 * @ws: Wakeup source to prepare for destruction.
 */
void wakeup_source_drop(struct wakeup_source *ws)
{
	if (!ws)
		return;

	del_timer_sync(&ws->timer);
	__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;

	wakeup_source_drop(ws);
	kfree(ws->name);
	kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	spin_lock_init(&ws->lock);
	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
	ws->active = false;
	ws->last_time = ktime_get();

	spin_lock_irqsave(&events_lock, flags);
	list_add_rcu(&ws->entry, &wakeup_sources);
	spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	spin_unlock_irqrestore(&events_lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
	struct wakeup_source *ws;

	ws = wakeup_source_create(name);
	if (ws)
		wakeup_source_add(ws);

	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);

/**
 * wakeup_source_unregister - Remove wakeup source from the list and remove it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	if (ws) {
		wakeup_source_remove(ws);
		wakeup_source_destroy(ws);
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);

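/*
 * Illustrative sketch, not part of the original file: the typical lifecycle
 * of a standalone wakeup source for code that is not tied to a struct device.
 * foo_init()/foo_exit() are hypothetical.
 */
#if 0
static struct wakeup_source *foo_ws;

static int __init foo_init(void)
{
	foo_ws = wakeup_source_register("foo");	/* create and add to the list */
	return foo_ws ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	wakeup_source_unregister(foo_ws);	/* remove from list and destroy */
}
#endif
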
/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = wakeup_source_register(dev_name(dev));
	if (!ws)
		return -ENOMEM;

	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);

	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *ws;

	spin_lock_irq(&dev->power.lock);
	ws = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);
	return ws;
}

/**
 * device_wakeup_disable - Do not expect a device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup
 * source object and destroy it.
 */
int device_wakeup_disable(struct device *dev)
{
	struct wakeup_source *ws;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = device_wakeup_detach(dev);
	if (ws)
		wakeup_source_unregister(ws);

	return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep
 *	states.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
	if (!!dev->power.can_wakeup == !!capable)
		return;

	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
		if (capable) {
			if (wakeup_sysfs_add(dev))
				return;
		} else {
			wakeup_sysfs_remove(dev);
		}
	}
	dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled.  The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power
 * buttons, possibly network interfaces, etc.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
	int ret = 0;

	if (enable) {
		device_set_wakeup_capable(dev, true);
		ret = device_wakeup_enable(dev);
	} else {
		device_set_wakeup_capable(dev, false);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);

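/*
 * Illustrative sketch, not part of the original file: a driver typically
 * calls device_init_wakeup() from its probe path.  foo_probe() and the
 * platform device here are hypothetical.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Mark the device wakeup-capable and create its wakeup source. */
	ret = device_init_wakeup(&pdev->dev, true);
	if (ret)
		return ret;

	/* ... the rest of the driver's setup ... */
	return 0;
}
#endif
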
/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 * @enable: Whether to enable or disable @dev as a wakeup device.
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  When this period ends
 * depends on how the wakeup event is going to be processed after being
 * detected, and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user
 * space for further processing.  In that case the functional unit that has
 * detected the event may later "close" the "no suspend" period associated
 * with it directly as soon as it has been dealt with.  The pair of
 * pm_stay_awake() and pm_relax(), balanced with each other, is supposed to be
 * used in such situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify
 * time to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */

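/*
 * Illustrative sketch, not part of the original file: the two patterns
 * described above, as they might look in a driver's interrupt handler.
 * All foo_* names are hypothetical.
 */
#if 0
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_device *foo = data;

	if (foo_event_handled_locally(foo)) {
		/* First case: this unit processes the event completely. */
		pm_stay_awake(foo->dev);
		foo_process_event(foo);
		pm_relax(foo->dev);
	} else {
		/* Second case: processing ends elsewhere; allow 100 ms. */
		pm_wakeup_event(foo->dev, 100);
	}
	return IRQ_HANDLED;
}
#endif
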
/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the
 * PM core of the event by incrementing the counter of wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	unsigned int cec;

	ws->active = true;
	ws->active_count++;
	ws->last_time = ktime_get();
	if (ws->autosleep_enabled)
		ws->start_prevent_time = ws->last_time;

	/* Increment the counter of events in progress. */
	cec = atomic_inc_return(&combined_event_count);

	trace_wakeup_source_activate(ws->name, cec);
}

/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 */
static void wakeup_source_report_event(struct wakeup_source *ws)
{
	ws->event_count++;
	/* This is racy, but the counter is approximate anyway. */
	if (events_check_enabled)
		ws->wakeup_count++;

	if (!ws->active)
		wakeup_source_activate(ws);
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws);
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake() for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going to
 * be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
					     ktime_t now) {}
#endif

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	unsigned int cnt, inpr, cec;
	ktime_t duration;
	ktime_t now;

	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active. Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}

	ws->active = false;

	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;

	ws->last_time = now;
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	if (ws->autosleep_enabled)
		update_prevent_sleep_time(ws, now);

	/*
	 * Increment the counter of registered wakeup events and decrement the
	 * counter of wakeup events in progress simultaneously (adding
	 * MAX_IN_PROGRESS is the same as adding 1 << IN_PROGRESS_BITS and
	 * subtracting 1).
	 */
	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
	trace_wakeup_source_deactivate(ws->name, cec);

	split_counters(&cnt, &inpr);
	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
		wake_up(&wakeup_count_wait_queue);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call wakeup_source_deactivate() for the wakeup source whose address is
 * stored in @data if it is currently active, its timer has not been canceled
 * and the expiration time of the timer is not in the future.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
	struct wakeup_source *ws = (struct wakeup_source *)data;
	unsigned long flags;

	spin_lock_irqsave(&ws->lock, flags);

	if (ws->active && ws->timer_expires
	    && time_after_eq(jiffies, ws->timer_expires)) {
		wakeup_source_deactivate(ws);
		ws->expire_count++;
	}

	spin_unlock_irqrestore(&ws->lock, flags);
}

/**
 * __pm_wakeup_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel or user
 * space.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
	unsigned long flags;
	unsigned long expires;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws);

	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}

	expires = jiffies + msecs_to_jiffies(msec);
	if (!expires)
		expires = 1;

	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}

 unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_wakeup_event);

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Call __pm_wakeup_event() for the @dev's wakeup source object.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_wakeup_event(dev->power.wakeup, msec);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);

static void print_active_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int active = 0;
	struct wakeup_source *last_activity_ws = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pr_info("active wakeup source: %s\n", ws->name);
			active = 1;
		} else if (!active &&
			   (!last_activity_ws ||
			    ktime_to_ns(ws->last_time) >
			    ktime_to_ns(last_activity_ws->last_time))) {
			last_activity_ws = ws;
		}
	}

	if (!active && last_activity_ws)
		pr_info("last active wakeup source: %s\n",
			last_activity_ws->name);
	rcu_read_unlock();
}

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been
 * registered since the old value was stored.  Also return true if the current
 * number of wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		unsigned int cnt, inpr;

		split_counters(&cnt, &inpr);
		ret = (cnt != saved_count || inpr > 0);
		events_check_enabled = !ret;
	}
	spin_unlock_irqrestore(&events_lock, flags);

	if (ret)
		print_active_wakeup_sources();

	return ret;
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count.  If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero.  Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
	unsigned int cnt, inpr;

	if (block) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&wakeup_count_wait_queue, &wait,
					TASK_INTERRUPTIBLE);
			split_counters(&cnt, &inpr);
			if (inpr == 0 || signal_pending(current))
				break;

			schedule();
		}
		finish_wait(&wakeup_count_wait_queue, &wait);
	}

	split_counters(&cnt, &inpr);
	*count = cnt;
	return !inpr;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup
 *	events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events, enable wakeup events detection and
 * return 'true'.  Otherwise disable wakeup events detection and return
 * 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	unsigned int cnt, inpr;
	unsigned long flags;

	events_check_enabled = false;
	spin_lock_irqsave(&events_lock, flags);
	split_counters(&cnt, &inpr);
	if (cnt == count && inpr == 0) {
		saved_count = count;
		events_check_enabled = true;
	}
	spin_unlock_irqrestore(&events_lock, flags);
	return events_check_enabled;
}

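/*
 * Illustrative sketch, not part of the original file: pm_get_wakeup_count()
 * and pm_save_wakeup_count() back the /sys/power/wakeup_count interface.
 * A user-space suspend daemon reads the count, writes the same value back
 * (the write fails if wakeup events were registered in between), and only
 * then writes to /sys/power/state.  This is plain user-space C, shown here
 * only to document the protocol.
 */
#if 0
#include <stdio.h>

static int suspend_with_wakeup_check(void)
{
	unsigned int count;
	FILE *f = fopen("/sys/power/wakeup_count", "r+");

	if (!f || fscanf(f, "%u", &count) != 1)
		return -1;
	rewind(f);
	fprintf(f, "%u", count);
	if (fclose(f) != 0)
		return -1;	/* a wakeup event raced with us; retry */

	f = fopen("/sys/power/state", "w");
	if (!f)
		return -1;
	fprintf(f, "mem");
	return fclose(f);
}
#endif
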
#ifdef CONFIG_PM_AUTOSLEEP

/**
 * pm_wakep_autosleep_enabled - Set autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the flag.
 */
void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
			ws->autosleep_enabled = set;
			if (ws->active) {
				if (set)
					ws->start_prevent_time = now;
				else
					update_prevent_sleep_time(ws, now);
			}
		}
		spin_unlock_irq(&ws->lock);
	}
	rcu_read_unlock();
}
#endif /* CONFIG_PM_AUTOSLEEP */

static struct dentry *wakeup_sources_stats_dentry;

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	ktime_t prevent_sleep_time;
	int ret;

	spin_lock_irqsave(&ws->lock, flags);

	total_time = ws->total_time;
	max_time = ws->max_time;
	prevent_sleep_time = ws->prevent_sleep_time;
	active_count = ws->active_count;
	if (ws->active) {
		ktime_t now = ktime_get();

		active_time = ktime_sub(now, ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time.tv64 > max_time.tv64)
			max_time = active_time;

		if (ws->autosleep_enabled)
			prevent_sleep_time = ktime_add(prevent_sleep_time,
				ktime_sub(now, ws->start_prevent_time));
	} else {
		active_time = ktime_set(0, 0);
	}

	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
			"%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
			ws->name, active_count, ws->event_count,
			ws->wakeup_count, ws->expire_count,
			ktime_to_ms(active_time), ktime_to_ms(total_time),
			ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
			ktime_to_ms(prevent_sleep_time));

	spin_unlock_irqrestore(&ws->lock, flags);

	return ret;
}

/**
 * wakeup_sources_stats_show - Print statistics for all wakeup sources.
 * @m: seq_file to print the statistics into.
 * @unused: Unused.
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
	struct wakeup_source *ws;

	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
		"expire_count\tactive_since\ttotal_time\tmax_time\t"
		"last_change\tprevent_suspend_time\n");

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		print_wakeup_source_stats(m, ws);
	rcu_read_unlock();

	return 0;
}

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init wakeup_sources_debugfs_init(void)
{
	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);
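
/*
 * Note, not part of the original file: with debugfs mounted at the usual
 * location, the statistics printed by wakeup_sources_stats_show() are
 * readable from /sys/kernel/debug/wakeup_sources, one line per registered
 * wakeup source.
 */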