23 #include <linux/export.h>
32 #include <linux/time.h>
41 #include <linux/sched.h>
42 #include <linux/slab.h>
44 #include <asm/uaccess.h>
45 #include <asm/unistd.h>
46 #include <asm/div64.h>
47 #include <asm/timex.h>
50 #define CREATE_TRACE_POINTS
/* Timer-wheel geometry: a root level of TVR_SIZE slots plus four cascade
 * levels of TVN_SIZE slots each (the tv1..tv5 vectors referenced elsewhere
 * in this file); all sizes shrink when CONFIG_BASE_SMALL is set.  The
 * *_MASK values are power-of-two slot-index masks.  MAX_TVAL is the
 * largest expiry offset representable by the combined
 * TVR_BITS + 4*TVN_BITS bits of wheel resolution. */
60 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
61 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
62 #define TVN_SIZE (1 << TVN_BITS)
63 #define TVR_SIZE (1 << TVR_BITS)
64 #define TVN_MASK (TVN_SIZE - 1)
65 #define TVR_MASK (TVR_SIZE - 1)
66 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
94 static inline unsigned int tbase_get_deferrable(
struct tvec_base *base)
99 static inline unsigned int tbase_get_irqsafe(
struct tvec_base *base)
117 static unsigned long round_jiffies_common(
unsigned long j,
int cpu,
121 unsigned long original =
j;
142 if (rem <
HZ/4 && !force_up)
177 return round_jiffies_common(j, cpu,
false);
206 return round_jiffies_common(j + j0, cpu,
false) - j0;
264 return round_jiffies_common(j, cpu,
true);
283 return round_jiffies_common(j + j0, cpu,
true) - j0;
332 timer->
slack = slack_hz;
339 unsigned long expires = timer->
expires;
345 vec = base->
tv1.vec +
i;
348 vec = base->
tv2.vec +
i;
351 vec = base->
tv3.vec +
i;
354 vec = base->
tv4.vec +
i;
355 }
else if ((
signed long) idx < 0) {
372 vec = base->
tv5.vec +
i;
382 __internal_add_timer(base, timer);
386 if (!tbase_get_deferrable(timer->
base)) {
393 #ifdef CONFIG_TIMER_STATS
394 void __timer_stats_timer_set_start_info(
struct timer_list *timer,
void *
addr)
396 if (timer->start_site)
399 timer->start_site =
addr;
401 timer->start_pid =
current->pid;
404 static void timer_stats_account_timer(
struct timer_list *timer)
406 unsigned int flag = 0;
408 if (
likely(!timer->start_site))
411 flag |= TIMER_STATS_FLAG_DEFERRABLE;
414 timer->
function, timer->start_comm, flag);
/* No-op stub standing in for the CONFIG_TIMER_STATS accounting helper
 * defined above, so call sites need no conditional compilation —
 * presumably this is the matching #else branch; TODO confirm, the
 * surrounding preprocessor lines are not visible in this excerpt. */
418 static void timer_stats_account_timer(
struct timer_list *timer) {}
421 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
425 static void *timer_debug_hint(
void *addr)
427 return ((
struct timer_list *) addr)->function;
449 static void stub_timer(
unsigned long data)
459 static int timer_fixup_activate(
void *addr,
enum debug_obj_state state)
512 static int timer_fixup_assert_init(
void *addr,
enum debug_obj_state state)
536 .
name =
"timer_list",
537 .debug_hint = timer_debug_hint,
538 .fixup_init = timer_fixup_init,
539 .fixup_activate = timer_fixup_activate,
540 .fixup_free = timer_fixup_free,
541 .fixup_assert_init = timer_fixup_assert_init,
544 static inline void debug_timer_init(
struct timer_list *timer)
549 static inline void debug_timer_activate(
struct timer_list *timer)
554 static inline void debug_timer_deactivate(
struct timer_list *timer)
559 static inline void debug_timer_free(
struct timer_list *timer)
564 static inline void debug_timer_assert_init(
struct timer_list *timer)
569 static void do_init_timer(
struct timer_list *timer,
unsigned int flags,
572 void init_timer_on_stack_key(
struct timer_list *timer,
unsigned int flags,
576 do_init_timer(timer, flags, name, key);
580 void destroy_timer_on_stack(
struct timer_list *timer)
/* Empty debug hooks: each compiles to nothing, so the callers visible
 * below (debug_init, debug_activate, debug_deactivate, debug_assert_init)
 * can call them unconditionally — presumably the #else branch of
 * CONFIG_DEBUG_OBJECTS_TIMERS; TODO confirm, the matching #else/#endif
 * lines fall outside this excerpt. */
587 static inline void debug_timer_init(
struct timer_list *timer) { }
588 static inline void debug_timer_activate(
struct timer_list *timer) { }
589 static inline void debug_timer_deactivate(
struct timer_list *timer) { }
590 static inline void debug_timer_assert_init(
struct timer_list *timer) { }
593 static inline void debug_init(
struct timer_list *timer)
595 debug_timer_init(timer);
596 trace_timer_init(timer);
600 debug_activate(
struct timer_list *timer,
unsigned long expires)
602 debug_timer_activate(timer);
603 trace_timer_start(timer, expires);
606 static inline void debug_deactivate(
struct timer_list *timer)
608 debug_timer_deactivate(timer);
609 trace_timer_cancel(timer);
612 static inline void debug_assert_init(
struct timer_list *timer)
614 debug_timer_assert_init(timer);
617 static void do_init_timer(
struct timer_list *timer,
unsigned int flags,
623 timer->
base = (
void *)((
unsigned long)base |
flags);
625 #ifdef CONFIG_TIMER_STATS
626 timer->start_site =
NULL;
627 timer->start_pid = -1;
648 do_init_timer(timer, flags, name, key);
652 static inline void detach_timer(
struct timer_list *timer,
bool clear_pending)
656 debug_deactivate(timer);
658 __list_del(entry->
prev, entry->
next);
667 detach_timer(timer,
true);
668 if (!tbase_get_deferrable(timer->
base))
675 if (!timer_pending(timer))
678 detach_timer(timer, clear_pending);
679 if (!tbase_get_deferrable(timer->
base)) {
700 unsigned long *flags)
707 base = tbase_get_base(prelock_base);
713 spin_unlock_irqrestore(&base->
lock, *flags);
720 __mod_timer(
struct timer_list *timer,
unsigned long expires,
721 bool pending_only,
int pinned)
727 timer_stats_timer_set_start_info(timer);
730 base = lock_timer_base(timer, &flags);
732 ret = detach_if_pending(timer, base,
false);
733 if (!ret && pending_only)
736 debug_activate(timer, expires);
740 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
741 if (!pinned && get_sysctl_timer_migration() &&
idle_cpu(cpu))
742 cpu = get_nohz_timer_target();
744 new_base =
per_cpu(tvec_bases, cpu);
746 if (base != new_base) {
756 timer_set_base(timer,
NULL);
757 spin_unlock(&base->
lock);
759 spin_lock(&base->
lock);
760 timer_set_base(timer, base);
765 internal_add_timer(base, timer);
768 spin_unlock_irqrestore(&base->
lock, flags);
800 unsigned long apply_slack(
struct timer_list *timer,
unsigned long expires)
802 unsigned long expires_limit,
mask;
805 if (timer->
slack >= 0) {
806 expires_limit = expires + timer->
slack;
813 expires_limit = expires + delta / 256;
815 mask = expires ^ expires_limit;
821 mask = (1 <<
bit) - 1;
823 expires_limit = expires_limit & ~(
mask);
825 return expires_limit;
850 expires = apply_slack(timer, expires);
857 if (timer_pending(timer) && timer->
expires == expires)
885 if (timer->
expires == expires && timer_pending(timer))
888 return __mod_timer(timer, expires,
false,
TIMER_PINNED);
908 BUG_ON(timer_pending(timer));
925 timer_stats_timer_set_start_info(timer);
928 timer_set_base(timer, base);
929 debug_activate(timer, timer->
expires);
930 internal_add_timer(base, timer);
939 wake_up_idle_cpu(cpu);
940 spin_unlock_irqrestore(&base->
lock, flags);
961 debug_assert_init(timer);
963 timer_stats_timer_clear_start_info(timer);
964 if (timer_pending(timer)) {
965 base = lock_timer_base(timer, &flags);
966 ret = detach_if_pending(timer, base,
true);
967 spin_unlock_irqrestore(&base->
lock, flags);
987 debug_assert_init(timer);
989 base = lock_timer_base(timer, &flags);
992 timer_stats_timer_clear_start_info(timer);
993 ret = detach_if_pending(timer, base,
true);
995 spin_unlock_irqrestore(&base->
lock, flags);
1040 #ifdef CONFIG_LOCKDEP
1041 unsigned long flags;
1073 list_replace_init(tv->
vec + index, &tv_list);
1082 __internal_add_timer(base, timer);
1088 static void call_timer_fn(
struct timer_list *timer,
void (*
fn)(
unsigned long),
1093 #ifdef CONFIG_LOCKDEP
1101 struct lockdep_map lockdep_map;
1103 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1112 trace_timer_expire_entry(timer);
1114 trace_timer_expire_exit(timer);
1119 WARN_ONCE(1,
"timer: %pF preempt leak: %08x -> %08x\n",
1131 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1140 static inline void __run_timers(
struct tvec_base *base)
1144 spin_lock_irq(&base->
lock);
1154 (!cascade(base, &base->
tv2,
INDEX(0))) &&
1155 (!cascade(base, &base->
tv3,
INDEX(1))) &&
1156 !cascade(base, &base->
tv4,
INDEX(2)))
1157 cascade(base, &base->
tv5,
INDEX(3));
1159 list_replace_init(base->
tv1.vec + index, &work_list);
1160 while (!list_empty(head)) {
1168 irqsafe = tbase_get_irqsafe(timer->
base);
1170 timer_stats_account_timer(timer);
1173 detach_expired_timer(timer, base);
1176 spin_unlock(&base->
lock);
1177 call_timer_fn(timer,
fn, data);
1178 spin_lock(&base->
lock);
1180 spin_unlock_irq(&base->
lock);
1181 call_timer_fn(timer,
fn, data);
1182 spin_lock_irq(&base->
lock);
1187 spin_unlock_irq(&base->
lock);
1196 static unsigned long __next_timer_interrupt(
struct tvec_base *base)
1202 struct tvec *varray[4];
1205 index = slot = timer_jiffies &
TVR_MASK;
1208 if (tbase_get_deferrable(nte->
base))
1214 if (!index || slot < index)
1218 slot = (slot + 1) & TVR_MASK;
1219 }
while (slot != index);
1228 varray[0] = &base->
tv2;
1229 varray[1] = &base->
tv3;
1230 varray[2] = &base->
tv4;
1231 varray[3] = &base->
tv5;
1233 for (array = 0; array < 4; array++) {
1234 struct tvec *varp = varray[array];
1236 index = slot = timer_jiffies &
TVN_MASK;
1239 if (tbase_get_deferrable(nte->
base))
1252 if (!index || slot < index)
1256 slot = (slot + 1) & TVN_MASK;
1257 }
while (slot != index);
1270 static unsigned long cmp_next_hrtimer_event(
unsigned long now,
1271 unsigned long expires)
1275 unsigned long delta;
1283 if (hr_delta.
tv64 <= 0)
1286 tsdelta = ktime_to_timespec(hr_delta);
1293 if (delta > NEXT_TIMER_MAX_DELTA)
1326 spin_lock(&base->
lock);
1329 base->
next_timer = __next_timer_interrupt(base);
1332 spin_unlock(&base->
lock);
1337 return cmp_next_hrtimer_event(now, expires);
1355 #ifdef CONFIG_IRQ_WORK
1385 #ifdef __ARCH_WANT_SYS_ALARM
1409 return task_tgid_vnr(
current);
1453 static void process_timeout(
unsigned long __data)
1487 unsigned long expire;
1511 "value %lx\n", timeout);
1518 expire = timeout + jiffies;
1526 destroy_timer_on_stack(&timer);
1528 timeout = expire - jiffies;
1531 return timeout < 0 ? 0 : timeout;
1572 unsigned long mem_total, sav_total;
1573 unsigned int mem_unit, bitcount;
1603 while (mem_unit > 1) {
1606 sav_total = mem_total;
1608 if (mem_total < sav_total)
1645 static int __cpuinit init_timers_cpu(
int cpu)
1651 if (!tvec_base_done[cpu]) {
1652 static char boot_done;
1658 base = kmalloc_node(
sizeof(*base),
1665 if (tbase_get_deferrable(base)) {
1670 per_cpu(tvec_bases, cpu) = base;
1681 tvec_base_done[
cpu] = 1;
1683 base =
per_cpu(tvec_bases, cpu);
1689 INIT_LIST_HEAD(base->
tv5.vec + j);
1690 INIT_LIST_HEAD(base->
tv4.vec + j);
1691 INIT_LIST_HEAD(base->
tv3.vec + j);
1692 INIT_LIST_HEAD(base->
tv2.vec + j);
1695 INIT_LIST_HEAD(base->
tv1.vec + j);
1703 #ifdef CONFIG_HOTPLUG_CPU
1708 while (!list_empty(head)) {
1711 detach_timer(timer,
false);
1712 timer_set_base(timer, new_base);
1713 internal_add_timer(new_base, timer);
1717 static void __cpuinit migrate_timers(
int cpu)
1724 old_base =
per_cpu(tvec_bases, cpu);
1730 spin_lock_irq(&new_base->
lock);
1736 migrate_timer_list(new_base, old_base->
tv1.vec + i);
1738 migrate_timer_list(new_base, old_base->
tv2.vec + i);
1739 migrate_timer_list(new_base, old_base->
tv3.vec + i);
1740 migrate_timer_list(new_base, old_base->
tv4.vec + i);
1741 migrate_timer_list(new_base, old_base->
tv5.vec + i);
1744 spin_unlock(&old_base->
lock);
1745 spin_unlock_irq(&new_base->
lock);
1751 unsigned long action,
void *hcpu)
1753 long cpu = (
long)hcpu;
1759 err = init_timers_cpu(cpu);
1761 return notifier_from_errno(err);
1763 #ifdef CONFIG_HOTPLUG_CPU
1766 migrate_timers(cpu);
1776 .notifier_call = timer_cpu_notify,
1787 err = timer_cpu_notify(&timers_nb, (
unsigned long)
CPU_UP_PREPARE,
1791 BUG_ON(err != NOTIFY_OK);
1792 register_cpu_notifier(&timers_nb);
1818 while (timeout && !signal_pending(
current))
1825 static int __sched do_usleep_range(
unsigned long min,
unsigned long max)
1828 unsigned long delta;
1843 do_usleep_range(min, max);