Linux Kernel 3.7.1
vtime.c
/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <[email protected]>
 */

#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/irq.h>
#include "entry.h"

static void virt_timer_expire(void);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

static inline u64 get_vtimer(void)
{
        u64 timer;

        asm volatile("stpt %0" : "=m" (timer));
        return timer;
}

static inline void set_vtimer(u64 expires)
{
        u64 timer;

        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value imm. afterwards */
                : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
}

static inline int virt_timer_forward(u64 elapsed)
{
        BUG_ON(!irqs_disabled());

        if (list_empty(&virt_timer_list))
                return 0;
        elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
        return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       stck    %1"     /* Store current tod clock value */
                : "=m" (S390_lowcore.last_update_timer),
                  "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
        ti->user_timer = S390_lowcore.user_timer;
        account_user_time(tsk, user, user);

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, hardirq_offset, system, system);

        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
                account_steal_time(steal);
        }

        return virt_timer_forward(user + system);
}
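
A worked example of the steal computation above, with illustrative numbers (not from the source): if the TOD clock advanced 10 ms since the last update while the deltas charged as user and system time come to 6 ms and 3 ms, steal_timer first grows by 10 ms and then has the 6 ms and 3 ms subtracted, so the remaining 1 ms is passed to account_steal_time() as time the hypervisor ran other work instead of this virtual CPU. A non-positive remainder is simply carried forward in steal_timer rather than accounted.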

void vtime_task_switch(struct task_struct *prev)
{
        struct thread_info *ti;

        do_account_vtime(prev, 0);
        ti = task_thread_info(prev);
        ti->user_timer = S390_lowcore.user_timer;
        ti->system_timer = S390_lowcore.system_timer;
        ti = task_thread_info(current);
        S390_lowcore.user_timer = ti->user_timer;
        S390_lowcore.system_timer = ti->system_timer;
}

void account_process_tick(struct task_struct *tsk, int user_tick)
{
        if (do_account_vtime(tsk, HARDIRQ_OFFSET))
                virt_timer_expire();
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
void vtime_account(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;

        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, 0, system, system);

        virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account);

void __kprobes vtime_stop_cpu(void)
{
        struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
        unsigned long long idle_time;
        unsigned long psw_mask;

        trace_hardirqs_on();
        /* Don't trace preempt off for idle. */
        stop_critical_timings();

        /* Wait for external, I/O or machine check interrupt. */
        psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        idle->nohz_delay = 0;

        /* Call the assembler magic in entry.S */
        psw_idle(idle, psw_mask);

        /* Reenable preemption tracer. */
        start_critical_timings();

        /* Account time spent with enabled wait psw loaded as idle time. */
        idle->sequence++;
        smp_wmb();
        idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
        idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
        idle->idle_time += idle_time;
        idle->idle_count++;
        account_idle_time(idle_time);
        smp_wmb();
        idle->sequence++;
}

unsigned long long s390_get_idle_time(int cpu)
{
        struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
        unsigned long long now, idle_enter, idle_exit;
        unsigned int sequence;

        do {
                now = get_clock();
                sequence = ACCESS_ONCE(idle->sequence);
                idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
                idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
        } while ((sequence & 1) || (idle->sequence != sequence));
        return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
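
The retry loop above pairs with the sequence updates in vtime_stop_cpu(): the writer bumps idle->sequence to an odd value before touching the idle timestamps and back to an even value afterwards, so a reader that observes an odd or changed counter discards its snapshot and retries. A minimal user-space sketch of the same pattern (hypothetical code, not from the kernel, which orders the accesses with smp_wmb()/ACCESS_ONCE rather than C11 atomics):

/* --- example (not part of vtime.c) --- */
#include <stdatomic.h>

struct idle_snapshot {
        atomic_uint sequence;           /* even: stable, odd: update in flight */
        unsigned long long enter, exit;
};

static void writer_update(struct idle_snapshot *s,
                          unsigned long long enter, unsigned long long exit)
{
        atomic_fetch_add(&s->sequence, 1);      /* make the counter odd */
        s->enter = enter;
        s->exit = exit;
        atomic_fetch_add(&s->sequence, 1);      /* make it even again */
}

static unsigned long long reader_idle_time(struct idle_snapshot *s)
{
        unsigned int seq;
        unsigned long long enter, exit;

        do {    /* retry until a stable, unchanged pair was read */
                seq = atomic_load(&s->sequence);
                enter = s->enter;
                exit = s->exit;
        } while ((seq & 1) || atomic_load(&s->sequence) != seq);
        return exit - enter;
}
/* --- end example --- */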

/*
 * Sorted add to a list. The list is searched linearly until the
 * first bigger element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *tmp;

        list_for_each_entry(tmp, head, entry) {
                if (tmp->expires > timer->expires) {
                        list_add_tail(&timer->entry, &tmp->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}
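
Because list_add_tail() inserts in front of the element it is given, placing the new timer before the first entry with a larger expiry keeps virt_timer_list in ascending order, and timers with equal expiry queue in FIFO order. A stand-alone user-space sketch of the same insert-before-first-bigger technique (hypothetical code, not from the kernel):

/* --- example (not part of vtime.c) --- */
#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned long long expires;
        struct node *next;
};

static void insert_sorted(struct node **head, struct node *n)
{
        struct node **pp = head;

        while (*pp && (*pp)->expires <= n->expires)
                pp = &(*pp)->next;      /* stop at first bigger element */
        n->next = *pp;
        *pp = n;
}

int main(void)
{
        unsigned long long v[] = { 30, 10, 20 };
        struct node *head = NULL, *n;
        int i;

        for (i = 0; i < 3; i++) {
                n = malloc(sizeof(*n));
                n->expires = v[i];
                insert_sorted(&head, n);
        }
        for (n = head; n; n = n->next)
                printf("%llu\n", n->expires);   /* prints 10 20 30 */
        return 0;
}
/* --- end example --- */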

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
        struct vtimer_list *timer, *tmp;
        unsigned long elapsed;
        LIST_HEAD(cb_list);

        /* walk timer list, fire all expired timers */
        spin_lock(&virt_timer_lock);
        elapsed = atomic64_read(&virt_timer_elapsed);
        list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
                if (timer->expires < elapsed)
                        /* move expired timer to the callback queue */
                        list_move_tail(&timer->entry, &cb_list);
                else
                        timer->expires -= elapsed;
        }
        if (!list_empty(&virt_timer_list)) {
                timer = list_first_entry(&virt_timer_list,
                                         struct vtimer_list, entry);
                atomic64_set(&virt_timer_current, timer->expires);
        }
        atomic64_sub(elapsed, &virt_timer_elapsed);
        spin_unlock(&virt_timer_lock);

        /* Do callbacks and recharge periodic timers */
        list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
                list_del_init(&timer->entry);
                timer->function(timer->data);
                if (timer->interval) {
                        /* Recharge interval timer */
                        timer->expires = timer->interval +
                                atomic64_read(&virt_timer_elapsed);
                        spin_lock(&virt_timer_lock);
                        list_add_sorted(timer, &virt_timer_list);
                        spin_unlock(&virt_timer_lock);
                }
        }
}

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
        if (list_empty(&virt_timer_list)) {
                /* First timer, just program it. */
                atomic64_set(&virt_timer_current, timer->expires);
                atomic64_set(&virt_timer_elapsed, 0);
                list_add(&timer->entry, &virt_timer_list);
        } else {
                /* Update timer against current base. */
                timer->expires += atomic64_read(&virt_timer_elapsed);
                if (likely((s64) timer->expires <
                           (s64) atomic64_read(&virt_timer_current)))
                        /* The new timer expires before the current timer. */
                        atomic64_set(&virt_timer_current, timer->expires);
                /* Insert new timer into the list. */
                list_add_sorted(timer, &virt_timer_list);
        }
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
        unsigned long flags;

        timer->interval = periodic ? timer->expires : 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
        __add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
        __add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

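A hypothetical usage sketch of the interface exported above (illustrative only: the callback, names and expiry value are assumptions, and expires is a cpu-time delta in CPU-timer units, so it counts only time this virtual CPU actually executes):

/* --- example (not part of vtime.c) --- */
static void my_vtimer_fn(unsigned long data)
{
        /* invoked from the cpu-timer accounting path once the virtual
         * CPU has consumed the requested amount of cpu time */
}

static struct vtimer_list my_timer;

static void my_vtimer_start(u64 cputime_delta)
{
        init_virt_timer(&my_timer);
        my_timer.function = my_vtimer_fn;       /* must be set before adding */
        my_timer.data = 0;
        my_timer.expires = cputime_delta;       /* delta in CPU-timer units */
        add_virt_timer(&my_timer);              /* one-shot */
        /* add_virt_timer_periodic(&my_timer) would instead re-arm the
         * timer every cputime_delta units of consumed cpu time */
}
/* --- end example --- */
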
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
        unsigned long flags;
        int rc;

        BUG_ON(!timer->function);

        if (timer->expires == expires && vtimer_pending(timer))
                return 1;
        spin_lock_irqsave(&virt_timer_lock, flags);
        rc = vtimer_pending(timer);
        if (rc)
                list_del_init(&timer->entry);
        timer->interval = periodic ? expires : 0;
        timer->expires = expires;
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;

        if (!vtimer_pending(timer))
                return 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        list_del_init(&timer->entry);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

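Continuing the hypothetical sketch above: re-arming and cancelling follow the return conventions documented for mod_virt_timer() and del_virt_timer(), and the timer's function pointer must already be set before a mod call (it is checked with BUG_ON):

/* --- example (not part of vtime.c) --- */
static void my_vtimer_update(struct vtimer_list *t, u64 new_delta)
{
        /* re-arms t as a one-shot timer, adding it if it was not
         * pending; returns 1 only if a pending timer was modified */
        if (!mod_virt_timer(t, new_delta))
                pr_debug("timer was not pending, armed fresh\n");
}

static void my_vtimer_cancel(struct vtimer_list *t)
{
        if (del_virt_timer(t))
                pr_debug("cancelled a still-pending timer\n");
}
/* --- end example --- */
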
/*
 * Start the virtual CPU timer on the current CPU.
 */
void __cpuinit init_cpu_vtimer(void)
{
        /* set initial cpu timer */
        set_vtimer(VTIMER_MAX_SLICE);
}

static int __cpuinit s390_nohz_notify(struct notifier_block *self,
                                      unsigned long action, void *hcpu)
{
        struct s390_idle_data *idle;
        long cpu = (long) hcpu;

        idle = &per_cpu(s390_idle, cpu);
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DYING:
                idle->nohz_delay = 0;
        default:
                break;
        }
        return NOTIFY_OK;
}

void __init vtime_init(void)
{
        /* Enable cpu timer interrupts on the boot cpu. */
        init_cpu_vtimer();
        cpu_notifier(s390_nohz_notify, 0);
}