Linux Kernel 3.7.1
process.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>


/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads.\n");
                no_idle_nap = 1;
        } else if (!strcmp(str, "halt"))
                no_idle_nap = 0;
        else
                return -1;

        return 0;
}
early_param("idle", idle_setup);
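/* e.g. boot with "idle=poll" to spin, or "idle=halt" (the default) to nap. */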

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        current_thread_info()->status |= TS_POLLING;

        if (no_idle_nap) {
                while (1) {
                        while (!need_resched())
                                cpu_relax();
                        schedule();
                }
        }

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter();
                rcu_idle_enter();
                while (!need_resched()) {
                        if (cpu_is_offline(cpu))
                                BUG();  /* no HOTPLUG_CPU */

                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        current_thread_info()->status &= ~TS_POLLING;
                        /*
                         * TS_POLLING-cleared state must be visible before we
                         * test NEED_RESCHED:
                         */
                        smp_mb();

                        if (!need_resched())
                                _cpu_idle();
                        else
                                local_irq_enable();
                        current_thread_info()->status |= TS_POLLING;
                }
                rcu_idle_exit();
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
}

/*
 * Release a thread_info structure
 */
void arch_release_thread_info(struct thread_info *info)
{
        struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
        /*
         * We free a thread_info from the context of the task that has
         * been scheduled next, so the original task is already dead.
         * Calling deactivate here just frees up the data structures.
         * If the task we're freeing held the last reference to a
         * hardwall fd, it would have been released prior to this point
         * anyway via exit_files(), and the hardwall_task.info pointers
         * would be NULL by now.
         */
        hardwall_deactivate_all(info->task);
#endif

        if (step_state) {

                /*
                 * FIXME: we don't munmap step_state->buffer
                 * because the mm_struct for this process (info->task->mm)
                 * has already been zeroed in exit_mm().  Keeping a
                 * reference to it here seems like a bad move, so this
                 * means we can't munmap() the buffer, and therefore if we
                 * ptrace multiple threads in a process, we will slowly
                 * leak user memory.  (Note that as soon as the last
                 * thread in a process dies, we will reclaim all user
                 * memory including single-step buffers in the usual way.)
                 * We should either assign a kernel VA to this buffer
                 * somehow, or we should associate the buffer(s) with the
                 * mm itself so we can clean them up that way.
                 */
                kfree(step_state);
        }
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long stack_size,
                struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        unsigned long ksp;

        /*
         * When creating a new kernel thread we pass sp as zero.
         * Assign it to a reasonable value now that we have the stack.
         */
        if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
                sp = KSTK_TOP(p);

        /*
         * Do not clone step state from the parent; each thread
         * must make its own lazily.
         */
        task_thread_info(p)->step_state = NULL;

        /*
         * Start new thread in ret_from_fork so it schedules properly
         * and then return from interrupt like the parent.
         */
        p->thread.pc = (unsigned long) ret_from_fork;

        /* Save user stack top pointer so we can ID the stack vm area later. */
        p->thread.usp0 = sp;

        /* Record the pid of the process that created this one. */
        p->thread.creator_pid = current->pid;

        /*
         * Copy the registers onto the kernel stack so the
         * return-from-interrupt code will reload it into registers.
         */
        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->regs[0] = 0;  /* return value is zero */
        childregs->sp = sp;      /* override with new user stack pointer */

        /*
         * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
         * which is passed in as arg #5 to sys_clone().
         */
        if (clone_flags & CLONE_SETTLS)
                childregs->tp = regs->regs[4];

        /*
         * Copy the callee-saved registers from the passed pt_regs struct
         * into the context-switch callee-saved registers area.
         * This way when we start the interrupt-return sequence, the
         * callee-save registers will be correctly in registers, which
         * is how we assume the compiler leaves them as we start doing
         * the normal return-from-interrupt path after calling C code.
         * Zero out the C ABI save area to mark the top of the stack.
         */
        ksp = (unsigned long) childregs;
        ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
        ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
        ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
        memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
               CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
        ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
        ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
        p->thread.ksp = ksp;
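
        /*
         * The child's kernel stack now looks like this, top of stack
         * at the highest address (layout follows directly from the
         * arithmetic above):
         *
         *     struct pt_regs (childregs)
         *     two-word C ABI save area, zeroed   <- interrupt-entry frame
         *     callee-saved register block
         *     two-word C ABI save area, zeroed   <- __switch_to() frame
         *     p->thread.ksp points here
         */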

#if CHIP_HAS_TILE_DMA()
        /*
         * No DMA in the new thread.  We model this on the fact that
         * fork() clears the pending signals, alarms, and aio for the child.
         */
        memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
        memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
        /* Likewise, the new thread is not running static processor code. */
        p->thread.sn_proc_running = 0;
        memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
        /* New thread has its miscellaneous processor state bits clear. */
        p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
        /* New thread does not own any networks. */
        memset(&p->thread.hardwall[0], 0,
               sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif

        /*
         * Start the new thread with the current architecture state
         * (user interrupt masks, etc.).
         */
        save_arch_state(&p->thread);

        return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
        static struct task_struct corrupt = { .comm = "<corrupt>" };
        struct task_struct *tsk = current;
        if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
                     (high_memory && (void *)tsk > high_memory) ||
                     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
                pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
                tsk = &corrupt;
        }
        return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
        struct task_struct *tsk = current;
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
                     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
                     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
        return prev;
}

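/* Copy the task's saved GP registers out for an ELF core dump; a nonzero
 * return means the registers were available. */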
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *ptregs = task_pt_regs(tsk);
        elf_core_copy_regs(regs, ptregs);
        return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
        unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
        unsigned long post_suspend_state;

        /* If we're running, suspend the engine. */
        if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

        /*
         * Wait for the engine to idle, then save regs.  Note that we
         * want to record the "running" bit from before suspension,
         * and the "done" bit from after, so that we can properly
         * distinguish a case where the user suspended the engine from
         * the case where the kernel suspended as part of the context
         * swap.
         */
        do {
                post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
        } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

        dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
        dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
        dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
        dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
        dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
        dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
        dma->byte = __insn_mfspr(SPR_DMA_BYTE);
        dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
                (post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
        const struct tile_dma_state *dma = &t->tile_dma_state;

        /*
         * The only way to restore the done bit is to run a zero
         * length transaction.
         */
        if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
            !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
                __insn_mtspr(SPR_DMA_BYTE, 0);
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                while (__insn_mfspr(SPR_DMA_USER_STATUS) &
                       SPR_DMA_STATUS__BUSY_MASK)
                        ;
        }

        __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
        __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
        __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
        __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
        __insn_mtspr(SPR_DMA_STRIDE, dma->strides);
        __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
        __insn_mtspr(SPR_DMA_BYTE, dma->byte);

        /*
         * Restart the engine if we were running and not done.
         * Clear a pending async DMA fault that we were waiting on return
         * to user space to execute, since we expect the DMA engine
         * to regenerate those faults for us now.  Note that we don't
         * try to clear the TIF_ASYNC_TLB flag, since it's relatively
         * harmless if set, and it covers both DMA and the SN processor.
         */
        if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
                t->dma_async_tlb.fault_num = 0;
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
        }
}

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
        t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
                ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
        t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
        t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
        t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
        t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
        t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
        t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
        t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
        t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
        t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
        t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
        t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
        __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
        __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
        __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
        __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
        __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
        __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
        __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
        __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
        __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}
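
/*
 * Save any extra per-task hardware state (the user DMA engine, the
 * static network processor) before the scheduler switches stacks;
 * invoked via the prepare_arch_switch() hook.
 */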
void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
        int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
        struct tile_dma_state *dma = &current->thread.tile_dma_state;
        if (dma->enabled)
                save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
        /*
         * Suspend the static network processor if it was running.
         * We do not suspend the fabric itself, just like we don't
         * try to suspend the UDN.
         */
        snctl = __insn_mfspr(SPR_SNCTL);
        current->thread.sn_proc_running =
                (snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
        if (current->thread.sn_proc_running)
                __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}

struct task_struct *__sched _switch_to(struct task_struct *prev,
                                       struct task_struct *next)
{
        /* DMA state is already saved; save off other arch state. */
        save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
        /*
         * Restore DMA in new task if desired.
         * Note that it is only safe to restart here since interrupts
         * are disabled, so we can't take any DMATLB miss or access
         * interrupts before we have finished switching stacks.
         */
        if (next->thread.tile_dma_state.enabled) {
                restore_tile_dma_state(&next->thread);
                grant_dma_mpls();
        } else {
                restrict_dma_mpls();
        }
#endif

        /* Restore other arch state. */
        restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
        /*
         * Restart static network processor in the new process
         * if it was running before.
         */
        if (next->thread.sn_proc_running) {
                int snctl = __insn_mfspr(SPR_SNCTL);
                __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
        }
#endif

#ifdef CONFIG_HARDWALL
        /* Enable or disable access to the network registers appropriately. */
        hardwall_switch_tasks(prev, next);
#endif

        /*
         * Switch kernel SP, PC, and callee-saved registers.
         * In the context of the new task, return the old task pointer
         * (i.e. the task that actually called __switch_to).
         * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
         */
        return __switch_to(prev, next, next_current_ksp0(next));
}

/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
        /* If we enter in kernel mode, do nothing and exit the caller loop. */
        if (!user_mode(regs))
                return 0;

        /* Enable interrupts; they are disabled again on return to caller. */
        local_irq_enable();

        if (thread_info_flags & _TIF_NEED_RESCHED) {
                schedule();
                return 1;
        }
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
        if (thread_info_flags & _TIF_ASYNC_TLB) {
                do_async_page_fault(regs);
                return 1;
        }
#endif
        if (thread_info_flags & _TIF_SIGPENDING) {
                do_signal(regs);
                return 1;
        }
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
                return 1;
        }
        if (thread_info_flags & _TIF_SINGLESTEP) {
                single_step_once(regs);
                return 0;
        }
        panic("work_pending: bad flags %#x\n", thread_info_flags);
}

/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                void __user *, parent_tidptr, void __user *, child_tidptr,
                struct pt_regs *, regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0,
                       parent_tidptr, child_tidptr);
}

/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE4(execve, const char __user *, path,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp,
                struct pt_regs *, regs)
{
        long error;
        struct filename *filename;

        filename = getname(path);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename->name, argv, envp, regs);
        putname(filename);
        if (error == 0)
                single_step_execve();
out:
        return error;
}

#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
                       compat_uptr_t __user *argv,
                       compat_uptr_t __user *envp,
                       struct pt_regs *regs)
{
        long error;
        struct filename *filename;

        filename = getname(path);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = compat_do_execve(filename->name, argv, envp, regs);
        putname(filename);
        if (error == 0)
                single_step_execve();
out:
        return error;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
        struct KBacktraceIterator kbt;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

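        /*
         * Walk the sleeping task's kernel backtrace; the first PC that
         * is not in scheduler code is where the task is waiting.
         */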
        for (KBacktraceIterator_init(&kbt, p, NULL);
             !KBacktraceIterator_end(&kbt);
             KBacktraceIterator_next(&kbt)) {
                if (!in_sched_functions(kbt.it.pc))
                        return kbt.it.pc;
        }

        return 0;
}

/*
 * We pass in lr as zero (cleared in kernel_thread) and the caller
 * part of the backtrace ABI on the stack also zeroed (in copy_thread)
 * so that backtraces will stop with this function.
 * Note that we don't use r0, since copy_thread() clears it.
 */
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
        do_exit(fn(arg));
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));
        regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0);  /* run at kernel PL, no ICS */
        regs.pc = (long) start_kernel_thread;
        regs.flags = PT_FLAGS_CALLER_SAVES;   /* need to restore r1 and r2 */
        regs.regs[1] = (long) fn;             /* function pointer */
        regs.regs[2] = (long) arg;            /* parameter register */

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
                       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

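/*
 * Illustrative use only ("worker" is a hypothetical caller-supplied
 * function, not part of this file):
 *
 *     static int worker(void *arg) { ... return 0; }
 *     ...
 *     pid_t pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * The new thread runs worker(NULL) via start_kernel_thread() above and
 * exits with its return value.
 */
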
/* Flush thread state. */
void flush_thread(void)
{
        /* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        /* Nothing */
}

void show_regs(struct pt_regs *regs)
{
        struct task_struct *tsk = validate_current();
        int i;

        pr_err("\n");
        pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
               tsk->pid, tsk->comm, smp_processor_id());
#ifdef __tilegx__
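        /* tilegx: print r0..r50 three per row, then r51/r52/tp and sp/lr. */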
        for (i = 0; i < 51; i += 3)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+1, regs->regs[i+1],
                       i+2, regs->regs[i+2]);
        pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
               regs->regs[51], regs->regs[52], regs->tp);
        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
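        /* tilepro: print r0..r51 four per row, then r52/tp/sp/lr. */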
        for (i = 0; i < 52; i += 4)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+1, regs->regs[i+1],
                       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
        pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
               regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
        pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
               regs->pc, regs->ex1, regs->faultnum);

        dump_stack_regs(regs);
}