Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
process.c
Go to the documentation of this file.
1 /* MN10300 Process handling code
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells ([email protected])
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public Licence
8  * as published by the Free Software Foundation; either version
9  * 2 of the Licence, or (at your option) any later version.
10  */
11 #include <linux/module.h>
12 #include <linux/errno.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/smp.h>
17 #include <linux/stddef.h>
18 #include <linux/unistd.h>
19 #include <linux/ptrace.h>
20 #include <linux/user.h>
21 #include <linux/interrupt.h>
22 #include <linux/delay.h>
23 #include <linux/reboot.h>
24 #include <linux/percpu.h>
25 #include <linux/err.h>
26 #include <linux/fs.h>
27 #include <linux/slab.h>
28 #include <linux/rcupdate.h>
29 #include <asm/uaccess.h>
30 #include <asm/pgtable.h>
31 #include <asm/io.h>
32 #include <asm/processor.h>
33 #include <asm/mmu_context.h>
34 #include <asm/fpu.h>
35 #include <asm/reset-regs.h>
36 #include <asm/gdb-stub.h>
37 #include "internal.h"
38 
/*
 * power management idle function, if any..
 */
/* NOTE(review): the declaration below was dropped by the doc scrape;
 * cpu_idle() reads pm_idle, so the hook must be defined here — confirm
 * against the original tree whether it was also EXPORT_SYMBOL'd. */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
45 /*
46  * return saved PC of a blocked thread.
47  */
48 unsigned long thread_saved_pc(struct task_struct *tsk)
49 {
50  return ((unsigned long *) tsk->thread.sp)[3];
51 }
52 
/*
 * power off function, if any
 */
/* NOTE(review): declaration dropped by the doc scrape; the standard
 * kernel pattern is a board-settable hook exported for drivers —
 * confirm exact lines against the original tree. */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 * - IRQs are disabled around the need_resched() test so the halt cannot
 *   race with a wakeup IPI; safe_halt() re-enables interrupts and halts
 *   atomically, and the else branch must re-enable them by hand
 */
static void default_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}

#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
100 
101 /*
102  * the idle thread
103  * - there's no useful work to be done, so just try to conserve power and have
104  * a low exit latency (ie sit in a loop waiting for somebody to say that
105  * they'd like to reschedule)
106  */
107 void cpu_idle(void)
108 {
109  /* endless idle loop with no priority at all */
110  for (;;) {
111  rcu_idle_enter();
112  while (!need_resched()) {
113  void (*idle)(void);
114 
115  smp_rmb();
116  idle = pm_idle;
117  if (!idle) {
118 #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
119  idle = poll_idle;
120 #else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
121  idle = default_idle;
122 #endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
123  }
124  idle();
125  }
126  rcu_idle_exit();
127 
129  }
130 }
131 
/* MN10300 keeps no per-mm segment descriptors, so there is nothing to
 * release here; this exists only to satisfy the generic mm interface */
void release_segments(struct mm_struct *mm)
{
}
135 
/*
 * restart the machine
 * - detach the kernel debugger first, then hit the strongest reset
 *   mechanism the unit provides
 */
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	/* the unit-level reset call was dropped by the doc scrape; without
	 * it this #ifdef arm would be empty */
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}
148 
/* halt the machine: just detach the kernel debugger cleanly; there is no
 * hardware halt action here, the caller simply stops scheduling */
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
155 
/*
 * power the machine off
 * - the signature line was dropped by the doc scrape; restored to match
 *   the standard machine_restart/machine_halt/machine_power_off triple
 * - only detaches the kernel debugger; no power-off hardware hook is
 *   invoked here
 */
void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
162 
/* dump the register file for debugging; deliberately a no-op on this
 * architecture */
void show_regs(struct pt_regs *regs)
{
}
166 
/*
 * free current thread data structures etc..
 * - only the lazily-saved FPU state needs discarding on this arch
 */
void exit_thread(void)
{
	exit_fpu();
}
174 
/* reset thread state across exec(); only the FPU state needs flushing */
void flush_thread(void)
{
	flush_fpu();
}
179 
/* release any arch-private resources of a dead task; nothing to do here */
void release_thread(struct task_struct *dead_task)
{
}
183 
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
/* NOTE(review): the signature line was dropped by the doc scrape;
 * restored from the conventional copy_segments() stub — confirm the
 * exact parameter list against the original tree. */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}
191 
192 /*
193  * this gets called so that we can store lazy state into memory and copy the
194  * current task into the new thread.
195  */
197 {
198  unlazy_fpu(src);
199  *dst = *src;
200  return 0;
201 }
202 
/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 * - kregs == NULL means a kernel thread: c_usp carries the function
 *   pointer and ustk_size its argument (see the unlikely() branch)
 * - otherwise kregs is the parent's user exception frame to clone
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p, struct pt_regs *kregs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	/* start from the top of the child's kernel stack page */
	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12; /* allocate function call ABI slack */

	/* set up things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(!kregs)) {
		/* kernel thread: build a zeroed frame that hands the
		 * function/argument pair to ret_from_kernel_thread */
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	/* user thread: clone the parent's frame, then point it at the
	 * child's user stack */
	*c_regs = *kregs;
	c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* my FPU */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}
251 
/*
 * clone a process
 * - tlsptr is retrieved by copy_thread() from current_frame()->d3
 * - a zero newsp means "reuse the caller's stack" (the ?: fallback)
 */
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
			  int __user *parent_tidptr, int __user *child_tidptr,
			  int __user *tlsptr)
{
	return do_fork(clone_flags, newsp ?: current_frame()->sp,
		       current_frame(), 0, parent_tidptr, child_tidptr);
}
263 
265 {
266  return do_fork(SIGCHLD, current_frame()->sp,
267  current_frame(), 0, NULL, NULL);
268 }
269 
271 {
272  return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
273  current_frame(), 0, NULL, NULL);
274 }
275 
/* report where a blocked task is sleeping; copy_thread() snapshots the
 * scheduling PC into thread.wchan, so just return that */
unsigned long get_wchan(struct task_struct *p)
{
	return p->thread.wchan;
}