Linux Kernel 3.7.1
pid.c
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 * (C) 2007 Pavel Emelyanov <[email protected]>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <[email protected]>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
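
/*
 * pid_hashfn() folds the namespace pointer into the hash key as a plain
 * integer, so the same numeric nr in two different pid namespaces will
 * normally land in two different buckets; find_pid_ns() below still
 * compares both nr and ns, so at worst a collision costs one extra
 * list-walk step.
 */
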
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
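
/*
 * Sizing, by example (assuming 4 KB pages): BITS_PER_PAGE == 32768, so
 * pidmap[0] alone covers the default pid_max of 32768. The 64-bit
 * PID_MAX_LIMIT of 4 * 1024 * 1024 PIDs needs 4194304 / 32768 == 128
 * bitmap pages, which is where the "4 million PIDs" figure in the
 * comment below comes from.
 */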

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
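
/*
 * Worked example of the pid <-> (map, offset) mapping, again with 4 KB
 * pages: pid 32773 lives in bitmap page 32773 / 32768 == 1 at bit offset
 * 32773 & BITS_PER_PAGE_MASK == 5, and mk_pid(pid_ns, &pid_ns->pidmap[1], 5)
 * reconstructs 1 * 32768 + 5 == 32773. free_pidmap() below performs
 * exactly this inverse split.
 */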

#define find_next_offset(map, off)	\
	find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
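
/*
 * The interleaving described above, sketched out. CPU 0 is in
 * detach_pid()->free_pid() with the tasklist_lock write-held; CPU 1 has
 * taken pidmap_lock without disabling interrupts:
 *
 *	CPU 0					CPU 1
 *	write_lock_irq(&tasklist_lock);
 *						spin_lock(&pidmap_lock);
 *	spin_lock(&pidmap_lock);		<interrupt>
 *	  ... spins ...				  read_lock(&tasklist_lock);
 *						  ... spins ...
 *
 * Neither CPU can ever make progress, which is why every acquisition of
 * pidmap_lock in this file uses the _irq/_irqsave variants.
 */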

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
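
/*
 * Worked example of the circular comparison: with base == 30000,
 * a == 31000 and b == 200 (a pid handed out after the counter wrapped),
 * (unsigned)(a - base) == 1000 while (unsigned)(b - base) wraps to a
 * value near UINT_MAX, so pid_before() says 31000 is seen before 200
 * when walking upward from 30000, exactly the order in which the
 * allocator would have produced them.
 */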

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
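
/*
 * A short trace of the loop above (the pid values are made up): we
 * observed base == 100 and allocated pid == 105, but another CPU has
 * meanwhile stored last_pid == 103. cmpxchg(&last_pid, 100, 105) fails
 * and returns 103; pid_before(100, 103, 105) is true, i.e. 103 is the
 * "earlier" value, so we retry with cmpxchg(&last_pid, 103, 105), which
 * succeeds. Had the other CPU stored 110 instead, pid_before() would be
 * false and the loop would stop, letting the later value stand.
 */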

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			} while (offset < BITS_PER_PAGE && pid < pid_max);
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
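
/*
 * Wrap-around behaviour, by example: with pid_max == 32768 and
 * last == 32767, the next search starts at pid == RESERVED_PIDS (300)
 * rather than at 0, so the low PIDs handed out at boot are never
 * recycled for ordinary tasks once the counter wraps.
 */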

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
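
/*
 * Note the fast path above: if pid->count is already 1 the caller holds
 * the only remaining reference (nobody else can legitimately take a new
 * one), so the pid can be freed without paying for an atomic decrement.
 */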
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	for ( ; upid >= pid->numbers; --upid)
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
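
/*
 * An illustration of the multi-level result (the nr values are made up):
 * for a namespace with ns->level == 1, the loop above fills
 * pid->numbers[1] with, say, { .nr = 5, .ns = child_ns } and
 * pid->numbers[0] with { .nr = 1234, .ns = &init_pid_ns }, so the same
 * task is visible under a different numeric pid at each level. On the
 * out_free path, i is the level whose alloc_pidmap() failed, so
 * "while (++i <= ns->level)" releases exactly the levels that had been
 * allocated, skipping the failed level itself.
 */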

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);
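
/*
 * A typical lookup under RCU (a usage sketch, not code from this file):
 *
 *	rcu_read_lock();
 *	pid = find_pid_ns(1, &init_pid_ns);	(init's struct pid)
 *	task = pid_task(pid, PIDTYPE_PID);
 *	rcu_read_unlock();
 *
 * The container_of() above relies on the upid for 'ns' sitting at index
 * ns->level of pid->numbers[], which alloc_pid() guarantees.
 */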
struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
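
/*
 * Put differently: a pid is only freed once no task references it under
 * any pid_type, so a pid that is still in use as a process group or
 * session id stays allocated even after the task that owned it as
 * PIDTYPE_PID has been detached.
 */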

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);
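
/*
 * pid_task() returns the task without taking a reference; the result is
 * only stable for as long as the caller's RCU read section (or the
 * tasklist_lock) is held. get_pid_task() below is the variant that
 * returns a properly referenced task_struct.
 */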

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
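
/*
 * Continuing the alloc_pid() illustration: for a level-1 pid whose
 * numbers[] hold {1234, &init_pid_ns} and {5, child_ns},
 * pid_nr_ns(pid, &init_pid_ns) returns 1234, pid_nr_ns(pid, child_ns)
 * returns 5, and any namespace that is deeper than pid->level or not on
 * the pid's ancestry path gets 0, meaning "not visible here".
 */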

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = current->nsproxy->pid_ns;
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
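
/*
 * The proc readdir pattern this enables, roughly sketched: starting from
 * nr == 0 and repeatedly calling find_ge_pid(pid_vnr(pid) + 1, ns) walks
 * every live pid in the namespace in numeric order, with next_pidmap()
 * using the bitmap to skip unallocated ranges.
 */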

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}
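
/*
 * Concretely (assuming 64-bit pointers): the minimum table is
 * 1 << 4 == 16 hlist heads, and the 4096-slot ceiling corresponds to
 * pidhash_shift == 12, i.e. 4096 * sizeof(struct hlist_head) == 32 KB of
 * bucket heads for the whole system.
 */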

void __init pidmap_init(void)
{
	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}