Linux Kernel 3.7.1
user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

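/*
 * A worked example, assuming CONFIG_BASE_SMALL=0 (the common case), so
 * UIDHASH_BITS = 7 and UIDHASH_MASK = 127: for uid 1000,
 *
 *	__uidhashfn(1000) = ((1000 >> 7) + 1000) & 127
 *	                  = (7 + 1000) & 127
 *	                  = 111
 *
 * i.e. that user's entry hangs off uidhash_table[111]. Folding the high
 * bits back in keeps UIDs that differ only above bit 7 (e.g. 0, 128,
 * 256) from all colliding in bucket 0.
 */
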
static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};
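
/*
 * root_user is statically allocated because the init task's credentials
 * point at it long before uid_cache_init() below has created uid_cachep;
 * the boot-time initcall only has to hook it into the hash table.
 */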

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;	/* cursor required by this era's 4-arg hlist_for_each_entry() */

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
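
/*
 * The reference (atomic_inc of __count) is taken while the caller still
 * holds uidhash_lock, so the returned user_struct cannot be freed by a
 * concurrent free_uid() before the caller gets to use it.
 */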

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}
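
/*
 * The entry is unhashed while uidhash_lock is still held, so no
 * concurrent uid_hash_find() can hand out a new reference; the keyring
 * puts and the slab free can then run safely with the lock dropped.
 */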

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
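
/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file): every successful find_user() is paired with a free_uid() to
 * drop the reference that uid_hash_find() took.
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... inspect user->processes, user->sigpending, etc. ...
 *		free_uid(user);
 *	}
 */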

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
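
/*
 * atomic_dec_and_lock() decrements __count and acquires uidhash_lock
 * only if the count reached zero, so the common-case put never touches
 * the lock. Interrupts are disabled by hand first because the helper
 * takes the lock with a plain spin_lock(), while uidhash_lock must
 * always be held with IRQs off (see the comment at its definition).
 */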

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/*
			 * Lost the race: "new" came from zalloc, so its
			 * keyrings are still NULL and key_put(NULL) is a
			 * no-op; just give the allocation back.
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:	/* no lock is held here; the label name is historical */
	return NULL;
}
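
/*
 * Illustrative use (a sketch with invented variable names; the real
 * callers, e.g. the setuid() paths, live elsewhere in the tree):
 * switching a task's UID means taking a reference on the new user
 * before dropping the old one.
 *
 *	struct user_struct *new_user = alloc_uid(kuid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	free_uid(old_user);
 */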

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
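
/*
 * user.c is always built in, so module_init() here expands to a plain
 * boot-time initcall: uid_cache_init() runs once during startup, before
 * userspace (and thus any setuid() caller) exists.
 */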