Linux Kernel 3.7.1
gc.c
/*
 * security/tomoyo/gc.c
 *
 * Copyright (C) 2005-2011 NTT DATA CORPORATION
 */

#include "common.h"
#include <linux/kthread.h>
#include <linux/slab.h>

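/**
 * tomoyo_memory_free - Free memory used for policy elements.
 *
 * @ptr: Pointer to allocated memory.
 *
 * Returns nothing.
 */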
static inline void tomoyo_memory_free(void *ptr)
{
	tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
	kfree(ptr);
}

/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);

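/**
 * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by an open /sys/kernel/security/tomoyo/ interface or not.
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns true if @element is referenced by a "struct tomoyo_io_buffer",
 * false otherwise.
 */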
static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
{
	struct tomoyo_io_buffer *head;
	bool in_use = false;

	spin_lock(&tomoyo_io_buffer_list_lock);
	list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
		head->users++;
		spin_unlock(&tomoyo_io_buffer_list_lock);
		mutex_lock(&head->io_sem);
		if (head->r.domain == element || head->r.group == element ||
		    head->r.acl == element || &head->w.domain->list == element)
			in_use = true;
		mutex_unlock(&head->io_sem);
		spin_lock(&tomoyo_io_buffer_list_lock);
		head->users--;
		if (in_use)
			break;
	}
	spin_unlock(&tomoyo_io_buffer_list_lock);
	return in_use;
}

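/**
 * tomoyo_name_used_by_io_buffer - Check whether the string is used by an open /sys/kernel/security/tomoyo/ interface or not.
 *
 * @string: String to check.
 *
 * Returns true if @string is referenced by a "struct tomoyo_io_buffer",
 * false otherwise.
 */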
static bool tomoyo_name_used_by_io_buffer(const char *string)
{
	struct tomoyo_io_buffer *head;
	const size_t size = strlen(string) + 1;
	bool in_use = false;

	spin_lock(&tomoyo_io_buffer_list_lock);
	list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
		int i;
		head->users++;
		spin_unlock(&tomoyo_io_buffer_list_lock);
		mutex_lock(&head->io_sem);
		for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
			const char *w = head->r.w[i];
			if (w < string || w > string + size)
				continue;
			in_use = true;
			break;
		}
		mutex_unlock(&head->io_sem);
		spin_lock(&tomoyo_io_buffer_list_lock);
		head->users--;
		if (in_use)
			break;
	}
	spin_unlock(&tomoyo_io_buffer_list_lock);
	return in_use;
}

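/**
 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */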
static inline void tomoyo_del_transition_control(struct list_head *element)
{
	struct tomoyo_transition_control *ptr =
		container_of(element, typeof(*ptr), head.list);
	tomoyo_put_name(ptr->domainname);
	tomoyo_put_name(ptr->program);
}

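/**
 * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */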
static inline void tomoyo_del_aggregator(struct list_head *element)
{
	struct tomoyo_aggregator *ptr =
		container_of(element, typeof(*ptr), head.list);
	tomoyo_put_name(ptr->original_name);
	tomoyo_put_name(ptr->aggregated_name);
}

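/**
 * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */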
static inline void tomoyo_del_manager(struct list_head *element)
{
	struct tomoyo_manager *ptr =
		container_of(element, typeof(*ptr), head.list);
	tomoyo_put_name(ptr->manager);
}

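/**
 * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */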
static void tomoyo_del_acl(struct list_head *element)
{
	struct tomoyo_acl_info *acl =
		container_of(element, typeof(*acl), list);
	tomoyo_put_condition(acl->cond);
	switch (acl->type) {
	case TOMOYO_TYPE_PATH_ACL:
		{
			struct tomoyo_path_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name);
		}
		break;
	case TOMOYO_TYPE_PATH2_ACL:
		{
			struct tomoyo_path2_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name1);
			tomoyo_put_name_union(&entry->name2);
		}
		break;
	case TOMOYO_TYPE_PATH_NUMBER_ACL:
		{
			struct tomoyo_path_number_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name);
			tomoyo_put_number_union(&entry->number);
		}
		break;
	case TOMOYO_TYPE_MKDEV_ACL:
		{
			struct tomoyo_mkdev_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name);
			tomoyo_put_number_union(&entry->mode);
			tomoyo_put_number_union(&entry->major);
			tomoyo_put_number_union(&entry->minor);
		}
		break;
	case TOMOYO_TYPE_MOUNT_ACL:
		{
			struct tomoyo_mount_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->dev_name);
			tomoyo_put_name_union(&entry->dir_name);
			tomoyo_put_name_union(&entry->fs_type);
			tomoyo_put_number_union(&entry->flags);
		}
		break;
	case TOMOYO_TYPE_ENV_ACL:
		{
			struct tomoyo_env_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name(entry->env);
		}
		break;
	case TOMOYO_TYPE_INET_ACL:
		{
			struct tomoyo_inet_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_group(entry->address.group);
			tomoyo_put_number_union(&entry->port);
		}
		break;
	case TOMOYO_TYPE_UNIX_ACL:
		{
			struct tomoyo_unix_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->name);
		}
		break;
	case TOMOYO_TYPE_MANUAL_TASK_ACL:
		{
			struct tomoyo_task_acl *entry =
				container_of(acl, typeof(*entry), head);
			tomoyo_put_name(entry->domainname);
		}
		break;
	}
}

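/**
 * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */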
static inline void tomoyo_del_domain(struct list_head *element)
{
	struct tomoyo_domain_info *domain =
		container_of(element, typeof(*domain), list);
	struct tomoyo_acl_info *acl;
	struct tomoyo_acl_info *tmp;
	/*
	 * Since this domain is referenced from neither
	 * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
	 * elements without checking for is_deleted flag.
	 */
	list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
		tomoyo_del_acl(&acl->list);
		tomoyo_memory_free(acl);
	}
	tomoyo_put_name(domain->domainname);
}

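/**
 * tomoyo_del_condition - Delete members in "struct tomoyo_condition".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */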
void tomoyo_del_condition(struct list_head *element)
{
	struct tomoyo_condition *cond = container_of(element, typeof(*cond),
						     head.list);
	const u16 condc = cond->condc;
	const u16 numbers_count = cond->numbers_count;
	const u16 names_count = cond->names_count;
	const u16 argc = cond->argc;
	const u16 envc = cond->envc;
	unsigned int i;
	const struct tomoyo_condition_element *condp
		= (const struct tomoyo_condition_element *) (cond + 1);
	struct tomoyo_number_union *numbers_p
		= (struct tomoyo_number_union *) (condp + condc);
	struct tomoyo_name_union *names_p
		= (struct tomoyo_name_union *) (numbers_p + numbers_count);
	const struct tomoyo_argv *argv
		= (const struct tomoyo_argv *) (names_p + names_count);
	const struct tomoyo_envp *envp
		= (const struct tomoyo_envp *) (argv + argc);
	for (i = 0; i < numbers_count; i++)
		tomoyo_put_number_union(numbers_p++);
	for (i = 0; i < names_count; i++)
		tomoyo_put_name_union(names_p++);
	for (i = 0; i < argc; argv++, i++)
		tomoyo_put_name(argv->value);
	for (i = 0; i < envc; envp++, i++) {
		tomoyo_put_name(envp->name);
		tomoyo_put_name(envp->value);
	}
}

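/**
 * tomoyo_del_name - Delete members in "struct tomoyo_name".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */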
static inline void tomoyo_del_name(struct list_head *element)
{
	/* Nothing to do. */
}

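/**
 * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */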
static inline void tomoyo_del_path_group(struct list_head *element)
{
	struct tomoyo_path_group *member =
		container_of(element, typeof(*member), head.list);
	tomoyo_put_name(member->member_name);
}

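/**
 * tomoyo_del_group - Delete "struct tomoyo_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */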
static inline void tomoyo_del_group(struct list_head *element)
{
	struct tomoyo_group *group =
		container_of(element, typeof(*group), head.list);
	tomoyo_put_name(group->group_name);
}

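/**
 * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */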
static inline void tomoyo_del_address_group(struct list_head *element)
{
	/* Nothing to do. */
}

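/**
 * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */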
static inline void tomoyo_del_number_group(struct list_head *element)
{
	/* Nothing to do. */
}

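/**
 * tomoyo_try_to_gc - Try to kfree() an entry.
 *
 * @type:    One of values in "enum tomoyo_policy_id".
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds tomoyo_policy_lock mutex.
 */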
static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
			     struct list_head *element)
{
	/*
	 * __list_del_entry() guarantees that the list element is no longer
	 * reachable from the list which the element was originally on (e.g.
	 * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
	 * list element is no longer referenced by syscall users.
	 */
	__list_del_entry(element);
	mutex_unlock(&tomoyo_policy_lock);
	synchronize_srcu(&tomoyo_ss);
	/*
	 * However, there are two users which may still be using the list
	 * element. We need to defer until both users forget this element.
	 *
	 * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
	 * and "struct tomoyo_io_buffer"->w.domain forget this element.
	 */
	if (tomoyo_struct_used_by_io_buffer(element))
		goto reinject;
	switch (type) {
	case TOMOYO_ID_TRANSITION_CONTROL:
		tomoyo_del_transition_control(element);
		break;
	case TOMOYO_ID_MANAGER:
		tomoyo_del_manager(element);
		break;
	case TOMOYO_ID_AGGREGATOR:
		tomoyo_del_aggregator(element);
		break;
	case TOMOYO_ID_GROUP:
		tomoyo_del_group(element);
		break;
	case TOMOYO_ID_PATH_GROUP:
		tomoyo_del_path_group(element);
		break;
	case TOMOYO_ID_ADDRESS_GROUP:
		tomoyo_del_address_group(element);
		break;
	case TOMOYO_ID_NUMBER_GROUP:
		tomoyo_del_number_group(element);
		break;
	case TOMOYO_ID_CONDITION:
		tomoyo_del_condition(element);
		break;
	case TOMOYO_ID_NAME:
		/*
		 * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
		 * forget this element.
		 */
		if (tomoyo_name_used_by_io_buffer
		    (container_of(element, typeof(struct tomoyo_name),
				  head.list)->entry.name))
			goto reinject;
		tomoyo_del_name(element);
		break;
	case TOMOYO_ID_ACL:
		tomoyo_del_acl(element);
		break;
	case TOMOYO_ID_DOMAIN:
		/*
		 * Don't kfree() until all "struct cred"->security forget this
		 * element.
		 */
		if (atomic_read(&container_of
				(element, typeof(struct tomoyo_domain_info),
				 list)->users))
			goto reinject;
		break;
	case TOMOYO_MAX_POLICY:
		break;
	}
	mutex_lock(&tomoyo_policy_lock);
	if (type == TOMOYO_ID_DOMAIN)
		tomoyo_del_domain(element);
	tomoyo_memory_free(element);
	return;
reinject:
	/*
	 * We can safely reinject this element here because
	 * (1) appending and removing list elements are protected by the
	 *     tomoyo_policy_lock mutex, and
	 * (2) only this function removes list elements, and it runs
	 *     exclusively under the tomoyo_gc_mutex mutex.
	 */
	mutex_lock(&tomoyo_policy_lock);
	list_add_rcu(element, element->prev);
}

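/**
 * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head".
 *
 * @id:          One of values in "enum tomoyo_policy_id".
 * @member_list: Pointer to "struct list_head".
 *
 * Returns nothing.
 */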
static void tomoyo_collect_member(const enum tomoyo_policy_id id,
				  struct list_head *member_list)
{
	struct tomoyo_acl_head *member;
	struct tomoyo_acl_head *tmp;
	list_for_each_entry_safe(member, tmp, member_list, list) {
		if (!member->is_deleted)
			continue;
		member->is_deleted = TOMOYO_GC_IN_PROGRESS;
		tomoyo_try_to_gc(id, &member->list);
	}
}

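/**
 * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info".
 *
 * @list: Pointer to "struct list_head".
 *
 * Returns nothing.
 */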
static void tomoyo_collect_acl(struct list_head *list)
{
	struct tomoyo_acl_info *acl;
	struct tomoyo_acl_info *tmp;
	list_for_each_entry_safe(acl, tmp, list, list) {
		if (!acl->is_deleted)
			continue;
		acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
		tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
	}
}

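/**
 * tomoyo_collect_entry - Try to kfree() deleted elements.
 *
 * Returns nothing.
 */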
static void tomoyo_collect_entry(void)
{
	int i;
	enum tomoyo_policy_id id;
	struct tomoyo_policy_namespace *ns;
	mutex_lock(&tomoyo_policy_lock);
	{
		struct tomoyo_domain_info *domain;
		struct tomoyo_domain_info *tmp;
		list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
					 list) {
			tomoyo_collect_acl(&domain->acl_info_list);
			if (!domain->is_deleted || atomic_read(&domain->users))
				continue;
			tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
		}
	}
	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
		for (id = 0; id < TOMOYO_MAX_POLICY; id++)
			tomoyo_collect_member(id, &ns->policy_list[id]);
		for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
			tomoyo_collect_acl(&ns->acl_group[i]);
	}
	{
		struct tomoyo_shared_acl_head *ptr;
		struct tomoyo_shared_acl_head *tmp;
		list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
					 list) {
			if (atomic_read(&ptr->users) > 0)
				continue;
			atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
			tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
		}
	}
	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
		for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
			struct list_head *list = &ns->group_list[i];
			struct tomoyo_group *group;
			struct tomoyo_group *tmp;
			switch (i) {
			case 0:
				id = TOMOYO_ID_PATH_GROUP;
				break;
			case 1:
				id = TOMOYO_ID_NUMBER_GROUP;
				break;
			default:
				id = TOMOYO_ID_ADDRESS_GROUP;
				break;
			}
			list_for_each_entry_safe(group, tmp, list, head.list) {
				tomoyo_collect_member(id, &group->member_list);
				if (!list_empty(&group->member_list) ||
				    atomic_read(&group->head.users) > 0)
					continue;
				atomic_set(&group->head.users,
					   TOMOYO_GC_IN_PROGRESS);
				tomoyo_try_to_gc(TOMOYO_ID_GROUP,
						 &group->head.list);
			}
		}
	}
	for (i = 0; i < TOMOYO_MAX_HASH; i++) {
		struct list_head *list = &tomoyo_name_list[i];
		struct tomoyo_shared_acl_head *ptr;
		struct tomoyo_shared_acl_head *tmp;
		list_for_each_entry_safe(ptr, tmp, list, list) {
			if (atomic_read(&ptr->users) > 0)
				continue;
			atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
			tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
		}
	}
	mutex_unlock(&tomoyo_policy_lock);
}

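/**
 * tomoyo_gc_thread - Garbage collector thread function.
 *
 * @unused: Unused.
 *
 * Returns 0.
 */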
static int tomoyo_gc_thread(void *unused)
{
	/* Garbage collector thread is exclusive. */
	static DEFINE_MUTEX(tomoyo_gc_mutex);
	if (!mutex_trylock(&tomoyo_gc_mutex))
		goto out;
	tomoyo_collect_entry();
	{
		struct tomoyo_io_buffer *head;
		struct tomoyo_io_buffer *tmp;

		spin_lock(&tomoyo_io_buffer_list_lock);
		list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
					 list) {
			if (head->users)
				continue;
			list_del(&head->list);
			kfree(head->read_buf);
			kfree(head->write_buf);
			kfree(head);
		}
		spin_unlock(&tomoyo_io_buffer_list_lock);
	}
	mutex_unlock(&tomoyo_gc_mutex);
out:
	/* This acts as do_exit(0). */
	return 0;
}

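/**
 * tomoyo_notify_gc - Register/unregister "struct tomoyo_io_buffer".
 *
 * @head:        Pointer to "struct tomoyo_io_buffer".
 * @is_register: True if register, false if unregister.
 *
 * Returns nothing.
 *
 * When an interface that was opened for writing is unregistered, a garbage
 * collector thread is started.
 */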
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
{
	bool is_write = false;

	spin_lock(&tomoyo_io_buffer_list_lock);
	if (is_register) {
		head->users = 1;
		list_add(&head->list, &tomoyo_io_buffer_list);
	} else {
		is_write = head->write_buf != NULL;
		if (!--head->users) {
			list_del(&head->list);
			kfree(head->read_buf);
			kfree(head->write_buf);
			kfree(head);
		}
	}
	spin_unlock(&tomoyo_io_buffer_list_lock);
	if (is_write) {
		struct task_struct *task = kthread_create(tomoyo_gc_thread,
							  NULL,
							  "GC for TOMOYO");
		if (!IS_ERR(task))
			wake_up_process(task);
	}
}