6 #include <linux/slab.h>
71 static struct fsnotify_group *audit_tree_group;
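/*
 * alloc_tree() below starts every tree with three empty list heads:
 * "chunks" for its watched chunks, "rules" for the audit rules that
 * reference it, and "list" for linkage into the global tree_list (or
 * prune_list once the tree is dying).
 */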
81 INIT_LIST_HEAD(&tree->chunks);
82 INIT_LIST_HEAD(&tree->rules);
83 INIT_LIST_HEAD(&tree->list);
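/*
 * get_tree()/put_tree() take and drop a reference on the tree; the
 * final put frees it, deferred past any RCU readers still walking
 * the hash.
 */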
91 static inline void get_tree(struct audit_tree *tree)
96 static inline void put_tree(struct audit_tree *tree)
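/*
 * Chunk teardown: each live owner slot pins its tree, so the loop at
 * line 112 must put_tree() every owner first.  The chunk itself is
 * refcounted through chunk->refs (line 121), and the final drop goes
 * through __put_chunk() as an RCU callback.
 */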
112 for (i = 0; i < chunk->count; i++) {
113 if (chunk->owners[i].owner)
114 put_tree(chunk->owners[i].owner);
121 if (atomic_long_dec_and_test(&chunk->refs))
125 static void __put_chunk(struct rcu_head *rcu)
131 static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
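/*
 * alloc_chunk(count) sizes the allocation for "count" owner slots and
 * pre-initializes each slot's list head and index (lines 153-154), so
 * tag/untag can splice slots between old and new chunks with no
 * further setup.
 */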
148 INIT_LIST_HEAD(&chunk->hash);
149 INIT_LIST_HEAD(&chunk->trees);
150 chunk->count = count;
151 atomic_long_set(&chunk->refs, 1);
152 for (i = 0; i < count; i++) {
153 INIT_LIST_HEAD(&chunk->owners[i].list);
154 chunk->owners[i].index = i;
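/*
 * Chunks live in a hash keyed by the watched inode, reached through
 * the fsnotify mark embedded in the chunk (line 173).  Insertion uses
 * list_add_rcu() so the lookup below can scan a bucket under
 * rcu_read_lock() alone, taking a chunk reference on a hit (line 191).
 */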
173 struct fsnotify_mark *entry = &chunk->mark;
178 list = chunk_hash(entry->i.inode);
179 list_add_rcu(&chunk->hash, list);
185 struct list_head *list = chunk_hash(inode);
188 list_for_each_entry_rcu(p, list, hash) {
190 if (p->mark.i.inode == inode) {
191 atomic_long_inc(&p->refs);
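/*
 * audit_tree_match() just scans the owner slots for "tree".  The top
 * bit of node->index (1U<<31) is a scratch tag used by the trim and
 * tag passes; find_chunk() masks it off (line 211) to recover the
 * slot number and step back to the containing chunk.
 */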
201 for (n = 0; n < chunk->count; n++)
202 if (chunk->owners[n].owner == tree)
211 int index = p->index & ~(1U<<31);
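/*
 * untag_chunk() detaches one tree from a chunk.  Hashed chunks are
 * treated as immutable, so the chunk is replaced by a fresh copy with
 * one slot fewer (size = count - 1, line 222), or torn down outright
 * when the last owner goes, all under entry->lock plus hash_lock.
 */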
216 static void untag_chunk(struct node *p)
219 struct fsnotify_mark *entry = &chunk->mark;
222 int size = chunk->count - 1;
227 spin_unlock(&hash_lock);
230 new = alloc_chunk(size);
232 spin_lock(&entry->lock);
233 if (chunk->dead || !entry->i.inode) {
234 spin_unlock(&entry->lock);
244 spin_lock(&hash_lock);
245 list_del_init(&chunk->trees);
246 if (owner->root == chunk)
248 list_del_init(&p->list);
249 list_del_rcu(&chunk->hash);
250 spin_unlock(&hash_lock);
251 spin_unlock(&entry->lock);
266 spin_lock(&hash_lock);
267 list_replace_init(&chunk->trees, &new->trees);
268 if (owner->root == chunk) {
273 for (i = j = 0; j <= size; i++, j++) {
275 if (&chunk->owners[j] == p) {
276 list_del_init(&p->list);
281 new->owners[i].owner = s;
282 new->owners[i].index = chunk->owners[j].index - j + i;
286 list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
289 list_replace_rcu(&chunk->hash, &new->hash);
292 spin_unlock(&hash_lock);
293 spin_unlock(&entry->lock);
300 spin_lock(&hash_lock);
301 if (owner->root == chunk) {
305 list_del_init(&p->list);
308 spin_unlock(&hash_lock);
309 spin_unlock(&entry->lock);
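/*
 * create_chunk() covers the first watch on an inode: a one-slot chunk
 * whose fsnotify mark is attached to the inode.  The new node is born
 * with its tag bit set (line 338) so the caller's mark-and-sweep pass
 * can tell fresh nodes from preexisting ones.
 */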
312 spin_lock(&hash_lock);
317 struct fsnotify_mark *entry;
322 entry = &chunk->mark;
328 spin_lock(&entry->lock);
329 spin_lock(&hash_lock);
331 spin_unlock(&hash_lock);
333 spin_unlock(&entry->lock);
338 chunk->owners[0].index = (1U << 31);
347 spin_unlock(&hash_lock);
348 spin_unlock(&entry->lock);
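/*
 * tag_chunk() is the general case: with no existing mark it falls
 * back to create_chunk() (line 364); otherwise it builds a
 * replacement chunk with one extra slot, copies the old owners across
 * (lines 422-429), and swaps the copy into the hash with
 * list_replace_rcu() (line 435).
 */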
354 static int tag_chunk(struct inode *inode, struct audit_tree *tree)
356 struct fsnotify_mark *old_entry, *chunk_entry;
364 return create_chunk(inode, tree);
369 spin_lock(&hash_lock);
370 for (n = 0; n < old->count; n++) {
371 if (old->owners[n].owner == tree) {
372 spin_unlock(&hash_lock);
377 spin_unlock(&hash_lock);
379 chunk = alloc_chunk(old->count + 1);
385 chunk_entry = &chunk->mark;
387 spin_lock(&old_entry->lock);
388 if (!old_entry->i.inode) {
390 spin_unlock(&old_entry->lock);
398 spin_unlock(&old_entry->lock);
405 spin_lock(&chunk_entry->lock);
406 spin_lock(&hash_lock);
410 spin_unlock(&hash_lock);
412 spin_unlock(&chunk_entry->lock);
413 spin_unlock(&old_entry->lock);
421 list_replace_init(&old->trees, &chunk->trees);
422 for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
425 p->index = old->owners[n].index;
429 list_replace_init(&old->owners[n].list, &p->list);
431 p->index = (chunk->count - 1) | (1U<<31);
434 list_add(&p->list, &tree->chunks);
435 list_replace_rcu(&old->hash, &chunk->hash);
441 list_add(&tree->same_root, &chunk->trees);
443 spin_unlock(&hash_lock);
444 spin_unlock(&chunk_entry->lock);
445 spin_unlock(&old_entry->lock);
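/*
 * kill_rules() detaches every rule still pointing at a dying tree and
 * logs an AUDIT_CONFIG_CHANGE record per rule (the "remove rule"
 * string at line 466), so implicitly dropped rules still show up in
 * the audit trail.
 */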
452 static void kill_rules(struct audit_tree *tree)
461 list_del_init(&rule->rlist);
466 audit_log_string(ab, "remove rule");
473 list_del_rcu(&entry->list);
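/*
 * prune_one() strips a doomed tree of all its chunks by repeatedly
 * untagging the head of victim->chunks until the list drains.
 */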
483 static void prune_one(struct audit_tree *victim)
485 spin_lock(&hash_lock);
486 while (!list_empty(&victim->chunks)) {
493 spin_unlock(&hash_lock);
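/*
 * trim_marked() is the sweep half of mark-and-sweep: the first loop
 * (line 508) moves still-tagged nodes to the front of tree->chunks,
 * the second (line 517) untags from the front until it meets an
 * untagged node, and a tree left without a root is killed off.
 */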
499 static void trim_marked(struct audit_tree *tree)
502 spin_lock(&hash_lock);
504 spin_unlock(&hash_lock);
508 for (p = tree->chunks.next; p != &tree->chunks; p = q) {
511 if (node->index & (1U<<31)) {
513 list_add(p, &tree->chunks);
517 while (!list_empty(&tree->chunks)) {
523 if (!(node->index & (1U<<31)))
530 spin_unlock(&hash_lock);
533 list_del_init(&tree->list);
537 spin_unlock(&hash_lock);
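/*
 * audit_remove_tree_rule() drops one rule's hold on its tree; once
 * the last rule is gone (line 551) the tree moves onto prune_list and
 * audit_schedule_prune() hands the rest of the teardown to a separate
 * thread, outside the filter lock.
 */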
541 static void audit_schedule_prune(void);
549 spin_lock(&hash_lock);
550 list_del_init(&rule->rlist);
551 if (list_empty(&tree->rules) && !tree->goner) {
555 list_move(&tree->list, &prune_list);
557 spin_unlock(&hash_lock);
558 audit_schedule_prune();
562 spin_unlock(&hash_lock);
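/*
 * audit_trim_trees() walks tree_list with an on-stack cursor so the
 * mutex can be dropped per tree.  Each node is tagged (line 606),
 * then untagged again (line 608) if compare_root() finds its inode on
 * some mount under the tree's root; trim_marked() discards whatever
 * stayed tagged.
 */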
568 static int compare_root(struct vfsmount *mnt, void *arg)
578 list_add(&cursor, &tree_list);
579 while (cursor.next != &tree_list) {
589 list_add(&cursor, &tree->list);
598 if (IS_ERR(root_mnt))
601 spin_lock(&hash_lock);
605 struct inode *inode = chunk->mark.i.inode;
606 node->index |= 1U<<31;
608 node->index &= ~(1U<<31);
610 spin_unlock(&hash_lock);
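/*
 * audit_make_tree() validates the rule up front; only absolute
 * pathnames (the '/' check at line 624) with the expected op and
 * filter-list combination may carry a tree, which then gets its own
 * copy of the pathname via alloc_tree() (line 629).
 */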
624 if (pathname[0] != '/' ||
629 rule->tree = alloc_tree(pathname);
640 static int tag_mount(struct vfsmount *mnt, void *arg)
642 return tag_chunk(mnt->mnt_root->d_inode, arg);
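/*
 * audit_add_tree_rule() publishes the tree on tree_list (line 662)
 * and tags every mount of its path through tag_mount().  On success
 * the scratch bits left over from tagging are cleared (line 685); on
 * failure the half-attached tree is unhooked again (lines 703-704).
 */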
662 list_add(&tree->list, &tree_list);
683 spin_lock(&hash_lock);
685 node->index &= ~(1U<<31);
686 spin_unlock(&hash_lock);
693 if (list_empty(&rule->rlist)) {
703 list_del_init(&tree->list);
704 list_del_init(&tree->rules);
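/*
 * audit_tag_tree() retags trees for a newly tagged mount point.  The
 * on-stack "cursor" and "barrier" nodes are threaded through
 * tree_list so the walk survives dropping audit_filter_mutex: the
 * first loop (line 735) does the tagging, the second (line 774)
 * drains everything ahead of the barrier, clearing the scratch bits
 * (line 787) on success or trimming the tree on failure.
 */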
713 struct path path1, path2;
723 return PTR_ERR(tagged);
732 list_add(&barrier, &tree_list);
733 list_add(&cursor, &barrier);
735 while (cursor.next != &tree_list) {
742 list_add(&cursor, &tree->list);
765 spin_lock(&hash_lock);
768 list_add(&tree->list, &tree_list);
770 spin_unlock(&hash_lock);
774 while (barrier.prev != &tree_list) {
780 list_add(&tree->list, &barrier);
785 spin_lock(&hash_lock);
787 node->index &= ~(1U<<31);
788 spin_unlock(&hash_lock);
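/*
 * Deferred pruning: prune_tree_thread() drains prune_list one victim
 * at a time, and audit_schedule_prune() kicks it off as a kthread.
 * The similar drain at line 845 (audit_kill_trees()) handles trees
 * whose chunks were evicted, running kill_rules() under the filter
 * mutex.
 */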
808 static int prune_tree_thread(void *unused)
813 while (!list_empty(&prune_list)) {
817 list_del_init(&victim->list);
831 static void audit_schedule_prune(void)
845 while (!list_empty(list)) {
850 list_del_init(&victim->list);
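/*
 * evict_chunk() runs as the watched inode goes away: every tree
 * rooted here is marked a goner and queued either on the global
 * prune_list (line 889) or on a caller-supplied postponed list
 * (line 892); the chunk is then unhashed and all its owner slots
 * unlinked (lines 896-898) before pruning is scheduled.
 */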
879 spin_lock(&hash_lock);
880 while (!list_empty(&chunk->trees)) {
886 spin_unlock(&hash_lock);
889 list_move(&owner->list, &prune_list);
892 list_move(&owner->list, postponed);
894 spin_lock(&hash_lock);
896 list_del_rcu(&chunk->hash);
897 for (n = 0; n < chunk->count; n++)
898 list_del_init(&chunk->owners[n].list);
899 spin_unlock(&hash_lock);
901 audit_schedule_prune();
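/*
 * The fsnotify glue: audit trees never consume events, so
 * handle_event is not expected to run and send_event always declines
 * (hence the NULL event-priv hooks below); the callback that matters
 * is freeing_mark, which evicts the chunk when its mark or inode is
 * destroyed.
 */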
905 static int audit_tree_handle_event(struct fsnotify_group *group,
906 struct fsnotify_mark *inode_mark,
907 struct fsnotify_mark *vfsmount_mark,
908 struct fsnotify_event *event)
914 static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
927 static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
928 struct fsnotify_mark *inode_mark,
929 struct fsnotify_mark *vfsmount_mark,
935 static const struct fsnotify_ops audit_tree_ops = {
936 .handle_event = audit_tree_handle_event,
937 .should_send_event = audit_tree_send_event,
938 .free_group_priv = NULL,
939 .free_event_priv = NULL,
940 .freeing_mark = audit_tree_freeing_mark,
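/*
 * audit_tree_init() allocates the single fsnotify group shared by all
 * tree watches; failure is fatal to rule evaluation, hence the
 * audit_panic() at line 949.  The chunk hash buckets are initialized
 * afterwards (line 952).
 */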
943 static int __init audit_tree_init(void)
948 if (IS_ERR(audit_tree_group))
949 audit_panic("cannot initialize fsnotify group for rectree watches");
952 INIT_LIST_HEAD(&chunk_hash_heads[i]);