Linux Kernel 3.7.1
inotify_user.c
1 /*
2  * fs/inotify_user.c - inotify support for userspace
3  *
4  * Authors:
5  * John McCutchan <[email protected]>
6  * Robert Love <[email protected]>
7  *
8  * Copyright (C) 2005 John McCutchan
9  * Copyright 2006 Hewlett-Packard Development Company, L.P.
10  *
11  * Copyright (C) 2009 Eric Paris <Red Hat Inc>
12  * inotify was largely rewritten to make use of the fsnotify infrastructure
13  *
14  * This program is free software; you can redistribute it and/or modify it
15  * under the terms of the GNU General Public License as published by the
16  * Free Software Foundation; either version 2, or (at your option) any
17  * later version.
18  *
19  * This program is distributed in the hope that it will be useful, but
20  * WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22  * General Public License for more details.
23  */
24 
25 #include <linux/file.h>
26 #include <linux/fs.h> /* struct inode */
27 #include <linux/fsnotify_backend.h>
28 #include <linux/idr.h>
29 #include <linux/init.h> /* module_init */
30 #include <linux/inotify.h>
31 #include <linux/kernel.h> /* roundup() */
32 #include <linux/namei.h> /* LOOKUP_FOLLOW */
33 #include <linux/sched.h> /* struct user */
34 #include <linux/slab.h> /* struct kmem_cache */
35 #include <linux/syscalls.h>
36 #include <linux/types.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/uaccess.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 
42 #include "inotify.h"
43 
44 #include <asm/ioctls.h>
45 
46 /* these are configurable via /proc/sys/fs/inotify/ */
47 static int inotify_max_user_instances __read_mostly;
48 static int inotify_max_queued_events __read_mostly;
49 static int inotify_max_user_watches __read_mostly;
50 
51 static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
52 struct kmem_cache *event_priv_cachep __read_mostly;
53 
54 #ifdef CONFIG_SYSCTL
55 
56 #include <linux/sysctl.h>
57 
58 static int zero;
59 
60 ctl_table inotify_table[] = {
61  {
62  .procname = "max_user_instances",
63  .data = &inotify_max_user_instances,
64  .maxlen = sizeof(int),
65  .mode = 0644,
66  .proc_handler = proc_dointvec_minmax,
67  .extra1 = &zero,
68  },
69  {
70  .procname = "max_user_watches",
71  .data = &inotify_max_user_watches,
72  .maxlen = sizeof(int),
73  .mode = 0644,
74  .proc_handler = proc_dointvec_minmax,
75  .extra1 = &zero,
76  },
77  {
78  .procname = "max_queued_events",
79  .data = &inotify_max_queued_events,
80  .maxlen = sizeof(int),
81  .mode = 0644,
82  .proc_handler = proc_dointvec_minmax,
83  .extra1 = &zero
84  },
85  { }
86 };
87 #endif /* CONFIG_SYSCTL */
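The three knobs above surface the limits enforced later in this file (instances in inotify_new_group(), watches in inotify_new_watch(), queue length via group->max_events); their defaults are assigned at the bottom of the file in inotify_user_setup(). As a minimal userspace sketch (illustrative only, not part of this kernel source), the current values can be read straight out of /proc/sys/fs/inotify/:

#include <stdio.h>

int main(void)
{
        long watches = 0;
        FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%ld", &watches) == 1)
                printf("max_user_watches = %ld\n", watches); /* defaults to 8192 below */
        fclose(f);
        return 0;
}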
88 
89 static inline __u32 inotify_arg_to_mask(u32 arg)
90 {
91  __u32 mask;
92 
93  /*
94  * every mark should accept its own ignored event, care about children,
95  * and receive events when the inode is unmounted
96  */
97  mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
98 
99  /* pull the requested event bits and behaviour flags out of arg */
100  mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
101 
102  return mask;
103 }
104 
105 static inline u32 inotify_mask_to_arg(__u32 mask)
106 {
107  return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
108  IN_Q_OVERFLOW);
109 }
110 
111 /* inotify userspace file descriptor functions */
112 static unsigned int inotify_poll(struct file *file, poll_table *wait)
113 {
114  struct fsnotify_group *group = file->private_data;
115  int ret = 0;
116 
117  poll_wait(file, &group->notification_waitq, wait);
118  mutex_lock(&group->notification_mutex);
119  if (!fsnotify_notify_queue_is_empty(group))
120  ret = POLLIN | POLLRDNORM;
121  mutex_unlock(&group->notification_mutex);
122 
123  return ret;
124 }
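inotify_poll() reports POLLIN | POLLRDNORM whenever the notification queue is non-empty, so an inotify file descriptor can be multiplexed with poll(2), select(2) or epoll(7) like any other fd. A minimal userspace sketch (illustrative only; the watched path and timeout are arbitrary, and error handling is trimmed):

#include <poll.h>
#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
        int fd = inotify_init1(IN_NONBLOCK);
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);

        /* wakes up as soon as inotify_poll() sees a non-empty queue */
        if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
                printf("events are queued and ready to be read\n");
        return 0;
}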
125 
126 /*
127  * Get an inotify_kernel_event if one exists and is small
128  * enough to fit in "count". Return an error pointer if
129  * not large enough.
130  *
131  * Called with the group->notification_mutex held.
132  */
133 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
134  size_t count)
135 {
136  size_t event_size = sizeof(struct inotify_event);
137  struct fsnotify_event *event;
138 
139  if (fsnotify_notify_queue_is_empty(group))
140  return NULL;
141 
142  event = fsnotify_peek_notify_event(group);
143 
144  pr_debug("%s: group=%p event=%p\n", __func__, group, event);
145 
146  if (event->name_len)
147  event_size += roundup(event->name_len + 1, event_size);
148 
149  if (event_size > count)
150  return ERR_PTR(-EINVAL);
151 
152  /* held the notification_mutex the whole time, so this is the
153  * same event we peeked above */
154  fsnotify_remove_notify_event(group);
155 
156  return event;
157 }
158 
159 /*
160  * Copy an event to user space, returning how much we copied.
161  *
162  * We already checked that the event size is smaller than the
163  * buffer we had in "get_one_event()" above.
164  */
165 static ssize_t copy_event_to_user(struct fsnotify_group *group,
166  struct fsnotify_event *event,
167  char __user *buf)
168 {
169  struct inotify_event inotify_event;
170  struct fsnotify_event_private_data *fsn_priv;
171  struct inotify_event_private_data *priv;
172  size_t event_size = sizeof(struct inotify_event);
173  size_t name_len = 0;
174 
175  pr_debug("%s: group=%p event=%p\n", __func__, group, event);
176 
177  /* we get the inotify watch descriptor from the event private data */
178  spin_lock(&event->lock);
179  fsn_priv = fsnotify_remove_priv_from_event(group, event);
180  spin_unlock(&event->lock);
181 
182  if (!fsn_priv)
183  inotify_event.wd = -1;
184  else {
185  priv = container_of(fsn_priv, struct inotify_event_private_data,
186  fsnotify_event_priv_data);
187  inotify_event.wd = priv->wd;
188  inotify_free_event_priv(fsn_priv);
189  }
190 
191  /*
192  * round up event->name_len plus an extra byte for the terminating
193  * '\0' so the result is a multiple of event_size.
194  */
195  if (event->name_len)
196  name_len = roundup(event->name_len + 1, event_size);
197  inotify_event.len = name_len;
198 
199  inotify_event.mask = inotify_mask_to_arg(event->mask);
200  inotify_event.cookie = event->sync_cookie;
201 
202  /* send the main event */
203  if (copy_to_user(buf, &inotify_event, event_size))
204  return -EFAULT;
205 
206  buf += event_size;
207 
208  /*
209  * fsnotify only stores the pathname, so here we have to send the pathname
210  * and then pad that pathname out to a multiple of sizeof(inotify_event)
211  * with zeros. The zero padding is done with clear_user().
212  */
213  if (name_len) {
214  unsigned int len_to_zero = name_len - event->name_len;
215  /* copy the path name */
216  if (copy_to_user(buf, event->file_name, event->name_len))
217  return -EFAULT;
218  buf += event->name_len;
219 
220  /* fill userspace with 0's */
221  if (clear_user(buf, len_to_zero))
222  return -EFAULT;
223  buf += len_to_zero;
224  event_size += name_len;
225  }
226 
227  return event_size;
228 }
229 
230 static ssize_t inotify_read(struct file *file, char __user *buf,
231  size_t count, loff_t *pos)
232 {
233  struct fsnotify_group *group;
234  struct fsnotify_event *kevent;
235  char __user *start;
236  int ret;
237  DEFINE_WAIT(wait);
238 
239  start = buf;
240  group = file->private_data;
241 
242  while (1) {
243  prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
244 
245  mutex_lock(&group->notification_mutex);
246  kevent = get_one_event(group, count);
247  mutex_unlock(&group->notification_mutex);
248 
249  pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
250 
251  if (kevent) {
252  ret = PTR_ERR(kevent);
253  if (IS_ERR(kevent))
254  break;
255  ret = copy_event_to_user(group, kevent, buf);
256  fsnotify_put_event(kevent);
257  if (ret < 0)
258  break;
259  buf += ret;
260  count -= ret;
261  continue;
262  }
263 
264  ret = -EAGAIN;
265  if (file->f_flags & O_NONBLOCK)
266  break;
267  ret = -EINTR;
268  if (signal_pending(current))
269  break;
270 
271  if (start != buf)
272  break;
273 
274  schedule();
275  }
276 
277  finish_wait(&group->notification_waitq, &wait);
278  if (start != buf && ret != -EFAULT)
279  ret = buf - start;
280  return ret;
281 }
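inotify_read() copies as many whole events as fit into the user buffer; each record produced by copy_event_to_user() is a struct inotify_event header followed by an optional NUL-padded name whose padded length is reported in the len field, so userspace has to walk the buffer record by record. A minimal sketch of that loop (illustrative only; the watched path and buffer size are arbitrary):

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
        int fd = inotify_init1(0);
        ssize_t len;

        inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE | IN_MODIFY);
        len = read(fd, buf, sizeof(buf));       /* blocks until at least one event fits */

        for (char *p = buf; len > 0 && p < buf + len; ) {
                const struct inotify_event *ev = (const struct inotify_event *)p;

                printf("wd=%d mask=%#x cookie=%u name=%s\n", ev->wd,
                       (unsigned int)ev->mask, (unsigned int)ev->cookie,
                       ev->len ? ev->name : "");
                p += sizeof(*ev) + ev->len;     /* len already includes the NUL padding */
        }
        return 0;
}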
282 
283 static int inotify_fasync(int fd, struct file *file, int on)
284 {
285  struct fsnotify_group *group = file->private_data;
286 
287  return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
288 }
289 
290 static int inotify_release(struct inode *ignored, struct file *file)
291 {
292  struct fsnotify_group *group = file->private_data;
293 
294  pr_debug("%s: group=%p\n", __func__, group);
295 
296  fsnotify_clear_marks_by_group(group);
297 
298  /* free this group, matching get was inotify_init->inotify_new_group->fsnotify_alloc_group */
299  fsnotify_put_group(group);
300 
301  return 0;
302 }
303 
304 static long inotify_ioctl(struct file *file, unsigned int cmd,
305  unsigned long arg)
306 {
307  struct fsnotify_group *group;
308  struct fsnotify_event_holder *holder;
309  struct fsnotify_event *event;
310  void __user *p;
311  int ret = -ENOTTY;
312  size_t send_len = 0;
313 
314  group = file->private_data;
315  p = (void __user *) arg;
316 
317  pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
318 
319  switch (cmd) {
320  case FIONREAD:
321  mutex_lock(&group->notification_mutex);
322  list_for_each_entry(holder, &group->notification_list, event_list) {
323  event = holder->event;
324  send_len += sizeof(struct inotify_event);
325  if (event->name_len)
326  send_len += roundup(event->name_len + 1,
327  sizeof(struct inotify_event));
328  }
329  mutex_unlock(&group->notification_mutex);
330  ret = put_user(send_len, (int __user *) p);
331  break;
332  }
333 
334  return ret;
335 }
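The only ioctl handled here is FIONREAD: it walks the notification list and reports how many bytes the next read(2) could return, which lets userspace size its buffer exactly. A minimal sketch (illustrative only; the watched path is arbitrary):

#include <stdio.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>

int main(void)
{
        int fd = inotify_init1(0);
        int queued = 0;

        inotify_add_watch(fd, "/tmp", IN_ALL_EVENTS);
        if (ioctl(fd, FIONREAD, &queued) == 0)
                printf("%d bytes of queued events\n", queued);
        return 0;
}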
336 
337 static const struct file_operations inotify_fops = {
338  .poll = inotify_poll,
339  .read = inotify_read,
340  .fasync = inotify_fasync,
341  .release = inotify_release,
342  .unlocked_ioctl = inotify_ioctl,
343  .compat_ioctl = inotify_ioctl,
344  .llseek = noop_llseek,
345 };
346 
347 
348 /*
349  * inotify_find_inode - resolve a user-given path to a specific inode
350  */
351 static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
352 {
353  int error;
354 
355  error = user_path_at(AT_FDCWD, dirname, flags, path);
356  if (error)
357  return error;
358  /* you can only watch an inode if you have read permissions on it */
359  error = inode_permission(path->dentry->d_inode, MAY_READ);
360  if (error)
361  path_put(path);
362  return error;
363 }
364 
365 static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
366  int *last_wd,
367  struct inotify_inode_mark *i_mark)
368 {
369  int ret;
370 
371  do {
372  if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
373  return -ENOMEM;
374 
375  spin_lock(idr_lock);
376  ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
377  &i_mark->wd);
378  /* we added the mark to the idr, take a reference */
379  if (!ret) {
380  *last_wd = i_mark->wd;
381  fsnotify_get_mark(&i_mark->fsn_mark);
382  }
383  spin_unlock(idr_lock);
384  } while (ret == -EAGAIN);
385 
386  return ret;
387 }
388 
389 static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
390  int wd)
391 {
392  struct idr *idr = &group->inotify_data.idr;
393  spinlock_t *idr_lock = &group->inotify_data.idr_lock;
394  struct inotify_inode_mark *i_mark;
395 
396  assert_spin_locked(idr_lock);
397 
398  i_mark = idr_find(idr, wd);
399  if (i_mark) {
400  struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
401 
402  fsnotify_get_mark(fsn_mark);
403  /* One ref for being in the idr, one ref we just took */
404  BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
405  }
406 
407  return i_mark;
408 }
409 
410 static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
411  int wd)
412 {
413  struct inotify_inode_mark *i_mark;
414  spinlock_t *idr_lock = &group->inotify_data.idr_lock;
415 
416  spin_lock(idr_lock);
417  i_mark = inotify_idr_find_locked(group, wd);
418  spin_unlock(idr_lock);
419 
420  return i_mark;
421 }
422 
423 static void do_inotify_remove_from_idr(struct fsnotify_group *group,
424  struct inotify_inode_mark *i_mark)
425 {
426  struct idr *idr = &group->inotify_data.idr;
427  spinlock_t *idr_lock = &group->inotify_data.idr_lock;
428  int wd = i_mark->wd;
429 
430  assert_spin_locked(idr_lock);
431 
432  idr_remove(idr, wd);
433 
434  /* removed from the idr, drop that ref */
435  fsnotify_put_mark(&i_mark->fsn_mark);
436 }
437 
438 /*
439  * Remove the mark from the idr (if present) and drop the reference
440  * on the mark because it was in the idr.
441  */
442 static void inotify_remove_from_idr(struct fsnotify_group *group,
443  struct inotify_inode_mark *i_mark)
444 {
445  spinlock_t *idr_lock = &group->inotify_data.idr_lock;
446  struct inotify_inode_mark *found_i_mark = NULL;
447  int wd;
448 
449  spin_lock(idr_lock);
450  wd = i_mark->wd;
451 
452  /*
453  * does this i_mark think it is in the idr? we shouldn't get called
454  * if it wasn't....
455  */
456  if (wd == -1) {
457  WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
458  " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
459  i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
460  goto out;
461  }
462 
463  /* Let's look in the idr to see if we can find it */
464  found_i_mark = inotify_idr_find_locked(group, wd);
465  if (unlikely(!found_i_mark)) {
466  WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
467  " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
468  i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
469  goto out;
470  }
471 
472  /*
473  * We found a mark in the idr at the right wd, but it's
474  * not the mark we were told to remove. Something went
475  * badly wrong in the idr bookkeeping.
476  */
477  if (unlikely(found_i_mark != i_mark)) {
478  WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
479  "mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
480  "found_i_mark->group=%p found_i_mark->inode=%p\n",
481  __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
482  i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
483  found_i_mark->fsn_mark.group,
484  found_i_mark->fsn_mark.i.inode);
485  goto out;
486  }
487 
488  /*
489  * One ref for being in the idr
490  * one ref held by the caller trying to kill us
491  * one ref grabbed by inotify_idr_find
492  */
493  if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
494  printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
495  " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
496  i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
497  /* we can't really recover with bad ref counting... */
498  BUG();
499  }
500 
501  do_inotify_remove_from_idr(group, i_mark);
502 out:
503  /* match the ref taken by inotify_idr_find_locked() */
504  if (found_i_mark)
505  fsnotify_put_mark(&found_i_mark->fsn_mark);
506  i_mark->wd = -1;
507  spin_unlock(idr_lock);
508 }
509 
510 /*
511  * Send IN_IGNORED for this wd, remove this wd from the idr.
512  */
513 void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
514  struct fsnotify_group *group)
515 {
516  struct inotify_inode_mark *i_mark;
517  struct fsnotify_event *ignored_event, *notify_event;
518  struct inotify_event_private_data *event_priv;
519  struct fsnotify_event_private_data *fsn_event_priv;
520  int ret;
521 
522  ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
523  FSNOTIFY_EVENT_NONE, NULL, 0,
524  GFP_NOFS);
525  if (!ignored_event)
526  return;
527 
528  i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
529 
530  event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
531  if (unlikely(!event_priv))
532  goto skip_send_ignore;
533 
534  fsn_event_priv = &event_priv->fsnotify_event_priv_data;
535 
536  fsn_event_priv->group = group;
537  event_priv->wd = i_mark->wd;
538 
539  notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
540  if (notify_event) {
541  if (IS_ERR(notify_event))
542  ret = PTR_ERR(notify_event);
543  else
544  fsnotify_put_event(notify_event);
545  inotify_free_event_priv(fsn_event_priv);
546  }
547 
548 skip_send_ignore:
549 
550  /* matches the reference taken when the event was created */
551  fsnotify_put_event(ignored_event);
552 
553  /* remove this mark from the idr */
554  inotify_remove_from_idr(group, i_mark);
555 
556  atomic_dec(&group->inotify_data.user->inotify_watches);
557 }
558 
559 /* ding dong the mark is dead */
560 static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
561 {
562  struct inotify_inode_mark *i_mark;
563 
564  i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
565 
566  kmem_cache_free(inotify_inode_mark_cachep, i_mark);
567 }
568 
569 static int inotify_update_existing_watch(struct fsnotify_group *group,
570  struct inode *inode,
571  u32 arg)
572 {
573  struct fsnotify_mark *fsn_mark;
574  struct inotify_inode_mark *i_mark;
575  __u32 old_mask, new_mask;
576  __u32 mask;
577  int add = (arg & IN_MASK_ADD);
578  int ret;
579 
580  /* don't allow invalid bits: we don't want flags set */
581  mask = inotify_arg_to_mask(arg);
582  if (unlikely(!(mask & IN_ALL_EVENTS)))
583  return -EINVAL;
584 
585  fsn_mark = fsnotify_find_inode_mark(group, inode);
586  if (!fsn_mark)
587  return -ENOENT;
588 
589  i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
590 
591  spin_lock(&fsn_mark->lock);
592 
593  old_mask = fsn_mark->mask;
594  if (add)
595  fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
596  else
597  fsnotify_set_mark_mask_locked(fsn_mark, mask);
598  new_mask = fsn_mark->mask;
599 
600  spin_unlock(&fsn_mark->lock);
601 
602  if (old_mask != new_mask) {
603  /* more bits in old than in new? */
604  int dropped = (old_mask & ~new_mask);
605  /* more bits in this fsn_mark than the inode's mask? */
606  int do_inode = (new_mask & ~inode->i_fsnotify_mask);
607 
608  /* update the inode with this new fsn_mark */
609  if (dropped || do_inode)
610  fsnotify_recalc_inode_mask(inode);
611 
612  }
613 
614  /* return the wd */
615  ret = i_mark->wd;
616 
617  /* match the get from fsnotify_find_inode_mark() */
618  fsnotify_put_mark(fsn_mark);
619 
620  return ret;
621 }
622 
623 static int inotify_new_watch(struct fsnotify_group *group,
624  struct inode *inode,
625  u32 arg)
626 {
627  struct inotify_inode_mark *tmp_i_mark;
628  __u32 mask;
629  int ret;
630  struct idr *idr = &group->inotify_data.idr;
631  spinlock_t *idr_lock = &group->inotify_data.idr_lock;
632 
633  /* don't allow invalid bits: we don't want flags set */
634  mask = inotify_arg_to_mask(arg);
635  if (unlikely(!(mask & IN_ALL_EVENTS)))
636  return -EINVAL;
637 
638  tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
639  if (unlikely(!tmp_i_mark))
640  return -ENOMEM;
641 
642  fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
643  tmp_i_mark->fsn_mark.mask = mask;
644  tmp_i_mark->wd = -1;
645 
646  ret = -ENOSPC;
647  if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
648  goto out_err;
649 
650  ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
651  tmp_i_mark);
652  if (ret)
653  goto out_err;
654 
655  /* we are on the idr, now get on the inode */
656  ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
657  if (ret) {
658  /* we failed to get on the inode, get off the idr */
659  inotify_remove_from_idr(group, tmp_i_mark);
660  goto out_err;
661  }
662 
663  /* increment the number of watches the user has */
664  atomic_inc(&group->inotify_data.user->inotify_watches);
665 
666  /* return the watch descriptor for this new mark */
667  ret = tmp_i_mark->wd;
668 
669 out_err:
670  /* match the ref from fsnotify_init_mark() */
671  fsnotify_put_mark(&tmp_i_mark->fsn_mark);
672 
673  return ret;
674 }
675 
676 static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
677 {
678  int ret = 0;
679 
680 retry:
681  /* try to update an existing watch with the new arg */
682  ret = inotify_update_existing_watch(group, inode, arg);
683  /* no mark present, try to add a new one */
684  if (ret == -ENOENT)
685  ret = inotify_new_watch(group, inode, arg);
686  /*
687  * inotify_new_watch could race with another thread which did an
688  * inotify_new_watch between the update_existing and the add watch
689  * here; go back and try to update an existing mark again.
690  */
691  if (ret == -EEXIST)
692  goto retry;
693 
694  return ret;
695 }
696 
697 static struct fsnotify_group *inotify_new_group(unsigned int max_events)
698 {
699  struct fsnotify_group *group;
700 
701  group = fsnotify_alloc_group(&inotify_fsnotify_ops);
702  if (IS_ERR(group))
703  return group;
704 
705  group->max_events = max_events;
706 
707  spin_lock_init(&group->inotify_data.idr_lock);
708  idr_init(&group->inotify_data.idr);
709  group->inotify_data.last_wd = 0;
710  group->inotify_data.fa = NULL;
711  group->inotify_data.user = get_current_user();
712 
713  if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
714  inotify_max_user_instances) {
715  fsnotify_put_group(group);
716  return ERR_PTR(-EMFILE);
717  }
718 
719  return group;
720 }
721 
722 
723 /* inotify syscalls */
724 SYSCALL_DEFINE1(inotify_init1, int, flags)
725 {
726  struct fsnotify_group *group;
727  int ret;
728 
729  /* Check the IN_* constants for consistency. */
730  BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
731  BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
732 
733  if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
734  return -EINVAL;
735 
736  /* inotify_new_group() takes a reference to the group; we put it when the file is released */
737  group = inotify_new_group(inotify_max_queued_events);
738  if (IS_ERR(group))
739  return PTR_ERR(group);
740 
741  ret = anon_inode_getfd("inotify", &inotify_fops, group,
742  O_RDONLY | flags);
743  if (ret < 0)
744  fsnotify_put_group(group);
745 
746  return ret;
747 }
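Because the BUILD_BUG_ONs above pin IN_CLOEXEC and IN_NONBLOCK to O_CLOEXEC and O_NONBLOCK, the flags pass straight through to anon_inode_getfd() and show up as ordinary file flags on the new descriptor. A minimal userspace sketch that checks this (illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
        int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);

        printf("O_NONBLOCK set: %d\n", (fcntl(fd, F_GETFL) & O_NONBLOCK) != 0);
        printf("FD_CLOEXEC set: %d\n", (fcntl(fd, F_GETFD) & FD_CLOEXEC) != 0);
        return 0;
}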
748 
749 SYSCALL_DEFINE0(inotify_init)
750 {
751  return sys_inotify_init1(0);
752 }
753 
754 SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
755  u32, mask)
756 {
757  struct fsnotify_group *group;
758  struct inode *inode;
759  struct path path;
760  struct fd f;
761  int ret;
762  unsigned flags = 0;
763 
764  f = fdget(fd);
765  if (unlikely(!f.file))
766  return -EBADF;
767 
768  /* verify that this is indeed an inotify instance */
769  if (unlikely(f.file->f_op != &inotify_fops)) {
770  ret = -EINVAL;
771  goto fput_and_out;
772  }
773 
774  if (!(mask & IN_DONT_FOLLOW))
775  flags |= LOOKUP_FOLLOW;
776  if (mask & IN_ONLYDIR)
777  flags |= LOOKUP_DIRECTORY;
778 
779  ret = inotify_find_inode(pathname, &path, flags);
780  if (ret)
781  goto fput_and_out;
782 
783  /* inode held in place by reference to path; group by fdget on fd */
784  inode = path.dentry->d_inode;
785  group = f.file->private_data;
786 
787  /* create/update an inode mark */
788  ret = inotify_update_watch(group, inode, mask);
789  path_put(&path);
790 fput_and_out:
791  fdput(f);
792  return ret;
793 }
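Two of the mask bits never reach the mark at all: IN_DONT_FOLLOW suppresses LOOKUP_FOLLOW and IN_ONLYDIR adds LOOKUP_DIRECTORY before the path walk, so they only influence how the pathname is resolved. A minimal userspace sketch (illustrative only; the path is arbitrary):

#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
        int fd = inotify_init1(0);
        /* IN_ONLYDIR makes the call fail unless the path is a directory;
         * IN_DONT_FOLLOW watches a symlink itself rather than its target */
        int wd = inotify_add_watch(fd, "/etc", IN_ATTRIB | IN_ONLYDIR | IN_DONT_FOLLOW);

        if (wd < 0)
                perror("inotify_add_watch");
        else
                printf("watch descriptor %d\n", wd);
        return 0;
}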
794 
795 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
796 {
797  struct fsnotify_group *group;
798  struct inotify_inode_mark *i_mark;
799  struct fd f;
800  int ret = 0;
801 
802  f = fdget(fd);
803  if (unlikely(!f.file))
804  return -EBADF;
805 
806  /* verify that this is indeed an inotify instance */
807  ret = -EINVAL;
808  if (unlikely(f.file->f_op != &inotify_fops))
809  goto out;
810 
811  group = f.file->private_data;
812 
813  ret = -EINVAL;
814  i_mark = inotify_idr_find(group, wd);
815  if (unlikely(!i_mark))
816  goto out;
817 
818  ret = 0;
819 
820  fsnotify_destroy_mark(&i_mark->fsn_mark);
821 
822  /* match ref taken by inotify_idr_find */
823  fsnotify_put_mark(&i_mark->fsn_mark);
824 
825 out:
826  fdput(f);
827  return ret;
828 }
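Removing a watch does not just drop the mark: destroying it ends up in inotify_ignored_and_remove_idr(), which queues one final IN_IGNORED event for the wd before it disappears from the idr, so userspace should treat IN_IGNORED as "this wd is now invalid". A minimal sketch (illustrative only; the watched path is arbitrary):

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
        int fd = inotify_init1(0);
        int wd = inotify_add_watch(fd, "/tmp", IN_CREATE);

        inotify_rm_watch(fd, wd);               /* queues IN_IGNORED for wd */
        if (read(fd, buf, sizeof(buf)) >= (ssize_t)sizeof(struct inotify_event)) {
                const struct inotify_event *ev = (const struct inotify_event *)buf;

                if (ev->mask & IN_IGNORED)
                        printf("wd %d is gone\n", ev->wd);
        }
        return 0;
}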
829 
830 /*
831  * inotify_user_setup - Our initialization function. Note that we cannot return
832  * error because we have compiled-in VFS hooks. So an (unlikely) failure here
833  * must result in panic().
834  */
835 static int __init inotify_user_setup(void)
836 {
837  BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
838  BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
839  BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
840  BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
841  BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
842  BUILD_BUG_ON(IN_OPEN != FS_OPEN);
843  BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
844  BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
845  BUILD_BUG_ON(IN_CREATE != FS_CREATE);
846  BUILD_BUG_ON(IN_DELETE != FS_DELETE);
847  BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
848  BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
849  BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
850  BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
851  BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
852  BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
853  BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
854  BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
855 
856  BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
857 
858  inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
859  event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
860 
861  inotify_max_queued_events = 16384;
862  inotify_max_user_instances = 128;
863  inotify_max_user_watches = 8192;
864 
865  return 0;
866 }
867 module_init(inotify_user_setup);