30 #include <linux/inotify.h>
31 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
36 #include <linux/types.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
44 #include <asm/ioctls.h>
56 #include <linux/sysctl.h>
/*
 * Fragment of the inotify sysctl table (fs.inotify.*): three tunables that
 * cap per-user instances, per-user watches, and queued events.  Interior
 * lines of each ctl_table entry (mode, proc_handler, extra1/extra2) are
 * missing from this excerpt.
 * NOTE(review): this is an incomplete extraction — do not edit in place
 * without the full table from the original file.
 */
/* fs.inotify.max_user_instances — ceiling on inotify fds per user. */
62 .procname =
"max_user_instances",
63 .data = &inotify_max_user_instances,
64 .maxlen =
sizeof(
int),
/* fs.inotify.max_user_watches — ceiling on watches per user. */
70 .procname =
"max_user_watches",
71 .data = &inotify_max_user_watches,
72 .maxlen =
sizeof(
int),
/* fs.inotify.max_queued_events — per-group event queue limit. */
78 .procname =
"max_queued_events",
79 .data = &inotify_max_queued_events,
80 .maxlen =
sizeof(
int),
/*
 * NOTE(review): appears to be the base-mask line of inotify_arg_to_mask():
 * every watch implicitly listens for IGNORED, child events and UNMOUNT
 * in addition to whatever userspace requested — confirm against full file.
 */
97 mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* inotify_poll fragment: register the caller on the group's notification
 * waitqueue so poll/select wake when events are queued. */
117 poll_wait(file, &group->notification_waitq, wait);
/*
 * get_one_event fragment: peek at the next queued fsnotify event and check
 * that the on-the-wire size (struct inotify_event header plus the name,
 * rounded up) fits in the user's remaining buffer `count`.
 * NOTE(review): most of the body (queue lock, dequeue, empty-queue path)
 * is elided in this excerpt.
 */
133 static struct fsnotify_event *get_one_event(
struct fsnotify_group *group,
137 struct fsnotify_event *
event;
144 pr_debug(
"%s: group=%p event=%p\n", __func__, group, event);
/* Account for the filename, NUL-padded up to an event_size boundary. */
147 event_size +=
roundup(event->name_len + 1, event_size);
/* Too big for the caller's buffer — presumably bail out here. */
149 if (event_size > count)
/*
 * copy_event_to_user fragment: serialise one fsnotify event into the
 * userspace `struct inotify_event` layout — fixed header, then the file
 * name, then zero padding out to the rounded name_len.  The wd comes from
 * the event's inotify private data (looked up under event->lock).
 * NOTE(review): header copy_to_user and padding loop are elided here.
 */
165 static ssize_t copy_event_to_user(
struct fsnotify_group *group,
166 struct fsnotify_event *event,
170 struct fsnotify_event_private_data *fsn_priv;
175 pr_debug(
"%s: group=%p event=%p\n", __func__, group, event);
/* event->lock protects the private-data list while we read from it. */
178 spin_lock(&event->lock);
180 spin_unlock(&event->lock);
186 fsnotify_event_priv_data);
/* Name is NUL-terminated and padded up to an alignment boundary. */
196 name_len =
roundup(event->name_len + 1, event_size);
214 unsigned int len_to_zero = name_len -
event->name_len;
/* Copy the raw name bytes; padding with zeros follows (elided). */
216 if (
copy_to_user(buf, event->file_name, event->name_len))
218 buf +=
event->name_len;
/*
 * inotify_read fragment: drain as many whole queued events into the user
 * buffer as fit, blocking per O_NONBLOCK semantics (wait loop elided).
 * Returns bytes written, or an error; a partial copy after at least one
 * successful event is still reported as progress unless it was -EFAULT.
 */
230 static ssize_t inotify_read(
struct file *file,
char __user *buf,
231 size_t count, loff_t *
pos)
233 struct fsnotify_group *
group;
234 struct fsnotify_event *kevent;
/* NULL => queue empty; ERR_PTR => next event won't fit in `count`. */
246 kevent = get_one_event(group, count);
249 pr_debug(
"%s: group=%p kevent=%p\n", __func__, group, kevent);
252 ret = PTR_ERR(kevent);
255 ret = copy_event_to_user(group, kevent, buf);
/* Report bytes consumed so far rather than the error, except -EFAULT. */
278 if (start != buf && ret != -
EFAULT)
/*
 * inotify_fasync fragment: standard fasync hook; delegate to
 * fasync_helper() on the group's fasync list and map failure to -EIO.
 */
283 static int inotify_fasync(
int fd,
struct file *file,
int on)
287 return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -
EIO;
/*
 * inotify_release fragment: ->release for the inotify fd; the actual
 * group teardown / reference drop is elided from this excerpt.
 */
290 static int inotify_release(
struct inode *
ignored,
struct file *file)
294 pr_debug(
"%s: group=%p\n", __func__, group);
/*
 * inotify_ioctl fragment: ioctl handler.  The visible code walks the
 * group's queued events summing their serialised sizes and reports the
 * total to userspace via put_user() — NOTE(review): consistent with the
 * FIONREAD case, but the switch/cmd dispatch is elided here.
 */
304 static long inotify_ioctl(
struct file *file,
unsigned int cmd,
307 struct fsnotify_group *
group;
308 struct fsnotify_event_holder *holder;
309 struct fsnotify_event *
event;
317 pr_debug(
"%s: group=%p cmd=%u\n", __func__, group, cmd);
323 event = holder->event;
/* Same size accounting as the read path: header + padded name. */
326 send_len +=
roundup(event->name_len + 1,
330 ret =
put_user(send_len, (
int __user *) p);
/*
 * file_operations initializer fragment for the inotify fd; compat and
 * native ioctl share one handler.  (Struct name and remaining fields,
 * e.g. .llseek/.owner, are elided from this excerpt.)
 */
338 .poll = inotify_poll,
339 .read = inotify_read,
340 .fasync = inotify_fasync,
341 .release = inotify_release,
342 .unlocked_ioctl = inotify_ioctl,
343 .compat_ioctl = inotify_ioctl,
/*
 * inotify_find_inode fragment: resolve a userspace path to a struct path
 * for watch placement; body (user_path_at / permission check) elided.
 */
351 static int inotify_find_inode(
const char __user *dirname,
struct path *
path,
unsigned flags)
/*
 * NOTE(review): appears to be the tail of inotify_add_to_idr(): record the
 * newly allocated watch descriptor as last_wd and drop the idr spinlock.
 */
380 *last_wd = i_mark->
wd;
383 spin_unlock(idr_lock);
/*
 * inotify_idr_find_locked fragment: look up a watch descriptor in the
 * group's idr; caller must hold idr_lock.  Presumably takes a reference
 * on the found mark (idr_find call elided).
 */
389 static struct inotify_inode_mark *inotify_idr_find_locked(
struct fsnotify_group *group,
392 struct idr *idr = &group->inotify_data.idr;
393 spinlock_t *idr_lock = &group->inotify_data.idr_lock;
400 struct fsnotify_mark *fsn_mark = &i_mark->
fsn_mark;
/*
 * inotify_idr_find fragment: locking wrapper around
 * inotify_idr_find_locked() (the spin_lock call is elided here).
 */
414 spinlock_t *idr_lock = &group->inotify_data.idr_lock;
417 i_mark = inotify_idr_find_locked(group, wd);
418 spin_unlock(idr_lock);
/*
 * do_inotify_remove_from_idr fragment: low-level removal of a mark's wd
 * from the group's idr; idr_remove and refcount drop elided.
 */
423 static void do_inotify_remove_from_idr(
struct fsnotify_group *group,
426 struct idr *idr = &group->inotify_data.idr;
427 spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/*
 * inotify_remove_from_idr fragment: sanity-checked removal of a mark from
 * the idr.  Each WARN_ONCE guards an "impossible" inconsistency (mark has
 * no wd, wd not present in idr, or idr maps the wd to a different mark);
 * on success falls through to do_inotify_remove_from_idr().
 * NOTE(review): several WARN format continuation lines are elided.
 */
442 static void inotify_remove_from_idr(
struct fsnotify_group *group,
445 spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* Mark was never assigned a wd — should be impossible. */
457 WARN_ONCE(1,
"%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
458 " i_mark->inode=%p\n", __func__, i_mark, i_mark->
wd,
464 found_i_mark = inotify_idr_find_locked(group, wd);
/* wd set on the mark but absent from the idr. */
466 WARN_ONCE(1,
"%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
467 " i_mark->inode=%p\n", __func__, i_mark, i_mark->
wd,
/* idr maps this wd to some other mark — dump both for debugging. */
477 if (
unlikely(found_i_mark != i_mark)) {
478 WARN_ONCE(1,
"%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
479 "mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
480 "found_i_mark->group=%p found_i_mark->inode=%p\n",
481 __func__, i_mark, i_mark->
wd, i_mark->
fsn_mark.group,
482 i_mark->
fsn_mark.i.inode, found_i_mark, found_i_mark->
wd,
495 " i_mark->inode=%p\n", __func__, i_mark, i_mark->
wd,
501 do_inotify_remove_from_idr(group, i_mark);
507 spin_unlock(idr_lock);
/*
 * NOTE(review): fragment whose signature starts before this excerpt —
 * looks like inotify_ignored_and_remove_idr(): queue a synthetic
 * IN_IGNORED event for the watch (tagging it with the mark's wd via the
 * event private data), then remove the mark from the idr and decrement
 * the owning user's watch count.  Allocation/queueing lines are elided.
 */
514 struct fsnotify_group *group)
517 struct fsnotify_event *ignored_event, *notify_event;
519 struct fsnotify_event_private_data *fsn_event_priv;
523 FSNOTIFY_EVENT_NONE,
NULL, 0,
/* Presumably taken when the event allocation above failed. */
532 goto skip_send_ignore;
536 fsn_event_priv->group =
group;
/* Userspace identifies the dying watch by this wd. */
537 event_priv->
wd = i_mark->
wd;
541 if (IS_ERR(notify_event))
542 ret = PTR_ERR(notify_event);
/* Tear down bookkeeping regardless of whether IGNORED was queued. */
554 inotify_remove_from_idr(group, i_mark);
556 atomic_dec(&group->inotify_data.user->inotify_watches);
/*
 * inotify_free_mark fragment: free callback for an inotify mark
 * (container_of + kmem_cache_free presumably elided).
 */
560 static void inotify_free_mark(
struct fsnotify_mark *fsn_mark)
/*
 * inotify_update_existing_watch fragment: if the inode already carries a
 * mark for this group, merge/replace its event mask from the userspace
 * `arg` under the mark lock, then — if the effective mask changed —
 * recompute the group and inode interest masks (recalc calls elided).
 */
569 static int inotify_update_existing_watch(
struct fsnotify_group *group,
573 struct fsnotify_mark *fsn_mark;
575 __u32 old_mask, new_mask;
581 mask = inotify_arg_to_mask(arg);
/* fsn_mark->lock serialises concurrent mask updates on this mark. */
591 spin_lock(&fsn_mark->lock);
593 old_mask = fsn_mark->mask;
598 new_mask = fsn_mark->mask;
600 spin_unlock(&fsn_mark->lock);
602 if (old_mask != new_mask) {
/* Bits we stopped listening for ... */
604 int dropped = (old_mask & ~new_mask);
/* ... and new bits the inode's aggregate mask doesn't cover yet. */
606 int do_inode = (new_mask & ~inode->i_fsnotify_mask);
609 if (dropped || do_inode)
/*
 * inotify_new_watch fragment: create a brand-new watch — reject masks
 * with no IN_ALL_EVENTS bits, enforce the per-user watch limit, allocate
 * a wd in the idr, attach the mark to the inode (elided), and return the
 * new wd.  Failure path removes the mark from the idr again.
 */
623 static int inotify_new_watch(
struct fsnotify_group *group,
630 struct idr *idr = &group->inotify_data.idr;
631 spinlock_t *idr_lock = &group->inotify_data.idr_lock;
634 mask = inotify_arg_to_mask(arg);
/* A watch that matches nothing is a userspace bug — refuse it. */
635 if (
unlikely(!(mask & IN_ALL_EVENTS)))
/* fs.inotify.max_user_watches enforcement. */
647 if (
atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
650 ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
/* Undo the idr insertion if attaching the mark failed. */
659 inotify_remove_from_idr(group, tmp_i_mark);
664 atomic_inc(&group->inotify_data.user->inotify_watches);
/* Success: the syscall returns the watch descriptor. */
667 ret = tmp_i_mark->
wd;
/*
 * inotify_update_watch fragment: try updating an existing watch first;
 * only when none exists fall back to creating a new one.
 */
676 static int inotify_update_watch(
struct fsnotify_group *group,
struct inode *inode,
u32 arg)
682 ret = inotify_update_existing_watch(group, inode, arg);
685 ret = inotify_new_watch(group, inode, arg);
/*
 * inotify_new_group fragment: allocate an fsnotify group for a new
 * inotify instance, seed its per-instance state (event-queue cap, wd
 * counter, fasync list) and — per the visible comparison — enforce
 * fs.inotify.max_user_instances (allocation and error paths elided).
 */
697 static struct fsnotify_group *inotify_new_group(
unsigned int max_events)
699 struct fsnotify_group *
group;
705 group->max_events = max_events;
/* Watch descriptors are handed out starting from 1 (last_wd = 0). */
709 group->inotify_data.last_wd = 0;
710 group->inotify_data.fa =
NULL;
714 inotify_max_user_instances) {
/*
 * NOTE(review): fragment of the inotify_init/init1 syscall body (the
 * SYSCALL_DEFINE line precedes this excerpt): create a group sized by
 * fs.inotify.max_queued_events and propagate allocation errors.
 */
726 struct fsnotify_group *
group;
737 group = inotify_new_group(inotify_max_queued_events);
739 return PTR_ERR(group);
/*
 * NOTE(review): fragment of the inotify_add_watch syscall body: resolve
 * the user path, fetch the group from the fd's private_data, and add or
 * update the watch (fdget/fdput and flag validation elided).
 */
757 struct fsnotify_group *
group;
779 ret = inotify_find_inode(
pathname, &path, flags);
784 inode = path.
dentry->d_inode;
785 group = f.
file->private_data;
788 ret = inotify_update_watch(group, inode, mask);
/*
 * NOTE(review): fragment of the inotify_rm_watch syscall body: look up
 * the mark for wd in the group's idr; the destroy/ignored path and
 * error handling are elided from this excerpt.
 */
797 struct fsnotify_group *
group;
811 group = f.
file->private_data;
814 i_mark = inotify_idr_find(group, wd);
/*
 * inotify_user_setup fragment: boot-time init setting the default values
 * of the three fs.inotify.* sysctls (cache/misc setup elided).
 */
835 static int __init inotify_user_setup(
void)
861 inotify_max_queued_events = 16384;
862 inotify_max_user_instances = 128;
863 inotify_max_user_watches = 8192;