/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
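
/*
 * Illustrative userspace sketch (editor's example, not part of this file):
 * the two lock personalities above are driven from fcntl() and flock().
 * Assuming <fcntl.h> and an open descriptor fd, an exclusive FL_POSIX lock
 * over the whole file (l_len == 0 means "through EOF") is:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * while the equivalent whole-file FL_FLOCK lock is simply:
 *
 *	flock(fd, LOCK_EX);
 */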

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);

/*
 * Protects the two list heads above, plus the inode->i_flock list
 */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);

void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);

static struct kmem_cache *filelock_cache __read_mostly;

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	fl->fl_lmops = NULL;

}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

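/*
 * Worked example (editor's note): with l_whence == SEEK_SET,
 * l_start == 100 and l_len == -10, the POSIX-2001 negative-length branch
 * above yields fl_end = 100 - 1 = 99 and fl_start = 100 + (-10) = 90,
 * i.e. the lock covers bytes 90..99.  With l_len == 0, fl_end stays
 * OFFSET_MAX and the lock runs from l_start through end of file.
 */
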
#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/*
 * Remove waiter from its blocker's block list, taking the global lock.
 */
void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 *
 * To handle those cases, we just bail out after a few iterations.
 */

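/*
 * Illustrative cycle (editor's note): suppose task A holds lock L1 and
 * blocks waiting for lock L2, which task B holds.  If B now requests a
 * range conflicting with L1, the chain of waiters B -> L1 (held by A)
 * -> L2 (held by B) leads back to B itself, so posix_locks_deadlock()
 * below returns 1 and the request fails with -EDEADLK instead of
 * sleeping forever.
 */
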
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing. If new lock(s) need to be inserted
	 * all modifications are done below this, so it's safe yet to
	 * bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

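/*
 * Worked example (editor's note): if this owner holds a write lock over
 * bytes 0..199 and requests F_UNLCK over 50..149, the different-type
 * branch above finds left == right.  The spare new_fl2 is then inserted
 * as the "right" piece covering 150..199, the original lock is shrunk to
 * the "left" piece 0..49, and waiters on both pieces are woken.
 */
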
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

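/*
 * Illustrative setup (editor's note): mandatory locking is only honoured
 * on filesystems mounted with "-o mand" and on files whose setgid bit is
 * set while group-execute is clear, e.g.:
 *
 *	mount -o mand /dev/sda1 /mnt
 *	chmod g+s,g-x /mnt/data
 *
 * See Documentation/filesystems/mandatory-locking.txt for details.
 */
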
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock(before);
	}
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
 *	@mode: the open mode (read or write)
 *
 *	break_lease (inlined for speed) has checked there already is at least
 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
 *	a call to open() or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);

	lock_flocks();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	if (!locks_conflict(flock, new_fl))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(flock))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		fl->fl_lmops->lm_break(fl);
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (locks_conflict(new_fl, flock))
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 *	lease_get_mtime - get the last modified time of an inode
 *	@inode: the inode
 *	@time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = target_leasetype(fl);
			break;
		}
	}
	unlock_flocks();
	return type;
}

int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	lease = *flp;

	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((dentry->d_count > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}
		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_before != NULL) {
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}

int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}

/**
 *	generic_setlease	-	sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@flp: input - file_lock to use, output - file_lock inserted
 *
 *	The (input) flp->fl_lmops->lm_break function is required
 *	by break_lease().
 *
 *	Called with file_lock_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);

static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	if (filp->f_op && filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease);
	else
		return generic_setlease(filp, arg, lease);
}

/**
 *	vfs_setlease        -       sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@lease: file_lock to use
 *
 *	Call this to establish a lease on the file.
 *	The (*lease)->fl_lmops->lm_break operation must be set; if not,
 *	break_lease will oops!
 *
 *	This will call the filesystem's setlease file method, if
 *	defined.  Note that there is no getlease method; instead, the
 *	filesystem setlease method should call back to setlease() to
 *	add a lease to the inode's lease list, where fcntl_getlease() can
 *	find it.  Since fcntl_getlease() only reports whether the current
 *	task holds a lease, a cluster filesystem need only do this for
 *	leases held by processes on this node.
 *
 *	There is also no break_lease method; filesystems that
 *	handle their own leases should break leases themselves from the
 *	filesystem's open, create, and (on truncate) setattr methods.
 *
 *	Warning: the only current setlease methods exist only to disable
 *	leases in certain cases.  More vfs_setlease users may be added in
 *	the future, direct handling of lease requests by filesystems may not
 *	be a good idea.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);

static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}

/**
 *	fcntl_setlease	-	sets a lease on an open file
 *	@fd: open file descriptor
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *
 *	Call this fcntl to establish a lease on the file.
 *	Note that you also need to call %F_SETSIG to
 *	receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return do_fcntl_delete_lease(filp);
	return do_fcntl_add_lease(fd, filp, arg);
}

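/*
 * Illustrative userspace sketch (editor's example, not part of this
 * file): take a read lease and learn about lease breaks via a signal:
 *
 *	fcntl(fd, F_SETSIG, SIGIO);
 *	fcntl(fd, F_SETLEASE, F_RDLCK);
 *
 * In the signal handler, downgrade or release the lease within
 * lease_break_time seconds:
 *
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */
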
/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 *	sys_flock: - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of
 *
 *	%LOCK_SH -- a shared lock.
 *
 *	%LOCK_EX -- an exclusive lock.
 *
 *	%LOCK_UN -- remove an existing lock.
 *
 *	%LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 *	processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(f.file, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op && f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
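
/*
 * Illustrative userspace sketch (editor's example; the path is made up):
 * two cooperating processes serialising on a lock file:
 *
 *	int fd = open("/var/lock/mydaemon", O_RDONLY | O_CREAT, 0600);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		exit(1);	(already locked by another instance)
 */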

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}

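/*
 * Illustrative userspace sketch (editor's example, not part of this
 * file): ask which lock would block a write lock on the first 100 bytes:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 100 };
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK)
 *		printf("blocked by pid %d\n", (int)fl.l_pid);
 */
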
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	/*
	 * we need that spin_lock here - it prevents reordering between
	 * update of inode->i_flock and check for it done in close().
	 * rcu_read_lock() wouldn't do.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */


/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* What? */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}

/**
 *	posix_unblock_lock - stop waiting for a file lock
 *	@filp:   how the file was opened
 *	@waiter: the lock which was waiting
 *
 *	lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
	int status = 0;

	lock_flocks();
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	unlock_flocks();
	return status;
}
EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK  MSNFS     ");
		} else {
			seq_printf(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE  ");
		if (lease_breaking(fl))
			seq_printf(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE    ");
		else
			seq_printf(f, "BREAKER   ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t ;-( */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
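
/*
 * Editor's note: a typical /proc/locks line produced by the format above
 * looks like (values invented for illustration):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 */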

static int locks_show(struct seq_file *f, void *v)
{
	struct file_lock *fl, *bfl;

	fl = list_entry(v, struct file_lock, fl_link);

	lock_get_status(f, fl, *((loff_t *)f->private), "");

	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");

	return 0;
}

static void *locks_start(struct seq_file *f, loff_t *pos)
{
	loff_t *p = f->private;

	lock_flocks();
	*p = (*pos + 1);
	return seq_list_start(&file_lock_list, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	loff_t *p = f->private;
	++*p;
	return seq_list_next(v, &file_lock_list, pos);
}

static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}

static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}

static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
#endif

/**
 *	lock_may_read - checks that the region is free of locks
 *	@inode: the inode that is being read
 *	@start: the first byte to read
 *	@len: the number of bytes to read
 *
 *	Emulates Windows locking requirements.  Whole-file
 *	mandatory locks (share modes) can prohibit a read and
 *	byte-range POSIX locks can prohibit a read if they overlap.
 *
 *	N.B. this function is only ever called
 *	from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_flocks();
	return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 *	lock_may_write - checks that the region is free of locks
 *	@inode: the inode that is being written
 *	@start: the first byte to write
 *	@len: the number of bytes to write
 *
 *	Emulates Windows locking requirements.  Whole-file
 *	mandatory locks (share modes) can prohibit a write and
 *	byte-range POSIX locks can prohibit a write if they overlap.
 *
 *	N.B. this function is only ever called
 *	from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_flocks();
	return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	return 0;
}

core_initcall(filelock_init);