Linux Kernel 3.7.1
locking.c
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
	return;
}
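
/*
 * Usage sketch (illustrative, not part of locking.c): a writer that
 * holds the spinning write lock converts it to blocking before doing
 * work that may sleep, then converts back. cond_resched() stands in
 * for any sleepable step; the function name is hypothetical.
 */
static void example_write_blocking_round_trip(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* spinning write lock */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	cond_resched();			/* may sleep: eb->lock is not held */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);		/* drops the spinning lock */
}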

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	read_unlock(&eb->lock);
	wait_event(eb->write_lock_wq,
		   atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
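
/*
 * Usage sketch (illustrative, not part of locking.c): the common reader
 * pattern. The spinning read lock is converted to its blocking form
 * before sleepable work and released with the _blocking variant.
 */
static void example_read_side(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);	/* spinning read lock */
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	cond_resched();			/* may sleep: eb->lock is not held */
	btrfs_tree_read_unlock_blocking(eb);
}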

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
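
/*
 * Usage sketch (illustrative, not part of locking.c): the try-lock form
 * lets a caller avoid sleeping on contention and fall back to the
 * waiting btrfs_tree_read_lock() path instead.
 */
static void example_try_then_block(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_read_lock(eb))
		btrfs_tree_read_lock(eb);	/* contended: wait instead */
	/* ... short, non-sleeping read-side work ... */
	btrfs_tree_read_unlock(eb);
}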

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
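
/*
 * Usage sketch (illustrative, not part of locking.c): the same pattern
 * for the write side; on failure the caller takes btrfs_tree_lock() and
 * accepts that it may wait for blocking holders.
 */
static void example_try_write_then_block(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_write_lock(eb))
		btrfs_tree_lock(eb);		/* contended: wait instead */
	/* ... short, non-sleeping update of the extent buffer ... */
	btrfs_tree_unlock(eb);
}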

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
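
/*
 * Pairing summary (informational, not in the original source):
 *   btrfs_tree_read_lock()  ->  btrfs_tree_read_unlock()
 *   btrfs_tree_read_lock() + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK)
 *                           ->  btrfs_tree_read_unlock_blocking()
 *   btrfs_tree_lock()       ->  btrfs_tree_unlock() (spinning or blocking)
 */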

/*
 * take a spinning write lock. This will wait for both
 * blocking readers or writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
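
/*
 * Usage sketch (illustrative, not part of locking.c): the lock_nested
 * path in btrfs_tree_read_lock() lets the thread that already holds the
 * blocking write lock take and drop a read lock on the same buffer,
 * which btrfs_find_all_roots() relies on.
 */
static void example_nested_read(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	btrfs_tree_read_lock(eb);	/* same pid: sets lock_nested only */
	btrfs_tree_read_unlock(eb);	/* clears lock_nested, returns early */
	btrfs_tree_unlock(eb);		/* drops the blocking write lock */
}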

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
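
/*
 * Usage sketch (illustrative, not part of locking.c): btrfs_tree_unlock()
 * handles a lock still in its blocking state, so no
 * btrfs_clear_lock_blocking_rw() round trip is needed before unlocking.
 */
static void example_unlock_while_blocking(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	cond_resched();			/* may sleep */
	btrfs_tree_unlock(eb);		/* blockers branch: wakes waiters */
}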

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}