8 #include "../dm-bufio.h"
11 #include <linux/module.h>
12 #include <linux/slab.h>
17 #define DM_MSG_PREFIX "block manager"
43 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
/*
 * Fragment (original lines 55-83, most lines missing from this extract).
 * __find_holder() scans a block_lock's holder table; it is called below
 * with NULL to locate a free slot, into which __add_holder() records the
 * new holding task.  TODO(review): recover the full bodies — this extract
 * is too sampled to review the logic.
 */
55 static unsigned __find_holder(
struct block_lock *lock,
71 unsigned h = __find_holder(lock,
NULL);
72 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
/* Debug builds only: stack_trace storage for the holder slot. */
73 struct stack_trace *
t;
79 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
/* Point the trace at the per-slot entry buffer inside the lock. */
83 t->entries = lock->entries[
h];
/*
 * Fragment (original lines 92-94): remove a task from the holder table.
 * put_task_struct() drops a reference — presumably paired with a
 * get_task_struct() in the add path not visible here; confirm.
 */
92 unsigned h = __find_holder(lock, task);
94 put_task_struct(task);
/*
 * Fragment (original lines 97-112): detect recursive acquisition of a
 * block_lock by the current task and report it via DMERR; the actual
 * holder-table walk is missing from this extract.
 *
 * NOTE(review): "aquisition" in the last message is a typo for
 * "acquisition".  Fixing it changes a runtime log string, which cannot be
 * done safely in a comments-only pass on an incomplete body — flagged for
 * a follow-up fix.
 */
97 static int __check_holder(
struct block_lock *lock)
100 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
/* Debug builds: static trace used to print where the lock was taken. */
101 static struct stack_trace
t;
107 DMERR(
"recursive lock detected in pool metadata");
108 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
109 DMERR(
"previously held here:");
112 DMERR(
"subsequent aquisition attempted here:");
/*
 * Fragment (original lines 127-141): signatures of the waiter helpers —
 * __wait() blocks a struct waiter, __wake_waiter() releases one.  Bodies
 * are entirely missing from this extract; cannot review.
 */
127 static void __wait(
struct waiter *
w)
141 static void __wake_waiter(
struct waiter *w)
/*
 * Fragment (original lines 155-186): __wake_many() hands the lock to
 * queued waiters — it stops once count reaches MAX_HOLDERS and records
 * each woken waiter's task via __add_holder().  The trailing
 * INIT_LIST_HEAD (original line 186) belongs to a later initialisation
 * function whose name is not visible in this extract.
 */
155 static void __wake_many(
struct block_lock *lock)
161 if (lock->
count >= MAX_HOLDERS)
169 __add_holder(lock, w->
task);
175 __add_holder(lock, w->
task);
186 INIT_LIST_HEAD(&lock->
waiters);
/*
 * Fragment (original lines 191-194): a read lock is available while
 * count is in [0, MAX_HOLDERS) — presumably a negative count denotes a
 * write holder; the third conjunct of the condition is missing here.
 */
191 static int __available_for_read(
struct block_lock *lock)
193 return lock->
count >= 0 &&
194 lock->
count < MAX_HOLDERS &&
/*
 * Fragment (original lines 198-222): blocking read-lock acquire.
 * Under the internal spinlock it first rejects recursive acquisition
 * (__check_holder), takes the fast path when __available_for_read()
 * holds, and otherwise — in code missing from this extract — queues a
 * waiter and sleeps.  Three spin_unlock sites correspond to the error,
 * fast and slow exits.
 */
198 static int bl_down_read(
struct block_lock *lock)
203 spin_lock(&lock->
lock);
204 r = __check_holder(lock);
206 spin_unlock(&lock->
lock);
210 if (__available_for_read(lock)) {
213 spin_unlock(&lock->
lock);
222 spin_unlock(&lock->
lock);
/*
 * Fragment (original lines 229-246): non-blocking variant of
 * bl_down_read() — same recursion check and availability test, but the
 * fail-instead-of-wait path is missing from this extract.
 */
229 static int bl_down_read_nonblock(
struct block_lock *lock)
233 spin_lock(&lock->
lock);
234 r = __check_holder(lock);
238 if (__available_for_read(lock)) {
246 spin_unlock(&lock->
lock);
/*
 * Fragment (original lines 250-258): release a read hold.  Under the
 * spinlock it checks for queued waiters — the wake call itself
 * (presumably __wake_many()) is missing from this extract.
 */
250 static void bl_up_read(
struct block_lock *lock)
252 spin_lock(&lock->
lock);
256 if (!list_empty(&lock->
waiters))
258 spin_unlock(&lock->
lock);
/*
 * Fragment (original lines 261-289): blocking write-lock acquire.
 * Recursion check under the spinlock, then three exit paths (error,
 * uncontended, after-wait) — the availability test and the waiter
 * queueing between them are missing from this extract.
 */
261 static int bl_down_write(
struct block_lock *lock)
266 spin_lock(&lock->
lock);
267 r = __check_holder(lock);
269 spin_unlock(&lock->
lock);
276 spin_unlock(&lock->
lock);
289 spin_unlock(&lock->
lock);
/*
 * Fragment (original lines 297-304): release the write hold; mirrors
 * bl_up_read() — wakes queued waiters (wake call missing from this
 * extract) under the spinlock.
 */
297 static void bl_up_write(
struct block_lock *lock)
299 spin_lock(&lock->
lock);
302 if (!list_empty(&lock->
waiters))
304 spin_unlock(&lock->
lock);
/*
 * Fragment (original lines 307-311): log a recursive block-lock
 * acquisition for block b; r is presumably the error code from the
 * failed bl_down_* call — the body's use of it is not visible here.
 */
307 static void report_recursive_bug(
dm_block_t b,
int r)
310 DMERR(
"recursive acquisition of block %llu requested.",
311 (
unsigned long long) b);
/*
 * Fragment (original lines 323-353): to_buffer() maps the public
 * dm_block handle to its underlying dm_buffer, and the two callbacks are
 * registered with the dm-bufio client (see the _create fragment below in
 * the file).  All three bodies are missing from this extract.
 */
323 static struct dm_buffer *to_buffer(
struct dm_block *b)
346 static void dm_block_manager_alloc_callback(
struct dm_buffer *
buf)
353 static void dm_block_manager_write_callback(
struct dm_buffer *
buf)
/*
 * Fragment (original lines 373-389): block-manager constructor — creates
 * the dm-bufio client with the alloc/write callbacks above and converts
 * an IS_ERR() client pointer into an error return via PTR_ERR().  The
 * function name and the rest of the signature are not visible in this
 * extract.
 */
373 unsigned max_held_per_thread)
386 dm_block_manager_alloc_callback,
387 dm_block_manager_write_callback);
388 if (IS_ERR(bm->
bufio)) {
389 r = PTR_ERR(bm->
bufio);
/*
 * Fragment (original line 436): error report for a block whose attached
 * validator differs from the one requested.  Enclosing function is not
 * visible in this extract.
 */
436 DMERR(
"validator mismatch (old=%s vs new=%s) for block %llu",
/*
 * Fragment (original lines 459-470): blocking read-lock path — takes the
 * per-block read lock, reports recursion on failure, validates the
 * buffer, and releases the lock if validation fails.  Signature and
 * surrounding control flow are missing from this extract.
 */
459 r = bl_down_read(&aux->
lock);
462 report_recursive_bug(b, r);
468 r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
470 bl_up_read(&aux->
lock);
/*
 * Fragment (original lines 495-506): write-lock path, mirroring the
 * read-lock fragment above — acquire, report recursion, validate, and
 * drop the write lock on validation failure.
 */
495 r = bl_down_write(&aux->
lock);
498 report_recursive_bug(b, r);
504 r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
506 bl_up_write(&aux->
lock);
/*
 * Fragment (original lines 530-540): non-blocking read-lock path — same
 * shape as the blocking one but using bl_down_read_nonblock(), so it
 * fails instead of sleeping when the block is write-held.
 */
530 r = bl_down_read_nonblock(&aux->
lock);
533 report_recursive_bug(b, r);
538 r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
540 bl_up_read(&aux->
lock);
/*
 * Fragment (original lines 566-588): the first line is a write-lock
 * acquire from one function; lines 586/588 appear to be an unlock
 * routine choosing bl_up_write() or bl_up_read() depending on how the
 * block was held — the discriminating condition is missing from this
 * extract; confirm against the full source.
 */
566 r = bl_down_write(&aux->
lock);
586 bl_up_write(&aux->
lock);
588 bl_up_read(&aux->
lock);
597 struct dm_block *superblock)
/*
 * Fragment (original line 623): checksum helper — CRC32C over data/len
 * seeded with all-ones and XORed with init_xor (used to domain-separate
 * checksums of different metadata block types — presumably; confirm
 * against callers).
 */
623 return crc32c(~(
u32) 0, data, len) ^ init_xor;