#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#define __KERNEL_SYSCALLS__
static int drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(unsigned long data);
static void _tl_clear(struct drbd_conf *mdev);
#ifdef CONFIG_DRBD_FAULT_INJECTION
static int fault_count;
static const struct block_device_operations drbd_ops = {
	.release = drbd_release,
	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);
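/*
 * Editor's note: a minimal sketch (not part of the driver) of the
 * fallback pattern above: prefer a dedicated, pre-reserved pool when
 * it exists, else fall back to a plain allocation. The helper names
 * are hypothetical.
 */
static inline void *__example_alloc_with_fallback(void *pool,
						  void *(*from_pool)(void *),
						  void *(*plain)(void))
{
	return pool ? from_pool(pool) : plain();	/* pool wins when available */
}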
static int tl_init(struct drbd_conf *mdev)
	INIT_LIST_HEAD(&b->w.list);
static void tl_cleanup(struct drbd_conf *mdev)
	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->br_number = newest_before->br_number + 1;
		unsigned int set_size)
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
	int rv, n_writes, n_reads;
	INIT_LIST_HEAD(&carry_reads);
		rv = _req_mod(req, what);
		if (b->w.cb == NULL) {
			inc_ap_pending(mdev);
			drbd_queue_work(&mdev->data.work, &b->w);
		list_add(&carry_reads, &b->requests);
	list_splice(&carry_reads, &b->requests);
	INIT_LIST_HEAD(&b->w.list);
	list_splice(&carry_reads, &b->requests);
static void _tl_clear(struct drbd_conf *mdev)
	_tl_restart(mdev, what);
static int cl_wide_st_chg(struct drbd_conf *mdev,
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->req_lock, flags);
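/*
 * Editor's note (sketch, not part of the driver): the recurring
 * "ns.i = (os.i & ~mask.i) | val.i" merge replaces only the bits
 * selected by mask and keeps the rest of the old state word. E.g.
 * os=0b1010, mask=0b0110, val=0b0100 yields 0b1100.
 */
static inline unsigned int __example_state_merge(unsigned int os,
						 unsigned int mask,
						 unsigned int val)
{
	return (os & ~mask) | val;	/* masked bits from val, rest from os */
}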
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);
	if (!cl_wide_st_chg(mdev, os, ns))
	rv = is_valid_state(mdev, ns);
		rv = is_valid_state_transition(mdev, ns, os);
	spin_unlock_irqrestore(&mdev->req_lock, flags);
	init_completion(&done);
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);
	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);
		drbd_state_lock(mdev);
			drbd_state_unlock(mdev);
			   (rv = _req_st_cond(mdev, mask, val)));
			drbd_state_unlock(mdev);
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
		rv = _drbd_set_state(mdev, ns, f, &done);
	spin_unlock_irqrestore(&mdev->req_lock, flags);
	if (f & CS_SERIALIZE)
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
		is_susp(ns) ? 's' : 'r',
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
	fp = mdev->ldev->dc.fencing;
	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
	static const char *msg_table[] = {
	fp = mdev->ldev->dc.fencing;
	if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
	if (ns.disk > disk_max)
	if (ns.disk < disk_min) {
	if (ns.pdsk > pdsk_max)
	if (ns.pdsk < pdsk_min) {
static void drbd_resume_al(struct drbd_conf *mdev)
	ns = sanitize_state(mdev, os, ns, &ssw);
		rv = is_valid_state(mdev, ns);
			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		rv = is_valid_state_transition(mdev, ns, os);
	print_sanitize_warnings(mdev, ssw);
		pbp += sprintf(pbp, "role( %s -> %s ) ",
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
		dev_info(DEV, "Online Verify reached sector %llu\n",
		set_ov_position(mdev, ns.conn);
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
		if (mdf != mdev->ldev->md.flags) {
		drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		drbd_thread_stop_nowait(&mdev->receiver);
		drbd_thread_stop_nowait(&mdev->receiver);
		drbd_thread_restart_nowait(&mdev->receiver);
		drbd_resume_al(mdev);
		ascw->w.cb = w_after_state_ch;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
static void abw_start_sync(struct drbd_conf *mdev, int rv)
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
			char *why, enum bm_flag flags)
	fp = mdev->ldev->dc.fencing;
		_tl_restart(mdev, what);
				"send_bitmap (WFBitMapS)",
	if (is_susp(mdev->state)) {
	int was_io_error = 0;
		eh = mdev->ldev->dc.on_io_error;
			"ASSERT FAILED: disk is %s during detach\n",
			"ASSERT FAILED: disk is %s while going diskless\n",
	drbd_thread_stop_nowait(&mdev->worker);
static int drbd_thread_setup(void *arg)
	unsigned long flags;
	spin_unlock_irqrestore(&thi->t_lock, flags);
	spin_unlock_irqrestore(&thi->t_lock, flags);
	unsigned long flags;
		thi == &mdev->receiver ? "receiver" :
		thi == &mdev->asender  ? "asender"  :
		thi == &mdev->worker   ? "worker"   : "NONSENSE";
		dev_info(DEV, "Starting %s thread (from %s [%d])\n",
			dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
		init_completion(&thi->stop);
		spin_unlock_irqrestore(&thi->t_lock, flags);
1836 "drbd%d_%s", mdev_to_minor(mdev), me);
			spin_unlock_irqrestore(&thi->t_lock, flags);
		dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
		spin_unlock_irqrestore(&thi->t_lock, flags);
	unsigned long flags;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		spin_unlock_irqrestore(&thi->t_lock, flags);
		init_completion(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);
	if (cpumask_weight(mdev->cpu_mask))
	ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
			cpumask_set_cpu(cpu, mdev->cpu_mask);
	set_cpus_allowed_ptr(p, mdev->cpu_mask);
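/*
 * Editor's note: a sketch of the CPU selection above. Device minor N
 * is pinned to online CPU (N % number-of-online-CPUs), spreading the
 * per-device threads across CPUs deterministically. Assumes minors
 * are reasonably dense.
 */
static inline unsigned int __example_pick_cpu(unsigned int minor,
					      unsigned int nr_online_cpus)
{
	return minor % nr_online_cpus;	/* 0 <= result < nr_online_cpus */
}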
			  size_t size, unsigned msg_flags)
	ERR_IF(!size) return false;
	sent = drbd_send(mdev, sock, h, size, msg_flags);
	ok = (sent == size);
	if (!ok && !signal_pending(current))
			cmdname(cmd), (int)size, sent);
	if (use_data_socket) {
		sock = mdev->data.socket;
		sock = mdev->meta.socket;
	if (use_data_socket)
	if (!drbd_get_data_sock(mdev))
	drbd_put_data_sock(mdev);
		: apv == 88 ? sizeof(struct p_rs_param)
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: sizeof(struct p_rs_param_95);
	mutex_lock(&mdev->data.mutex);
	if (likely(sock != NULL)) {
		p = &mdev->data.sbuf.rs_param_95;
			dev_err(DEV, "--dry-run is not supported by peer");
		uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
	u64 *uuid = mdev->ldev->md.uuid;
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
	dev_info(DEV, "%s effective data uuid: %016llX\n",
		 (unsigned long long)mdev->ed_uuid);
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
	drbd_state_lock(mdev);
	sock = mdev->data.socket;
	drbd_state_unlock(mdev);
	sock = mdev->data.socket;
	unsigned long plain_bits;
			DCBP_set_start(p, 1);
			DCBP_set_start(p, 0);
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
		bits = vli_encode_bits(&bs, rl);
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
	if (plain_bits < (len << 3)) {
		bm_xfer_ctx_bit_to_word_offset(c);
		bm_xfer_ctx_bit_to_word_offset(c);
		DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
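/*
 * Editor's note (sketch): "(8 - bs.cur.bit) & 0x7" is the number of
 * unused bits in the last partially filled byte of the bitstream:
 * bit == 0 (byte boundary) gives 0 pad bits, bit == 1 gives 7, ...,
 * bit == 7 gives 1.
 */
static inline unsigned int __example_pad_bits(unsigned int bits_in_last_byte)
{
	return (8 - bits_in_last_byte) & 0x7;
}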
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
	unsigned long num_words;
				sizeof(*p) + len, 0);
		c->bytes[0] += sizeof(*p) + len;
		len = num_words * sizeof(long);
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			dev_err(DEV, "Failed to write bitmap to disk!\n");
		err = send_bitmap_rle_or_plain(mdev, p, &c);
	if (!drbd_get_data_sock(mdev))
	drbd_put_data_sock(mdev);
	return _drbd_send_ack(mdev, cmd,
	return _drbd_send_ack(mdev, cmd,
		      void *digest, int digest_size,
	ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
	ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
	drop_it = mdev->meta.socket == sock
	dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
	int sent = drbd_send(mdev, mdev->data.socket,
			kmap(page) + offset, size, msg_flags);
	return sent == size;
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
	drbd_update_congested(mdev);
		sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
		if (we_should_drop_the_connection(mdev,
			__func__, (int)size, len, sent);
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
	struct bio_vec *bvec;
	bio_for_each_segment(bvec, bio, i) {
		if (!_drbd_no_send_page(mdev, bvec->bv_page,
					bvec->bv_offset, bvec->bv_len,
					i == bio->bi_vcnt - 1 ? 0 : MSG_MORE))
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
	struct bio_vec *bvec;
	bio_for_each_segment(bvec, bio, i) {
		if (!_drbd_send_page(mdev, bvec->bv_page,
				     bvec->bv_offset, bvec->bv_len,
				     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE))
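/*
 * Editor's note: both loops above pass MSG_MORE on every segment but
 * the last, hinting to the transport to coalesce the pieces into
 * fewer frames. A generic sketch of the idiom, with a hypothetical
 * send callback:
 */
typedef int (*__example_send_fn)(const void *buf, size_t len, unsigned flags);

static int __example_send_segments(__example_send_fn xmit,
				   const void * const *bufs,
				   const size_t *lens, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!xmit(bufs[i], lens[i], i == n - 1 ? 0 : MSG_MORE))
			return 0;	/* propagate failure, like the loops above */
	return 1;
}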
	struct page *page = e->pages;
	unsigned len = e->size;
		if (!_drbd_send_page(mdev, page, 0, l,
				     page_chain_next(page) ? MSG_MORE : 0))
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
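/*
 * Editor's note (sketch): on-wire DP_* flags are kept separate from
 * the kernel's REQ_* bits on purpose; REQ_* values can change between
 * kernel versions, while the wire format must stay stable across
 * peers. The translation idiom reduces to:
 */
static inline u32 __example_to_wire(unsigned long local_flags,
				    unsigned long local_bit, u32 wire_bit)
{
	return (local_flags & local_bit) ? wire_bit : 0;
}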
	if (!drbd_get_data_sock(mdev))
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
		ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
			ok = _drbd_send_zc_bio(mdev, req->master_bio);
		if (dgs > 0 && dgs <= 64) {
			unsigned char digest[64];
				"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
	drbd_put_data_sock(mdev);
	if (!drbd_get_data_sock(mdev))
		ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
		ok = _drbd_send_zc_ee(mdev, e);
	drbd_put_data_sock(mdev);
	      void *buf, size_t size, unsigned msg_flags)
	if (sock == mdev->data.socket) {
		drbd_update_congested(mdev);
		if (we_should_drop_the_connection(mdev, sock))
	} while (sent < size);
	if (sock == mdev->data.socket)
			sock == mdev->meta.socket ? "msock" : "sock",
	unsigned long flags;
	else if (!allow_oos)
	spin_unlock_irqrestore(&mdev->req_lock, flags);
static int drbd_release(struct gendisk *gd, fmode_t mode)
	struct drbd_conf *mdev = gd->private_data;
static void drbd_set_defaults(struct drbd_conf *mdev)
	mdev->sync_conf = (struct syncer_conf) {
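/*
 * Editor's note: the assignment above uses a C99 compound literal to
 * reset the whole syncer_conf in one statement; members not named in
 * the initializer are zeroed. Minimal sketch with a hypothetical
 * struct:
 */
struct __example_conf {
	int rate;
	int al_extents;
	int after;
};

static void __example_set_defaults(struct __example_conf *c)
{
	*c = (struct __example_conf) {
		.rate = 250,	/* everything not listed becomes 0 */
	};
}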
	drbd_set_defaults(mdev);
	sema_init(&mdev->data.work.s, 0);
	sema_init(&mdev->meta.work.s, 0);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->data.work.q);
	INIT_LIST_HEAD(&mdev->meta.work.q);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	mdev->go_diskless.cb = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
	drbd_set_my_capacity(mdev, 0);
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));
	drbd_set_defaults(mdev);
static void drbd_destroy_mempools(void)
	while (drbd_pp_pool) {
		drbd_pp_pool = (struct page *)page_private(page);
	if (drbd_md_io_bio_set)
	if (drbd_md_io_page_pool)
	if (drbd_ee_mempool)
	if (drbd_request_mempool)
	if (drbd_request_cache)
	if (drbd_bm_ext_cache)
	if (drbd_al_ext_cache)
	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
static int drbd_create_mempools(void)
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;
	if (drbd_request_cache == NULL)
	if (drbd_ee_cache == NULL)
	if (drbd_bm_ext_cache == NULL)
	if (drbd_al_ext_cache == NULL)
#ifdef COMPAT_HAVE_BIOSET_CREATE
	if (drbd_md_io_bio_set == NULL)
	if (drbd_md_io_page_pool == NULL)
	if (drbd_request_mempool == NULL)
	if (drbd_ee_mempool == NULL)
	for (i = 0; i < number; i++) {
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	drbd_pp_vacant = number;
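/*
 * Editor's note (sketch): the loop above threads the freshly allocated
 * pages into a LIFO free list, reusing each page's private field as
 * the "next" pointer so no separate list node is needed. The push
 * operation, with a hypothetical node type:
 */
struct __example_page {
	unsigned long private;		/* doubles as the next pointer */
};

static struct __example_page *__example_pool;

static void __example_pool_push(struct __example_page *pg)
{
	pg->private = (unsigned long)__example_pool;	/* link old head */
	__example_pool = pg;				/* pg is the new head */
}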
	drbd_destroy_mempools();
	.notifier_call = drbd_notify_sys,
static void drbd_release_ee_lists(struct drbd_conf *mdev)
		dev_err(DEV, "%d EEs in active list found!\n", rr);
		dev_err(DEV, "%d EEs in sync list found!\n", rr);
		dev_err(DEV, "%d EEs in read list found!\n", rr);
		dev_err(DEV, "%d EEs in done list found!\n", rr);
		dev_err(DEV, "%d EEs in net list found!\n", rr);
static void drbd_delete_device(unsigned int minor)
	struct drbd_conf *mdev = minor_to_mdev(minor);
			__FILE__, __LINE__);
	drbd_release_ee_lists(mdev);
static void drbd_cleanup(void)
		drbd_delete_device(i);
	drbd_destroy_mempools();
static int drbd_congested(void *congested_data, int bdi_bits)
	struct drbd_conf *mdev = congested_data;
	if (!may_inc_ap_bio(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		reason = reason == 'b' ? 'a' : 'n';
	struct gendisk *disk;
		goto out_no_cpumask;
	mdev->minor = minor;
	q->queuedata = mdev;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;
	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;
		goto out_no_io_page;
		goto out_no_app_reads;
3700 "drbd: never change the size or layout "
3701 "of the HandShake packet.\n");
3707 "drbd: invalid minor_count (%d)\n", minor_count);
3722 "drbd: unable to register block device major %d\n",
	minor_table = kzalloc(sizeof(struct drbd_conf *) * minor_count,
	err = drbd_create_mempools();
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	if (mdev->data.socket) {
	if (mdev->meta.socket) {
		  mdev->ldev = NULL;);
	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
		dev_err(DEV, "Error while reading metadata.\n");
		dev_err(DEV, "Error while reading metadata, magic not found.\n");
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
	mdev->last_md_mark_dirty.line = line;
	mdev->last_md_mark_dirty.func = func;
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
		drbd_set_ed_uuid(mdev, val);
	mdev->ldev->md.uuid[idx] = val;
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		drbd_uuid_move_history(mdev);
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
		drbd_resume_al(mdev);
	rv = work->io_fn(mdev);
		work->done(mdev, rv);
		  mdev->ldev = NULL;);
		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
	if ((mdev->ldev->md.flags & flag) != 0) {
	return (bdev->md.flags & flag) != 0;
static void md_sync_timer_fn(unsigned long data)
	drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
		mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#ifdef CONFIG_DRBD_FAULT_INJECTION

struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801
#define FAULT_RANDOM_ADD 479001701
#define FAULT_RANDOM_REFRESH 10000
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
	if (!rsp->count--) {
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
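/*
 * Editor's note: the generator above is a plain linear congruential
 * generator, state' = state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD
 * (modulo the unsigned word width), reseeded from a stronger source
 * every FAULT_RANDOM_REFRESH draws. Standalone sketch:
 */
static inline unsigned long __example_lcg_next(unsigned long *state)
{
	*state = *state * 39916801UL + 479001701UL;	/* same constants */
	return *state;
}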
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
	static struct fault_random_state rrs = {0, 0};
	unsigned int ret = (
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
			_drbd_fault_str(type));
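/*
 * Editor's note (sketch): "(random % 100) + 1" is uniform over 1..100,
 * so "<= fault_rate" is true with probability fault_rate percent;
 * fault_devs is a bitmask selecting which minors may fault at all.
 */
static inline int __example_should_fault(unsigned long rnd,
					 unsigned int fault_rate_pct)
{
	return ((rnd % 100) + 1) <= fault_rate_pct;	/* fault_rate_pct% of draws */
}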
	static char buildtag[38] = "\0uilt-in";
	if (buildtag[0] == 0) {