26 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/connector.h>
38 #include <asm/unaligned.h>
41 #include <linux/compiler.h>
/*
 * Forward declarations: helpers that append one (tag, length, value)
 * record to a netlink tag list and return the advanced write pointer
 * (definitions near the end of this file).
 */
static unsigned short *tl_add_blob(unsigned short *tl, enum drbd_tags tag,
				   const void *data, int len);
static unsigned short *tl_add_str(unsigned short *tl, enum drbd_tags tag,
				  const char *str);
static unsigned short *tl_add_int(unsigned short *tl, enum drbd_tags tag,
				  const void *val);
/*
 * Opaque "holder" token passed when claiming the external meta data
 * block device in the attach path (see the meta_dev open below);
 * a recognizable string rather than a pointer into our structures.
 */
49 static char *drbd_m_holder =
"Hands off! this is DRBD's meta data device.";
/*
 * Generated tag-list unpackers: for each NL_PACKET(name, ...) this
 * emits a  name##_from_tags()  that walks an unaligned stream of
 * (tag, dlen, value) records until TT_END and fills in *arg.
 * Unknown tags are an error only when T_MANDATORY is set; strings
 * are length-checked and copied with min_t(size_t, dlen, len).
 * NOTE(review): several macro continuation lines are missing from
 * this extract; the macro bodies below are incomplete.
 */
52 #define NL_PACKET(name, number, fields) \
53 static int name ## _from_tags(struct drbd_conf *mdev, \
54 unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
55 static int name ## _from_tags(struct drbd_conf *mdev, \
56 unsigned short *tags, struct name *arg) \
61 while ((tag = get_unaligned(tags++)) != TT_END) { \
62 dlen = get_unaligned(tags++); \
63 switch (tag_number(tag)) { \
66 if (tag & T_MANDATORY) { \
67 dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
71 tags = (unsigned short *)((char *)tags + dlen); \
75 #define NL_INTEGER(pn, pr, member) \
77 arg->member = get_unaligned((int *)(tags)); \
79 #define NL_INT64(pn, pr, member) \
81 arg->member = get_unaligned((u64 *)(tags)); \
83 #define NL_BIT(pn, pr, member) \
85 arg->member = *(char *)(tags) ? 1 : 0; \
87 #define NL_STRING(pn, pr, member, len) \
90 dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
91 #member, dlen, (unsigned int)len); \
94 arg->member ## _len = dlen; \
95 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
100 #define NL_PACKET(name, number, fields) \
101 static unsigned short* \
102 name ## _to_tags(struct drbd_conf *mdev, \
103 struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
104 static unsigned short* \
105 name ## _to_tags(struct drbd_conf *mdev, \
106 struct name *arg, unsigned short *tags) \
112 #define NL_INTEGER(pn, pr, member) \
113 put_unaligned(pn | pr | TT_INTEGER, tags++); \
114 put_unaligned(sizeof(int), tags++); \
115 put_unaligned(arg->member, (int *)tags); \
116 tags = (unsigned short *)((char *)tags+sizeof(int));
117 #define NL_INT64(pn, pr, member) \
118 put_unaligned(pn | pr | TT_INT64, tags++); \
119 put_unaligned(sizeof(u64), tags++); \
120 put_unaligned(arg->member, (u64 *)tags); \
121 tags = (unsigned short *)((char *)tags+sizeof(u64));
122 #define NL_BIT(pn, pr, member) \
123 put_unaligned(pn | pr | TT_BIT, tags++); \
124 put_unaligned(sizeof(char), tags++); \
125 *(char *)tags = arg->member; \
126 tags = (unsigned short *)((char *)tags+sizeof(char));
127 #define NL_STRING(pn, pr, member, len) \
128 put_unaligned(pn | pr | TT_STRING, tags++); \
129 put_unaligned(arg->member ## _len, tags++); \
130 memcpy(tags, arg->member, arg->member ## _len); \
131 tags = (unsigned short *)((char *)tags + arg->member ## _len);
/*
 * User-space helper invocation (fragment): minimal environment plus
 * DRBD_PEER_ADDRESS / DRBD_PEER_AF variables built below; the
 * helper's exit code is logged afterwards.
 */
139 char *envp[] = {
"HOME=/",
141 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
146 char mb[12], af[20], ad[60], *afs;
153 snprintf(mb, 12,
"minor-%d", mdev_to_minor(mdev));
155 if (get_net_conf(mdev)) {
159 snprintf(ad, 60,
"DRBD_PEER_ADDRESS=%pI6",
164 snprintf(ad, 60,
"DRBD_PEER_ADDRESS=%pI4",
169 snprintf(ad, 60,
"DRBD_PEER_ADDRESS=%pI4",
172 snprintf(af, 20,
"DRBD_PEER_AF=%s", afs);
187 dev_warn(
DEV,
"helper command: %s %s %s exit code %u (0x%x)\n",
189 (ret >> 8) & 0xff, ret);
191 dev_info(
DEV,
"helper command: %s %s %s exit code %u (0x%x)\n",
193 (ret >> 8) & 0xff, ret);
/*
 * Fencing (fragment): maps the fence-peer helper's exit code,
 * (r >> 8) & 0xff, to an outcome string and action below; fencing
 * is skipped when the local disk is not even Consistent.
 */
214 fp = mdev->ldev->dc.fencing;
217 dev_warn(
DEV,
"Not fencing peer, I'm not even Consistent myself.\n");
224 switch ((r>>8) & 0xff) {
226 ex_to_string =
"peer is inconsistent or worse";
230 ex_to_string =
"peer was fenced";
236 ex_to_string =
"peer is unreachable, assumed to be dead";
239 ex_to_string =
"peer unreachable, doing nothing since disk != UpToDate";
246 ex_to_string =
"peer is active";
247 dev_warn(
DEV,
"Peer is primary, outdating myself.\n");
253 dev_err(
DEV,
"fence-peer() = 7 && fencing != Stonith !!!\n");
254 ex_to_string =
"peer was stonithed";
260 dev_err(
DEV,
"fence-peer helper broken, returned %d\n", (r>>8)&0xff);
264 dev_info(
DEV,
"fence-peer helper returned %d (%s)\n",
265 (r>>8) & 0xff, ex_to_string);
/*
 * Asynchronous fencing (fragment): the fence-peer helper is run from
 * a short-lived kthread named "drbd%d_a_helper"; a failed spawn is
 * reported as an out-of-memory error below.
 */
277 static int _try_outdate_peer_async(
void *
data)
311 opa =
kthread_run(_try_outdate_peer_async, mdev,
"drbd%d_a_helper", mdev_to_minor(mdev));
313 dev_err(
DEV,
"out of mem, failed to invoke fence-peer helper\n");
/*
 * Role change (fragment): retries the state transition to new_role
 * up to max_tries times; forced transitions emit split-brain /
 * forced-UpToDate warnings further below.
 */
319 const int max_tries = 4;
332 val.
i = 0; val.
role = new_role;
334 while (
try++ < max_tries) {
376 dev_warn(
DEV,
"Forced into split brain situation!\n");
406 dev_warn(
DEV,
"Forced to consider local data as UpToDate!\n");
418 if (get_net_conf(mdev)) {
426 && mdev->ldev->md.uuid[
UI_BITMAP] == 0) || forced)
/*
 * Minor lookup (fragment): resolve a minor number to its device,
 * optionally creating the gendisk/device on demand when absent.
 */
459 mdev = minor_to_mdev(minor);
461 if (!mdev && create) {
462 struct gendisk *disk =
NULL;
480 mdev = minor_to_mdev(minor);
492 if (!primary_from_tags(mdev, nlp->
tag_list, &primary_args)) {
/*
 * Compute where the meta data lives on disk (md_offset, md_size_sect)
 * depending on dc.meta_dev_idx: internal vs. external / indexed meta
 * data (fragment; interior lines elided in this extract).
 */
513 static void drbd_md_set_sector_offsets(
struct drbd_conf *mdev,
517 switch (bdev->
dc.meta_dev_idx) {
521 bdev->
md.md_offset = drbd_md_ss__(mdev, bdev);
527 bdev->
md.md_size_sect = drbd_get_capacity(bdev->
md_bdev);
528 bdev->
md.md_offset = 0;
534 bdev->
md.md_offset = drbd_md_ss__(mdev, bdev);
541 md_size_sect =
ALIGN(md_size_sect, 8);
547 bdev->
md.md_size_sect = md_size_sect;
/*
 * Pretty-print a size (fragment): while the value is >= 10000 and a
 * larger unit exists, divide by 1024 (bit 9 acts as a rounding bit),
 * then format as e.g. "123 GB" using the unit table K..E.
 */
559 static char units[] = {
'K',
'M',
'G',
'T',
'P',
'E' };
561 while (size >= 10000 && base <
sizeof(units)-1) {
563 size = (size >> 10) + !!(size & (1<<9));
566 sprintf(buf,
"%u %cB", (
unsigned)size, units[base]);
/*
 * Device size (re)determination (fragment): remember the previous
 * meta data location/size and la_size, re-derive the offsets, then
 * (below) update the capacity and report whether the size changed
 * and/or the meta data moved.
 */
587 if (is_susp(mdev->
state))
607 sector_t prev_first_sect, prev_size;
612 int md_moved, la_size_changed;
629 prev_first_sect = drbd_md_first_sector(mdev->ldev);
630 prev_size = mdev->ldev->md.md_size_sect;
631 la_size = mdev->ldev->md.la_size_sect;
634 drbd_md_set_sector_offsets(mdev, mdev->ldev);
638 if (drbd_get_capacity(mdev->
this_bdev) != size ||
647 "Could not allocate bitmap!\n");
650 "Leaving size unchanged at size = %lu KB\n",
651 (
unsigned long)size);
656 drbd_set_my_capacity(mdev, size);
657 mdev->ldev->md.la_size_sect =
size;
659 (
unsigned long long)size>>1);
664 la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
666 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
667 || prev_size != mdev->ldev->md.md_size_sect;
669 if (la_size_changed || md_moved) {
674 la_size_changed && md_moved ?
"size changed and md moved" :
675 la_size_changed ?
"size changed" :
"md moved");
/*
 * Size arbitration (fragment): the effective size is clamped to the
 * smaller of local (m_size) and peer (p_size) capacities; a user
 * request exceeding the possible size is rejected below.
 */
707 m_size = drbd_get_max_capacity(bdev);
710 dev_warn(
DEV,
"Resize while not connected was forced by the user!\n");
714 if (p_size && m_size) {
719 if (m_size && m_size < size)
721 if (p_size && p_size < size)
736 dev_err(
DEV,
"Requested disk size is too big (%lu > %lu)\n",
737 (
unsigned long)u_size>>1, (
unsigned long)size>>1);
/*
 * (Re)allocate the activity-log LRU to match sync_conf.al_extents;
 * a no-op when the current element count already matches.
 * NOTE(review): 127 looks like a fallback/minimum default applied to
 * al_extents — confirm against the full source.
 */
753 static int drbd_check_al_size(
struct drbd_conf *mdev)
761 mdev->sync_conf.al_extents = 127;
764 mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
773 dev_err(
DEV,
"Cannot allocate act_log lru!\n");
788 spin_unlock_irq(&mdev->
al_lock);
/*
 * Apply request-queue limits derived from max_bio_size (in sectors:
 * max_bio_size >> 9), further capped by the backing device's queue
 * limits and dc.max_bio_bvecs; ra_pages are synchronized below.
 */
801 static void drbd_setup_queue_param(
struct drbd_conf *mdev,
unsigned int max_bio_size)
804 unsigned int max_hw_sectors = max_bio_size >> 9;
805 unsigned int max_segments = 0;
808 struct request_queue *
const b = mdev->ldev->backing_bdev->bd_disk->queue;
810 max_hw_sectors =
min(queue_max_hw_sectors(b), max_bio_size >> 9);
811 max_segments = mdev->ldev->dc.max_bio_bvecs;
822 struct request_queue *
const b = mdev->ldev->backing_bdev->bd_disk->queue;
826 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
827 dev_info(
DEV,
"Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
828 q->backing_dev_info.ra_pages,
829 b->backing_dev_info.ra_pages);
830 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
/*
 * Recompute the maximum bio size as min(local, peer) of the hardware
 * sector limits and apply it via drbd_setup_queue_param(); a value
 * that would shrink below the current one trips the ASSERT below.
 */
838 unsigned int now,
new, local, peer;
840 now = queue_max_hw_sectors(mdev->
rq_queue) << 9;
845 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
864 new =
min(local, peer);
867 dev_err(
DEV,
"ASSERT FAILED new < now; (%u < %u)\n",
new, now);
872 drbd_setup_queue_param(mdev,
new);
/* Begin a serialized reconfiguration request (fragment). */
882 static void drbd_reconfig_start(
struct drbd_conf *mdev)
/* End a reconfiguration; may stop the worker thread (fragment). */
893 static void drbd_reconfig_done(
struct drbd_conf *mdev)
900 drbd_thread_stop_nowait(&mdev->
worker);
/*
 * Suspend activity-log updates; needs the AL lock — warns (and
 * presumably bails out) when lc_try_lock() fails (fragment).
 */
908 static void drbd_suspend_al(
struct drbd_conf *mdev)
912 if (lc_try_lock(mdev->
act_log)) {
916 dev_warn(
DEV,
"Failed to lock al in drbd_suspend_al()\n");
/*
 * Attach handler (fragment): parses disk_conf from the tag list,
 * opens and validates the backing and meta devices (capacities,
 * block sizes, meta_dev_idx), sets up offsets, AL and resync LRU;
 * failures funnel to the force_diskless(_dec) labels.
 */
944 int cp_discovered = 0;
945 int logical_block_size;
947 drbd_reconfig_start(mdev);
980 if (!disk_conf_from_tags(mdev, nlp->
tag_list, &nbc->
dc)) {
990 if (get_net_conf(mdev)) {
991 int prot = mdev->
net_conf->wire_protocol;
1002 dev_err(
DEV,
"open(\"%s\") failed with %ld\n", nbc->
dc.backing_dev,
1019 (nbc->
dc.meta_dev_idx < 0) ?
1020 (
void *)mdev : (
void *)drbd_m_holder);
1022 dev_err(
DEV,
"open(\"%s\") failed with %ld\n", nbc->
dc.meta_dev,
1045 drbd_md_set_sector_offsets(mdev, nbc);
1047 if (drbd_get_max_capacity(nbc) < nbc->
dc.disk_size) {
1048 dev_err(
DEV,
"max capacity %llu smaller than disk size %llu\n",
1049 (
unsigned long long) drbd_get_max_capacity(nbc),
1050 (
unsigned long long) nbc->
dc.disk_size);
1055 if (nbc->
dc.meta_dev_idx < 0) {
1058 min_md_device_sectors = (2<<10);
1064 if (drbd_get_capacity(nbc->
md_bdev) < min_md_device_sectors) {
1066 dev_warn(
DEV,
"refusing attach: md-device too small, "
1067 "at least %llu sectors needed for this meta-disk type\n",
1068 (
unsigned long long) min_md_device_sectors);
1074 if (drbd_get_max_capacity(nbc) <
1082 if (nbc->
known_size > max_possible_sectors) {
1083 dev_warn(
DEV,
"==> truncating very big lower level device "
1084 "to currently maximum possible %llu sectors <==\n",
1085 (
unsigned long long) max_possible_sectors);
1086 if (nbc->
dc.meta_dev_idx >= 0)
1088 "meta data may help <<==\n");
1104 goto force_diskless;
1106 drbd_md_set_sector_offsets(mdev, nbc);
1109 logical_block_size = bdev_logical_block_size(nbc->
md_bdev);
1110 if (logical_block_size == 0)
1117 goto force_diskless_dec;
1119 dev_warn(
DEV,
"Meta data's bdev logical_block_size = %d != %d\n",
1121 dev_warn(
DEV,
"Workaround engaged (has performance impact).\n");
1130 goto force_diskless_dec;
1136 goto force_diskless_dec;
1141 dev_err(
DEV,
"Can only attach to data with current UUID=%016llX\n",
1142 (
unsigned long long)mdev->
ed_uuid);
1144 goto force_diskless_dec;
1148 if (drbd_check_al_size(mdev)) {
1150 goto force_diskless_dec;
1156 dev_warn(
DEV,
"refusing to truncate a consistent device\n");
1158 goto force_diskless_dec;
1163 goto force_diskless_dec;
1168 if (nbc->
dc.no_md_flush)
1179 mdev->
resync = resync_lru;
1227 goto force_diskless_dec;
1228 }
else if (dd ==
grew)
1232 dev_info(
DEV,
"Assuming that all blocks are out of sync "
1233 "(aka FullSync)\n");
1237 goto force_diskless_dec;
1243 goto force_diskless_dec;
1247 if (cp_discovered) {
1252 goto force_diskless_dec;
1257 drbd_suspend_al(mdev);
1307 goto force_diskless_dec;
1320 drbd_reconfig_done(mdev);
1341 drbd_reconfig_done(mdev);
/*
 * Detach handler (fragment): parses its args from the tag list;
 * detach_force takes a harsher path, otherwise a state transition
 * to disk = D_FAILED is requested.
 */
1355 struct detach dt = {};
1357 if (!detach_from_tags(mdev, nlp->
tag_list, &dt)) {
1362 if (dt.detach_force) {
1371 retcode = drbd_request_state(mdev,
NS(disk,
D_FAILED));
/*
 * Network-config handler (fragment): allocates and parses a new
 * net_conf, rejects my_addr/peer_addr collisions with other minors,
 * allocates crypto transforms (cram-hmac, integrity w/r) and the
 * tl/ee hash tables; the error paths below free everything.
 */
1392 struct net_conf *new_conf =
NULL;
1403 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1405 drbd_reconfig_start(mdev);
1413 new_conf = kzalloc(
sizeof(
struct net_conf),
GFP_KERNEL);
1431 new_conf->want_lose = 0;
1432 new_conf->two_primaries = 0;
1439 if (!net_conf_from_tags(mdev, nlp->
tag_list, new_conf)) {
1444 if (new_conf->two_primaries
1471 new_my_addr = (
struct sockaddr *)&new_conf->my_addr;
1472 new_peer_addr = (
struct sockaddr *)&new_conf->peer_addr;
1474 odev = minor_to_mdev(i);
1475 if (!odev || odev == mdev)
1477 if (get_net_conf(odev)) {
1479 if (new_conf->my_addr_len == odev->
net_conf->my_addr_len &&
1480 !
memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1484 if (new_conf->peer_addr_len == odev->
net_conf->peer_addr_len &&
1485 !
memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1494 if (new_conf->cram_hmac_alg[0] != 0) {
1496 new_conf->cram_hmac_alg);
1504 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1510 if (new_conf->integrity_alg[0]) {
1511 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0,
CRYPTO_ALG_ASYNC);
1512 if (IS_ERR(integrity_w_tfm)) {
1513 integrity_w_tfm =
NULL;
1518 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
1523 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0,
CRYPTO_ALG_ASYNC);
1524 if (IS_ERR(integrity_r_tfm)) {
1525 integrity_r_tfm =
NULL;
1531 ns = new_conf->max_epoch_size/8;
1533 new_tl_hash = kzalloc(ns*
sizeof(
void *),
GFP_KERNEL);
1540 ns = new_conf->max_buffers/8;
1541 if (new_conf->two_primaries && (mdev->
ee_hash_s != ns)) {
1542 new_ee_hash = kzalloc(ns*
sizeof(
void *),
GFP_KERNEL);
1551 if (integrity_w_tfm) {
1552 i = crypto_hash_digestsize(integrity_w_tfm);
1621 drbd_reconfig_done(mdev);
1628 crypto_free_hash(tfm);
1629 crypto_free_hash(integrity_w_tfm);
1630 crypto_free_hash(integrity_r_tfm);
1636 drbd_reconfig_done(mdev);
/*
 * Disconnect and resize handlers (fragments): each parses its args
 * from the request's tag list; an online grow may trigger a resync
 * of the new storage, and the new disk_size is recorded below.
 */
1647 if (!disconnect_from_tags(mdev, nlp->
tag_list, &
dc)) {
1704 dev_info(
DEV,
"Resync of new storage after online grow\n");
1724 memset(&rs, 0,
sizeof(
struct resize));
1725 if (!resize_from_tags(mdev, nlp->
tag_list, &rs)) {
1751 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
1752 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
1754 mdev->ldev->dc.disk_size = (
sector_t)
rs.resize_size;
/*
 * Syncer-config handler (fragment): parses syncer_conf, validates
 * the csums/verify hash algorithms and al_extents bounds, parses
 * the CPU mask, and resizes the resync fifo plan (rs_plan_s).
 */
1790 struct syncer_conf
sc;
1792 int *rs_plan_s =
NULL;
1795 if (!zalloc_cpumask_var(&new_cpu_mask,
GFP_KERNEL)) {
1801 memset(&
sc, 0,
sizeof(
struct syncer_conf));
1814 if (!syncer_conf_from_tags(mdev, nlp->
tag_list, &
sc)) {
1830 if (!rsr &&
sc.csums_alg[0]) {
1832 if (IS_ERR(csums_tfm)) {
1838 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1854 if (!ovr &&
sc.verify_alg[0]) {
1856 if (IS_ERR(verify_tfm)) {
1862 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1869 if (nr_cpu_ids > 1 &&
sc.cpu_mask[0] != 0) {
1870 err = bitmap_parse(
sc.cpu_mask, 32,
1873 dev_warn(
DEV,
"bitmap_parse() failed with %d\n", err);
1880 ERR_IF (
sc.al_extents < 7)
sc.al_extents = 127;
1881 #define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
1882 if (
sc.al_extents > AL_MAX) {
1883 dev_err(
DEV,
"sc.al_extents > %d\n", AL_MAX);
1892 ensure_mdev(
sc.after, 1);
1902 if (fifo_size != mdev->
rs_plan_s.size && fifo_size > 0) {
1903 rs_plan_s = kzalloc(
sizeof(
int) * fifo_size,
GFP_KERNEL);
1905 dev_err(
DEV,
"kmalloc of fifo_buffer failed");
1928 if (fifo_size != mdev->
rs_plan_s.size) {
1941 err = drbd_check_al_size(mdev);
1957 if (!cpumask_equal(mdev->
cpu_mask, new_cpu_mask)) {
1958 cpumask_copy(mdev->
cpu_mask, new_cpu_mask);
1961 mdev->
asender.reset_cpu_mask = 1;
1962 mdev->
worker.reset_cpu_mask = 1;
1968 free_cpumask_var(new_cpu_mask);
1969 crypto_free_hash(csums_tfm);
1970 crypto_free_hash(verify_tfm);
/*
 * Bitmap-IO hook (fragment): after the bitmap operation completes,
 * suspend activity-log updates via drbd_suspend_al().
 */
2009 static int drbd_bmio_set_susp_al(
struct drbd_conf *mdev)
2014 drbd_suspend_al(mdev);
2039 "set_n_write from invalidate_peer",
/*
 * get_config reply (fragment): serialize the disk, net and syncer
 * configuration into the reply tag list; the reply length is the
 * distance the tl write pointer advanced.
 */
2127 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
2131 if (get_net_conf(mdev)) {
2132 tl = net_conf_to_tags(mdev, mdev->
net_conf, tl);
2135 tl = syncer_conf_to_tags(mdev, &mdev->
sync_conf, tl);
2139 return (
int)((
char *)tl - (
char *)reply->
tag_list);
2145 unsigned short *tl = reply->
tag_list;
2147 unsigned long rs_left;
2150 tl = get_state_to_tags(mdev, (
struct get_state *)&s, tl);
2155 drbd_get_syncer_progress(mdev, &rs_left, &res);
2156 tl = tl_add_int(tl, T_sync_progress, &res);
2162 return (
int)((
char *)tl - (
char *)reply->
tag_list);
2173 tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid,
UI_SIZE*
sizeof(
u64));
2174 tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.
flags);
2179 return (
int)((
char *)tl - (
char *)reply->
tag_list);
2199 tl = tl_add_blob(tl, T_use_degraded, &rv,
sizeof(rv));
2202 return (
int)((
char *)tl - (
char *)reply->
tag_list);
/*
 * Online-verify start and new-current-UUID handlers (fragments):
 * both parse their args from the tag list; new_c_uuid may skip the
 * initial sync and clear the bitmap (see below).
 */
2209 struct start_ov
args =
2212 if (!start_ov_from_tags(mdev, nlp->
tag_list, &args)) {
2234 int skip_initial_sync = 0;
2237 struct new_c_uuid args;
2239 memset(&args, 0,
sizeof(
struct new_c_uuid));
2240 if (!new_c_uuid_from_tags(mdev, nlp->
tag_list, &args)) {
2255 dev_info(
DEV,
"Preparing to skip initial sync\n");
2256 skip_initial_sync = 1;
2265 if (args.clear_bm) {
2269 dev_err(
DEV,
"Writing bitmap failed with %d\n",err);
2272 if (skip_initial_sync) {
/*
 * Netlink packet dispatch table (fragment): maps a packet type to
 * its handler function plus the extra reply body size that handler
 * may need beyond the basic reply.
 */
2301 [ P_primary ] = { &drbd_nl_primary, 0 },
2302 [ P_secondary ] = { &drbd_nl_secondary, 0 },
2303 [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
2304 [ P_detach ] = { &drbd_nl_detach, 0 },
2305 [ P_net_conf ] = { &drbd_nl_net_conf, 0 },
2306 [ P_disconnect ] = { &drbd_nl_disconnect, 0 },
2307 [ P_resize ] = { &drbd_nl_resize, 0 },
2308 [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
2309 [ P_invalidate ] = { &drbd_nl_invalidate, 0 },
2310 [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
2311 [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
2312 [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
2313 [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
2314 [ P_resume_io ] = { &drbd_nl_resume_io, 0 },
2315 [ P_outdate ] = { &drbd_nl_outdate, 0 },
2316 [ P_get_config ] = { &drbd_nl_get_config,
2317 sizeof(
struct syncer_conf_tag_len_struct) +
2320 [ P_get_state ] = { &drbd_nl_get_state,
2321 sizeof(
struct get_state_tag_len_struct) +
2322 sizeof(
struct sync_progress_tag_len_struct) },
2323 [ P_get_uuids ] = { &drbd_nl_get_uuids,
2324 sizeof(
struct get_uuids_tag_len_struct) },
2325 [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
2326 sizeof(
struct get_timeout_flag_tag_len_struct)},
2327 [ P_start_ov ] = { &drbd_nl_start_ov, 0 },
2328 [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
/*
 * Connector request callback (fragment): look up the handler in the
 * dispatch table (NULL function => unknown packet), size and build
 * the reply, invoke the handler, and echo the request's id/seq and
 * ack+1 in the reply header.
 */
2339 int reply_size =
sizeof(
struct cn_msg)
2369 if (cm->function ==
NULL) {
2374 reply_size += cm->reply_body_size;
2385 cm->reply_body_size ? nlp->
packet_type : P_return_code_only;
2390 rr = cm->function(mdev, nlp, reply);
2392 cn_reply->id = req->
id;
2393 cn_reply->seq = req->
seq;
2394 cn_reply->ack = req->
ack + 1;
2396 cn_reply->flags = 0;
2399 if (rr && rr != -
ESRCH)
/*
 * Append a length-limited blob to the tag list (fragment): len is
 * clamped to the tag's declared max_len from tag_descriptions[];
 * when nul_terminated is set the last stored byte is forced to 0.
 */
2412 static unsigned short *
2413 __tl_add_blob(
unsigned short *tl,
enum drbd_tags tag,
const void *data,
2414 unsigned short len,
int nul_terminated)
2416 unsigned short l = tag_descriptions[
tag_number(tag)].max_len;
2417 len = (len <
l) ? len : l;
2421 tl = (
unsigned short*)((
char*)tl + len);
2423 *((
char*)tl - 1) = 0;
/* Append a raw blob (no forced NUL termination). */
2427 static unsigned short *
2428 tl_add_blob(
unsigned short *tl,
enum drbd_tags tag,
const void *data,
int len)
2430 return __tl_add_blob(tl, tag, data, len, 0);
/* Append a C string, including its terminating NUL byte. */
2433 static unsigned short *
2434 tl_add_str(
unsigned short *tl,
enum drbd_tags tag,
const char *
str)
2436 return __tl_add_blob(tl, tag, str,
strlen(str)+1, 0);
/*
 * Append an integer-typed tag (fragment): advances the write pointer
 * past sizeof(int) or sizeof(u64) depending on the tag's type.
 */
2439 static unsigned short *
2440 tl_add_int(
unsigned short *tl,
enum drbd_tags tag,
const void *
val)
2447 tl = (
unsigned short*)((
char*)tl+
sizeof(
int));
2452 tl = (
unsigned short*)((
char*)tl+
sizeof(
u64));
/*
 * Connector broadcast builders (fragments): each assembles a cn_msg
 * whose payload is a drbd_nl_cfg_reply carrying a serialized tag
 * list (state, helper name, ee dump, sync progress); cn_reply->len
 * is computed from how far the tl write pointer advanced.
 */
2464 sizeof(struct drbd_nl_cfg_reply)+
2465 sizeof(struct get_state_tag_len_struct)+
2468 struct drbd_nl_cfg_reply *reply =
2469 (
struct drbd_nl_cfg_reply *)cn_reply->
data;
2470 unsigned short *tl = reply->
tag_list;
2474 tl = get_state_to_tags(mdev, (
struct get_state *)&
state, tl);
2483 cn_reply->
len =
sizeof(
struct drbd_nl_cfg_reply) +
2484 (int)((char *)tl - (
char *)reply->
tag_list);
2485 cn_reply->
flags = 0;
2488 reply->
minor = mdev_to_minor(mdev);
2497 sizeof(struct drbd_nl_cfg_reply)+
2498 sizeof(struct call_helper_tag_len_struct)+
2501 struct drbd_nl_cfg_reply *reply =
2502 (
struct drbd_nl_cfg_reply *)cn_reply->
data;
2503 unsigned short *tl = reply->
tag_list;
2507 tl = tl_add_str(tl, T_helper, helper_name);
2515 cn_reply->
len =
sizeof(
struct drbd_nl_cfg_reply) +
2516 (int)((char *)tl - (
char *)reply->
tag_list);
2517 cn_reply->
flags = 0;
2520 reply->
minor = mdev_to_minor(mdev);
2527 const char *
reason,
const int dgs,
2528 const char* seen_hash,
const char* calc_hash,
2539 if (!reason || !reason[0])
2551 sizeof(
struct dump_ee_tag_len_struct)+
2556 dev_err(
DEV,
"could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
2564 tl = tl_add_str(tl, T_dump_ee_reason, reason);
2565 tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
2566 tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
2567 tl = tl_add_int(tl, T_ee_sector, &e->
sector);
2568 tl = tl_add_int(tl, T_ee_block_id, &e->
block_id);
2571 len =
min_t(
unsigned, e->
size, 32 << 10);
2581 tl = (
unsigned short*)((
char*)tl +
l);
2594 (int)((char*)tl - (
char*)reply->
tag_list);
2595 cn_reply->
flags = 0;
2598 reply->
minor = mdev_to_minor(mdev);
2608 sizeof(struct drbd_nl_cfg_reply)+
2609 sizeof(struct sync_progress_tag_len_struct)+
2612 struct drbd_nl_cfg_reply *reply =
2613 (
struct drbd_nl_cfg_reply *)cn_reply->
data;
2614 unsigned short *tl = reply->
tag_list;
2615 unsigned long rs_left;
2621 drbd_get_syncer_progress(mdev, &rs_left, &res);
2624 tl = tl_add_int(tl, T_sync_progress, &res);
2632 cn_reply->
len =
sizeof(
struct drbd_nl_cfg_reply) +
2633 (int)((char *)tl - (
char *)reply->
tag_list);
2634 cn_reply->
flags = 0;
2637 reply->
minor = mdev_to_minor(mdev);
/*
 * Connector registration / teardown and the short return-code reply
 * (fragments): the "cn_drbd" callback is (un)registered here, and a
 * minimal cn_msg echoing the request's id/seq/ack+1 is sent back.
 */
2645 static struct cb_id cn_id_drbd;
2651 err =
cn_add_callback(&cn_id_drbd,
"cn_drbd", &drbd_connector_callback);
2667 static struct cb_id cn_id_drbd;
2677 char buffer[
sizeof(
struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
2679 struct drbd_nl_cfg_reply *reply =
2680 (
struct drbd_nl_cfg_reply *)cn_reply->
data;
2684 cn_reply->
id = req->
id;
2686 cn_reply->
seq = req->
seq;
2687 cn_reply->
ack = req->
ack + 1;
2688 cn_reply->
len =
sizeof(
struct drbd_nl_cfg_reply);
2689 cn_reply->
flags = 0;
2696 if (rr && rr != -
ESRCH)