26 #include <linux/module.h>
28 #include <linux/slab.h>
/* NOTE(review): elided excerpt — the leading numbers are original source
 * lines; most intervening lines are missing.  Fragment of the gendisk
 * I/O-accounting helpers (start/end of a request). */
/* Start-of-I/O accounting: count one I/O and its sectors against the
 * per-partition stats of vdisk->part0, and mark it in flight for this
 * direction (rw = bio_data_dir). */
37 const int rw = bio_data_dir(bio);
39 cpu = part_stat_lock();
41 part_stat_inc(cpu, &mdev->
vdisk->part0, ios[rw]);
42 part_stat_add(cpu, &mdev->
vdisk->part0,
sectors[rw], bio_sectors(bio));
43 part_inc_in_flight(&mdev->
vdisk->part0, rw);
/* End-of-I/O accounting (presumably _drbd_end_io_acct — confirm against
 * full source): charge the request duration in ticks and drop the
 * in-flight counter for this direction. */
53 cpu = part_stat_lock();
54 part_stat_add(cpu, &mdev->
vdisk->part0,
ticks[rw], duration);
56 part_dec_in_flight(&mdev->
vdisk->part0, rw);
/* NOTE(review): elided fragment of request teardown.  Condition: the
 * request completed OK both locally and over the network, and the peer
 * reported it as "same data" (RQ_NET_SIS) — lines 84-101 are missing, so
 * the taken branch is not visible here. */
83 if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s &
RQ_NET_SIS))
/* Activity-log bookkeeping could not be completed because the local
 * disk failed; warn with the affected sector. */
102 dev_warn(
DEV,
"Should have called drbd_al_complete_io(, %llu), "
103 "but my Disk seems to have failed :(\n",
104 (
unsigned long long) req->
sector);
/* Queue the current epoch's barrier work item on the sender work queue.
 * NOTE(review): elided fragment — lines 113-129 (including the barrier
 * object 'b' setup) are missing from this excerpt. */
112 static void queue_barrier(
struct drbd_conf *mdev)
/* Account a pending application request, then hand the barrier's work
 * item (b->w) to the data sender work queue. */
130 inc_ap_pending(mdev);
131 drbd_queue_work(&mdev->
data.work, &b->
w);
/* Called just before a local WRITE completes; checks for requests /
 * epoch entries that overlap the completed range.  NOTE(review): elided
 * fragment — most of the body (lines 136-188) is missing. */
135 static void _about_to_complete_local_write(
struct drbd_conf *mdev,
138 const unsigned long s = req->
rq_state;
/* Scan the transfer-log hash slot for requests overlapping
 * [sector, sector+size) of the completed write. */
165 #define OVERLAPS overlaps(sector, size, i->sector, i->size)
166 slot = tl_hash_slot(mdev, sector);
/* An overlapping entry here indicates an internal inconsistency. */
169 dev_alert(
DEV,
"LOGIC BUG: completed: %p %llus +%u; "
170 "other: %p %llus +%u\n",
171 req, (
unsigned long long)sector, size,
/* Same overlap scan against the epoch-entry (peer write) hash. */
189 #define OVERLAPS overlaps(sector, size, e->sector, e->size)
190 slot = ee_hash_slot(mdev, req->
sector);
/* Request-completion fragment (presumably _req_may_be_done — confirm).
 * NOTE(review): elided excerpt; the state checks between the visible
 * lines are missing. */
216 const unsigned long s = req->
rq_state;
/* For completed local writes, resolve overlaps before completing. */
270 _about_to_complete_local_write(mdev, req);
/* Close out the gendisk accounting opened at request start. */
273 _drbd_end_io_acct(mdev, req);
/* Report 0 on success; otherwise the recorded error, defaulting to
 * -EIO when no specific errno was captured. */
275 m->
error = ok ? 0 : (error ?: -
EIO);
288 _req_is_done(mdev, req, rw);
/* Only proceed here while I/O is not suspended on this device. */
298 if (!is_susp(mdev->
state))
/* Detect writes that conflict (overlap) with in-flight local or remote
 * writes.  NOTE(review): elided fragment — control flow between the
 * visible lines is missing. */
329 const int size = req->
size;
/* Conflict detection needs the net configuration (hash tables sized by
 * it); bail to the no-conflict path if it is gone. */
337 if (!get_net_conf(mdev))
342 goto out_no_conflict;
/* Scan the transfer log hash slot for our own pending writes that
 * overlap the new request's range. */
345 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
346 slot = tl_hash_slot(mdev, sector);
349 dev_alert(
DEV,
"%s[%u] Concurrent local write detected! "
350 "[DISCARD L] new: %llus +%u; "
351 "pending: %llus +%u\n",
353 (
unsigned long long)sector, size,
/* Same scan against peer writes (epoch entries). */
363 #define OVERLAPS overlaps(e->sector, e->size, sector, size)
364 slot = ee_hash_slot(mdev, sector);
367 dev_alert(
DEV,
"%s[%u] Concurrent remote write detected!"
368 " [DISCARD L] new: %llus +%u; "
369 "pending: %llus +%u\n",
371 (
unsigned long long)sector, size,
/* Reached only via a state that should be impossible. */
411 dev_err(
DEV,
"LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
/* Fragment of the request state machine (presumably __req_mod — confirm
 * against full source).  Each visible line belongs to a different
 * `what` event case; the case labels themselves are elided.
 * NOTE(review): comments below are placed only at statement starts so
 * the original (already line-split) text is untouched. */
425 inc_ap_pending(mdev);
/* Several completion paths funnel into the "maybe done" check, which
 * completes the request unless I/O is suspended. */
443 _req_may_be_done_not_susp(req, m);
449 _req_may_be_done_not_susp(req, m);
451 goto goto_queue_for_net_read;
459 _req_may_be_done_not_susp(req, m);
466 _req_may_be_done_not_susp(req, m);
/* Shared tail: queue the (failed-local) read for network retry. */
482 goto_queue_for_net_read:
489 _req_may_be_done_not_susp(req, m);
495 inc_ap_pending(mdev);
/* Hand the request's work item to the data sender queue (send over
 * the network); repeated for different events below. */
517 drbd_queue_work(&mdev->
data.work, &req->
w);
559 drbd_queue_work(&mdev->
data.work, &req->
w);
570 drbd_queue_work(&mdev->
data.work, &req->
w);
581 _req_may_be_done_not_susp(req, m);
604 _req_may_be_done_not_susp(req, m);
612 _req_may_be_done_not_susp(req, m);
/* Peer discarded a write it should not have seen as conflicting. */
636 dev_alert(
DEV,
"Got DiscardAck packet %llus +%u!"
637 " DRBD is not a random data generator!\n",
659 _req_may_be_done_not_susp(req, m);
671 _req_may_be_done_not_susp(req, m);
694 drbd_queue_work(&mdev->
data.work, &req->
w);
710 drbd_queue_work(&mdev->
data.work, &req->
w);
/* barrier_acked while the request still has pending work is a known
 * unhandled situation — flagged, not resolved. */
725 dev_err(
DEV,
"FIXME (barrier_acked but pending)\n");
741 _req_may_be_done_not_susp(req, m);
/* Decide whether a READ of `size` bytes at `sector` may be served from
 * the local backing device.  NOTE(review): elided fragment — the actual
 * bitmap/out-of-sync check (lines 758-766, 769+) is missing. */
755 static int drbd_may_do_local_read(
struct drbd_conf *mdev,
sector_t sector,
int size)
757 unsigned long sbnr, ebnr;
767 nr_sectors = drbd_get_capacity(mdev->
this_bdev);
/* Last sector touched by the request; size is in bytes, >>9 converts
 * to 512-byte sectors. */
768 esector = sector + (size >> 9) - 1;
/* If replication-link congestion thresholds are exceeded, switch the
 * connection to Ahead mode (stop shipping data, track out-of-sync).
 * NOTE(review): elided fragment — the threshold comparisons themselves
 * (lines 780-792 etc.) are missing. */
779 static void maybe_pull_ahead(
struct drbd_conf *mdev)
793 dev_info(
DEV,
"Congestion-fill threshold reached\n");
798 dev_info(
DEV,
"Congestion-extents threshold reached\n");
/* Enter C_AHEAD connection state. */
806 _drbd_set_state(
_NS(mdev, conn,
C_AHEAD), 0, NULL);
/* Core submission path for one bio: decide local vs. remote handling,
 * allocate the drbd_request, account it, and queue network work.
 * NOTE(review): heavily elided fragment — locking, the allocate_barrier
 * path, and most error handling are among the missing lines. */
813 static int drbd_make_request_common(
struct drbd_conf *mdev,
struct bio *bio,
unsigned long start_time)
815 const int rw = bio_rw(bio);
816 const int size = bio->bi_size;
817 const sector_t sector = bio->bi_sector;
820 int local, remote, send_oos = 0;
/* Wrap the bio in a drbd_request tracking object. */
826 req = drbd_req_new(mdev, bio);
/* Dropping private_bio means "do not submit locally". */
840 req->private_bio =
NULL;
/* Reads: fall back to the peer when the local disk cannot serve the
 * range (e.g. out of sync). */
855 if (!drbd_may_do_local_read(mdev, sector, size)) {
863 req->private_bio =
NULL;
880 goto fail_and_free_req;
890 if (rw ==
WRITE && local && size
/* Recompute remote/send_oos from the current connection state. */
897 remote = remote && drbd_should_do_remote(s);
898 send_oos = rw ==
WRITE && drbd_should_send_oos(s);
/* Neither disk available and I/O not suspended: hard error. */
901 if (!(local || remote) && !is_susp(mdev->
state)) {
903 dev_err(
DEV,
"IO ERROR: neither local nor remote disk\n");
904 goto fail_free_complete;
913 if (rw ==
WRITE && (remote || send_oos) &&
921 goto fail_free_complete;
928 if (is_susp(mdev->
state)) {
935 goto fail_free_complete;
/* State may have changed while taking the req_lock: re-check. */
938 if (remote || send_oos) {
939 remote = drbd_should_do_remote(mdev->
state);
940 send_oos = rw ==
WRITE && drbd_should_send_oos(mdev->
state);
943 if (!(remote || send_oos))
944 dev_warn(
DEV,
"lost connection while grabbing the req_lock!\n");
945 if (!(local || remote)) {
946 dev_err(
DEV,
"IO ERROR: neither local nor remote disk\n");
948 goto fail_free_complete;
956 if (rw ==
WRITE && (remote || send_oos) &&
962 goto allocate_barrier;
/* Start gendisk accounting for this request. */
967 _drbd_start_io_acct(mdev, req, bio);
/* Writes must not overlap in-flight local/remote writes. */
1011 if (rw ==
WRITE && _req_conflicts(req))
1012 goto fail_conflicting;
1026 _req_mod(req, (rw ==
WRITE)
/* Possibly switch to Ahead mode under congestion. */
1035 maybe_pull_ahead(mdev);
1042 && drbd_should_do_remote(mdev->
state))
1043 queue_barrier(mdev);
/* Local submission targets the backing block device. */
1049 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
/* Error unwind: close accounting, detach the private bio. */
1077 _drbd_end_io_acct(mdev, req);
1091 req->private_bio =
NULL;
/* Early rejection of requests the device cannot accept at all.
 * NOTE(review): elided fragment — the state check and format-string head
 * (lines 1110-1114, 1117) are missing; visible text suggests the reason
 * is "not in Primary role". */
1109 static int drbd_fail_request_early(
struct drbd_conf *mdev,
int is_write)
1115 "since we are not in Primary state, "
1116 "we cannot allow this\n",
1118 is_write ?
"WRITE" :
"READ");
/* Block-layer entry point fragment: validate the bio, and split it when
 * it crosses a hash-slot (activity-log extent) boundary.
 * NOTE(review): elided excerpt; surrounding declarations and the split
 * retry logic are partially missing. */
1128 unsigned int s_enr, e_enr;
1130 unsigned long start_time;
/* Reject requests the device cannot serve (e.g. not Primary). */
1132 if (drbd_fail_request_early(mdev, bio_data_dir(bio) &
WRITE)) {
/* DRBD only handles whole 512-byte sectors. */
1142 D_ASSERT((bio->bi_size & 0x1ff) == 0);
/* Extent numbers of the first and last sector touched. */
1146 s_enr = bio->bi_sector >>
HT_SHIFT;
1147 e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >>
HT_SHIFT : s_enr;
/* Common case: bio stays within one extent — submit as-is, retrying
 * while the common path asks for it. */
1149 if (
likely(s_enr == e_enr)) {
1151 inc_ap_bio(mdev, 1);
1152 }
while (drbd_make_request_common(mdev, bio, start_time));
/* A multi-extent bio can only be split when it has exactly one
 * segment and fits DRBD's maximum bio size. */
1158 if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size >
DRBD_MAX_BIO_SIZE) {
1160 dev_err(
DEV,
"bio would need to, but cannot, be split: "
1161 "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
1162 bio->bi_vcnt, bio->bi_idx, bio->bi_size,
1163 (
unsigned long long)bio->bi_sector);
/* Split into two bios at the extent boundary and submit each half;
 * 3 = 1 for each half + 1 released when both are in (presumably —
 * confirm against full source). */
1167 struct bio_pair *bp;
1178 const int mask = sps - 1;
1187 inc_ap_bio(mdev, 3);
1191 while (drbd_make_request_common(mdev, &bp->bio1, start_time))
1192 inc_ap_bio(mdev, 1);
1194 while (drbd_make_request_common(mdev, &bp->bio2, start_time))
1195 inc_ap_bio(mdev, 1);
/* merge_bvec callback fragment: limit how many bytes may be appended to
 * a bio, additionally honoring the backing device's own merge_bvec_fn.
 * NOTE(review): elided excerpt — the DRBD-side limit computation
 * (lines 1223-1227) is missing. */
1219 unsigned int bio_offset =
1220 (
unsigned int)bvm->bi_sector << 9;
1221 unsigned int bio_size = bvm->bi_size;
1222 int limit, backing_limit;
/* An empty bio must always be allowed to take at least one bvec. */
1228 if (bio_size == 0) {
1229 if (limit <= bvec->bv_len)
1230 limit = bvec->bv_len;
1231 }
else if (limit &&
get_ldev(mdev)) {
1233 mdev->ldev->backing_bdev->bd_disk->queue;
/* Intersect our limit with the backing queue's merge limit. */
1234 if (b->merge_bvec_fn) {
1235 backing_limit = b->merge_bvec_fn(b, bvm, bvec);
1236 limit =
min(limit, backing_limit);
/* Request timeout timer fragment: compute effective network (ent) and
 * disk (dt) timeouts and warn when the oldest request exceeds them.
 * NOTE(review): elided excerpt — the timer re-arm and list walk are
 * missing. */
1248 unsigned long ent = 0, dt = 0,
et, nt;
1251 if (get_net_conf(mdev)) {
/* disk_timeout is configured in tenths of a second. */
1258 dt = mdev->ldev->dc.disk_timeout *
HZ / 10;
/* No outstanding requests: nothing to time out. */
1270 if (list_empty(le)) {
1298 dev_warn(
DEV,
"Remote failed to finish a request within ko-count * timeout\n");
1304 dev_warn(
DEV,
"Local backing device failed to meet the disk-timeout\n");