#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
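
/*
 * raid0_congested() reports whether the array is congested by polling the
 * backing_dev_info of each member device; the loop stops as soon as one
 * member reports congestion for the requested bits.
 */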
static int raid0_congested(void *data, int bits)
{
        struct mddev *mddev = data;
        struct r0conf *conf = mddev->private;
        struct md_rdev **devlist = conf->devlist;
        int raid_disks = conf->strip_zone[0].nb_dev;
        int i, ret = 0;

        for (i = 0; i < raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}
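
/*
 * dump_zones() logs the zone layout of the assembled array: for every strip
 * zone it prints the zone offset, the per-device start offset and the zone
 * size. All values are sector counts, so >> 1 converts them to KB.
 */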
static void dump_zones(struct mddev *mddev)
{
        int j;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
        struct r0conf *conf = mddev->private;

        for (j = 0; j < conf->nr_strip_zones; j++) {
                /* ... */
                zone_size = conf->strip_zone[j].zone_end - zone_start;
                printk(KERN_INFO "      zone-offset=%10lluKB, "
                                "device-offset=%10lluKB, size=%10lluKB\n",
                        (unsigned long long)zone_start>>1,
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
}
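
/*
 * create_strip_zones() builds the r0conf zone table. Devices of equal size
 * share a zone; every additional distinct size opens a further zone that
 * stripes only across the devices still large enough to reach it.
 */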
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
        int i, c, cnt = 0, err = -EINVAL;
        sector_t curr_zone_end = 0, sectors;
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        bool discard_supported = false;

        if (!conf)
                return -ENOMEM;
        /* first pass: count distinct device sizes == number of zones */
        rdev_for_each(rdev1, mddev) {
                pr_debug("md/raid0:%s: looking at %s\n",
                         mdname(mddev),
                         bdevname(rdev1->bdev, b));
                c = 0;
                /* ... */
                rdev_for_each(rdev2, mddev) {
                        pr_debug("md/raid0:%s: comparing %s(%llu)"
                                 " with %s(%llu)\n",
                                 mdname(mddev),
                                 bdevname(rdev1->bdev, b),
                                 (unsigned long long)rdev1->sectors,
                                 bdevname(rdev2->bdev, b2),
                                 (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                pr_debug("md/raid0:%s: END\n",
                                         mdname(mddev));
                                break;
                        }
                        /* ... */
                        pr_debug("md/raid0:%s: NOT EQUAL\n",
                                 mdname(mddev));
                }
                if (!c) {
                        pr_debug("md/raid0:%s: ==> UNIQUE\n",
                                 mdname(mddev));
                        conf->nr_strip_zones++;
                }
        }
        pr_debug("md/raid0:%s: FINAL %d zones\n",
                 mdname(mddev), conf->nr_strip_zones);

        /* ... */
        rdev_for_each(rdev1, mddev) {
                int j = rdev1->raid_disk;

                if (mddev->level == 10) {
                        j /= 2; /* taking over a raid10-n2 array */
                        rdev1->new_raid_disk = j;
                }
                if (mddev->level == 1) {
                        j = 0;  /* taking over raid1: only one active disk */
                        rdev1->new_raid_disk = j;
                }
                if (j < 0 || j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
                }
                if (dev[j]) {
                        printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
                }
                dev[j] = rdev1;
                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
                        conf->has_merge_bvec = 1;
                cnt++;
                if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
                        discard_supported = true;
        }
        if (cnt != mddev->raid_disks) {
                printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
                       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
                goto abort;
        }

        /* ... */
        for (i = 1; i < conf->nr_strip_zones; i++) {
                int j;

                zone = conf->strip_zone + i;
                dev = conf->devlist + i * mddev->raid_disks;

                pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
                zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        rdev = conf->devlist[j];
                        if (rdev->sectors <= zone->dev_start) {
                                pr_debug("md/raid0:%s: checking %s ... nope\n",
                                         mdname(mddev),
                                         bdevname(rdev->bdev, b));
                                continue;
                        }
                        pr_debug("md/raid0:%s: checking %s ..."
                                 " contained as device %d\n",
                                 mdname(mddev),
                                 bdevname(rdev->bdev, b), c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                pr_debug("md/raid0:%s: (%llu) is smallest!\n",
                                         mdname(mddev),
                                         (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                sectors = (smallest->sectors - zone->dev_start) * c;
                pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
                         mdname(mddev),
                         zone->nb_dev, (unsigned long long)sectors);

                curr_zone_end += sectors;
                zone->zone_end = curr_zone_end;

                pr_debug("md/raid0:%s: current zone start: %llu\n",
                         mdname(mddev),
                         (unsigned long long)smallest->sectors);
        }

        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        /* ... */
        if (!discard_supported)
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

        pr_debug("md/raid0:%s: done.\n", mdname(mddev));
        *private_conf = conf;
        return 0;
abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        *private_conf = NULL;
        return err;
}
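
/*
 * find_zone() locates the strip zone holding a given array sector and, as a
 * side effect, rewrites *sectorp to the sector's offset within that zone,
 * hence the subtraction of the previous zone's end below.
 */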
static struct strip_zone *find_zone(struct r0conf *conf, sector_t *sectorp)
{
        /* ... */
                        if (i)
                                *sectorp = sector - z[i-1].zone_end;
        /* ... */
}
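
/*
 * map_sector() translates a zone-relative sector into a member device and an
 * offset on that device. For power-of-two chunk sizes the chunk number and
 * the offset inside the chunk fall out of shifts and masks; otherwise the
 * slower sector_div() path is taken.
 */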
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
                                sector_t sector, sector_t *sector_offset)
{
        unsigned int sect_in_chunk;
        sector_t chunk;
        unsigned int chunk_sects = mddev->chunk_sectors;

        if (is_power_of_2(chunk_sects)) {
                int chunksect_bits = ffz(~chunk_sects);
                /* find the sector offset inside the chunk */
                sect_in_chunk = sector & (chunk_sects - 1);
                sector >>= chunksect_bits;
                /* chunk in zone; the quotient is the chunk on the real device */
                chunk = *sector_offset;
                sector_div(chunk, zone->nb_dev << chunksect_bits);
        } else {
                sect_in_chunk = sector_div(sector, chunk_sects);
                chunk = *sector_offset;
                sector_div(chunk, chunk_sects * zone->nb_dev);
        }
        /* real sector = chunk start on the device + position in the chunk */
        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
        /* ... */
}
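
/*
 * raid0_mergeable_bvec() tells the block layer how many bytes of a bio
 * vector can be accepted at this offset: a request must not cross a chunk
 * boundary, and if the member device has its own merge_bvec_fn the question
 * is forwarded to it after mapping the sector.
 */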
static int raid0_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        struct mddev *mddev = q->queuedata;
        struct r0conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        sector_t sector_offset = sector;
        int max;
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
        struct strip_zone *zone;
        struct md_rdev *rdev;
        struct request_queue *subq;

        if (is_power_of_2(chunk_sectors))
                max = (chunk_sectors - ((sector & (chunk_sectors-1))
                                                + bio_sectors)) << 9;
        else
                max = (chunk_sectors - (sector_div(sector, chunk_sectors)
                                                + bio_sectors)) << 9;
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        if (max < biovec->bv_len)
                /* too small already, no need to check further */
                return max;
        if (!conf->has_merge_bvec)
                return max;

        /* may need to check the subordinate device */
        sector = sector_offset;
        zone = find_zone(mddev->private, &sector_offset);
        rdev = map_sector(mddev, zone, sector, &sector_offset);
        subq = bdev_get_queue(rdev->bdev);
        if (subq->merge_bvec_fn) {
                bvm->bi_bdev = rdev->bdev;
                bvm->bi_sector = sector_offset + zone->dev_start +
                        rdev->data_offset;
                return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
        } else
                return max;
}
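
/*
 * raid0_size() returns the capacity of the array: with no parity or
 * mirroring it is simply the sum of the sizes of all member devices.
 * Generic reshape is not supported, hence the WARN_ONCE.
 */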
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        struct md_rdev *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        rdev_for_each(rdev, mddev)
                array_sectors += rdev->sectors;

        return array_sectors;
}
static int raid0_stop(struct mddev *mddev);
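
/*
 * raid0_run() assembles the array at start time: it validates the chunk
 * size, builds the strip zones (unless a takeover already provided them via
 * mddev->private), publishes the array size, and bumps the queue read-ahead
 * so it covers whole stripes.
 */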
static int raid0_run(struct mddev *mddev)
{
        struct r0conf *conf;
        int ret;

        if (mddev->chunk_sectors == 0) {
                printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
                       mdname(mddev));
                return -EINVAL;
        }
        /* ... */
        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
                ret = create_strip_zones(mddev, &conf);
                if (ret < 0)
                        return ret;
                mddev->private = conf;
        }
        conf = mddev->private;

        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
               mdname(mddev),
               (unsigned long long)mddev->array_sectors);
        {
                /* read-ahead should cover at least two full stripes */
                int stripe = mddev->raid_disks *
                        (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
        /* ... */
}
static int raid0_stop(struct mddev *mddev)
{
        /* ... */
}
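
/*
 * is_io_in_chunk_boundary() checks that a bio fits entirely inside one
 * chunk, so it can be remapped to a single member device without being
 * split first.
 */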
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                        unsigned int chunk_sects, struct bio *bio)
{
        if (likely(is_power_of_2(chunk_sects))) {
                return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
                                        + (bio->bi_size >> 9));
        } else {
                sector_t sector = bio->bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                + (bio->bi_size >> 9));
        }
}
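
/*
 * raid0_make_request() is the I/O entry point: bios that straddle a chunk
 * boundary are split and each half resubmitted recursively; everything else
 * is remapped onto the owning member device and passed down.
 */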
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
        unsigned int chunk_sects;
        sector_t sector_offset;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;

        /* ... */
        chunk_sects = mddev->chunk_sectors;
        if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
                sector_t sector = bio->bi_sector;
                struct bio_pair *bp;

                if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                if (likely(is_power_of_2(chunk_sects)))
                        bp = bio_split(bio, chunk_sects - (sector &
                                                           (chunk_sects-1)));
                else
                        bp = bio_split(bio, chunk_sects -
                                       sector_div(sector, chunk_sects));
                raid0_make_request(mddev, &bp->bio1);
                raid0_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
                return;
        }

        sector_offset = bio->bi_sector;
        zone = find_zone(mddev->private, &sector_offset);
        tmp_dev = map_sector(mddev, zone, bio->bi_sector,
                             &sector_offset);
        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = sector_offset + zone->dev_start +
                tmp_dev->data_offset;

        if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
                /* Just ignore it */
                bio_endio(bio, 0);
                return;
        }

        generic_make_request(bio);
        return;

bad_map:
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n",
               mdname(mddev), chunk_sects / 2,
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
        seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}
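
/*
 * raid0_takeover_raid45() converts a raid4/raid5 array to raid0. Exactly
 * one disk may be missing and it must be the parity disk, so the remaining
 * devices already hold a pure stripe layout.
 */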
static void *raid0_takeover_raid45(struct mddev *mddev)
{
        struct md_rdev *rdev;
        struct r0conf *priv_conf;

        if (mddev->degraded != 1) {
                printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
                       mdname(mddev), mddev->degraded);
                return ERR_PTR(-EINVAL);
        }
        rdev_for_each(rdev, mddev)
                if (rdev->raid_disk == mddev->raid_disks-1) {
                        printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
                               mdname(mddev));
                        return ERR_PTR(-EINVAL);
                }
        /* ... */
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}
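
/*
 * raid0_takeover_raid10() accepts only a near-2, far-1 layout with an even
 * number of disks where one disk of every mirror pair has failed, i.e.
 * exactly the data of a raid0 array remains.
 */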
static void *raid0_takeover_raid10(struct mddev *mddev)
{
        struct r0conf *priv_conf;

        /* Check layout: near copies = 2, far copies = 1 */
        if (mddev->layout != ((1 << 8) + 2)) {
                printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
                       mdname(mddev), mddev->layout);
                return ERR_PTR(-EINVAL);
        }
        if (mddev->raid_disks & 1) {
                printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        if (mddev->degraded != (mddev->raid_disks>>1)) {
                printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        /* ... */
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}
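
/*
 * raid0_takeover_raid1() turns a raid1 array whose other mirrors have all
 * failed into a single-disk raid0. raid1 has no notion of chunk size, so
 * the loop below picks the largest power of two (starting at 64K) that
 * divides the array size.
 */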
static void *raid0_takeover_raid1(struct mddev *mddev)
{
        struct r0conf *priv_conf;
        int chunksect;

        if ((mddev->raid_disks - 1) != mddev->degraded) {
                printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        chunksect = 64 * 2; /* 64K by default */
        /* The array must be an exact multiple of chunksize */
        while (chunksect && (mddev->array_sectors & (chunksect - 1)))
                chunksect >>= 1;
        /* ... */
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}
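
/*
 * raid0_takeover() dispatches on the source personality. raid5 is accepted
 * only with ALGORITHM_PARITY_N, where parity is confined to the last disk
 * and the data disks already form a raid0 stripe.
 */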
static void *raid0_takeover(struct mddev *mddev)
{
        /* raid0 can take over:
         *  raid4 - if all data disks are active.
         *  raid5 - providing it is Raid4 layout and one disk is faulty
         *  raid10 - assuming we have all necessary active disks
         *  raid1 - with (N - 1) mirror drives faulty
         */
        if (mddev->level == 4)
                return raid0_takeover_raid45(mddev);

        if (mddev->level == 5) {
                if (mddev->layout == ALGORITHM_PARITY_N)
                        return raid0_takeover_raid45(mddev);

                printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
                       mdname(mddev), ALGORITHM_PARITY_N);
        }

        if (mddev->level == 10)
                return raid0_takeover_raid10(mddev);

        if (mddev->level == 1)
                return raid0_takeover_raid1(mddev);

        printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
               mddev->level);

        return ERR_PTR(-EINVAL);
}
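
/* raid0 keeps no internally buffered state, so quiesce is a no-op. */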
static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
        .size           = raid0_size,
        .takeover       = raid0_takeover,
        .quiesce        = raid0_quiesce,
};
static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}