#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "thin"

#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

#define MAX_DEV_ID ((1 << 24) - 1)
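/*
 * Rough intent of the tunables above, judging from how they are used
 * below: ENDIO_HOOK_POOL_SIZE and MAPPING_POOL_SIZE size the per-pool
 * mempools for endio hooks and new-mapping records, PRISON_CELLS is the
 * number of bio prison cells used to serialise I/O against blocks being
 * provisioned or copied, and COMMIT_PERIOD (HZ jiffies, roughly one
 * second) bounds how long the worker waits before committing metadata.
 * The data block size is constrained to 64 KiB .. 1 GiB, expressed in
 * 512-byte sectors.
 */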
static struct dm_thin_pool_table {
        struct mutex mutex;
        struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
        mutex_init(&dm_thin_pool_table.mutex);
        INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
        list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
        list_del(&pool->list);
}
        struct pool *pool = NULL, *tmp;

        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

                if (tmp->pool_md == md) {

        struct pool *pool = NULL, *tmp;

        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

                if (tmp->md_dev == md_dev) {
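/*
 * The lookup helpers above appear to walk dm_thin_pool_table.pools under
 * the table mutex, returning the pool whose pool_md (the pool target's
 * mapped_device) or md_dev (its metadata block_device) matches.
 * __pool_find() further down relies on both lookups to catch a metadata
 * device being reused by a second pool.
 */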
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
        bio_list_init(&bios);
        bio_list_merge(&bios, master);
        bio_list_init(master);

        while ((bio = bio_list_pop(&bios))) {
                        bio_list_add(master, bio);

static void requeue_io(struct thin_c *tc)
{
        struct pool *pool = tc->pool;

        spin_unlock_irqrestore(&pool->lock, flags);
        if (tc->pool->sectors_per_block_shift < 0)

                block_nr >>= tc->pool->sectors_per_block_shift;

        struct pool *pool = tc->pool;
        sector_t bi_sector = bio->bi_sector;

        if (tc->pool->sectors_per_block_shift < 0)
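/*
 * sectors_per_block_shift is negative when the pool's data block size is
 * not a power of two (see block_size_is_power_of_two() below); in that
 * case the block arithmetic presumably falls back to division rather
 * than the shift used here.
 */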
static void remap_to_origin(struct thin_c *tc, struct bio *bio)

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)

static void issue(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;

        if (!bio_triggers_commit(tc, bio)) {

        spin_unlock_irqrestore(&pool->lock, flags);

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
        remap_to_origin(tc, bio);

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
                            dm_block_t block)
{
        remap(tc, bio, block);
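/*
 * issue() appears to send most bios straight on; only bios for which
 * bio_triggers_commit() is true (e.g. FLUSH/FUA against metadata that
 * has not yet been committed) are parked on a deferred list under
 * pool->lock so the worker can commit before letting them through.
 */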
static void wake_worker(struct pool *pool)

        struct pool *pool = m->tc->pool;

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
        struct pool *pool = m->tc->pool;

        m->err = read_err || write_err ? -EIO : 0;

        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);

static void overwrite_endio(struct bio *bio, int err)
{
        struct pool *pool = m->tc->pool;

        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
        struct pool *pool = tc->pool;

        spin_unlock_irqrestore(&tc->pool->lock, flags);

        struct pool *pool = tc->pool;

        bio_list_init(&bios);

        spin_unlock_irqrestore(&pool->lock, flags);
                DMERR("dm_thin_insert_block() failed");

        cell_defer_except(tc, m->cell);

        bio_io_error(m->bio);
        cell_defer_except(tc, m->cell);
        cell_defer_except(tc, m->cell2);

        cell_defer_except(tc, m->cell);
        cell_defer_except(tc, m->cell2);

                DMERR("dm_thin_remove_block() failed");

        process_prepared_discard_passdown(m);

static void process_prepared(struct pool *pool, struct list_head *head,

        INIT_LIST_HEAD(&maps);

        list_splice_init(head, &maps);
        spin_unlock_irqrestore(&pool->lock, flags);
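/*
 * process_prepared() drains completed "new mapping" work items off the
 * pool (under pool->lock) and runs a handler on each: inserting the new
 * block mapping into the metadata, erroring the original bio and
 * releasing the prison cells on failure, or removing/passing down a
 * discard, as the DMERR strings above suggest.
 */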
static int io_overlaps_block(struct pool *pool, struct bio *bio)

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
        return (bio_data_dir(bio) == WRITE) &&
                io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,

        *save = bio->bi_end_io;

static int ensure_next_mapping(struct pool *pool)
        struct pool *pool = tc->pool;

        INIT_LIST_HEAD(&m->list);

        if (io_overwrites_block(pool, bio)) {

                remap_and_issue(tc, bio, data_dest);

                struct dm_io_region from, to;

                                   0, copy_complete, m);

                        DMERR("dm_kcopyd_copy() failed");

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,

        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio);

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,

                      virt_block, data_dest, cell, bio);

        struct pool *pool = tc->pool;

        INIT_LIST_HEAD(&m->list);

        if (!pool->pf.zero_new_blocks)
                process_prepared_mapping(m);

        else if (io_overwrites_block(pool, bio)) {

                remap_and_issue(tc, bio, data_block);

                struct dm_io_region to;

                        DMERR("dm_kcopyd_zero() failed");
static int commit(struct pool *pool)
{
                DMERR("commit failed, error = %d", r);

static int commit_or_fallback(struct pool *pool)
{
        if (get_pool_mode(pool) != PM_WRITE)

        struct pool *pool = tc->pool;

                DMWARN("%s: reached low water mark, sending event.",

        spin_unlock_irqrestore(&pool->lock, flags);

        (void) commit_or_fallback(pool);

                        DMWARN("%s: no free space available.",

                spin_unlock_irqrestore(&pool->lock, flags);
static void retry_on_resume(struct bio *bio)
{
        struct pool *pool = tc->pool;

        spin_unlock_irqrestore(&pool->lock, flags);

        bio_list_init(&bios);

        while ((bio = bio_list_pop(&bios)))
                retry_on_resume(bio);
static void process_discard(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;

        build_virtual_key(tc->td, block, &key);

                build_data_key(tc->td, lookup_result.block, &key2);

                if (io_overlaps_block(pool, bio)) {
                        m = get_next_mapping(pool);
                        m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;

                        spin_unlock_irqrestore(&pool->lock, flags);

                        if ((!lookup_result.shared) && pool->pf.discard_passdown)
                                remap_and_issue(tc, bio, lookup_result.block);

                DMERR("discard: find block unexpectedly returned %d", r);
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,

        r = alloc_data_block(tc, &data_block);

                schedule_internal_copy(tc, block, lookup_result->block,
                                       data_block, cell, bio);

                DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);

static void process_shared_bio(struct thin_c *tc, struct bio *bio,

        struct pool *pool = tc->pool;

        build_data_key(tc->td, lookup_result->block, &key);

        if (bio_data_dir(bio) == WRITE && bio->bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);

                remap_and_issue(tc, bio, lookup_result->block);
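/*
 * Copy-on-write path: a write to a block whose lookup result is marked
 * shared allocates a fresh data block and schedules an internal copy
 * (break_sharing); reads simply remap to the shared block.
 */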
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,

        if (!bio->bi_size) {

                remap_and_issue(tc, bio, 0);

        if (bio_data_dir(bio) == READ) {

        r = alloc_data_block(tc, &data_block);

                        schedule_external_copy(tc, block, data_block, cell, bio);

                        schedule_zero(tc, block, data_block, cell, bio);

                DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);

static void process_bio(struct thin_c *tc, struct bio *bio)
{
        build_virtual_key(tc->td, block, &key);

                if (lookup_result.shared)
                        process_shared_bio(tc, bio, block, &lookup_result);

                        remap_and_issue(tc, bio, lookup_result.block);

                        remap_to_origin_and_issue(tc, bio);

                        provision_block(tc, bio, block, cell);

                DMERR("dm_thin_find_block() failed, error = %d", r);
static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
        int rw = bio_data_dir(bio);

                if (lookup_result.shared && (rw == WRITE) && bio->bi_size)

                        remap_and_issue(tc, bio, lookup_result.block);

                        remap_to_origin_and_issue(tc, bio);

                DMERR("dm_thin_find_block() failed, error = %d", r);

static void process_bio_fail(struct thin_c *tc, struct bio *bio)

static int need_commit_due_to_time(struct pool *pool)
static void process_deferred_bios(struct pool *pool)
{
        unsigned long flags;

        bio_list_init(&bios);

        spin_unlock_irqrestore(&pool->lock, flags);

        while ((bio = bio_list_pop(&bios))) {

                if (ensure_next_mapping(pool)) {

                        spin_unlock_irqrestore(&pool->lock, flags);

        bio_list_init(&bios);

        spin_unlock_irqrestore(&pool->lock, flags);

        if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))

        if (commit_or_fallback(pool)) {
                while ((bio = bio_list_pop(&bios)))

        while ((bio = bio_list_pop(&bios)))

        process_deferred_bios(pool);
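/*
 * Worker loop: each wakeup first handles prepared mappings and discards,
 * then the deferred bios.  Deferred flush bios are only reissued after a
 * successful metadata commit, and need_commit_due_to_time() appears to
 * force a commit roughly every COMMIT_PERIOD even when no flush is
 * pending.
 */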
static enum pool_mode get_pool_mode(struct pool *pool)
{
        return pool->pf.mode;
}

static void set_pool_mode(struct pool *pool, enum pool_mode mode)
{
                DMERR("switching pool to failure mode");

                DMERR("switching pool to read-only mode");

                        DMERR("aborting transaction failed");
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
        unsigned long flags;
        struct pool *pool = tc->pool;

        spin_unlock_irqrestore(&pool->lock, flags);

        struct pool *pool = tc->pool;

static int thin_bio_map(struct dm_target *ti, struct bio *bio,

        map_context->ptr = thin_hook_bio(tc, bio);

                thin_defer_bio(tc, bio);

                        thin_defer_bio(tc, bio);

                        remap(tc, bio, result.block);

                thin_defer_bio(tc, bio);
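/*
 * Fast path: thin_bio_map() hooks the bio, then appears to try a
 * non-blocking metadata lookup.  Mapped, unshared blocks are remapped in
 * the caller's context; discards, flushes, shared blocks and unmapped
 * blocks are all deferred to the worker thread, which may block on
 * metadata I/O.
 */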
        unsigned long flags;

        r = !bio_list_empty(&pt->pool->retry_on_resume_list);
        spin_unlock_irqrestore(&pt->pool->lock, flags);

                r = bdi_congested(&q->backing_dev_info, bdi_bits);

static void __requeue_bios(struct pool *pool)

static bool data_dev_supports_discard(struct pool_c *pt)
{
        return q && blk_queue_discard(q);
static void disable_passdown_if_not_supported(struct pool_c *pt)
{
        struct pool *pool = pt->pool;
        struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;

        if (!data_dev_supports_discard(pt))
                reason = "discard unsupported";

                reason = "max discard sectors smaller than a block";

        else if (data_limits->discard_granularity > block_size)
                reason = "discard granularity larger than a block";

        else if (block_size & (data_limits->discard_granularity - 1))
                reason = "discard granularity not a factor of block size";

                DMWARN("Data device (%s) %s: Disabling discard passdown.",
                       bdevname(data_bdev, buf), reason);
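/*
 * Discard passdown is silently disabled when the data device cannot
 * honour it: no discard support at all, max_discard_sectors smaller than
 * a pool block, or a discard granularity that does not divide the block
 * size.  Discards then still unmap blocks in the pool but are not
 * forwarded to the data device.
 */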
static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
        if (old_mode > new_mode)
                new_mode = old_mode;

        set_pool_mode(pool, new_mode);

static void unbind_control_target(struct pool *pool, struct dm_target *ti)

static void __pool_destroy(struct pool *pool)
{
        __pool_table_remove(pool);

                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
static struct kmem_cache *_new_mapping_cache;

static struct pool *pool_create(struct mapped_device *pool_md,
                                struct block_device *metadata_dev,
                                unsigned long block_size,
                                int read_only, char **error)
{
        bool format_device = read_only ? false : true;

                *error = "Error creating metadata object";
                return (struct pool *)pmd;

                *error = "Error allocating memory for pool";
                err_p = ERR_PTR(-ENOMEM);

        if (block_size & (block_size - 1))

        pool_features_init(&pool->pf);

                *error = "Error creating pool's bio prison";
                err_p = ERR_PTR(-ENOMEM);

        if (IS_ERR(pool->copier)) {
                r = PTR_ERR(pool->copier);
                *error = "Error creating pool's kcopyd client";

                goto bad_kcopyd_client;

                *error = "Error creating pool's workqueue";
                err_p = ERR_PTR(-ENOMEM);

                *error = "Error creating pool's shared read deferred set";
                err_p = ERR_PTR(-ENOMEM);
                goto bad_shared_read_ds;

                *error = "Error creating pool's all io deferred set";
                err_p = ERR_PTR(-ENOMEM);

                                                      _new_mapping_cache);

                *error = "Error creating pool's mapping mempool";
                err_p = ERR_PTR(-ENOMEM);
                goto bad_mapping_pool;

                *error = "Error creating pool's endio_hook mempool";
                err_p = ERR_PTR(-ENOMEM);
                goto bad_endio_hook_pool;

        pool->md_dev = metadata_dev;
        __pool_table_insert(pool);

bad_endio_hook_pool:

                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
static void __pool_inc(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

static void __pool_dec(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

                __pool_destroy(pool);

static struct pool *__pool_find(struct mapped_device *pool_md,
                                struct block_device *metadata_dev,
                                unsigned long block_size, int read_only,

        struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);

                if (pool->pool_md != pool_md) {
                        *error = "metadata device already in use by a pool";
                        return ERR_PTR(-EBUSY);

                pool = __pool_table_lookup(pool_md);

                        if (pool->md_dev != metadata_dev) {
                                *error = "different pool cannot replace a pool";

                        pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
static void pool_dtr(struct dm_target *ti)
{
        unbind_control_target(pt->pool, ti);
        __pool_dec(pt->pool);

        const char *arg_name;

        static struct dm_arg _args[] = {
                {0, 3, "Invalid number of pool feature arguments"},

        while (argc && !r) {

                if (!strcasecmp(arg_name, "skip_block_zeroing"))

                else if (!strcasecmp(arg_name, "ignore_discard"))

                else if (!strcasecmp(arg_name, "no_discard_passdown"))

                        ti->error = "Unrecognised pool feature requested";
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r, pool_created = 0;
        struct dm_dev *metadata_dev;

                ti->error = "Invalid argument count";

                ti->error = "Error opening metadata block device";

        metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;

                DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",

                ti->error = "Error getting data device";

        if (kstrtoul(argv[2], 10, &block_size) || !block_size ||

                ti->error = "Invalid block size";

        if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
                ti->error = "Invalid low water mark";

        pool_features_init(&pf);

        r = parse_pool_features(&as, &pf, ti);

                ti->error = "Discard support cannot be disabled once enabled";
                goto out_flags_changed;

        pt->callbacks.congested_fn = pool_is_congested;
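/*
 * Pool table line layout implied by the argument parsing above:
 *
 *   thin-pool <metadata dev> <data dev> <data block size (sectors)>
 *             <low water mark (blocks)> [<#feature args> [<arg>]*]
 *
 * e.g. (hypothetical devices and sizes):
 *
 *   0 1048576 thin-pool /dev/sdb1 /dev/sdb2 128 16384 1 skip_block_zeroing
 */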
static int pool_map(struct dm_target *ti, struct bio *bio,

        struct pool *pool = pt->pool;
        unsigned long flags;

        spin_unlock_irqrestore(&pool->lock, flags);
static int pool_preresume(struct dm_target *ti)
{
        struct pool *pool = pt->pool;

        r = bind_control_target(pool, ti);

                DMERR("failed to retrieve data device size");

        if (data_size < sb_data_size) {
                DMERR("pool target too small, is %llu blocks (expected %llu)",
                      (unsigned long long)data_size, sb_data_size);

        } else if (data_size > sb_data_size) {
                        DMERR("failed to resize data device");

                (void) commit_or_fallback(pool);
static void pool_resume(struct dm_target *ti)
{
        struct pool *pool = pt->pool;
        unsigned long flags;

        __requeue_bios(pool);
        spin_unlock_irqrestore(&pool->lock, flags);

        do_waker(&pool->waker.work);

static void pool_postsuspend(struct dm_target *ti)
{
        struct pool *pool = pt->pool;

        (void) commit_or_fallback(pool);
static int check_arg_count(unsigned argc, unsigned args_required)
{
        if (argc != args_required) {
                DMWARN("Message received with %u arguments instead of %u.",
                       argc, args_required);

        if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&

                DMWARN("Message received with invalid device id: %s", arg);
static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
{
        r = check_arg_count(argc, 2);

        r = read_dev_id(argv[1], &dev_id, 1);

                DMWARN("Creation of new thinly-provisioned device with id %s failed.",

static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
        r = check_arg_count(argc, 3);

        r = read_dev_id(argv[1], &dev_id, 1);

        r = read_dev_id(argv[2], &origin_dev_id, 1);

                DMWARN("Creation of new snapshot %s of device %s failed.",

static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
        r = check_arg_count(argc, 2);

        r = read_dev_id(argv[1], &dev_id, 1);

                DMWARN("Deletion of thin device %s failed.", argv[1]);

static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
        r = check_arg_count(argc, 3);

        if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
                DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);

        if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
                DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);

                DMWARN("Failed to change transaction id from %s to %s.",

static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
        r = check_arg_count(argc, 1);

        (void) commit_or_fallback(pool);

                DMWARN("reserve_metadata_snap message failed.");

static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
        r = check_arg_count(argc, 1);

                DMWARN("release_metadata_snap message failed.");
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct pool *pool = pt->pool;

                r = process_create_thin_mesg(argc, argv, pool);

        else if (!strcasecmp(argv[0], "create_snap"))
                r = process_create_snap_mesg(argc, argv, pool);

                r = process_delete_mesg(argc, argv, pool);

        else if (!strcasecmp(argv[0], "set_transaction_id"))
                r = process_set_transaction_id_mesg(argc, argv, pool);

        else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
                r = process_reserve_metadata_snap_mesg(argc, argv, pool);

        else if (!strcasecmp(argv[0], "release_metadata_snap"))
                r = process_release_metadata_snap_mesg(argc, argv, pool);

                DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

        (void) commit_or_fallback(pool);
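/*
 * The message handlers above are normally driven via dmsetup, e.g.
 * (hypothetical pool name and device ids):
 *
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *   dmsetup message /dev/mapper/pool 0 delete 1
 *
 * A message that succeeds appears to be followed by a metadata commit so
 * that the change is durable.
 */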
static void emit_flags(struct pool_features *pf, char *result,
                       unsigned sz, unsigned maxlen)
{
                DMEMIT("skip_block_zeroing ");

                DMEMIT("ignore_discard ");

                DMEMIT("no_discard_passdown ");

static int pool_status(struct dm_target *ti, status_type_t type,
                       unsigned status_flags, char *result, unsigned maxlen)
{
        struct pool *pool = pt->pool;

        if (get_pool_mode(pool) == PM_FAIL) {

                (void) commit_or_fallback(pool);

                                                  &nr_free_blocks_metadata);

                                     &nr_free_blocks_data);

        DMEMIT("%llu %llu/%llu %llu/%llu ",
               (unsigned long long)transaction_id,
               (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
               (unsigned long long)nr_blocks_metadata,
               (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
               (unsigned long long)nr_blocks_data);

                DMEMIT("%llu ", held_root);

        if (pool->pf.discard_enabled && pool->pf.discard_passdown)
                DMEMIT("discard_passdown");

                DMEMIT("no_discard_passdown");

        DMEMIT("%s %s %lu %llu ",
static int pool_iterate_devices(struct dm_target *ti,

static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                      struct bio_vec *biovec, int max_size)
{
        if (!q->merge_bvec_fn)

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));

static bool block_size_is_power_of_two(struct pool *pool)

static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
        struct pool *pool = pt->pool;
        struct queue_limits *data_limits;

                data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
                limits->discard_granularity = data_limits->discard_granularity;
        } else if (block_size_is_power_of_two(pool))
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct pool *pool = pt->pool;

        disable_passdown_if_not_supported(pt);

        set_discard_limits(pt, limits);

        .name = "thin-pool",
        .version = {1, 5, 0},
        .postsuspend = pool_postsuspend,
        .preresume = pool_preresume,
        .resume = pool_resume,
        .message = pool_message,
        .status = pool_status,
        .merge = pool_merge,
        .iterate_devices = pool_iterate_devices,
        .io_hints = pool_io_hints,
static void thin_dtr(struct dm_target *ti)
{
        __pool_dec(tc->pool);

static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        struct dm_dev *pool_dev, *origin_dev;

        if (argc != 2 && argc != 3) {
                ti->error = "Invalid argument count";

                ti->error = "Out of memory";

                        ti->error = "Error opening origin device";
                        goto bad_origin_dev;

                ti->error = "Error opening pool device";

        if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
                ti->error = "Invalid device id";

                ti->error = "Couldn't get pool mapped device";

        tc->pool = __pool_table_lookup(pool_md);

                ti->error = "Couldn't find pool object";
                goto bad_pool_lookup;

        __pool_inc(tc->pool);

                ti->error = "Couldn't open thin device, Pool is in fail mode";

                ti->error = "Couldn't open thin internal device";

        if (tc->pool->pf.discard_enabled) {

        __pool_dec(tc->pool);
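/*
 * Thin target table line implied by the parsing above:
 *
 *   thin <pool dev> <dev id> [<external origin dev>]
 *
 * e.g. (hypothetical devices):
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *
 * The constructor locates the pool object through the global pool table
 * via the pool device's mapped_device and takes a reference on it.
 */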
static int thin_map(struct dm_target *ti, struct bio *bio,

        return thin_bio_map(ti, bio, map_context);

static int thin_endio(struct dm_target *ti,
                      struct bio *bio, int err,

        unsigned long flags;

        struct pool *pool = h->tc->pool;

                INIT_LIST_HEAD(&work);

                        __maybe_add_mapping(m);

                spin_unlock_irqrestore(&pool->lock, flags);

                INIT_LIST_HEAD(&work);

                spin_unlock_irqrestore(&pool->lock, flags);

                requeue_io((struct thin_c *)ti->private);
static int thin_status(struct dm_target *ti, status_type_t type,
                       unsigned status_flags, char *result, unsigned maxlen)

                        DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);

                                DMEMIT("%llu", ((highest + 1) *
                                                tc->pool->sectors_per_block) - 1);

                               (unsigned long) tc->dev_id);

static int thin_iterate_devices(struct dm_target *ti,

        struct pool *pool = tc->pool;

        blocks = pool->ti->len;

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;

        .version = {1, 5, 0},
        .end_io = thin_endio,
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .iterate_devices = thin_iterate_devices,
        .io_hints = thin_io_hints,
static int __init dm_thin_init(void)
{
                goto bad_pool_target;

        if (!_new_mapping_cache)
                goto bad_new_mapping_cache;

        if (!_endio_hook_cache)
                goto bad_endio_hook_cache;

bad_endio_hook_cache:

bad_new_mapping_cache:

static void dm_thin_exit(void)
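/*
 * Module init/exit: dm_thin_init() registers the thin and thin-pool
 * targets and creates the slab caches backing the per-pool mapping and
 * endio_hook mempools, unwinding through the bad_* labels on failure;
 * dm_thin_exit() presumably unregisters the targets and destroys the
 * caches again.
 */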