#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#define DM_MSG_PREFIX "crypt"

#define LMK_SEED_SIZE 64

#define MIN_POOL_PAGES 32

static void clone_init(struct dm_crypt_io *, struct bio *);
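
/*
 * ESSIV (Encrypted Salt-Sector IV): the IV for a sector is that sector
 * number encrypted with a salt key, derived by hashing the cipher key
 * with the digest named in ivopts.  The setkey calls below (re)derive
 * that salt key for the per-cpu ESSIV cipher.
 */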
		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));

	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);

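/* Set up per cpu cipher state */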
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	/* ... */
		crypto_free_cipher(essiv_tfm);
	/* ... */
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

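/*
 * Generate the ESSIV IV: encrypt the little-endian sector number
 * with the salt-derived key.
 */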
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen. */
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

		ti->error = "Error initializing LMK hash";

		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";

		memcpy(lmk->seed, cc->key + (cc->key_size - subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	r = crypto_shash_init(&sdesc.desc);

	r = crypto_shash_export(&sdesc.desc, &md5state);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

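/*
 * IV generation operation tables; crypt_ctr_cipher() selects one of
 * these according to the requested ivmode.
 */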
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

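/*
 * Initialise the conversion context for a bio pair: the input/output
 * bvec indexes and offsets advance as sectors are converted.
 */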
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);

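/*
 * Convert one sector: generate the IV, bind the request to the
 * sector's sg_in/sg_out pair, and run the ablkcipher in the
 * direction of the original bio.
 */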
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	/* ... build sg_in/sg_out for one sector ... */

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

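/*
 * Allocate the per-cpu ablkcipher request on first use and bind it to
 * the tfm and async completion callback for this conversion.
 */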
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);
		/* ... */
	}
	/* ... */
}

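/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */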
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	/* ... */
	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		/* ... allocate pages from cc->page_pool into the clone ... */
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc,
				    struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

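/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */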
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;
	/* ... */
	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

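/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */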
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/* free the processed pages */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

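/*
 * Encrypt data for a WRITE: allocate bounce pages, convert the bio
 * fragment by fragment, and submit each encrypted fragment as soon as
 * its conversion completes.
 */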
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/* Prevent io from disappearing until this function completes. */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);
			if (unlikely(r < 0))
				break;
			io->sector = sector;
		}

		/* Out of memory -> run queues */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/* Fragments after the first use the base_io
			 * pending count. */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	/* ... */
	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
	/* ... */
	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

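/*
 * Decode key from its hex representation
 */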
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;
		key[i] = (u8)simple_strtoul(buffer, &endp, 16);
		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

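/*
 * Encode key into its hex representation
 */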
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int err = 0, i, r;

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);
out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	/* ... */
	crypt_free_tfms(cc);
	/* ... */
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
			  &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_requests = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

/*
 * Map an incoming bio: flushes and discards bypass the crypt queues;
 * reads are remapped and decrypted on completion, writes are encrypted
 * into a bounce bio before being submitted.
 */
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_requests)
			DMEMIT(" 1 allow_discards");

		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

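/* Message interface
 *	key set <key>
 *	key wipe
 */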
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name    = "crypt",
	.version = {1, 11, 0},
	.module  = THIS_MODULE,
	.ctr     = crypt_ctr,
	.dtr     = crypt_dtr,
	.map     = crypt_map,
	.status  = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume  = crypt_resume,
	.message = crypt_message,
	.merge   = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}