Linux Kernel 3.7.1
aes_s390.c
1 /*
2  * Cryptographic API.
3  *
4  * s390 implementation of the AES Cipher Algorithm.
5  *
6  * s390 Version:
7  * Copyright IBM Corp. 2005, 2007
8  * Author(s): Jan Glauber ([email protected])
9  * Sebastian Siewior ([email protected]) SW-Fallback
10  *
11  * Derived from "crypto/aes_generic.c"
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU General Public License as published by the Free
15  * Software Foundation; either version 2 of the License, or (at your option)
16  * any later version.
17  *
18  */
19 
20 #define KMSG_COMPONENT "aes_s390"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22 
23 #include <crypto/aes.h>
24 #include <crypto/algapi.h>
25 #include <linux/err.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include "crypt_s390.h"
29 
30 #define AES_KEYLEN_128 1
31 #define AES_KEYLEN_192 2
32 #define AES_KEYLEN_256 4
33 
34 static u8 *ctrblk;
35 static char keylen_flag;
36 
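/*
 * Per-tfm context for the plain, ECB and CBC AES transforms. enc/dec hold
 * the crypt_s390 function codes selected at setkey time, and the fallback
 * union points to a generic software implementation used whenever the CPU
 * lacks hardware support for the requested key length. iv[] sits directly
 * before key[] on purpose: cbc_aes_crypt() below passes sctx->iv as the KMC
 * parameter block, which is the chaining value followed by the key.
 */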
37 struct s390_aes_ctx {
38  u8 iv[AES_BLOCK_SIZE];
39  u8 key[AES_MAX_KEY_SIZE];
40  long enc;
41  long dec;
42  int key_len;
43  union {
44  struct crypto_blkcipher *blk;
45  struct crypto_cipher *cip;
46  } fallback;
47 };
48 
49 struct pcc_param {
50  u8 key[32];
51  u8 tweak[16];
52  u8 block[16];
53  u8 bit[16];
54  u8 xts[16];
55 };
56 
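/*
 * Per-tfm context for the XTS transform. key[] is immediately followed by
 * xts_param[] so that, starting at the right offset, the two together form
 * the KM parameter block (AES key followed by the XTS parameter computed
 * by PCC). pcc is the parameter block for the PCC instruction, and
 * fallback is a software xts(aes) blkcipher used for 192-bit keys, which
 * the hardware does not support.
 */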
57 struct s390_xts_ctx {
58  u8 key[32];
59  u8 xts_param[16];
60  struct pcc_param pcc;
61  long enc;
62  long dec;
63  int key_len;
64  struct crypto_blkcipher *fallback;
65 };
66 
67 /*
68  * Check if the key_len is supported by the HW.
69  * Returns 0 if it is, a positive number if it is not and software fallback is
70  * required or a negative number in case the key size is not valid
71  */
72 static int need_fallback(unsigned int key_len)
73 {
74  switch (key_len) {
75  case 16:
76  if (!(keylen_flag & AES_KEYLEN_128))
77  return 1;
78  break;
79  case 24:
80  if (!(keylen_flag & AES_KEYLEN_192))
81  return 1;
82  break;
83  case 32:
84  if (!(keylen_flag & AES_KEYLEN_256))
85  return 1;
86  break;
87  default:
88  return -1;
89  break;
90  }
91  return 0;
92 }
93 
94 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
95  unsigned int key_len)
96 {
97  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
98  int ret;
99 
100  sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
101  sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
102  CRYPTO_TFM_REQ_MASK);
103 
104  ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
105  if (ret) {
106  tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
107  tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
108  CRYPTO_TFM_RES_MASK);
109  }
110  return ret;
111 }
112 
113 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
114  unsigned int key_len)
115 {
116  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
117  u32 *flags = &tfm->crt_flags;
118  int ret;
119 
120  ret = need_fallback(key_len);
121  if (ret < 0) {
122  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
123  return -EINVAL;
124  }
125 
126  sctx->key_len = key_len;
127  if (!ret) {
128  memcpy(sctx->key, in_key, key_len);
129  return 0;
130  }
131 
132  return setkey_fallback_cip(tfm, in_key, key_len);
133 }
134 
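/*
 * Single-block encrypt/decrypt. If need_fallback() reports that the key
 * length is not supported by the hardware, the request is handed to the
 * software cipher allocated in fallback_init_cip(); otherwise one block is
 * processed by the KM (cipher message) instruction with the key as the
 * parameter block.
 */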
135 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
136 {
137  const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
138 
139  if (unlikely(need_fallback(sctx->key_len))) {
140  crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
141  return;
142  }
143 
144  switch (sctx->key_len) {
145  case 16:
146  crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
147  AES_BLOCK_SIZE);
148  break;
149  case 24:
150  crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
151  AES_BLOCK_SIZE);
152  break;
153  case 32:
154  crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
155  AES_BLOCK_SIZE);
156  break;
157  }
158 }
159 
160 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
161 {
162  const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
163 
164  if (unlikely(need_fallback(sctx->key_len))) {
165  crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
166  return;
167  }
168 
169  switch (sctx->key_len) {
170  case 16:
171  crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
172  AES_BLOCK_SIZE);
173  break;
174  case 24:
175  crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
176  AES_BLOCK_SIZE);
177  break;
178  case 32:
179  crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
180  AES_BLOCK_SIZE);
181  break;
182  }
183 }
184 
185 static int fallback_init_cip(struct crypto_tfm *tfm)
186 {
187  const char *name = tfm->__crt_alg->cra_name;
188  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
189 
190  sctx->fallback.cip = crypto_alloc_cipher(name, 0,
191  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
192 
193  if (IS_ERR(sctx->fallback.cip)) {
194  pr_err("Allocating AES fallback algorithm %s failed\n",
195  name);
196  return PTR_ERR(sctx->fallback.cip);
197  }
198 
199  return 0;
200 }
201 
202 static void fallback_exit_cip(struct crypto_tfm *tfm)
203 {
204  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
205 
206  crypto_free_cipher(sctx->fallback.cip);
207  sctx->fallback.cip = NULL;
208 }
209 
210 static struct crypto_alg aes_alg = {
211  .cra_name = "aes",
212  .cra_driver_name = "aes-s390",
213  .cra_priority = CRYPT_S390_PRIORITY,
214  .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
215  CRYPTO_ALG_NEED_FALLBACK,
216  .cra_blocksize = AES_BLOCK_SIZE,
217  .cra_ctxsize = sizeof(struct s390_aes_ctx),
218  .cra_module = THIS_MODULE,
219  .cra_init = fallback_init_cip,
220  .cra_exit = fallback_exit_cip,
221  .cra_u = {
222  .cipher = {
223  .cia_min_keysize = AES_MIN_KEY_SIZE,
224  .cia_max_keysize = AES_MAX_KEY_SIZE,
225  .cia_setkey = aes_set_key,
226  .cia_encrypt = aes_encrypt,
227  .cia_decrypt = aes_decrypt,
228  }
229  }
230 };
231 
232 static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
233  unsigned int len)
234 {
235  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
236  unsigned int ret;
237 
238  sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
239  sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
240  CRYPTO_TFM_REQ_MASK);
241 
242  ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
243  if (ret) {
244  tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
245  tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
246  CRYPTO_TFM_RES_MASK);
247  }
248  return ret;
249 }
250 
251 static int fallback_blk_dec(struct blkcipher_desc *desc,
252  struct scatterlist *dst, struct scatterlist *src,
253  unsigned int nbytes)
254 {
255  unsigned int ret;
256  struct crypto_blkcipher *tfm;
257  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
258 
259  tfm = desc->tfm;
260  desc->tfm = sctx->fallback.blk;
261 
262  ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
263 
264  desc->tfm = tfm;
265  return ret;
266 }
267 
268 static int fallback_blk_enc(struct blkcipher_desc *desc,
269  struct scatterlist *dst, struct scatterlist *src,
270  unsigned int nbytes)
271 {
272  unsigned int ret;
273  struct crypto_blkcipher *tfm;
274  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
275 
276  tfm = desc->tfm;
277  desc->tfm = sctx->fallback.blk;
278 
279  ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
280 
281  desc->tfm = tfm;
282  return ret;
283 }
284 
285 static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
286  unsigned int key_len)
287 {
288  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
289  int ret;
290 
291  ret = need_fallback(key_len);
292  if (ret > 0) {
293  sctx->key_len = key_len;
294  return setkey_fallback_blk(tfm, in_key, key_len);
295  }
296 
297  switch (key_len) {
298  case 16:
299  sctx->enc = KM_AES_128_ENCRYPT;
300  sctx->dec = KM_AES_128_DECRYPT;
301  break;
302  case 24:
303  sctx->enc = KM_AES_192_ENCRYPT;
304  sctx->dec = KM_AES_192_DECRYPT;
305  break;
306  case 32:
307  sctx->enc = KM_AES_256_ENCRYPT;
308  sctx->dec = KM_AES_256_DECRYPT;
309  break;
310  }
311 
312  return aes_set_key(tfm, in_key, key_len);
313 }
314 
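/*
 * Common ECB walker: processes the scatterlist data via the blkcipher walk,
 * feeding only complete AES blocks to a single KM invocation per walk step
 * and handing the remaining tail bytes back to blkcipher_walk_done().
 */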
315 static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
316  struct blkcipher_walk *walk)
317 {
318  int ret = blkcipher_walk_virt(desc, walk);
319  unsigned int nbytes;
320 
321  while ((nbytes = walk->nbytes)) {
322  /* only use complete blocks */
323  unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
324  u8 *out = walk->dst.virt.addr;
325  u8 *in = walk->src.virt.addr;
326 
327  ret = crypt_s390_km(func, param, out, in, n);
328  BUG_ON((ret < 0) || (ret != n));
329 
330  nbytes &= AES_BLOCK_SIZE - 1;
331  ret = blkcipher_walk_done(desc, walk, nbytes);
332  }
333 
334  return ret;
335 }
336 
337 static int ecb_aes_encrypt(struct blkcipher_desc *desc,
338  struct scatterlist *dst, struct scatterlist *src,
339  unsigned int nbytes)
340 {
341  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
342  struct blkcipher_walk walk;
343 
344  if (unlikely(need_fallback(sctx->key_len)))
345  return fallback_blk_enc(desc, dst, src, nbytes);
346 
347  blkcipher_walk_init(&walk, dst, src, nbytes);
348  return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
349 }
350 
351 static int ecb_aes_decrypt(struct blkcipher_desc *desc,
352  struct scatterlist *dst, struct scatterlist *src,
353  unsigned int nbytes)
354 {
355  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
356  struct blkcipher_walk walk;
357 
358  if (unlikely(need_fallback(sctx->key_len)))
359  return fallback_blk_dec(desc, dst, src, nbytes);
360 
361  blkcipher_walk_init(&walk, dst, src, nbytes);
362  return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
363 }
364 
365 static int fallback_init_blk(struct crypto_tfm *tfm)
366 {
367  const char *name = tfm->__crt_alg->cra_name;
368  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
369 
370  sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
371  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
372 
373  if (IS_ERR(sctx->fallback.blk)) {
374  pr_err("Allocating AES fallback algorithm %s failed\n",
375  name);
376  return PTR_ERR(sctx->fallback.blk);
377  }
378 
379  return 0;
380 }
381 
382 static void fallback_exit_blk(struct crypto_tfm *tfm)
383 {
384  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
385 
386  crypto_free_blkcipher(sctx->fallback.blk);
387  sctx->fallback.blk = NULL;
388 }
389 
390 static struct crypto_alg ecb_aes_alg = {
391  .cra_name = "ecb(aes)",
392  .cra_driver_name = "ecb-aes-s390",
393  .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
394  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
395  CRYPTO_ALG_NEED_FALLBACK,
396  .cra_blocksize = AES_BLOCK_SIZE,
397  .cra_ctxsize = sizeof(struct s390_aes_ctx),
398  .cra_type = &crypto_blkcipher_type,
399  .cra_module = THIS_MODULE,
400  .cra_init = fallback_init_blk,
401  .cra_exit = fallback_exit_blk,
402  .cra_u = {
403  .blkcipher = {
404  .min_keysize = AES_MIN_KEY_SIZE,
405  .max_keysize = AES_MAX_KEY_SIZE,
406  .setkey = ecb_aes_set_key,
407  .encrypt = ecb_aes_encrypt,
408  .decrypt = ecb_aes_decrypt,
409  }
410  }
411 };
412 
413 static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
414  unsigned int key_len)
415 {
416  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
417  int ret;
418 
419  ret = need_fallback(key_len);
420  if (ret > 0) {
421  sctx->key_len = key_len;
422  return setkey_fallback_blk(tfm, in_key, key_len);
423  }
424 
425  switch (key_len) {
426  case 16:
427  sctx->enc = KMC_AES_128_ENCRYPT;
428  sctx->dec = KMC_AES_128_DECRYPT;
429  break;
430  case 24:
431  sctx->enc = KMC_AES_192_ENCRYPT;
432  sctx->dec = KMC_AES_192_DECRYPT;
433  break;
434  case 32:
435  sctx->enc = KMC_AES_256_ENCRYPT;
436  sctx->dec = KMC_AES_256_DECRYPT;
437  break;
438  }
439 
440  return aes_set_key(tfm, in_key, key_len);
441 }
442 
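/*
 * Common CBC walker: the IV from the walk is copied into the parameter
 * block (sctx->iv, directly followed by the key in memory), KMC then
 * chains through all complete blocks, and the updated chaining value is
 * copied back into walk->iv once the walk finishes.
 */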
443 static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
444  struct blkcipher_walk *walk)
445 {
446  int ret = blkcipher_walk_virt(desc, walk);
447  unsigned int nbytes = walk->nbytes;
448 
449  if (!nbytes)
450  goto out;
451 
452  memcpy(param, walk->iv, AES_BLOCK_SIZE);
453  do {
454  /* only use complete blocks */
455  unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
456  u8 *out = walk->dst.virt.addr;
457  u8 *in = walk->src.virt.addr;
458 
459  ret = crypt_s390_kmc(func, param, out, in, n);
460  BUG_ON((ret < 0) || (ret != n));
461 
462  nbytes &= AES_BLOCK_SIZE - 1;
463  ret = blkcipher_walk_done(desc, walk, nbytes);
464  } while ((nbytes = walk->nbytes));
465  memcpy(walk->iv, param, AES_BLOCK_SIZE);
466 
467 out:
468  return ret;
469 }
470 
471 static int cbc_aes_encrypt(struct blkcipher_desc *desc,
472  struct scatterlist *dst, struct scatterlist *src,
473  unsigned int nbytes)
474 {
475  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
476  struct blkcipher_walk walk;
477 
478  if (unlikely(need_fallback(sctx->key_len)))
479  return fallback_blk_enc(desc, dst, src, nbytes);
480 
481  blkcipher_walk_init(&walk, dst, src, nbytes);
482  return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
483 }
484 
485 static int cbc_aes_decrypt(struct blkcipher_desc *desc,
486  struct scatterlist *dst, struct scatterlist *src,
487  unsigned int nbytes)
488 {
489  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
490  struct blkcipher_walk walk;
491 
492  if (unlikely(need_fallback(sctx->key_len)))
493  return fallback_blk_dec(desc, dst, src, nbytes);
494 
495  blkcipher_walk_init(&walk, dst, src, nbytes);
496  return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
497 }
498 
499 static struct crypto_alg cbc_aes_alg = {
500  .cra_name = "cbc(aes)",
501  .cra_driver_name = "cbc-aes-s390",
502  .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
503  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
504  CRYPTO_ALG_NEED_FALLBACK,
505  .cra_blocksize = AES_BLOCK_SIZE,
506  .cra_ctxsize = sizeof(struct s390_aes_ctx),
507  .cra_type = &crypto_blkcipher_type,
508  .cra_module = THIS_MODULE,
509  .cra_init = fallback_init_blk,
510  .cra_exit = fallback_exit_blk,
511  .cra_u = {
512  .blkcipher = {
513  .min_keysize = AES_MIN_KEY_SIZE,
514  .max_keysize = AES_MAX_KEY_SIZE,
515  .ivsize = AES_BLOCK_SIZE,
516  .setkey = cbc_aes_set_key,
517  .encrypt = cbc_aes_encrypt,
518  .decrypt = cbc_aes_decrypt,
519  }
520  }
521 };
522 
523 static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
524  unsigned int len)
525 {
526  struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
527  unsigned int ret;
528 
529  xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
530  xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
531  CRYPTO_TFM_REQ_MASK);
532 
533  ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
534  if (ret) {
535  tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
536  tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
537  CRYPTO_TFM_RES_MASK);
538  }
539  return ret;
540 }
541 
542 static int xts_fallback_decrypt(struct blkcipher_desc *desc,
543  struct scatterlist *dst, struct scatterlist *src,
544  unsigned int nbytes)
545 {
546  struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
547  struct crypto_blkcipher *tfm;
548  unsigned int ret;
549 
550  tfm = desc->tfm;
551  desc->tfm = xts_ctx->fallback;
552 
553  ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
554 
555  desc->tfm = tfm;
556  return ret;
557 }
558 
559 static int xts_fallback_encrypt(struct blkcipher_desc *desc,
560  struct scatterlist *dst, struct scatterlist *src,
561  unsigned int nbytes)
562 {
563  struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
564  struct crypto_blkcipher *tfm;
565  unsigned int ret;
566 
567  tfm = desc->tfm;
568  desc->tfm = xts_ctx->fallback;
569 
570  ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
571 
572  desc->tfm = tfm;
573  return ret;
574 }
575 
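/*
 * XTS keys are twice the AES key size: the first half is the data
 * (block-encryption) key and the second half the tweak key, which is
 * stored in the PCC parameter block. A 48-byte key (XTS-AES-192) has no
 * hardware support and is routed to the software fallback instead.
 */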
576 static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
577  unsigned int key_len)
578 {
579  struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
580  u32 *flags = &tfm->crt_flags;
581 
582  switch (key_len) {
583  case 32:
584  xts_ctx->enc = KM_XTS_128_ENCRYPT;
585  xts_ctx->dec = KM_XTS_128_DECRYPT;
586  memcpy(xts_ctx->key + 16, in_key, 16);
587  memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
588  break;
589  case 48:
590  xts_ctx->enc = 0;
591  xts_ctx->dec = 0;
592  xts_fallback_setkey(tfm, in_key, key_len);
593  break;
594  case 64:
595  xts_ctx->enc = KM_XTS_256_ENCRYPT;
596  xts_ctx->dec = KM_XTS_256_DECRYPT;
597  memcpy(xts_ctx->key, in_key, 32);
598  memcpy(xts_ctx->pcc.key, in_key + 32, 32);
599  break;
600  default:
601  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
602  return -EINVAL;
603  }
604  xts_ctx->key_len = key_len;
605  return 0;
606 }
607 
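/*
 * XTS walker: PCC first derives the initial XTS parameter from the tweak
 * (walk->iv) and the tweak key; the result is copied into xts_param so
 * that key + xts_param form the KM parameter block, and KM then processes
 * the complete blocks. The offset selects the 128- or 256-bit layout
 * within the 32-byte key buffers.
 */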
608 static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
609  struct s390_xts_ctx *xts_ctx,
610  struct blkcipher_walk *walk)
611 {
612  unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
613  int ret = blkcipher_walk_virt(desc, walk);
614  unsigned int nbytes = walk->nbytes;
615  unsigned int n;
616  u8 *in, *out;
617  void *param;
618 
619  if (!nbytes)
620  goto out;
621 
622  memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
623  memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
624  memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
625  memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
626  param = xts_ctx->pcc.key + offset;
627  ret = crypt_s390_pcc(func, param);
628  BUG_ON(ret < 0);
629 
630  memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
631  param = xts_ctx->key + offset;
632  do {
633  /* only use complete blocks */
634  n = nbytes & ~(AES_BLOCK_SIZE - 1);
635  out = walk->dst.virt.addr;
636  in = walk->src.virt.addr;
637 
638  ret = crypt_s390_km(func, param, out, in, n);
639  BUG_ON(ret < 0 || ret != n);
640 
641  nbytes &= AES_BLOCK_SIZE - 1;
642  ret = blkcipher_walk_done(desc, walk, nbytes);
643  } while ((nbytes = walk->nbytes));
644 out:
645  return ret;
646 }
647 
648 static int xts_aes_encrypt(struct blkcipher_desc *desc,
649  struct scatterlist *dst, struct scatterlist *src,
650  unsigned int nbytes)
651 {
652  struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
653  struct blkcipher_walk walk;
654 
655  if (unlikely(xts_ctx->key_len == 48))
656  return xts_fallback_encrypt(desc, dst, src, nbytes);
657 
658  blkcipher_walk_init(&walk, dst, src, nbytes);
659  return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
660 }
661 
662 static int xts_aes_decrypt(struct blkcipher_desc *desc,
663  struct scatterlist *dst, struct scatterlist *src,
664  unsigned int nbytes)
665 {
666  struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
667  struct blkcipher_walk walk;
668 
669  if (unlikely(xts_ctx->key_len == 48))
670  return xts_fallback_decrypt(desc, dst, src, nbytes);
671 
672  blkcipher_walk_init(&walk, dst, src, nbytes);
673  return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
674 }
675 
676 static int xts_fallback_init(struct crypto_tfm *tfm)
677 {
678  const char *name = tfm->__crt_alg->cra_name;
679  struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
680 
681  xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
682  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
683 
684  if (IS_ERR(xts_ctx->fallback)) {
685  pr_err("Allocating XTS fallback algorithm %s failed\n",
686  name);
687  return PTR_ERR(xts_ctx->fallback);
688  }
689  return 0;
690 }
691 
692 static void xts_fallback_exit(struct crypto_tfm *tfm)
693 {
694  struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
695 
696  crypto_free_blkcipher(xts_ctx->fallback);
697  xts_ctx->fallback = NULL;
698 }
699 
700 static struct crypto_alg xts_aes_alg = {
701  .cra_name = "xts(aes)",
702  .cra_driver_name = "xts-aes-s390",
703  .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
704  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
705  CRYPTO_ALG_NEED_FALLBACK,
706  .cra_blocksize = AES_BLOCK_SIZE,
707  .cra_ctxsize = sizeof(struct s390_xts_ctx),
708  .cra_type = &crypto_blkcipher_type,
709  .cra_module = THIS_MODULE,
710  .cra_init = xts_fallback_init,
711  .cra_exit = xts_fallback_exit,
712  .cra_u = {
713  .blkcipher = {
714  .min_keysize = 2 * AES_MIN_KEY_SIZE,
715  .max_keysize = 2 * AES_MAX_KEY_SIZE,
716  .ivsize = AES_BLOCK_SIZE,
717  .setkey = xts_aes_set_key,
718  .encrypt = xts_aes_encrypt,
719  .decrypt = xts_aes_decrypt,
720  }
721  }
722 };
723 
724 static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
725  unsigned int key_len)
726 {
727  struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
728 
729  switch (key_len) {
730  case 16:
731  sctx->enc = KMCTR_AES_128_ENCRYPT;
732  sctx->dec = KMCTR_AES_128_DECRYPT;
733  break;
734  case 24:
735  sctx->enc = KMCTR_AES_192_ENCRYPT;
736  sctx->dec = KMCTR_AES_192_DECRYPT;
737  break;
738  case 32:
739  sctx->enc = KMCTR_AES_256_ENCRYPT;
740  sctx->dec = KMCTR_AES_256_DECRYPT;
741  break;
742  }
743 
744  return aes_set_key(tfm, in_key, key_len);
745 }
746 
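/*
 * CTR walker: a page-sized buffer (ctrblk) is filled with consecutive
 * counter values so that a single KMCTR invocation can process up to
 * PAGE_SIZE bytes at once. A final partial block is encrypted into a
 * stack buffer and only nbytes of it are copied to the destination.
 */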
747 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
748  struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
749 {
750  int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
751  unsigned int i, n, nbytes;
752  u8 buf[AES_BLOCK_SIZE];
753  u8 *out, *in;
754 
755  if (!walk->nbytes)
756  return ret;
757 
758  memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
759  while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
760  out = walk->dst.virt.addr;
761  in = walk->src.virt.addr;
762  while (nbytes >= AES_BLOCK_SIZE) {
763  /* only use complete blocks, max. PAGE_SIZE */
764  n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
765  nbytes & ~(AES_BLOCK_SIZE - 1);
766  for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
767  memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
768  AES_BLOCK_SIZE);
769  crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
770  }
771  ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
772  BUG_ON(ret < 0 || ret != n);
773  if (n > AES_BLOCK_SIZE)
774  memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
775  AES_BLOCK_SIZE);
776  crypto_inc(ctrblk, AES_BLOCK_SIZE);
777  out += n;
778  in += n;
779  nbytes -= n;
780  }
781  ret = blkcipher_walk_done(desc, walk, nbytes);
782  }
783  /*
784  * final block may be < AES_BLOCK_SIZE, copy only nbytes
785  */
786  if (nbytes) {
787  out = walk->dst.virt.addr;
788  in = walk->src.virt.addr;
789  ret = crypt_s390_kmctr(func, sctx->key, buf, in,
790  AES_BLOCK_SIZE, ctrblk);
791  BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
792  memcpy(out, buf, nbytes);
793  crypto_inc(ctrblk, AES_BLOCK_SIZE);
794  ret = blkcipher_walk_done(desc, walk, 0);
795  }
796  memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
797  return ret;
798 }
799 
800 static int ctr_aes_encrypt(struct blkcipher_desc *desc,
801  struct scatterlist *dst, struct scatterlist *src,
802  unsigned int nbytes)
803 {
804  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
805  struct blkcipher_walk walk;
806 
807  blkcipher_walk_init(&walk, dst, src, nbytes);
808  return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
809 }
810 
811 static int ctr_aes_decrypt(struct blkcipher_desc *desc,
812  struct scatterlist *dst, struct scatterlist *src,
813  unsigned int nbytes)
814 {
815  struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
816  struct blkcipher_walk walk;
817 
818  blkcipher_walk_init(&walk, dst, src, nbytes);
819  return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
820 }
821 
822 static struct crypto_alg ctr_aes_alg = {
823  .cra_name = "ctr(aes)",
824  .cra_driver_name = "ctr-aes-s390",
825  .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
826  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
827  .cra_blocksize = 1,
828  .cra_ctxsize = sizeof(struct s390_aes_ctx),
829  .cra_type = &crypto_blkcipher_type,
830  .cra_module = THIS_MODULE,
831  .cra_u = {
832  .blkcipher = {
833  .min_keysize = AES_MIN_KEY_SIZE,
834  .max_keysize = AES_MAX_KEY_SIZE,
835  .ivsize = AES_BLOCK_SIZE,
836  .setkey = ctr_aes_set_key,
837  .encrypt = ctr_aes_encrypt,
838  .decrypt = ctr_aes_decrypt,
839  }
840  }
841 };
842 
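/*
 * Module init: probe which CPACF function codes are available, register
 * the plain/ECB/CBC algorithms, and additionally register XTS and CTR
 * only when the corresponding KM-XTS and KMCTR functions exist. The
 * shared ctrblk page is allocated here for use by ctr_aes_crypt().
 */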
843 static int __init aes_s390_init(void)
844 {
845  int ret;
846 
847  if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
848  keylen_flag |= AES_KEYLEN_128;
849  if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
850  keylen_flag |= AES_KEYLEN_192;
851  if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
852  keylen_flag |= AES_KEYLEN_256;
853 
854  if (!keylen_flag)
855  return -EOPNOTSUPP;
856 
857  /* z9 109 and z9 BC/EC only support 128 bit key length */
858  if (keylen_flag == AES_KEYLEN_128)
859  pr_info("AES hardware acceleration is only available for"
860  " 128-bit keys\n");
861 
862  ret = crypto_register_alg(&aes_alg);
863  if (ret)
864  goto aes_err;
865 
866  ret = crypto_register_alg(&ecb_aes_alg);
867  if (ret)
868  goto ecb_aes_err;
869 
870  ret = crypto_register_alg(&cbc_aes_alg);
871  if (ret)
872  goto cbc_aes_err;
873 
874  if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
875  CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
876  crypt_s390_func_available(KM_XTS_256_ENCRYPT,
877  CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
878  ret = crypto_register_alg(&xts_aes_alg);
879  if (ret)
880  goto xts_aes_err;
881  }
882 
883  if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
884  CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
885  crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
886  CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
887  crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
888  CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
889  ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
890  if (!ctrblk) {
891  ret = -ENOMEM;
892  goto ctr_aes_err;
893  }
894  ret = crypto_register_alg(&ctr_aes_alg);
895  if (ret) {
896  free_page((unsigned long) ctrblk);
897  goto ctr_aes_err;
898  }
899  }
900 
901 out:
902  return ret;
903 
904 ctr_aes_err:
905  crypto_unregister_alg(&xts_aes_alg);
906 xts_aes_err:
907  crypto_unregister_alg(&cbc_aes_alg);
908 cbc_aes_err:
909  crypto_unregister_alg(&ecb_aes_alg);
910 ecb_aes_err:
911  crypto_unregister_alg(&aes_alg);
912 aes_err:
913  goto out;
914 }
915 
916 static void __exit aes_s390_fini(void)
917 {
918  crypto_unregister_alg(&ctr_aes_alg);
919  free_page((unsigned long) ctrblk);
920  crypto_unregister_alg(&xts_aes_alg);
921  crypto_unregister_alg(&cbc_aes_alg);
922  crypto_unregister_alg(&ecb_aes_alg);
923  crypto_unregister_alg(&aes_alg);
924 }
925 
926 module_init(aes_s390_init);
927 module_exit(aes_s390_fini);
928 
929 MODULE_ALIAS("aes-all");
930 
931 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
932 MODULE_LICENSE("GPL");