Linux Kernel 3.7.1
aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  * Author: Huang Ying <[email protected]>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  * Authors: Adrian Hoban <[email protected]>
11  * Gabriele Paoloni <[email protected]>
12  * Tadeusz Struk ([email protected])
13  * Aidan O'Mahony ([email protected])
14  * Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21 
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/i387.h>
36 #include <asm/crypto/aes.h>
37 #include <asm/crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 
43 #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
44 #define HAS_CTR
45 #endif
46 
47 #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
48 #define HAS_PCBC
49 #endif
50 
51 /* This data is stored at the end of the crypto_tfm struct.
52  * It's a type of per "session" data storage location.
53  * This needs to be 16 byte aligned.
54  */
55 struct aesni_rfc4106_gcm_ctx {
56  u8 hash_subkey[16];
57  struct crypto_aes_ctx aes_key_expanded;
58  u8 nonce[4];
59  struct cryptd_aead *cryptd_tfm;
60 };
61 
62 struct aesni_gcm_set_hash_subkey_result {
63  int err;
64  struct completion completion;
65 };
66 
67 struct aesni_hash_subkey_req_data {
68  u8 iv[16];
69  struct aesni_gcm_set_hash_subkey_result result;
70  struct scatterlist sg;
71 };
72 
73 #define AESNI_ALIGN (16)
74 #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
75 #define RFC4106_HASH_SUBKEY_SIZE 16
76 
77 struct aesni_lrw_ctx {
78  struct lrw_table_ctx lrw_table;
79  u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80 };
81 
82 struct aesni_xts_ctx {
83  u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
84  u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
85 };
86 
87 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
88  unsigned int key_len);
89 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
90  const u8 *in);
91 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
92  const u8 *in);
93 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
94  const u8 *in, unsigned int len);
95 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
96  const u8 *in, unsigned int len);
97 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
98  const u8 *in, unsigned int len, u8 *iv);
99 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
100  const u8 *in, unsigned int len, u8 *iv);
101 
102 int crypto_fpu_init(void);
103 void crypto_fpu_exit(void);
104 
105 #ifdef CONFIG_X86_64
106 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
107  const u8 *in, unsigned int len, u8 *iv);
108 
109 /* asmlinkage void aesni_gcm_enc()
110  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
111  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
112  * const u8 *in, Plaintext input
113  * unsigned long plaintext_len, Length of data in bytes for encryption.
114  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
115  * concatenated with 8 byte Initialisation Vector (from IPSec ESP
116  * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
117  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
118  * const u8 *aad, Additional Authentication Data (AAD)
119  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
120  * is going to be 8 or 12 bytes
121  * u8 *auth_tag, Authenticated Tag output.
122  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
123  * Valid values are 16 (most likely), 12 or 8.
124  */
125 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
126  const u8 *in, unsigned long plaintext_len, u8 *iv,
127  u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
128  u8 *auth_tag, unsigned long auth_tag_len);
129 
130 /* asmlinkage void aesni_gcm_dec()
131  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
132  * u8 *out, Plaintext output. Decrypt in-place is allowed.
133  * const u8 *in, Ciphertext input
134  * unsigned long ciphertext_len, Length of data in bytes for decryption.
135  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
136  * concatenated with 8 byte Initialisation Vector (from IPSec ESP
137  * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
138  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
139  * const u8 *aad, Additional Authentication Data (AAD)
140  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
141  * to be 8 or 12 bytes
142  * u8 *auth_tag, Authenticated Tag output.
143  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
144  * Valid values are 16 (most likely), 12 or 8.
145  */
146 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
147  const u8 *in, unsigned long ciphertext_len, u8 *iv,
148  u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
149  u8 *auth_tag, unsigned long auth_tag_len);
150 
151 static inline struct
152 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
153 {
154  return
155  (struct aesni_rfc4106_gcm_ctx *)
156  PTR_ALIGN((u8 *)
157  crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
158 }
159 #endif
160 
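/*
 * aes_ctx() - return the 16-byte-aligned struct crypto_aes_ctx inside a raw
 * context buffer.  If the crypto API already guarantees at least AESNI_ALIGN
 * alignment for tfm contexts, the address is used as-is; otherwise it is
 * rounded up to the next 16-byte boundary.
 */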
161 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
162 {
163  unsigned long addr = (unsigned long)raw_ctx;
164  unsigned long align = AESNI_ALIGN;
165 
166  if (align <= crypto_tfm_ctx_alignment())
167  align = 1;
168  return (struct crypto_aes_ctx *)ALIGN(addr, align);
169 }
170 
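/*
 * Common setkey helper: validates the AES key length and expands the key
 * schedule.  When the FPU may be used in the current context the AES-NI
 * assembler routine aesni_set_key() runs inside
 * kernel_fpu_begin()/kernel_fpu_end(); otherwise the generic C key expansion
 * is used as a fallback.
 */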
171 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
172  const u8 *in_key, unsigned int key_len)
173 {
174  struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
175  u32 *flags = &tfm->crt_flags;
176  int err;
177 
178  if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
179  key_len != AES_KEYSIZE_256) {
180  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
181  return -EINVAL;
182  }
183 
184  if (!irq_fpu_usable())
185  err = crypto_aes_expand_key(ctx, in_key, key_len);
186  else {
187  kernel_fpu_begin();
188  err = aesni_set_key(ctx, in_key, key_len);
189  kernel_fpu_end();
190  }
191 
192  return err;
193 }
194 
195 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
196  unsigned int key_len)
197 {
198  return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
199 }
200 
201 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
202 {
203  struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
204 
205  if (!irq_fpu_usable())
206  crypto_aes_encrypt_x86(ctx, dst, src);
207  else {
208  kernel_fpu_begin();
209  aesni_enc(ctx, dst, src);
210  kernel_fpu_end();
211  }
212 }
213 
214 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
215 {
216  struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
217 
218  if (!irq_fpu_usable())
219  crypto_aes_decrypt_x86(ctx, dst, src);
220  else {
221  kernel_fpu_begin();
222  aesni_dec(ctx, dst, src);
223  kernel_fpu_end();
224  }
225 }
226 
227 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
228 {
229  struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
230 
231  aesni_enc(ctx, dst, src);
232 }
233 
234 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
235 {
236  struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
237 
238  aesni_dec(ctx, dst, src);
239 }
240 
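/*
 * The ECB/CBC helpers below all follow the same pattern: walk the
 * scatterlists with the blkcipher walk API and, for each step, hand all
 * complete AES blocks (nbytes & AES_BLOCK_MASK) to the assembler routine
 * inside a single kernel_fpu_begin()/kernel_fpu_end() section.  Any
 * remainder smaller than one block is left for blkcipher_walk_done().
 */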
241 static int ecb_encrypt(struct blkcipher_desc *desc,
242  struct scatterlist *dst, struct scatterlist *src,
243  unsigned int nbytes)
244 {
245  struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
246  struct blkcipher_walk walk;
247  int err;
248 
249  blkcipher_walk_init(&walk, dst, src, nbytes);
250  err = blkcipher_walk_virt(desc, &walk);
251  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
252 
253  kernel_fpu_begin();
254  while ((nbytes = walk.nbytes)) {
255  aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
256  nbytes & AES_BLOCK_MASK);
257  nbytes &= AES_BLOCK_SIZE - 1;
258  err = blkcipher_walk_done(desc, &walk, nbytes);
259  }
260  kernel_fpu_end();
261 
262  return err;
263 }
264 
265 static int ecb_decrypt(struct blkcipher_desc *desc,
266  struct scatterlist *dst, struct scatterlist *src,
267  unsigned int nbytes)
268 {
269  struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
270  struct blkcipher_walk walk;
271  int err;
272 
273  blkcipher_walk_init(&walk, dst, src, nbytes);
274  err = blkcipher_walk_virt(desc, &walk);
275  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
276 
277  kernel_fpu_begin();
278  while ((nbytes = walk.nbytes)) {
279  aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
280  nbytes & AES_BLOCK_MASK);
281  nbytes &= AES_BLOCK_SIZE - 1;
282  err = blkcipher_walk_done(desc, &walk, nbytes);
283  }
284  kernel_fpu_end();
285 
286  return err;
287 }
288 
289 static int cbc_encrypt(struct blkcipher_desc *desc,
290  struct scatterlist *dst, struct scatterlist *src,
291  unsigned int nbytes)
292 {
293  struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
294  struct blkcipher_walk walk;
295  int err;
296 
297  blkcipher_walk_init(&walk, dst, src, nbytes);
298  err = blkcipher_walk_virt(desc, &walk);
299  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
300 
301  kernel_fpu_begin();
302  while ((nbytes = walk.nbytes)) {
303  aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
304  nbytes & AES_BLOCK_MASK, walk.iv);
305  nbytes &= AES_BLOCK_SIZE - 1;
306  err = blkcipher_walk_done(desc, &walk, nbytes);
307  }
308  kernel_fpu_end();
309 
310  return err;
311 }
312 
313 static int cbc_decrypt(struct blkcipher_desc *desc,
314  struct scatterlist *dst, struct scatterlist *src,
315  unsigned int nbytes)
316 {
317  struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
318  struct blkcipher_walk walk;
319  int err;
320 
321  blkcipher_walk_init(&walk, dst, src, nbytes);
322  err = blkcipher_walk_virt(desc, &walk);
323  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
324 
325  kernel_fpu_begin();
326  while ((nbytes = walk.nbytes)) {
327  aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
328  nbytes & AES_BLOCK_MASK, walk.iv);
329  nbytes &= AES_BLOCK_SIZE - 1;
330  err = blkcipher_walk_done(desc, &walk, nbytes);
331  }
332  kernel_fpu_end();
333 
334  return err;
335 }
336 
337 #ifdef CONFIG_X86_64
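/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block to produce one keystream block, XOR only the remaining
 * nbytes of input against it, and advance the counter.
 */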
338 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
339  struct blkcipher_walk *walk)
340 {
341  u8 *ctrblk = walk->iv;
342  u8 keystream[AES_BLOCK_SIZE];
343  u8 *src = walk->src.virt.addr;
344  u8 *dst = walk->dst.virt.addr;
345  unsigned int nbytes = walk->nbytes;
346 
347  aesni_enc(ctx, keystream, ctrblk);
348  crypto_xor(keystream, src, nbytes);
349  memcpy(dst, keystream, nbytes);
350  crypto_inc(ctrblk, AES_BLOCK_SIZE);
351 }
352 
353 static int ctr_crypt(struct blkcipher_desc *desc,
354  struct scatterlist *dst, struct scatterlist *src,
355  unsigned int nbytes)
356 {
357  struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
358  struct blkcipher_walk walk;
359  int err;
360 
361  blkcipher_walk_init(&walk, dst, src, nbytes);
362  err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
363  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
364 
365  kernel_fpu_begin();
366  while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
367  aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
368  nbytes & AES_BLOCK_MASK, walk.iv);
369  nbytes &= AES_BLOCK_SIZE - 1;
370  err = blkcipher_walk_done(desc, &walk, nbytes);
371  }
372  if (walk.nbytes) {
373  ctr_crypt_final(ctx, &walk);
374  err = blkcipher_walk_done(desc, &walk, 0);
375  }
376  kernel_fpu_end();
377 
378  return err;
379 }
380 #endif
381 
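/*
 * The ablk_*_init() helpers bind the exported asynchronous "aes-aesni"
 * ablkcipher wrappers to the internal "__driver-*" implementations via
 * cryptd.  Requests that arrive in a context where the FPU is not usable
 * (see irq_fpu_usable()) are deferred to a cryptd worker thread instead of
 * falling back to the slow generic code.
 */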
382 static int ablk_ecb_init(struct crypto_tfm *tfm)
383 {
384  return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
385 }
386 
387 static int ablk_cbc_init(struct crypto_tfm *tfm)
388 {
389  return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
390 }
391 
392 #ifdef CONFIG_X86_64
393 static int ablk_ctr_init(struct crypto_tfm *tfm)
394 {
395  return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
396 }
397 
398 #ifdef HAS_CTR
399 static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
400 {
401  return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
402 }
403 #endif
404 #endif
405 
406 #ifdef HAS_PCBC
407 static int ablk_pcbc_init(struct crypto_tfm *tfm)
408 {
409  return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
410 }
411 #endif
412 
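/*
 * LRW and XTS reuse the generic lrw_crypt()/xts_crypt() walkers.  The two
 * callbacks below process the temporary buffer (up to 8 blocks, see the
 * be128 buf[8] arrays) with the multi-block ECB assembler routines.
 */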
413 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
414 {
415  aesni_ecb_enc(ctx, blks, blks, nbytes);
416 }
417 
418 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
419 {
420  aesni_ecb_dec(ctx, blks, blks, nbytes);
421 }
422 
423 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
424  unsigned int keylen)
425 {
426  struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
427  int err;
428 
429  err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
430  keylen - AES_BLOCK_SIZE);
431  if (err)
432  return err;
433 
434  return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
435 }
436 
437 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
438 {
439  struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
440 
441  lrw_free_table(&ctx->lrw_table);
442 }
443 
444 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
445  struct scatterlist *src, unsigned int nbytes)
446 {
447  struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
448  be128 buf[8];
449  struct lrw_crypt_req req = {
450  .tbuf = buf,
451  .tbuflen = sizeof(buf),
452 
453  .table_ctx = &ctx->lrw_table,
454  .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
455  .crypt_fn = lrw_xts_encrypt_callback,
456  };
457  int ret;
458 
459  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
460 
461  kernel_fpu_begin();
462  ret = lrw_crypt(desc, dst, src, nbytes, &req);
463  kernel_fpu_end();
464 
465  return ret;
466 }
467 
468 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
469  struct scatterlist *src, unsigned int nbytes)
470 {
471  struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
472  be128 buf[8];
473  struct lrw_crypt_req req = {
474  .tbuf = buf,
475  .tbuflen = sizeof(buf),
476 
477  .table_ctx = &ctx->lrw_table,
478  .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
479  .crypt_fn = lrw_xts_decrypt_callback,
480  };
481  int ret;
482 
483  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
484 
485  kernel_fpu_begin();
486  ret = lrw_crypt(desc, dst, src, nbytes, &req);
487  kernel_fpu_end();
488 
489  return ret;
490 }
491 
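/*
 * For xts(aes) the supplied key is the data key followed by the tweak key,
 * both of equal size.  For example, a 32-byte key yields two AES-128 keys
 * and a 64-byte key yields two AES-256 keys.
 */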
492 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
493  unsigned int keylen)
494 {
495  struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
496  u32 *flags = &tfm->crt_flags;
497  int err;
498 
499  /* key consists of keys of equal size concatenated, therefore
500  * the length must be even
501  */
502  if (keylen % 2) {
503  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
504  return -EINVAL;
505  }
506 
507  /* first half of xts-key is for crypt */
508  err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
509  if (err)
510  return err;
511 
512  /* second half of xts-key is for tweak */
513  return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
514  keylen / 2);
515 }
516 
517 
518 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
519 {
520  aesni_enc(ctx, out, in);
521 }
522 
523 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
524  struct scatterlist *src, unsigned int nbytes)
525 {
526  struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
527  be128 buf[8];
528  struct xts_crypt_req req = {
529  .tbuf = buf,
530  .tbuflen = sizeof(buf),
531 
532  .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
533  .tweak_fn = aesni_xts_tweak,
534  .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
535  .crypt_fn = lrw_xts_encrypt_callback,
536  };
537  int ret;
538 
539  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
540 
541  kernel_fpu_begin();
542  ret = xts_crypt(desc, dst, src, nbytes, &req);
543  kernel_fpu_end();
544 
545  return ret;
546 }
547 
548 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
549  struct scatterlist *src, unsigned int nbytes)
550 {
551  struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
552  be128 buf[8];
553  struct xts_crypt_req req = {
554  .tbuf = buf,
555  .tbuflen = sizeof(buf),
556 
557  .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
558  .tweak_fn = aesni_xts_tweak,
559  .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
560  .crypt_fn = lrw_xts_decrypt_callback,
561  };
562  int ret;
563 
564  desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
565 
566  kernel_fpu_begin();
567  ret = xts_crypt(desc, dst, src, nbytes, &req);
568  kernel_fpu_end();
569 
570  return ret;
571 }
572 
573 #ifdef CONFIG_X86_64
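/*
 * RFC4106 (AES-GCM for IPsec ESP) glue.  rfc4106_init() allocates a
 * cryptd-backed instance of the internal "__driver-gcm-aes-aesni" AEAD and
 * mirrors this tfm's context into the cryptd child, so both the direct
 * (FPU usable) and the deferred (cryptd) paths see the same key material.
 */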
574 static int rfc4106_init(struct crypto_tfm *tfm)
575 {
576  struct cryptd_aead *cryptd_tfm;
577  struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
578  PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
579  struct crypto_aead *cryptd_child;
580  struct aesni_rfc4106_gcm_ctx *child_ctx;
581  cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
582  if (IS_ERR(cryptd_tfm))
583  return PTR_ERR(cryptd_tfm);
584 
585  cryptd_child = cryptd_aead_child(cryptd_tfm);
586  child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
587  memcpy(child_ctx, ctx, sizeof(*ctx));
588  ctx->cryptd_tfm = cryptd_tfm;
589  tfm->crt_aead.reqsize = sizeof(struct aead_request)
590  + crypto_aead_reqsize(&cryptd_tfm->base);
591  return 0;
592 }
593 
594 static void rfc4106_exit(struct crypto_tfm *tfm)
595 {
596  struct aesni_rfc4106_gcm_ctx *ctx =
597  (struct aesni_rfc4106_gcm_ctx *)
598  PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
599  if (!IS_ERR(ctx->cryptd_tfm))
600  cryptd_free_aead(ctx->cryptd_tfm);
601  return;
602 }
603 
604 static void
605 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
606 {
607  struct aesni_gcm_set_hash_subkey_result *result = req->data;
608 
609  if (err == -EINPROGRESS)
610  return;
611  result->err = err;
612  complete(&result->completion);
613 }
614 
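/*
 * Derive the GHASH hash subkey H = E_K(0^128).  This is done with a
 * one-shot ctr(aes) request over an all-zero 16-byte buffer using an
 * all-zero counter block, which is equivalent to a single AES encryption
 * of the zero block.
 */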
615 static int
616 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
617 {
618  struct crypto_ablkcipher *ctr_tfm;
619  struct ablkcipher_request *req;
620  int ret = -EINVAL;
621  struct aesni_hash_subkey_req_data *req_data;
622 
623  ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
624  if (IS_ERR(ctr_tfm))
625  return PTR_ERR(ctr_tfm);
626 
627  crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
628 
629  ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
630  if (ret)
631  goto out_free_ablkcipher;
632 
633  ret = -ENOMEM;
634  req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
635  if (!req)
636  goto out_free_ablkcipher;
637 
638  req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
639  if (!req_data)
640  goto out_free_request;
641 
642  memset(req_data->iv, 0, sizeof(req_data->iv));
643 
644  /* Clear the data in the hash sub key container to zero.*/
645  /* We want to cipher all zeros to create the hash sub key. */
646  memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
647 
648  init_completion(&req_data->result.completion);
649  sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
650  ablkcipher_request_set_tfm(req, ctr_tfm);
651  ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
652  CRYPTO_TFM_REQ_MAY_BACKLOG,
653  rfc4106_set_hash_subkey_done,
654  &req_data->result);
655 
656  ablkcipher_request_set_crypt(req, &req_data->sg,
657  &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
658 
659  ret = crypto_ablkcipher_encrypt(req);
660  if (ret == -EINPROGRESS || ret == -EBUSY) {
661  ret = wait_for_completion_interruptible
662  (&req_data->result.completion);
663  if (!ret)
664  ret = req_data->result.err;
665  }
666  kfree(req_data);
667 out_free_request:
668  ablkcipher_request_free(req);
669 out_free_ablkcipher:
670  crypto_free_ablkcipher(ctr_tfm);
671  return ret;
672 }
673 
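/*
 * RFC4106 key layout: the last 4 bytes of the key material are the salt
 * (nonce) used in the pre-counter block; the remainder is the AES key.
 * Only 128-bit AES keys are supported here, so key_len must be 20 bytes.
 */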
674 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
675  unsigned int key_len)
676 {
677  int ret = 0;
678  struct crypto_tfm *tfm = crypto_aead_tfm(parent);
679  struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
680  struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
681  struct aesni_rfc4106_gcm_ctx *child_ctx =
682  aesni_rfc4106_gcm_ctx_get(cryptd_child);
683  u8 *new_key_align, *new_key_mem = NULL;
684 
685  if (key_len < 4) {
686  crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
687  return -EINVAL;
688  }
689  /*Account for 4 byte nonce at the end.*/
690  key_len -= 4;
691  if (key_len != AES_KEYSIZE_128) {
692  crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
693  return -EINVAL;
694  }
695 
696  memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
697  /*This must be on a 16 byte boundary!*/
698  if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
699  return -EINVAL;
700 
701  if ((unsigned long)key % AESNI_ALIGN) {
702  /* key is not aligned: use an auxiliary aligned pointer */
703  new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
704  if (!new_key_mem)
705  return -ENOMEM;
706 
707  new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
708  memcpy(new_key_align, key, key_len);
709  key = new_key_align;
710  }
711 
712  if (!irq_fpu_usable())
713  ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
714  key, key_len);
715  else {
716  kernel_fpu_begin();
717  ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
718  kernel_fpu_end();
719  }
720  /*This must be on a 16 byte boundary!*/
721  if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
722  ret = -EINVAL;
723  goto exit;
724  }
725  ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
726  memcpy(child_ctx, ctx, sizeof(*ctx));
727 exit:
728  kfree(new_key_mem);
729  return ret;
730 }
731 
732 /* This is the Integrity Check Value (aka the authentication tag) length and can
733  * be 8, 12 or 16 bytes long. */
734 static int rfc4106_set_authsize(struct crypto_aead *parent,
735  unsigned int authsize)
736 {
737  struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
738  struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
739 
740  switch (authsize) {
741  case 8:
742  case 12:
743  case 16:
744  break;
745  default:
746  return -EINVAL;
747  }
748  crypto_aead_crt(parent)->authsize = authsize;
749  crypto_aead_crt(cryptd_child)->authsize = authsize;
750  return 0;
751 }
752 
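/*
 * Top-level encrypt/decrypt entry points: if the FPU cannot be used in the
 * current context, the request is re-targeted at the cryptd tfm and queued
 * for a worker thread; otherwise it is handled synchronously by the cryptd
 * child inside kernel_fpu_begin()/kernel_fpu_end().
 */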
753 static int rfc4106_encrypt(struct aead_request *req)
754 {
755  int ret;
756  struct crypto_aead *tfm = crypto_aead_reqtfm(req);
757  struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
758 
759  if (!irq_fpu_usable()) {
760  struct aead_request *cryptd_req =
761  (struct aead_request *) aead_request_ctx(req);
762  memcpy(cryptd_req, req, sizeof(*req));
763  aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
764  return crypto_aead_encrypt(cryptd_req);
765  } else {
766  struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
767  kernel_fpu_begin();
768  ret = cryptd_child->base.crt_aead.encrypt(req);
769  kernel_fpu_end();
770  return ret;
771  }
772 }
773 
774 static int rfc4106_decrypt(struct aead_request *req)
775 {
776  int ret;
777  struct crypto_aead *tfm = crypto_aead_reqtfm(req);
778  struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
779 
780  if (!irq_fpu_usable()) {
781  struct aead_request *cryptd_req =
782  (struct aead_request *) aead_request_ctx(req);
783  memcpy(cryptd_req, req, sizeof(*req));
784  aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
785  return crypto_aead_decrypt(cryptd_req);
786  } else {
787  struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
788  kernel_fpu_begin();
789  ret = cryptd_child->base.crt_aead.decrypt(req);
790  kernel_fpu_end();
791  return ret;
792  }
793 }
794 
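/*
 * The __driver_* helpers build the GCM pre-counter block J0 as
 * 4-byte salt || 8-byte explicit IV || 0x00000001 and then call the
 * assembler GCM routines.  Single-entry scatterlists are mapped and
 * processed in place; anything else is bounced through a temporary buffer
 * allocated with GFP_ATOMIC.
 */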
795 static int __driver_rfc4106_encrypt(struct aead_request *req)
796 {
797  u8 one_entry_in_sg = 0;
798  u8 *src, *dst, *assoc;
799  __be32 counter = cpu_to_be32(1);
800  struct crypto_aead *tfm = crypto_aead_reqtfm(req);
801  struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
802  void *aes_ctx = &(ctx->aes_key_expanded);
803  unsigned long auth_tag_len = crypto_aead_authsize(tfm);
804  u8 iv_tab[16+AESNI_ALIGN];
805  u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
806  struct scatter_walk src_sg_walk;
807  struct scatter_walk assoc_sg_walk;
808  struct scatter_walk dst_sg_walk;
809  unsigned int i;
810 
811  /* Assuming we are supporting rfc4106 64-bit extended */
812  /* sequence numbers, we need the AAD length to be equal */
813  /* to 8 or 12 bytes. */
814  if (unlikely(req->assoclen != 8 && req->assoclen != 12))
815  return -EINVAL;
816  /* Build the IV below */
817  for (i = 0; i < 4; i++)
818  *(iv+i) = ctx->nonce[i];
819  for (i = 0; i < 8; i++)
820  *(iv+4+i) = req->iv[i];
821  *((__be32 *)(iv+12)) = counter;
822 
823  if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
824  one_entry_in_sg = 1;
825  scatterwalk_start(&src_sg_walk, req->src);
826  scatterwalk_start(&assoc_sg_walk, req->assoc);
827  src = scatterwalk_map(&src_sg_walk);
828  assoc = scatterwalk_map(&assoc_sg_walk);
829  dst = src;
830  if (unlikely(req->src != req->dst)) {
831  scatterwalk_start(&dst_sg_walk, req->dst);
832  dst = scatterwalk_map(&dst_sg_walk);
833  }
834 
835  } else {
836  /* Allocate memory for src, dst, assoc */
837  src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
838  GFP_ATOMIC);
839  if (unlikely(!src))
840  return -ENOMEM;
841  assoc = (src + req->cryptlen + auth_tag_len);
842  scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
843  scatterwalk_map_and_copy(assoc, req->assoc, 0,
844  req->assoclen, 0);
845  dst = src;
846  }
847 
848  aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
849  ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
850  + ((unsigned long)req->cryptlen), auth_tag_len);
851 
852  /* The authTag (aka the Integrity Check Value) needs to be written
853  * back to the packet. */
854  if (one_entry_in_sg) {
855  if (unlikely(req->src != req->dst)) {
856  scatterwalk_unmap(dst);
857  scatterwalk_done(&dst_sg_walk, 0, 0);
858  }
859  scatterwalk_unmap(src);
860  scatterwalk_unmap(assoc);
861  scatterwalk_done(&src_sg_walk, 0, 0);
862  scatterwalk_done(&assoc_sg_walk, 0, 0);
863  } else {
864  scatterwalk_map_and_copy(dst, req->dst, 0,
865  req->cryptlen + auth_tag_len, 1);
866  kfree(src);
867  }
868  return 0;
869 }
870 
871 static int __driver_rfc4106_decrypt(struct aead_request *req)
872 {
873  u8 one_entry_in_sg = 0;
874  u8 *src, *dst, *assoc;
875  unsigned long tempCipherLen = 0;
876  __be32 counter = cpu_to_be32(1);
877  int retval = 0;
878  struct crypto_aead *tfm = crypto_aead_reqtfm(req);
879  struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
880  void *aes_ctx = &(ctx->aes_key_expanded);
881  unsigned long auth_tag_len = crypto_aead_authsize(tfm);
882  u8 iv_and_authTag[32+AESNI_ALIGN];
883  u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
884  u8 *authTag = iv + 16;
885  struct scatter_walk src_sg_walk;
886  struct scatter_walk assoc_sg_walk;
887  struct scatter_walk dst_sg_walk;
888  unsigned int i;
889 
890  if (unlikely((req->cryptlen < auth_tag_len) ||
891  (req->assoclen != 8 && req->assoclen != 12)))
892  return -EINVAL;
893  /* Assuming we are supporting rfc4106 64-bit extended */
894  /* sequence numbers, we need the AAD length to be */
895  /* equal to 8 or 12 bytes. */
896 
897  tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
898  /* Build the IV below */
899  for (i = 0; i < 4; i++)
900  *(iv+i) = ctx->nonce[i];
901  for (i = 0; i < 8; i++)
902  *(iv+4+i) = req->iv[i];
903  *((__be32 *)(iv+12)) = counter;
904 
905  if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
906  one_entry_in_sg = 1;
907  scatterwalk_start(&src_sg_walk, req->src);
908  scatterwalk_start(&assoc_sg_walk, req->assoc);
909  src = scatterwalk_map(&src_sg_walk);
910  assoc = scatterwalk_map(&assoc_sg_walk);
911  dst = src;
912  if (unlikely(req->src != req->dst)) {
913  scatterwalk_start(&dst_sg_walk, req->dst);
914  dst = scatterwalk_map(&dst_sg_walk);
915  }
916 
917  } else {
918  /* Allocate memory for src, dst, assoc */
919  src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
920  if (!src)
921  return -ENOMEM;
922  assoc = (src + req->cryptlen + auth_tag_len);
923  scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
924  scatterwalk_map_and_copy(assoc, req->assoc, 0,
925  req->assoclen, 0);
926  dst = src;
927  }
928 
929  aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
930  ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
931  authTag, auth_tag_len);
932 
933  /* Compare generated tag with passed in tag. */
934  retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
935  -EBADMSG : 0;
936 
937  if (one_entry_in_sg) {
938  if (unlikely(req->src != req->dst)) {
939  scatterwalk_unmap(dst);
940  scatterwalk_done(&dst_sg_walk, 0, 0);
941  }
942  scatterwalk_unmap(src);
943  scatterwalk_unmap(assoc);
944  scatterwalk_done(&src_sg_walk, 0, 0);
945  scatterwalk_done(&assoc_sg_walk, 0, 0);
946  } else {
947  scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
948  kfree(src);
949  }
950  return retval;
951 }
952 #endif
953 
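/*
 * Algorithm registrations.  For illustration only (not part of this file):
 * once these are registered, another kernel user could pick up the
 * accelerated CBC implementation through the normal crypto API, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * where "key" is a caller-supplied 16-byte buffer.  The priority of 400 on
 * the exported entries makes them preferred over the generic C
 * implementation when both are available.
 */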
954 static struct crypto_alg aesni_algs[] = { {
955  .cra_name = "aes",
956  .cra_driver_name = "aes-aesni",
957  .cra_priority = 300,
958  .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
959  .cra_blocksize = AES_BLOCK_SIZE,
960  .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
961  AESNI_ALIGN - 1,
962  .cra_alignmask = 0,
963  .cra_module = THIS_MODULE,
964  .cra_u = {
965  .cipher = {
966  .cia_min_keysize = AES_MIN_KEY_SIZE,
967  .cia_max_keysize = AES_MAX_KEY_SIZE,
968  .cia_setkey = aes_set_key,
969  .cia_encrypt = aes_encrypt,
970  .cia_decrypt = aes_decrypt
971  }
972  }
973 }, {
974  .cra_name = "__aes-aesni",
975  .cra_driver_name = "__driver-aes-aesni",
976  .cra_priority = 0,
977  .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
978  .cra_blocksize = AES_BLOCK_SIZE,
979  .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
980  AESNI_ALIGN - 1,
981  .cra_alignmask = 0,
982  .cra_module = THIS_MODULE,
983  .cra_u = {
984  .cipher = {
985  .cia_min_keysize = AES_MIN_KEY_SIZE,
986  .cia_max_keysize = AES_MAX_KEY_SIZE,
987  .cia_setkey = aes_set_key,
988  .cia_encrypt = __aes_encrypt,
989  .cia_decrypt = __aes_decrypt
990  }
991  }
992 }, {
993  .cra_name = "__ecb-aes-aesni",
994  .cra_driver_name = "__driver-ecb-aes-aesni",
995  .cra_priority = 0,
996  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
997  .cra_blocksize = AES_BLOCK_SIZE,
998  .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
999  AESNI_ALIGN - 1,
1000  .cra_alignmask = 0,
1001  .cra_type = &crypto_blkcipher_type,
1002  .cra_module = THIS_MODULE,
1003  .cra_u = {
1004  .blkcipher = {
1005  .min_keysize = AES_MIN_KEY_SIZE,
1006  .max_keysize = AES_MAX_KEY_SIZE,
1007  .setkey = aes_set_key,
1008  .encrypt = ecb_encrypt,
1009  .decrypt = ecb_decrypt,
1010  },
1011  },
1012 }, {
1013  .cra_name = "__cbc-aes-aesni",
1014  .cra_driver_name = "__driver-cbc-aes-aesni",
1015  .cra_priority = 0,
1016  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1017  .cra_blocksize = AES_BLOCK_SIZE,
1018  .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1019  AESNI_ALIGN - 1,
1020  .cra_alignmask = 0,
1021  .cra_type = &crypto_blkcipher_type,
1022  .cra_module = THIS_MODULE,
1023  .cra_u = {
1024  .blkcipher = {
1025  .min_keysize = AES_MIN_KEY_SIZE,
1026  .max_keysize = AES_MAX_KEY_SIZE,
1027  .setkey = aes_set_key,
1028  .encrypt = cbc_encrypt,
1029  .decrypt = cbc_decrypt,
1030  },
1031  },
1032 }, {
1033  .cra_name = "ecb(aes)",
1034  .cra_driver_name = "ecb-aes-aesni",
1035  .cra_priority = 400,
1036  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1037  .cra_blocksize = AES_BLOCK_SIZE,
1038  .cra_ctxsize = sizeof(struct async_helper_ctx),
1039  .cra_alignmask = 0,
1040  .cra_type = &crypto_ablkcipher_type,
1041  .cra_module = THIS_MODULE,
1042  .cra_init = ablk_ecb_init,
1043  .cra_exit = ablk_exit,
1044  .cra_u = {
1045  .ablkcipher = {
1046  .min_keysize = AES_MIN_KEY_SIZE,
1047  .max_keysize = AES_MAX_KEY_SIZE,
1048  .setkey = ablk_set_key,
1049  .encrypt = ablk_encrypt,
1050  .decrypt = ablk_decrypt,
1051  },
1052  },
1053 }, {
1054  .cra_name = "cbc(aes)",
1055  .cra_driver_name = "cbc-aes-aesni",
1056  .cra_priority = 400,
1057  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1058  .cra_blocksize = AES_BLOCK_SIZE,
1059  .cra_ctxsize = sizeof(struct async_helper_ctx),
1060  .cra_alignmask = 0,
1061  .cra_type = &crypto_ablkcipher_type,
1062  .cra_module = THIS_MODULE,
1063  .cra_init = ablk_cbc_init,
1064  .cra_exit = ablk_exit,
1065  .cra_u = {
1066  .ablkcipher = {
1067  .min_keysize = AES_MIN_KEY_SIZE,
1068  .max_keysize = AES_MAX_KEY_SIZE,
1069  .ivsize = AES_BLOCK_SIZE,
1070  .setkey = ablk_set_key,
1071  .encrypt = ablk_encrypt,
1072  .decrypt = ablk_decrypt,
1073  },
1074  },
1075 #ifdef CONFIG_X86_64
1076 }, {
1077  .cra_name = "__ctr-aes-aesni",
1078  .cra_driver_name = "__driver-ctr-aes-aesni",
1079  .cra_priority = 0,
1080  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1081  .cra_blocksize = 1,
1082  .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1083  AESNI_ALIGN - 1,
1084  .cra_alignmask = 0,
1085  .cra_type = &crypto_blkcipher_type,
1086  .cra_module = THIS_MODULE,
1087  .cra_u = {
1088  .blkcipher = {
1089  .min_keysize = AES_MIN_KEY_SIZE,
1090  .max_keysize = AES_MAX_KEY_SIZE,
1091  .ivsize = AES_BLOCK_SIZE,
1092  .setkey = aes_set_key,
1093  .encrypt = ctr_crypt,
1094  .decrypt = ctr_crypt,
1095  },
1096  },
1097 }, {
1098  .cra_name = "ctr(aes)",
1099  .cra_driver_name = "ctr-aes-aesni",
1100  .cra_priority = 400,
1101  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1102  .cra_blocksize = 1,
1103  .cra_ctxsize = sizeof(struct async_helper_ctx),
1104  .cra_alignmask = 0,
1105  .cra_type = &crypto_ablkcipher_type,
1106  .cra_module = THIS_MODULE,
1107  .cra_init = ablk_ctr_init,
1108  .cra_exit = ablk_exit,
1109  .cra_u = {
1110  .ablkcipher = {
1111  .min_keysize = AES_MIN_KEY_SIZE,
1112  .max_keysize = AES_MAX_KEY_SIZE,
1113  .ivsize = AES_BLOCK_SIZE,
1114  .setkey = ablk_set_key,
1115  .encrypt = ablk_encrypt,
1116  .decrypt = ablk_encrypt,
1117  .geniv = "chainiv",
1118  },
1119  },
1120 }, {
1121  .cra_name = "__gcm-aes-aesni",
1122  .cra_driver_name = "__driver-gcm-aes-aesni",
1123  .cra_priority = 0,
1124  .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1125  .cra_blocksize = 1,
1126  .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1127  AESNI_ALIGN,
1128  .cra_alignmask = 0,
1129  .cra_type = &crypto_aead_type,
1130  .cra_module = THIS_MODULE,
1131  .cra_u = {
1132  .aead = {
1133  .encrypt = __driver_rfc4106_encrypt,
1134  .decrypt = __driver_rfc4106_decrypt,
1135  },
1136  },
1137 }, {
1138  .cra_name = "rfc4106(gcm(aes))",
1139  .cra_driver_name = "rfc4106-gcm-aesni",
1140  .cra_priority = 400,
1141  .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1142  .cra_blocksize = 1,
1143  .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1144  AESNI_ALIGN,
1145  .cra_alignmask = 0,
1146  .cra_type = &crypto_nivaead_type,
1147  .cra_module = THIS_MODULE,
1148  .cra_init = rfc4106_init,
1149  .cra_exit = rfc4106_exit,
1150  .cra_u = {
1151  .aead = {
1152  .setkey = rfc4106_set_key,
1153  .setauthsize = rfc4106_set_authsize,
1154  .encrypt = rfc4106_encrypt,
1155  .decrypt = rfc4106_decrypt,
1156  .geniv = "seqiv",
1157  .ivsize = 8,
1158  .maxauthsize = 16,
1159  },
1160  },
1161 #ifdef HAS_CTR
1162 }, {
1163  .cra_name = "rfc3686(ctr(aes))",
1164  .cra_driver_name = "rfc3686-ctr-aes-aesni",
1165  .cra_priority = 400,
1166  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1167  .cra_blocksize = 1,
1168  .cra_ctxsize = sizeof(struct async_helper_ctx),
1169  .cra_alignmask = 0,
1170  .cra_type = &crypto_ablkcipher_type,
1171  .cra_module = THIS_MODULE,
1172  .cra_init = ablk_rfc3686_ctr_init,
1173  .cra_exit = ablk_exit,
1174  .cra_u = {
1175  .ablkcipher = {
1176  .min_keysize = AES_MIN_KEY_SIZE +
1177  CTR_RFC3686_NONCE_SIZE,
1178  .max_keysize = AES_MAX_KEY_SIZE +
1179  CTR_RFC3686_NONCE_SIZE,
1180  .ivsize = CTR_RFC3686_IV_SIZE,
1181  .setkey = ablk_set_key,
1182  .encrypt = ablk_encrypt,
1183  .decrypt = ablk_decrypt,
1184  .geniv = "seqiv",
1185  },
1186  },
1187 #endif
1188 #endif
1189 #ifdef HAS_PCBC
1190 }, {
1191  .cra_name = "pcbc(aes)",
1192  .cra_driver_name = "pcbc-aes-aesni",
1193  .cra_priority = 400,
1194  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1195  .cra_blocksize = AES_BLOCK_SIZE,
1196  .cra_ctxsize = sizeof(struct async_helper_ctx),
1197  .cra_alignmask = 0,
1198  .cra_type = &crypto_ablkcipher_type,
1199  .cra_module = THIS_MODULE,
1200  .cra_init = ablk_pcbc_init,
1201  .cra_exit = ablk_exit,
1202  .cra_u = {
1203  .ablkcipher = {
1204  .min_keysize = AES_MIN_KEY_SIZE,
1205  .max_keysize = AES_MAX_KEY_SIZE,
1206  .ivsize = AES_BLOCK_SIZE,
1207  .setkey = ablk_set_key,
1208  .encrypt = ablk_encrypt,
1209  .decrypt = ablk_decrypt,
1210  },
1211  },
1212 #endif
1213 }, {
1214  .cra_name = "__lrw-aes-aesni",
1215  .cra_driver_name = "__driver-lrw-aes-aesni",
1216  .cra_priority = 0,
1217  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1218  .cra_blocksize = AES_BLOCK_SIZE,
1219  .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
1220  .cra_alignmask = 0,
1221  .cra_type = &crypto_blkcipher_type,
1222  .cra_module = THIS_MODULE,
1223  .cra_exit = lrw_aesni_exit_tfm,
1224  .cra_u = {
1225  .blkcipher = {
1226  .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1227  .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1228  .ivsize = AES_BLOCK_SIZE,
1229  .setkey = lrw_aesni_setkey,
1230  .encrypt = lrw_encrypt,
1231  .decrypt = lrw_decrypt,
1232  },
1233  },
1234 }, {
1235  .cra_name = "__xts-aes-aesni",
1236  .cra_driver_name = "__driver-xts-aes-aesni",
1237  .cra_priority = 0,
1238  .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1239  .cra_blocksize = AES_BLOCK_SIZE,
1240  .cra_ctxsize = sizeof(struct aesni_xts_ctx),
1241  .cra_alignmask = 0,
1242  .cra_type = &crypto_blkcipher_type,
1243  .cra_module = THIS_MODULE,
1244  .cra_u = {
1245  .blkcipher = {
1246  .min_keysize = 2 * AES_MIN_KEY_SIZE,
1247  .max_keysize = 2 * AES_MAX_KEY_SIZE,
1248  .ivsize = AES_BLOCK_SIZE,
1249  .setkey = xts_aesni_setkey,
1250  .encrypt = xts_encrypt,
1251  .decrypt = xts_decrypt,
1252  },
1253  },
1254 }, {
1255  .cra_name = "lrw(aes)",
1256  .cra_driver_name = "lrw-aes-aesni",
1257  .cra_priority = 400,
1258  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1259  .cra_blocksize = AES_BLOCK_SIZE,
1260  .cra_ctxsize = sizeof(struct async_helper_ctx),
1261  .cra_alignmask = 0,
1262  .cra_type = &crypto_ablkcipher_type,
1263  .cra_module = THIS_MODULE,
1264  .cra_init = ablk_init,
1265  .cra_exit = ablk_exit,
1266  .cra_u = {
1267  .ablkcipher = {
1268  .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1269  .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1270  .ivsize = AES_BLOCK_SIZE,
1271  .setkey = ablk_set_key,
1272  .encrypt = ablk_encrypt,
1273  .decrypt = ablk_decrypt,
1274  },
1275  },
1276 }, {
1277  .cra_name = "xts(aes)",
1278  .cra_driver_name = "xts-aes-aesni",
1279  .cra_priority = 400,
1280  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1281  .cra_blocksize = AES_BLOCK_SIZE,
1282  .cra_ctxsize = sizeof(struct async_helper_ctx),
1283  .cra_alignmask = 0,
1284  .cra_type = &crypto_ablkcipher_type,
1285  .cra_module = THIS_MODULE,
1286  .cra_init = ablk_init,
1287  .cra_exit = ablk_exit,
1288  .cra_u = {
1289  .ablkcipher = {
1290  .min_keysize = 2 * AES_MIN_KEY_SIZE,
1291  .max_keysize = 2 * AES_MAX_KEY_SIZE,
1292  .ivsize = AES_BLOCK_SIZE,
1293  .setkey = ablk_set_key,
1294  .encrypt = ablk_encrypt,
1295  .decrypt = ablk_decrypt,
1296  },
1297  },
1298 } };
1299 
1300 
1301 static const struct x86_cpu_id aesni_cpu_id[] = {
1302  X86_FEATURE_MATCH(X86_FEATURE_AES),
1303  {}
1304 };
1305 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1306 
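/*
 * Module init: bail out unless the CPU advertises the AES-NI feature bit,
 * register the "fpu" template helper used by the PCBC wrapper, then
 * register every algorithm in aesni_algs[] in one call.
 */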
1307 static int __init aesni_init(void)
1308 {
1309  int err;
1310 
1311  if (!x86_match_cpu(aesni_cpu_id))
1312  return -ENODEV;
1313 
1314  err = crypto_fpu_init();
1315  if (err)
1316  return err;
1317 
1318  return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1319 }
1320 
1321 static void __exit aesni_exit(void)
1322 {
1323  crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1324 
1325  crypto_fpu_exit();
1326 }
1327 
1328 module_init(aesni_init);
1329 module_exit(aesni_exit);
1330 
1331 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1332 MODULE_LICENSE("GPL");
1333 MODULE_ALIAS("aes");