Linux Kernel  3.7.1
cast6_avx_glue.c
/*
 * Glue Code for the AVX assembler implementation of the Cast6 Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>

#define CAST6_PARALLEL_BLOCKS 8

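/*
 * 8-way parallel cipher routines provided by the AVX assembler
 * implementation; each call processes CAST6_PARALLEL_BLOCKS blocks at once.
 */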
asmlinkage void __cast6_enc_blk_8way(struct cast6_ctx *ctx, u8 *dst,
				     const u8 *src, bool xor);
asmlinkage void cast6_dec_blk_8way(struct cast6_ctx *ctx, u8 *dst,
				   const u8 *src);

static inline void cast6_enc_blk_xway(struct cast6_ctx *ctx, u8 *dst,
				      const u8 *src)
{
	__cast6_enc_blk_8way(ctx, dst, src, false);
}

static inline void cast6_enc_blk_xway_xor(struct cast6_ctx *ctx, u8 *dst,
					  const u8 *src)
{
	__cast6_enc_blk_8way(ctx, dst, src, true);
}

static inline void cast6_dec_blk_xway(struct cast6_ctx *ctx, u8 *dst,
				      const u8 *src)
{
	cast6_dec_blk_8way(ctx, dst, src);
}

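/*
 * CBC decryption over 8 blocks: decrypt all blocks in one call, then XOR
 * each plaintext block (except the first) with the preceding ciphertext
 * block, which was saved in ivs[] before the in-place decryption.
 */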
static void cast6_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[CAST6_PARALLEL_BLOCKS - 1];
	unsigned int j;

	for (j = 0; j < CAST6_PARALLEL_BLOCKS - 1; j++)
		ivs[j] = src[j];

	cast6_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

	for (j = 0; j < CAST6_PARALLEL_BLOCKS - 1; j++)
		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}

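/*
 * CTR mode helpers: encrypt the big-endian counter block and XOR the result
 * into the data; the single-block and 8-way variants share this pattern.
 */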
static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
{
	be128 ctrblk;

	u128_to_be128(&ctrblk, iv);
	u128_inc(iv);

	__cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}

static void cast6_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
				 u128 *iv)
{
	be128 ctrblks[CAST6_PARALLEL_BLOCKS];
	unsigned int i;

	for (i = 0; i < CAST6_PARALLEL_BLOCKS; i++) {
		if (dst != src)
			dst[i] = src[i];

		u128_to_be128(&ctrblks[i], iv);
		u128_inc(iv);
	}

	cast6_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}

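/*
 * Dispatch tables for the shared glue_helper code: use the 8-way routines
 * whenever at least CAST6_PARALLEL_BLOCKS blocks remain, and fall back to
 * the generic one-block CAST6 routines for the tail.
 */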
static const struct common_glue_ctx cast6_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = CAST6_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
	} }
};

static const struct common_glue_ctx cast6_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = CAST6_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
	} }
};

static const struct common_glue_ctx cast6_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = CAST6_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
	} }
};

static const struct common_glue_ctx cast6_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = CAST6_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
	} }
};

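/* blkcipher entry points; all of them delegate to the 128-bit glue helpers */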
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&cast6_dec, desc, dst, src, nbytes);
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__cast6_encrypt), desc,
				       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&cast6_dec_cbc, desc, dst, src,
				       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&cast6_ctr, desc, dst, src, nbytes);
}

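/*
 * FPU/AVX region management: glue_fpu_begin() only enables the FPU when the
 * remaining data is large enough to use the 8-way path, and cast6_fpu_end()
 * releases it again.
 */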
static inline bool cast6_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}

static inline void cast6_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}

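/* Per-request state handed to the LRW/XTS callbacks below. */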
struct crypt_priv {
	struct cast6_ctx *ctx;
	bool fpu_enabled;
};

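/*
 * lrw_crypt()/xts_crypt() call back into these helpers for each chunk:
 * full 8-block chunks go through the parallel path, anything smaller is
 * handled one block at a time.
 */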
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = CAST6_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
		cast6_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__cast6_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = CAST6_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
		cast6_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__cast6_decrypt(ctx->ctx, srcdst, srcdst);
}

struct cast6_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct cast6_ctx cast6_ctx;
};

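/* LRW key material is the CAST6 key followed by one block of tweak key. */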
static int lrw_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE,
			     &tfm->crt_flags);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->cast6_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->cast6_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

struct cast6_xts_ctx {
	struct cast6_ctx tweak_ctx;
	struct cast6_ctx crypt_ctx;
};

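/* XTS uses two independent CAST6 keys: one for the data, one for the tweak. */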
static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
			      flags);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__cast6_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__cast6_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

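/*
 * Algorithm registrations: the "__"-prefixed synchronous helpers do the real
 * work, and the exported ecb/cbc/ctr/lrw/xts(cast6) entries wrap them through
 * the ablk helper so requests can be deferred when the FPU is not usable.
 */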
static struct crypto_alg cast6_algs[10] = { {
	.cra_name = "__ecb-cast6-avx",
	.cra_driver_name = "__driver-ecb-cast6-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE,
			.setkey = cast6_setkey,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-cast6-avx",
	.cra_driver_name = "__driver-cbc-cast6-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE,
			.setkey = cast6_setkey,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "__ctr-cast6-avx",
	.cra_driver_name = "__driver-ctr-cast6-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct cast6_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = cast6_setkey,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "__lrw-cast6-avx",
	.cra_driver_name = "__driver-lrw-cast6-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_exit = lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE +
				       CAST6_BLOCK_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE +
				       CAST6_BLOCK_SIZE,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = lrw_cast6_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	.cra_name = "__xts-cast6-avx",
	.cra_driver_name = "__driver-xts-cast6-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE * 2,
			.max_keysize = CAST6_MAX_KEY_SIZE * 2,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = xts_cast6_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	.cra_name = "ecb(cast6)",
	.cra_driver_name = "ecb-cast6-avx",
	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(cast6)",
	.cra_driver_name = "cbc-cast6-avx",
	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = __ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "ctr(cast6)",
	.cra_driver_name = "ctr-cast6-avx",
	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "lrw(cast6)",
	.cra_driver_name = "lrw-cast6-avx",
	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE +
				       CAST6_BLOCK_SIZE,
			.max_keysize = CAST6_MAX_KEY_SIZE +
				       CAST6_BLOCK_SIZE,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "xts(cast6)",
	.cra_driver_name = "xts-cast6-avx",
	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = CAST6_MIN_KEY_SIZE * 2,
			.max_keysize = CAST6_MAX_KEY_SIZE * 2,
			.ivsize = CAST6_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };

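/* Register the algorithms only if the CPU supports AVX and OS-enabled XSAVE. */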
static int __init cast6_init(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave) {
		pr_info("AVX instructions are not detected.\n");
		return -ENODEV;
	}

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return -ENODEV;
	}

	return crypto_register_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
}

static void __exit cast6_exit(void)
{
	crypto_unregister_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
}

module_init(cast6_init);
module_exit(cast6_exit);

MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("cast6");