Linux Kernel 3.7.1
serpent_sse2_glue.c
/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <[email protected]>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <[email protected]>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <[email protected]>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

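/*
 * Editorial overview: the SSE2 assembler implementation processes
 * SERPENT_PARALLEL_BLOCKS blocks per call (8 on x86_64, 4 on i586
 * builds, per <asm/crypto/serpent-sse2.h>); the helpers below adapt
 * that batch width to the ECB, CBC, CTR, LRW and XTS modes exposed
 * through the crypto API.
 */
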
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/serpent-sse2.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>

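/*
 * CBC decryption of one parallel batch: each plaintext block must be
 * XORed with the preceding ciphertext block. Since dst may alias src,
 * the needed ciphertext blocks are stashed in ivs[] before the batched
 * decryption runs; the first block of the batch is chained to the
 * previous IV by the generic CBC glue code.
 */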
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
        u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
        unsigned int j;

        for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
                ivs[j] = src[j];

        serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

        for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
                u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}

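/*
 * Single-block CTR step: encrypt the big-endian counter to produce one
 * keystream block, XOR it into the source block and advance the counter
 * held in *iv.
 */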
static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
{
        be128 ctrblk;

        u128_to_be128(&ctrblk, iv);
        u128_inc(iv);

        __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
        u128_xor(dst, src, (u128 *)&ctrblk);
}

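/*
 * Parallel CTR step: materialize SERPENT_PARALLEL_BLOCKS successive
 * counter values, then let serpent_enc_blk_xway_xor() encrypt them and
 * XOR the keystream into dst in one pass. src is copied to dst first so
 * the XOR variant can work in place.
 */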
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
                                   u128 *iv)
{
        be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
        unsigned int i;

        for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
                if (dst != src)
                        dst[i] = src[i];

                u128_to_be128(&ctrblks[i], iv);
                u128_inc(iv);
        }

        serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}

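/*
 * Dispatch tables for the generic glue helpers: entries run from the
 * widest batch down to a single block, and the helpers pick the largest
 * .num_blocks that still fits the remaining data. fpu_blocks_limit is
 * the smallest request, in blocks, for which taking the FPU context is
 * considered worthwhile.
 */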
static const struct common_glue_ctx serpent_enc = {
        .num_funcs = 2,
        .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
                .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
        }, {
                .num_blocks = 1,
                .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
        } }
};

static const struct common_glue_ctx serpent_ctr = {
        .num_funcs = 2,
        .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
                .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
        }, {
                .num_blocks = 1,
                .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
        } }
};

static const struct common_glue_ctx serpent_dec = {
        .num_funcs = 2,
        .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
                .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
        }, {
                .num_blocks = 1,
                .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
        } }
};

static const struct common_glue_ctx serpent_dec_cbc = {
        .num_funcs = 2,
        .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
                .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
        }, {
                .num_blocks = 1,
                .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
        } }
};

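/*
 * blkcipher entry points: thin wrappers that hand the scatterlist walk,
 * together with the matching table above, to the generic 128-bit glue
 * helpers.
 */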
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
                                       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
                                       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                     struct scatterlist *src, unsigned int nbytes)
{
        return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}

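/*
 * SSE2 registers may only be used in the kernel between
 * kernel_fpu_begin()/kernel_fpu_end(). glue_fpu_begin() defers that
 * cost until at least SERPENT_PARALLEL_BLOCKS blocks are pending, so
 * short requests stay on the scalar C implementation.
 */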
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
        return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
                              NULL, fpu_enabled, nbytes);
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
        glue_fpu_end(fpu_enabled);
}

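/*
 * Callback context for lrw_crypt()/xts_crypt(): those helpers invoke
 * the callbacks below on in-place buffers of at most tbuflen bytes
 * (always a multiple of the block size), so a full parallel chunk can
 * take the xway path while any tail is handled block by block.
 */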
struct crypt_priv {
        struct serpent_ctx *ctx;
        bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        struct crypt_priv *ctx = priv;
        int i;

        ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

        if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
                serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
                return;
        }

        for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
                __serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        struct crypt_priv *ctx = priv;
        int i;

        ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

        if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
                serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
                return;
        }

        for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
                __serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

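/*
 * LRW: the supplied key is the Serpent key followed by one block
 * (SERPENT_BLOCK_SIZE bytes) of tweak key material, which is what the
 * "+ SERPENT_BLOCK_SIZE" in the keysize limits further down accounts
 * for.
 */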
struct serpent_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        struct serpent_ctx serpent_ctx;
};

static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
                                                       SERPENT_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen -
                                               SERPENT_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->serpent_ctx,
                .fpu_enabled = false,
        };
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->serpent_ctx,
                .fpu_enabled = false,
        };
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
        struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}

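/*
 * XTS: two independent Serpent contexts, one keyed for the data units
 * and one for encrypting the tweak.
 */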
struct serpent_xts_ctx {
        struct serpent_ctx tweak_ctx;
        struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        /* key consists of keys of equal size concatenated, therefore
         * the length must be even
         */
        if (keylen % 2) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* first half of xts-key is for crypt */
        err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->crypt_ctx,
                .fpu_enabled = false,
        };
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->crypt_ctx,
                .fpu_enabled = false,
        };
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

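/*
 * Ten registrations in two layers. The five "__driver" entries are the
 * synchronous internal implementations; priority 0 and the "__" prefix
 * keep generic lookups from selecting them directly. The five plain
 * entries are asynchronous ablkcipher front ends that route requests
 * through cryptd via the ablk_* helpers, so the SSE2 work always runs
 * where the FPU is usable; priority 400 lets them win over the generic
 * C serpent. Note that ctr(serpent) wires .decrypt to ablk_encrypt:
 * CTR decryption is the same keystream operation as encryption.
 */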
static struct crypto_alg serpent_algs[10] = { {
        .cra_name = "__ecb-serpent-sse2",
        .cra_driver_name = "__driver-ecb-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .setkey = serpent_setkey,
                        .encrypt = ecb_encrypt,
                        .decrypt = ecb_decrypt,
                },
        },
}, {
        .cra_name = "__cbc-serpent-sse2",
        .cra_driver_name = "__driver-cbc-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .setkey = serpent_setkey,
                        .encrypt = cbc_encrypt,
                        .decrypt = cbc_decrypt,
                },
        },
}, {
        .cra_name = "__ctr-serpent-sse2",
        .cra_driver_name = "__driver-ctr-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct serpent_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = serpent_setkey,
                        .encrypt = ctr_crypt,
                        .decrypt = ctr_crypt,
                },
        },
}, {
        .cra_name = "__lrw-serpent-sse2",
        .cra_driver_name = "__driver-lrw-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_exit = lrw_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = lrw_serpent_setkey,
                        .encrypt = lrw_encrypt,
                        .decrypt = lrw_decrypt,
                },
        },
}, {
        .cra_name = "__xts-serpent-sse2",
        .cra_driver_name = "__driver-xts-serpent-sse2",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct serpent_xts_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
                        .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = xts_serpent_setkey,
                        .encrypt = xts_encrypt,
                        .decrypt = xts_decrypt,
                },
        },
}, {
        .cra_name = "ecb(serpent)",
        .cra_driver_name = "ecb-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
}, {
        .cra_name = "cbc(serpent)",
        .cra_driver_name = "cbc-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = __ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
}, {
        .cra_name = "ctr(serpent)",
        .cra_driver_name = "ctr-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_encrypt,
                        .geniv = "chainiv",
                },
        },
}, {
        .cra_name = "lrw(serpent)",
        .cra_driver_name = "lrw-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .max_keysize = SERPENT_MAX_KEY_SIZE +
                                       SERPENT_BLOCK_SIZE,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
}, {
        .cra_name = "xts(serpent)",
        .cra_driver_name = "xts-serpent-sse2",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SERPENT_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
                        .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
                        .ivsize = SERPENT_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
} };

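/*
 * Module init/exit: registration requires the SSE2 CPUID feature. As a
 * usage sketch (hypothetical caller, not part of this file), a kernel
 * user would reach this driver through the normal crypto API lookup
 * once the module is loaded:
 *
 *      struct crypto_ablkcipher *tfm;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(serpent)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_ablkcipher_setkey(tfm, key, SERPENT_MAX_KEY_SIZE);
 *      ...
 *      crypto_free_ablkcipher(tfm);
 */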
static int __init serpent_sse2_init(void)
{
        if (!cpu_has_xmm2) {
                printk(KERN_INFO "SSE2 instructions are not detected.\n");
                return -ENODEV;
        }

        return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_sse2_exit(void)
{
        crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");