Linux Kernel 3.7.1
cryptd.c
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk ([email protected])
 *             Adrian Hoban <[email protected]>
 *             Gabriele Paoloni <[email protected]>
 *             Aidan O'Mahony ([email protected])
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100
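
/*
 * cryptd keeps one crypto request queue and one work item per CPU: a
 * request is queued on the CPU that submitted it and later processed,
 * one request per work callback, on that same CPU via the kcrypto_wq
 * workqueue.
 */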
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}
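
/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item; get_cpu()/put_cpu() keep the submitter from migrating between
 * queueing the request and scheduling the work.
 */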
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/*
 * Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the crypto
         * workqueue. preempt_disable/enable is used to prevent being
         * preempted by cryptd_enqueue_request(). local_bh_disable/enable
         * is used to prevent cryptd_enqueue_request() being accessed from
         * software interrupts.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}
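
/*
 * The cryptd_*_crypt callbacks below run from the workqueue: each performs
 * the synchronous operation, restores the caller's completion function and
 * then invokes it with bottom halves disabled, so the callback sees the
 * same context it would if completion arrived from a softirq.
 */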
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct blkcipher_desc desc;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t complete)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}
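
/*
 * Build the shared part of a "cryptd(xxx)" instance: the wrapper keeps the
 * child's cra_name but advertises a cryptd(...)-prefixed driver name and a
 * priority 50 above the child's, so lookups by name prefer the async
 * wrapper over the synchronous original.
 */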
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t complete)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        int err;

        salg = shash_attr_alg(tb[1], 0, 0);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;

        rctx = aead_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);
        req->base.complete = rctx->complete;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t complete)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
        ctx->child = cipher;
        tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = alg->cra_type;
        inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
        inst->alg.cra_init = cryptd_aead_init_tfm;
        inst->alg.cra_exit = cryptd_aead_exit_tfm;
        inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
        inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
        inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
        inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
        inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
        inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
        inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
        inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
                kfree(inst);
        }
out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_blkcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_spawn(&aead_ctx->aead_spawn.base);
                kfree(inst);
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};
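
/*
 * With the template registered, looking up a name of the form
 * "cryptd(<alg>)" (e.g. crypto_alloc_ahash("cryptd(sha1)", 0, 0)) causes
 * the crypto API to call cryptd_create() and register the wrapping
 * instance on demand.
 */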

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
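
/*
 * Usage sketch for the helpers above (illustrative only, not part of this
 * file): a driver can push its synchronous blkcipher behind cryptd and
 * submit requests through the returned async tfm. The driver name
 * "__example-cbc-aes" and the key/keylen variables are hypothetical.
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("__example-cbc-aes", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	crypto_ablkcipher_setkey(&ctfm->base, key, keylen);
 *	... submit ablkcipher requests against &ctfm->base ...
 *	cryptd_free_ablkcipher(ctfm);
 */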

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
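
/*
 * Usage sketch for the ahash helpers above (illustrative only): bind to
 * the cryptd wrapper of a synchronous shash, and use cryptd_ahash_child()
 * or cryptd_shash_desc() to run it directly when the caller may sleep.
 * The driver name "__example-ghash" and key/keylen are hypothetical.
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("__example-ghash", 0, 0);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	crypto_ahash_setkey(&chash->base, key, keylen);
 *	... submit ahash requests against &chash->base ...
 *	cryptd_free_ahash(chash);
 */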

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }
        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");