Linux Kernel  3.7.1
sep_crypto.c
1 /*
2  *
3  * sep_crypto.c - Crypto interface structures
4  *
5  * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6  * Contributions(c) 2009-2010 Discretix. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License as published by the Free
10  * Software Foundation; version 2 of the License.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc., 59
19  * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  *
21  * CONTACTS:
22  *
23  * Mark Allyn [email protected]
24  * Jayant Mangalampalli [email protected]
25  *
26  * CHANGES:
27  *
28  * 2009.06.26 Initial publish
29  * 2010.09.14 Upgrade to Medfield
30  * 2011.02.22 Enable Kernel Crypto
31  *
32  */
33 
34 /* #define DEBUG */
35 #include <linux/init.h>
36 #include <linux/module.h>
37 #include <linux/miscdevice.h>
38 #include <linux/fs.h>
39 #include <linux/cdev.h>
40 #include <linux/kdev_t.h>
41 #include <linux/mutex.h>
42 #include <linux/sched.h>
43 #include <linux/mm.h>
44 #include <linux/poll.h>
45 #include <linux/wait.h>
46 #include <linux/pci.h>
47 #include <linux/pm_runtime.h>
48 #include <linux/err.h>
49 #include <linux/device.h>
50 #include <linux/errno.h>
51 #include <linux/interrupt.h>
52 #include <linux/kernel.h>
53 #include <linux/clk.h>
54 #include <linux/irq.h>
55 #include <linux/io.h>
56 #include <linux/platform_device.h>
57 #include <linux/list.h>
58 #include <linux/dma-mapping.h>
59 #include <linux/delay.h>
60 #include <linux/jiffies.h>
61 #include <linux/workqueue.h>
62 #include <linux/crypto.h>
63 #include <crypto/internal/hash.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/sha.h>
66 #include <crypto/md5.h>
67 #include <crypto/aes.h>
68 #include <crypto/des.h>
69 #include <crypto/hash.h>
70 #include "sep_driver_hw_defs.h"
71 #include "sep_driver_config.h"
72 #include "sep_driver_api.h"
73 #include "sep_dev.h"
74 #include "sep_crypto.h"
75 
76 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
77 
78 /* Globals for queuing */
79 static spinlock_t queue_lock;
80 static struct crypto_queue sep_queue;
81 
82 /* Declaration of the dequeuer */
83 static void sep_dequeuer(void *data);
84 
85 /* TESTING */
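86 /**
87  * sep_do_callback - workqueue adaptor for generic callbacks
88  * @work: work_struct embedded in a struct sep_work_struct
89  *
90  * Recovers the containing sep_work_struct, runs its callback with the
91  * stored data pointer, and frees the container.
92  */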
93 static void sep_do_callback(struct work_struct *work)
94 {
95  struct sep_work_struct *sep_work = container_of(work,
96  struct sep_work_struct, work);
97  if (sep_work != NULL) {
98  (sep_work->callback)(sep_work->data);
99  kfree(sep_work);
100  } else {
101  pr_debug("sep crypto: do callback - NULL container\n");
102  }
103 }
104 
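105 /**
106  * sep_submit_work - wrap a callback in a work struct and queue it
107  * @work_queue: workqueue to run the callback on
108  * @funct: callback to invoke
109  * @data: opaque argument passed to the callback
110  *
111  * Allocates a sep_work_struct (GFP_ATOMIC), points it at
112  * sep_do_callback, and queues it. Returns 0 on success, -ENOMEM on
113  * allocation failure, or -EINVAL if queue_work() refuses the entry.
114  */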
115 static int sep_submit_work(struct workqueue_struct *work_queue,
116  void(*funct)(void *),
117  void *data)
118 {
119  struct sep_work_struct *sep_work;
120  int result;
121 
122  sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
123 
124  if (sep_work == NULL) {
125  pr_debug("sep crypto: can't allocate work structure\n");
126  return -ENOMEM;
127  }
128 
129  sep_work->callback = funct;
130  sep_work->data = data;
131  INIT_WORK(&sep_work->work, sep_do_callback);
132  result = queue_work(work_queue, &sep_work->work);
133  if (!result) {
134  pr_debug("sep_crypto: queue_work failed\n");
135  return -EINVAL;
136  }
137  return 0;
138 }
139 
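140 /**
141  * sep_alloc_sg_buf - build a scatterlist of zeroed pages
142  * @sep: SEP device
143  * @size: total bytes needed
144  * @block_size: block size the pages must hold evenly
145  *
146  * Each entry carries at most PAGE_SIZE rounded down to a multiple of
147  * block_size, so no block straddles a page. Returns NULL on failure.
148  */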
149 static struct scatterlist *sep_alloc_sg_buf(
150  struct sep_device *sep,
151  size_t size,
152  size_t block_size)
153 {
154  u32 nbr_pages;
155  u32 ct1;
156  void *buf;
157  size_t current_size;
158  size_t real_page_size;
159 
160  struct scatterlist *sg, *sg_temp;
161 
162  if (size == 0)
163  return NULL;
164 
165  dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
166 
167  current_size = 0;
168  nbr_pages = 0;
169  real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
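170  /*
171  * Compute how many pages are needed, given that each page is
172  * only filled to the largest whole multiple of block_size
173  * (real_page_size) so blocks never cross a page boundary.
174  */
175 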
176  while (current_size < size) {
177  current_size += real_page_size;
178  nbr_pages += 1;
179  }
180 
181  sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
182  if (!sg) {
183  dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
184  return NULL;
185  }
186 
187  sg_init_table(sg, nbr_pages);
188 
189  current_size = 0;
190  sg_temp = sg;
191  for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
192  buf = (void *)get_zeroed_page(GFP_ATOMIC);
193  if (!buf) {
194  dev_warn(&sep->pdev->dev,
195  "Cannot allocate page for new buffer\n");
196  kfree(sg);
197  return NULL;
198  }
199 
200  sg_set_buf(sg_temp, buf, real_page_size);
201  if ((size - current_size) > real_page_size) {
202  sg_temp->length = real_page_size;
203  current_size += real_page_size;
204  } else {
205  sg_temp->length = (size - current_size);
206  current_size = size;
207  }
208  sg_temp = sg_next(sg);
209  }
210  return sg;
211 }
212 
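213 /**
214  * sep_free_sg_buf - free the pages and list from sep_alloc_sg_buf
215  * @sg: scatterlist to release
216  */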
217 static void sep_free_sg_buf(struct scatterlist *sg)
218 {
219  struct scatterlist *sg_temp = sg;
220  while (sg_temp) {
221  free_page((unsigned long)sg_virt(sg_temp));
222  sg_temp = sg_next(sg_temp);
223  }
224  kfree(sg);
225 }
226 
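227 /**
228  * sep_copy_sg - copy bytes between two scatterlists
229  * @sep: SEP device
230  * @sg_src: source scatterlist
231  * @sg_dst: destination scatterlist
232  * @size: number of bytes to copy
233  *
234  * Walks both lists in step, each pass copying the largest span that
235  * fits in the current source and destination entries.
236  */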
237 static void sep_copy_sg(
238  struct sep_device *sep,
239  struct scatterlist *sg_src,
240  struct scatterlist *sg_dst,
241  size_t size)
242 {
243  u32 seg_size;
244  u32 in_offset, out_offset;
245 
246  u32 count = 0;
247  struct scatterlist *sg_src_tmp = sg_src;
248  struct scatterlist *sg_dst_tmp = sg_dst;
249  in_offset = 0;
250  out_offset = 0;
251 
252  dev_dbg(&sep->pdev->dev, "sep copy sg\n");
253 
254  if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
255  return;
256 
257  dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
258 
259  while (count < size) {
260  if ((sg_src_tmp->length - in_offset) >
261  (sg_dst_tmp->length - out_offset))
262  seg_size = sg_dst_tmp->length - out_offset;
263  else
264  seg_size = sg_src_tmp->length - in_offset;
265 
266  if (seg_size > (size - count))
267  seg_size = (size - count);
268 
269  memcpy(sg_virt(sg_dst_tmp) + out_offset,
270  sg_virt(sg_src_tmp) + in_offset,
271  seg_size);
272 
273  in_offset += seg_size;
274  out_offset += seg_size;
275  count += seg_size;
276 
277  if (in_offset >= sg_src_tmp->length) {
278  sg_src_tmp = sg_next(sg_src_tmp);
279  in_offset = 0;
280  }
281 
282  if (out_offset >= sg_dst_tmp->length) {
283  sg_dst_tmp = sg_next(sg_dst_tmp);
284  out_offset = 0;
285  }
286  }
287 }
288 
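289 /**
290  * sep_oddball_pages - check scatterlist pages for block alignment
291  * @sep: SEP device
292  * @sg: scatterlist to inspect
293  * @data_size: total number of bytes of data
294  * @block_size: cipher/hash block size
295  * @new_sg: set to a replacement scatterlist if one is built
296  * @do_copy: nonzero to copy the data into the replacement list
297  *
298  * If any page other than the last holds a length that is not a
299  * multiple of block_size, a fresh block-aligned scatterlist is
300  * allocated (and optionally filled from @sg). Returns 1 if a new
301  * list was built, 0 if the original can be used directly, and
302  * -ENOMEM if the allocation fails.
303  */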
311 static int sep_oddball_pages(
312  struct sep_device *sep,
313  struct scatterlist *sg,
314  size_t data_size,
315  u32 block_size,
316  struct scatterlist **new_sg,
317  u32 do_copy)
318 {
319  struct scatterlist *sg_temp;
320  u32 flag;
321  u32 nbr_pages, page_count;
322 
323  dev_dbg(&sep->pdev->dev, "sep oddball\n");
324  if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
325  return 0;
326 
327  dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
328  flag = 0;
329  nbr_pages = 0;
330  page_count = 0;
331  sg_temp = sg;
332 
333  while (sg_temp) {
334  nbr_pages += 1;
335  sg_temp = sg_next(sg_temp);
336  }
337 
338  sg_temp = sg;
339  while ((sg_temp) && (flag == 0)) {
340  page_count += 1;
341  if (sg_temp->length % block_size)
342  flag = 1;
343  else
344  sg_temp = sg_next(sg_temp);
345  }
346 
347  /* Do not process if last (or only) page is oddball */
348  if (nbr_pages == page_count)
349  flag = 0;
350 
351  if (flag) {
352  dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
353  *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
354  if (*new_sg == NULL) {
355  dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
356  return -ENOMEM;
357  }
358 
359  if (do_copy)
360  sep_copy_sg(sep, sg, *new_sg, data_size);
361 
362  return 1;
363  } else {
364  return 0;
365  }
366 }
367 
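368 /**
369  * sep_copy_offset_sg - copy from a scatterlist starting at an offset
370  * @sep: SEP device
371  * @sg: source scatterlist
372  * @offset: byte offset at which the copy begins
373  * @dst: destination buffer
374  * @len: number of bytes to copy
375  *
376  * Skips entries until the one containing @offset, then copies @len
377  * bytes, crossing entry boundaries as needed. Returns @len, or
378  * -ENOMEM if the list ends before the copy completes.
379  */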
381 static size_t sep_copy_offset_sg(
382  struct sep_device *sep,
383  struct scatterlist *sg,
384  u32 offset,
385  void *dst,
386  u32 len)
387 {
388  size_t page_start;
389  size_t page_end;
390  size_t offset_within_page;
391  size_t length_within_page;
392  size_t length_remaining;
393  size_t current_offset;
394 
395  /* Find which page is beginning of segment */
396  page_start = 0;
397  page_end = sg->length;
398  while ((sg) && (offset > page_end)) {
399  page_start += sg->length;
400  sg = sg_next(sg);
401  if (sg)
402  page_end += sg->length;
403  }
404 
405  if (sg == NULL)
406  return -ENOMEM;
407 
408  offset_within_page = offset - page_start;
409  if ((sg->length - offset_within_page) >= len) {
410  /* All within this page */
411  memcpy(dst, sg_virt(sg) + offset_within_page, len);
412  return len;
413  } else {
414  /* Scattered multiple pages */
415  current_offset = 0;
416  length_remaining = len;
417  while ((sg) && (current_offset < len)) {
418  length_within_page = sg->length - offset_within_page;
419  if (length_within_page >= length_remaining) {
420  memcpy(dst+current_offset,
421  sg_virt(sg) + offset_within_page,
422  length_remaining);
423  length_remaining = 0;
424  current_offset = len;
425  } else {
426  memcpy(dst+current_offset,
427  sg_virt(sg) + offset_within_page,
428  length_within_page);
429  length_remaining -= length_within_page;
430  current_offset += length_within_page;
431  offset_within_page = 0;
432  sg = sg_next(sg);
433  }
434  }
435 
436  if (sg == NULL)
437  return -ENOMEM;
438  }
439  return len;
440 }
441 
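442 /**
443  * partial_overlap - check src/dst buffers for partial overlap
444  * @src_ptr: source buffer
445  * @dst_ptr: destination buffer
446  * @nbytes: length of the operation
447  *
448  * A fully in-place operation (src == dst) is allowed; any partial
449  * overlap returns -EINVAL, otherwise 0.
450  */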
451 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
452 {
453  /* Check for partial overlap */
454  if (src_ptr != dst_ptr) {
455  if (src_ptr < dst_ptr) {
456  if ((src_ptr + nbytes) > dst_ptr)
457  return -EINVAL;
458  } else {
459  if ((dst_ptr + nbytes) > src_ptr)
460  return -EINVAL;
461  }
462  }
463 
464  return 0;
465 }
466 
467 /* Debug - prints only if DEBUG is defined */
468 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
469 
470  {
471  unsigned char *cptr;
472  struct sep_aes_internal_context *aes_internal;
473  struct sep_des_internal_context *des_internal;
474  int ct1;
475 
476  struct this_task_ctx *ta_ctx;
477  struct crypto_ablkcipher *tfm;
478  struct sep_system_ctx *sctx;
479 
480  ta_ctx = ablkcipher_request_ctx(req);
481  tfm = crypto_ablkcipher_reqtfm(req);
482  sctx = crypto_ablkcipher_ctx(tfm);
483 
484  dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
485  if ((ta_ctx->current_request == DES_CBC) &&
486  (ta_ctx->des_opmode == SEP_DES_CBC)) {
487 
488  des_internal = (struct sep_des_internal_context *)
489  sctx->des_private_ctx.ctx_buf;
490  /* print vendor */
491  dev_dbg(&ta_ctx->sep_used->pdev->dev,
492  "sep - vendor iv for DES\n");
493  cptr = (unsigned char *)des_internal->iv_context;
494  for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
495  dev_dbg(&ta_ctx->sep_used->pdev->dev,
496  "%02x\n", *(cptr + ct1));
497 
498  /* print walk */
499  dev_dbg(&ta_ctx->sep_used->pdev->dev,
500  "sep - walk from kernel crypto iv for DES\n");
501  cptr = (unsigned char *)ta_ctx->walk.iv;
502  for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
503  dev_dbg(&ta_ctx->sep_used->pdev->dev,
504  "%02x\n", *(cptr + ct1));
505  } else if ((ta_ctx->current_request == AES_CBC) &&
506  (ta_ctx->aes_opmode == SEP_AES_CBC)) {
507 
508  aes_internal = (struct sep_aes_internal_context *)
509  sctx->aes_private_ctx.cbuff;
510  /* print vendor */
511  dev_dbg(&ta_ctx->sep_used->pdev->dev,
512  "sep - vendor iv for AES\n");
513  cptr = (unsigned char *)aes_internal->aes_ctx_iv;
514  for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
515  dev_dbg(&ta_ctx->sep_used->pdev->dev,
516  "%02x\n", *(cptr + ct1));
517 
518  /* print walk */
519  dev_dbg(&ta_ctx->sep_used->pdev->dev,
520  "sep - walk from kernel crypto iv for AES\n");
521  cptr = (unsigned char *)ta_ctx->walk.iv;
522  for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
523  dev_dbg(&ta_ctx->sep_used->pdev->dev,
524  "%02x\n", *(cptr + ct1));
525  }
526 }
527 
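528 /**
529  * sep_weak_key - flag weak and semi-weak DES keys via a parity table
530  * Returns 1 for a weak key, 0 otherwise.
531  */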
532 static int sep_weak_key(const u8 *key, unsigned int keylen)
533 {
534  static const u8 parity[] = {
535  8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
536  0, 8, 8, 0, 8, 0, 0, 8, 8,
537  0, 0, 8, 0, 8, 8, 3,
538  0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
539  8, 0, 0, 8, 0, 8, 8, 0, 0,
540  8, 8, 0, 8, 0, 0, 8,
541  0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
542  8, 0, 0, 8, 0, 8, 8, 0, 0,
543  8, 8, 0, 8, 0, 0, 8,
544  8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
545  0, 8, 8, 0, 8, 0, 0, 8, 8,
546  0, 0, 8, 0, 8, 8, 0,
547  0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
548  8, 0, 0, 8, 0, 8, 8, 0, 0,
549  8, 8, 0, 8, 0, 0, 8,
550  8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
551  0, 8, 8, 0, 8, 0, 0, 8, 8,
552  0, 0, 8, 0, 8, 8, 0,
553  8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
554  0, 8, 8, 0, 8, 0, 0, 8, 8,
555  0, 0, 8, 0, 8, 8, 0,
556  4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
557  8, 5, 0, 8, 0, 8, 8, 0, 0,
558  8, 8, 0, 8, 0, 6, 8,
559  };
560 
561  u32 n, w;
562 
563  n = parity[key[0]]; n <<= 4;
564  n |= parity[key[1]]; n <<= 4;
565  n |= parity[key[2]]; n <<= 4;
566  n |= parity[key[3]]; n <<= 4;
567  n |= parity[key[4]]; n <<= 4;
568  n |= parity[key[5]]; n <<= 4;
569  n |= parity[key[6]]; n <<= 4;
570  n |= parity[key[7]];
571  w = 0x88888888L;
572 
573  /* 1 in 10^10 keys passes this test */
574  if (!((n - (w >> 3)) & w)) {
575  if (n < 0x41415151) {
576  if (n < 0x31312121) {
577  if (n < 0x14141515) {
578  /* 01 01 01 01 01 01 01 01 */
579  if (n == 0x11111111)
580  goto weak;
581  /* 01 1F 01 1F 01 0E 01 0E */
582  if (n == 0x13131212)
583  goto weak;
584  } else {
585  /* 01 E0 01 E0 01 F1 01 F1 */
586  if (n == 0x14141515)
587  goto weak;
588  /* 01 FE 01 FE 01 FE 01 FE */
589  if (n == 0x16161616)
590  goto weak;
591  }
592  } else {
593  if (n < 0x34342525) {
594  /* 1F 01 1F 01 0E 01 0E 01 */
595  if (n == 0x31312121)
596  goto weak;
597  /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
598  if (n == 0x33332222)
599  goto weak;
600  } else {
601  /* 1F E0 1F E0 0E F1 0E F1 */
602  if (n == 0x34342525)
603  goto weak;
604  /* 1F FE 1F FE 0E FE 0E FE */
605  if (n == 0x36362626)
606  goto weak;
607  }
608  }
609  } else {
610  if (n < 0x61616161) {
611  if (n < 0x44445555) {
612  /* E0 01 E0 01 F1 01 F1 01 */
613  if (n == 0x41415151)
614  goto weak;
615  /* E0 1F E0 1F F1 0E F1 0E */
616  if (n == 0x43435252)
617  goto weak;
618  } else {
619  /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
620  if (n == 0x44445555)
621  goto weak;
622  /* E0 FE E0 FE F1 FE F1 FE */
623  if (n == 0x46465656)
624  goto weak;
625  }
626  } else {
627  if (n < 0x64646565) {
628  /* FE 01 FE 01 FE 01 FE 01 */
629  if (n == 0x61616161)
630  goto weak;
631  /* FE 1F FE 1F FE 0E FE 0E */
632  if (n == 0x63636262)
633  goto weak;
634  } else {
635  /* FE E0 FE E0 FE F1 FE F1 */
636  if (n == 0x64646565)
637  goto weak;
638  /* FE FE FE FE FE FE FE FE */
639  if (n == 0x66666666)
640  goto weak;
641  }
642  }
643  }
644  }
645  return 0;
646 weak:
647  return 1;
648 }
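649 /**
650  * sep_sg_nents - count the entries in a scatterlist
651  */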
652 static u32 sep_sg_nents(struct scatterlist *sg)
653 {
654  u32 ct1 = 0;
655  while (sg) {
656  ct1 += 1;
657  sg = sg_next(sg);
658  }
659 
660  return ct1;
661 }
662 
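663 /**
664  * sep_start_msg - open an outbound message to the SEP
665  * @ta_ctx: task context holding the message area
666  *
667  * Writes the start token at the head of the message and returns the
668  * offset (two words) at which the opcode will go.
669  */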
669 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
670 {
671  u32 *word_ptr;
672  ta_ctx->msg_len_words = 2;
673  ta_ctx->msgptr = ta_ctx->msg;
675  ta_ctx->msgptr += sizeof(u32) * 2;
676  word_ptr = (u32 *)ta_ctx->msgptr;
677  *word_ptr = SEP_START_MSG_TOKEN;
678  return sizeof(u32) * 2;
679 }
680 
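681 /**
682  * sep_end_msg - close an outbound message to the SEP
683  * @ta_ctx: task context holding the message area
684  * @msg_offset: offset just past the last field written
685  *
686  * Stores the total length in words into the header word after the
687  * start token and appends a placeholder CRC of zero.
688  */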
689 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
690 {
691  u32 *word_ptr;
692  /* Msg size goes into msg after token */
693  ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
694  word_ptr = (u32 *)ta_ctx->msgptr;
695  word_ptr += 1;
696  *word_ptr = ta_ctx->msg_len_words;
697 
698  /* CRC (currently 0) goes at end of msg */
699  word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
700  *word_ptr = 0;
701 }
702 
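703 /**
704  * sep_start_inbound_msg - validate the header of a SEP reply
705  * @ta_ctx: task context holding the message area
706  * @msg_offset: set to the offset of the first payload word
707  *
708  * Checks the start token; returns SEP_OK or SEP_INVALID_START.
709  */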
710 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
711 {
712  u32 *word_ptr;
713  u32 token;
714  u32 error = SEP_OK;
715 
716  *msg_offset = sizeof(u32) * 2;
717  word_ptr = (u32 *)ta_ctx->msgptr;
718  token = *word_ptr;
719  ta_ctx->msg_len_words = *(word_ptr + 1);
720 
721  if (token != SEP_START_MSG_TOKEN) {
722  error = SEP_INVALID_START;
723  goto end_function;
724  }
725 
726 end_function:
727 
728  return error;
729 }
730 
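731 /**
732  * sep_write_msg - copy one field into the outbound message
733  * @ta_ctx: task context holding the message area
734  * @in_addr: source data
735  * @size: number of bytes to copy
736  * @max_size: slot size; the offset advances by this much
737  * @msg_offset: running offset, updated on return
738  * @byte_array: nonzero to byte-swap each 32-bit word after the copy
739  */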
741 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
742  u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
743 {
744  u32 *word_ptr;
745  void *void_ptr;
746  void_ptr = ta_ctx->msgptr + *msg_offset;
747  word_ptr = (u32 *)void_ptr;
748  memcpy(void_ptr, in_addr, size);
749  *msg_offset += max_size;
750 
751  /* Do we need to manipulate endian? */
752  if (byte_array) {
753  u32 i;
754  for (i = 0; i < ((size + 3) / 4); i += 1)
755  *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
756  }
757 }
758 
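759 /**
760  * sep_make_header - start a message and write its opcode
761  * @ta_ctx: task context holding the message area
762  * @msg_offset: set to the offset following the opcode
763  * @op_code: operation code for this transaction
764  */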
766 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
767  u32 op_code)
768 {
769  u32 *word_ptr;
770 
771  *msg_offset = sep_start_msg(ta_ctx);
772  word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
773  *word_ptr = op_code;
774  *msg_offset += sizeof(u32);
775 }
776 
777 
778 
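779 /**
780  * sep_read_msg - copy one field out of the inbound message
781  * @ta_ctx: task context holding the message area
782  * @in_addr: destination buffer
783  * @size: number of bytes to copy
784  * @max_size: slot size; the offset advances by this much
785  * @msg_offset: running offset, updated on return
786  * @byte_array: nonzero to byte-swap each 32-bit word before the copy
787  */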
789 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
790  u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
791 {
792  u32 *word_ptr;
793  void *void_ptr;
794  void_ptr = ta_ctx->msgptr + *msg_offset;
795  word_ptr = (u32 *)void_ptr;
796 
797  /* Do we need to manipulate endian? */
798  if (byte_array) {
799  u32 i;
800  for (i = 0; i < ((size + 3) / 4); i += 1)
801  *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
802  }
803 
804  memcpy(in_addr, void_ptr, size);
805  *msg_offset += max_size;
806 }
807 
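808 /**
809  * sep_verify_op - check a SEP reply for opcode echo and status
810  * @ta_ctx: task context holding the message area
811  * @op_code: opcode the reply is expected to echo
812  * @msg_offset: set to the offset of the data after the status words
813  */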
815 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
816  u32 *msg_offset)
817 {
818  u32 error;
819  u32 in_ary[2];
820 
821  struct sep_device *sep = ta_ctx->sep_used;
822 
823  dev_dbg(&sep->pdev->dev, "dumping return message\n");
824  error = sep_start_inbound_msg(ta_ctx, msg_offset);
825  if (error) {
826  dev_warn(&sep->pdev->dev,
827  "sep_start_inbound_msg error\n");
828  return error;
829  }
830 
831  sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
832  msg_offset, 0);
833 
834  if (in_ary[0] != op_code) {
835  dev_warn(&sep->pdev->dev,
836  "sep got back wrong opcode\n");
837  dev_warn(&sep->pdev->dev,
838  "got back %x; expected %x\n",
839  in_ary[0], op_code);
840  return SEP_WRONG_OPCODE;
841  }
842 
843  if (in_ary[1] != SEP_OK) {
844  dev_warn(&sep->pdev->dev,
845  "sep execution error\n");
846  dev_warn(&sep->pdev->dev,
847  "got back %x; expected %x\n",
848  in_ary[1], SEP_OK);
849  return in_ary[0];
850  }
851 
852 return 0;
853 }
854 
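855 /**
856  * sep_read_context - read a crypto context from the inbound message
857  * @ta_ctx: task context holding the message area
858  * @msg_offset: running offset into the message
859  * @dst: destination for the context
860  * @len: context length in bytes (rounded up to whole words)
861  */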
867 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
868  void *dst, u32 len)
869 {
870  u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
871  sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
872 }
873 
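874 /**
875  * sep_write_context - put a crypto context into the outbound message
876  * @ta_ctx: task context holding the message area
877  * @msg_offset: running offset into the message
878  * @src: context to send
879  * @len: context length in bytes (rounded up to whole words)
880  */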
886 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
887  void *src, u32 len)
888 {
889  u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
890  sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
891 }
892 
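893 /**
894  * sep_clear_out - drop everything held for the current operation
895  * @ta_ctx: task context to clean out
896  *
897  * Frees substitute scatterlists and, if this task owns the SEP,
898  * resets the device state and wakes the next waiting transaction.
899  */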
900 static void sep_clear_out(struct this_task_ctx *ta_ctx)
901 {
902  if (ta_ctx->src_sg_hold) {
903  sep_free_sg_buf(ta_ctx->src_sg_hold);
904  ta_ctx->src_sg_hold = NULL;
905  }
906 
907  if (ta_ctx->dst_sg_hold) {
908  sep_free_sg_buf(ta_ctx->dst_sg_hold);
909  ta_ctx->dst_sg_hold = NULL;
910  }
911 
912  ta_ctx->src_sg = NULL;
913  ta_ctx->dst_sg = NULL;
914 
916 
917  if (ta_ctx->i_own_sep) {
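918  /*
919  * This task owns the device; clear every piece of device
920  * state that pointed at this transaction before letting the
921  * next one in.
922  */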
923  ta_ctx->sep_used->current_hash_req = NULL;
924  ta_ctx->sep_used->current_cypher_req = NULL;
925  ta_ctx->sep_used->current_request = 0;
926  ta_ctx->sep_used->current_hash_stage = 0;
927  ta_ctx->sep_used->ta_ctx = NULL;
928  ta_ctx->sep_used->in_kernel = 0;
929 
930  ta_ctx->call_status.status = 0;
931 
932  /* Remove anything confidential */
933  memset(ta_ctx->sep_used->shared_addr, 0,
934  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
935 
936  sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
937 
938 #ifdef SEP_ENABLE_RUNTIME_PM
939  ta_ctx->sep_used->in_use = 0;
940  pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
941  pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
942 #endif
943 
944  clear_bit(SEP_WORKING_LOCK_BIT,
945  &ta_ctx->sep_used->in_use_flags);
946  ta_ctx->sep_used->pid_doing_transaction = 0;
947 
948  dev_dbg(&ta_ctx->sep_used->pdev->dev,
949  "[PID%d] waking up next transaction\n",
950  current->pid);
951 
952  clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
953  &ta_ctx->sep_used->in_use_flags);
954  wake_up(&ta_ctx->sep_used->event_transactions);
955 
956  ta_ctx->i_own_sep = 0;
957  }
958 }
959 
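960 /**
961  * sep_crypto_release - finish an operation; signal the crypto layer
962  * @sctx: system context; @ta_ctx: task context; @error: result code
963  */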
964 static void sep_crypto_release(struct sep_system_ctx *sctx,
965  struct this_task_ctx *ta_ctx, u32 error)
966 {
967  struct ahash_request *hash_req = ta_ctx->current_hash_req;
968  struct ablkcipher_request *cypher_req =
969  ta_ctx->current_cypher_req;
970  struct sep_device *sep = ta_ctx->sep_used;
971 
972  sep_clear_out(ta_ctx);
973 
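974  /*
975  * Tell the workqueue function polling on this flag (if any)
976  * that the operation has completed.
977  */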
979  if (ta_ctx->are_we_done_yet != NULL)
980  *ta_ctx->are_we_done_yet = 1;
981 
982  if (cypher_req != NULL) {
983  if ((sctx->key_sent == 1) ||
984  ((error != 0) && (error != -EINPROGRESS))) {
985  if (cypher_req->base.complete == NULL) {
986  dev_dbg(&sep->pdev->dev,
987  "release is null for cypher!");
988  } else {
989  cypher_req->base.complete(
990  &cypher_req->base, error);
991  }
992  }
993  }
994 
995  if (hash_req != NULL) {
996  if (hash_req->base.complete == NULL) {
997  dev_dbg(&sep->pdev->dev,
998  "release is null for hash!");
999  } else {
1000  hash_req->base.complete(
1001  &hash_req->base, error);
1002  }
1003  }
1004 }
1005 
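1006 /**
1007  * sep_crypto_take_sep - acquire the device and send the message
1008  * @ta_ctx: task context with a fully built message
1009  *
1010  * Queues status, waits for the device, copies in msg and DCB, sends.
1011  */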
1012 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1013 {
1014  struct sep_device *sep = ta_ctx->sep_used;
1015  int result;
1016  struct sep_msgarea_hdr *my_msg_header;
1017 
1018  my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1019 
1020  /* add to status queue */
1021  ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1022  ta_ctx->nbytes, current->pid,
1023  current->comm, sizeof(current->comm));
1024 
1025  if (!ta_ctx->queue_elem) {
1026  dev_dbg(&sep->pdev->dev,
1027  "[PID%d] updating queue status error\n", current->pid);
1028  return -EINVAL;
1029  }
1030 
1031  /* get the device; this can sleep */
1032  result = sep_wait_transaction(sep);
1033  if (result)
1034  return result;
1035 
1036  if (sep_dev->power_save_setup == 1)
1037  pm_runtime_get_sync(&sep_dev->pdev->dev);
1038 
1039  /* Copy in the message */
1040  memcpy(sep->shared_addr, ta_ctx->msg,
1041  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1042 
1043  /* Copy in the dcb information if there is any */
1044  if (ta_ctx->dcb_region) {
1045  result = sep_activate_dcb_dmatables_context(sep,
1046  &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1047  ta_ctx->dma_ctx);
1048  if (result)
1049  return result;
1050  }
1051 
1052  /* Mark the device so we know how to finish the job in the tasklet */
1053  if (ta_ctx->current_hash_req)
1054  sep->current_hash_req = ta_ctx->current_hash_req;
1055  else
1056  sep->current_cypher_req = ta_ctx->current_cypher_req;
1057 
1058  sep->current_request = ta_ctx->current_request;
1059  sep->current_hash_stage = ta_ctx->current_hash_stage;
1060  sep->ta_ctx = ta_ctx;
1061  sep->in_kernel = 1;
1062  ta_ctx->i_own_sep = 1;
1063 
1064  /* need to set bit first to avoid race condition with interrupt */
1065  set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1066 
1067  result = sep_send_command_handler(sep);
1068 
1069  dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1070  current->pid);
1071 
1072  if (!result)
1073  dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1074  current->pid);
1075  else {
1076  dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
1077  current->pid);
1078  clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1079  &ta_ctx->call_status.status);
1080  }
1081 
1082  return result;
1083 }
1084 
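1085 /**
1086  * sep_crypto_block_data - build the data message for a cipher op
1087  * @req: ablkcipher request
1088  *
1089  * Fixes oddball pages, validates sizes and overlap, embeds a lone
1090  * DES block directly or builds DMA tables, appends the context.
1091  */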
1092 static int sep_crypto_block_data(struct ablkcipher_request *req)
1093 {
1094 
1095  int int_error;
1096  u32 msg_offset;
1097  static u32 msg[10];
1098  void *src_ptr;
1099  void *dst_ptr;
1100 
1101  static char small_buf[100];
1102  ssize_t copy_result;
1103  int result;
1104 
1105  struct scatterlist *new_sg;
1106  struct this_task_ctx *ta_ctx;
1107  struct crypto_ablkcipher *tfm;
1108  struct sep_system_ctx *sctx;
1109 
1110  struct sep_des_internal_context *des_internal;
1111  struct sep_aes_internal_context *aes_internal;
1112 
1113  ta_ctx = ablkcipher_request_ctx(req);
1114  tfm = crypto_ablkcipher_reqtfm(req);
1115  sctx = crypto_ablkcipher_ctx(tfm);
1116 
1117  /* start the walk on scatterlists */
1118  ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1119  dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1120  req->nbytes);
1121 
1122  int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1123  if (int_error) {
1124  dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1125  int_error);
1126  return -ENOMEM;
1127  }
1128 
1129  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1130  "crypto block: src is %lx dst is %lx\n",
1131  (unsigned long)req->src, (unsigned long)req->dst);
1132 
1133  /* Make sure all pages are an even block */
1134  int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1135  req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1136 
1137  if (int_error < 0) {
1138  dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1139  return -ENOMEM;
1140  } else if (int_error == 1) {
1141  ta_ctx->src_sg = new_sg;
1142  ta_ctx->src_sg_hold = new_sg;
1143  } else {
1144  ta_ctx->src_sg = req->src;
1145  ta_ctx->src_sg_hold = NULL;
1146  }
1147 
1148  int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1149  req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1150 
1151  if (int_error < 0) {
1152  dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1153  int_error);
1154  return -ENOMEM;
1155  } else if (int_error == 1) {
1156  ta_ctx->dst_sg = new_sg;
1157  ta_ctx->dst_sg_hold = new_sg;
1158  } else {
1159  ta_ctx->dst_sg = req->dst;
1160  ta_ctx->dst_sg_hold = NULL;
1161  }
1162 
1163  /* set nbytes for queue status */
1164  ta_ctx->nbytes = req->nbytes;
1165 
1166  /* Key already done; this is for data */
1167  dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1168 
1169  /* check for valid data and proper spacing */
1170  src_ptr = sg_virt(ta_ctx->src_sg);
1171  dst_ptr = sg_virt(ta_ctx->dst_sg);
1172 
1173  if (!src_ptr || !dst_ptr ||
1174  (ta_ctx->current_cypher_req->nbytes %
1175  crypto_ablkcipher_blocksize(tfm))) {
1176 
1177  dev_warn(&ta_ctx->sep_used->pdev->dev,
1178  "cipher block size odd\n");
1179  dev_warn(&ta_ctx->sep_used->pdev->dev,
1180  "cipher block size is %x\n",
1181  crypto_ablkcipher_blocksize(tfm));
1182  dev_warn(&ta_ctx->sep_used->pdev->dev,
1183  "cipher data size is %x\n",
1184  ta_ctx->current_cypher_req->nbytes);
1185  return -EINVAL;
1186  }
1187 
1188  if (partial_overlap(src_ptr, dst_ptr,
1189  ta_ctx->current_cypher_req->nbytes)) {
1190  dev_warn(&ta_ctx->sep_used->pdev->dev,
1191  "block partial overlap\n");
1192  return -EINVAL;
1193  }
1194 
1195  /* Put together the message */
1196  sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1197 
1198  /* If des, and size is 1 block, put directly in msg */
1199  if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1200  (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1201 
1202  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1203  "writing out one block des\n");
1204 
1205  copy_result = sg_copy_to_buffer(
1206  ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1207  small_buf, crypto_ablkcipher_blocksize(tfm));
1208 
1209  if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1210  dev_warn(&ta_ctx->sep_used->pdev->dev,
1211  "des block copy failed\n");
1212  return -ENOMEM;
1213  }
1214 
1215  /* Put data into message */
1216  sep_write_msg(ta_ctx, small_buf,
1217  crypto_ablkcipher_blocksize(tfm),
1218  crypto_ablkcipher_blocksize(tfm) * 2,
1219  &msg_offset, 1);
1220 
1221  /* Put size into message */
1222  sep_write_msg(ta_ctx, &req->nbytes,
1223  sizeof(u32), sizeof(u32), &msg_offset, 0);
1224  } else {
1225  /* Otherwise, fill out dma tables */
1226  ta_ctx->dcb_input_data.app_in_address = src_ptr;
1227  ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1228  ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1229  ta_ctx->dcb_input_data.block_size =
1230  crypto_ablkcipher_blocksize(tfm);
1231  ta_ctx->dcb_input_data.tail_block_size = 0;
1232  ta_ctx->dcb_input_data.is_applet = 0;
1233  ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1234  ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1235 
1236  result = sep_create_dcb_dmatables_context_kernel(
1237  ta_ctx->sep_used,
1238  &ta_ctx->dcb_region,
1239  &ta_ctx->dmatables_region,
1240  &ta_ctx->dma_ctx,
1241  &ta_ctx->dcb_input_data,
1242  1);
1243  if (result) {
1244  dev_warn(&ta_ctx->sep_used->pdev->dev,
1245  "crypto dma table create failed\n");
1246  return -EINVAL;
1247  }
1248 
1249  /* Portion of msg is nulled (no data) */
1250  msg[0] = (u32)0;
1251  msg[1] = (u32)0;
1252  msg[2] = (u32)0;
1253  msg[3] = (u32)0;
1254  msg[4] = (u32)0;
1255  sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1256  sizeof(u32) * 5, &msg_offset, 0);
1257  }
1258 
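1259  /*
1260  * For CBC modes, overwrite the IV inside the vendor's private
1261  * context with the IV the kernel crypto walk is tracking, so
1262  * chaining stays consistent across requests.
1263  */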
1264  sep_dump_ivs(req, "sending data block to sep\n");
1265  if ((ta_ctx->current_request == DES_CBC) &&
1266  (ta_ctx->des_opmode == SEP_DES_CBC)) {
1267 
1268  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1269  "overwrite vendor iv on DES\n");
1270  des_internal = (struct sep_des_internal_context *)
1271  sctx->des_private_ctx.ctx_buf;
1272  memcpy((void *)des_internal->iv_context,
1273  ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1274  } else if ((ta_ctx->current_request == AES_CBC) &&
1275  (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1276 
1277  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1278  "overwrite vendor iv on AES\n");
1279  aes_internal = (struct sep_aes_internal_context *)
1280  sctx->aes_private_ctx.cbuff;
1281  memcpy((void *)aes_internal->aes_ctx_iv,
1282  ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1283  }
1284 
1285  /* Write context into message */
1286  if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1287  sep_write_context(ta_ctx, &msg_offset,
1288  &sctx->des_private_ctx,
1289  sizeof(struct sep_des_private_context));
1290  } else {
1291  sep_write_context(ta_ctx, &msg_offset,
1292  &sctx->aes_private_ctx,
1293  sizeof(struct sep_aes_private_context));
1294  }
1295 
1296  /* conclude message */
1297  sep_end_msg(ta_ctx, msg_offset);
1298 
1299  /* Parent (caller) is now ready to tell the sep to go ahead */
1300  return 0;
1301 }
1302 
1303 
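1304 /**
1305  * sep_crypto_send_key - build the key/IV init message for a cipher
1306  * @req: ablkcipher request
1307  *
1308  * Saves the IV for CBC modes, then writes the IV, key, and mode
1309  * words into the message. Returns 0 when the message is ready.
1310  */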
1311 static int sep_crypto_send_key(struct ablkcipher_request *req)
1312 {
1313 
1314  int int_error;
1315  u32 msg_offset;
1316  static u32 msg[10];
1317 
1318  u32 max_length;
1319  struct this_task_ctx *ta_ctx;
1320  struct crypto_ablkcipher *tfm;
1321  struct sep_system_ctx *sctx;
1322 
1323  ta_ctx = ablkcipher_request_ctx(req);
1324  tfm = crypto_ablkcipher_reqtfm(req);
1325  sctx = crypto_ablkcipher_ctx(tfm);
1326 
1327  dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1328 
1329  /* start the walk on scatterlists */
1330  ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1331  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1332  "sep crypto block data size of %x\n", req->nbytes);
1333 
1334  int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1335  if (int_error) {
1336  dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1337  int_error);
1338  return -ENOMEM;
1339  }
1340 
1341  /* check iv */
1342  if ((ta_ctx->current_request == DES_CBC) &&
1343  (ta_ctx->des_opmode == SEP_DES_CBC)) {
1344  if (!ta_ctx->walk.iv) {
1345  dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1346  return -EINVAL;
1347  }
1348 
1349  memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1350  }
1351 
1352  if ((ta_ctx->current_request == AES_CBC) &&
1353  (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1354  if (!ta_ctx->walk.iv) {
1355  dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1356  return -EINVAL;
1357  }
1358 
1359  memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1360  }
1361 
1362  /* put together message to SEP */
1363  /* Start with op code */
1364  sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1365 
1366  /* now deal with IV */
1367  if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1368  if (ta_ctx->des_opmode == SEP_DES_CBC) {
1369  sep_write_msg(ta_ctx, ta_ctx->iv,
1370  SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1371  &msg_offset, 1);
1372  } else {
1373  /* Skip if ECB */
1374  msg_offset += 4 * sizeof(u32);
1375  }
1376  } else {
1377  max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1378  sizeof(u32)) * sizeof(u32);
1379  if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1380  sep_write_msg(ta_ctx, ta_ctx->iv,
1381  SEP_AES_IV_SIZE_BYTES, max_length,
1382  &msg_offset, 1);
1383  } else {
1384  /* Skip if ECB */
1385  msg_offset += max_length;
1386  }
1387  }
1388 
1389  /* load the key */
1390  if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1391  sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1392  sizeof(u32) * 8, sizeof(u32) * 8,
1393  &msg_offset, 1);
1394 
1395  msg[0] = (u32)sctx->des_nbr_keys;
1396  msg[1] = (u32)ta_ctx->des_encmode;
1397  msg[2] = (u32)ta_ctx->des_opmode;
1398 
1399  sep_write_msg(ta_ctx, (void *)msg,
1400  sizeof(u32) * 3, sizeof(u32) * 3,
1401  &msg_offset, 0);
1402  } else {
1403  sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1404  sctx->keylen,
1405  SEP_AES_MAX_KEY_SIZE_BYTES,
1406  &msg_offset, 1);
1407 
1408  msg[0] = (u32)sctx->aes_key_size;
1409  msg[1] = (u32)ta_ctx->aes_encmode;
1410  msg[2] = (u32)ta_ctx->aes_opmode;
1411  msg[3] = (u32)0; /* Secret key is not used */
1412  sep_write_msg(ta_ctx, (void *)msg,
1413  sizeof(u32) * 4, sizeof(u32) * 4,
1414  &msg_offset, 0);
1415  }
1416 
1417  /* conclude message */
1418  sep_end_msg(ta_ctx, msg_offset);
1419 
1420  /* Parent (caller) is now ready to tell the sep to go ahead */
1421  return 0;
1422 }
1423 
1424 
1425 /* This needs to be run as a work queue as it can be put to sleep */
1426 static void sep_crypto_block(void *data)
1427 {
1428  unsigned long end_time;
1429 
1430  int result;
1431 
1432  struct ablkcipher_request *req;
1433  struct this_task_ctx *ta_ctx;
1434  struct crypto_ablkcipher *tfm;
1435  struct sep_system_ctx *sctx;
1436  int are_we_done_yet;
1437 
1438  req = (struct ablkcipher_request *)data;
1439  ta_ctx = ablkcipher_request_ctx(req);
1440  tfm = crypto_ablkcipher_reqtfm(req);
1441  sctx = crypto_ablkcipher_ctx(tfm);
1442 
1443  ta_ctx->are_we_done_yet = &are_we_done_yet;
1444 
1445  pr_debug("sep_crypto_block\n");
1446  pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1447  tfm, sctx, ta_ctx);
1448  pr_debug("key_sent is %d\n", sctx->key_sent);
1449 
1450  /* do we need to send the key */
1451  if (sctx->key_sent == 0) {
1452  are_we_done_yet = 0;
1453  result = sep_crypto_send_key(req); /* prep to send key */
1454  if (result != 0) {
1455  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1456  "could not prep key %x\n", result);
1457  sep_crypto_release(sctx, ta_ctx, result);
1458  return;
1459  }
1460 
1461  result = sep_crypto_take_sep(ta_ctx);
1462  if (result) {
1463  dev_warn(&ta_ctx->sep_used->pdev->dev,
1464  "sep_crypto_take_sep for key send failed\n");
1465  sep_crypto_release(sctx, ta_ctx, result);
1466  return;
1467  }
1468 
1469  /* now we sit and wait up to a fixed time for completion */
1470  end_time = jiffies + (WAIT_TIME * HZ);
1471  while ((time_before(jiffies, end_time)) &&
1472  (are_we_done_yet == 0))
1473  schedule();
1474 
1475  /* Done waiting; still not done yet? */
1476  if (are_we_done_yet == 0) {
1477  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1478  "Send key job never got done\n");
1479  sep_crypto_release(sctx, ta_ctx, -EINVAL);
1480  return;
1481  }
1482 
1483  /* Set the key sent variable so this can be skipped later */
1484  sctx->key_sent = 1;
1485  }
1486 
1487  /* Key sent (or maybe not if we did not have to), now send block */
1488  are_we_done_yet = 0;
1489 
1490  result = sep_crypto_block_data(req);
1491 
1492  if (result != 0) {
1493  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1494  "could not prep send block %x\n", result);
1495  sep_crypto_release(sctx, ta_ctx, result);
1496  return;
1497  }
1498 
1499  result = sep_crypto_take_sep(ta_ctx);
1500  if (result) {
1501  dev_warn(&ta_ctx->sep_used->pdev->dev,
1502  "sep_crypto_take_sep for block send failed\n");
1503  sep_crypto_release(sctx, ta_ctx, result);
1504  return;
1505  }
1506 
1507  /* now we sit and wait up to a fixed time for completion */
1508  end_time = jiffies + (WAIT_TIME * HZ);
1509  while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1510  schedule();
1511 
1512  /* Done waiting; still not done yet? */
1513  if (are_we_done_yet == 0) {
1514  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1515  "Send block job never got done\n");
1516  sep_crypto_release(sctx, ta_ctx, -EINVAL);
1517  return;
1518  }
1519 
1520  /* That's it; entire thing done, get out of queue */
1521 
1522  pr_debug("crypto_block leaving\n");
1523  pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1524 }
1525 
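1526 /**
1527  * crypto_post_op - process the SEP reply for a cipher operation
1528  */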
1529 static u32 crypto_post_op(struct sep_device *sep)
1530 {
1531  /* HERE */
1532  u32 u32_error;
1533  u32 msg_offset;
1534 
1535  ssize_t copy_result;
1536  static char small_buf[100];
1537 
1538  struct ablkcipher_request *req;
1539  struct this_task_ctx *ta_ctx;
1540  struct sep_system_ctx *sctx;
1541  struct crypto_ablkcipher *tfm;
1542 
1543  struct sep_des_internal_context *des_internal;
1544  struct sep_aes_internal_context *aes_internal;
1545 
1546  if (!sep->current_cypher_req)
1547  return -EINVAL;
1548 
1549  /* hold req since we need to submit work after clearing sep */
1550  req = sep->current_cypher_req;
1551 
1552  ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1553  tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1554  sctx = crypto_ablkcipher_ctx(tfm);
1555 
1556  pr_debug("crypto_post op\n");
1557  pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1558  sctx->key_sent, tfm, sctx, ta_ctx);
1559 
1560  dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1561  dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1562 
1563  /* first bring msg from shared area to local area */
1564  memcpy(ta_ctx->msg, sep->shared_addr,
1565  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1566 
1567  /* Is this the result of performing init (key to SEP)? */
1568  if (sctx->key_sent == 0) {
1569 
1570  /* Did SEP do it okay */
1571  u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1572  &msg_offset);
1573  if (u32_error) {
1574  dev_warn(&ta_ctx->sep_used->pdev->dev,
1575  "aes init error %x\n", u32_error);
1576  sep_crypto_release(sctx, ta_ctx, u32_error);
1577  return u32_error;
1578  }
1579 
1580  /* Read Context */
1581  if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1582  sep_read_context(ta_ctx, &msg_offset,
1583  &sctx->des_private_ctx,
1584  sizeof(struct sep_des_private_context));
1585  } else {
1586  sep_read_context(ta_ctx, &msg_offset,
1587  &sctx->aes_private_ctx,
1588  sizeof(struct sep_aes_private_context));
1589  }
1590 
1591  sep_dump_ivs(req, "after sending key to sep\n");
1592 
1593  /* key sent went okay; release sep, and set are_we_done_yet */
1594  sctx->key_sent = 1;
1595  sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1596 
1597  } else {
1598 
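1599  /*
1600  * Otherwise this is the reply to a data block request.
1601  */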
1602  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1603  "crypto_post_op block response\n");
1604 
1605  u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1606  &msg_offset);
1607 
1608  if (u32_error) {
1609  dev_warn(&ta_ctx->sep_used->pdev->dev,
1610  "sep block error %x\n", u32_error);
1611  sep_crypto_release(sctx, ta_ctx, u32_error);
1612  return -EINVAL;
1613  }
1614 
1615  if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1616 
1617  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1618  "post op for DES\n");
1619 
1620  /* special case for 1 block des */
1621  if (sep->current_cypher_req->nbytes ==
1622  crypto_ablkcipher_blocksize(tfm)) {
1623 
1624  sep_read_msg(ta_ctx, small_buf,
1625  crypto_ablkcipher_blocksize(tfm),
1626  crypto_ablkcipher_blocksize(tfm) * 2,
1627  &msg_offset, 1);
1628 
1629  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1630  "reading in block des\n");
1631 
1632  copy_result = sg_copy_from_buffer(
1633  ta_ctx->dst_sg,
1634  sep_sg_nents(ta_ctx->dst_sg),
1635  small_buf,
1636  crypto_ablkcipher_blocksize(tfm));
1637 
1638  if (copy_result !=
1639  crypto_ablkcipher_blocksize(tfm)) {
1640 
1641  dev_warn(&ta_ctx->sep_used->pdev->dev,
1642  "des block copy failed\n");
1643  sep_crypto_release(sctx, ta_ctx,
1644  -ENOMEM);
1645  return -ENOMEM;
1646  }
1647  }
1648 
1649  /* Read Context */
1650  sep_read_context(ta_ctx, &msg_offset,
1651  &sctx->des_private_ctx,
1652  sizeof(struct sep_des_private_context));
1653  } else {
1654 
1655  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1656  "post op for AES\n");
1657 
1658  /* Skip the MAC Output */
1659  msg_offset += (sizeof(u32) * 4);
1660 
1661  /* Read Context */
1662  sep_read_context(ta_ctx, &msg_offset,
1663  &sctx->aes_private_ctx,
1664  sizeof(struct sep_aes_private_context));
1665  }
1666 
1667  /* Copy to correct sg if this block had oddball pages */
1668  if (ta_ctx->dst_sg_hold)
1669  sep_copy_sg(ta_ctx->sep_used,
1670  ta_ctx->dst_sg,
1671  ta_ctx->current_cypher_req->dst,
1672  ta_ctx->current_cypher_req->nbytes);
1673 
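1674  /*
1675  * For CBC modes, copy the result IV from the vendor context
1676  * back into the crypto walk for the next request.
1677  */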
1678  sep_dump_ivs(req, "got data block from sep\n");
1679  if ((ta_ctx->current_request == DES_CBC) &&
1680  (ta_ctx->des_opmode == SEP_DES_CBC)) {
1681 
1682  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1683  "returning result iv to walk on DES\n");
1684  des_internal = (struct sep_des_internal_context *)
1685  sctx->des_private_ctx.ctx_buf;
1686  memcpy(ta_ctx->walk.iv,
1687  (void *)des_internal->iv_context,
1688  crypto_ablkcipher_ivsize(tfm));
1689  } else if ((ta_ctx->current_request == AES_CBC) &&
1690  (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1691 
1692  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1693  "returning result iv to walk on AES\n");
1694  aes_internal = (struct sep_aes_internal_context *)
1695  sctx->aes_private_ctx.cbuff;
1696  memcpy(ta_ctx->walk.iv,
1697  (void *)aes_internal->aes_ctx_iv,
1698  crypto_ablkcipher_ivsize(tfm));
1699  }
1700 
1701  /* finished, release everything */
1702  sep_crypto_release(sctx, ta_ctx, 0);
1703  }
1704  pr_debug("crypto_post_op done\n");
1705  pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1706  sctx->key_sent, tfm, sctx, ta_ctx);
1707 
1708  return 0;
1709 }
1710 
1711 static u32 hash_init_post_op(struct sep_device *sep)
1712 {
1713  u32 u32_error;
1714  u32 msg_offset;
1715  struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1716  struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1717  struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1718  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1719  "hash init post op\n");
1720 
1721  /* first bring msg from shared area to local area */
1722  memcpy(ta_ctx->msg, sep->shared_addr,
1723  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1724 
1725  u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1726  &msg_offset);
1727 
1728  if (u32_error) {
1729  dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1730  u32_error);
1731  sep_crypto_release(sctx, ta_ctx, u32_error);
1732  return u32_error;
1733  }
1734 
1735  /* Read Context */
1736  sep_read_context(ta_ctx, &msg_offset,
1737  &sctx->hash_private_ctx,
1738  sizeof(struct sep_hash_private_context));
1739 
1740  /* Signal to crypto infrastructure and clear out */
1741  dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1742  sep_crypto_release(sctx, ta_ctx, 0);
1743  return 0;
1744 }
1745 
1746 static u32 hash_update_post_op(struct sep_device *sep)
1747 {
1748  u32 u32_error;
1749  u32 msg_offset;
1750  struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1751  struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1752  struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1753  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1754  "hash update post op\n");
1755 
1756  /* first bring msg from shared area to local area */
1757  memcpy(ta_ctx->msg, sep->shared_addr,
1758  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1759 
1760  u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1761  &msg_offset);
1762 
1763  if (u32_error) {
1764  dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1765  u32_error);
1766  sep_crypto_release(sctx, ta_ctx, u32_error);
1767  return u32_error;
1768  }
1769 
1770  /* Read Context */
1771  sep_read_context(ta_ctx, &msg_offset,
1772  &sctx->hash_private_ctx,
1773  sizeof(struct sep_hash_private_context));
1774 
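1775  /*
1776  * If this update was the data half of a finup operation, move
1777  * the stage on to the finish half and queue it immediately.
1778  */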
1781  if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1782 
1783  /* first reset stage to HASH_FINUP_FINISH */
1784  ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1785 
1786  /* now enqueue the finish operation */
1787  spin_lock_irq(&queue_lock);
1788  u32_error = crypto_enqueue_request(&sep_queue,
1789  &ta_ctx->sep_used->current_hash_req->base);
1790  spin_unlock_irq(&queue_lock);
1791 
1792  if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1793  dev_warn(&ta_ctx->sep_used->pdev->dev,
1794  "sep cypher post op can't queue\n");
1795  sep_crypto_release(sctx, ta_ctx, u32_error);
1796  return u32_error;
1797  }
1798 
1799  /* schedule the data send */
1800  u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1801  sep_dequeuer, (void *)&sep_queue);
1802 
1803  if (u32_error) {
1804  dev_warn(&ta_ctx->sep_used->pdev->dev,
1805  "can't submit work sep_crypto_block\n");
1806  sep_crypto_release(sctx, ta_ctx, -EINVAL);
1807  return -EINVAL;
1808  }
1809  }
1810 
1811  /* Signal to crypto infrastructure and clear out */
1812  dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1813  sep_crypto_release(sctx, ta_ctx, 0);
1814  return 0;
1815 }
1816 
1817 static u32 hash_final_post_op(struct sep_device *sep)
1818 {
1819  int max_length;
1820  u32 u32_error;
1821  u32 msg_offset;
1822  struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1823  struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1824  struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1825  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1826  "hash final post op\n");
1827 
1828  /* first bring msg from shared area to local area */
1829  memcpy(ta_ctx->msg, sep->shared_addr,
1830  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1831 
1832  u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1833  &msg_offset);
1834 
1835  if (u32_error) {
1836  dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1837  u32_error);
1838  sep_crypto_release(sctx, ta_ctx, u32_error);
1839  return u32_error;
1840  }
1841 
1842  /* Grab the result */
1843  if (ta_ctx->current_hash_req->result == NULL) {
1844  /* Oops, null buffer; error out here */
1845  dev_warn(&ta_ctx->sep_used->pdev->dev,
1846  "hash finish null buffer\n");
1847  sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1848  return -ENOMEM;
1849  }
1850 
1851  max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1852  sizeof(u32)) * sizeof(u32);
1853 
1854  sep_read_msg(ta_ctx,
1855  ta_ctx->current_hash_req->result,
1856  crypto_ahash_digestsize(tfm), max_length,
1857  &msg_offset, 0);
1858 
1859  /* Signal to crypto infrastructure and clear out */
1860  dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1861  sep_crypto_release(sctx, ta_ctx, 0);
1862  return 0;
1863 }
1864 
1865 static u32 hash_digest_post_op(struct sep_device *sep)
1866 {
1867  int max_length;
1868  u32 u32_error;
1869  u32 msg_offset;
1870  struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1871  struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1872  struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1873  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1874  "hash digest post op\n");
1875 
1876  /* first bring msg from shared area to local area */
1877  memcpy(ta_ctx->msg, sep->shared_addr,
1878  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1879 
1880  u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1881  &msg_offset);
1882 
1883  if (u32_error) {
1884  dev_warn(&ta_ctx->sep_used->pdev->dev,
1885  "hash digest finish error %x\n", u32_error);
1886 
1887  sep_crypto_release(sctx, ta_ctx, u32_error);
1888  return u32_error;
1889  }
1890 
1891  /* Grab the result */
1892  if (ta_ctx->current_hash_req->result == NULL) {
1893  /* Oops, null buffer; error out here */
1894  dev_warn(&ta_ctx->sep_used->pdev->dev,
1895  "hash digest finish null buffer\n");
1896  sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1897  return -ENOMEM;
1898  }
1899 
1900  max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1901  sizeof(u32)) * sizeof(u32);
1902 
1903  sep_read_msg(ta_ctx,
1904  ta_ctx->current_hash_req->result,
1905  crypto_ahash_digestsize(tfm), max_length,
1906  &msg_offset, 0);
1907 
1908  /* Signal to crypto infrastructure and clear out */
1909  dev_dbg(&ta_ctx->sep_used->pdev->dev,
1910  "hash digest finish post op done\n");
1911 
1912  sep_crypto_release(sctx, ta_ctx, 0);
1913  return 0;
1914 }
1915 
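1916 /**
1917  * sep_finish - tasklet side of a SEP completion interrupt
1918  * @data: the struct sep_device, cast to unsigned long
1919  */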
1921 static void sep_finish(unsigned long data)
1922 {
1923  struct sep_device *sep_dev;
1924  int res;
1925 
1926  res = 0;
1927 
1928  if (data == 0) {
1929  pr_debug("sep_finish called with null data\n");
1930  return;
1931  }
1932 
1933  sep_dev = (struct sep_device *)data;
1934  if (sep_dev == NULL) {
1935  pr_debug("sep_finish; sep_dev is NULL\n");
1936  return;
1937  }
1938 
1939  if (sep_dev->in_kernel == (u32)0) {
1940  dev_warn(&sep_dev->pdev->dev,
1941  "sep_finish; not in kernel operation\n");
1942  return;
1943  }
1944 
1945  /* Did we really do a sep command prior to this? */
1946  if (!test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1947  &sep_dev->ta_ctx->call_status.status)) {
1948 
1949  dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
1950  current->pid);
1951  return;
1952  }
1953 
1954  if (sep_dev->send_ct != sep_dev->reply_ct) {
1955  dev_warn(&sep_dev->pdev->dev,
1956  "[PID%d] poll; no message came back\n",
1957  current->pid);
1958  return;
1959  }
1960 
1961  /* Check for error (In case time ran out) */
1962  if ((res != 0x0) && (res != 0x8)) {
1963  dev_warn(&sep_dev->pdev->dev,
1964  "[PID%d] poll; poll error GPR3 is %x\n",
1965  current->pid, res);
1966  return;
1967  }
1968 
1969  /* What kind of interrupt from sep was this? */
1970  res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1971 
1972  dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1973  current->pid, res);
1974 
1975  /* Print request? */
1976  if ((res >> 30) & 0x1) {
1977  dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1978  current->pid);
1979  dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1980  current->pid,
1981  (char *)(sep_dev->shared_addr +
1982  SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1983  return;
1984  }
1985 
1986  /* Request for daemon (not currently in POR)? */
1987  if (res >> 31) {
1988  dev_dbg(&sep_dev->pdev->dev,
1989  "[PID%d] sep request; ignoring\n",
1990  current->pid);
1991  return;
1992  }
1993 
1994  /* If we got here, then we have a reply to a sep command */
1995 
1996  dev_dbg(&sep_dev->pdev->dev,
1997  "[PID%d] sep reply to command; processing request: %x\n",
1998  current->pid, sep_dev->current_request);
1999 
2000  switch (sep_dev->current_request) {
2001  case AES_CBC:
2002  case AES_ECB:
2003  case DES_CBC:
2004  case DES_ECB:
2005  res = crypto_post_op(sep_dev);
2006  break;
2007  case SHA1:
2008  case MD5:
2009  case SHA224:
2010  case SHA256:
2011  switch (sep_dev->current_hash_stage) {
2012  case HASH_INIT:
2013  res = hash_init_post_op(sep_dev);
2014  break;
2015  case HASH_UPDATE:
2016  case HASH_FINUP_DATA:
2017  res = hash_update_post_op(sep_dev);
2018  break;
2019  case HASH_FINUP_FINISH:
2020  case HASH_FINISH:
2021  res = hash_final_post_op(sep_dev);
2022  break;
2023  case HASH_DIGEST:
2024  res = hash_digest_post_op(sep_dev);
2025  break;
2026  default:
2027  pr_debug("sep - invalid stage for hash finish\n");
2028  }
2029  break;
2030  default:
2031  pr_debug("sep - invalid request for finish\n");
2032  }
2033 
2034  if (res)
2035  pr_debug("sep - finish returned error %x\n", res);
2036 }
2037 
2038 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2039  {
2040  const char *alg_name = crypto_tfm_alg_name(tfm);
2041 
2042  pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2043 
2044  crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2045  sizeof(struct this_task_ctx));
2046  return 0;
2047  }
2048 
2049 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2050 {
2051  pr_debug("sep_hash_cra_exit\n");
2052 }
2053 
2054 static void sep_hash_init(void *data)
2055 {
2056  u32 msg_offset;
2057  int result;
2058  struct ahash_request *req;
2059  struct crypto_ahash *tfm;
2060  struct this_task_ctx *ta_ctx;
2061  struct sep_system_ctx *sctx;
2062  unsigned long end_time;
2063  int are_we_done_yet;
2064 
2065  req = (struct ahash_request *)data;
2066  tfm = crypto_ahash_reqtfm(req);
2067  sctx = crypto_ahash_ctx(tfm);
2068  ta_ctx = ahash_request_ctx(req);
2069  ta_ctx->sep_used = sep_dev;
2070 
2071  ta_ctx->are_we_done_yet = &are_we_done_yet;
2072 
2073  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2074  "sep_hash_init\n");
2075  ta_ctx->current_hash_stage = HASH_INIT;
2076  /* opcode and mode */
2077  sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2078  sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2079  sizeof(u32), sizeof(u32), &msg_offset, 0);
2080  sep_end_msg(ta_ctx, msg_offset);
2081 
2082  are_we_done_yet = 0;
2083  result = sep_crypto_take_sep(ta_ctx);
2084  if (result) {
2085  dev_warn(&ta_ctx->sep_used->pdev->dev,
2086  "sep_hash_init take sep failed\n");
2087  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2088  }
2089 
2090  /* now we sit and wait up to a fixed time for completion */
2091  end_time = jiffies + (WAIT_TIME * HZ);
2092  while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2093  schedule();
2094 
2095  /* Done waiting; still not done yet? */
2096  if (are_we_done_yet == 0) {
2097  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2098  "hash init never got done\n");
2099  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2100  return;
2101  }
2102 
2103 }
2104 
2105 static void sep_hash_update(void *data)
2106 {
2107  int int_error;
2108  u32 msg_offset;
2109  u32 len;
2110  struct sep_hash_internal_context *int_ctx;
2111  u32 block_size;
2112  u32 head_len;
2113  u32 tail_len;
2114  int are_we_done_yet;
2115 
2116  static u32 msg[10];
2117  static char small_buf[100];
2118  void *src_ptr;
2119  struct scatterlist *new_sg;
2120  ssize_t copy_result;
2121  struct ahash_request *req;
2122  struct crypto_ahash *tfm;
2123  struct this_task_ctx *ta_ctx;
2124  struct sep_system_ctx *sctx;
2125  unsigned long end_time;
2126 
2127  req = (struct ahash_request *)data;
2128  tfm = crypto_ahash_reqtfm(req);
2129  sctx = crypto_ahash_ctx(tfm);
2130  ta_ctx = ahash_request_ctx(req);
2131  ta_ctx->sep_used = sep_dev;
2132 
2133  ta_ctx->are_we_done_yet = &are_we_done_yet;
2134 
2135  /* length for queue status */
2136  ta_ctx->nbytes = req->nbytes;
2137 
2138  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2139  "sep_hash_update\n");
2140  ta_ctx->current_hash_stage = HASH_UPDATE;
2141  len = req->nbytes;
2142 
2143  block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2144  tail_len = req->nbytes % block_size;
2145  dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2146  dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2147  dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2148 
2149  /* Compute header/tail sizes */
2150  int_ctx = (struct sep_hash_internal_context *)&sctx->
2151  hash_private_ctx.internal_context;
2152  head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2153  tail_len = (req->nbytes - head_len) % block_size;
2154 
2155  /* Make sure all pages are an even block */
2156  int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2157  req->nbytes,
2158  block_size, &new_sg, 1);
2159 
2160  if (int_error < 0) {
2161  dev_warn(&ta_ctx->sep_used->pdev->dev,
2162  "oddball pages error in hash update\n");
2163  sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2164  return;
2165  } else if (int_error == 1) {
2166  ta_ctx->src_sg = new_sg;
2167  ta_ctx->src_sg_hold = new_sg;
2168  } else {
2169  ta_ctx->src_sg = req->src;
2170  ta_ctx->src_sg_hold = NULL;
2171  }
2172 
2173  src_ptr = sg_virt(ta_ctx->src_sg);
2174 
2175  if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2176  /* null data */
2177  src_ptr = NULL;
2178  }
2179 
2180  ta_ctx->dcb_input_data.app_in_address = src_ptr;
2181  ta_ctx->dcb_input_data.data_in_size =
2182  req->nbytes - (head_len + tail_len);
2183  ta_ctx->dcb_input_data.app_out_address = NULL;
2184  ta_ctx->dcb_input_data.block_size = block_size;
2185  ta_ctx->dcb_input_data.tail_block_size = 0;
2186  ta_ctx->dcb_input_data.is_applet = 0;
2187  ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2188  ta_ctx->dcb_input_data.dst_sg = NULL;
2189 
2191  ta_ctx->sep_used,
2192  &ta_ctx->dcb_region,
2193  &ta_ctx->dmatables_region,
2194  &ta_ctx->dma_ctx,
2195  &ta_ctx->dcb_input_data,
2196  1);
2197  if (int_error) {
2198  dev_warn(&ta_ctx->sep_used->pdev->dev,
2199  "hash update dma table create failed\n");
2200  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2201  return;
2202  }
2203 
2204  /* Construct message to SEP */
2205  sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2206 
2207  msg[0] = (u32)0;
2208  msg[1] = (u32)0;
2209  msg[2] = (u32)0;
2210 
2211  sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2212  &msg_offset, 0);
2213 
2214  /* Handle remainders */
2215 
2216  /* Head */
2217  sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2218  sizeof(u32), &msg_offset, 0);
2219 
2220  if (head_len) {
2221  copy_result = sg_copy_to_buffer(
2222  req->src,
2223  sep_sg_nents(ta_ctx->src_sg),
2224  small_buf, head_len);
2225 
2226  if (copy_result != head_len) {
2227  dev_warn(&ta_ctx->sep_used->pdev->dev,
2228  "sg head copy failure in hash block\n");
2229  sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2230  return;
2231  }
2232 
2233  sep_write_msg(ta_ctx, small_buf, head_len,
2234  sizeof(u32) * 32, &msg_offset, 1);
2235  } else {
2236  msg_offset += sizeof(u32) * 32;
2237  }
2238 
2239  /* Tail */
2240  sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2241  sizeof(u32), &msg_offset, 0);
2242 
2243  if (tail_len) {
2244  copy_result = sep_copy_offset_sg(
2245  ta_ctx->sep_used,
2246  ta_ctx->src_sg,
2247  req->nbytes - tail_len,
2248  small_buf, tail_len);
2249 
2250  if (copy_result != tail_len) {
2251  dev_warn(&ta_ctx->sep_used->pdev->dev,
2252  "sg tail copy failure in hash block\n");
2253  sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2254  return;
2255  }
2256 
2257  sep_write_msg(ta_ctx, small_buf, tail_len,
2258  sizeof(u32) * 32, &msg_offset, 1);
2259  } else {
2260  msg_offset += sizeof(u32) * 32;
2261  }
2262 
2263  /* Context */
2264  sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2265  sizeof(struct sep_hash_private_context));
2266 
2267  sep_end_msg(ta_ctx, msg_offset);
2268  are_we_done_yet = 0;
2269  int_error = sep_crypto_take_sep(ta_ctx);
2270  if (int_error) {
2271  dev_warn(&ta_ctx->sep_used->pdev->dev,
2272  "sep_hash_update take sep failed\n");
2273  sep_crypto_release(sctx, ta_ctx, -EINVAL);
      return;
2274  }
2275 
2276  /* now we sit and wait up to a fixed time for completion */
2277  end_time = jiffies + (WAIT_TIME * HZ);
2278  while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2279  schedule();
2280 
2281  /* Done waiting; still not done yet? */
2282  if (are_we_done_yet == 0) {
2283  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2284  "hash update never got done\n");
2285  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2286  return;
2287  }
2288 
2289 }
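
/*
 * Worked example of the head/tail split above, assuming a 64-byte
 * block size, 10 bytes left over from the previous update
 * (prev_update_bytes == 10) and a 200-byte request:
 *
 *	head_len = (64 - 10) % 64  = 54  completes the partial block
 *	tail_len = (200 - 54) % 64 = 18  partial block carried inline
 *	DMA size = 200 - (54 + 18) = 128 two whole blocks
 *
 * Head and tail bytes travel inline in the message; only the whole
 * blocks in the middle go out through the DMA tables.
 */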
2290 
2291 static void sep_hash_final(void *data)
2292 {
2293  u32 msg_offset;
2294  struct ahash_request *req;
2295  struct crypto_ahash *tfm;
2296  struct this_task_ctx *ta_ctx;
2297  struct sep_system_ctx *sctx;
2298  int result;
2299  unsigned long end_time;
2300  int are_we_done_yet;
2301 
2302  req = (struct ahash_request *)data;
2303  tfm = crypto_ahash_reqtfm(req);
2304  sctx = crypto_ahash_ctx(tfm);
2305  ta_ctx = ahash_request_ctx(req);
2306  ta_ctx->sep_used = sep_dev;
2307 
2308  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2309  "sep_hash_final\n");
2310  ta_ctx->current_hash_stage = HASH_FINISH;
2311 
2312  ta_ctx->are_we_done_yet = &are_we_done_yet;
2313 
2314  /* opcode and mode */
2315  sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2316 
2317  /* Context */
2318  sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2319  sizeof(struct sep_hash_private_context));
2320 
2321  sep_end_msg(ta_ctx, msg_offset);
2322  are_we_done_yet = 0;
2323  result = sep_crypto_take_sep(ta_ctx);
2324  if (result) {
2325  dev_warn(&ta_ctx->sep_used->pdev->dev,
2326  "sep_hash_final take sep failed\n");
2327  sep_crypto_release(sctx, ta_ctx, -EINVAL);
      return;
2328  }
2329 
2330  /* now we sit and wait up to a fixed time for completion */
2331  end_time = jiffies + (WAIT_TIME * HZ);
2332  while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2333  schedule();
2334 
2335  /* Done waiting; still not done yet? */
2336  if (are_we_done_yet == 0) {
2337  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2338  "hash final job never got done\n");
2339  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2340  return;
2341  }
2342 
2343 }
2344 
2345 static void sep_hash_digest(void *data)
2346 {
2347  int int_error;
2348  u32 msg_offset;
2349  u32 block_size;
2350  u32 msg[10];
2351  size_t copy_result;
2352  int result;
2353  int are_we_done_yet;
2354  u32 tail_len;
2355  static char small_buf[100];
2356  struct scatterlist *new_sg;
2357  void *src_ptr;
2358 
2359  struct ahash_request *req;
2360  struct crypto_ahash *tfm;
2361  struct this_task_ctx *ta_ctx;
2362  struct sep_system_ctx *sctx;
2363  unsigned long end_time;
2364 
2365  req = (struct ahash_request *)data;
2366  tfm = crypto_ahash_reqtfm(req);
2367  sctx = crypto_ahash_ctx(tfm);
2368  ta_ctx = ahash_request_ctx(req);
2369  ta_ctx->sep_used = sep_dev;
2370 
2371  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2372  "sep_hash_digest\n");
2373  ta_ctx->current_hash_stage = HASH_DIGEST;
2374 
2375  ta_ctx->are_we_done_yet = &are_we_done_yet;
2376 
2377  /* length for queue status */
2378  ta_ctx->nbytes = req->nbytes;
2379 
2380  block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2381  tail_len = req->nbytes % block_size;
2382  dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2383  dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2384  dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2385 
2386  /* Make sure all pages are an even block */
2387  int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2388  req->nbytes,
2389  block_size, &new_sg, 1);
2390 
2391  if (int_error < 0) {
2392  dev_warn(&ta_ctx->sep_used->pdev->dev,
2393  "oddball pages error in hash digest\n");
2394  sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2395  return;
2396  } else if (int_error == 1) {
2397  ta_ctx->src_sg = new_sg;
2398  ta_ctx->src_sg_hold = new_sg;
2399  } else {
2400  ta_ctx->src_sg = req->src;
2401  ta_ctx->src_sg_hold = NULL;
2402  }
2403 
2404  /* null data; don't dereference an empty scatterlist */
2405 
2406  if ((!req->nbytes) || (!ta_ctx->src_sg))
2407  src_ptr = NULL;
2408  else
2409  src_ptr = sg_virt(ta_ctx->src_sg);
2410 
2411  ta_ctx->dcb_input_data.app_in_address = src_ptr;
2412  ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2413  ta_ctx->dcb_input_data.app_out_address = NULL;
2414  ta_ctx->dcb_input_data.block_size = block_size;
2415  ta_ctx->dcb_input_data.tail_block_size = 0;
2416  ta_ctx->dcb_input_data.is_applet = 0;
2417  ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2418  ta_ctx->dcb_input_data.dst_sg = NULL;
2419 
2419 
2420  int_error = sep_create_dcb_dmatables_context_kernel(
2421  ta_ctx->sep_used,
2422  &ta_ctx->dcb_region,
2423  &ta_ctx->dmatables_region,
2424  &ta_ctx->dma_ctx,
2425  &ta_ctx->dcb_input_data,
2426  1);
2427  if (int_error) {
2428  dev_warn(&ta_ctx->sep_used->pdev->dev,
2429  "hash digest dma table create failed\n");
2430  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2431  return;
2432  }
2433 
2434  /* Construct message to SEP */
2435  sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2436  sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2437  sizeof(u32), sizeof(u32), &msg_offset, 0);
2438 
2439  msg[0] = (u32)0;
2440  msg[1] = (u32)0;
2441  msg[2] = (u32)0;
2442 
2443  sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2444  &msg_offset, 0);
2445 
2446  /* Tail */
2447  sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2448  sizeof(u32), &msg_offset, 0);
2449 
2450  if (tail_len) {
2451  copy_result = sep_copy_offset_sg(
2452  ta_ctx->sep_used,
2453  ta_ctx->src_sg,
2454  req->nbytes - tail_len,
2455  small_buf, tail_len);
2456 
2457  if (copy_result != tail_len) {
2458  dev_warn(&ta_ctx->sep_used->pdev->dev,
2459  "sg tail copy failure in hash block\n");
2460  sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2461  return;
2462  }
2463 
2464  sep_write_msg(ta_ctx, small_buf, tail_len,
2465  sizeof(u32) * 32, &msg_offset, 1);
2466  } else {
2467  msg_offset += sizeof(u32) * 32;
2468  }
2469 
2470  sep_end_msg(ta_ctx, msg_offset);
2471 
2472  are_we_done_yet = 0;
2473  result = sep_crypto_take_sep(ta_ctx);
2474  if (result) {
2475  dev_warn(&ta_ctx->sep_used->pdev->dev,
2476  "sep_hash_digest take sep failed\n");
2477  sep_crypto_release(sctx, ta_ctx, -EINVAL);
      return;
2478  }
2479 
2480  /* now we sit and wait up to a fixed time for completion */
2481  end_time = jiffies + (WAIT_TIME * HZ);
2482  while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2483  schedule();
2484 
2485  /* Done waiting; still not done yet? */
2486  if (are_we_done_yet == 0) {
2487  dev_dbg(&ta_ctx->sep_used->pdev->dev,
2488  "hash digest job never got done\n");
2489  sep_crypto_release(sctx, ta_ctx, -EINVAL);
2490  return;
2491  }
2492 
2493 }
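
/*
 * For illustration only (not part of this driver): a kernel caller
 * would reach these handlers through the standard ahash API, roughly:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_crypt(req, sg, digest_buf, nbytes);
 *	crypto_ahash_digest(req);
 *
 * With cra_priority 100 this driver normally wins the "sha1" lookup,
 * so the call lands in sep_sha1_digest(). The request completes
 * asynchronously from the sep workqueue, so a real caller must supply
 * a completion callback rather than read the digest immediately.
 */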
2494 
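/**
 *	sep_dequeuer - service the shared crypto queue
 *	@data: crypto queue to pull from (always &sep_queue here)
 *
 *	Runs from the driver workqueue. Completes any backlogged
 *	request with -EINPROGRESS, pops the next request, and routes
 *	it either to the cipher handler or to the hash handler that
 *	matches the stage recorded in the task context.
 */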
2501 static void sep_dequeuer(void *data)
2502 {
2503  struct crypto_queue *this_queue;
2504  struct crypto_async_request *async_req;
2505  struct crypto_async_request *backlog;
2506  struct ablkcipher_request *cypher_req;
2507  struct ahash_request *hash_req;
2508  struct sep_system_ctx *sctx;
2509  struct crypto_ahash *hash_tfm;
2510  struct this_task_ctx *ta_ctx;
2511 
2512 
2513  this_queue = (struct crypto_queue *)data;
2514 
2515  spin_lock_irq(&queue_lock);
2516  backlog = crypto_get_backlog(this_queue);
2517  async_req = crypto_dequeue_request(this_queue);
2518  spin_unlock_irq(&queue_lock);
2519 
2520  if (!async_req) {
2521  pr_debug("sep crypto queue is empty\n");
2522  return;
2523  }
2524 
2525  if (backlog) {
2526  pr_debug("sep crypto backlog set\n");
2527  if (backlog->complete)
2528  backlog->complete(backlog, -EINPROGRESS);
2529  backlog = NULL;
2530  }
2531 
2532  if (!async_req->tfm) {
2533  pr_debug("sep crypto queue null tfm\n");
2534  return;
2535  }
2536 
2537  if (!async_req->tfm->__crt_alg) {
2538  pr_debug("sep crypto queue null __crt_alg\n");
2539  return;
2540  }
2541 
2542  if (!async_req->tfm->__crt_alg->cra_type) {
2543  pr_debug("sep crypto queue null cra_type\n");
2544  return;
2545  }
2546 
2547  /* we have stuff in the queue */
2548  if (async_req->tfm->__crt_alg->cra_type !=
2549  &crypto_ahash_type) {
2550  /* This is for a cypher */
2551  pr_debug("sep crypto queue doing cipher\n");
2552  cypher_req = container_of(async_req,
2553  struct ablkcipher_request,
2554  base);
2555  if (!cypher_req) {
2556  pr_debug("sep crypto queue null cypher_req\n");
2557  return;
2558  }
2559 
2560  sep_crypto_block((void *)cypher_req);
2561  return;
2562  } else {
2563  /* This is a hash */
2564  pr_debug("sep crypto queue doing hash\n");
2569  hash_req = ahash_request_cast(async_req);
2570  if (!hash_req) {
2571  pr_debug("sep crypto queue null hash_req\n");
2572  return;
2573  }
2574 
2575  hash_tfm = crypto_ahash_reqtfm(hash_req);
2576  if (!hash_tfm) {
2577  pr_debug("sep crypto queue null hash_tfm\n");
2578  return;
2579  }
2580 
2581 
2582  sctx = crypto_ahash_ctx(hash_tfm);
2583  if (!sctx) {
2584  pr_debug("sep crypto queue null sctx\n");
2585  return;
2586  }
2587 
2588  ta_ctx = ahash_request_ctx(hash_req);
2589 
2590  if (ta_ctx->current_hash_stage == HASH_INIT) {
2591  pr_debug("sep crypto queue hash init\n");
2592  sep_hash_init((void *)hash_req);
2593  return;
2594  } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2595  pr_debug("sep crypto queue hash update\n");
2596  sep_hash_update((void *)hash_req);
2597  return;
2598  } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2599  pr_debug("sep crypto queue hash final\n");
2600  sep_hash_final((void *)hash_req);
2601  return;
2602  } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2603  pr_debug("sep crypto queue hash digest\n");
2604  sep_hash_digest((void *)hash_req);
2605  return;
2606  } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2607  pr_debug("sep crypto queue hash finup data\n");
2608  sep_hash_update((void *)hash_req);
2609  return;
2610  } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2611  pr_debug("sep crypto queue hash finup finish\n");
2612  sep_hash_final((void *)hash_req);
2613  return;
2614  } else {
2615  pr_debug("sep crypto queue hash: unknown stage\n");
2616  return;
2617  }
2618  }
2619 }
2620 
2621 static int sep_sha1_init(struct ahash_request *req)
2622 {
2623  int error;
2624  int error1;
2625  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2626 
2627  pr_debug("sep - doing sha1 init\n");
2628 
2629  /* Clear out task context */
2630  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2631 
2632  ta_ctx->sep_used = sep_dev;
2633  ta_ctx->current_request = SHA1;
2634  ta_ctx->current_hash_req = req;
2635  ta_ctx->current_cypher_req = NULL;
2636  ta_ctx->hash_opmode = SEP_HASH_SHA1;
2637  ta_ctx->current_hash_stage = HASH_INIT;
2638 
2639  /* lock necessary so that only one entity touches the queues */
2640  spin_lock_irq(&queue_lock);
2641  error = crypto_enqueue_request(&sep_queue, &req->base);
2642 
2643  if ((error != 0) && (error != -EINPROGRESS))
2644  pr_debug(" sep - crypto enqueue failed: %x\n",
2645  error);
2646  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2647  sep_dequeuer, (void *)&sep_queue);
2648  if (error1)
2649  pr_debug(" sep - workqueue submit failed: %x\n",
2650  error1);
2651  spin_unlock_irq(&queue_lock);
2652  /* We return result of crypto enqueue */
2653  return error;
2654 }
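
/*
 * Every algorithm entry point from here down has the same shape:
 * stamp the per-request task context, enqueue the request on
 * sep_queue under queue_lock, then kick sep_dequeuer onto the driver
 * workqueue. The value handed back to the crypto API is the enqueue
 * result (normally -EINPROGRESS); the actual work happens later in
 * the dequeuer.
 */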
2655 
2656 static int sep_sha1_update(struct ahash_request *req)
2657 {
2658  int error;
2659  int error1;
2660  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2661 
2662  pr_debug("sep - doing sha1 update\n");
2663 
2664  ta_ctx->sep_used = sep_dev;
2665  ta_ctx->current_request = SHA1;
2666  ta_ctx->current_hash_req = req;
2667  ta_ctx->current_cypher_req = NULL;
2668  ta_ctx->hash_opmode = SEP_HASH_SHA1;
2669  ta_ctx->current_hash_stage = HASH_UPDATE;
2670 
2671  /* lock necessary so that only one entity touches the queues */
2672  spin_lock_irq(&queue_lock);
2673  error = crypto_enqueue_request(&sep_queue, &req->base);
2674 
2675  if ((error != 0) && (error != -EINPROGRESS))
2676  pr_debug(" sep - crypto enqueue failed: %x\n",
2677  error);
2678  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2679  sep_dequeuer, (void *)&sep_queue);
2680  if (error1)
2681  pr_debug(" sep - workqueue submit failed: %x\n",
2682  error1);
2683  spin_unlock_irq(&queue_lock);
2684  /* We return result of crypto enqueue */
2685  return error;
2686 }
2687 
2688 static int sep_sha1_final(struct ahash_request *req)
2689 {
2690  int error;
2691  int error1;
2692  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2693  pr_debug("sep - doing sha1 final\n");
2694 
2695  ta_ctx->sep_used = sep_dev;
2696  ta_ctx->current_request = SHA1;
2697  ta_ctx->current_hash_req = req;
2698  ta_ctx->current_cypher_req = NULL;
2699  ta_ctx->hash_opmode = SEP_HASH_SHA1;
2700  ta_ctx->current_hash_stage = HASH_FINISH;
2701 
2702  /* lock necessary so that only one entity touches the queues */
2703  spin_lock_irq(&queue_lock);
2704  error = crypto_enqueue_request(&sep_queue, &req->base);
2705 
2706  if ((error != 0) && (error != -EINPROGRESS))
2707  pr_debug(" sep - crypto enqueue failed: %x\n",
2708  error);
2709  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2710  sep_dequeuer, (void *)&sep_queue);
2711  if (error1)
2712  pr_debug(" sep - workqueue submit failed: %x\n",
2713  error1);
2714  spin_unlock_irq(&queue_lock);
2715  /* We return result of crypto enqueue */
2716  return error;
2717 }
2718 
2719 static int sep_sha1_digest(struct ahash_request *req)
2720 {
2721  int error;
2722  int error1;
2723  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2724  pr_debug("sep - doing sha1 digest\n");
2725 
2726  /* Clear out task context */
2727  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2728 
2729  ta_ctx->sep_used = sep_dev;
2730  ta_ctx->current_request = SHA1;
2731  ta_ctx->current_hash_req = req;
2732  ta_ctx->current_cypher_req = NULL;
2733  ta_ctx->hash_opmode = SEP_HASH_SHA1;
2734  ta_ctx->current_hash_stage = HASH_DIGEST;
2735 
2736  /* lock necessary so that only one entity touches the queues */
2737  spin_lock_irq(&queue_lock);
2738  error = crypto_enqueue_request(&sep_queue, &req->base);
2739 
2740  if ((error != 0) && (error != -EINPROGRESS))
2741  pr_debug(" sep - crypto enqueue failed: %x\n",
2742  error);
2743  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2744  sep_dequeuer, (void *)&sep_queue);
2745  if (error1)
2746  pr_debug(" sep - workqueue submit failed: %x\n",
2747  error1);
2748  spin_unlock_irq(&queue_lock);
2749  /* We return result of crypto enqueue */
2750  return error;
2751 }
2752 
2753 static int sep_sha1_finup(struct ahash_request *req)
2754 {
2755  int error;
2756  int error1;
2757  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2758  pr_debug("sep - doing sha1 finup\n");
2759 
2760  ta_ctx->sep_used = sep_dev;
2761  ta_ctx->current_request = SHA1;
2762  ta_ctx->current_hash_req = req;
2763  ta_ctx->current_cypher_req = NULL;
2764  ta_ctx->hash_opmode = SEP_HASH_SHA1;
2765  ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2766 
2767  /* lock necessary so that only one entity touches the queues */
2768  spin_lock_irq(&queue_lock);
2769  error = crypto_enqueue_request(&sep_queue, &req->base);
2770 
2771  if ((error != 0) && (error != -EINPROGRESS))
2772  pr_debug(" sep - crypto enqueue failed: %x\n",
2773  error);
2774  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2775  sep_dequeuer, (void *)&sep_queue);
2776  if (error1)
2777  pr_debug(" sep - workqueue submit failed: %x\n",
2778  error1);
2779  spin_unlock_irq(&queue_lock);
2780  /* We return result of crypto enqueue */
2781  return error;
2782 }
2783 
2784 static int sep_md5_init(struct ahash_request *req)
2785 {
2786  int error;
2787  int error1;
2788  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2789  pr_debug("sep - doing md5 init\n");
2790 
2791  /* Clear out task context */
2792  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2793 
2794  ta_ctx->sep_used = sep_dev;
2795  ta_ctx->current_request = MD5;
2796  ta_ctx->current_hash_req = req;
2797  ta_ctx->current_cypher_req = NULL;
2798  ta_ctx->hash_opmode = SEP_HASH_MD5;
2799  ta_ctx->current_hash_stage = HASH_INIT;
2800 
2801  /* lock necessary so that only one entity touches the queues */
2802  spin_lock_irq(&queue_lock);
2803  error = crypto_enqueue_request(&sep_queue, &req->base);
2804 
2805  if ((error != 0) && (error != -EINPROGRESS))
2806  pr_debug(" sep - crypto enqueue failed: %x\n",
2807  error);
2808  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2809  sep_dequeuer, (void *)&sep_queue);
2810  if (error1)
2811  pr_debug(" sep - workqueue submit failed: %x\n",
2812  error1);
2813  spin_unlock_irq(&queue_lock);
2814  /* We return result of crypto enqueue */
2815  return error;
2816 }
2817 
2818 static int sep_md5_update(struct ahash_request *req)
2819 {
2820  int error;
2821  int error1;
2822  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2823  pr_debug("sep - doing md5 update\n");
2824 
2825  ta_ctx->sep_used = sep_dev;
2826  ta_ctx->current_request = MD5;
2827  ta_ctx->current_hash_req = req;
2828  ta_ctx->current_cypher_req = NULL;
2829  ta_ctx->hash_opmode = SEP_HASH_MD5;
2830  ta_ctx->current_hash_stage = HASH_UPDATE;
2831 
2832  /* lock necessary so that only one entity touches the queues */
2833  spin_lock_irq(&queue_lock);
2834  error = crypto_enqueue_request(&sep_queue, &req->base);
2835 
2836  if ((error != 0) && (error != -EINPROGRESS))
2837  pr_debug(" sep - crypto enqueue failed: %x\n",
2838  error);
2839  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2840  sep_dequeuer, (void *)&sep_queue);
2841  if (error1)
2842  pr_debug(" sep - workqueue submit failed: %x\n",
2843  error1);
2844  spin_unlock_irq(&queue_lock);
2845  /* We return result of crypto enqueue */
2846  return error;
2847 }
2848 
2849 static int sep_md5_final(struct ahash_request *req)
2850 {
2851  int error;
2852  int error1;
2853  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2854  pr_debug("sep - doing md5 final\n");
2855 
2856  ta_ctx->sep_used = sep_dev;
2857  ta_ctx->current_request = MD5;
2858  ta_ctx->current_hash_req = req;
2859  ta_ctx->current_cypher_req = NULL;
2860  ta_ctx->hash_opmode = SEP_HASH_MD5;
2861  ta_ctx->current_hash_stage = HASH_FINISH;
2862 
2863  /* lock necessary so that only one entity touches the queues */
2864  spin_lock_irq(&queue_lock);
2865  error = crypto_enqueue_request(&sep_queue, &req->base);
2866 
2867  if ((error != 0) && (error != -EINPROGRESS))
2868  pr_debug(" sep - crypto enqueue failed: %x\n",
2869  error);
2870  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2871  sep_dequeuer, (void *)&sep_queue);
2872  if (error1)
2873  pr_debug(" sep - workqueue submit failed: %x\n",
2874  error1);
2875  spin_unlock_irq(&queue_lock);
2876  /* We return result of crypto enqueue */
2877  return error;
2878 }
2879 
2880 static int sep_md5_digest(struct ahash_request *req)
2881 {
2882  int error;
2883  int error1;
2884  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2885 
2886  pr_debug("sep - doing md5 digest\n");
2887 
2888  /* Clear out task context */
2889  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2890 
2891  ta_ctx->sep_used = sep_dev;
2892  ta_ctx->current_request = MD5;
2893  ta_ctx->current_hash_req = req;
2894  ta_ctx->current_cypher_req = NULL;
2895  ta_ctx->hash_opmode = SEP_HASH_MD5;
2896  ta_ctx->current_hash_stage = HASH_DIGEST;
2897 
2898  /* lock necessary so that only one entity touches the queues */
2899  spin_lock_irq(&queue_lock);
2900  error = crypto_enqueue_request(&sep_queue, &req->base);
2901 
2902  if ((error != 0) && (error != -EINPROGRESS))
2903  pr_debug(" sep - crypto enqueue failed: %x\n",
2904  error);
2905  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2906  sep_dequeuer, (void *)&sep_queue);
2907  if (error1)
2908  pr_debug(" sep - workqueue submit failed: %x\n",
2909  error1);
2910  spin_unlock_irq(&queue_lock);
2911  /* We return result of crypto enqueue */
2912  return error;
2913 }
2914 
2915 static int sep_md5_finup(struct ahash_request *req)
2916 {
2917  int error;
2918  int error1;
2919  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2920 
2921  pr_debug("sep - doing md5 finup\n");
2922 
2923  ta_ctx->sep_used = sep_dev;
2924  ta_ctx->current_request = MD5;
2925  ta_ctx->current_hash_req = req;
2926  ta_ctx->current_cypher_req = NULL;
2927  ta_ctx->hash_opmode = SEP_HASH_MD5;
2928  ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2929 
2930  /* lock necessary so that only one entity touches the queues */
2931  spin_lock_irq(&queue_lock);
2932  error = crypto_enqueue_request(&sep_queue, &req->base);
2933 
2934  if ((error != 0) && (error != -EINPROGRESS))
2935  pr_debug(" sep - crypto enqueue failed: %x\n",
2936  error);
2937  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2938  sep_dequeuer, (void *)&sep_queue);
2939  if (error1)
2940  pr_debug(" sep - workqueue submit failed: %x\n",
2941  error1);
2942  spin_unlock_irq(&queue_lock);
2943  /* We return result of crypto enqueue */
2944  return error;
2945 }
2946 
2947 static int sep_sha224_init(struct ahash_request *req)
2948 {
2949  int error;
2950  int error1;
2951  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2952  pr_debug("sep - doing sha224 init\n");
2953 
2954  /* Clear out task context */
2955  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2956 
2957  ta_ctx->sep_used = sep_dev;
2958  ta_ctx->current_request = SHA224;
2959  ta_ctx->current_hash_req = req;
2960  ta_ctx->current_cypher_req = NULL;
2961  ta_ctx->hash_opmode = SEP_HASH_SHA224;
2962  ta_ctx->current_hash_stage = HASH_INIT;
2963 
2964  /* lock necessary so that only one entity touches the queues */
2965  spin_lock_irq(&queue_lock);
2966  error = crypto_enqueue_request(&sep_queue, &req->base);
2967 
2968  if ((error != 0) && (error != -EINPROGRESS))
2969  pr_debug(" sep - crypto enqueue failed: %x\n",
2970  error);
2971  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2972  sep_dequeuer, (void *)&sep_queue);
2973  if (error1)
2974  pr_debug(" sep - workqueue submit failed: %x\n",
2975  error1);
2976  spin_unlock_irq(&queue_lock);
2977  /* We return result of crypto enqueue */
2978  return error;
2979 }
2980 
2981 static int sep_sha224_update(struct ahash_request *req)
2982 {
2983  int error;
2984  int error1;
2985  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2986  pr_debug("sep - doing sha224 update\n");
2987 
2988  ta_ctx->sep_used = sep_dev;
2989  ta_ctx->current_request = SHA224;
2990  ta_ctx->current_hash_req = req;
2991  ta_ctx->current_cypher_req = NULL;
2992  ta_ctx->hash_opmode = SEP_HASH_SHA224;
2993  ta_ctx->current_hash_stage = HASH_UPDATE;
2994 
2995  /* lock necessary so that only one entity touches the queues */
2996  spin_lock_irq(&queue_lock);
2997  error = crypto_enqueue_request(&sep_queue, &req->base);
2998 
2999  if ((error != 0) && (error != -EINPROGRESS))
3000  pr_debug(" sep - crypto enqueue failed: %x\n",
3001  error);
3002  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3003  sep_dequeuer, (void *)&sep_queue);
3004  if (error1)
3005  pr_debug(" sep - workqueue submit failed: %x\n",
3006  error1);
3007  spin_unlock_irq(&queue_lock);
3008  /* We return result of crypto enqueue */
3009  return error;
3010 }
3011 
3012 static int sep_sha224_final(struct ahash_request *req)
3013 {
3014  int error;
3015  int error1;
3016  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3017  pr_debug("sep - doing sha224 final\n");
3018 
3019  ta_ctx->sep_used = sep_dev;
3020  ta_ctx->current_request = SHA224;
3021  ta_ctx->current_hash_req = req;
3022  ta_ctx->current_cypher_req = NULL;
3023  ta_ctx->hash_opmode = SEP_HASH_SHA224;
3024  ta_ctx->current_hash_stage = HASH_FINISH;
3025 
3026  /* lock necessary so that only one entity touches the queues */
3027  spin_lock_irq(&queue_lock);
3028  error = crypto_enqueue_request(&sep_queue, &req->base);
3029 
3030  if ((error != 0) && (error != -EINPROGRESS))
3031  pr_debug(" sep - crypto enqueue failed: %x\n",
3032  error);
3033  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3034  sep_dequeuer, (void *)&sep_queue);
3035  if (error1)
3036  pr_debug(" sep - workqueue submit failed: %x\n",
3037  error1);
3038  spin_unlock_irq(&queue_lock);
3039  /* We return result of crypto enqueue */
3040  return error;
3041 }
3042 
3043 static int sep_sha224_digest(struct ahash_request *req)
3044 {
3045  int error;
3046  int error1;
3047  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3048 
3049  pr_debug("sep - doing sha224 digest\n");
3050 
3051  /* Clear out task context */
3052  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3053 
3054  ta_ctx->sep_used = sep_dev;
3055  ta_ctx->current_request = SHA224;
3056  ta_ctx->current_hash_req = req;
3057  ta_ctx->current_cypher_req = NULL;
3058  ta_ctx->hash_opmode = SEP_HASH_SHA224;
3059  ta_ctx->current_hash_stage = HASH_DIGEST;
3060 
3061  /* lock necessary so that only one entity touches the queues */
3062  spin_lock_irq(&queue_lock);
3063  error = crypto_enqueue_request(&sep_queue, &req->base);
3064 
3065  if ((error != 0) && (error != -EINPROGRESS))
3066  pr_debug(" sep - crypto enqueue failed: %x\n",
3067  error);
3068  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3069  sep_dequeuer, (void *)&sep_queue);
3070  if (error1)
3071  pr_debug(" sep - workqueue submit failed: %x\n",
3072  error1);
3073  spin_unlock_irq(&queue_lock);
3074  /* We return result of crypto enqueue */
3075  return error;
3076 }
3077 
3078 static int sep_sha224_finup(struct ahash_request *req)
3079 {
3080  int error;
3081  int error1;
3082  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3083 
3084  pr_debug("sep - doing sha224 finup\n");
3085 
3086  ta_ctx->sep_used = sep_dev;
3087  ta_ctx->current_request = SHA224;
3088  ta_ctx->current_hash_req = req;
3089  ta_ctx->current_cypher_req = NULL;
3090  ta_ctx->hash_opmode = SEP_HASH_SHA224;
3091  ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3092 
3093  /* lock necessary so that only one entity touches the queues */
3094  spin_lock_irq(&queue_lock);
3095  error = crypto_enqueue_request(&sep_queue, &req->base);
3096 
3097  if ((error != 0) && (error != -EINPROGRESS))
3098  pr_debug(" sep - crypto enqueue failed: %x\n",
3099  error);
3100  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3101  sep_dequeuer, (void *)&sep_queue);
3102  if (error1)
3103  pr_debug(" sep - workqueue submit failed: %x\n",
3104  error1);
3105  spin_unlock_irq(&queue_lock);
3106  /* We return result of crypto enqueue */
3107  return error;
3108 }
3109 
3110 static int sep_sha256_init(struct ahash_request *req)
3111 {
3112  int error;
3113  int error1;
3114  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3115  pr_debug("sep - doing sha256 init\n");
3116 
3117  /* Clear out task context */
3118  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3119 
3120  ta_ctx->sep_used = sep_dev;
3121  ta_ctx->current_request = SHA256;
3122  ta_ctx->current_hash_req = req;
3123  ta_ctx->current_cypher_req = NULL;
3124  ta_ctx->hash_opmode = SEP_HASH_SHA256;
3125  ta_ctx->current_hash_stage = HASH_INIT;
3126 
3127  /* lock necessary so that only one entity touches the queues */
3128  spin_lock_irq(&queue_lock);
3129  error = crypto_enqueue_request(&sep_queue, &req->base);
3130 
3131  if ((error != 0) && (error != -EINPROGRESS))
3132  pr_debug(" sep - crypto enqueue failed: %x\n",
3133  error);
3134  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3135  sep_dequeuer, (void *)&sep_queue);
3136  if (error1)
3137  pr_debug(" sep - workqueue submit failed: %x\n",
3138  error1);
3139  spin_unlock_irq(&queue_lock);
3140  /* We return result of crypto enqueue */
3141  return error;
3142 }
3143 
3144 static int sep_sha256_update(struct ahash_request *req)
3145 {
3146  int error;
3147  int error1;
3148  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3149  pr_debug("sep - doing sha256 update\n");
3150 
3151  ta_ctx->sep_used = sep_dev;
3152  ta_ctx->current_request = SHA256;
3153  ta_ctx->current_hash_req = req;
3154  ta_ctx->current_cypher_req = NULL;
3155  ta_ctx->hash_opmode = SEP_HASH_SHA256;
3156  ta_ctx->current_hash_stage = HASH_UPDATE;
3157 
3158  /* lock necessary so that only one entity touches the queues */
3159  spin_lock_irq(&queue_lock);
3160  error = crypto_enqueue_request(&sep_queue, &req->base);
3161 
3162  if ((error != 0) && (error != -EINPROGRESS))
3163  pr_debug(" sep - crypto enqueue failed: %x\n",
3164  error);
3165  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3166  sep_dequeuer, (void *)&sep_queue);
3167  if (error1)
3168  pr_debug(" sep - workqueue submit failed: %x\n",
3169  error1);
3170  spin_unlock_irq(&queue_lock);
3171  /* We return result of crypto enqueue */
3172  return error;
3173 }
3174 
3175 static int sep_sha256_final(struct ahash_request *req)
3176 {
3177  int error;
3178  int error1;
3179  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3180  pr_debug("sep - doing sha256 final\n");
3181 
3182  ta_ctx->sep_used = sep_dev;
3183  ta_ctx->current_request = SHA256;
3184  ta_ctx->current_hash_req = req;
3185  ta_ctx->current_cypher_req = NULL;
3186  ta_ctx->hash_opmode = SEP_HASH_SHA256;
3187  ta_ctx->current_hash_stage = HASH_FINISH;
3188 
3189  /* lock necessary so that only one entity touches the queues */
3190  spin_lock_irq(&queue_lock);
3191  error = crypto_enqueue_request(&sep_queue, &req->base);
3192 
3193  if ((error != 0) && (error != -EINPROGRESS))
3194  pr_debug(" sep - crypto enqueue failed: %x\n",
3195  error);
3196  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3197  sep_dequeuer, (void *)&sep_queue);
3198  if (error1)
3199  pr_debug(" sep - workqueue submit failed: %x\n",
3200  error1);
3201  spin_unlock_irq(&queue_lock);
3202  /* We return result of crypto enqueue */
3203  return error;
3204 }
3205 
3206 static int sep_sha256_digest(struct ahash_request *req)
3207 {
3208  int error;
3209  int error1;
3210  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3211 
3212  pr_debug("sep - doing sha256 digest\n");
3213 
3214  /* Clear out task context */
3215  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3216 
3217  ta_ctx->sep_used = sep_dev;
3218  ta_ctx->current_request = SHA256;
3219  ta_ctx->current_hash_req = req;
3220  ta_ctx->current_cypher_req = NULL;
3221  ta_ctx->hash_opmode = SEP_HASH_SHA256;
3222  ta_ctx->current_hash_stage = HASH_DIGEST;
3223 
3224  /* lock necessary so that only one entity touches the queues */
3225  spin_lock_irq(&queue_lock);
3226  error = crypto_enqueue_request(&sep_queue, &req->base);
3227 
3228  if ((error != 0) && (error != -EINPROGRESS))
3229  pr_debug(" sep - crypto enqueue failed: %x\n",
3230  error);
3231  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3232  sep_dequeuer, (void *)&sep_queue);
3233  if (error1)
3234  pr_debug(" sep - workqueue submit failed: %x\n",
3235  error1);
3236  spin_unlock_irq(&queue_lock);
3237  /* We return result of crypto enqueue */
3238  return error;
3239 }
3240 
3241 static int sep_sha256_finup(struct ahash_request *req)
3242 {
3243  int error;
3244  int error1;
3245  struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3246 
3247  pr_debug("sep - doing sha256 finup\n");
3248 
3249  ta_ctx->sep_used = sep_dev;
3250  ta_ctx->current_request = SHA256;
3251  ta_ctx->current_hash_req = req;
3252  ta_ctx->current_cypher_req = NULL;
3253  ta_ctx->hash_opmode = SEP_HASH_SHA256;
3254  ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3255 
3256  /* lock necessary so that only one entity touches the queues */
3257  spin_lock_irq(&queue_lock);
3258  error = crypto_enqueue_request(&sep_queue, &req->base);
3259 
3260  if ((error != 0) && (error != -EINPROGRESS))
3261  pr_debug(" sep - crypto enqueue failed: %x\n",
3262  error);
3263  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3264  sep_dequeuer, (void *)&sep_queue);
3265  if (error1)
3266  pr_debug(" sep - workqueue submit failed: %x\n",
3267  error1);
3268  spin_unlock_irq(&queue_lock);
3269  /* We return result of crypto enqueue */
3270  return error;
3271 }
3272 
3273 static int sep_crypto_init(struct crypto_tfm *tfm)
3274 {
3275  const char *alg_name = crypto_tfm_alg_name(tfm);
3276 
3277  if (alg_name == NULL)
3278  pr_debug("sep_crypto_init alg is NULL\n");
3279  else
3280  pr_debug("sep_crypto_init alg is %s\n", alg_name);
3281 
3282  tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3283  return 0;
3284 }
3285 
3286 static void sep_crypto_exit(struct crypto_tfm *tfm)
3287 {
3288  pr_debug("sep_crypto_exit\n");
3289 }
3290 
3291 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3292  unsigned int keylen)
3293 {
3294  struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3295 
3296  pr_debug("sep aes setkey\n");
3297 
3298  pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3299  switch (keylen) {
3300  case SEP_AES_KEY_128_SIZE:
3301  sctx->aes_key_size = AES_128;
3302  break;
3303  case SEP_AES_KEY_192_SIZE:
3304  sctx->aes_key_size = AES_192;
3305  break;
3306  case SEP_AES_KEY_256_SIZE:
3307  sctx->aes_key_size = AES_256;
3308  break;
3309  case SEP_AES_KEY_512_SIZE:
3310  sctx->aes_key_size = AES_512;
3311  break;
3312  default:
3313  pr_debug("invalid sep aes key size %x\n",
3314  keylen);
3315  return -EINVAL;
3316  }
3317 
3318  memset(&sctx->key.aes, 0, sizeof(u32) *
3319  SEP_AES_MAX_KEY_SIZE_WORDS);
3320  memcpy(&sctx->key.aes, key, keylen);
3321  sctx->keylen = keylen;
3322  /* Indicate to encrypt/decrypt function to send key to SEP */
3323  sctx->key_sent = 0;
3324 
3325  return 0;
3326 }
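
/*
 * setkey only caches the key material; key_sent == 0 tells the
 * encrypt/decrypt paths to ship the key to the coprocessor with the
 * next init message, so no hardware traffic happens here.
 */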
3327 
3328 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3329 {
3330  int error;
3331  int error1;
3332  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3333 
3334  pr_debug("sep - doing aes ecb encrypt\n");
3335 
3336  /* Clear out task context */
3337  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3338 
3339  ta_ctx->sep_used = sep_dev;
3340  ta_ctx->current_request = AES_ECB;
3341  ta_ctx->current_hash_req = NULL;
3342  ta_ctx->current_cypher_req = req;
3343  ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3344  ta_ctx->aes_opmode = SEP_AES_ECB;
3345  ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3346  ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3347 
3348  /* lock necessary so that only one entity touches the queues */
3349  spin_lock_irq(&queue_lock);
3350  error = crypto_enqueue_request(&sep_queue, &req->base);
3351 
3352  if ((error != 0) && (error != -EINPROGRESS))
3353  pr_debug(" sep - crypto enqueue failed: %x\n",
3354  error);
3355  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3356  sep_dequeuer, (void *)&sep_queue);
3357  if (error1)
3358  pr_debug(" sep - workqueue submit failed: %x\n",
3359  error1);
3360  spin_unlock_irq(&queue_lock);
3361  /* We return result of crypto enqueue */
3362  return error;
3363 }
3364 
3365 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3366 {
3367  int error;
3368  int error1;
3369  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3370 
3371  pr_debug("sep - doing aes ecb decrypt\n");
3372 
3373  /* Clear out task context */
3374  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3375 
3376  ta_ctx->sep_used = sep_dev;
3377  ta_ctx->current_request = AES_ECB;
3378  ta_ctx->current_hash_req = NULL;
3379  ta_ctx->current_cypher_req = req;
3380  ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3381  ta_ctx->aes_opmode = SEP_AES_ECB;
3382  ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3383  ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3384 
3385  /* lock necessary so that only one entity touches the queues */
3386  spin_lock_irq(&queue_lock);
3387  error = crypto_enqueue_request(&sep_queue, &req->base);
3388 
3389  if ((error != 0) && (error != -EINPROGRESS))
3390  pr_debug(" sep - crypto enqueue failed: %x\n",
3391  error);
3392  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3393  sep_dequeuer, (void *)&sep_queue);
3394  if (error1)
3395  pr_debug(" sep - workqueue submit failed: %x\n",
3396  error1);
3397  spin_unlock_irq(&queue_lock);
3398  /* We return result of crypto enqueue */
3399  return error;
3400 }
3401 
3402 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3403 {
3404  int error;
3405  int error1;
3406  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3407  struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3408  crypto_ablkcipher_reqtfm(req));
3409 
3410  pr_debug("sep - doing aes cbc encrypt\n");
3411 
3412  /* Clear out task context */
3413  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3414 
3415  pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3416  crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3417 
3418  ta_ctx->sep_used = sep_dev;
3419  ta_ctx->current_request = AES_CBC;
3420  ta_ctx->current_hash_req = NULL;
3421  ta_ctx->current_cypher_req = req;
3422  ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3423  ta_ctx->aes_opmode = SEP_AES_CBC;
3424  ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3425  ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3426 
3427  /* lock necessary so that only one entity touches the queues */
3428  spin_lock_irq(&queue_lock);
3429  error = crypto_enqueue_request(&sep_queue, &req->base);
3430 
3431  if ((error != 0) && (error != -EINPROGRESS))
3432  pr_debug(" sep - crypto enqueue failed: %x\n",
3433  error);
3434  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3435  sep_dequeuer, (void *)&sep_queue);
3436  if (error1)
3437  pr_debug(" sep - workqueue submit failed: %x\n",
3438  error1);
3439  spin_unlock_irq(&queue_lock);
3440  /* We return result of crypto enqueue */
3441  return error;
3442 }
3443 
3444 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3445 {
3446  int error;
3447  int error1;
3448  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3449  struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3450  crypto_ablkcipher_reqtfm(req));
3451 
3452  pr_debug("sep - doing aes cbc decrypt\n");
3453 
3454  pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3455  crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3456 
3457  /* Clear out task context */
3458  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3459 
3460  ta_ctx->sep_used = sep_dev;
3461  ta_ctx->current_request = AES_CBC;
3462  ta_ctx->current_hash_req = NULL;
3463  ta_ctx->current_cypher_req = req;
3464  ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3465  ta_ctx->aes_opmode = SEP_AES_CBC;
3466  ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3467  ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3468 
3469  /* lock necessary so that only one entity touches the queues */
3470  spin_lock_irq(&queue_lock);
3471  error = crypto_enqueue_request(&sep_queue, &req->base);
3472 
3473  if ((error != 0) && (error != -EINPROGRESS))
3474  pr_debug(" sep - crypto enqueue failed: %x\n",
3475  error);
3476  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3477  sep_dequeuer, (void *)&sep_queue);
3478  if (error1)
3479  pr_debug(" sep - workqueue submit failed: %x\n",
3480  error1);
3481  spin_unlock_irq(&queue_lock);
3482  /* We return result of crypto enqueue */
3483  return error;
3484 }
3485 
3486 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3487  unsigned int keylen)
3488 {
3489  struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3490  struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3491  u32 *flags = &ctfm->crt_flags;
3492 
3493  pr_debug("sep des setkey\n");
3494 
3495  switch (keylen) {
3496  case DES_KEY_SIZE:
3497  sctx->des_nbr_keys = DES_KEY_1;
3498  break;
3499  case DES_KEY_SIZE * 2:
3500  sctx->des_nbr_keys = DES_KEY_2;
3501  break;
3502  case DES_KEY_SIZE * 3:
3503  sctx->des_nbr_keys = DES_KEY_3;
3504  break;
3505  default:
3506  pr_debug("invalid key size %x\n",
3507  keylen);
3508  return -EINVAL;
3509  }
3510 
3511  if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3512  (sep_weak_key(key, keylen))) {
3513 
3514  *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3515  pr_debug("weak key\n");
3516  return -EINVAL;
3517  }
3518 
3519  memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3520  memcpy(&sctx->key.des.key1, key, keylen);
3521  sctx->keylen = keylen;
3522  /* Indicate to encrypt/decrypt function to send key to SEP */
3523  sctx->key_sent = 0;
3524 
3525  return 0;
3526 }
3527 
3528 static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
3529 {
3530  int error;
3531  int error1;
3532  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3533 
3534  pr_debug("sep - doing des ecb encrypt\n");
3535 
3536  /* Clear out task context */
3537  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3538 
3539  ta_ctx->sep_used = sep_dev;
3540  ta_ctx->current_request = DES_ECB;
3541  ta_ctx->current_hash_req = NULL;
3542  ta_ctx->current_cypher_req = req;
3543  ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3544  ta_ctx->des_opmode = SEP_DES_ECB;
3545  ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3546  ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3547 
3548  /* lock necessary so that only one entity touches the queues */
3549  spin_lock_irq(&queue_lock);
3550  error = crypto_enqueue_request(&sep_queue, &req->base);
3551 
3552  if ((error != 0) && (error != -EINPROGRESS))
3553  pr_debug(" sep - crypto enqueue failed: %x\n",
3554  error);
3555  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3556  sep_dequeuer, (void *)&sep_queue);
3557  if (error1)
3558  pr_debug(" sep - workqueue submit failed: %x\n",
3559  error1);
3560  spin_unlock_irq(&queue_lock);
3561  /* We return result of crypto enqueue */
3562  return error;
3563 }
3564 
3565 static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
3566 {
3567  int error;
3568  int error1;
3569  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3570 
3571  pr_debug("sep - doing des ecb decrypt\n");
3572 
3573  /* Clear out task context */
3574  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3575 
3576  ta_ctx->sep_used = sep_dev;
3577  ta_ctx->current_request = DES_ECB;
3578  ta_ctx->current_hash_req = NULL;
3579  ta_ctx->current_cypher_req = req;
3580  ta_ctx->des_encmode = SEP_DES_DECRYPT;
3581  ta_ctx->des_opmode = SEP_DES_ECB;
3582  ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3583  ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3584 
3585  /* lock necessary so that only one entity touches the queues */
3586  spin_lock_irq(&queue_lock);
3587  error = crypto_enqueue_request(&sep_queue, &req->base);
3588 
3589  if ((error != 0) && (error != -EINPROGRESS))
3590  pr_debug(" sep - crypto enqueue failed: %x\n",
3591  error);
3592  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3593  sep_dequeuer, (void *)&sep_queue);
3594  if (error1)
3595  pr_debug(" sep - workqueue submit failed: %x\n",
3596  error1);
3597  spin_unlock_irq(&queue_lock);
3598  /* We return result of crypto enqueue */
3599  return error;
3600 }
3601 
3602 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3603 {
3604  int error;
3605  int error1;
3606  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3607 
3608  pr_debug("sep - doing des cbc encrypt\n");
3609 
3610  /* Clear out task context */
3611  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3612 
3613  ta_ctx->sep_used = sep_dev;
3614  ta_ctx->current_request = DES_CBC;
3615  ta_ctx->current_hash_req = NULL;
3616  ta_ctx->current_cypher_req = req;
3617  ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3618  ta_ctx->des_opmode = SEP_DES_CBC;
3619  ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3620  ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3621 
3622  /* lock necessary so that only one entity touches the queues */
3623  spin_lock_irq(&queue_lock);
3624  error = crypto_enqueue_request(&sep_queue, &req->base);
3625 
3626  if ((error != 0) && (error != -EINPROGRESS))
3627  pr_debug(" sep - crypto enqueue failed: %x\n",
3628  error);
3629  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3630  sep_dequeuer, (void *)&sep_queue);
3631  if (error1)
3632  pr_debug(" sep - workqueue submit failed: %x\n",
3633  error1);
3634  spin_unlock_irq(&queue_lock);
3635  /* We return result of crypto enqueue */
3636  return error;
3637 }
3638 
3639 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3640 {
3641  int error;
3642  int error1;
3643  struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3644 
3645  pr_debug("sep - doing des cbc decrypt\n");
3646 
3647  /* Clear out task context */
3648  memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3649 
3650  ta_ctx->sep_used = sep_dev;
3651  ta_ctx->current_request = DES_CBC;
3652  ta_ctx->current_hash_req = NULL;
3653  ta_ctx->current_cypher_req = req;
3654  ta_ctx->des_encmode = SEP_DES_DECRYPT;
3655  ta_ctx->des_opmode = SEP_DES_CBC;
3656  ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3657  ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3658 
3659  /* lock necessary so that only one entity touches the queues */
3660  spin_lock_irq(&queue_lock);
3661  error = crypto_enqueue_request(&sep_queue, &req->base);
3662 
3663  if ((error != 0) && (error != -EINPROGRESS))
3664  pr_debug(" sep - crypto enqueue failed: %x\n",
3665  error);
3666  error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3667  sep_dequeuer, (void *)&sep_queue);
3668  if (error1)
3669  pr_debug(" sep - workqueue submit failed: %x\n",
3670  error1);
3671  spin_unlock_irq(&queue_lock);
3672  /* We return result of crypto enqueue */
3673  return error;
3674 }
3675 
3676 static struct ahash_alg hash_algs[] = {
3677 {
3678  .init = sep_sha1_init,
3679  .update = sep_sha1_update,
3680  .final = sep_sha1_final,
3681  .digest = sep_sha1_digest,
3682  .finup = sep_sha1_finup,
3683  .halg = {
3684  .digestsize = SHA1_DIGEST_SIZE,
3685  .base = {
3686  .cra_name = "sha1",
3687  .cra_driver_name = "sha1-sep",
3688  .cra_priority = 100,
3689  .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3690  CRYPTO_ALG_ASYNC,
3691  .cra_blocksize = SHA1_BLOCK_SIZE,
3692  .cra_ctxsize = sizeof(struct sep_system_ctx),
3693  .cra_alignmask = 0,
3694  .cra_module = THIS_MODULE,
3695  .cra_init = sep_hash_cra_init,
3696  .cra_exit = sep_hash_cra_exit,
3697  }
3698  }
3699 },
3700 {
3701  .init = sep_md5_init,
3702  .update = sep_md5_update,
3703  .final = sep_md5_final,
3704  .digest = sep_md5_digest,
3705  .finup = sep_md5_finup,
3706  .halg = {
3707  .digestsize = MD5_DIGEST_SIZE,
3708  .base = {
3709  .cra_name = "md5",
3710  .cra_driver_name = "md5-sep",
3711  .cra_priority = 100,
3712  .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3713  CRYPTO_ALG_ASYNC,
3714  .cra_blocksize = SHA1_BLOCK_SIZE,
3715  .cra_ctxsize = sizeof(struct sep_system_ctx),
3716  .cra_alignmask = 0,
3717  .cra_module = THIS_MODULE,
3718  .cra_init = sep_hash_cra_init,
3719  .cra_exit = sep_hash_cra_exit,
3720  }
3721  }
3722 },
3723 {
3724  .init = sep_sha224_init,
3725  .update = sep_sha224_update,
3726  .final = sep_sha224_final,
3727  .digest = sep_sha224_digest,
3728  .finup = sep_sha224_finup,
3729  .halg = {
3730  .digestsize = SHA224_DIGEST_SIZE,
3731  .base = {
3732  .cra_name = "sha224",
3733  .cra_driver_name = "sha224-sep",
3734  .cra_priority = 100,
3735  .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3736  CRYPTO_ALG_ASYNC,
3737  .cra_blocksize = SHA224_BLOCK_SIZE,
3738  .cra_ctxsize = sizeof(struct sep_system_ctx),
3739  .cra_alignmask = 0,
3740  .cra_module = THIS_MODULE,
3741  .cra_init = sep_hash_cra_init,
3742  .cra_exit = sep_hash_cra_exit,
3743  }
3744  }
3745 },
3746 {
3747  .init = sep_sha256_init,
3748  .update = sep_sha256_update,
3749  .final = sep_sha256_final,
3750  .digest = sep_sha256_digest,
3751  .finup = sep_sha256_finup,
3752  .halg = {
3753  .digestsize = SHA256_DIGEST_SIZE,
3754  .base = {
3755  .cra_name = "sha256",
3756  .cra_driver_name = "sha256-sep",
3757  .cra_priority = 100,
3758  .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3759  CRYPTO_ALG_ASYNC,
3760  .cra_blocksize = SHA256_BLOCK_SIZE,
3761  .cra_ctxsize = sizeof(struct sep_system_ctx),
3762  .cra_alignmask = 0,
3763  .cra_module = THIS_MODULE,
3764  .cra_init = sep_hash_cra_init,
3765  .cra_exit = sep_hash_cra_exit,
3766  }
3767  }
3768 }
3769 };
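
/*
 * cra_priority 100 lets these implementations be chosen over the
 * generic software hashes (priority 0) when a caller asks for
 * "sha1", "md5", "sha224" or "sha256" without naming a driver.
 */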
3770 
3771 static struct crypto_alg crypto_algs[] = {
3772 {
3773  .cra_name = "ecb(aes)",
3774  .cra_driver_name = "ecb-aes-sep",
3775  .cra_priority = 100,
3776  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3777  .cra_blocksize = AES_BLOCK_SIZE,
3778  .cra_ctxsize = sizeof(struct sep_system_ctx),
3779  .cra_alignmask = 0,
3780  .cra_type = &crypto_ablkcipher_type,
3781  .cra_module = THIS_MODULE,
3782  .cra_init = sep_crypto_init,
3783  .cra_exit = sep_crypto_exit,
3784  .cra_u.ablkcipher = {
3785  .min_keysize = AES_MIN_KEY_SIZE,
3786  .max_keysize = AES_MAX_KEY_SIZE,
3787  .setkey = sep_aes_setkey,
3788  .encrypt = sep_aes_ecb_encrypt,
3789  .decrypt = sep_aes_ecb_decrypt,
3790  }
3791 },
3792 {
3793  .cra_name = "cbc(aes)",
3794  .cra_driver_name = "cbc-aes-sep",
3795  .cra_priority = 100,
3796  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3797  .cra_blocksize = AES_BLOCK_SIZE,
3798  .cra_ctxsize = sizeof(struct sep_system_ctx),
3799  .cra_alignmask = 0,
3800  .cra_type = &crypto_ablkcipher_type,
3801  .cra_module = THIS_MODULE,
3802  .cra_init = sep_crypto_init,
3803  .cra_exit = sep_crypto_exit,
3804  .cra_u.ablkcipher = {
3805  .min_keysize = AES_MIN_KEY_SIZE,
3806  .max_keysize = AES_MAX_KEY_SIZE,
3807  .setkey = sep_aes_setkey,
3808  .encrypt = sep_aes_cbc_encrypt,
3809  .ivsize = AES_BLOCK_SIZE,
3810  .decrypt = sep_aes_cbc_decrypt,
3811  }
3812 },
3813 {
3814  .cra_name = "ecb(des)",
3815  .cra_driver_name = "ecb-des-sep",
3816  .cra_priority = 100,
3817  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3818  .cra_blocksize = DES_BLOCK_SIZE,
3819  .cra_ctxsize = sizeof(struct sep_system_ctx),
3820  .cra_alignmask = 0,
3821  .cra_type = &crypto_ablkcipher_type,
3822  .cra_module = THIS_MODULE,
3823  .cra_init = sep_crypto_init,
3824  .cra_exit = sep_crypto_exit,
3825  .cra_u.ablkcipher = {
3826  .min_keysize = DES_KEY_SIZE,
3827  .max_keysize = DES_KEY_SIZE,
3828  .setkey = sep_des_setkey,
3829  .encrypt = sep_des_ecb_encrypt,
3830  .decrypt = sep_des_ecb_decrypt,
3831  }
3832 },
3833 {
3834  .cra_name = "cbc(des)",
3835  .cra_driver_name = "cbc-des-sep",
3836  .cra_priority = 100,
3837  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3838  .cra_blocksize = DES_BLOCK_SIZE,
3839  .cra_ctxsize = sizeof(struct sep_system_ctx),
3840  .cra_alignmask = 0,
3841  .cra_type = &crypto_ablkcipher_type,
3842  .cra_module = THIS_MODULE,
3843  .cra_init = sep_crypto_init,
3844  .cra_exit = sep_crypto_exit,
3845  .cra_u.ablkcipher = {
3846  .min_keysize = DES_KEY_SIZE,
3847  .max_keysize = DES_KEY_SIZE,
3848  .setkey = sep_des_setkey,
3849  .encrypt = sep_des_cbc_encrypt,
3850  .ivsize = DES_BLOCK_SIZE,
3851  .decrypt = sep_des_cbc_decrypt,
3852  }
3853 },
3854 {
3855  .cra_name = "ecb(des3-ede)",
3856  .cra_driver_name = "ecb-des3-ede-sep",
3857  .cra_priority = 100,
3858  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3859  .cra_blocksize = DES_BLOCK_SIZE,
3860  .cra_ctxsize = sizeof(struct sep_system_ctx),
3861  .cra_alignmask = 0,
3862  .cra_type = &crypto_ablkcipher_type,
3863  .cra_module = THIS_MODULE,
3864  .cra_init = sep_crypto_init,
3865  .cra_exit = sep_crypto_exit,
3866  .cra_u.ablkcipher = {
3867  .min_keysize = DES3_EDE_KEY_SIZE,
3868  .max_keysize = DES3_EDE_KEY_SIZE,
3869  .setkey = sep_des_setkey,
3870  .encrypt = sep_des_ecb_encrypt,
3871  .decrypt = sep_des_ecb_decrypt,
3872  }
3873 },
3874 {
3875  .cra_name = "cbc(des3-ede)",
3876  .cra_driver_name = "cbc-des3-ede-sep",
3877  .cra_priority = 100,
3878  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3879  .cra_blocksize = DES_BLOCK_SIZE,
3880  .cra_ctxsize = sizeof(struct sep_system_ctx),
3881  .cra_alignmask = 0,
3882  .cra_type = &crypto_ablkcipher_type,
3883  .cra_module = THIS_MODULE,
3884  .cra_init = sep_crypto_init,
3885  .cra_exit = sep_crypto_exit,
3886  .cra_u.ablkcipher = {
3887  .min_keysize = DES3_EDE_KEY_SIZE,
3888  .max_keysize = DES3_EDE_KEY_SIZE,
3889  .setkey = sep_des_setkey,
3890  .encrypt = sep_des_cbc_encrypt,
3891  .decrypt = sep_des_cbc_decrypt,
3892  }
3893 }
3894 };
3895 
3896 int sep_crypto_setup(void)
3897 {
3898  int err, i, j, k;
3899  tasklet_init(&sep_dev->finish_tasklet, sep_finish,
3900  (unsigned long)sep_dev);
3901 
3902  crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
3903 
3903 
3904  sep_dev->workqueue = create_singlethread_workqueue(
3905  "sep_crypto_workqueue");
3906  if (!sep_dev->workqueue) {
3907  dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
3908  return -ENOMEM;
3909  }
3910 
3911  i = 0;
3912  j = 0;
3913 
3914  spin_lock_init(&queue_lock);
3915 
3916  err = 0;
3917 
3918  for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
3919  err = crypto_register_ahash(&hash_algs[i]);
3920  if (err)
3921  goto err_algs;
3922  }
3923 
3924  err = 0;
3925  for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
3926  err = crypto_register_alg(&crypto_algs[j]);
3927  if (err)
3928  goto err_crypto_algs;
3929  }
3930 
3931  return err;
3932 
3933 err_algs:
3934  for (k = 0; k < i; k++)
3935  crypto_unregister_ahash(&hash_algs[k]);
3936  return err;
3937 
3938 err_crypto_algs:
3939  for (k = 0; k < j; k++)
3940  crypto_unregister_alg(&crypto_algs[k]);
3941  goto err_algs;
3942 }
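
/*
 * Unwind order above: if a cipher registration fails,
 * err_crypto_algs removes the ciphers registered so far, then falls
 * through to err_algs, which (with i already equal to
 * ARRAY_SIZE(hash_algs)) removes every hash registered earlier.
 */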
3943 
3944 void sep_crypto_takedown(void)
3945 {
3946 
3947  int i;
3948 
3949  for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
3950  crypto_unregister_ahash(&hash_algs[i]);
3951  for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
3952  crypto_unregister_alg(&crypto_algs[i]);
3953 
3954  tasklet_kill(&sep_dev->finish_tasklet);
3955 }
3956 
3957 #endif