Linux Kernel  3.7.1
caamalg.c
1 /*
2  * caam - Freescale FSL CAAM support for crypto API
3  *
4  * Copyright 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Based on talitos crypto API driver.
7  *
8  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9  *
10  * --------------- ---------------
11  * | JobDesc #1 |-------------------->| ShareDesc |
12  * | *(packet 1) | | (PDB) |
13  * --------------- |------------->| (hashKey) |
14  * . | | (cipherKey) |
15  * . | |-------->| (operation) |
16  * --------------- | | ---------------
17  * | JobDesc #2 |------| |
18  * | *(packet 2) | |
19  * --------------- |
20  * . |
21  * . |
22  * --------------- |
23  * | JobDesc #3 |------------
24  * | *(packet 3) |
25  * ---------------
26  *
27  * The SharedDesc never changes for a connection unless rekeyed, but
28  * each packet will likely be in a different place. So all we need
29  * to know to process the packet is where the input is, where the
30  * output goes, and what context we want to process with. Context is
31  * in the SharedDesc, packet references in the JobDesc.
32  *
33  * So, a job desc looks like:
34  *
35  * ---------------------
36  * | Header |
37  * | ShareDesc Pointer |
38  * | SEQ_OUT_PTR |
39  * | (output buffer) |
40  * | (output length) |
41  * | SEQ_IN_PTR |
42  * | (input buffer) |
43  * | (input length) |
44  * ---------------------
45  */
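As a rough illustration of the layout just described, the following sketch (not part of the driver; it simply reuses the desc_constr.h helpers that the functions below rely on, and all addresses and lengths are placeholder arguments) shows how a job descriptor that points at an already-built shared descriptor might be assembled, in the same way init_aead_job() and init_ablkcipher_job() later in this file do:

/*
 * Illustrative sketch only: header + ShareDesc pointer, then the per-packet
 * sequence pointers.  Placeholder arguments throughout.
 */
static void example_job_desc(u32 *jobdesc, dma_addr_t shdesc_dma,
			     int shdesc_len, dma_addr_t in_dma, u32 in_len,
			     dma_addr_t out_dma, u32 out_len)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(jobdesc, shdesc_dma, shdesc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_IN_PTR: this packet's input buffer and length */
	append_seq_in_ptr(jobdesc, in_dma, in_len, 0);
	/* SEQ_OUT_PTR: this packet's output buffer and length */
	append_seq_out_ptr(jobdesc, out_dma, out_len, 0);
}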
46 
47 #include "compat.h"
48 
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54 #include "sg_sw_sec4.h"
55 #include "key_gen.h"
56 
57 /*
58  * crypto alg
59  */
60 #define CAAM_CRA_PRIORITY 3000
61 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
63  SHA512_DIGEST_SIZE * 2)
64 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65 #define CAAM_MAX_IV_LENGTH 16
66 
67 /* length of descriptors text */
68 #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
69 
70 #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
71 #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
72 #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
73 #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
74 
75 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
76 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
77  20 * CAAM_CMD_SZ)
78 #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
79  15 * CAAM_CMD_SZ)
80 
81 #define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
82  CAAM_MAX_KEY_SIZE)
83 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
84 
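For a sense of scale, a back-of-the-envelope reading of these limits (a sketch assuming CAAM_CMD_SZ is 4 bytes and the usual AES/SHA-512 sizes; CAAM_PTR_SZ depends on sizeof(dma_addr_t)):

/*
 * Rough sizing, assuming CAAM_CMD_SZ = 4, AES_MAX_KEY_SIZE = 32,
 * SHA512_DIGEST_SIZE = 64:
 *
 *   CAAM_MAX_KEY_SIZE    = 32 + 64 * 2       = 160 bytes
 *   DESC_AEAD_GIVENC_LEN = (4 + 16 + 7) * 4  = 108 bytes
 *   DESC_MAX_USED_BYTES  = 108 + 160         = 268 bytes
 *   DESC_MAX_USED_LEN    = 268 / 4           =  67 words
 *
 * i.e. a givencrypt shared descriptor with both keys inlined can be larger
 * than the 64-word descriptor buffer, which is why aead_set_sh_desc() below
 * only inlines the keys when the combined length stays within
 * CAAM_DESC_BYTES_MAX.
 */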
85 #ifdef DEBUG
86 /* for print_hex_dumps with line references */
87 #define xstr(s) str(s)
88 #define str(s) #s
89 #define debug(format, arg...) printk(format, arg)
90 #else
91 #define debug(format, arg...)
92 #endif
93 
94 /* Set DK bit in class 1 operation if shared */
95 static inline void append_dec_op1(u32 *desc, u32 type)
96 {
97  u32 *jump_cmd, *uncond_jump_cmd;
98 
99  jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
100  append_operation(desc, type | OP_ALG_AS_INITFINAL |
101  OP_ALG_DECRYPT);
102  uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
103  set_jump_tgt_here(desc, jump_cmd);
104  append_operation(desc, type | OP_ALG_AS_INITFINAL |
105  OP_ALG_DECRYPT | OP_ALG_AAI_DK);
106  set_jump_tgt_here(desc, uncond_jump_cmd);
107 }
108 
109 /*
110  * Wait for completion of class 1 key loading before allowing
111  * error propagation
112  */
113 static inline void append_dec_shr_done(u32 *desc)
114 {
115  u32 *jump_cmd;
116 
117  jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
118  set_jump_tgt_here(desc, jump_cmd);
119  append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
120 }
121 
122 /*
123  * For aead functions, read payload and write payload,
124  * both of which are specified in req->src and req->dst
125  */
126 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
127 {
128  append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
129  KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
130  append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
131 }
132 
133 /*
134  * For aead encrypt and decrypt, read iv for both classes
135  */
136 static inline void aead_append_ld_iv(u32 *desc, int ivsize)
137 {
138  append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
139  LDST_CLASS_1_CCB | ivsize);
140  append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
141 }
142 
143 /*
144  * For ablkcipher encrypt and decrypt, read from req->src and
145  * write to req->dst
146  */
147 static inline void ablkcipher_append_src_dst(u32 *desc)
148 {
149  append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
150  append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
151  append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
152  KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
153  append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
154 }
155 
156 /*
157  * If all data, including src (with assoc and iv) or dst (with iv only) are
158  * contiguous
159  */
160 #define GIV_SRC_CONTIG 1
161 #define GIV_DST_CONTIG (1 << 1)
162 
163 /*
164  * per-session context
165  */
166 struct caam_ctx {
167  struct device *jrdev;
168  u32 sh_desc_enc[DESC_MAX_USED_LEN];
169  u32 sh_desc_dec[DESC_MAX_USED_LEN];
170  u32 sh_desc_givenc[DESC_MAX_USED_LEN];
171  dma_addr_t sh_desc_enc_dma;
172  dma_addr_t sh_desc_dec_dma;
173  dma_addr_t sh_desc_givenc_dma;
174  u32 class1_alg_type;
175  u32 class2_alg_type;
176  u32 alg_op;
177  u8 key[CAAM_MAX_KEY_SIZE];
178  dma_addr_t key_dma;
179  unsigned int enckeylen;
180  unsigned int split_key_len;
181  unsigned int split_key_pad_len;
182  unsigned int authsize;
183 };
184 
185 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
186  int keys_fit_inline)
187 {
188  if (keys_fit_inline) {
189  append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
190  ctx->split_key_len, CLASS_2 |
191  KEY_DEST_MDHA_SPLIT | KEY_ENC);
192  append_key_as_imm(desc, (void *)ctx->key +
193  ctx->split_key_pad_len, ctx->enckeylen,
194  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
195  } else {
196  append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
197  KEY_DEST_MDHA_SPLIT | KEY_ENC);
198  append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
199  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
200  }
201 }
202 
203 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
204  int keys_fit_inline)
205 {
206  u32 *key_jump_cmd;
207 
208  init_sh_desc(desc, HDR_SHARE_SERIAL);
209 
210  /* Skip if already shared */
211  key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
212  JUMP_COND_SHRD);
213 
214  append_key_aead(desc, ctx, keys_fit_inline);
215 
216  set_jump_tgt_here(desc, key_jump_cmd);
217 
218  /* Propagate errors from shared to job descriptor */
219  append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
220 }
221 
222 static int aead_set_sh_desc(struct crypto_aead *aead)
223 {
224  struct aead_tfm *tfm = &aead->base.crt_aead;
225  struct caam_ctx *ctx = crypto_aead_ctx(aead);
226  struct device *jrdev = ctx->jrdev;
227  bool keys_fit_inline = false;
228  u32 *key_jump_cmd, *jump_cmd;
229  u32 geniv, moveiv;
230  u32 *desc;
231 
232  if (!ctx->enckeylen || !ctx->authsize)
233  return 0;
234 
235  /*
236  * Job Descriptor and Shared Descriptors
237  * must all fit into the 64-word Descriptor h/w Buffer
238  */
239  if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
240  ctx->split_key_pad_len + ctx->enckeylen <=
241  CAAM_DESC_BYTES_MAX)
242  keys_fit_inline = true;
243 
244  /* aead_encrypt shared descriptor */
245  desc = ctx->sh_desc_enc;
246 
247  init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
248 
249  /* Class 2 operation */
250  append_operation(desc, ctx->class2_alg_type |
251  OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
252 
253  /* cryptlen = seqoutlen - authsize */
254  append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
255 
256  /* assoclen + cryptlen = seqinlen - ivsize */
257  append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
258 
259  /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
260  append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
261 
262  /* read assoc before reading payload */
263  append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
264  KEY_VLF);
265  aead_append_ld_iv(desc, tfm->ivsize);
266 
267  /* Class 1 operation */
268  append_operation(desc, ctx->class1_alg_type |
269  OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
270 
271  /* Read and write cryptlen bytes */
272  append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
273  append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
274  aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
275 
276  /* Write ICV */
277  append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
278  LDST_SRCDST_BYTE_CONTEXT);
279 
280  ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
281  desc_bytes(desc),
282  DMA_TO_DEVICE);
283  if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
284  dev_err(jrdev, "unable to map shared descriptor\n");
285  return -ENOMEM;
286  }
287 #ifdef DEBUG
288  print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
289  DUMP_PREFIX_ADDRESS, 16, 4, desc,
290  desc_bytes(desc), 1);
291 #endif
292 
293  /*
294  * Job Descriptor and Shared Descriptors
295  * must all fit into the 64-word Descriptor h/w Buffer
296  */
297  if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
298  ctx->split_key_pad_len + ctx->enckeylen <=
299  CAAM_DESC_BYTES_MAX)
300  keys_fit_inline = true;
301 
302  desc = ctx->sh_desc_dec;
303 
304  /* aead_decrypt shared descriptor */
305  init_sh_desc(desc, HDR_SHARE_SERIAL);
306 
307  /* Skip if already shared */
308  key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
309  JUMP_COND_SHRD | JUMP_COND_SELF);
310 
311  append_key_aead(desc, ctx, keys_fit_inline);
312 
313  /* Only propagate error immediately if shared */
314  jump_cmd = append_jump(desc, JUMP_TEST_ALL);
315  set_jump_tgt_here(desc, key_jump_cmd);
316  append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
317  set_jump_tgt_here(desc, jump_cmd);
318 
319  /* Class 2 operation */
320  append_operation(desc, ctx->class2_alg_type |
321  OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
322 
323  /* assoclen + cryptlen = seqinlen - ivsize */
324  append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
325  ctx->authsize + tfm->ivsize);
326  /* assoclen = (assoclen + cryptlen) - cryptlen */
327  append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
328  append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
329 
330  /* read assoc before reading payload */
331  append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
332  KEY_VLF);
333 
334  aead_append_ld_iv(desc, tfm->ivsize);
335 
336  append_dec_op1(desc, ctx->class1_alg_type);
337 
338  /* Read and write cryptlen bytes */
339  append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
340  append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
341  aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
342 
343  /* Load ICV */
344  append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
345  FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
346  append_dec_shr_done(desc);
347 
348  ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
349  desc_bytes(desc),
350  DMA_TO_DEVICE);
351  if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
352  dev_err(jrdev, "unable to map shared descriptor\n");
353  return -ENOMEM;
354  }
355 #ifdef DEBUG
356  print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
357  DUMP_PREFIX_ADDRESS, 16, 4, desc,
358  desc_bytes(desc), 1);
359 #endif
360 
361  /*
362  * Job Descriptor and Shared Descriptors
363  * must all fit into the 64-word Descriptor h/w Buffer
364  */
365  if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
366  ctx->split_key_pad_len + ctx->enckeylen <=
367  CAAM_DESC_BYTES_MAX)
368  keys_fit_inline = true;
369 
370  /* aead_givencrypt shared descriptor */
371  desc = ctx->sh_desc_givenc;
372 
373  init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
374 
375  /* Generate IV */
376  geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
377  NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
378  NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
379  append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
380  LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
381  append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
382  append_move(desc, MOVE_SRC_INFIFO |
383  MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
384  append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
385 
386  /* Copy IV to class 1 context */
387  append_move(desc, MOVE_SRC_CLASS1CTX |
388  MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
389 
390  /* Return to encryption */
391  append_operation(desc, ctx->class2_alg_type |
392  OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
393 
394  /* ivsize + cryptlen = seqoutlen - authsize */
395  append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
396 
397  /* assoclen = seqinlen - (ivsize + cryptlen) */
398  append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
399 
400  /* read assoc before reading payload */
401  append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
402  KEY_VLF);
403 
404  /* Copy iv from class 1 ctx to class 2 fifo*/
405  moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
406  NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
407  append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
408  LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
409  append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
410  LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
411 
412  /* Class 1 operation */
413  append_operation(desc, ctx->class1_alg_type |
414  OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
415 
416  /* Will write ivsize + cryptlen */
417  append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
418 
419  /* No need to reload iv */
420  append_seq_fifo_load(desc, tfm->ivsize,
421  FIFOLD_CLASS_SKIP);
422 
423  /* Will read cryptlen */
424  append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
425  aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
426 
427  /* Write ICV */
428  append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
429  LDST_SRCDST_BYTE_CONTEXT);
430 
431  ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
432  desc_bytes(desc),
433  DMA_TO_DEVICE);
434  if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
435  dev_err(jrdev, "unable to map shared descriptor\n");
436  return -ENOMEM;
437  }
438 #ifdef DEBUG
439  print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
440  DUMP_PREFIX_ADDRESS, 16, 4, desc,
441  desc_bytes(desc), 1);
442 #endif
443 
444  return 0;
445 }
446 
447 static int aead_setauthsize(struct crypto_aead *authenc,
448  unsigned int authsize)
449 {
450  struct caam_ctx *ctx = crypto_aead_ctx(authenc);
451 
452  ctx->authsize = authsize;
453  aead_set_sh_desc(authenc);
454 
455  return 0;
456 }
457 
458 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
459  u32 authkeylen)
460 {
461  return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
462  ctx->split_key_pad_len, key_in, authkeylen,
463  ctx->alg_op);
464 }
465 
466 static int aead_setkey(struct crypto_aead *aead,
467  const u8 *key, unsigned int keylen)
468 {
469  /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
470  static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
471  struct caam_ctx *ctx = crypto_aead_ctx(aead);
472  struct device *jrdev = ctx->jrdev;
473  struct rtattr *rta = (void *)key;
474  struct crypto_authenc_key_param *param;
475  unsigned int authkeylen;
476  unsigned int enckeylen;
477  int ret = 0;
478 
479  param = RTA_DATA(rta);
480  enckeylen = be32_to_cpu(param->enckeylen);
481 
482  key += RTA_ALIGN(rta->rta_len);
483  keylen -= RTA_ALIGN(rta->rta_len);
484 
485  if (keylen < enckeylen)
486  goto badkey;
487 
488  authkeylen = keylen - enckeylen;
489 
490  if (keylen > CAAM_MAX_KEY_SIZE)
491  goto badkey;
492 
493  /* Pick class 2 key length from algorithm submask */
494  ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
495  OP_ALG_ALGSEL_SHIFT] * 2;
496  ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
497 
498 #ifdef DEBUG
499  printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
500  keylen, enckeylen, authkeylen);
501  printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
502  ctx->split_key_len, ctx->split_key_pad_len);
503  print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
504  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
505 #endif
506 
507  ret = gen_split_aead_key(ctx, key, authkeylen);
508  if (ret) {
509  goto badkey;
510  }
511 
512  /* append encryption key after the auth split key */
513  memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
514 
515  ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
516  enckeylen, DMA_TO_DEVICE);
517  if (dma_mapping_error(jrdev, ctx->key_dma)) {
518  dev_err(jrdev, "unable to map key i/o memory\n");
519  return -ENOMEM;
520  }
521 #ifdef DEBUG
522  print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
523  DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
524  ctx->split_key_pad_len + enckeylen, 1);
525 #endif
526 
527  ctx->enckeylen = enckeylen;
528 
529  ret = aead_set_sh_desc(aead);
530  if (ret) {
531  dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
532  enckeylen, DMA_TO_DEVICE);
533  }
534 
535  return ret;
536 badkey:
537  crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
538  return -EINVAL;
539 }
540 
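To make the split-key arithmetic in aead_setkey() concrete, an illustrative walk-through (assuming the authenc(hmac(sha256),cbc(aes)) template below with a 16-byte AES key; the SHA-256 submask selects the 32-byte entry of mdpadlen[]):

/*
 * Illustrative sizing only:
 *
 *   split_key_len     = 32 * 2         = 64 bytes (ipad/opad halves)
 *   split_key_pad_len = ALIGN(64, 16)  = 64 bytes
 *   ctx->key layout   = [64-byte split key][16-byte AES key]
 *   key_dma mapping   = 64 + 16        = 80 bytes
 *
 * which fits comfortably within CAAM_MAX_KEY_SIZE (160 bytes).
 */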
541 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
542  const u8 *key, unsigned int keylen)
543 {
544  struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
545  struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
546  struct device *jrdev = ctx->jrdev;
547  int ret = 0;
548  u32 *key_jump_cmd, *jump_cmd;
549  u32 *desc;
550 
551 #ifdef DEBUG
552  print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
553  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
554 #endif
555 
556  memcpy(ctx->key, key, keylen);
557  ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
558  DMA_TO_DEVICE);
559  if (dma_mapping_error(jrdev, ctx->key_dma)) {
560  dev_err(jrdev, "unable to map key i/o memory\n");
561  return -ENOMEM;
562  }
563  ctx->enckeylen = keylen;
564 
565  /* ablkcipher_encrypt shared descriptor */
566  desc = ctx->sh_desc_enc;
567  init_sh_desc(desc, HDR_SHARE_SERIAL);
568  /* Skip if already shared */
569  key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
570  JUMP_COND_SHRD);
571 
572  /* Load class1 key only */
573  append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
574  ctx->enckeylen, CLASS_1 |
575  KEY_DEST_CLASS_REG);
576 
577  set_jump_tgt_here(desc, key_jump_cmd);
578 
579  /* Propagate errors from shared to job descriptor */
580  append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
581 
582  /* Load iv */
583  append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
584  LDST_CLASS_1_CCB | tfm->ivsize);
585 
586  /* Load operation */
587  append_operation(desc, ctx->class1_alg_type |
588  OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
589 
590  /* Perform operation */
591  ablkcipher_append_src_dst(desc);
592 
593  ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
594  desc_bytes(desc),
595  DMA_TO_DEVICE);
596  if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
597  dev_err(jrdev, "unable to map shared descriptor\n");
598  return -ENOMEM;
599  }
600 #ifdef DEBUG
601  print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
602  DUMP_PREFIX_ADDRESS, 16, 4, desc,
603  desc_bytes(desc), 1);
604 #endif
605  /* ablkcipher_decrypt shared descriptor */
606  desc = ctx->sh_desc_dec;
607 
608  init_sh_desc(desc, HDR_SHARE_SERIAL);
609  /* Skip if already shared */
610  key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
611  JUMP_COND_SHRD);
612 
613  /* Load class1 key only */
614  append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
615  ctx->enckeylen, CLASS_1 |
616  KEY_DEST_CLASS_REG);
617 
618  /* Only propagate error immediately if shared */
619  jump_cmd = append_jump(desc, JUMP_TEST_ALL);
620  set_jump_tgt_here(desc, key_jump_cmd);
621  append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
622  set_jump_tgt_here(desc, jump_cmd);
623 
624  /* load IV */
625  append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
626  LDST_CLASS_1_CCB | tfm->ivsize);
627 
628  /* Choose operation */
629  append_dec_op1(desc, ctx->class1_alg_type);
630 
631  /* Perform operation */
632  ablkcipher_append_src_dst(desc);
633 
634  /* Wait for key to load before allowing error propagation */
635  append_dec_shr_done(desc);
636 
637  ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
638  desc_bytes(desc),
639  DMA_TO_DEVICE);
640  if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
641  dev_err(jrdev, "unable to map shared descriptor\n");
642  return -ENOMEM;
643  }
644 
645 #ifdef DEBUG
646  print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
647  DUMP_PREFIX_ADDRESS, 16, 4, desc,
648  desc_bytes(desc), 1);
649 #endif
650 
651  return ret;
652 }
653 
654 /*
655  * aead_edesc - s/w-extended aead descriptor
656  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
657  * @assoc_chained: if associated data is chained
658  * @src_nents: number of segments in input scatterlist
659  * @src_chained: if source is chained
660  * @dst_nents: number of segments in output scatterlist
661  * @dst_chained: if destination is chained
662  * @iv_dma: dma address of iv for checking continuity and link table
663  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
664  * @sec4_sg_bytes: length of dma mapped sec4_sg space
665  * @sec4_sg_dma: bus physical mapped address of h/w link table
666  * @hw_desc: the h/w job descriptor followed by any referenced link tables
667  */
668 struct aead_edesc {
669  int assoc_nents;
670  bool assoc_chained;
671  int src_nents;
672  bool src_chained;
673  int dst_nents;
674  bool dst_chained;
675  dma_addr_t iv_dma;
676  int sec4_sg_bytes;
677  dma_addr_t sec4_sg_dma;
678  struct sec4_sg_entry *sec4_sg;
679  u32 hw_desc[0];
680 };
681 
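The fields above all describe a single contiguous GFP_DMA allocation; a rough picture of the layout set up by aead_edesc_alloc() below (desc_bytes being DESC_JOB_IO_LEN * CAAM_CMD_SZ as passed by its callers):

/*
 * Sketch of the one allocation backing an aead_edesc:
 *
 *   +------------------------------+  <- kmalloc'ed base (struct aead_edesc *)
 *   | bookkeeping (nents, iv_dma,  |     sizeof(struct aead_edesc)
 *   | sec4_sg_dma, ...)            |
 *   +------------------------------+  <- hw_desc[]: the h/w job descriptor
 *   | job descriptor commands      |     desc_bytes
 *   +------------------------------+  <- sec4_sg: the h/w link table
 *   | sec4_sg entries              |     sec4_sg_bytes
 *   +------------------------------+
 */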
682 /*
683  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
684  * @src_nents: number of segments in input scatterlist
685  * @src_chained: if source is chained
686  * @dst_nents: number of segments in output scatterlist
687  * @dst_chained: if destination is chained
688  * @iv_dma: dma address of iv for checking continuity and link table
689  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
690  * @sec4_sg_bytes: length of dma mapped sec4_sg space
691  * @sec4_sg_dma: bus physical mapped address of h/w link table
692  * @hw_desc: the h/w job descriptor followed by any referenced link tables
693  */
694 struct ablkcipher_edesc {
695  int src_nents;
696  bool src_chained;
697  int dst_nents;
698  bool dst_chained;
699  dma_addr_t iv_dma;
700  int sec4_sg_bytes;
701  dma_addr_t sec4_sg_dma;
702  struct sec4_sg_entry *sec4_sg;
703  u32 hw_desc[0];
704 };
705 
706 static void caam_unmap(struct device *dev, struct scatterlist *src,
707  struct scatterlist *dst, int src_nents,
708  bool src_chained, int dst_nents, bool dst_chained,
709  dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
710  int sec4_sg_bytes)
711 {
712  if (dst != src) {
713  dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
714  src_chained);
715  dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
716  dst_chained);
717  } else {
718  dma_unmap_sg_chained(dev, src, src_nents ? : 1,
719  DMA_BIDIRECTIONAL, src_chained);
720  }
721 
722  if (iv_dma)
723  dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
724  if (sec4_sg_bytes)
725  dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
726  DMA_TO_DEVICE);
727 }
728 
729 static void aead_unmap(struct device *dev,
730  struct aead_edesc *edesc,
731  struct aead_request *req)
732 {
733  struct crypto_aead *aead = crypto_aead_reqtfm(req);
734  int ivsize = crypto_aead_ivsize(aead);
735 
736  dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
737  DMA_TO_DEVICE, edesc->assoc_chained);
738 
739  caam_unmap(dev, req->src, req->dst,
740  edesc->src_nents, edesc->src_chained, edesc->dst_nents,
741  edesc->dst_chained, edesc->iv_dma, ivsize,
742  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
743 }
744 
745 static void ablkcipher_unmap(struct device *dev,
746  struct ablkcipher_edesc *edesc,
747  struct ablkcipher_request *req)
748 {
749  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
750  int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
751 
752  caam_unmap(dev, req->src, req->dst,
753  edesc->src_nents, edesc->src_chained, edesc->dst_nents,
754  edesc->dst_chained, edesc->iv_dma, ivsize,
755  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
756 }
757 
758 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
759  void *context)
760 {
761  struct aead_request *req = context;
762  struct aead_edesc *edesc;
763 #ifdef DEBUG
764  struct crypto_aead *aead = crypto_aead_reqtfm(req);
765  struct caam_ctx *ctx = crypto_aead_ctx(aead);
766  int ivsize = crypto_aead_ivsize(aead);
767 
768  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
769 #endif
770 
771  edesc = (struct aead_edesc *)((char *)desc -
772  offsetof(struct aead_edesc, hw_desc));
773 
774  if (err) {
775  char tmp[CAAM_ERROR_STR_MAX];
776 
777  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
778  }
779 
780  aead_unmap(jrdev, edesc, req);
781 
782 #ifdef DEBUG
783  print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
784  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
785  req->assoclen , 1);
786  print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
787  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
788  edesc->src_nents ? 100 : ivsize, 1);
789  print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
790  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
791  edesc->src_nents ? 100 : req->cryptlen +
792  ctx->authsize + 4, 1);
793 #endif
794 
795  kfree(edesc);
796 
797  aead_request_complete(req, err);
798 }
799 
800 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
801  void *context)
802 {
803  struct aead_request *req = context;
804  struct aead_edesc *edesc;
805 #ifdef DEBUG
806  struct crypto_aead *aead = crypto_aead_reqtfm(req);
807  struct caam_ctx *ctx = crypto_aead_ctx(aead);
808  int ivsize = crypto_aead_ivsize(aead);
809 
810  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
811 #endif
812 
813  edesc = (struct aead_edesc *)((char *)desc -
814  offsetof(struct aead_edesc, hw_desc));
815 
816 #ifdef DEBUG
817  print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
818  DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
819  ivsize, 1);
820  print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
821  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
822  req->cryptlen, 1);
823 #endif
824 
825  if (err) {
826  char tmp[CAAM_ERROR_STR_MAX];
827 
828  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
829  }
830 
831  aead_unmap(jrdev, edesc, req);
832 
833  /*
834  * verify hw auth check passed else return -EBADMSG
835  */
836  if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
837  err = -EBADMSG;
838 
839 #ifdef DEBUG
840  print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
841  DUMP_PREFIX_ADDRESS, 16, 4,
842  ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
843  sizeof(struct iphdr) + req->assoclen +
844  ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
845  ctx->authsize + 36, 1);
846  if (!err && edesc->sec4_sg_bytes) {
847  struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
848  print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
849  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
850  sg->length + ctx->authsize + 16, 1);
851  }
852 #endif
853 
854  kfree(edesc);
855 
856  aead_request_complete(req, err);
857 }
858 
859 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
860  void *context)
861 {
862  struct ablkcipher_request *req = context;
863  struct ablkcipher_edesc *edesc;
864 #ifdef DEBUG
865  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
866  int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
867 
868  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
869 #endif
870 
871  edesc = (struct ablkcipher_edesc *)((char *)desc -
872  offsetof(struct ablkcipher_edesc, hw_desc));
873 
874  if (err) {
875  char tmp[CAAM_ERROR_STR_MAX];
876 
877  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
878  }
879 
880 #ifdef DEBUG
881  print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
882  DUMP_PREFIX_ADDRESS, 16, 4, req->info,
883  edesc->src_nents > 1 ? 100 : ivsize, 1);
884  print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
885  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
886  edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
887 #endif
888 
889  ablkcipher_unmap(jrdev, edesc, req);
890  kfree(edesc);
891 
892  ablkcipher_request_complete(req, err);
893 }
894 
895 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
896  void *context)
897 {
898  struct ablkcipher_request *req = context;
899  struct ablkcipher_edesc *edesc;
900 #ifdef DEBUG
901  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
902  int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
903 
904  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
905 #endif
906 
907  edesc = (struct ablkcipher_edesc *)((char *)desc -
908  offsetof(struct ablkcipher_edesc, hw_desc));
909  if (err) {
910  char tmp[CAAM_ERROR_STR_MAX];
911 
912  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
913  }
914 
915 #ifdef DEBUG
916  print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
917  DUMP_PREFIX_ADDRESS, 16, 4, req->info,
918  ivsize, 1);
919  print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
920  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
921  edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
922 #endif
923 
924  ablkcipher_unmap(jrdev, edesc, req);
925  kfree(edesc);
926 
927  ablkcipher_request_complete(req, err);
928 }
929 
930 /*
931  * Fill in aead job descriptor
932  */
933 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
934  struct aead_edesc *edesc,
935  struct aead_request *req,
936  bool all_contig, bool encrypt)
937 {
938  struct crypto_aead *aead = crypto_aead_reqtfm(req);
939  struct caam_ctx *ctx = crypto_aead_ctx(aead);
940  int ivsize = crypto_aead_ivsize(aead);
941  int authsize = ctx->authsize;
942  u32 *desc = edesc->hw_desc;
943  u32 out_options = 0, in_options;
944  dma_addr_t dst_dma, src_dma;
945  int len, sec4_sg_index = 0;
946 
947 #ifdef DEBUG
948  debug("assoclen %d cryptlen %d authsize %d\n",
949  req->assoclen, req->cryptlen, authsize);
950  print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
951  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
952  req->assoclen , 1);
953  print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
954  DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
955  edesc->src_nents ? 100 : ivsize, 1);
956  print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
957  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
958  edesc->src_nents ? 100 : req->cryptlen, 1);
959  print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
960  DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
961  desc_bytes(sh_desc), 1);
962 #endif
963 
964  len = desc_len(sh_desc);
965  init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
966 
967  if (all_contig) {
968  src_dma = sg_dma_address(req->assoc);
969  in_options = 0;
970  } else {
971  src_dma = edesc->sec4_sg_dma;
972  sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
973  (edesc->src_nents ? : 1);
974  in_options = LDST_SGF;
975  }
976  if (encrypt)
977  append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
978  req->cryptlen - authsize, in_options);
979  else
980  append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
981  req->cryptlen, in_options);
982 
983  if (likely(req->src == req->dst)) {
984  if (all_contig) {
985  dst_dma = sg_dma_address(req->src);
986  } else {
987  dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
988  ((edesc->assoc_nents ? : 1) + 1);
989  out_options = LDST_SGF;
990  }
991  } else {
992  if (!edesc->dst_nents) {
993  dst_dma = sg_dma_address(req->dst);
994  } else {
995  dst_dma = edesc->sec4_sg_dma +
996  sec4_sg_index *
997  sizeof(struct sec4_sg_entry);
998  out_options = LDST_SGF;
999  }
1000  }
1001  if (encrypt)
1002  append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
1003  else
1004  append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1005  out_options);
1006 }
1007 
1008 /*
1009  * Fill in aead givencrypt job descriptor
1010  */
1011 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1012  struct aead_edesc *edesc,
1013  struct aead_request *req,
1014  int contig)
1015 {
1016  struct crypto_aead *aead = crypto_aead_reqtfm(req);
1017  struct caam_ctx *ctx = crypto_aead_ctx(aead);
1018  int ivsize = crypto_aead_ivsize(aead);
1019  int authsize = ctx->authsize;
1020  u32 *desc = edesc->hw_desc;
1021  u32 out_options = 0, in_options;
1022  dma_addr_t dst_dma, src_dma;
1023  int len, sec4_sg_index = 0;
1024 
1025 #ifdef DEBUG
1026  debug("assoclen %d cryptlen %d authsize %d\n",
1027  req->assoclen, req->cryptlen, authsize);
1028  print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
1029  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1030  req->assoclen , 1);
1031  print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1032  DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1033  print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
1034  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1035  edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1036  print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
1037  DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1038  desc_bytes(sh_desc), 1);
1039 #endif
1040 
1041  len = desc_len(sh_desc);
1042  init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1043 
1044  if (contig & GIV_SRC_CONTIG) {
1045  src_dma = sg_dma_address(req->assoc);
1046  in_options = 0;
1047  } else {
1048  src_dma = edesc->sec4_sg_dma;
1049  sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1050  in_options = LDST_SGF;
1051  }
1052  append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1053  req->cryptlen - authsize, in_options);
1054 
1055  if (contig & GIV_DST_CONTIG) {
1056  dst_dma = edesc->iv_dma;
1057  } else {
1058  if (likely(req->src == req->dst)) {
1059  dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1060  edesc->assoc_nents;
1061  out_options = LDST_SGF;
1062  } else {
1063  dst_dma = edesc->sec4_sg_dma +
1064  sec4_sg_index *
1065  sizeof(struct sec4_sg_entry);
1066  out_options = LDST_SGF;
1067  }
1068  }
1069 
1070  append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
1071 }
1072 
1073 /*
1074  * Fill in ablkcipher job descriptor
1075  */
1076 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1077  struct ablkcipher_edesc *edesc,
1078  struct ablkcipher_request *req,
1079  bool iv_contig)
1080 {
1081  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1082  int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1083  u32 *desc = edesc->hw_desc;
1084  u32 out_options = 0, in_options;
1085  dma_addr_t dst_dma, src_dma;
1086  int len, sec4_sg_index = 0;
1087 
1088 #ifdef DEBUG
1089  print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1090  DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1091  ivsize, 1);
1092  print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
1093  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1094  edesc->src_nents ? 100 : req->nbytes, 1);
1095 #endif
1096 
1097  len = desc_len(sh_desc);
1098  init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1099 
1100  if (iv_contig) {
1101  src_dma = edesc->iv_dma;
1102  in_options = 0;
1103  } else {
1104  src_dma = edesc->sec4_sg_dma;
1105  sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1106  in_options = LDST_SGF;
1107  }
1108  append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1109 
1110  if (likely(req->src == req->dst)) {
1111  if (!edesc->src_nents && iv_contig) {
1112  dst_dma = sg_dma_address(req->src);
1113  } else {
1114  dst_dma = edesc->sec4_sg_dma +
1115  sizeof(struct sec4_sg_entry);
1116  out_options = LDST_SGF;
1117  }
1118  } else {
1119  if (!edesc->dst_nents) {
1120  dst_dma = sg_dma_address(req->dst);
1121  } else {
1122  dst_dma = edesc->sec4_sg_dma +
1123  sec4_sg_index * sizeof(struct sec4_sg_entry);
1124  out_options = LDST_SGF;
1125  }
1126  }
1127  append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1128 }
1129 
1130 /*
1131  * allocate and map the aead extended descriptor
1132  */
1133 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1134  int desc_bytes, bool *all_contig_ptr)
1135 {
1136  struct crypto_aead *aead = crypto_aead_reqtfm(req);
1137  struct caam_ctx *ctx = crypto_aead_ctx(aead);
1138  struct device *jrdev = ctx->jrdev;
1139  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1140  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1141  int assoc_nents, src_nents, dst_nents = 0;
1142  struct aead_edesc *edesc;
1143  dma_addr_t iv_dma = 0;
1144  int sgc;
1145  bool all_contig = true;
1146  bool assoc_chained = false, src_chained = false, dst_chained = false;
1147  int ivsize = crypto_aead_ivsize(aead);
1148  int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1149 
1150  assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1151  src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1152 
1153  if (unlikely(req->dst != req->src))
1154  dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1155 
1156  sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1157  DMA_BIDIRECTIONAL, assoc_chained);
1158  if (likely(req->src == req->dst)) {
1159  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1160  DMA_BIDIRECTIONAL, src_chained);
1161  } else {
1162  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1163  DMA_TO_DEVICE, src_chained);
1164  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1165  DMA_FROM_DEVICE, dst_chained);
1166  }
1167 
1168  /* Check if data are contiguous */
1169  iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1170  if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1171  iv_dma || src_nents || iv_dma + ivsize !=
1172  sg_dma_address(req->src)) {
1173  all_contig = false;
1174  assoc_nents = assoc_nents ? : 1;
1175  src_nents = src_nents ? : 1;
1176  sec4_sg_len = assoc_nents + 1 + src_nents;
1177  }
1178  sec4_sg_len += dst_nents;
1179 
1180  sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1181 
1182  /* allocate space for base edesc and hw desc commands, link tables */
1183  edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1184  sec4_sg_bytes, GFP_DMA | flags);
1185  if (!edesc) {
1186  dev_err(jrdev, "could not allocate extended descriptor\n");
1187  return ERR_PTR(-ENOMEM);
1188  }
1189 
1190  edesc->assoc_nents = assoc_nents;
1191  edesc->assoc_chained = assoc_chained;
1192  edesc->src_nents = src_nents;
1193  edesc->src_chained = src_chained;
1194  edesc->dst_nents = dst_nents;
1195  edesc->dst_chained = dst_chained;
1196  edesc->iv_dma = iv_dma;
1197  edesc->sec4_sg_bytes = sec4_sg_bytes;
1198  edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1199  desc_bytes;
1200  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1201  sec4_sg_bytes, DMA_TO_DEVICE);
1202  *all_contig_ptr = all_contig;
1203 
1204  sec4_sg_index = 0;
1205  if (!all_contig) {
1206  sg_to_sec4_sg(req->assoc,
1207  (assoc_nents ? : 1),
1208  edesc->sec4_sg +
1209  sec4_sg_index, 0);
1210  sec4_sg_index += assoc_nents ? : 1;
1211  dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1212  iv_dma, ivsize, 0);
1213  sec4_sg_index += 1;
1214  sg_to_sec4_sg_last(req->src,
1215  (src_nents ? : 1),
1216  edesc->sec4_sg +
1217  sec4_sg_index, 0);
1218  sec4_sg_index += src_nents ? : 1;
1219  }
1220  if (dst_nents) {
1221  sg_to_sec4_sg_last(req->dst, dst_nents,
1222  edesc->sec4_sg + sec4_sg_index, 0);
1223  }
1224 
1225  return edesc;
1226 }
1227 
1228 static int aead_encrypt(struct aead_request *req)
1229 {
1230  struct aead_edesc *edesc;
1231  struct crypto_aead *aead = crypto_aead_reqtfm(req);
1232  struct caam_ctx *ctx = crypto_aead_ctx(aead);
1233  struct device *jrdev = ctx->jrdev;
1234  bool all_contig;
1235  u32 *desc;
1236  int ret = 0;
1237 
1238  req->cryptlen += ctx->authsize;
1239 
1240  /* allocate extended descriptor */
1241  edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1242  CAAM_CMD_SZ, &all_contig);
1243  if (IS_ERR(edesc))
1244  return PTR_ERR(edesc);
1245 
1246  /* Create and submit job descriptor */
1247  init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1248  all_contig, true);
1249 #ifdef DEBUG
1250  print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1251  DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1252  desc_bytes(edesc->hw_desc), 1);
1253 #endif
1254 
1255  desc = edesc->hw_desc;
1256  ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1257  if (!ret) {
1258  ret = -EINPROGRESS;
1259  } else {
1260  aead_unmap(jrdev, edesc, req);
1261  kfree(edesc);
1262  }
1263 
1264  return ret;
1265 }
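For context, a hypothetical caller-side sketch (not part of this file) of how another kernel module might drive the aead_encrypt() path above through the generic crypto API. It assumes the usual <crypto/aead.h> declarations, and that the key blob is already packed in the authenc rtattr format which aead_setkey() parses; the completion status handed to the callback is not re-checked here.

/*
 * Hypothetical consumer sketch: in-place encryption with
 * "authenc(hmac(sha1),cbc(aes))", backed by authenc-hmac-sha1-cbc-aes-caam
 * when this driver is loaded.  Error handling is abbreviated.
 */
static void example_aead_done(struct crypto_async_request *areq, int err)
{
	complete(areq->data);	/* caller passed a struct completion */
}

static int example_aead_encrypt(struct scatterlist *assoc, unsigned int assoclen,
				struct scatterlist *sg, unsigned int cryptlen,
				u8 *iv, const u8 *authenc_key, unsigned int keylen)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, authenc_key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_aead_done, &done);
	aead_request_set_assoc(req, assoc, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);	/* in place */

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* job ring accepted the descriptor; wait for the callback */
		wait_for_completion(&done);
		ret = 0;
	}

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}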
1266 
1267 static int aead_decrypt(struct aead_request *req)
1268 {
1269  struct aead_edesc *edesc;
1270  struct crypto_aead *aead = crypto_aead_reqtfm(req);
1271  struct caam_ctx *ctx = crypto_aead_ctx(aead);
1272  struct device *jrdev = ctx->jrdev;
1273  bool all_contig;
1274  u32 *desc;
1275  int ret = 0;
1276 
1277  /* allocate extended descriptor */
1278  edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1279  CAAM_CMD_SZ, &all_contig);
1280  if (IS_ERR(edesc))
1281  return PTR_ERR(edesc);
1282 
1283 #ifdef DEBUG
1284  print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
1285  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1286  req->cryptlen, 1);
1287 #endif
1288 
1289  /* Create and submit job descriptor*/
1290  init_aead_job(ctx->sh_desc_dec,
1291  ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1292 #ifdef DEBUG
1293  print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1294  DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1295  desc_bytes(edesc->hw_desc), 1);
1296 #endif
1297 
1298  desc = edesc->hw_desc;
1299  ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1300  if (!ret) {
1301  ret = -EINPROGRESS;
1302  } else {
1303  aead_unmap(jrdev, edesc, req);
1304  kfree(edesc);
1305  }
1306 
1307  return ret;
1308 }
1309 
1310 /*
1311  * allocate and map the aead extended descriptor for aead givencrypt
1312  */
1313 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1314  *greq, int desc_bytes,
1315  u32 *contig_ptr)
1316 {
1317  struct aead_request *req = &greq->areq;
1318  struct crypto_aead *aead = crypto_aead_reqtfm(req);
1319  struct caam_ctx *ctx = crypto_aead_ctx(aead);
1320  struct device *jrdev = ctx->jrdev;
1321  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1322  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1323  int assoc_nents, src_nents, dst_nents = 0;
1324  struct aead_edesc *edesc;
1325  dma_addr_t iv_dma = 0;
1326  int sgc;
1327  u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1328  int ivsize = crypto_aead_ivsize(aead);
1329  bool assoc_chained = false, src_chained = false, dst_chained = false;
1330  int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1331 
1332  assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1333  src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1334 
1335  if (unlikely(req->dst != req->src))
1336  dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1337 
1338  sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1339  DMA_BIDIRECTIONAL, assoc_chained);
1340  if (likely(req->src == req->dst)) {
1341  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1342  DMA_BIDIRECTIONAL, src_chained);
1343  } else {
1344  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1345  DMA_TO_DEVICE, src_chained);
1346  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1347  DMA_FROM_DEVICE, dst_chained);
1348  }
1349 
1350  /* Check if data are contiguous */
1351  iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1352  if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1353  iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1354  contig &= ~GIV_SRC_CONTIG;
1355  if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1356  contig &= ~GIV_DST_CONTIG;
1357  if (unlikely(req->src != req->dst)) {
1358  dst_nents = dst_nents ? : 1;
1359  sec4_sg_len += 1;
1360  }
1361  if (!(contig & GIV_SRC_CONTIG)) {
1362  assoc_nents = assoc_nents ? : 1;
1363  src_nents = src_nents ? : 1;
1364  sec4_sg_len += assoc_nents + 1 + src_nents;
1365  if (likely(req->src == req->dst))
1366  contig &= ~GIV_DST_CONTIG;
1367  }
1368  sec4_sg_len += dst_nents;
1369 
1370  sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1371 
1372  /* allocate space for base edesc and hw desc commands, link tables */
1373  edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1374  sec4_sg_bytes, GFP_DMA | flags);
1375  if (!edesc) {
1376  dev_err(jrdev, "could not allocate extended descriptor\n");
1377  return ERR_PTR(-ENOMEM);
1378  }
1379 
1380  edesc->assoc_nents = assoc_nents;
1381  edesc->assoc_chained = assoc_chained;
1382  edesc->src_nents = src_nents;
1383  edesc->src_chained = src_chained;
1384  edesc->dst_nents = dst_nents;
1385  edesc->dst_chained = dst_chained;
1386  edesc->iv_dma = iv_dma;
1387  edesc->sec4_sg_bytes = sec4_sg_bytes;
1388  edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1389  desc_bytes;
1390  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1391  sec4_sg_bytes, DMA_TO_DEVICE);
1392  *contig_ptr = contig;
1393 
1394  sec4_sg_index = 0;
1395  if (!(contig & GIV_SRC_CONTIG)) {
1396  sg_to_sec4_sg(req->assoc, assoc_nents,
1397  edesc->sec4_sg +
1398  sec4_sg_index, 0);
1399  sec4_sg_index += assoc_nents;
1400  dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1401  iv_dma, ivsize, 0);
1402  sec4_sg_index += 1;
1403  sg_to_sec4_sg_last(req->src, src_nents,
1404  edesc->sec4_sg +
1405  sec4_sg_index, 0);
1406  sec4_sg_index += src_nents;
1407  }
1408  if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1409  dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1410  iv_dma, ivsize, 0);
1411  sec4_sg_index += 1;
1412  sg_to_sec4_sg_last(req->dst, dst_nents,
1413  edesc->sec4_sg + sec4_sg_index, 0);
1414  }
1415 
1416  return edesc;
1417 }
1418 
1419 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1420 {
1421  struct aead_request *req = &areq->areq;
1422  struct aead_edesc *edesc;
1423  struct crypto_aead *aead = crypto_aead_reqtfm(req);
1424  struct caam_ctx *ctx = crypto_aead_ctx(aead);
1425  struct device *jrdev = ctx->jrdev;
1426  u32 contig;
1427  u32 *desc;
1428  int ret = 0;
1429 
1430  req->cryptlen += ctx->authsize;
1431 
1432  /* allocate extended descriptor */
1433  edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1434  CAAM_CMD_SZ, &contig);
1435 
1436  if (IS_ERR(edesc))
1437  return PTR_ERR(edesc);
1438 
1439 #ifdef DEBUG
1440  print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1441  DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1442  req->cryptlen, 1);
1443 #endif
1444 
1445  /* Create and submit job descriptor*/
1446  init_aead_giv_job(ctx->sh_desc_givenc,
1447  ctx->sh_desc_givenc_dma, edesc, req, contig);
1448 #ifdef DEBUG
1449  print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1450  DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1451  desc_bytes(edesc->hw_desc), 1);
1452 #endif
1453 
1454  desc = edesc->hw_desc;
1455  ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1456  if (!ret) {
1457  ret = -EINPROGRESS;
1458  } else {
1459  aead_unmap(jrdev, edesc, req);
1460  kfree(edesc);
1461  }
1462 
1463  return ret;
1464 }
1465 
1466 /*
1467  * allocate and map the ablkcipher extended descriptor for ablkcipher
1468  */
1469 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1470  *req, int desc_bytes,
1471  bool *iv_contig_out)
1472 {
1473  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1474  struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1475  struct device *jrdev = ctx->jrdev;
1476  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1477  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1478  GFP_KERNEL : GFP_ATOMIC;
1479  int src_nents, dst_nents = 0, sec4_sg_bytes;
1480  struct ablkcipher_edesc *edesc;
1481  dma_addr_t iv_dma = 0;
1482  bool iv_contig = false;
1483  int sgc;
1484  int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1485  bool src_chained = false, dst_chained = false;
1486  int sec4_sg_index;
1487 
1488  src_nents = sg_count(req->src, req->nbytes, &src_chained);
1489 
1490  if (req->dst != req->src)
1491  dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1492 
1493  if (likely(req->src == req->dst)) {
1494  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1495  DMA_BIDIRECTIONAL, src_chained);
1496  } else {
1497  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1498  DMA_TO_DEVICE, src_chained);
1499  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1500  DMA_FROM_DEVICE, dst_chained);
1501  }
1502 
1503  /*
1504  * Check if iv can be contiguous with source and destination.
1505  * If so, include it. If not, create scatterlist.
1506  */
1507  iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1508  if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1509  iv_contig = true;
1510  else
1511  src_nents = src_nents ? : 1;
1512  sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1513  sizeof(struct sec4_sg_entry);
1514 
1515  /* allocate space for base edesc and hw desc commands, link tables */
1516  edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1517  sec4_sg_bytes, GFP_DMA | flags);
1518  if (!edesc) {
1519  dev_err(jrdev, "could not allocate extended descriptor\n");
1520  return ERR_PTR(-ENOMEM);
1521  }
1522 
1523  edesc->src_nents = src_nents;
1524  edesc->src_chained = src_chained;
1525  edesc->dst_nents = dst_nents;
1526  edesc->dst_chained = dst_chained;
1527  edesc->sec4_sg_bytes = sec4_sg_bytes;
1528  edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1529  desc_bytes;
1530 
1531  sec4_sg_index = 0;
1532  if (!iv_contig) {
1533  dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1534  sg_to_sec4_sg_last(req->src, src_nents,
1535  edesc->sec4_sg + 1, 0);
1536  sec4_sg_index += 1 + src_nents;
1537  }
1538 
1539  if (dst_nents) {
1540  sg_to_sec4_sg_last(req->dst, dst_nents,
1541  edesc->sec4_sg + sec4_sg_index, 0);
1542  }
1543 
1544  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1545  sec4_sg_bytes, DMA_TO_DEVICE);
1546  edesc->iv_dma = iv_dma;
1547 
1548 #ifdef DEBUG
1549  print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1550  DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1551  sec4_sg_bytes, 1);
1552 #endif
1553 
1554  *iv_contig_out = iv_contig;
1555  return edesc;
1556 }
1557 
1558 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1559 {
1560  struct ablkcipher_edesc *edesc;
1561  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1562  struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1563  struct device *jrdev = ctx->jrdev;
1564  bool iv_contig;
1565  u32 *desc;
1566  int ret = 0;
1567 
1568  /* allocate extended descriptor */
1569  edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1570  CAAM_CMD_SZ, &iv_contig);
1571  if (IS_ERR(edesc))
1572  return PTR_ERR(edesc);
1573 
1574  /* Create and submit job descriptor*/
1575  init_ablkcipher_job(ctx->sh_desc_enc,
1576  ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1577 #ifdef DEBUG
1578  print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1579  DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1580  desc_bytes(edesc->hw_desc), 1);
1581 #endif
1582  desc = edesc->hw_desc;
1583  ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1584 
1585  if (!ret) {
1586  ret = -EINPROGRESS;
1587  } else {
1588  ablkcipher_unmap(jrdev, edesc, req);
1589  kfree(edesc);
1590  }
1591 
1592  return ret;
1593 }
1594 
1595 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1596 {
1597  struct ablkcipher_edesc *edesc;
1598  struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1599  struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1600  struct device *jrdev = ctx->jrdev;
1601  bool iv_contig;
1602  u32 *desc;
1603  int ret = 0;
1604 
1605  /* allocate extended descriptor */
1606  edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1607  CAAM_CMD_SZ, &iv_contig);
1608  if (IS_ERR(edesc))
1609  return PTR_ERR(edesc);
1610 
1611  /* Create and submit job descriptor*/
1612  init_ablkcipher_job(ctx->sh_desc_dec,
1613  ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1614  desc = edesc->hw_desc;
1615 #ifdef DEBUG
1616  print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1617  DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1618  desc_bytes(edesc->hw_desc), 1);
1619 #endif
1620 
1621  ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1622  if (!ret) {
1623  ret = -EINPROGRESS;
1624  } else {
1625  ablkcipher_unmap(jrdev, edesc, req);
1626  kfree(edesc);
1627  }
1628 
1629  return ret;
1630 }
1631 
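Similarly, a hypothetical sketch of a caller driving the ablkcipher entry points above. It assumes the cbc(aes) ablkcipher template that this driver registers (the ablkcipher entries are not shown in this excerpt), and follows the same completion-based pattern as the aead example for -EINPROGRESS.

/*
 * Hypothetical consumer sketch: one-shot in-place CBC-AES encryption that is
 * serviced by ablkcipher_encrypt() above.  Error handling is abbreviated.
 */
static void example_skcipher_done(struct crypto_async_request *areq, int err)
{
	complete(areq->data);	/* caller passed a struct completion */
}

static int example_cbc_aes_encrypt(struct scatterlist *sg, unsigned int nbytes,
				   u8 *iv, const u8 *key, unsigned int keylen)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_skcipher_done, &done);
	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);	/* in place */

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}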
1632 #define template_aead template_u.aead
1633 #define template_ablkcipher template_u.ablkcipher
1634 struct caam_alg_template {
1635  char name[CRYPTO_MAX_ALG_NAME];
1636  char driver_name[CRYPTO_MAX_ALG_NAME];
1637  unsigned int blocksize;
1638  u32 type;
1639  union {
1640  struct ablkcipher_alg ablkcipher;
1641  struct aead_alg aead;
1642  struct blkcipher_alg blkcipher;
1643  struct cipher_alg cipher;
1644  struct compress_alg compress;
1645  struct rng_alg rng;
1646  } template_u;
1647  u32 class1_alg_type;
1648  u32 class2_alg_type;
1649  u32 alg_op;
1650 };
1651 
1652 static struct caam_alg_template driver_algs[] = {
1653  /*
1654  * single-pass ipsec_esp descriptor
1655  * authencesn(*,*) is also registered, although not present
1656  * explicitly here.
1657  */
1658  {
1659  .name = "authenc(hmac(md5),cbc(aes))",
1660  .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1661  .blocksize = AES_BLOCK_SIZE,
1662  .type = CRYPTO_ALG_TYPE_AEAD,
1663  .template_aead = {
1664  .setkey = aead_setkey,
1665  .setauthsize = aead_setauthsize,
1666  .encrypt = aead_encrypt,
1667  .decrypt = aead_decrypt,
1668  .givencrypt = aead_givencrypt,
1669  .geniv = "<built-in>",
1670  .ivsize = AES_BLOCK_SIZE,
1671  .maxauthsize = MD5_DIGEST_SIZE,
1672  },
1673  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1674  .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1675  .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1676  },
1677  {
1678  .name = "authenc(hmac(sha1),cbc(aes))",
1679  .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1680  .blocksize = AES_BLOCK_SIZE,
1681  .type = CRYPTO_ALG_TYPE_AEAD,
1682  .template_aead = {
1683  .setkey = aead_setkey,
1684  .setauthsize = aead_setauthsize,
1685  .encrypt = aead_encrypt,
1686  .decrypt = aead_decrypt,
1687  .givencrypt = aead_givencrypt,
1688  .geniv = "<built-in>",
1689  .ivsize = AES_BLOCK_SIZE,
1690  .maxauthsize = SHA1_DIGEST_SIZE,
1691  },
1692  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1693  .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1694  .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1695  },
1696  {
1697  .name = "authenc(hmac(sha224),cbc(aes))",
1698  .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1699  .blocksize = AES_BLOCK_SIZE,
1700  .template_aead = {
1701  .setkey = aead_setkey,
1702  .setauthsize = aead_setauthsize,
1703  .encrypt = aead_encrypt,
1704  .decrypt = aead_decrypt,
1705  .givencrypt = aead_givencrypt,
1706  .geniv = "<built-in>",
1707  .ivsize = AES_BLOCK_SIZE,
1708  .maxauthsize = SHA224_DIGEST_SIZE,
1709  },
1710  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1711  .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1712  OP_ALG_AAI_HMAC_PRECOMP,
1713  .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1714  },
1715  {
1716  .name = "authenc(hmac(sha256),cbc(aes))",
1717  .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1718  .blocksize = AES_BLOCK_SIZE,
1719  .type = CRYPTO_ALG_TYPE_AEAD,
1720  .template_aead = {
1721  .setkey = aead_setkey,
1722  .setauthsize = aead_setauthsize,
1723  .encrypt = aead_encrypt,
1724  .decrypt = aead_decrypt,
1725  .givencrypt = aead_givencrypt,
1726  .geniv = "<built-in>",
1727  .ivsize = AES_BLOCK_SIZE,
1728  .maxauthsize = SHA256_DIGEST_SIZE,
1729  },
1730  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1731  .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1732  OP_ALG_AAI_HMAC_PRECOMP,
1733  .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1734  },
1735  {
1736  .name = "authenc(hmac(sha384),cbc(aes))",
1737  .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1738  .blocksize = AES_BLOCK_SIZE,
1739  .template_aead = {
1740  .setkey = aead_setkey,
1741  .setauthsize = aead_setauthsize,
1742  .encrypt = aead_encrypt,
1743  .decrypt = aead_decrypt,
1744  .givencrypt = aead_givencrypt,
1745  .geniv = "<built-in>",
1746  .ivsize = AES_BLOCK_SIZE,
1747  .maxauthsize = SHA384_DIGEST_SIZE,
1748  },
1749  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1750  .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1751  OP_ALG_AAI_HMAC_PRECOMP,
1752  .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1753  },
1754 
1755  {
1756  .name = "authenc(hmac(sha512),cbc(aes))",
1757  .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1758  .blocksize = AES_BLOCK_SIZE,
1759  .type = CRYPTO_ALG_TYPE_AEAD,
1760  .template_aead = {
1761  .setkey = aead_setkey,
1762  .setauthsize = aead_setauthsize,
1763  .encrypt = aead_encrypt,
1764  .decrypt = aead_decrypt,
1765  .givencrypt = aead_givencrypt,
1766  .geniv = "<built-in>",
1767  .ivsize = AES_BLOCK_SIZE,
1768  .maxauthsize = SHA512_DIGEST_SIZE,
1769  },
1770  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1771  .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1772  OP_ALG_AAI_HMAC_PRECOMP,
1773  .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1774  },
1775  {
1776  .name = "authenc(hmac(md5),cbc(des3_ede))",
1777  .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1778  .blocksize = DES3_EDE_BLOCK_SIZE,
1779  .type = CRYPTO_ALG_TYPE_AEAD,
1780  .template_aead = {
1781  .setkey = aead_setkey,
1782  .setauthsize = aead_setauthsize,
1783  .encrypt = aead_encrypt,
1784  .decrypt = aead_decrypt,
1785  .givencrypt = aead_givencrypt,
1786  .geniv = "<built-in>",
1787  .ivsize = DES3_EDE_BLOCK_SIZE,
1788  .maxauthsize = MD5_DIGEST_SIZE,
1789  },
1790  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1791  .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1792  .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1793  },
1794  {
1795  .name = "authenc(hmac(sha1),cbc(des3_ede))",
1796  .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1797  .blocksize = DES3_EDE_BLOCK_SIZE,
1798  .type = CRYPTO_ALG_TYPE_AEAD,
1799  .template_aead = {
1800  .setkey = aead_setkey,
1801  .setauthsize = aead_setauthsize,
1802  .encrypt = aead_encrypt,
1803  .decrypt = aead_decrypt,
1804  .givencrypt = aead_givencrypt,
1805  .geniv = "<built-in>",
1806  .ivsize = DES3_EDE_BLOCK_SIZE,
1807  .maxauthsize = SHA1_DIGEST_SIZE,
1808  },
1809  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1810  .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1811  .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1812  },
1813  {
1814  .name = "authenc(hmac(sha224),cbc(des3_ede))",
1815  .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1816  .blocksize = DES3_EDE_BLOCK_SIZE,
1817  .template_aead = {
1818  .setkey = aead_setkey,
1819  .setauthsize = aead_setauthsize,
1820  .encrypt = aead_encrypt,
1821  .decrypt = aead_decrypt,
1822  .givencrypt = aead_givencrypt,
1823  .geniv = "<built-in>",
1824  .ivsize = DES3_EDE_BLOCK_SIZE,
1825  .maxauthsize = SHA224_DIGEST_SIZE,
1826  },
1827  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1828  .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1829  OP_ALG_AAI_HMAC_PRECOMP,
1830  .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1831  },
1832  {
1833  .name = "authenc(hmac(sha256),cbc(des3_ede))",
1834  .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1835  .blocksize = DES3_EDE_BLOCK_SIZE,
1836  .type = CRYPTO_ALG_TYPE_AEAD,
1837  .template_aead = {
1838  .setkey = aead_setkey,
1839  .setauthsize = aead_setauthsize,
1840  .encrypt = aead_encrypt,
1841  .decrypt = aead_decrypt,
1842  .givencrypt = aead_givencrypt,
1843  .geniv = "<built-in>",
1844  .ivsize = DES3_EDE_BLOCK_SIZE,
1845  .maxauthsize = SHA256_DIGEST_SIZE,
1846  },
1847  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1848  .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1849  OP_ALG_AAI_HMAC_PRECOMP,
1850  .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1851  },
1852  {
1853  .name = "authenc(hmac(sha384),cbc(des3_ede))",
1854  .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1855  .blocksize = DES3_EDE_BLOCK_SIZE,
1856  .template_aead = {
1857  .setkey = aead_setkey,
1858  .setauthsize = aead_setauthsize,
1859  .encrypt = aead_encrypt,
1860  .decrypt = aead_decrypt,
1861  .givencrypt = aead_givencrypt,
1862  .geniv = "<built-in>",
1863  .ivsize = DES3_EDE_BLOCK_SIZE,
1864  .maxauthsize = SHA384_DIGEST_SIZE,
1865  },
1866  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1867  .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1868  OP_ALG_AAI_HMAC_PRECOMP,
1869  .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1870  },
1871  {
1872  .name = "authenc(hmac(sha512),cbc(des3_ede))",
1873  .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1874  .blocksize = DES3_EDE_BLOCK_SIZE,
1875  .type = CRYPTO_ALG_TYPE_AEAD,
1876  .template_aead = {
1877  .setkey = aead_setkey,
1878  .setauthsize = aead_setauthsize,
1879  .encrypt = aead_encrypt,
1880  .decrypt = aead_decrypt,
1881  .givencrypt = aead_givencrypt,
1882  .geniv = "<built-in>",
1883  .ivsize = DES3_EDE_BLOCK_SIZE,
1884  .maxauthsize = SHA512_DIGEST_SIZE,
1885  },
1886  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1887  .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1888  OP_ALG_AAI_HMAC_PRECOMP,
1889  .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1890  },
1891  {
1892  .name = "authenc(hmac(md5),cbc(des))",
1893  .driver_name = "authenc-hmac-md5-cbc-des-caam",
1894  .blocksize = DES_BLOCK_SIZE,
1895  .type = CRYPTO_ALG_TYPE_AEAD,
1896  .template_aead = {
1897  .setkey = aead_setkey,
1898  .setauthsize = aead_setauthsize,
1899  .encrypt = aead_encrypt,
1900  .decrypt = aead_decrypt,
1901  .givencrypt = aead_givencrypt,
1902  .geniv = "<built-in>",
1903  .ivsize = DES_BLOCK_SIZE,
1904  .maxauthsize = MD5_DIGEST_SIZE,
1905  },
1906  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1907  .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1908  .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1909  },
1910  {
1911  .name = "authenc(hmac(sha1),cbc(des))",
1912  .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1913  .blocksize = DES_BLOCK_SIZE,
1914  .type = CRYPTO_ALG_TYPE_AEAD,
1915  .template_aead = {
1916  .setkey = aead_setkey,
1917  .setauthsize = aead_setauthsize,
1918  .encrypt = aead_encrypt,
1919  .decrypt = aead_decrypt,
1920  .givencrypt = aead_givencrypt,
1921  .geniv = "<built-in>",
1922  .ivsize = DES_BLOCK_SIZE,
1923  .maxauthsize = SHA1_DIGEST_SIZE,
1924  },
1925  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1926  .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1927  .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1928  },
1929  {
1930  .name = "authenc(hmac(sha224),cbc(des))",
1931  .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1932  .blocksize = DES_BLOCK_SIZE,
1933  .template_aead = {
1934  .setkey = aead_setkey,
1935  .setauthsize = aead_setauthsize,
1936  .encrypt = aead_encrypt,
1937  .decrypt = aead_decrypt,
1938  .givencrypt = aead_givencrypt,
1939  .geniv = "<built-in>",
1940  .ivsize = DES_BLOCK_SIZE,
1941  .maxauthsize = SHA224_DIGEST_SIZE,
1942  },
1943  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1944  .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1945  OP_ALG_AAI_HMAC_PRECOMP,
1946  .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1947  },
1948  {
1949  .name = "authenc(hmac(sha256),cbc(des))",
1950  .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1951  .blocksize = DES_BLOCK_SIZE,
1952  .type = CRYPTO_ALG_TYPE_AEAD,
1953  .template_aead = {
1954  .setkey = aead_setkey,
1955  .setauthsize = aead_setauthsize,
1956  .encrypt = aead_encrypt,
1957  .decrypt = aead_decrypt,
1958  .givencrypt = aead_givencrypt,
1959  .geniv = "<built-in>",
1960  .ivsize = DES_BLOCK_SIZE,
1961  .maxauthsize = SHA256_DIGEST_SIZE,
1962  },
1963  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1964  .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1965  OP_ALG_AAI_HMAC_PRECOMP,
1966  .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1967  },
1968  {
1969  .name = "authenc(hmac(sha384),cbc(des))",
1970  .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1971  .blocksize = DES_BLOCK_SIZE,
1972  .template_aead = {
1973  .setkey = aead_setkey,
1974  .setauthsize = aead_setauthsize,
1975  .encrypt = aead_encrypt,
1976  .decrypt = aead_decrypt,
1977  .givencrypt = aead_givencrypt,
1978  .geniv = "<built-in>",
1979  .ivsize = DES_BLOCK_SIZE,
1980  .maxauthsize = SHA384_DIGEST_SIZE,
1981  },
1982  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1983  .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1984  OP_ALG_AAI_HMAC_PRECOMP,
1985  .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1986  },
1987  {
1988  .name = "authenc(hmac(sha512),cbc(des))",
1989  .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1990  .blocksize = DES_BLOCK_SIZE,
1991  .type = CRYPTO_ALG_TYPE_AEAD,
1992  .template_aead = {
1993  .setkey = aead_setkey,
1994  .setauthsize = aead_setauthsize,
1995  .encrypt = aead_encrypt,
1996  .decrypt = aead_decrypt,
1997  .givencrypt = aead_givencrypt,
1998  .geniv = "<built-in>",
1999  .ivsize = DES_BLOCK_SIZE,
2000  .maxauthsize = SHA512_DIGEST_SIZE,
2001  },
2002  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2003  .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2004  OP_ALG_AAI_HMAC_PRECOMP,
2005  .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2006  },
2007  /* ablkcipher descriptor */
2008  {
2009  .name = "cbc(aes)",
2010  .driver_name = "cbc-aes-caam",
2011  .blocksize = AES_BLOCK_SIZE,
2012  .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2013  .template_ablkcipher = {
2014  .setkey = ablkcipher_setkey,
2015  .encrypt = ablkcipher_encrypt,
2016  .decrypt = ablkcipher_decrypt,
2017  .geniv = "eseqiv",
2018  .min_keysize = AES_MIN_KEY_SIZE,
2019  .max_keysize = AES_MAX_KEY_SIZE,
2020  .ivsize = AES_BLOCK_SIZE,
2021  },
2022  .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2023  },
2024  {
2025  .name = "cbc(des3_ede)",
2026  .driver_name = "cbc-3des-caam",
2027  .blocksize = DES3_EDE_BLOCK_SIZE,
2028  .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2029  .template_ablkcipher = {
2030  .setkey = ablkcipher_setkey,
2031  .encrypt = ablkcipher_encrypt,
2032  .decrypt = ablkcipher_decrypt,
2033  .geniv = "eseqiv",
2034  .min_keysize = DES3_EDE_KEY_SIZE,
2035  .max_keysize = DES3_EDE_KEY_SIZE,
2036  .ivsize = DES3_EDE_BLOCK_SIZE,
2037  },
2038  .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2039  },
2040  {
2041  .name = "cbc(des)",
2042  .driver_name = "cbc-des-caam",
2043  .blocksize = DES_BLOCK_SIZE,
2044  .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2045  .template_ablkcipher = {
2046  .setkey = ablkcipher_setkey,
2047  .encrypt = ablkcipher_encrypt,
2048  .decrypt = ablkcipher_decrypt,
2049  .geniv = "eseqiv",
2050  .min_keysize = DES_KEY_SIZE,
2051  .max_keysize = DES_KEY_SIZE,
2052  .ivsize = DES_BLOCK_SIZE,
2053  },
2054  .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2055  }
2056 };
2057 
2058 struct caam_crypto_alg {
2059  struct list_head entry;
2060  struct device *ctrldev;
2061  int class1_alg_type;
2062  int class2_alg_type;
2063  int alg_op;
2064  struct crypto_alg crypto_alg;
2065 };
2066 
2067 static int caam_cra_init(struct crypto_tfm *tfm)
2068 {
2069  struct crypto_alg *alg = tfm->__crt_alg;
2070  struct caam_crypto_alg *caam_alg =
2071  container_of(alg, struct caam_crypto_alg, crypto_alg);
2072  struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2073  struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2074  int tgt_jr = atomic_inc_return(&priv->tfm_count);
2075 
2076  /*
2077  * distribute tfms across job rings to ensure in-order
2078  * crypto request processing per tfm
2079  */
2080  ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
2081 
2082  /* copy descriptor header template value */
2083  ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2084  ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2085  ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2086 
2087  return 0;
2088 }
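
The (tgt_jr / 2) % priv->total_jobrs expression above appears to assign two consecutively created tfms to the same job ring before advancing to the next one, so all requests for a given tfm stay on one ring and complete in order. A standalone sketch of that mapping; NUM_JOBRS and pick_jr are illustrative names, not part of the driver:

#include <stdio.h>

#define NUM_JOBRS 4	/* assumed number of available job rings */

static int pick_jr(int tfm_count)
{
	/* two consecutive tfms share a ring, then the next ring is used */
	return (tfm_count / 2) % NUM_JOBRS;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("tfm %d -> job ring %d\n", i, pick_jr(i));
	return 0;
}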
2089 
2090 static void caam_cra_exit(struct crypto_tfm *tfm)
2091 {
2092  struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2093 
2094  if (ctx->sh_desc_enc_dma &&
2095  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2096  dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2097  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2098  if (ctx->sh_desc_dec_dma &&
2099  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2100  dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2101  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2102  if (ctx->sh_desc_givenc_dma &&
2103  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2104  dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2105  desc_bytes(ctx->sh_desc_givenc),
2106  DMA_TO_DEVICE);
2107 }
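
The unmap calls in caam_cra_exit() above undo DMA mappings that are created when the shared descriptors are built and mapped elsewhere in this driver. A minimal, hedged sketch of the map side of that pairing; map_shared_desc is a hypothetical wrapper introduced only for illustration, not driver code:

#include <linux/dma-mapping.h>
#include "desc_constr.h"	/* desc_bytes() length helper */

/* Hypothetical helper: map one built shared descriptor for the device. */
static int map_shared_desc(struct device *jrdev, u32 *desc,
			   dma_addr_t *desc_dma)
{
	*desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
				   DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, *desc_dma))
		return -ENOMEM;	/* descriptor must not be handed to hardware */
	return 0;
}

The corresponding dma_unmap_single() with the same device, address, length and direction is exactly what caam_cra_exit() performs for each of the three shared descriptors.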
2108 
2109 static void __exit caam_algapi_exit(void)
2110 {
2111 
2112  struct device_node *dev_node;
2113  struct platform_device *pdev;
2114  struct device *ctrldev;
2115  struct caam_drv_private *priv;
2116  struct caam_crypto_alg *t_alg, *n;
2117 
2118  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2119  if (!dev_node) {
2120  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2121  if (!dev_node)
2122  return;
2123  }
2124 
2125  pdev = of_find_device_by_node(dev_node);
2126  if (!pdev)
2127  return;
2128 
2129  ctrldev = &pdev->dev;
2130  of_node_put(dev_node);
2131  priv = dev_get_drvdata(ctrldev);
2132 
2133  if (!priv->alg_list.next)
2134  return;
2135 
2136  list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2137  crypto_unregister_alg(&t_alg->crypto_alg);
2138  list_del(&t_alg->entry);
2139  kfree(t_alg);
2140  }
2141 }
2142 
2143 static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2144  struct caam_alg_template
2145  *template)
2146 {
2147  struct caam_crypto_alg *t_alg;
2148  struct crypto_alg *alg;
2149 
2150  t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2151  if (!t_alg) {
2152  dev_err(ctrldev, "failed to allocate t_alg\n");
2153  return ERR_PTR(-ENOMEM);
2154  }
2155 
2156  alg = &t_alg->crypto_alg;
2157 
2158  snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2159  snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2160  template->driver_name);
2161  alg->cra_module = THIS_MODULE;
2162  alg->cra_init = caam_cra_init;
2163  alg->cra_exit = caam_cra_exit;
2164  alg->cra_priority = CAAM_CRA_PRIORITY;
2165  alg->cra_blocksize = template->blocksize;
2166  alg->cra_alignmask = 0;
2167  alg->cra_ctxsize = sizeof(struct caam_ctx);
2168  alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2169  template->type;
2170  switch (template->type) {
2171  case CRYPTO_ALG_TYPE_ABLKCIPHER:
2172  alg->cra_type = &crypto_ablkcipher_type;
2173  alg->cra_ablkcipher = template->template_ablkcipher;
2174  break;
2175  case CRYPTO_ALG_TYPE_AEAD:
2176  alg->cra_type = &crypto_aead_type;
2177  alg->cra_aead = template->template_aead;
2178  break;
2179  }
2180 
2181  t_alg->class1_alg_type = template->class1_alg_type;
2182  t_alg->class2_alg_type = template->class2_alg_type;
2183  t_alg->alg_op = template->alg_op;
2184  t_alg->ctrldev = ctrldev;
2185 
2186  return t_alg;
2187 }
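
Once caam_alg_alloc() has filled in a struct crypto_alg and caam_algapi_init() below registers it, kernel users reach the algorithm through the generic crypto API by its cra_name, with CAAM_CRA_PRIORITY deciding whether this hardware implementation is preferred over a software authenc. A minimal, hedged usage sketch; try_caam_authenc is an illustrative function name, not part of the driver:

#include <linux/crypto.h>
#include <linux/err.h>

static int try_caam_authenc(void)
{
	struct crypto_aead *tfm;

	/* the highest-priority registered "authenc(hmac(sha1),cbc(aes))"
	 * implementation is selected; with CAAM present that is the
	 * "authenc-hmac-sha1-cbc-aes-caam" entry registered above */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set the key and issue aead_request operations here ... */

	crypto_free_aead(tfm);
	return 0;
}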
2188 
2189 static int __init caam_algapi_init(void)
2190 {
2191  struct device_node *dev_node;
2192  struct platform_device *pdev;
2193  struct device *ctrldev;
2194  struct caam_drv_private *priv;
2195  int i = 0, err = 0;
2196 
2197  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2198  if (!dev_node) {
2199  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2200  if (!dev_node)
2201  return -ENODEV;
2202  }
2203 
2204  pdev = of_find_device_by_node(dev_node);
2205  if (!pdev)
2206  return -ENODEV;
2207 
2208  ctrldev = &pdev->dev;
2209  priv = dev_get_drvdata(ctrldev);
2210  of_node_put(dev_node);
2211 
2212  INIT_LIST_HEAD(&priv->alg_list);
2213 
2214  atomic_set(&priv->tfm_count, -1);
2215 
2216  /* register crypto algorithms the device supports */
2217  for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2218  /* TODO: check if h/w supports alg */
2219  struct caam_crypto_alg *t_alg;
2220  bool done = false;
2221 
2222 authencesn:
2223  t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2224  if (IS_ERR(t_alg)) {
2225  err = PTR_ERR(t_alg);
2226  dev_warn(ctrldev, "%s alg allocation failed\n",
2227  driver_algs[i].driver_name);
2228  continue;
2229  }
2230 
2231  err = crypto_register_alg(&t_alg->crypto_alg);
2232  if (err) {
2233  dev_warn(ctrldev, "%s alg registration failed\n",
2234  t_alg->crypto_alg.cra_driver_name);
2235  kfree(t_alg);
2236  } else {
2237  list_add_tail(&t_alg->entry, &priv->alg_list);
2238  if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
2239  !memcmp(driver_algs[i].name, "authenc", 7) &&
2240  !done) {
2241  char *name;
2242 
2243  name = driver_algs[i].name;
2244  memmove(name + 10, name + 7, strlen(name) - 7);
2245  memcpy(name + 7, "esn", 3);
2246 
2247  name = driver_algs[i].driver_name;
2248  memmove(name + 10, name + 7, strlen(name) - 7);
2249  memcpy(name + 7, "esn", 3);
2250 
2251  done = true;
2252  goto authencesn;
2253  }
2254  }
2255  }
2256  if (!list_empty(&priv->alg_list))
2257  dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
2258  (char *)of_get_property(dev_node, "compatible", NULL));
2259 
2260  return err;
2261 }
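
The authencesn loop above registers each "authenc(...)" template a second time after rewriting its name and driver_name in place to "authencesn(...)": memmove() opens a three-byte gap after the seventh character and memcpy() fills the gap with "esn". A standalone userspace illustration of that string surgery, not driver code:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64] = "authenc(hmac(sha1),cbc(aes))";

	/* shift the tail right by three bytes; +1 also moves the NUL,
	 * which the driver instead gets from its zero-filled name buffer */
	memmove(name + 10, name + 7, strlen(name) - 7 + 1);
	memcpy(name + 7, "esn", 3);

	printf("%s\n", name);	/* prints authencesn(hmac(sha1),cbc(aes)) */
	return 0;
}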
2262 
2263 module_init(caam_algapi_init);
2264 module_exit(caam_algapi_exit);
2265 
2266 MODULE_LICENSE("GPL");
2267 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2268 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");