Linux Kernel  3.7.1
caamhash.c
1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |         |--->| (operation) |
23  *       .              |         |    | (load ctx2) |
24  *       .              |         |    ---------------
25  * ---------------      |         |
26  * | JobDesc #3  |------|         |
27  * | *(packet 3) |                |
28  * ---------------                |
29  *       .                        |
30  *       .                        |
31  * ---------------                |
32  * | JobDesc #4  |----------------|
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
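/*
 * In terms of the descriptor-construction helpers used throughout this file,
 * a job descriptor of the shape above is assembled roughly as follows (flags,
 * lengths and ordering vary per operation; see e.g. ahash_final_ctx() below):
 *
 *	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 */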
55 
56 #include "compat.h"
57 
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65 
66 #define CAAM_CRA_PRIORITY 3000
67 
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
70 
71 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73 
74 /* length of descriptors text */
75 #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
76 
77 #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
79 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
80 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
81 #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
82 #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
83 
84 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
85  CAAM_MAX_HASH_KEY_SIZE)
86 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
87 
88 /* caam context sizes for hashes: running digest + 8 */
89 #define HASH_MSG_LEN 8
90 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
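/*
 * MAX_CTX_LEN is thus sized for the largest running digest (SHA-512) plus the
 * 8-byte running message length kept alongside it; how much of that space a
 * given transform actually uses is selected per algorithm via ctx_len (see
 * runninglen[] in caam_hash_cra_init() below).
 */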
91 
92 #ifdef DEBUG
93 /* for print_hex_dumps with line references */
94 #define xstr(s) str(s)
95 #define str(s) #s
96 #define debug(format, arg...) printk(format, arg)
97 #else
98 #define debug(format, arg...)
99 #endif
100 
101 /* ahash per-session context */
102 struct caam_hash_ctx {
103  struct device *jrdev;
104  u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
105  u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
106  u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
107  u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
108  u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
109  dma_addr_t sh_desc_update_dma;
110  dma_addr_t sh_desc_update_first_dma;
111  dma_addr_t sh_desc_fin_dma;
112  dma_addr_t sh_desc_digest_dma;
113  dma_addr_t sh_desc_finup_dma;
114  u32 alg_type;
115  u32 alg_op;
116  u8 key[CAAM_MAX_HASH_KEY_SIZE];
117  dma_addr_t key_dma;
118  int ctx_len;
119  unsigned int split_key_len;
120  unsigned int split_key_pad_len;
121 };
122 
123 /* ahash state */
124 struct caam_hash_state {
125  dma_addr_t buf_dma;
126  dma_addr_t ctx_dma;
127  u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
128  int buflen_0;
129  u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
130  int buflen_1;
131  u8 caam_ctx[MAX_CTX_LEN];
132  int (*update)(struct ahash_request *req);
133  int (*final)(struct ahash_request *req);
134  int (*finup)(struct ahash_request *req);
135  int current_buf;
136 };
137 
138 /* Common job descriptor seq in/out ptr routines */
139 
140 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
141 static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
142  struct caam_hash_state *state,
143  int ctx_len)
144 {
145  state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
146  ctx_len, DMA_FROM_DEVICE);
147  append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
148 }
149 
150 /* Map req->result, and append seq_out_ptr command that points to it */
151 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
152  u8 *result, int digestsize)
153 {
154  dma_addr_t dst_dma;
155 
156  dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
157  append_seq_out_ptr(desc, dst_dma, digestsize, 0);
158 
159  return dst_dma;
160 }
161 
162 /* Map current buffer in state and put it in link table */
163 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
164  struct sec4_sg_entry *sec4_sg,
165  u8 *buf, int buflen)
166 {
167  dma_addr_t buf_dma;
168 
169  buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
170  dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
171 
172  return buf_dma;
173 }
174 
175 /* Map req->src and put it in link table */
176 static inline void src_map_to_sec4_sg(struct device *jrdev,
177  struct scatterlist *src, int src_nents,
178  struct sec4_sg_entry *sec4_sg,
179  bool chained)
180 {
181  dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
182  sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
183 }
184 
185 /*
186  * Only put the buffer in the link table if it contains data. Either way, a
187  * previously mapped buffer may still be hanging around and must be unmapped.
188  */
189 static inline dma_addr_t
190 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
191  u8 *buf, dma_addr_t buf_dma, int buflen,
192  int last_buflen)
193 {
194  if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
195  dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
196  if (buflen)
197  buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
198  else
199  buf_dma = 0;
200 
201  return buf_dma;
202 }
203 
204 /* Map state->caam_ctx, and add it to link table */
205 static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
206  struct caam_hash_state *state,
207  int ctx_len,
208  struct sec4_sg_entry *sec4_sg,
209  u32 flag)
210 {
211  state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
212  dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
213 }
214 
215 /* Common shared descriptor commands */
216 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
217 {
218  append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
219  ctx->split_key_len, CLASS_2 |
220  KEY_DEST_MDHA_SPLIT | KEY_ENC);
221 }
222 
223 /* Append key if it has been set */
224 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
225 {
226  u32 *key_jump_cmd;
227 
228  init_sh_desc(desc, HDR_SHARE_SERIAL);
229 
230  if (ctx->split_key_len) {
231  /* Skip if already shared */
232  key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
233  JUMP_COND_SHRD);
234 
235  append_key_ahash(desc, ctx);
236 
237  set_jump_tgt_here(desc, key_jump_cmd);
238  }
239 
240  /* Propagate errors from shared to job descriptor */
241  append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
242 }
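/*
 * The conditional jump above implements the "Skip if already shared" case:
 * when the shared descriptor runs for a job that re-uses an already loaded
 * key, the key commands between the jump and its target are skipped instead
 * of being executed again.
 */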
243 
244 /*
245  * For ahash read data from seqin following state->caam_ctx,
246  * and write resulting class2 context to seqout, which may be state->caam_ctx
247  * or req->result
248  */
249 static inline void ahash_append_load_str(u32 *desc, int digestsize)
250 {
251  /* Calculate remaining bytes to read */
252  append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
253 
254  /* Read remaining bytes */
255  append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
256  FIFOLD_TYPE_MSG | KEY_VLF);
257 
258  /* Store class2 context bytes */
259  append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
260  LDST_SRCDST_BYTE_CONTEXT);
261 }
262 
263 /*
264  * For ahash update, final and finup, import context, read and write to seqout
265  */
266 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
267  int digestsize,
268  struct caam_hash_ctx *ctx)
269 {
270  init_sh_desc_key_ahash(desc, ctx);
271 
272  /* Import context from software */
273  append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
274  LDST_CLASS_2_CCB | ctx->ctx_len);
275 
276  /* Class 2 operation */
277  append_operation(desc, op | state | OP_ALG_ENCRYPT);
278 
279  /*
280  * Load from buf and/or src and write to req->result or state->context
281  */
282  ahash_append_load_str(desc, digestsize);
283 }
284 
285 /* For ahash firsts and digest, read and write to seqout */
286 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
287  int digestsize, struct caam_hash_ctx *ctx)
288 {
289  init_sh_desc_key_ahash(desc, ctx);
290 
291  /* Class 2 operation */
292  append_operation(desc, op | state | OP_ALG_ENCRYPT);
293 
294  /*
295  * Load from buf and/or src and write to req->result or state->context
296  */
297  ahash_append_load_str(desc, digestsize);
298 }
299 
300 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
301 {
302  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
303  int digestsize = crypto_ahash_digestsize(ahash);
304  struct device *jrdev = ctx->jrdev;
305  u32 have_key = 0;
306  u32 *desc;
307 
308  if (ctx->split_key_len)
309  have_key = OP_ALG_AAI_HMAC_PRECOMP;
310 
311  /* ahash_update shared descriptor */
312  desc = ctx->sh_desc_update;
313 
314  init_sh_desc(desc, HDR_SHARE_SERIAL);
315 
316  /* Import context from software */
317  append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
318  LDST_CLASS_2_CCB | ctx->ctx_len);
319 
320  /* Class 2 operation */
321  append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
322  OP_ALG_ENCRYPT);
323 
324  /* Load data and write to result or context */
325  ahash_append_load_str(desc, ctx->ctx_len);
326 
327  ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
328  DMA_TO_DEVICE);
329  if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
330  dev_err(jrdev, "unable to map shared descriptor\n");
331  return -ENOMEM;
332  }
333 #ifdef DEBUG
334  print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
335  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336 #endif
337 
338  /* ahash_update_first shared descriptor */
339  desc = ctx->sh_desc_update_first;
340 
341  ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342  ctx->ctx_len, ctx);
343 
344  ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345  desc_bytes(desc),
346  DMA_TO_DEVICE);
347  if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348  dev_err(jrdev, "unable to map shared descriptor\n");
349  return -ENOMEM;
350  }
351 #ifdef DEBUG
352  print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
353  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
354 #endif
355 
356  /* ahash_final shared descriptor */
357  desc = ctx->sh_desc_fin;
358 
359  ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
360  OP_ALG_AS_FINALIZE, digestsize, ctx);
361 
362  ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
363  DMA_TO_DEVICE);
364  if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
365  dev_err(jrdev, "unable to map shared descriptor\n");
366  return -ENOMEM;
367  }
368 #ifdef DEBUG
369  print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
370  DUMP_PREFIX_ADDRESS, 16, 4, desc,
371  desc_bytes(desc), 1);
372 #endif
373 
374  /* ahash_finup shared descriptor */
375  desc = ctx->sh_desc_finup;
376 
377  ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
378  OP_ALG_AS_FINALIZE, digestsize, ctx);
379 
380  ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
381  DMA_TO_DEVICE);
382  if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
383  dev_err(jrdev, "unable to map shared descriptor\n");
384  return -ENOMEM;
385  }
386 #ifdef DEBUG
387  print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
388  DUMP_PREFIX_ADDRESS, 16, 4, desc,
389  desc_bytes(desc), 1);
390 #endif
391 
392  /* ahash_digest shared descriptor */
393  desc = ctx->sh_desc_digest;
394 
395  ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
396  digestsize, ctx);
397 
398  ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
399  desc_bytes(desc),
400  DMA_TO_DEVICE);
401  if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
402  dev_err(jrdev, "unable to map shared descriptor\n");
403  return -ENOMEM;
404  }
405 #ifdef DEBUG
406  print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
407  DUMP_PREFIX_ADDRESS, 16, 4, desc,
408  desc_bytes(desc), 1);
409 #endif
410 
411  return 0;
412 }
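/*
 * Shared descriptors built above and the request paths that use them:
 *	sh_desc_update       - ahash_update_ctx(): import ctx, hash, export ctx
 *	sh_desc_update_first - ahash_update_first()/ahash_update_no_ctx():
 *			       first pass, no imported ctx, export ctx
 *	sh_desc_fin          - ahash_final_ctx(): import ctx, write req->result
 *	sh_desc_finup        - ahash_finup_ctx(): import ctx, hash remaining
 *			       data, write req->result
 *	sh_desc_digest       - ahash_digest()/ahash_final_no_ctx()/
 *			       ahash_finup_no_ctx(): one-shot digest
 */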
413 
414 static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
415  u32 keylen)
416 {
417  return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
418  ctx->split_key_pad_len, key_in, keylen,
419  ctx->alg_op);
420 }
421 
422 /* Digest the key if it is larger than the block size */
423 static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
424  u32 *keylen, u8 *key_out, u32 digestsize)
425 {
426  struct device *jrdev = ctx->jrdev;
427  u32 *desc;
428  struct split_key_result result;
429  dma_addr_t src_dma, dst_dma;
430  int ret = 0;
431 
432  desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
433  if (!desc) {
434  dev_err(jrdev, "unable to allocate key input memory\n");
435  return -ENOMEM;
436  }
437 
438  init_job_desc(desc, 0);
439 
440  src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
441  DMA_TO_DEVICE);
442  if (dma_mapping_error(jrdev, src_dma)) {
443  dev_err(jrdev, "unable to map key input memory\n");
444  kfree(desc);
445  return -ENOMEM;
446  }
447  dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
448  DMA_FROM_DEVICE);
449  if (dma_mapping_error(jrdev, dst_dma)) {
450  dev_err(jrdev, "unable to map key output memory\n");
451  dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
452  kfree(desc);
453  return -ENOMEM;
454  }
455 
456  /* Job descriptor to perform unkeyed hash on key_in */
457  append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
458  OP_ALG_AS_INITFINAL);
459  append_seq_in_ptr(desc, src_dma, *keylen, 0);
460  append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
461  FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
462  append_seq_out_ptr(desc, dst_dma, digestsize, 0);
463  append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
464  LDST_SRCDST_BYTE_CONTEXT);
465 
466 #ifdef DEBUG
467  print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
468  DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
469  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
470  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
471 #endif
472 
473  result.err = 0;
474  init_completion(&result.completion);
475 
476  ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
477  if (!ret) {
478  /* in progress */
479  wait_for_completion_interruptible(&result.completion);
480  ret = result.err;
481 #ifdef DEBUG
482  print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
483  DUMP_PREFIX_ADDRESS, 16, 4, key_in,
484  digestsize, 1);
485 #endif
486  }
487  *keylen = digestsize;
488 
489  dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
490  dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
491 
492  kfree(desc);
493 
494  return ret;
495 }
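/*
 * hash_digest_key() mirrors the usual HMAC rule for over-long keys: a key
 * longer than the block size is first reduced to its unkeyed digest, and that
 * digest is then used as the key (see the keylen > blocksize branch of
 * ahash_setkey() below).
 */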
496 
497 static int ahash_setkey(struct crypto_ahash *ahash,
498  const u8 *key, unsigned int keylen)
499 {
500  /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
501  static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
502  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
503  struct device *jrdev = ctx->jrdev;
504  int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
505  int digestsize = crypto_ahash_digestsize(ahash);
506  int ret = 0;
507  u8 *hashed_key = NULL;
508 
509 #ifdef DEBUG
510  printk(KERN_ERR "keylen %d\n", keylen);
511 #endif
512 
513  if (keylen > blocksize) {
514  hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
515  GFP_DMA);
516  if (!hashed_key)
517  return -ENOMEM;
518  ret = hash_digest_key(ctx, key, &keylen, hashed_key,
519  digestsize);
520  if (ret)
521  goto badkey;
522  key = hashed_key;
523  }
524 
525  /* Pick class 2 key length from algorithm submask */
526  ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
527  OP_ALG_ALGSEL_SHIFT] * 2;
528  ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
529 
530 #ifdef DEBUG
531  printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
532  ctx->split_key_len, ctx->split_key_pad_len);
533  print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
534  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
535 #endif
536 
537  ret = gen_split_hash_key(ctx, key, keylen);
538  if (ret)
539  goto badkey;
540 
541  ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
542  DMA_TO_DEVICE);
543  if (dma_mapping_error(jrdev, ctx->key_dma)) {
544  dev_err(jrdev, "unable to map key i/o memory\n");
545  return -ENOMEM;
546  }
547 #ifdef DEBUG
548  print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
549  DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
550  ctx->split_key_pad_len, 1);
551 #endif
552 
553  ret = ahash_set_sh_desc(ahash);
554  if (ret) {
555  dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
556  DMA_TO_DEVICE);
557  }
558 
559  kfree(hashed_key);
560  return ret;
561 badkey:
562  kfree(hashed_key);
563  crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
564  return -EINVAL;
565 }
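/*
 * Key handling summary: the (possibly pre-hashed) key is turned into an MDHA
 * split key by gen_split_hash_key() (two derived halves, hence the
 * mdpadlen[] * 2 length), padded to a 16-byte multiple, DMA-mapped, and then
 * embedded into the shared descriptors by ahash_set_sh_desc().
 */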
566 
567 /*
568  * ahash_edesc - s/w-extended ahash descriptor
569  * @dst_dma: physical mapped address of req->result
570  * @sec4_sg_dma: physical mapped address of h/w link table
571  * @chained: if source is chained
572  * @src_nents: number of segments in input scatterlist
573  * @sec4_sg_bytes: length of dma mapped sec4_sg space
574  * @sec4_sg: pointer to h/w link table
575  * @hw_desc: the h/w job descriptor followed by any referenced link tables
576  */
577 struct ahash_edesc {
578  dma_addr_t dst_dma;
579  dma_addr_t sec4_sg_dma;
580  bool chained;
581  int src_nents;
582  int sec4_sg_bytes;
583  struct sec4_sg_entry *sec4_sg;
584  u32 hw_desc[0];
585 };
586 
587 static inline void ahash_unmap(struct device *dev,
588  struct ahash_edesc *edesc,
589  struct ahash_request *req, int dst_len)
590 {
591  if (edesc->src_nents)
592  dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
593  DMA_TO_DEVICE, edesc->chained);
594  if (edesc->dst_dma)
595  dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
596 
597  if (edesc->sec4_sg_bytes)
598  dma_unmap_single(dev, edesc->sec4_sg_dma,
599  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
600 }
601 
602 static inline void ahash_unmap_ctx(struct device *dev,
603  struct ahash_edesc *edesc,
604  struct ahash_request *req, int dst_len, u32 flag)
605 {
606  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
607  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
608  struct caam_hash_state *state = ahash_request_ctx(req);
609 
610  if (state->ctx_dma)
611  dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
612  ahash_unmap(dev, edesc, req, dst_len);
613 }
614 
615 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
616  void *context)
617 {
618  struct ahash_request *req = context;
619  struct ahash_edesc *edesc;
620  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
621  int digestsize = crypto_ahash_digestsize(ahash);
622 #ifdef DEBUG
623  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
624  struct caam_hash_state *state = ahash_request_ctx(req);
625 
626  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
627 #endif
628 
629  edesc = (struct ahash_edesc *)((char *)desc -
630  offsetof(struct ahash_edesc, hw_desc));
631  if (err) {
632  char tmp[CAAM_ERROR_STR_MAX];
633 
634  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
635  }
636 
637  ahash_unmap(jrdev, edesc, req, digestsize);
638  kfree(edesc);
639 
640 #ifdef DEBUG
641  print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
642  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
643  ctx->ctx_len, 1);
644  if (req->result)
645  print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
646  DUMP_PREFIX_ADDRESS, 16, 4, req->result,
647  digestsize, 1);
648 #endif
649 
650  req->base.complete(&req->base, err);
651 }
652 
653 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
654  void *context)
655 {
656  struct ahash_request *req = context;
657  struct ahash_edesc *edesc;
658  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
659  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
660 #ifdef DEBUG
661  struct caam_hash_state *state = ahash_request_ctx(req);
662  int digestsize = crypto_ahash_digestsize(ahash);
663 
664  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
665 #endif
666 
667  edesc = (struct ahash_edesc *)((char *)desc -
668  offsetof(struct ahash_edesc, hw_desc));
669  if (err) {
670  char tmp[CAAM_ERROR_STR_MAX];
671 
672  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
673  }
674 
675  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
676  kfree(edesc);
677 
678 #ifdef DEBUG
679  print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
680  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
681  ctx->ctx_len, 1);
682  if (req->result)
683  print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
684  DUMP_PREFIX_ADDRESS, 16, 4, req->result,
685  digestsize, 1);
686 #endif
687 
688  req->base.complete(&req->base, err);
689 }
690 
691 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
692  void *context)
693 {
694  struct ahash_request *req = context;
695  struct ahash_edesc *edesc;
696  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
697  int digestsize = crypto_ahash_digestsize(ahash);
698 #ifdef DEBUG
699  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
700  struct caam_hash_state *state = ahash_request_ctx(req);
701 
702  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
703 #endif
704 
705  edesc = (struct ahash_edesc *)((char *)desc -
706  offsetof(struct ahash_edesc, hw_desc));
707  if (err) {
708  char tmp[CAAM_ERROR_STR_MAX];
709 
710  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
711  }
712 
713  ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
714  kfree(edesc);
715 
716 #ifdef DEBUG
717  print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
718  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
719  ctx->ctx_len, 1);
720  if (req->result)
721  print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
722  DUMP_PREFIX_ADDRESS, 16, 4, req->result,
723  digestsize, 1);
724 #endif
725 
726  req->base.complete(&req->base, err);
727 }
728 
729 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
730  void *context)
731 {
732  struct ahash_request *req = context;
733  struct ahash_edesc *edesc;
734  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
735  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
736 #ifdef DEBUG
737  struct caam_hash_state *state = ahash_request_ctx(req);
738  int digestsize = crypto_ahash_digestsize(ahash);
739 
740  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
741 #endif
742 
743  edesc = (struct ahash_edesc *)((char *)desc -
744  offsetof(struct ahash_edesc, hw_desc));
745  if (err) {
746  char tmp[CAAM_ERROR_STR_MAX];
747 
748  dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
749  }
750 
751  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
752  kfree(edesc);
753 
754 #ifdef DEBUG
755  print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
756  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
757  ctx->ctx_len, 1);
758  if (req->result)
759  print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
760  DUMP_PREFIX_ADDRESS, 16, 4, req->result,
761  digestsize, 1);
762 #endif
763 
764  req->base.complete(&req->base, err);
765 }
766 
767 /* submit update job descriptor */
768 static int ahash_update_ctx(struct ahash_request *req)
769 {
770  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
771  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
772  struct caam_hash_state *state = ahash_request_ctx(req);
773  struct device *jrdev = ctx->jrdev;
774  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
775  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
776  u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
777  int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
778  u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
779  int *next_buflen = state->current_buf ? &state->buflen_0 :
780  &state->buflen_1, last_buflen;
781  int in_len = *buflen + req->nbytes, to_hash;
782  u32 *sh_desc = ctx->sh_desc_update, *desc;
783  dma_addr_t ptr = ctx->sh_desc_update_dma;
784  int src_nents, sec4_sg_bytes, sec4_sg_src_index;
785  struct ahash_edesc *edesc;
786  bool chained = false;
787  int ret = 0;
788  int sh_len;
789 
790  last_buflen = *next_buflen;
791  *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
792  to_hash = in_len - *next_buflen;
793 
794  if (to_hash) {
795  src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
796  &chained);
797  sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
798  sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
799  sizeof(struct sec4_sg_entry);
800 
801  /*
802  * allocate space for base edesc and hw desc commands,
803  * link tables
804  */
805  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
806  sec4_sg_bytes, GFP_DMA | flags);
807  if (!edesc) {
808  dev_err(jrdev,
809  "could not allocate extended descriptor\n");
810  return -ENOMEM;
811  }
812 
813  edesc->src_nents = src_nents;
814  edesc->chained = chained;
815  edesc->sec4_sg_bytes = sec4_sg_bytes;
816  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
817  DESC_JOB_IO_LEN;
818  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
819  sec4_sg_bytes,
820  DMA_TO_DEVICE);
821 
822  ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
823  edesc->sec4_sg, DMA_BIDIRECTIONAL);
824 
825  state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
826  edesc->sec4_sg + 1,
827  buf, state->buf_dma,
828  *buflen, last_buflen);
829 
830  if (src_nents) {
831  src_map_to_sec4_sg(jrdev, req->src, src_nents,
832  edesc->sec4_sg + sec4_sg_src_index,
833  chained);
834  if (*next_buflen) {
835  sg_copy_part(next_buf, req->src, to_hash -
836  *buflen, req->nbytes);
837  state->current_buf = !state->current_buf;
838  }
839  } else {
840  (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
841  SEC4_SG_LEN_FIN;
842  }
843 
844  sh_len = desc_len(sh_desc);
845  desc = edesc->hw_desc;
846  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
847  HDR_REVERSE);
848 
849  append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
850  to_hash, LDST_SGF);
851 
852  append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
853 
854 #ifdef DEBUG
855  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
856  DUMP_PREFIX_ADDRESS, 16, 4, desc,
857  desc_bytes(desc), 1);
858 #endif
859 
860  ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
861  if (!ret) {
862  ret = -EINPROGRESS;
863  } else {
864  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
865  DMA_BIDIRECTIONAL);
866  kfree(edesc);
867  }
868  } else if (*next_buflen) {
869  sg_copy(buf + *buflen, req->src, req->nbytes);
870  *buflen = *next_buflen;
871  *next_buflen = last_buflen;
872  }
873 #ifdef DEBUG
874  print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
875  DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
876  print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
877  DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
878  *next_buflen, 1);
879 #endif
880 
881  return ret;
882 }
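/*
 * Worked example of the buffering arithmetic above: with a 64-byte block
 * size, 10 bytes already buffered and a 100-byte update, in_len = 110,
 * *next_buflen = 110 & 63 = 46 and to_hash = 64, so exactly one block is
 * hashed now and the remaining 46 bytes are carried over in the other buffer
 * for the next update/final call.
 */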
883 
884 static int ahash_final_ctx(struct ahash_request *req)
885 {
886  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
887  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
888  struct caam_hash_state *state = ahash_request_ctx(req);
889  struct device *jrdev = ctx->jrdev;
890  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
891  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
892  u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
893  int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
894  int last_buflen = state->current_buf ? state->buflen_0 :
895  state->buflen_1;
896  u32 *sh_desc = ctx->sh_desc_fin, *desc;
897  dma_addr_t ptr = ctx->sh_desc_fin_dma;
898  int sec4_sg_bytes;
899  int digestsize = crypto_ahash_digestsize(ahash);
900  struct ahash_edesc *edesc;
901  int ret = 0;
902  int sh_len;
903 
904  sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
905 
906  /* allocate space for base edesc and hw desc commands, link tables */
907  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
908  sec4_sg_bytes, GFP_DMA | flags);
909  if (!edesc) {
910  dev_err(jrdev, "could not allocate extended descriptor\n");
911  return -ENOMEM;
912  }
913 
914  sh_len = desc_len(sh_desc);
915  desc = edesc->hw_desc;
916  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
917 
918  edesc->sec4_sg_bytes = sec4_sg_bytes;
919  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
920  DESC_JOB_IO_LEN;
921  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
922  sec4_sg_bytes, DMA_TO_DEVICE);
923  edesc->src_nents = 0;
924 
925  ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
926  DMA_TO_DEVICE);
927 
928  state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
929  buf, state->buf_dma, buflen,
930  last_buflen);
931  (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
932 
933  append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
934  LDST_SGF);
935 
936  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
937  digestsize);
938 
939 #ifdef DEBUG
940  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
941  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
942 #endif
943 
944  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
945  if (!ret) {
946  ret = -EINPROGRESS;
947  } else {
948  ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
949  kfree(edesc);
950  }
951 
952  return ret;
953 }
954 
955 static int ahash_finup_ctx(struct ahash_request *req)
956 {
957  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
958  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
959  struct caam_hash_state *state = ahash_request_ctx(req);
960  struct device *jrdev = ctx->jrdev;
961  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
962  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
963  u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
964  int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
965  int last_buflen = state->current_buf ? state->buflen_0 :
966  state->buflen_1;
967  u32 *sh_desc = ctx->sh_desc_finup, *desc;
968  dma_addr_t ptr = ctx->sh_desc_finup_dma;
969  int sec4_sg_bytes, sec4_sg_src_index;
970  int src_nents;
971  int digestsize = crypto_ahash_digestsize(ahash);
972  struct ahash_edesc *edesc;
973  bool chained = false;
974  int ret = 0;
975  int sh_len;
976 
977  src_nents = __sg_count(req->src, req->nbytes, &chained);
978  sec4_sg_src_index = 1 + (buflen ? 1 : 0);
979  sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
980  sizeof(struct sec4_sg_entry);
981 
982  /* allocate space for base edesc and hw desc commands, link tables */
983  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
984  sec4_sg_bytes, GFP_DMA | flags);
985  if (!edesc) {
986  dev_err(jrdev, "could not allocate extended descriptor\n");
987  return -ENOMEM;
988  }
989 
990  sh_len = desc_len(sh_desc);
991  desc = edesc->hw_desc;
992  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
993 
994  edesc->src_nents = src_nents;
995  edesc->chained = chained;
996  edesc->sec4_sg_bytes = sec4_sg_bytes;
997  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
998  DESC_JOB_IO_LEN;
999  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1000  sec4_sg_bytes, DMA_TO_DEVICE);
1001 
1002  ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
1003  DMA_TO_DEVICE);
1004 
1005  state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1006  buf, state->buf_dma, buflen,
1007  last_buflen);
1008 
1009  src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1010  sec4_sg_src_index, chained);
1011 
1012  append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1013  buflen + req->nbytes, LDST_SGF);
1014 
1015  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1016  digestsize);
1017 
1018 #ifdef DEBUG
1019  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1020  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1021 #endif
1022 
1023  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1024  if (!ret) {
1025  ret = -EINPROGRESS;
1026  } else {
1027  ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1028  kfree(edesc);
1029  }
1030 
1031  return ret;
1032 }
1033 
1034 static int ahash_digest(struct ahash_request *req)
1035 {
1036  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1037  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1038  struct device *jrdev = ctx->jrdev;
1039  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1040  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1041  u32 *sh_desc = ctx->sh_desc_digest, *desc;
1042  dma_addr_t ptr = ctx->sh_desc_digest_dma;
1043  int digestsize = crypto_ahash_digestsize(ahash);
1044  int src_nents, sec4_sg_bytes;
1045  dma_addr_t src_dma;
1046  struct ahash_edesc *edesc;
1047  bool chained = false;
1048  int ret = 0;
1049  u32 options;
1050  int sh_len;
1051 
1052  src_nents = sg_count(req->src, req->nbytes, &chained);
1053  dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1054  chained);
1055  sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1056 
1057  /* allocate space for base edesc and hw desc commands, link tables */
1058  edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1059  DESC_JOB_IO_LEN, GFP_DMA | flags);
1060  if (!edesc) {
1061  dev_err(jrdev, "could not allocate extended descriptor\n");
1062  return -ENOMEM;
1063  }
1064  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1065  DESC_JOB_IO_LEN;
1066  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1067  sec4_sg_bytes, DMA_TO_DEVICE);
1068  edesc->src_nents = src_nents;
1069  edesc->chained = chained;
1070 
1071  sh_len = desc_len(sh_desc);
1072  desc = edesc->hw_desc;
1073  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1074 
1075  if (src_nents) {
1076  sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1077  src_dma = edesc->sec4_sg_dma;
1078  options = LDST_SGF;
1079  } else {
1080  src_dma = sg_dma_address(req->src);
1081  options = 0;
1082  }
1083  append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1084 
1085  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1086  digestsize);
1087 
1088 #ifdef DEBUG
1089  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1090  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1091 #endif
1092 
1093  ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1094  if (!ret) {
1095  ret = -EINPROGRESS;
1096  } else {
1097  ahash_unmap(jrdev, edesc, req, digestsize);
1098  kfree(edesc);
1099  }
1100 
1101  return ret;
1102 }
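/*
 * ahash_digest() is the one-shot path: req->src is DMA-mapped (through a
 * sec4 link table when it has more than one segment), fed to the
 * sh_desc_digest shared descriptor, and the digest is written straight to
 * req->result via the SEQ OUT pointer appended by map_seq_out_ptr_result().
 */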
1103 
1104 /* submit ahash final if it is the first job descriptor */
1105 static int ahash_final_no_ctx(struct ahash_request *req)
1106 {
1107  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1108  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1109  struct caam_hash_state *state = ahash_request_ctx(req);
1110  struct device *jrdev = ctx->jrdev;
1111  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1112  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1113  u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1114  int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1115  u32 *sh_desc = ctx->sh_desc_digest, *desc;
1116  dma_addr_t ptr = ctx->sh_desc_digest_dma;
1117  int digestsize = crypto_ahash_digestsize(ahash);
1118  struct ahash_edesc *edesc;
1119  int ret = 0;
1120  int sh_len;
1121 
1122  /* allocate space for base edesc and hw desc commands, link tables */
1123  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1124  GFP_DMA | flags);
1125  if (!edesc) {
1126  dev_err(jrdev, "could not allocate extended descriptor\n");
1127  return -ENOMEM;
1128  }
1129 
1130  sh_len = desc_len(sh_desc);
1131  desc = edesc->hw_desc;
1132  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1133 
1134  state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1135 
1136  append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1137 
1138  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1139  digestsize);
1140  edesc->src_nents = 0;
1141 
1142 #ifdef DEBUG
1143  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1144  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1145 #endif
1146 
1147  ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1148  if (!ret) {
1149  ret = -EINPROGRESS;
1150  } else {
1151  ahash_unmap(jrdev, edesc, req, digestsize);
1152  kfree(edesc);
1153  }
1154 
1155  return ret;
1156 }
1157 
1158 /* submit ahash update if it is the first job descriptor after update */
1159 static int ahash_update_no_ctx(struct ahash_request *req)
1160 {
1161  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1162  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1163  struct caam_hash_state *state = ahash_request_ctx(req);
1164  struct device *jrdev = ctx->jrdev;
1165  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1166  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1167  u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1168  int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1169  u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1170  int *next_buflen = state->current_buf ? &state->buflen_0 :
1171  &state->buflen_1;
1172  int in_len = *buflen + req->nbytes, to_hash;
1173  int sec4_sg_bytes, src_nents;
1174  struct ahash_edesc *edesc;
1175  u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1176  dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1177  bool chained = false;
1178  int ret = 0;
1179  int sh_len;
1180 
1181  *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1182  to_hash = in_len - *next_buflen;
1183 
1184  if (to_hash) {
1185  src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1186  &chained);
1187  sec4_sg_bytes = (1 + src_nents) *
1188  sizeof(struct sec4_sg_entry);
1189 
1190  /*
1191  * allocate space for base edesc and hw desc commands,
1192  * link tables
1193  */
1194  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1195  sec4_sg_bytes, GFP_DMA | flags);
1196  if (!edesc) {
1197  dev_err(jrdev,
1198  "could not allocate extended descriptor\n");
1199  return -ENOMEM;
1200  }
1201 
1202  edesc->src_nents = src_nents;
1203  edesc->chained = chained;
1204  edesc->sec4_sg_bytes = sec4_sg_bytes;
1205  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1206  DESC_JOB_IO_LEN;
1207  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1208  sec4_sg_bytes,
1209  DMA_TO_DEVICE);
1210 
1211  state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1212  buf, *buflen);
1213  src_map_to_sec4_sg(jrdev, req->src, src_nents,
1214  edesc->sec4_sg + 1, chained);
1215  if (*next_buflen) {
1216  sg_copy_part(next_buf, req->src, to_hash - *buflen,
1217  req->nbytes);
1218  state->current_buf = !state->current_buf;
1219  }
1220 
1221  sh_len = desc_len(sh_desc);
1222  desc = edesc->hw_desc;
1223  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1224  HDR_REVERSE);
1225 
1226  append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1227 
1228  map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1229 
1230 #ifdef DEBUG
1231  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1232  DUMP_PREFIX_ADDRESS, 16, 4, desc,
1233  desc_bytes(desc), 1);
1234 #endif
1235 
1236  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1237  if (!ret) {
1238  ret = -EINPROGRESS;
1239  state->update = ahash_update_ctx;
1240  state->finup = ahash_finup_ctx;
1241  state->final = ahash_final_ctx;
1242  } else {
1243  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1244  DMA_TO_DEVICE);
1245  kfree(edesc);
1246  }
1247  } else if (*next_buflen) {
1248  sg_copy(buf + *buflen, req->src, req->nbytes);
1249  *buflen = *next_buflen;
1250  *next_buflen = 0;
1251  }
1252 #ifdef DEBUG
1253  print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
1254  DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1255  print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1256  DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1257  *next_buflen, 1);
1258 #endif
1259 
1260  return ret;
1261 }
1262 
1263 /* submit ahash finup if it is the first job descriptor after update */
1264 static int ahash_finup_no_ctx(struct ahash_request *req)
1265 {
1266  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1267  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1268  struct caam_hash_state *state = ahash_request_ctx(req);
1269  struct device *jrdev = ctx->jrdev;
1270  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1271  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1272  u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1273  int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1274  int last_buflen = state->current_buf ? state->buflen_0 :
1275  state->buflen_1;
1276  u32 *sh_desc = ctx->sh_desc_digest, *desc;
1277  dma_addr_t ptr = ctx->sh_desc_digest_dma;
1278  int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1279  int digestsize = crypto_ahash_digestsize(ahash);
1280  struct ahash_edesc *edesc;
1281  bool chained = false;
1282  int sh_len;
1283  int ret = 0;
1284 
1285  src_nents = __sg_count(req->src, req->nbytes, &chained);
1286  sec4_sg_src_index = 2;
1287  sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1288  sizeof(struct sec4_sg_entry);
1289 
1290  /* allocate space for base edesc and hw desc commands, link tables */
1291  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1292  sec4_sg_bytes, GFP_DMA | flags);
1293  if (!edesc) {
1294  dev_err(jrdev, "could not allocate extended descriptor\n");
1295  return -ENOMEM;
1296  }
1297 
1298  sh_len = desc_len(sh_desc);
1299  desc = edesc->hw_desc;
1300  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1301 
1302  edesc->src_nents = src_nents;
1303  edesc->chained = chained;
1304  edesc->sec4_sg_bytes = sec4_sg_bytes;
1305  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1306  DESC_JOB_IO_LEN;
1307  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1308  sec4_sg_bytes, DMA_TO_DEVICE);
1309 
1310  state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1311  state->buf_dma, buflen,
1312  last_buflen);
1313 
1314  src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1315  chained);
1316 
1317  append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1318  req->nbytes, LDST_SGF);
1319 
1320  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1321  digestsize);
1322 
1323 #ifdef DEBUG
1324  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1325  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1326 #endif
1327 
1328  ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1329  if (!ret) {
1330  ret = -EINPROGRESS;
1331  } else {
1332  ahash_unmap(jrdev, edesc, req, digestsize);
1333  kfree(edesc);
1334  }
1335 
1336  return ret;
1337 }
1338 
1339 /* submit first update job descriptor after init */
1340 static int ahash_update_first(struct ahash_request *req)
1341 {
1342  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1343  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1344  struct caam_hash_state *state = ahash_request_ctx(req);
1345  struct device *jrdev = ctx->jrdev;
1346  gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1347  CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1348  u8 *next_buf = state->buf_0 + state->current_buf *
1349  CAAM_MAX_HASH_BLOCK_SIZE;
1350  int *next_buflen = &state->buflen_0 + state->current_buf;
1351  int to_hash;
1352  u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1353  dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1354  int sec4_sg_bytes, src_nents;
1355  dma_addr_t src_dma;
1356  u32 options;
1357  struct ahash_edesc *edesc;
1358  bool chained = false;
1359  int ret = 0;
1360  int sh_len;
1361 
1362  *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1363  1);
1364  to_hash = req->nbytes - *next_buflen;
1365 
1366  if (to_hash) {
1367  src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1368  &chained);
1369  dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1370  DMA_TO_DEVICE, chained);
1371  sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1372 
1373  /*
1374  * allocate space for base edesc and hw desc commands,
1375  * link tables
1376  */
1377  edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1378  sec4_sg_bytes, GFP_DMA | flags);
1379  if (!edesc) {
1380  dev_err(jrdev,
1381  "could not allocate extended descriptor\n");
1382  return -ENOMEM;
1383  }
1384 
1385  edesc->src_nents = src_nents;
1386  edesc->chained = chained;
1387  edesc->sec4_sg_bytes = sec4_sg_bytes;
1388  edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1389  DESC_JOB_IO_LEN;
1390  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1391  sec4_sg_bytes,
1392  DMA_TO_DEVICE);
1393 
1394  if (src_nents) {
1395  sg_to_sec4_sg_last(req->src, src_nents,
1396  edesc->sec4_sg, 0);
1397  src_dma = edesc->sec4_sg_dma;
1398  options = LDST_SGF;
1399  } else {
1400  src_dma = sg_dma_address(req->src);
1401  options = 0;
1402  }
1403 
1404  if (*next_buflen)
1405  sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1406 
1407  sh_len = desc_len(sh_desc);
1408  desc = edesc->hw_desc;
1409  init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1410  HDR_REVERSE);
1411 
1412  append_seq_in_ptr(desc, src_dma, to_hash, options);
1413 
1414  map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1415 
1416 #ifdef DEBUG
1417  print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1418  DUMP_PREFIX_ADDRESS, 16, 4, desc,
1419  desc_bytes(desc), 1);
1420 #endif
1421 
1422  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1423  req);
1424  if (!ret) {
1425  ret = -EINPROGRESS;
1426  state->update = ahash_update_ctx;
1427  state->finup = ahash_finup_ctx;
1428  state->final = ahash_final_ctx;
1429  } else {
1430  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1431  DMA_TO_DEVICE);
1432  kfree(edesc);
1433  }
1434  } else if (*next_buflen) {
1435  state->update = ahash_update_no_ctx;
1436  state->finup = ahash_finup_no_ctx;
1437  state->final = ahash_final_no_ctx;
1438  sg_copy(next_buf, req->src, req->nbytes);
1439  }
1440 #ifdef DEBUG
1441  print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1442  DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1443  *next_buflen, 1);
1444 #endif
1445 
1446  return ret;
1447 }
1448 
1449 static int ahash_finup_first(struct ahash_request *req)
1450 {
1451  return ahash_digest(req);
1452 }
1453 
1454 static int ahash_init(struct ahash_request *req)
1455 {
1456  struct caam_hash_state *state = ahash_request_ctx(req);
1457 
1458  state->update = ahash_update_first;
1459  state->finup = ahash_finup_first;
1460  state->final = ahash_final_no_ctx;
1461 
1462  state->current_buf = 0;
1463 
1464  return 0;
1465 }
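/*
 * Request state machine: ahash_init() starts with the *_first handlers; once
 * ahash_update_first() has actually hashed a first block it switches the
 * handlers to the *_ctx variants (running state lives in state->caam_ctx),
 * while an update that only buffered data falls back to the *_no_ctx
 * variants.
 */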
1466 
1467 static int ahash_update(struct ahash_request *req)
1468 {
1469  struct caam_hash_state *state = ahash_request_ctx(req);
1470 
1471  return state->update(req);
1472 }
1473 
1474 static int ahash_finup(struct ahash_request *req)
1475 {
1476  struct caam_hash_state *state = ahash_request_ctx(req);
1477 
1478  return state->finup(req);
1479 }
1480 
1481 static int ahash_final(struct ahash_request *req)
1482 {
1483  struct caam_hash_state *state = ahash_request_ctx(req);
1484 
1485  return state->final(req);
1486 }
1487 
1488 static int ahash_export(struct ahash_request *req, void *out)
1489 {
1490  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1491  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1492  struct caam_hash_state *state = ahash_request_ctx(req);
1493 
1494  memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1495  memcpy(out + sizeof(struct caam_hash_ctx), state,
1496  sizeof(struct caam_hash_state));
1497  return 0;
1498 }
1499 
1500 static int ahash_import(struct ahash_request *req, const void *in)
1501 {
1502  struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1503  struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1504  struct caam_hash_state *state = ahash_request_ctx(req);
1505 
1506  memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1507  memcpy(state, in + sizeof(struct caam_hash_ctx),
1508  sizeof(struct caam_hash_state));
1509  return 0;
1510 }
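/*
 * export/import are plain memcpy()s of the per-transform context followed by
 * the per-request state, so the exported blob is only meaningful to this
 * driver instance (it carries DMA addresses and shared-descriptor contents).
 */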
1511 
1512 struct caam_hash_template {
1513  char name[CRYPTO_MAX_ALG_NAME];
1514  char driver_name[CRYPTO_MAX_ALG_NAME];
1515  char hmac_name[CRYPTO_MAX_ALG_NAME];
1516  char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1517  unsigned int blocksize;
1518  struct ahash_alg template_ahash;
1519  u32 alg_type;
1520  u32 alg_op;
1521 };
1522 
1523 /* ahash descriptors */
1524 static struct caam_hash_template driver_hash[] = {
1525  {
1526  .name = "sha1",
1527  .driver_name = "sha1-caam",
1528  .hmac_name = "hmac(sha1)",
1529  .hmac_driver_name = "hmac-sha1-caam",
1530  .blocksize = SHA1_BLOCK_SIZE,
1531  .template_ahash = {
1532  .init = ahash_init,
1533  .update = ahash_update,
1534  .final = ahash_final,
1535  .finup = ahash_finup,
1536  .digest = ahash_digest,
1537  .export = ahash_export,
1538  .import = ahash_import,
1539  .setkey = ahash_setkey,
1540  .halg = {
1541  .digestsize = SHA1_DIGEST_SIZE,
1542  },
1543  },
1544  .alg_type = OP_ALG_ALGSEL_SHA1,
1545  .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1546  }, {
1547  .name = "sha224",
1548  .driver_name = "sha224-caam",
1549  .hmac_name = "hmac(sha224)",
1550  .hmac_driver_name = "hmac-sha224-caam",
1551  .blocksize = SHA224_BLOCK_SIZE,
1552  .template_ahash = {
1553  .init = ahash_init,
1554  .update = ahash_update,
1555  .final = ahash_final,
1556  .finup = ahash_finup,
1557  .digest = ahash_digest,
1558  .export = ahash_export,
1559  .import = ahash_import,
1560  .setkey = ahash_setkey,
1561  .halg = {
1562  .digestsize = SHA224_DIGEST_SIZE,
1563  },
1564  },
1565  .alg_type = OP_ALG_ALGSEL_SHA224,
1566  .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1567  }, {
1568  .name = "sha256",
1569  .driver_name = "sha256-caam",
1570  .hmac_name = "hmac(sha256)",
1571  .hmac_driver_name = "hmac-sha256-caam",
1572  .blocksize = SHA256_BLOCK_SIZE,
1573  .template_ahash = {
1574  .init = ahash_init,
1575  .update = ahash_update,
1576  .final = ahash_final,
1577  .finup = ahash_finup,
1578  .digest = ahash_digest,
1579  .export = ahash_export,
1580  .import = ahash_import,
1581  .setkey = ahash_setkey,
1582  .halg = {
1583  .digestsize = SHA256_DIGEST_SIZE,
1584  },
1585  },
1586  .alg_type = OP_ALG_ALGSEL_SHA256,
1587  .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1588  }, {
1589  .name = "sha384",
1590  .driver_name = "sha384-caam",
1591  .hmac_name = "hmac(sha384)",
1592  .hmac_driver_name = "hmac-sha384-caam",
1593  .blocksize = SHA384_BLOCK_SIZE,
1594  .template_ahash = {
1595  .init = ahash_init,
1596  .update = ahash_update,
1597  .final = ahash_final,
1598  .finup = ahash_finup,
1599  .digest = ahash_digest,
1600  .export = ahash_export,
1601  .import = ahash_import,
1602  .setkey = ahash_setkey,
1603  .halg = {
1604  .digestsize = SHA384_DIGEST_SIZE,
1605  },
1606  },
1607  .alg_type = OP_ALG_ALGSEL_SHA384,
1608  .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1609  }, {
1610  .name = "sha512",
1611  .driver_name = "sha512-caam",
1612  .hmac_name = "hmac(sha512)",
1613  .hmac_driver_name = "hmac-sha512-caam",
1614  .blocksize = SHA512_BLOCK_SIZE,
1615  .template_ahash = {
1616  .init = ahash_init,
1617  .update = ahash_update,
1618  .final = ahash_final,
1619  .finup = ahash_finup,
1620  .digest = ahash_digest,
1621  .export = ahash_export,
1622  .import = ahash_import,
1623  .setkey = ahash_setkey,
1624  .halg = {
1625  .digestsize = SHA512_DIGEST_SIZE,
1626  },
1627  },
1628  .alg_type = OP_ALG_ALGSEL_SHA512,
1629  .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1630  }, {
1631  .name = "md5",
1632  .driver_name = "md5-caam",
1633  .hmac_name = "hmac(md5)",
1634  .hmac_driver_name = "hmac-md5-caam",
1635  .blocksize = MD5_BLOCK_WORDS * 4,
1636  .template_ahash = {
1637  .init = ahash_init,
1638  .update = ahash_update,
1639  .final = ahash_final,
1640  .finup = ahash_finup,
1641  .digest = ahash_digest,
1642  .export = ahash_export,
1643  .import = ahash_import,
1644  .setkey = ahash_setkey,
1645  .halg = {
1646  .digestsize = MD5_DIGEST_SIZE,
1647  },
1648  },
1649  .alg_type = OP_ALG_ALGSEL_MD5,
1650  .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1651  },
1652 };
1653 
1654 struct caam_hash_alg {
1655  struct list_head entry;
1656  struct device *ctrldev;
1657  int alg_type;
1658  int alg_op;
1659  struct ahash_alg ahash_alg;
1660 };
1661 
1662 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1663 {
1664  struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1665  struct crypto_alg *base = tfm->__crt_alg;
1666  struct hash_alg_common *halg =
1667  container_of(base, struct hash_alg_common, base);
1668  struct ahash_alg *alg =
1669  container_of(halg, struct ahash_alg, halg);
1670  struct caam_hash_alg *caam_hash =
1671  container_of(alg, struct caam_hash_alg, ahash_alg);
1672  struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1673  struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
1674  /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1675  static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1676  HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1677  HASH_MSG_LEN + 32,
1678  HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1679  HASH_MSG_LEN + 64,
1680  HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1681  int tgt_jr = atomic_inc_return(&priv->tfm_count);
1682  int ret = 0;
1683 
1684  /*
1685  * distribute tfms across job rings to ensure in-order
1686  * crypto request processing per tfm
1687  */
1688  ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
1689 
1690  /* copy descriptor header template value */
1691  ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1692  ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1693 
1694  ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1695  OP_ALG_ALGSEL_SHIFT];
1696 
1697  crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1698  sizeof(struct caam_hash_state));
1699 
1700  ret = ahash_set_sh_desc(ahash);
1701 
1702  return ret;
1703 }
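/*
 * Each new transform is bound to one job ring, chosen round-robin via
 * tfm_count, which keeps all requests of a given tfm on the same ring and
 * therefore in order; ctx_len is picked from runninglen[] by algorithm
 * selector, matching the "running digest + 8" context layout above.
 */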
1704 
1705 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1706 {
1707  struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1708 
1709  if (ctx->sh_desc_update_dma &&
1710  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1711  dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1712  desc_bytes(ctx->sh_desc_update),
1713  DMA_TO_DEVICE);
1714  if (ctx->sh_desc_update_first_dma &&
1715  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1716  dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1717  desc_bytes(ctx->sh_desc_update_first),
1718  DMA_TO_DEVICE);
1719  if (ctx->sh_desc_fin_dma &&
1720  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1721  dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1722  desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1723  if (ctx->sh_desc_digest_dma &&
1724  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1725  dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1726  desc_bytes(ctx->sh_desc_digest),
1727  DMA_TO_DEVICE);
1728  if (ctx->sh_desc_finup_dma &&
1729  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1730  dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1731  desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1732 }
1733 
1734 static void __exit caam_algapi_hash_exit(void)
1735 {
1736  struct device_node *dev_node;
1737  struct platform_device *pdev;
1738  struct device *ctrldev;
1739  struct caam_drv_private *priv;
1740  struct caam_hash_alg *t_alg, *n;
1741 
1742  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1743  if (!dev_node) {
1744  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1745  if (!dev_node)
1746  return;
1747  }
1748 
1749  pdev = of_find_device_by_node(dev_node);
1750  if (!pdev)
1751  return;
1752 
1753  ctrldev = &pdev->dev;
1754  of_node_put(dev_node);
1755  priv = dev_get_drvdata(ctrldev);
1756 
1757  if (!priv->hash_list.next)
1758  return;
1759 
1760  list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
1761  crypto_unregister_ahash(&t_alg->ahash_alg);
1762  list_del(&t_alg->entry);
1763  kfree(t_alg);
1764  }
1765 }
1766 
1767 static struct caam_hash_alg *
1768 caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1769  bool keyed)
1770 {
1771  struct caam_hash_alg *t_alg;
1772  struct ahash_alg *halg;
1773  struct crypto_alg *alg;
1774 
1775  t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1776  if (!t_alg) {
1777  dev_err(ctrldev, "failed to allocate t_alg\n");
1778  return ERR_PTR(-ENOMEM);
1779  }
1780 
1781  t_alg->ahash_alg = template->template_ahash;
1782  halg = &t_alg->ahash_alg;
1783  alg = &halg->halg.base;
1784 
1785  if (keyed) {
1786  snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1787  template->hmac_name);
1788  snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1789  template->hmac_driver_name);
1790  } else {
1791  snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1792  template->name);
1793  snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1794  template->driver_name);
1795  }
1796  alg->cra_module = THIS_MODULE;
1797  alg->cra_init = caam_hash_cra_init;
1798  alg->cra_exit = caam_hash_cra_exit;
1799  alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1800  alg->cra_priority = CAAM_CRA_PRIORITY;
1801  alg->cra_blocksize = template->blocksize;
1802  alg->cra_alignmask = 0;
1803  alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1804  alg->cra_type = &crypto_ahash_type;
1805 
1806  t_alg->alg_type = template->alg_type;
1807  t_alg->alg_op = template->alg_op;
1808  t_alg->ctrldev = ctrldev;
1809 
1810  return t_alg;
1811 }
1812 
1813 static int __init caam_algapi_hash_init(void)
1814 {
1815  struct device_node *dev_node;
1816  struct platform_device *pdev;
1817  struct device *ctrldev;
1818  struct caam_drv_private *priv;
1819  int i = 0, err = 0;
1820 
1821  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1822  if (!dev_node) {
1823  dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1824  if (!dev_node)
1825  return -ENODEV;
1826  }
1827 
1828  pdev = of_find_device_by_node(dev_node);
1829  if (!pdev)
1830  return -ENODEV;
1831 
1832  ctrldev = &pdev->dev;
1833  priv = dev_get_drvdata(ctrldev);
1834  of_node_put(dev_node);
1835 
1836  INIT_LIST_HEAD(&priv->hash_list);
1837 
1838  atomic_set(&priv->tfm_count, -1);
1839 
1840  /* register crypto algorithms the device supports */
1841  for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1842  /* TODO: check if h/w supports alg */
1843  struct caam_hash_alg *t_alg;
1844 
1845  /* register hmac version */
1846  t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
1847  if (IS_ERR(t_alg)) {
1848  err = PTR_ERR(t_alg);
1849  dev_warn(ctrldev, "%s alg allocation failed\n",
1850  driver_hash[i].driver_name);
1851  continue;
1852  }
1853 
1854  err = crypto_register_ahash(&t_alg->ahash_alg);
1855  if (err) {
1856  dev_warn(ctrldev, "%s alg registration failed\n",
1857  t_alg->ahash_alg.halg.base.cra_driver_name);
1858  kfree(t_alg);
1859  } else
1860  list_add_tail(&t_alg->entry, &priv->hash_list);
1861 
1862  /* register unkeyed version */
1863  t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
1864  if (IS_ERR(t_alg)) {
1865  err = PTR_ERR(t_alg);
1866  dev_warn(ctrldev, "%s alg allocation failed\n",
1867  driver_hash[i].driver_name);
1868  continue;
1869  }
1870 
1871  err = crypto_register_ahash(&t_alg->ahash_alg);
1872  if (err) {
1873  dev_warn(ctrldev, "%s alg registration failed\n",
1874  t_alg->ahash_alg.halg.base.cra_driver_name);
1875  kfree(t_alg);
1876  } else
1877  list_add_tail(&t_alg->entry, &priv->hash_list);
1878  }
1879 
1880  return err;
1881 }
1882 
1883 module_init(caam_algapi_hash_init);
1884 module_exit(caam_algapi_hash_exit);
1885 
1886 MODULE_LICENSE("GPL");
1887 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1888 MODULE_AUTHOR("Freescale Semiconductor - NMG");