/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 *
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <[email protected]> for ST-Ericsson
 * Author: Joakim Bech <[email protected]> for ST-Ericsson
 * Author: Berne Hebark <[email protected]> for ST-Ericsson.
 * Author: Niklas Hernaeus <[email protected]> for ST-Ericsson.
 * Author: Andreas Westin <[email protected]> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>

#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <mach/hardware.h>

#include "hash_alg.h"

#define DEV_DBG_NAME "hashX hashX:"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");

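/*
 * Digests of the zero-length message, precomputed so that hashing an empty
 * request never has to touch the hardware (SHA-1("") = da39a3ee...,
 * SHA-256("") = e3b0c442...).
 */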
static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09
};

static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};

/* HMAC-SHA1, no key */
static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};

/*
 * struct hash_driver_data - driver-wide state: the list of probed hash
 * devices and the semaphore used to hand one out to a context.
 */
struct hash_driver_data {
	struct klist		device_list;
	struct semaphore	device_allocation;
};

static struct hash_driver_data driver_data;

/* Declaration of functions */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);

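/**
 * release_hash_device - Unbind the device from its current context and
 * signal (via the driver-wide semaphore) that a device is free again.
 * @device_data: Structure for the hash device.
 */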
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The matching down_interruptible() for this semaphore is called in
	 * hash_get_device_data().
	 */
	up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = (struct hash_ctx *)data;

	complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;
	dma_cookie_t cookie;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
			__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
					     ctx->device->dma.sg,
					     ctx->device->dma.nents,
					     direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev,
			"[%s]: Could not map the sg list (TO_DEVICE)",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
		"(TO_DEVICE)", __func__);
	desc = channel->device->device_prep_slave_sg(channel,
			ctx->device->dma.sg, ctx->device->dma.sg_len,
			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
	if (!desc) {
		dev_err(ctx->device->dev,
			"[%s]: device_prep_slave_sg() failed!", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;

	cookie = desc->tx_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
			  struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
	if (error) {
		dev_dbg(ctx->device->dev,
			"[%s]: hash_set_dma_transfer() failed", __func__);
		return error;
	}

	return len;
}

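/**
 * get_empty_message_digest - Copy one of the precalculated empty-message
 * digests above into @zero_hash, when the current configuration has one
 * (plain hash, or HMAC with an empty key).
 * @device_data: Structure for the hash device.
 * @zero_hash: Buffer to copy the digest to.
 * @zero_hash_size: Holds the size of the copied digest.
 * @zero_digest: True if a precalculated digest was available.
 */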
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha1[0],
			       SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha256[0],
			       SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "[%s] Incorrect algorithm!",
				__func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
				       SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
				       SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev,
					"[%s] Incorrect algorithm!", __func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev, "[%s] Continue hash "
				"calculation, since HMAC key is available",
				__func__);
		}
	}
out:
	return ret;
}

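/**
 * hash_disable_power - Disable power and clock for the hash device,
 * optionally saving the hardware state first.
 * @device_data: Structure for the hash device.
 * @save_device_state: If true, save the current register state before
 * powering down, so it can be restored by hash_enable_power().
 */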
static int hash_disable_power(
		struct hash_device_data *device_data,
		bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "[%s] regulator_disable() failed!", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

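/**
 * hash_enable_power - Enable power (and clock) for the hash device,
 * optionally restoring previously saved hardware state.
 * @device_data: Structure for the hash device.
 * @restore_device_state: If true, restore the register state that was
 * saved by a previous hash_disable_power(device_data, true).
 */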
static int hash_enable_power(
		struct hash_device_data *device_data,
		bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "[%s]: regulator_enable() failed!",
				__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "[%s]: clk_enable() failed!",
				__func__);
			/* Keep the clk_enable() error code. */
			regulator_disable(device_data->regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state) {
		if (restore_device_state) {
			device_data->restore_dev_state = false;
			hash_resume_state(device_data,
					  &device_data->state);
		}
	}
out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

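/**
 * hash_get_device_data - Allocate a free hash device to @ctx.
 * @ctx: The crypto context that wants a device.
 * @device_data: Output; the device that was allocated.
 *
 * Sleeps on the driver-wide semaphore until a device is available, then
 * walks the device list and claims the first unallocated one.
 */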
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret; /* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
				struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found: should not happen, since the
		 * semaphore guaranteed that a device was available. The
		 * semaphore is deliberately not released here, so the
		 * available-device count stays consistent.
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}

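/**
 * hash_hw_write_key - Write the HMAC key to the HASH_DIN register one
 * 32-bit word at a time, packing any trailing bytes into a final word,
 * then start and wait for a digest calculation.
 * @device_data: Structure for the hash device.
 * @key: The key to write.
 * @keylen: The length of the key, in bytes.
 */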
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();
}

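/**
 * init_hash_hw - Program the hardware for the configuration in @ctx and,
 * for HMAC, write the key.
 * @device_data: Structure for the hash device.
 * @ctx: The hash context.
 */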
static int init_hash_hw(struct hash_device_data *device_data,
			struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev,
			"[%s] hash_setconfiguration() failed!", __func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}

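/**
 * hash_get_nents - Count the scatterlist entries covering @size bytes and
 * check whether they are suitably aligned for DMA.
 * @sg: Scatterlist to walk.
 * @size: Number of bytes that must be covered.
 * @aligned: Output (may be NULL); true if all entries are DMA-aligned.
 *
 * Returns the number of entries, or -EFAULT if @sg is too short.
 */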
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* hash_set_dma_transfer will align last nent */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
		    || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
			size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}

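/**
 * hash_dma_valid_data - Check whether the request data in @sg can be
 * handed to the DMA engine: it must cover @datasize bytes in aligned
 * scatterlist entries.
 * @sg: Scatterlist with the request data.
 * @datasize: Number of bytes to hash.
 */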
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	/* Need to include at least one nent, else error */
	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}

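/**
 * hash_init - Common ahash init function: reset the request state and
 * decide between CPU and DMA mode for this request.
 * @req: The hash request for the data.
 */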
static int hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;
	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false; /* Don't use DMA */

			pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
				 "to CPU mode for data size < %d",
				 __func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
			    hash_dma_valid_data(req->src, req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
					 " CPU mode for datalength < %d"
					 " or non-aligned data, except "
					 "in last nent", __func__,
					 HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}
	return 0;
}

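/**
 * hash_processblock - Push one full block of message data into the
 * HASH_DIN register.
 * @device_data: Structure for the hash device.
 * @message: Block of message data, as 32-bit words.
 * @length: Length of the block, in bytes.
 */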
static void hash_processblock(
		struct hash_device_data *device_data,
		const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;
	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}

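/**
 * hash_messagepad - Write the final message words, program NBLW with the
 * number of valid bits in the last word, and start the final digest
 * calculation (DCAL).
 * @device_data: Structure for the hash device.
 * @message: Last words of the message.
 * @index_bytes: Number of message bytes left to write.
 */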
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
		readl_relaxed(&device_data->base->din),
		(int)(readl_relaxed(&device_data->base->str) &
		      HASH_STR_NBLW_MASK));
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
		__func__, readl_relaxed(&device_data->base->din),
		(int)(readl_relaxed(&device_data->base->str) &
		      HASH_STR_NBLW_MASK));

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();
}

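/**
 * hash_incrementlength - Add @incr bytes to the 64-bit message length
 * counter kept as two 32-bit words in the request state.
 * @ctx: Hash request context.
 * @incr: Number of bytes to add.
 */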
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}

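/**
 * hash_setconfiguration - Program data format, algorithm (SHA-1/SHA-256)
 * and operation mode (HASH/HMAC) into the control register.
 * @device_data: Structure for the hash device.
 * @config: The configuration to apply.
 */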
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
	 * to be written to HASH_DIN is considered as 32 bits.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256.
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "[%s] Incorrect algorithm.",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit. This bit selects between HASH or HMAC mode for the
	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr,
			      HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Truncate key to blocksize */
			dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "[%s] LKEY cleared",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {	/* Wrong hash mode */
		ret = -EPERM;
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
	}
	return ret;
}

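/**
 * hash_begin - Reset the HASH core so it is ready to compute the digest
 * of a new message.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 */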
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare to initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

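/**
 * hash_process_data - Buffer incoming message data and feed it to the
 * hardware one HASH_BLOCK_SIZE block at a time, saving/resuming the
 * hardware state around each block so the device can be shared.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 * @req_ctx: Request context, holding the partial-block buffer.
 * @msg_length: Length of the incoming data.
 * @data_buffer: Incoming data.
 * @buffer: Partial-block buffer (from @req_ctx).
 * @index: Number of bytes already pending in @buffer; updated on return.
 */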
static int hash_process_data(
		struct hash_device_data *device_data,
		struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
		int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE / sizeof(u32));
				if (ret) {
					dev_err(device_data->dev, "[%s] "
						"hash_resume_state() failed!",
						__func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev, "[%s] "
						"init_hash_hw() failed!",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer.
			 */
			if ((0 == (((u32)data_buffer) % 4))
					&& (0 == *index))
				hash_processblock(device_data,
						(const u32 *)data_buffer,
						HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						(const u32 *)buffer,
						HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			ret = hash_save_state(device_data,
					&device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE / sizeof(u32));
			if (ret) {
				dev_err(device_data->dev, "[%s] "
					"hash_save_state() failed!",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}

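/**
 * hash_dma_final - Produce the digest for a DMA-mode request: configure
 * the hardware with DMA input enabled, stream the whole request through
 * the DMA channel and read back the result.
 * @req: The hash request for the data.
 */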
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);
		if (ret) {
			dev_err(device_data->dev,
				"[%s] hash_resume_state() failed!", __func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev,
				"[%s] hash_setconfiguration() failed!",
				__func__);
			goto out;
		}

		/* Enable DMA input */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32 */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/* Store the nents in the dma struct. */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (ctx->device->dma.nents < 1) {
		dev_err(device_data->dev, "[%s] ctx->device->dma.nents = 0",
			__func__);
		ret = -EFAULT;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "[%s] hash_dma_write() failed!",
			__func__);
		ret = -EIO;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
			ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/* Allocated in hash_setkey(), only used for HMAC. */
	kfree(ctx->key);
	ctx->key = NULL;

	return ret;
}

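/**
 * hash_hw_final - Produce the digest for a CPU-mode request: handle the
 * precalculated empty-message cases, pad the final block, and read the
 * digest out of the hardware.
 * @req: The hash request for the data.
 */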
int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);
		if (ret) {
			dev_err(device_data->dev,
				"[%s] hash_resume_state() failed!", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;

		/* Use the precalculated digest of the empty message. */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
				&zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
		    zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
				"key, continue...", __func__);
		} else {
			dev_err(device_data->dev, "[%s] ret=%d, or wrong "
				"digest size? %s", __func__, ret,
				(zero_hash_size == ctx->digestsize) ?
				"true" : "false");
			/* Return error */
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		dev_err(device_data->dev, "[%s] Empty message with "
			"keylength > 0, NOT supported.", __func__);
		ret = -EPERM;
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev,
				"[%s] init_hash_hw() failed!", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (device_data->base->str & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
			ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/* Allocated in hash_setkey(), only used for HMAC. */
	kfree(ctx->key);
	ctx->key = NULL;

	return ret;
}

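/**
 * hash_hw_update - Feed request data into the hardware in CPU mode,
 * walking the request scatterlist and buffering partial blocks.
 * @req: The hash request for the data.
 */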
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Check if ctx->state.length + msg_length overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL ==
	    req_ctx->state.length.high_word) {
		pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
		       __func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev,
				"[%s] hash_process_data() failed!", __func__);
			goto out;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d",
		__func__, req_ctx->state.index,
		req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}

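/**
 * hash_resume_state - Restore previously saved hardware state so an
 * interrupted calculation can continue.
 * @device_data: Structure for the hash device.
 * @device_state: The state to restore.
 */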
int hash_resume_state(struct hash_device_data *device_data,
		      const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	/* Local; shadows the module parameter of the same name. */
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members */
	if (device_state->index > HASH_BLOCK_SIZE
	    || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
		return -EPERM;
	}

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare to initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (device_data->base->cr & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		writel_relaxed(device_state->csr[count],
			       &device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}

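/**
 * hash_save_state - Save the current hardware state so another context
 * can use the device; restored later by hash_resume_state().
 * @device_data: Structure for the hash device.
 * @device_state: Where to store the state.
 */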
int hash_save_state(struct hash_device_data *device_data,
		    struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	/* Local; shadows the module parameter of the same name. */
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
		return -ENOTSUPP;
	}

	/*
	 * Write dummy value to force digest intermediate calculation. This
	 * actually makes sure that there isn't any ongoing calculation in the
	 * hardware.
	 */
	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (device_data->base->cr & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}

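/**
 * hash_check_hw - Verify that the memory-mapped block really is the
 * expected hash peripheral, by checking its peripheral and cell IDs.
 * @device_data: Structure for the hash device.
 */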
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Checking Peripheral Ids */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
	    && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
	    && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
	    && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
	    && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
	    && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
	    && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
	    && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
		return 0;
	}

	dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", __func__);
	return -ENOTSUPP;
}

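/**
 * hash_get_digest - Read the digest out of the HX result registers,
 * converting each 32-bit word to big-endian byte order.
 * @device_data: Structure for the hash device.
 * @digest: Buffer to hold the digest.
 * @algorithm: The algorithm in use (decides the digest length).
 */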
void hash_get_digest(struct hash_device_data *device_data,
		     u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
			__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
		__func__, (u32)digest);

	/* Copy result into digest array */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8)((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8)((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8)((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8)((temp_hx_val >> 0) & 0xFF);
	}
}

static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA, all data will be passed to DMA in final */

	if (ret) {
		pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
		       __func__);
	}

	return ret;
}

static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret) {
		pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
		       __func__);
	}

	return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Freed in hash_hw_final()/hash_dma_final(). */
	ctx->key = kmalloc(keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
		       "for %d\n", __func__, alg);
		return -ENOMEM;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}

/**
 * struct hash_algo_template - Ties a hardware configuration to the ahash
 * algorithm built on top of it.
 */
struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
				struct hash_algo_template,
				hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;

	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}

static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

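/*
 * Usage sketch (illustrative, not part of the driver): once the algorithms
 * above are registered, any kernel code can reach them through the generic
 * ahash API, e.g. for a one-shot SHA-256 digest:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 result[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_crypt(req, &sg, result, len);
 *	crypto_ahash_digest(req);	(error handling omitted)
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */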
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev,
				"[%s] alg registration failed",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}

static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}

static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
	if (!device_data) {
		dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "[%s] platform_get_resource() failed!",
			__func__);
		ret = -ENODEV;
		goto out_kfree;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL) {
		dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
		ret = -EBUSY;
		goto out_kfree;
	}

	device_data->base = ioremap(res->start, resource_size(res));
	if (!device_data->base) {
		dev_err(dev, "[%s] ioremap() failed!", __func__);
		ret = -ENOMEM;
		goto out_free_mem;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "[%s] regulator_get() failed!", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out_unmap;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "[%s] clk_get() failed!", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
		goto out_clk;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "[%s] ahash_algs_register_all() failed!",
			__func__);
		goto out_power;
	}

	dev_info(dev, "[%s] successfully probed\n", __func__);
	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk:
	clk_put(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out_unmap:
	iounmap(device_data->base);

out_free_mem:
	release_mem_region(res->start, resource_size(res));

out_kfree:
	kfree(device_data);
out:
	return ret;
}

static int ux500_hash_remove(struct platform_device *pdev)
{
	struct resource *res;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "[%s]: platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "[%s]: hash_disable_power() failed",
			__func__);

	clk_put(device_data->clk);
	regulator_put(device_data->regulator);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	kfree(device_data);

	return 0;
}

static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct resource *res = NULL;
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s] Hash device still in use! "
				"Shutting down anyway...", __func__);
		/*
		 * Set current_ctx to a dummy non-NULL value, so the device
		 * counts as allocated and cannot be handed out again.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
			__func__);
}

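/**
 * ux500_hash_suspend - Suspend callback: claim the device (or mark it
 * allocated with a dummy context) and power it down, saving hardware
 * state if a calculation was in progress.
 * @dev: The device.
 */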
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "[%s] dev_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	/* If the device is free, mark it allocated with a dummy context. */
	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/* ++temp_ctx yields the same dummy (NULL + 1) value as above. */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "[%s]: down_interruptible() failed",
				__func__);
		ret = hash_disable_power(device_data, false);

	} else
		ret = hash_disable_power(device_data, true);

	if (ret)
		dev_err(dev, "[%s]: hash_disable_power() failed!", __func__);

	return ret;
}

static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "[%s] dev_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	/* If suspend left the dummy (NULL + 1) context, clear it. */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static struct platform_driver hash_driver = {
	.probe    = ux500_hash_probe,
	.remove   = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver   = {
		.owner = THIS_MODULE,
		.name  = "hash1",
		.pm    = &ux500_hash_pm,
	}
};

static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}

static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("hmac-sha1-all");
MODULE_ALIAS("hmac-sha256-all");