Linux Kernel  3.7.1
cryp_core.c
1 
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/crypto.h>
15 #include <linux/dmaengine.h>
16 #include <linux/err.h>
17 #include <linux/errno.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/irqreturn.h>
21 #include <linux/klist.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/semaphore.h>
26 
27 #include <crypto/aes.h>
28 #include <crypto/algapi.h>
29 #include <crypto/ctr.h>
30 #include <crypto/des.h>
31 #include <crypto/scatterwalk.h>
32 
33 #include <plat/ste_dma40.h>
34 
36 #include <mach/hardware.h>
37 
38 #include "cryp_p.h"
39 #include "cryp.h"
40 
41 #define CRYP_MAX_KEY_SIZE 32
42 #define BYTES_PER_WORD 4
43 
44 static int cryp_mode;
45 static atomic_t session_id;
46 
47 static struct stedma40_chan_cfg *mem_to_engine;
48 static struct stedma40_chan_cfg *engine_to_mem;
49 
56 struct cryp_driver_data {
57  struct klist device_list;
58  struct semaphore device_allocation;
59 };
60 
76 struct cryp_ctx {
77  struct cryp_config config;
78  u8 key[CRYP_MAX_KEY_SIZE];
79  u32 keylen;
80  u8 *iv;
81  const u8 *indata;
82  u8 *outdata;
83  u32 datalen;
84  u32 outlen;
85  u32 blocksize;
86  u8 updated;
87  struct cryp_device_context dev_ctx;
88  struct cryp_device_data *device;
89  u32 session_id;
90 };
91 
92 static struct cryp_driver_data driver_data;
93 
98 static inline u32 uint8p_to_uint32_be(u8 *in)
99 {
100  u32 *data = (u32 *)in;
101 
102  return cpu_to_be32p(data);
103 }
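/*
 * Editorial example (not in the original file): the helper above reads four
 * bytes as one big-endian word, e.g. {0x01, 0x02, 0x03, 0x04} becomes
 * 0x01020304, independent of CPU endianness.
 */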
104 
127 static inline u8 swap_bits_in_byte(u8 b)
128 {
129 #define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */
130 #define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5,
131  right shift 2 */
132 #define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4,
133  right shift 1 */
134 #define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */
135 #define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4,
136  left shift 2 */
137 #define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6,
138  left shift 1 */
139 
140  u8 n1;
141  u8 n2;
142 
143  /* Swap most significant nibble */
144  /* Right shift 4, bits 6 and 7 */
145  n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
146  /* Right shift 2, bits 3 and 5 */
147  n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
148  /* Right shift 1, bits 1-4 */
149  n1 = (n1 & R_SHIFT_1_MASK) >> 1;
150 
151  /* Swap least significant nibble */
152  /* Left shift 4, bits 0 and 1 */
153  n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
154  /* Left shift 2, bits 2 and 4 */
155  n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
156  /* Left shift 1, bits 3-6 */
157  n2 = (n2 & L_SHIFT_1_MASK) << 1;
158 
159  return n1 | n2;
160 }
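/*
 * Editorial example (not in the original file): swap_bits_in_byte() reverses
 * the bit order within a byte, e.g.
 *
 *	swap_bits_in_byte(0x01) == 0x80
 *	swap_bits_in_byte(0xc5) == 0xa3		(11000101b -> 10100011b)
 */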
161 
162 static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
163  u8 *out, u32 len)
164 {
165  unsigned int i = 0;
166  int j;
167  int index = 0;
168 
169  j = len - BYTES_PER_WORD;
170  while (j >= 0) {
171  for (i = 0; i < BYTES_PER_WORD; i++) {
172  index = len - j - BYTES_PER_WORD + i;
173  out[j + i] =
174  swap_bits_in_byte(in[index]);
175  }
176  j -= BYTES_PER_WORD;
177  }
178 }
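/*
 * Editorial example (not in the original file): for an 8-byte key
 * k0 k1 k2 k3 k4 k5 k6 k7 the function above produces
 * rev(k4) rev(k5) rev(k6) rev(k7) rev(k0) rev(k1) rev(k2) rev(k3),
 * i.e. the 32-bit words are swapped end-for-end and every byte is
 * bit-reversed before the key is written to the hardware key registers.
 */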
179 
180 static void add_session_id(struct cryp_ctx *ctx)
181 {
182  /*
183  * We never want 0 to be a valid value, since this is the default value
184  * for the software context.
185  */
186  if (unlikely(atomic_inc_and_test(&session_id)))
187  atomic_inc(&session_id);
188 
189  ctx->session_id = atomic_read(&session_id);
190 }
191 
192 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
193 {
194  struct cryp_ctx *ctx;
195  int i;
196  struct cryp_device_data *device_data;
197 
198  if (param == NULL) {
199  BUG_ON(!param);
200  return IRQ_HANDLED;
201  }
202 
203  /* The device is coming from the one found in hw_crypt_noxts. */
204  device_data = (struct cryp_device_data *)param;
205 
206  ctx = device_data->current_ctx;
207 
208  if (ctx == NULL) {
209  BUG_ON(!ctx);
210  return IRQ_HANDLED;
211  }
212 
213  dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
214  cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
215  "out" : "in");
216 
217  if (cryp_pending_irq_src(device_data,
218  CRYP_IRQ_SRC_OUTPUT_FIFO)) {
219  if (ctx->outlen / ctx->blocksize > 0) {
220  for (i = 0; i < ctx->blocksize / 4; i++) {
221  *(ctx->outdata) = readl_relaxed(
222  &device_data->base->dout);
223  ctx->outdata += 4;
224  ctx->outlen -= 4;
225  }
226 
227  if (ctx->outlen == 0) {
228  cryp_disable_irq_src(device_data,
229  CRYP_IRQ_SRC_OUTPUT_FIFO);
230  }
231  }
232  } else if (cryp_pending_irq_src(device_data,
233  CRYP_IRQ_SRC_INPUT_FIFO)) {
234  if (ctx->datalen / ctx->blocksize > 0) {
235  for (i = 0 ; i < ctx->blocksize / 4; i++) {
236  writel_relaxed(ctx->indata,
237  &device_data->base->din);
238  ctx->indata += 4;
239  ctx->datalen -= 4;
240  }
241 
242  if (ctx->datalen == 0)
243  cryp_disable_irq_src(device_data,
244  CRYP_IRQ_SRC_INPUT_FIFO);
245 
246  if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
247  CRYP_PUT_BITS(&device_data->base->cr,
251 
252  cryp_wait_until_done(device_data);
253  }
254  }
255  }
256 
257  return IRQ_HANDLED;
258 }
259 
260 static int mode_is_aes(enum cryp_algo_mode mode)
261 {
262  return CRYP_ALGO_AES_ECB == mode ||
263  CRYP_ALGO_AES_CBC == mode ||
264  CRYP_ALGO_AES_CTR == mode ||
265  CRYP_ALGO_AES_XTS == mode;
266 }
267 
268 static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
269  enum cryp_init_vector_index index)
270 {
271  struct cryp_init_vector_value vector_value;
272 
273  dev_dbg(device_data->dev, "[%s]", __func__);
274 
275  vector_value.init_value_left = left;
276  vector_value.init_value_right = right;
277 
278  return cryp_configure_init_vector(device_data,
279  index,
280  vector_value);
281 }
282 
283 static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
284 {
285  int i;
286  int status = 0;
287  int num_of_regs = ctx->blocksize / 8;
288  u32 iv[AES_BLOCK_SIZE / 4];
289 
290  dev_dbg(device_data->dev, "[%s]", __func__);
291 
292  /*
293  * Since we loop on num_of_regs we need to have a check in case
294  * someone provides an incorrect blocksize which would force calling
295  * cfg_iv with i greater than 2 which is an error.
296  */
297  if (num_of_regs > 2) {
298  dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
299  __func__, ctx->blocksize);
300  return -EINVAL;
301  }
302 
303  for (i = 0; i < ctx->blocksize / 4; i++)
304  iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
305 
306  for (i = 0; i < num_of_regs; i++) {
307  status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
308  (enum cryp_init_vector_index) i);
309  if (status != 0)
310  return status;
311  }
312  return status;
313 }
314 
315 static int set_key(struct cryp_device_data *device_data,
316  u32 left_key,
317  u32 right_key,
318  enum cryp_key_reg_index index)
319 {
320  struct cryp_key_value key_value;
321  int cryp_error;
322 
323  dev_dbg(device_data->dev, "[%s]", __func__);
324 
325  key_value.key_value_left = left_key;
326  key_value.key_value_right = right_key;
327 
328  cryp_error = cryp_configure_key_values(device_data,
329  index,
330  key_value);
331  if (cryp_error != 0)
332  dev_err(device_data->dev, "[%s]: "
333  "cryp_configure_key_values() failed!", __func__);
334 
335  return cryp_error;
336 }
337 
338 static int cfg_keys(struct cryp_ctx *ctx)
339 {
340  int i;
341  int num_of_regs = ctx->keylen / 8;
342  u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
343  int cryp_error = 0;
344 
345  dev_dbg(ctx->device->dev, "[%s]", __func__);
346 
347  if (mode_is_aes(ctx->config.algomode)) {
348  swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
349  (u8 *)swapped_key,
350  ctx->keylen);
351  } else {
352  for (i = 0; i < ctx->keylen / 4; i++)
353  swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
354  }
355 
356  for (i = 0; i < num_of_regs; i++) {
357  cryp_error = set_key(ctx->device,
358  *(((u32 *)swapped_key)+i*2),
359  *(((u32 *)swapped_key)+i*2+1),
360  (enum cryp_key_reg_index) i);
361 
362  if (cryp_error != 0) {
363  dev_err(ctx->device->dev, "[%s]: set_key() failed!",
364  __func__);
365  return cryp_error;
366  }
367  }
368  return cryp_error;
369 }
370 
371 static int cryp_setup_context(struct cryp_ctx *ctx,
372  struct cryp_device_data *device_data)
373 {
374  u32 control_register = CRYP_CR_DEFAULT;
375 
376  switch (cryp_mode) {
377  case CRYP_MODE_INTERRUPT:
378  writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
379  break;
380 
381  case CRYP_MODE_DMA:
382  writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
383  break;
384 
385  default:
386  break;
387  }
388 
389  if (ctx->updated == 0) {
390  cryp_flush_inoutfifo(device_data);
391  if (cfg_keys(ctx) != 0) {
392  dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
393  __func__);
394  return -EINVAL;
395  }
396 
397  if (ctx->iv &&
398  CRYP_ALGO_AES_ECB != ctx->config.algomode &&
399  CRYP_ALGO_DES_ECB != ctx->config.algomode &&
400  CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
401  if (cfg_ivs(device_data, ctx) != 0)
402  return -EPERM;
403  }
404 
405  cryp_set_configuration(device_data, &ctx->config,
406  &control_register);
407  add_session_id(ctx);
408  } else if (ctx->updated == 1 &&
409  ctx->session_id != atomic_read(&session_id)) {
410  cryp_flush_inoutfifo(device_data);
411  cryp_restore_device_context(device_data, &ctx->dev_ctx);
412 
413  add_session_id(ctx);
414  control_register = ctx->dev_ctx.cr;
415  } else
416  control_register = ctx->dev_ctx.cr;
417 
418  writel(control_register |
420  &device_data->base->cr);
421 
422  return 0;
423 }
424 
425 static int cryp_get_device_data(struct cryp_ctx *ctx,
426  struct cryp_device_data **device_data)
427 {
428  int ret;
429  struct klist_iter device_iterator;
430  struct klist_node *device_node;
431  struct cryp_device_data *local_device_data = NULL;
432  pr_debug(DEV_DBG_NAME " [%s]", __func__);
433 
434  /* Wait until a device is available */
435  ret = down_interruptible(&driver_data.device_allocation);
436  if (ret)
437  return ret; /* Interrupted */
438 
439  /* Select a device */
440  klist_iter_init(&driver_data.device_list, &device_iterator);
441 
442  device_node = klist_next(&device_iterator);
443  while (device_node) {
444  local_device_data = container_of(device_node,
445  struct cryp_device_data, list_node);
446  spin_lock(&local_device_data->ctx_lock);
447  /* current_ctx allocates a device, NULL = unallocated */
448  if (local_device_data->current_ctx) {
449  device_node = klist_next(&device_iterator);
450  } else {
451  local_device_data->current_ctx = ctx;
452  ctx->device = local_device_data;
453  spin_unlock(&local_device_data->ctx_lock);
454  break;
455  }
456  spin_unlock(&local_device_data->ctx_lock);
457  }
458  klist_iter_exit(&device_iterator);
459 
460  if (!device_node) {
469  return -EBUSY;
470  }
471 
472  *device_data = local_device_data;
473 
474  return 0;
475 }
476 
477 static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
478  struct device *dev)
479 {
480  dma_cap_zero(device_data->dma.mask);
481  dma_cap_set(DMA_SLAVE, device_data->dma.mask);
482 
483  device_data->dma.cfg_mem2cryp = mem_to_engine;
484  device_data->dma.chan_mem2cryp =
485  dma_request_channel(device_data->dma.mask,
486  stedma40_filter,
487  device_data->dma.cfg_mem2cryp);
488 
489  device_data->dma.cfg_cryp2mem = engine_to_mem;
490  device_data->dma.chan_cryp2mem =
491  dma_request_channel(device_data->dma.mask,
492  stedma40_filter,
493  device_data->dma.cfg_cryp2mem);
494 
495  init_completion(&device_data->dma.cryp_dma_complete);
496 }
497 
498 static void cryp_dma_out_callback(void *data)
499 {
500  struct cryp_ctx *ctx = (struct cryp_ctx *) data;
501  dev_dbg(ctx->device->dev, "[%s]: ", __func__);
502 
503  complete(&ctx->device->dma.cryp_dma_complete);
504 }
505 
506 static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
507  struct scatterlist *sg,
508  int len,
509  enum dma_data_direction direction)
510 {
511  struct dma_async_tx_descriptor *desc;
512  struct dma_chan *channel = NULL;
513  dma_cookie_t cookie;
514 
515  dev_dbg(ctx->device->dev, "[%s]: ", __func__);
516 
517  if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
518  dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
519  "aligned! Addr: 0x%08x", __func__, (u32)sg);
520  return -EFAULT;
521  }
522 
523  switch (direction) {
524  case DMA_TO_DEVICE:
525  channel = ctx->device->dma.chan_mem2cryp;
526  ctx->device->dma.sg_src = sg;
527  ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
528  ctx->device->dma.sg_src,
529  ctx->device->dma.nents_src,
530  direction);
531 
532  if (!ctx->device->dma.sg_src_len) {
533  dev_dbg(ctx->device->dev,
534  "[%s]: Could not map the sg list (TO_DEVICE)",
535  __func__);
536  return -EFAULT;
537  }
538 
539  dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
540  "(TO_DEVICE)", __func__);
541 
542  desc = channel->device->device_prep_slave_sg(channel,
543  ctx->device->dma.sg_src,
544  ctx->device->dma.sg_src_len,
545  direction, DMA_CTRL_ACK, NULL);
546  break;
547 
548  case DMA_FROM_DEVICE:
549  channel = ctx->device->dma.chan_cryp2mem;
550  ctx->device->dma.sg_dst = sg;
551  ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
552  ctx->device->dma.sg_dst,
553  ctx->device->dma.nents_dst,
554  direction);
555 
556  if (!ctx->device->dma.sg_dst_len) {
557  dev_dbg(ctx->device->dev,
558  "[%s]: Could not map the sg list (FROM_DEVICE)",
559  __func__);
560  return -EFAULT;
561  }
562 
563  dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
564  "(FROM_DEVICE)", __func__);
565 
566  desc = channel->device->device_prep_slave_sg(channel,
567  ctx->device->dma.sg_dst,
568  ctx->device->dma.sg_dst_len,
569  direction,
570  DMA_CTRL_ACK |
571  DMA_PREP_INTERRUPT);
572 
573  desc->callback = cryp_dma_out_callback;
574  desc->callback_param = ctx;
575  break;
576 
577  default:
578  dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
579  __func__);
580  return -EFAULT;
581  }
582 
583  cookie = desc->tx_submit(desc);
584  dma_async_issue_pending(channel);
585 
586  return 0;
587 }
588 
589 static void cryp_dma_done(struct cryp_ctx *ctx)
590 {
591  struct dma_chan *chan;
592 
593  dev_dbg(ctx->device->dev, "[%s]: ", __func__);
594 
595  chan = ctx->device->dma.chan_mem2cryp;
596  chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
597  dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
598  ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
599 
600  chan = ctx->device->dma.chan_cryp2mem;
601  chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
602  dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
603  ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
604 }
605 
606 static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
607  int len)
608 {
609  int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
610  dev_dbg(ctx->device->dev, "[%s]: ", __func__);
611 
612  if (error) {
613  dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
614  "failed", __func__);
615  return error;
616  }
617 
618  return len;
619 }
620 
621 static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
622 {
623  int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
624  if (error) {
625  dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
626  "failed", __func__);
627  return error;
628  }
629 
630  return len;
631 }
632 
633 static void cryp_polling_mode(struct cryp_ctx *ctx,
634  struct cryp_device_data *device_data)
635 {
636  int len = ctx->blocksize / BYTES_PER_WORD;
637  int remaining_length = ctx->datalen;
638  u32 *indata = (u32 *)ctx->indata;
639  u32 *outdata = (u32 *)ctx->outdata;
640 
641  while (remaining_length > 0) {
642  writesl(&device_data->base->din, indata, len);
643  indata += len;
644  remaining_length -= (len * BYTES_PER_WORD);
645  cryp_wait_until_done(device_data);
646 
647  readsl(&device_data->base->dout, outdata, len);
648  outdata += len;
649  cryp_wait_until_done(device_data);
650  }
651 }
652 
653 static int cryp_disable_power(struct device *dev,
654  struct cryp_device_data *device_data,
655  bool save_device_context)
656 {
657  int ret = 0;
658 
659  dev_dbg(dev, "[%s]", __func__);
660 
661  spin_lock(&device_data->power_state_spinlock);
662  if (!device_data->power_state)
663  goto out;
664 
665  spin_lock(&device_data->ctx_lock);
666  if (save_device_context && device_data->current_ctx) {
667  cryp_save_device_context(device_data,
668  &device_data->current_ctx->dev_ctx,
669  cryp_mode);
670  device_data->restore_dev_ctx = true;
671  }
672  spin_unlock(&device_data->ctx_lock);
673 
674  clk_disable(device_data->clk);
675  ret = regulator_disable(device_data->pwr_regulator);
676  if (ret)
677  dev_err(dev, "[%s]: "
678  "regulator_disable() failed!",
679  __func__);
680 
681  device_data->power_state = false;
682 
683 out:
684  spin_unlock(&device_data->power_state_spinlock);
685 
686  return ret;
687 }
688 
689 static int cryp_enable_power(
690  struct device *dev,
691  struct cryp_device_data *device_data,
692  bool restore_device_context)
693 {
694  int ret = 0;
695 
696  dev_dbg(dev, "[%s]", __func__);
697 
698  spin_lock(&device_data->power_state_spinlock);
699  if (!device_data->power_state) {
700  ret = regulator_enable(device_data->pwr_regulator);
701  if (ret) {
702  dev_err(dev, "[%s]: regulator_enable() failed!",
703  __func__);
704  goto out;
705  }
706 
707  ret = clk_enable(device_data->clk);
708  if (ret) {
709  dev_err(dev, "[%s]: clk_enable() failed!",
710  __func__);
711  regulator_disable(device_data->pwr_regulator);
712  goto out;
713  }
714  device_data->power_state = true;
715  }
716 
717  if (device_data->restore_dev_ctx) {
718  spin_lock(&device_data->ctx_lock);
719  if (restore_device_context && device_data->current_ctx) {
720  device_data->restore_dev_ctx = false;
721  cryp_restore_device_context(device_data,
722  &device_data->current_ctx->dev_ctx);
723  }
724  spin_unlock(&device_data->ctx_lock);
725  }
726 out:
727  spin_unlock(&device_data->power_state_spinlock);
728 
729  return ret;
730 }
731 
732 static int hw_crypt_noxts(struct cryp_ctx *ctx,
733  struct cryp_device_data *device_data)
734 {
735  int ret = 0;
736 
737  const u8 *indata = ctx->indata;
738  u8 *outdata = ctx->outdata;
739  u32 datalen = ctx->datalen;
740  u32 outlen = datalen;
741 
742  pr_debug(DEV_DBG_NAME " [%s]", __func__);
743 
744  ctx->outlen = ctx->datalen;
745 
746  if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
747  pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
748  "0x%08x", __func__, (u32)indata);
749  return -EINVAL;
750  }
751 
752  ret = cryp_setup_context(ctx, device_data);
753 
754  if (ret)
755  goto out;
756 
757  if (cryp_mode == CRYP_MODE_INTERRUPT) {
758  cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
759  CRYP_IRQ_SRC_OUTPUT_FIFO);
760 
761  /*
762  * ctx->outlen is decremented in the cryp_interrupt_handler
763  * function. We had to add cpu_relax() (barrier) to make sure
764  * that gcc didn't optimize away this variable.
765  */
766  while (ctx->outlen > 0)
767  cpu_relax();
768  } else if (cryp_mode == CRYP_MODE_POLLING ||
769  cryp_mode == CRYP_MODE_DMA) {
770  /*
771  * The reason DMA also lands in this branch is that when we are
772  * running cryp_mode = 2 (DMA), the separate DMA routines only
773  * handle cipher/plaintext larger than the blocksize; for the
774  * normal CRYPTO_ALG_TYPE_CIPHER case we still use polling mode,
775  * since the overhead of setting up DMA eats up the benefit of
776  * using it.
777  */
778  cryp_polling_mode(ctx, device_data);
779  } else {
780  dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
781  __func__);
782  ret = -EPERM;
783  goto out;
784  }
785 
786  cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
787  ctx->updated = 1;
788 
789 out:
790  ctx->indata = indata;
791  ctx->outdata = outdata;
792  ctx->datalen = datalen;
793  ctx->outlen = outlen;
794 
795  return ret;
796 }
797 
798 static int get_nents(struct scatterlist *sg, int nbytes)
799 {
800  int nents = 0;
801 
802  while (nbytes > 0) {
803  nbytes -= sg->length;
804  sg = scatterwalk_sg_next(sg);
805  nents++;
806  }
807 
808  return nents;
809 }
810 
811 static int ablk_dma_crypt(struct ablkcipher_request *areq)
812 {
813  struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
814  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
815  struct cryp_device_data *device_data;
816 
817  int bytes_written = 0;
818  int bytes_read = 0;
819  int ret;
820 
821  pr_debug(DEV_DBG_NAME " [%s]", __func__);
822 
823  ctx->datalen = areq->nbytes;
824  ctx->outlen = areq->nbytes;
825 
826  ret = cryp_get_device_data(ctx, &device_data);
827  if (ret)
828  return ret;
829 
830  ret = cryp_setup_context(ctx, device_data);
831  if (ret)
832  goto out;
833 
834  /* We have the device now, so store the nents in the dma struct. */
835  ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
836  ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
837 
838  /* Enable DMA in- and output. */
840 
841  bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
842  bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
843 
844  wait_for_completion(&ctx->device->dma.cryp_dma_complete);
845  cryp_dma_done(ctx);
846 
847  cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
848  ctx->updated = 1;
849 
850 out:
851  spin_lock(&device_data->ctx_lock);
852  device_data->current_ctx = NULL;
853  ctx->device = NULL;
854  spin_unlock(&device_data->ctx_lock);
855 
856  /*
857  * The down_interruptible part for this semaphore is called in
858  * cryp_get_device_data.
859  */
860  up(&driver_data.device_allocation);
861 
862  if (unlikely(bytes_written != bytes_read))
863  return -EPERM;
864 
865  return 0;
866 }
867 
868 static int ablk_crypt(struct ablkcipher_request *areq)
869 {
870  struct ablkcipher_walk walk;
871  struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
872  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
873  struct cryp_device_data *device_data;
874  unsigned long src_paddr;
875  unsigned long dst_paddr;
876  int ret;
877  int nbytes;
878 
879  pr_debug(DEV_DBG_NAME " [%s]", __func__);
880 
881  ret = cryp_get_device_data(ctx, &device_data);
882  if (ret)
883  goto out;
884 
885  ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
886  ret = ablkcipher_walk_phys(areq, &walk);
887 
888  if (ret) {
889  pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
890  __func__);
891  goto out;
892  }
893 
894  while ((nbytes = walk.nbytes) > 0) {
895  ctx->iv = walk.iv;
896  src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
897  ctx->indata = phys_to_virt(src_paddr);
898 
899  dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
900  ctx->outdata = phys_to_virt(dst_paddr);
901 
902  ctx->datalen = nbytes - (nbytes % ctx->blocksize);
903 
904  ret = hw_crypt_noxts(ctx, device_data);
905  if (ret)
906  goto out;
907 
908  nbytes -= ctx->datalen;
909  ret = ablkcipher_walk_done(areq, &walk, nbytes);
910  if (ret)
911  goto out;
912  }
913  ablkcipher_walk_complete(&walk);
914 
915 out:
916  /* Release the device */
917  spin_lock(&device_data->ctx_lock);
918  device_data->current_ctx = NULL;
919  ctx->device = NULL;
920  spin_unlock(&device_data->ctx_lock);
921 
922  /*
923  * The down_interruptible part for this semaphore is called in
924  * cryp_get_device_data.
925  */
926  up(&driver_data.device_allocation);
927 
928  return ret;
929 }
930 
931 static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
932  const u8 *key, unsigned int keylen)
933 {
934  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
935  u32 *flags = &cipher->base.crt_flags;
936 
937  pr_debug(DEV_DBG_NAME " [%s]", __func__);
938 
939  switch (keylen) {
940  case AES_KEYSIZE_128:
941  ctx->config.keysize = CRYP_KEY_SIZE_128;
942  break;
943 
944  case AES_KEYSIZE_192:
945  ctx->config.keysize = CRYP_KEY_SIZE_192;
946  break;
947 
948  case AES_KEYSIZE_256:
949  ctx->config.keysize = CRYP_KEY_SIZE_256;
950  break;
951 
952  default:
953  pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
954  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
955  return -EINVAL;
956  }
957 
958  memcpy(ctx->key, key, keylen);
959  ctx->keylen = keylen;
960 
961  ctx->updated = 0;
962 
963  return 0;
964 }
965 
966 static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
967  const u8 *key, unsigned int keylen)
968 {
969  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
970  u32 *flags = &cipher->base.crt_flags;
971  u32 tmp[DES_EXPKEY_WORDS];
972  int ret;
973 
974  pr_debug(DEV_DBG_NAME " [%s]", __func__);
975  if (keylen != DES_KEY_SIZE) {
976  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
977  pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
978  __func__);
979  return -EINVAL;
980  }
981 
982  ret = des_ekey(tmp, key);
983  if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
984  *flags |= CRYPTO_TFM_RES_WEAK_KEY;
985  pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
986  __func__);
987  return -EINVAL;
988  }
989 
990  memcpy(ctx->key, key, keylen);
991  ctx->keylen = keylen;
992 
993  ctx->updated = 0;
994  return 0;
995 }
996 
997 static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
998  const u8 *key, unsigned int keylen)
999 {
1000  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1001  u32 *flags = &cipher->base.crt_flags;
1002  const u32 *K = (const u32 *)key;
1003  u32 tmp[DES_EXPKEY_WORDS];
1004  int i, ret;
1005 
1006  pr_debug(DEV_DBG_NAME " [%s]", __func__);
1007  if (keylen != DES3_EDE_KEY_SIZE) {
1008  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
1009  pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
1010  __func__);
1011  return -EINVAL;
1012  }
1013 
1014  /* Checking key interdependency for weak key detection. */
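 /*
  * Editorial note: the first sub-expression below is zero only when
  * K1 == K2 and the second only when K2 == K3; either case collapses
  * 3DES to single DES, hence the weak-key rejection.
  */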
1015  if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1016  !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
1017  (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
1018  *flags |= CRYPTO_TFM_RES_WEAK_KEY;
1019  pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
1020  __func__);
1021  return -EINVAL;
1022  }
1023  for (i = 0; i < 3; i++) {
1024  ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
1025  if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
1026  *flags |= CRYPTO_TFM_RES_WEAK_KEY;
1027  pr_debug(DEV_DBG_NAME " [%s]: "
1028  "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
1029  return -EINVAL;
1030  }
1031  }
1032 
1033  memcpy(ctx->key, key, keylen);
1034  ctx->keylen = keylen;
1035 
1036  ctx->updated = 0;
1037  return 0;
1038 }
1039 
1040 static int cryp_blk_encrypt(struct ablkcipher_request *areq)
1041 {
1042  struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1043  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1044 
1045  pr_debug(DEV_DBG_NAME " [%s]", __func__);
1046 
1047  ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
1048 
1049  /* DMA does not work for DES due to a hw bug */
1051  if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1052  return ablk_dma_crypt(areq);
1053 
1054  /* For everything except DMA, we run the non DMA version. */
1055  return ablk_crypt(areq);
1056 }
1057 
1058 static int cryp_blk_decrypt(struct ablkcipher_request *areq)
1059 {
1060  struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1061  struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1062 
1063  pr_debug(DEV_DBG_NAME " [%s]", __func__);
1064 
1065  ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
1066 
1067  /* DMA does not work for DES due to a hw bug */
1068  if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1069  return ablk_dma_crypt(areq);
1070 
1071  /* For everything except DMA, we run the non DMA version. */
1072  return ablk_crypt(areq);
1073 }
1074 
1075 struct cryp_algo_template {
1076  enum cryp_algo_mode algomode;
1077  struct crypto_alg crypto;
1078 };
1079 
1080 static int cryp_cra_init(struct crypto_tfm *tfm)
1081 {
1082  struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
1083  struct crypto_alg *alg = tfm->__crt_alg;
1084  struct cryp_algo_template *cryp_alg = container_of(alg,
1085  struct cryp_algo_template,
1086  crypto);
1087 
1088  ctx->config.algomode = cryp_alg->algomode;
1089  ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
1090 
1091  return 0;
1092 }
1093 
1094 static struct cryp_algo_template cryp_algs[] = {
1095  {
1096  .algomode = CRYP_ALGO_AES_ECB,
1097  .crypto = {
1098  .cra_name = "aes",
1099  .cra_driver_name = "aes-ux500",
1100  .cra_priority = 300,
1101  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1102  CRYPTO_ALG_ASYNC,
1103  .cra_blocksize = AES_BLOCK_SIZE,
1104  .cra_ctxsize = sizeof(struct cryp_ctx),
1105  .cra_alignmask = 3,
1106  .cra_type = &crypto_ablkcipher_type,
1107  .cra_init = cryp_cra_init,
1108  .cra_module = THIS_MODULE,
1109  .cra_u = {
1110  .ablkcipher = {
1111  .min_keysize = AES_MIN_KEY_SIZE,
1112  .max_keysize = AES_MAX_KEY_SIZE,
1113  .setkey = aes_ablkcipher_setkey,
1114  .encrypt = cryp_blk_encrypt,
1115  .decrypt = cryp_blk_decrypt
1116  }
1117  }
1118  }
1119  },
1120  {
1121  .algomode = CRYP_ALGO_AES_ECB,
1122  .crypto = {
1123  .cra_name = "ecb(aes)",
1124  .cra_driver_name = "ecb-aes-ux500",
1125  .cra_priority = 300,
1126  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1127  CRYPTO_ALG_ASYNC,
1128  .cra_blocksize = AES_BLOCK_SIZE,
1129  .cra_ctxsize = sizeof(struct cryp_ctx),
1130  .cra_alignmask = 3,
1131  .cra_type = &crypto_ablkcipher_type,
1132  .cra_init = cryp_cra_init,
1133  .cra_module = THIS_MODULE,
1134  .cra_u = {
1135  .ablkcipher = {
1136  .min_keysize = AES_MIN_KEY_SIZE,
1137  .max_keysize = AES_MAX_KEY_SIZE,
1138  .setkey = aes_ablkcipher_setkey,
1139  .encrypt = cryp_blk_encrypt,
1140  .decrypt = cryp_blk_decrypt,
1141  }
1142  }
1143  }
1144  },
1145  {
1146  .algomode = CRYP_ALGO_AES_CBC,
1147  .crypto = {
1148  .cra_name = "cbc(aes)",
1149  .cra_driver_name = "cbc-aes-ux500",
1150  .cra_priority = 300,
1151  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1152  CRYPTO_ALG_ASYNC,
1153  .cra_blocksize = AES_BLOCK_SIZE,
1154  .cra_ctxsize = sizeof(struct cryp_ctx),
1155  .cra_alignmask = 3,
1156  .cra_type = &crypto_ablkcipher_type,
1157  .cra_init = cryp_cra_init,
1158  .cra_module = THIS_MODULE,
1159  .cra_u = {
1160  .ablkcipher = {
1161  .min_keysize = AES_MIN_KEY_SIZE,
1162  .max_keysize = AES_MAX_KEY_SIZE,
1163  .setkey = aes_ablkcipher_setkey,
1164  .encrypt = cryp_blk_encrypt,
1165  .decrypt = cryp_blk_decrypt,
1166  .ivsize = AES_BLOCK_SIZE,
1167  }
1168  }
1169  }
1170  },
1171  {
1172  .algomode = CRYP_ALGO_AES_CTR,
1173  .crypto = {
1174  .cra_name = "ctr(aes)",
1175  .cra_driver_name = "ctr-aes-ux500",
1176  .cra_priority = 300,
1177  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1178  CRYPTO_ALG_ASYNC,
1179  .cra_blocksize = AES_BLOCK_SIZE,
1180  .cra_ctxsize = sizeof(struct cryp_ctx),
1181  .cra_alignmask = 3,
1182  .cra_type = &crypto_ablkcipher_type,
1183  .cra_init = cryp_cra_init,
1184  .cra_module = THIS_MODULE,
1185  .cra_u = {
1186  .ablkcipher = {
1187  .min_keysize = AES_MIN_KEY_SIZE,
1188  .max_keysize = AES_MAX_KEY_SIZE,
1189  .setkey = aes_ablkcipher_setkey,
1190  .encrypt = cryp_blk_encrypt,
1191  .decrypt = cryp_blk_decrypt,
1192  .ivsize = AES_BLOCK_SIZE,
1193  }
1194  }
1195  }
1196  },
1197  {
1198  .algomode = CRYP_ALGO_DES_ECB,
1199  .crypto = {
1200  .cra_name = "des",
1201  .cra_driver_name = "des-ux500",
1202  .cra_priority = 300,
1203  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1204  CRYPTO_ALG_ASYNC,
1205  .cra_blocksize = DES_BLOCK_SIZE,
1206  .cra_ctxsize = sizeof(struct cryp_ctx),
1207  .cra_alignmask = 3,
1208  .cra_type = &crypto_ablkcipher_type,
1209  .cra_init = cryp_cra_init,
1210  .cra_module = THIS_MODULE,
1211  .cra_u = {
1212  .ablkcipher = {
1213  .min_keysize = DES_KEY_SIZE,
1214  .max_keysize = DES_KEY_SIZE,
1215  .setkey = des_ablkcipher_setkey,
1216  .encrypt = cryp_blk_encrypt,
1217  .decrypt = cryp_blk_decrypt
1218  }
1219  }
1220  }
1221 
1222  },
1223  {
1224  .algomode = CRYP_ALGO_TDES_ECB,
1225  .crypto = {
1226  .cra_name = "des3_ede",
1227  .cra_driver_name = "des3_ede-ux500",
1228  .cra_priority = 300,
1229  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1230  CRYPTO_ALG_ASYNC,
1231  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1232  .cra_ctxsize = sizeof(struct cryp_ctx),
1233  .cra_alignmask = 3,
1234  .cra_type = &crypto_ablkcipher_type,
1235  .cra_init = cryp_cra_init,
1236  .cra_module = THIS_MODULE,
1237  .cra_u = {
1238  .ablkcipher = {
1239  .min_keysize = DES3_EDE_KEY_SIZE,
1240  .max_keysize = DES3_EDE_KEY_SIZE,
1241  .setkey = des_ablkcipher_setkey,
1242  .encrypt = cryp_blk_encrypt,
1243  .decrypt = cryp_blk_decrypt
1244  }
1245  }
1246  }
1247  },
1248  {
1249  .algomode = CRYP_ALGO_DES_ECB,
1250  .crypto = {
1251  .cra_name = "ecb(des)",
1252  .cra_driver_name = "ecb-des-ux500",
1253  .cra_priority = 300,
1254  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1255  CRYPTO_ALG_ASYNC,
1256  .cra_blocksize = DES_BLOCK_SIZE,
1257  .cra_ctxsize = sizeof(struct cryp_ctx),
1258  .cra_alignmask = 3,
1259  .cra_type = &crypto_ablkcipher_type,
1260  .cra_init = cryp_cra_init,
1261  .cra_module = THIS_MODULE,
1262  .cra_u = {
1263  .ablkcipher = {
1264  .min_keysize = DES_KEY_SIZE,
1265  .max_keysize = DES_KEY_SIZE,
1266  .setkey = des_ablkcipher_setkey,
1267  .encrypt = cryp_blk_encrypt,
1268  .decrypt = cryp_blk_decrypt,
1269  }
1270  }
1271  }
1272  },
1273  {
1274  .algomode = CRYP_ALGO_TDES_ECB,
1275  .crypto = {
1276  .cra_name = "ecb(des3_ede)",
1277  .cra_driver_name = "ecb-des3_ede-ux500",
1278  .cra_priority = 300,
1279  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1280  CRYPTO_ALG_ASYNC,
1281  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1282  .cra_ctxsize = sizeof(struct cryp_ctx),
1283  .cra_alignmask = 3,
1284  .cra_type = &crypto_ablkcipher_type,
1285  .cra_init = cryp_cra_init,
1286  .cra_module = THIS_MODULE,
1287  .cra_u = {
1288  .ablkcipher = {
1289  .min_keysize = DES3_EDE_KEY_SIZE,
1290  .max_keysize = DES3_EDE_KEY_SIZE,
1291  .setkey = des3_ablkcipher_setkey,
1292  .encrypt = cryp_blk_encrypt,
1293  .decrypt = cryp_blk_decrypt,
1294  }
1295  }
1296  }
1297  },
1298  {
1299  .algomode = CRYP_ALGO_DES_CBC,
1300  .crypto = {
1301  .cra_name = "cbc(des)",
1302  .cra_driver_name = "cbc-des-ux500",
1303  .cra_priority = 300,
1304  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1305  CRYPTO_ALG_ASYNC,
1306  .cra_blocksize = DES_BLOCK_SIZE,
1307  .cra_ctxsize = sizeof(struct cryp_ctx),
1308  .cra_alignmask = 3,
1309  .cra_type = &crypto_ablkcipher_type,
1310  .cra_init = cryp_cra_init,
1311  .cra_module = THIS_MODULE,
1312  .cra_u = {
1313  .ablkcipher = {
1314  .min_keysize = DES_KEY_SIZE,
1315  .max_keysize = DES_KEY_SIZE,
1316  .setkey = des_ablkcipher_setkey,
1317  .encrypt = cryp_blk_encrypt,
1318  .decrypt = cryp_blk_decrypt,
1319  }
1320  }
1321  }
1322  },
1323  {
1324  .algomode = CRYP_ALGO_TDES_CBC,
1325  .crypto = {
1326  .cra_name = "cbc(des3_ede)",
1327  .cra_driver_name = "cbc-des3_ede-ux500",
1328  .cra_priority = 300,
1329  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1330  CRYPTO_ALG_ASYNC,
1331  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1332  .cra_ctxsize = sizeof(struct cryp_ctx),
1333  .cra_alignmask = 3,
1334  .cra_type = &crypto_ablkcipher_type,
1335  .cra_init = cryp_cra_init,
1336  .cra_module = THIS_MODULE,
1337  .cra_u = {
1338  .ablkcipher = {
1339  .min_keysize = DES3_EDE_KEY_SIZE,
1340  .max_keysize = DES3_EDE_KEY_SIZE,
1341  .setkey = des3_ablkcipher_setkey,
1342  .encrypt = cryp_blk_encrypt,
1343  .decrypt = cryp_blk_decrypt,
1344  .ivsize = DES3_EDE_BLOCK_SIZE,
1345  }
1346  }
1347  }
1348  }
1349 };
1350 
1354 static int cryp_algs_register_all(void)
1355 {
1356  int ret;
1357  int i;
1358  int count;
1359 
1360  pr_debug("[%s]", __func__);
1361 
1362  for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
1363  ret = crypto_register_alg(&cryp_algs[i].crypto);
1364  if (ret) {
1365  count = i;
1366  pr_err("[%s] alg registration failed",
1367  cryp_algs[i].crypto.cra_driver_name);
1368  goto unreg;
1369  }
1370  }
1371  return 0;
1372 unreg:
1373  for (i = 0; i < count; i++)
1374  crypto_unregister_alg(&cryp_algs[i].crypto);
1375  return ret;
1376 }
1377 
1381 static void cryp_algs_unregister_all(void)
1382 {
1383  int i;
1384 
1385  pr_debug(DEV_DBG_NAME " [%s]", __func__);
1386 
1387  for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
1388  crypto_unregister_alg(&cryp_algs[i].crypto);
1389 }
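/*
 * Editorial sketch (not part of this driver): once cryp_algs_register_all()
 * has run, the transformations above are reachable through the generic
 * kernel crypto API under names such as "cbc(aes)". The fragment below,
 * kept under #if 0, shows the 3.x-era ablkcipher calls a kernel-side user
 * could make; the function name, zeroed key/IV and in-place buffer are
 * placeholders made up for illustration only.
 */
#if 0
static int cryp_core_usage_sketch(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 buf[AES_BLOCK_SIZE] = { 0 };
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * One block, encrypted in place; this driver completes synchronously,
	 * a fully asynchronous caller would also set a completion callback.
	 */
	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif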
1390 
1391 static int ux500_cryp_probe(struct platform_device *pdev)
1392 {
1393  int ret;
1394  int cryp_error = 0;
1395  struct resource *res = NULL;
1396  struct resource *res_irq = NULL;
1397  struct cryp_device_data *device_data;
1398  struct cryp_protection_config prot = {
1400  };
1401  struct device *dev = &pdev->dev;
1402 
1403  dev_dbg(dev, "[%s]", __func__);
1404  device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC);
1405  if (!device_data) {
1406  dev_err(dev, "[%s]: kzalloc() failed!", __func__);
1407  ret = -ENOMEM;
1408  goto out;
1409  }
1410 
1411  device_data->dev = dev;
1412  device_data->current_ctx = NULL;
1413 
1414  /* Grab the DMA configuration from platform data. */
1415  mem_to_engine = &((struct cryp_platform_data *)
1416  dev->platform_data)->mem_to_engine;
1417  engine_to_mem = &((struct cryp_platform_data *)
1418  dev->platform_data)->engine_to_mem;
1419 
1420  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1421  if (!res) {
1422  dev_err(dev, "[%s]: platform_get_resource() failed",
1423  __func__);
1424  ret = -ENODEV;
1425  goto out_kfree;
1426  }
1427 
1428  res = request_mem_region(res->start, resource_size(res), pdev->name);
1429  if (res == NULL) {
1430  dev_err(dev, "[%s]: request_mem_region() failed",
1431  __func__);
1432  ret = -EBUSY;
1433  goto out_kfree;
1434  }
1435 
1436  device_data->base = ioremap(res->start, resource_size(res));
1437  if (!device_data->base) {
1438  dev_err(dev, "[%s]: ioremap failed!", __func__);
1439  ret = -ENOMEM;
1440  goto out_free_mem;
1441  }
1442 
1443  spin_lock_init(&device_data->ctx_lock);
1444  spin_lock_init(&device_data->power_state_spinlock);
1445 
1446  /* Enable power for CRYP hardware block */
1447  device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
1448  if (IS_ERR(device_data->pwr_regulator)) {
1449  dev_err(dev, "[%s]: could not get cryp regulator", __func__);
1450  ret = PTR_ERR(device_data->pwr_regulator);
1451  device_data->pwr_regulator = NULL;
1452  goto out_unmap;
1453  }
1454 
1455  /* Enable the clk for CRYP hardware block */
1456  device_data->clk = clk_get(&pdev->dev, NULL);
1457  if (IS_ERR(device_data->clk)) {
1458  dev_err(dev, "[%s]: clk_get() failed!", __func__);
1459  ret = PTR_ERR(device_data->clk);
1460  goto out_regulator;
1461  }
1462 
1463  /* Enable device power (and clock) */
1464  ret = cryp_enable_power(device_data->dev, device_data, false);
1465  if (ret) {
1466  dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1467  goto out_clk;
1468  }
1469 
1470  cryp_error = cryp_check(device_data);
1471  if (cryp_error != 0) {
1472  dev_err(dev, "[%s]: cryp_init() failed!", __func__);
1473  ret = -EINVAL;
1474  goto out_power;
1475  }
1476 
1477  cryp_error = cryp_configure_protection(device_data, &prot);
1478  if (cryp_error != 0) {
1479  dev_err(dev, "[%s]: cryp_configure_protection() failed!",
1480  __func__);
1481  ret = -EINVAL;
1482  goto out_power;
1483  }
1484 
1485  res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1486  if (!res_irq) {
1487  dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
1488  __func__);
1489  ret = -ENODEV;
1490  goto out_power;
1491  }
1492 
1493  ret = request_irq(res_irq->start,
1494  cryp_interrupt_handler,
1495  0,
1496  "cryp1",
1497  device_data);
1498  if (ret) {
1499  dev_err(dev, "[%s]: Unable to request IRQ", __func__);
1500  goto out_power;
1501  }
1502 
1503  if (cryp_mode == CRYP_MODE_DMA)
1504  cryp_dma_setup_channel(device_data, dev);
1505 
1506  platform_set_drvdata(pdev, device_data);
1507 
1508  /* Put the new device into the device list... */
1509  klist_add_tail(&device_data->list_node, &driver_data.device_list);
1510 
1511  /* ... and signal that a new device is available. */
1512  up(&driver_data.device_allocation);
1513 
1514  atomic_set(&session_id, 1);
1515 
1516  ret = cryp_algs_register_all();
1517  if (ret) {
1518  dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
1519  __func__);
1520  goto out_power;
1521  }
1522 
1523  return 0;
1524 
1525 out_power:
1526  cryp_disable_power(device_data->dev, device_data, false);
1527 
1528 out_clk:
1529  clk_put(device_data->clk);
1530 
1531 out_regulator:
1532  regulator_put(device_data->pwr_regulator);
1533 
1534 out_unmap:
1535  iounmap(device_data->base);
1536 
1537 out_free_mem:
1538  release_mem_region(res->start, resource_size(res));
1539 
1540 out_kfree:
1541  kfree(device_data);
1542 out:
1543  return ret;
1544 }
1545 
1546 static int ux500_cryp_remove(struct platform_device *pdev)
1547 {
1548  struct resource *res = NULL;
1549  struct resource *res_irq = NULL;
1550  struct cryp_device_data *device_data;
1551 
1552  dev_dbg(&pdev->dev, "[%s]", __func__);
1553  device_data = platform_get_drvdata(pdev);
1554  if (!device_data) {
1555  dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1556  __func__);
1557  return -ENOMEM;
1558  }
1559 
1560  /* Try to decrease the number of available devices. */
1561  if (down_trylock(&driver_data.device_allocation))
1562  return -EBUSY;
1563 
1564  /* Check that the device is free */
1565  spin_lock(&device_data->ctx_lock);
1566  /* current_ctx allocates a device, NULL = unallocated */
1567  if (device_data->current_ctx) {
1568  /* The device is busy */
1569  spin_unlock(&device_data->ctx_lock);
1570  /* Return the device to the pool. */
1571  up(&driver_data.device_allocation);
1572  return -EBUSY;
1573  }
1574 
1575  spin_unlock(&device_data->ctx_lock);
1576 
1577  /* Remove the device from the list */
1578  if (klist_node_attached(&device_data->list_node))
1579  klist_remove(&device_data->list_node);
1580 
1581  /* If this was the last device, remove the services */
1582  if (list_empty(&driver_data.device_list.k_list))
1583  cryp_algs_unregister_all();
1584 
1585  res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1586  if (!res_irq)
1587  dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1588  __func__);
1589  else {
1590  disable_irq(res_irq->start);
1591  free_irq(res_irq->start, device_data);
1592  }
1593 
1594  if (cryp_disable_power(&pdev->dev, device_data, false))
1595  dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1596  __func__);
1597 
1598  clk_put(device_data->clk);
1599  regulator_put(device_data->pwr_regulator);
1600 
1601  iounmap(device_data->base);
1602 
1603  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1604  if (res)
1605  release_mem_region(res->start, res->end - res->start + 1);
1606 
1607  kfree(device_data);
1608 
1609  return 0;
1610 }
1611 
1612 static void ux500_cryp_shutdown(struct platform_device *pdev)
1613 {
1614  struct resource *res_irq = NULL;
1615  struct cryp_device_data *device_data;
1616 
1617  dev_dbg(&pdev->dev, "[%s]", __func__);
1618 
1619  device_data = platform_get_drvdata(pdev);
1620  if (!device_data) {
1621  dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1622  __func__);
1623  return;
1624  }
1625 
1626  /* Check that the device is free */
1627  spin_lock(&device_data->ctx_lock);
1628  /* current_ctx allocates a device, NULL = unallocated */
1629  if (!device_data->current_ctx) {
1630  if (down_trylock(&driver_data.device_allocation))
1631  dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
1632  "Shutting down anyway...", __func__);
1638  device_data->current_ctx++;
1639  }
1640  spin_unlock(&device_data->ctx_lock);
1641 
1642  /* Remove the device from the list */
1643  if (klist_node_attached(&device_data->list_node))
1644  klist_remove(&device_data->list_node);
1645 
1646  /* If this was the last device, remove the services */
1647  if (list_empty(&driver_data.device_list.k_list))
1648  cryp_algs_unregister_all();
1649 
1650  res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1651  if (!res_irq)
1652  dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1653  __func__);
1654  else {
1655  disable_irq(res_irq->start);
1656  free_irq(res_irq->start, device_data);
1657  }
1658 
1659  if (cryp_disable_power(&pdev->dev, device_data, false))
1660  dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1661  __func__);
1662 
1663 }
1664 
1665 static int ux500_cryp_suspend(struct device *dev)
1666 {
1667  int ret;
1668  struct platform_device *pdev = to_platform_device(dev);
1669  struct cryp_device_data *device_data;
1670  struct resource *res_irq;
1671  struct cryp_ctx *temp_ctx = NULL;
1672 
1673  dev_dbg(dev, "[%s]", __func__);
1674 
1675  /* Handle state? */
1676  device_data = platform_get_drvdata(pdev);
1677  if (!device_data) {
1678  dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1679  return -ENOMEM;
1680  }
1681 
1682  res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1683  if (!res_irq)
1684  dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
1685  else
1686  disable_irq(res_irq->start);
1687 
1688  spin_lock(&device_data->ctx_lock);
1689  if (!device_data->current_ctx)
1690  device_data->current_ctx++;
1691  spin_unlock(&device_data->ctx_lock);
1692 
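 /*
  * Editorial note: if the device was idle, current_ctx now holds the
  * dummy (NULL + 1) value set above; ++temp_ctx produces that same value,
  * so the branch below detects an unused device, takes it out of the
  * allocation pool and powers it down without saving any context.
  */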
1693  if (device_data->current_ctx == ++temp_ctx) {
1694  if (down_interruptible(&driver_data.device_allocation))
1695  dev_dbg(dev, "[%s]: down_interruptible() failed",
1696  __func__);
1697  ret = cryp_disable_power(dev, device_data, false);
1698 
1699  } else
1700  ret = cryp_disable_power(dev, device_data, true);
1701 
1702  if (ret)
1703  dev_err(dev, "[%s]: cryp_disable_power()", __func__);
1704 
1705  return ret;
1706 }
1707 
1708 static int ux500_cryp_resume(struct device *dev)
1709 {
1710  int ret = 0;
1711  struct platform_device *pdev = to_platform_device(dev);
1712  struct cryp_device_data *device_data;
1713  struct resource *res_irq;
1714  struct cryp_ctx *temp_ctx = NULL;
1715 
1716  dev_dbg(dev, "[%s]", __func__);
1717 
1718  device_data = platform_get_drvdata(pdev);
1719  if (!device_data) {
1720  dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1721  return -ENOMEM;
1722  }
1723 
1724  spin_lock(&device_data->ctx_lock);
1725  if (device_data->current_ctx == ++temp_ctx)
1726  device_data->current_ctx = NULL;
1727  spin_unlock(&device_data->ctx_lock);
1728 
1729 
1730  if (!device_data->current_ctx)
1731  up(&driver_data.device_allocation);
1732  else
1733  ret = cryp_enable_power(dev, device_data, true);
1734 
1735  if (ret)
1736  dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1737  else {
1738  res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1739  if (res_irq)
1740  enable_irq(res_irq->start);
1741  }
1742 
1743  return ret;
1744 }
1745 
1746 static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
1747 
1748 static struct platform_driver cryp_driver = {
1749  .probe = ux500_cryp_probe,
1750  .remove = ux500_cryp_remove,
1751  .shutdown = ux500_cryp_shutdown,
1752  .driver = {
1753  .owner = THIS_MODULE,
1754  .name = "cryp1",
1755  .pm = &ux500_cryp_pm,
1756  }
1757 };
1758 
1759 static int __init ux500_cryp_mod_init(void)
1760 {
1761  pr_debug("[%s] is called!", __func__);
1762  klist_init(&driver_data.device_list, NULL, NULL);
1763  /* Initialize the semaphore to 0 devices (locked state) */
1764  sema_init(&driver_data.device_allocation, 0);
1765  return platform_driver_register(&cryp_driver);
1766 }
1767 
1768 static void __exit ux500_cryp_mod_fini(void)
1769 {
1770  pr_debug("[%s] is called!", __func__);
1771  platform_driver_unregister(&cryp_driver);
1772  return;
1773 }
1774 
1775 module_init(ux500_cryp_mod_init);
1776 module_exit(ux500_cryp_mod_fini);
1777 
1778 module_param(cryp_mode, int, 0);
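/*
 * Editorial note: cryp_mode selects how data is moved to and from the engine
 * and is compared against the CRYP_MODE_POLLING, CRYP_MODE_INTERRUPT and
 * CRYP_MODE_DMA constants used above (the hw_crypt_noxts() comment refers to
 * cryp_mode = 2 as the DMA case); it defaults to 0 when not set at load time.
 */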
1779 
1780 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1781 MODULE_ALIAS("aes-all");
1782 MODULE_ALIAS("des-all");
1783 
1784 MODULE_LICENSE("GPL");