atmel-aes.c
1 /*
2  * Cryptographic API.
3  *
4  * Support for ATMEL AES HW acceleration.
5  *
6  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7  * Author: Nicolas Royer <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as published
11  * by the Free Software Foundation.
12  *
13  * Some ideas are from omap-aes.c driver.
14  */
15 
16 
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/io.h>
23 #include <linux/hw_random.h>
24 #include <linux/platform_device.h>
25 
26 #include <linux/device.h>
27 #include <linux/init.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/scatterlist.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/delay.h>
34 #include <linux/crypto.h>
35 #include <linux/cryptohash.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/algapi.h>
38 #include <crypto/aes.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <linux/platform_data/atmel-aes.h>
42 #include "atmel-aes-regs.h"
43 
44 #define CFB8_BLOCK_SIZE 1
45 #define CFB16_BLOCK_SIZE 2
46 #define CFB32_BLOCK_SIZE 4
47 #define CFB64_BLOCK_SIZE 8
48 
49 /* AES flags */
50 #define AES_FLAGS_MODE_MASK 0x01ff
51 #define AES_FLAGS_ENCRYPT BIT(0)
52 #define AES_FLAGS_CBC BIT(1)
53 #define AES_FLAGS_CFB BIT(2)
54 #define AES_FLAGS_CFB8 BIT(3)
55 #define AES_FLAGS_CFB16 BIT(4)
56 #define AES_FLAGS_CFB32 BIT(5)
57 #define AES_FLAGS_CFB64 BIT(6)
58 #define AES_FLAGS_OFB BIT(7)
59 #define AES_FLAGS_CTR BIT(8)
60 
61 #define AES_FLAGS_INIT BIT(16)
62 #define AES_FLAGS_DMA BIT(17)
63 #define AES_FLAGS_BUSY BIT(18)
64 
65 #define AES_FLAGS_DUALBUFF BIT(24)
66 
67 #define ATMEL_AES_QUEUE_LENGTH 1
68 #define ATMEL_AES_CACHE_SIZE 0
69 
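/*
 * Requests larger than this many bytes are pushed through the two DMA
 * channels; smaller requests are copied into buf_in/buf_out and moved by
 * the CPU through the IDATAR/ODATAR registers (see
 * atmel_aes_crypt_cpu_start() and atmel_aes_write_ctrl()).
 */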
70 #define ATMEL_AES_DMA_THRESHOLD 16
71 
72 
73 struct atmel_aes_dev;
74 
75 struct atmel_aes_ctx {
76  struct atmel_aes_dev *dd;
77 
78  int keylen;
79  u32 key[AES_KEYSIZE_256 / sizeof(u32)];
80 };
81 
82 struct atmel_aes_reqctx {
83  unsigned long mode;
84 };
85 
86 struct atmel_aes_dma {
87  struct dma_chan *chan;
88  struct dma_slave_config dma_conf;
89 };
90 
91 struct atmel_aes_dev {
92  struct list_head list;
93  unsigned long phys_base;
94  void __iomem *io_base;
95 
96  struct atmel_aes_ctx *ctx;
97  struct device *dev;
98  struct clk *iclk;
99  int irq;
100 
101  unsigned long flags;
102  int err;
103 
104  spinlock_t lock;
105  struct crypto_queue queue;
106 
107  struct tasklet_struct done_task;
108  struct tasklet_struct queue_task;
109 
110  struct ablkcipher_request *req;
111  size_t total;
112 
113  struct scatterlist *in_sg;
114  unsigned int nb_in_sg;
115 
116  struct scatterlist *out_sg;
117  unsigned int nb_out_sg;
118 
119  size_t bufcnt;
120 
121  u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
122  int dma_in;
123  struct atmel_aes_dma dma_lch_in;
124 
125  u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
126  int dma_out;
127  struct atmel_aes_dma dma_lch_out;
128 
129  u32 hw_version;
130 };
131 
132 struct atmel_aes_drv {
133  struct list_head dev_list;
134  spinlock_t lock;
135 };
136 
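/*
 * Driver-wide state: every probed AES instance is added to dev_list in
 * atmel_aes_probe(), and atmel_aes_find_dev() binds a crypto context to
 * the first instance it finds.
 */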
137 static struct atmel_aes_drv atmel_aes = {
138  .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
139  .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
140 };
141 
142 static int atmel_aes_sg_length(struct ablkcipher_request *req,
143  struct scatterlist *sg)
144 {
145  unsigned int total = req->nbytes;
146  int sg_nb;
147  unsigned int len;
148  struct scatterlist *sg_list;
149 
150  sg_nb = 0;
151  sg_list = sg;
152  total = req->nbytes;
153 
154  while (total) {
155  len = min(sg_list->length, total);
156 
157  sg_nb++;
158  total -= len;
159 
160  sg_list = sg_next(sg_list);
161  if (!sg_list)
162  total = 0;
163  }
164 
165  return sg_nb;
166 }
167 
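/* Relaxed MMIO accessors for the AES register block mapped at io_base. */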
168 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
169 {
170  return readl_relaxed(dd->io_base + offset);
171 }
172 
173 static inline void atmel_aes_write(struct atmel_aes_dev *dd,
174  u32 offset, u32 value)
175 {
176  writel_relaxed(value, dd->io_base + offset);
177 }
178 
179 static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
180  u32 *value, int count)
181 {
182  for (; count--; value++, offset += 4)
183  *value = atmel_aes_read(dd, offset);
184 }
185 
186 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
187  u32 *value, int count)
188 {
189  for (; count--; value++, offset += 4)
190  atmel_aes_write(dd, offset, *value);
191 }
192 
193 static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
194 {
195  atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
196 
197  if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
198  dd->flags |= AES_FLAGS_DUALBUFF;
199 }
200 
201 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
202 {
203  struct atmel_aes_dev *aes_dd = NULL;
204  struct atmel_aes_dev *tmp;
205 
206  spin_lock_bh(&atmel_aes.lock);
207  if (!ctx->dd) {
208  list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
209  aes_dd = tmp;
210  break;
211  }
212  ctx->dd = aes_dd;
213  } else {
214  aes_dd = ctx->dd;
215  }
216 
217  spin_unlock_bh(&atmel_aes.lock);
218 
219  return aes_dd;
220 }
221 
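/*
 * Enable the peripheral clock and, on first use, soft-reset the engine
 * and probe whether dual input buffering is available.
 */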
222 static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
223 {
224  clk_prepare_enable(dd->iclk);
225 
226  if (!(dd->flags & AES_FLAGS_INIT)) {
227  atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
228  atmel_aes_dualbuff_test(dd);
229  dd->flags |= AES_FLAGS_INIT;
230  dd->err = 0;
231  }
232 
233  return 0;
234 }
235 
236 static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
237 {
238  atmel_aes_hw_init(dd);
239 
240  dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
241 
242  clk_disable_unprepare(dd->iclk);
243 }
244 
245 static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
246 {
247  struct ablkcipher_request *req = dd->req;
248 
249  clk_disable_unprepare(dd->iclk);
250  dd->flags &= ~AES_FLAGS_BUSY;
251 
252  req->base.complete(&req->base, err);
253 }
254 
255 static void atmel_aes_dma_callback(void *data)
256 {
257  struct atmel_aes_dev *dd = data;
258 
259  /* dma_lch_out - completed */
260  tasklet_schedule(&dd->done_task);
261 }
262 
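/*
 * Map the source and destination scatterlists and queue one slave-DMA
 * descriptor per direction.  Only the output descriptor carries a
 * callback; its completion schedules the done tasklet.
 */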
263 static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
264 {
265  struct dma_async_tx_descriptor *in_desc, *out_desc;
266  int nb_dma_sg_in, nb_dma_sg_out;
267 
268  dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
269  if (!dd->nb_in_sg)
270  goto exit_err;
271 
272  nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
273  DMA_TO_DEVICE);
274  if (!nb_dma_sg_in)
275  goto exit_err;
276 
277  in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
278  nb_dma_sg_in, DMA_MEM_TO_DEV,
279  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
280 
281  if (!in_desc)
282  goto unmap_in;
283 
284  /* callback not needed */
285 
286  dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
287  if (!dd->nb_out_sg)
288  goto unmap_in;
289 
290  nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
291  DMA_FROM_DEVICE);
292  if (!nb_dma_sg_out)
293  goto unmap_out;
294 
295  out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
296  nb_dma_sg_out, DMA_DEV_TO_MEM,
297  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
298 
299  if (!out_desc)
300  goto unmap_out;
301 
302  out_desc->callback = atmel_aes_dma_callback;
303  out_desc->callback_param = dd;
304 
305  dd->total -= dd->req->nbytes;
306 
307  dmaengine_submit(out_desc);
308  dma_async_issue_pending(dd->dma_lch_out.chan);
309 
310  dmaengine_submit(in_desc);
311  dma_async_issue_pending(dd->dma_lch_in.chan);
312 
313  return 0;
314 
315 unmap_out:
316  dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
317  DMA_FROM_DEVICE);
318 unmap_in:
319  dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
320  DMA_TO_DEVICE);
321 exit_err:
322  return -EINVAL;
323 }
324 
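/*
 * PIO fallback for short requests: copy the input scatterlist into
 * buf_in, write it to the input data registers and let the DATARDY
 * interrupt signal completion.
 */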
325 static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
326 {
327  dd->flags &= ~AES_FLAGS_DMA;
328 
329  /* use cache buffers */
330  dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
331  if (!dd->nb_in_sg)
332  return -EINVAL;
333 
334  dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
335  if (!dd->nb_out_sg)
336  return -EINVAL;
337 
338  dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
339  dd->buf_in, dd->total);
340 
341  if (!dd->bufcnt)
342  return -EINVAL;
343 
344  dd->total -= dd->bufcnt;
345 
346  atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
347  atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
348  dd->bufcnt >> 2);
349 
350  return 0;
351 }
352 
353 static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
354 {
355  int err;
356 
357  if (dd->flags & AES_FLAGS_CFB8) {
358  dd->dma_lch_in.dma_conf.dst_addr_width =
359  DMA_SLAVE_BUSWIDTH_1_BYTE;
360  dd->dma_lch_out.dma_conf.src_addr_width =
361  DMA_SLAVE_BUSWIDTH_1_BYTE;
362  } else if (dd->flags & AES_FLAGS_CFB16) {
363  dd->dma_lch_in.dma_conf.dst_addr_width =
364  DMA_SLAVE_BUSWIDTH_2_BYTES;
365  dd->dma_lch_out.dma_conf.src_addr_width =
366  DMA_SLAVE_BUSWIDTH_2_BYTES;
367  } else {
368  dd->dma_lch_in.dma_conf.dst_addr_width =
369  DMA_SLAVE_BUSWIDTH_4_BYTES;
370  dd->dma_lch_out.dma_conf.src_addr_width =
371  DMA_SLAVE_BUSWIDTH_4_BYTES;
372  }
373 
374  dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
375  dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
376 
377  dd->flags |= AES_FLAGS_DMA;
378  err = atmel_aes_crypt_dma(dd);
379 
380  return err;
381 }
382 
383 static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
384 {
385  int err;
386  u32 valcr = 0, valmr = 0;
387 
388  err = atmel_aes_hw_init(dd);
389 
390  if (err)
391  return err;
392 
393  /* MR register must be set before IV registers */
394  if (dd->ctx->keylen == AES_KEYSIZE_128)
395  valmr |= AES_MR_KEYSIZE_128;
396  else if (dd->ctx->keylen == AES_KEYSIZE_192)
397  valmr |= AES_MR_KEYSIZE_192;
398  else
399  valmr |= AES_MR_KEYSIZE_256;
400 
401  if (dd->flags & AES_FLAGS_CBC) {
402  valmr |= AES_MR_OPMOD_CBC;
403  } else if (dd->flags & AES_FLAGS_CFB) {
404  valmr |= AES_MR_OPMOD_CFB;
405  if (dd->flags & AES_FLAGS_CFB8)
406  valmr |= AES_MR_CFBS_8b;
407  else if (dd->flags & AES_FLAGS_CFB16)
408  valmr |= AES_MR_CFBS_16b;
409  else if (dd->flags & AES_FLAGS_CFB32)
410  valmr |= AES_MR_CFBS_32b;
411  else if (dd->flags & AES_FLAGS_CFB64)
412  valmr |= AES_MR_CFBS_64b;
413  } else if (dd->flags & AES_FLAGS_OFB) {
414  valmr |= AES_MR_OPMOD_OFB;
415  } else if (dd->flags & AES_FLAGS_CTR) {
416  valmr |= AES_MR_OPMOD_CTR;
417  } else {
418  valmr |= AES_MR_OPMOD_ECB;
419  }
420 
421  if (dd->flags & AES_FLAGS_ENCRYPT)
422  valmr |= AES_MR_CYPHER_ENC;
423 
424  if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
425  valmr |= AES_MR_SMOD_IDATAR0;
426  if (dd->flags & AES_FLAGS_DUALBUFF)
427  valmr |= AES_MR_DUALBUFF;
428  } else {
429  valmr |= AES_MR_SMOD_AUTO;
430  }
431 
432  atmel_aes_write(dd, AES_CR, valcr);
433  atmel_aes_write(dd, AES_MR, valmr);
434 
435  atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
436  dd->ctx->keylen >> 2);
437 
438  if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
439  (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
440  dd->req->info) {
441  atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
442  }
443 
444  return 0;
445 }
446 
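/*
 * Queue handling: enqueue the new request (if any) and, unless the
 * hardware is already busy, dequeue the next one, program the key, mode
 * and IV, then start either the DMA or the CPU transfer.
 */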
447 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
448  struct ablkcipher_request *req)
449 {
450  struct crypto_async_request *async_req, *backlog;
451  struct atmel_aes_ctx *ctx;
452  struct atmel_aes_reqctx *rctx;
453  unsigned long flags;
454  int err, ret = 0;
455 
456  spin_lock_irqsave(&dd->lock, flags);
457  if (req)
458  ret = ablkcipher_enqueue_request(&dd->queue, req);
459  if (dd->flags & AES_FLAGS_BUSY) {
460  spin_unlock_irqrestore(&dd->lock, flags);
461  return ret;
462  }
463  backlog = crypto_get_backlog(&dd->queue);
464  async_req = crypto_dequeue_request(&dd->queue);
465  if (async_req)
466  dd->flags |= AES_FLAGS_BUSY;
467  spin_unlock_irqrestore(&dd->lock, flags);
468 
469  if (!async_req)
470  return ret;
471 
472  if (backlog)
473  backlog->complete(backlog, -EINPROGRESS);
474 
475  req = ablkcipher_request_cast(async_req);
476 
477  /* assign new request to device */
478  dd->req = req;
479  dd->total = req->nbytes;
480  dd->in_sg = req->src;
481  dd->out_sg = req->dst;
482 
483  rctx = ablkcipher_request_ctx(req);
484  ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
485  rctx->mode &= AES_FLAGS_MODE_MASK;
486  dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
487  dd->ctx = ctx;
488  ctx->dd = dd;
489 
490  err = atmel_aes_write_ctrl(dd);
491  if (!err) {
492  if (dd->total > ATMEL_AES_DMA_THRESHOLD)
493  err = atmel_aes_crypt_dma_start(dd);
494  else
495  err = atmel_aes_crypt_cpu_start(dd);
496  }
497  if (err) {
498  /* aes_task will not finish it, so do it here */
499  atmel_aes_finish_req(dd, err);
500  tasklet_schedule(&dd->queue_task);
501  }
502 
503  return ret;
504 }
505 
506 static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
507 {
508  int err = -EINVAL;
509 
510  if (dd->flags & AES_FLAGS_DMA) {
511  dma_unmap_sg(dd->dev, dd->out_sg,
512  dd->nb_out_sg, DMA_FROM_DEVICE);
513  dma_unmap_sg(dd->dev, dd->in_sg,
514  dd->nb_in_sg, DMA_TO_DEVICE);
515  err = 0;
516  }
517 
518  return err;
519 }
520 
521 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
522 {
523  struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
524  crypto_ablkcipher_reqtfm(req));
525  struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
526  struct atmel_aes_dev *dd;
527 
528  if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
529  pr_err("request size is not an exact number of AES blocks\n");
530  return -EINVAL;
531  }
532 
533  dd = atmel_aes_find_dev(ctx);
534  if (!dd)
535  return -ENODEV;
536 
537  rctx->mode = mode;
538 
539  return atmel_aes_handle_queue(dd, req);
540 }
541 
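/*
 * The two DMA channels are requested through the dmaengine framework
 * using the at_dma_slave data passed in via platform data; the filter
 * matches a channel belonging to the DMA controller named there.
 */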
542 static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
543 {
544  struct at_dma_slave *sl = slave;
545 
546  if (sl && sl->dma_dev == chan->device->dev) {
547  chan->private = sl;
548  return true;
549  } else {
550  return false;
551  }
552 }
553 
554 static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
555 {
556  int err = -ENOMEM;
557  struct aes_platform_data *pdata;
558  dma_cap_mask_t mask_in, mask_out;
559 
560  pdata = dd->dev->platform_data;
561 
562  if (pdata && pdata->dma_slave->txdata.dma_dev &&
563  pdata->dma_slave->rxdata.dma_dev) {
564 
565  /* Try to grab 2 DMA channels */
566  dma_cap_zero(mask_in);
567  dma_cap_set(DMA_SLAVE, mask_in);
568 
569  dd->dma_lch_in.chan = dma_request_channel(mask_in,
570  atmel_aes_filter, &pdata->dma_slave->rxdata);
571  if (!dd->dma_lch_in.chan)
572  goto err_dma_in;
573 
574  dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
575  dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
576  AES_IDATAR(0);
577  dd->dma_lch_in.dma_conf.src_maxburst = 1;
578  dd->dma_lch_in.dma_conf.dst_maxburst = 1;
579  dd->dma_lch_in.dma_conf.device_fc = false;
580 
581  dma_cap_zero(mask_out);
582  dma_cap_set(DMA_SLAVE, mask_out);
583  dd->dma_lch_out.chan = dma_request_channel(mask_out,
584  atmel_aes_filter, &pdata->dma_slave->txdata);
585  if (!dd->dma_lch_out.chan)
586  goto err_dma_out;
587 
588  dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
589  dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
590  AES_ODATAR(0);
591  dd->dma_lch_out.dma_conf.src_maxburst = 1;
592  dd->dma_lch_out.dma_conf.dst_maxburst = 1;
593  dd->dma_lch_out.dma_conf.device_fc = false;
594 
595  return 0;
596  } else {
597  return -ENODEV;
598  }
599 
600 err_dma_out:
601  dma_release_channel(dd->dma_lch_in.chan);
602 err_dma_in:
603  return err;
604 }
605 
606 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
607 {
608  dma_release_channel(dd->dma_lch_in.chan);
609  dma_release_channel(dd->dma_lch_out.chan);
610 }
611 
612 static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
613  unsigned int keylen)
614 {
615  struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
616 
617  if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
618  keylen != AES_KEYSIZE_256) {
619  crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
620  return -EINVAL;
621  }
622 
623  memcpy(ctx->key, key, keylen);
624  ctx->keylen = keylen;
625 
626  return 0;
627 }
628 
629 static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
630 {
631  return atmel_aes_crypt(req,
632  AES_FLAGS_ENCRYPT);
633 }
634 
635 static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
636 {
637  return atmel_aes_crypt(req,
638  0);
639 }
640 
641 static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
642 {
643  return atmel_aes_crypt(req,
644  AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
645 }
646 
647 static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
648 {
649  return atmel_aes_crypt(req,
650  AES_FLAGS_CBC);
651 }
652 
653 static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
654 {
655  return atmel_aes_crypt(req,
656  AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
657 }
658 
659 static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
660 {
661  return atmel_aes_crypt(req,
662  AES_FLAGS_OFB);
663 }
664 
665 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
666 {
667  return atmel_aes_crypt(req,
668  AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
669 }
670 
671 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
672 {
673  return atmel_aes_crypt(req,
674  AES_FLAGS_CFB);
675 }
676 
677 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
678 {
679  return atmel_aes_crypt(req,
680  AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
681 }
682 
683 static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
684 {
685  return atmel_aes_crypt(req,
686  AES_FLAGS_CFB | AES_FLAGS_CFB64);
687 }
688 
689 static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
690 {
691  return atmel_aes_crypt(req,
692  AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
693 }
694 
695 static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
696 {
697  return atmel_aes_crypt(req,
698  AES_FLAGS_CFB | AES_FLAGS_CFB32);
699 }
700 
701 static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
702 {
703  return atmel_aes_crypt(req,
704  AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
705 }
706 
707 static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
708 {
709  return atmel_aes_crypt(req,
710  AES_FLAGS_CFB | AES_FLAGS_CFB16);
711 }
712 
713 static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
714 {
715  return atmel_aes_crypt(req,
716  AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
717 }
718 
719 static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
720 {
721  return atmel_aes_crypt(req,
722  AES_FLAGS_CFB | AES_FLAGS_CFB8);
723 }
724 
725 static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
726 {
727  return atmel_aes_crypt(req,
728  AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
729 }
730 
731 static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
732 {
733  return atmel_aes_crypt(req,
734  AES_FLAGS_CTR);
735 }
736 
737 static int atmel_aes_cra_init(struct crypto_tfm *tfm)
738 {
739  tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
740 
741  return 0;
742 }
743 
744 static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
745 {
746 }
747 
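/*
 * Algorithms exported to the crypto API.  CFB64 lives in a separate
 * array because it is only registered when the IP reports hardware
 * version 0x130 or newer (see atmel_aes_register_algs()).
 */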
748 static struct crypto_alg aes_algs[] = {
749 {
750  .cra_name = "ecb(aes)",
751  .cra_driver_name = "atmel-ecb-aes",
752  .cra_priority = 100,
753  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
754  .cra_blocksize = AES_BLOCK_SIZE,
755  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
756  .cra_alignmask = 0x0,
757  .cra_type = &crypto_ablkcipher_type,
758  .cra_module = THIS_MODULE,
759  .cra_init = atmel_aes_cra_init,
760  .cra_exit = atmel_aes_cra_exit,
761  .cra_u.ablkcipher = {
762  .min_keysize = AES_MIN_KEY_SIZE,
763  .max_keysize = AES_MAX_KEY_SIZE,
764  .setkey = atmel_aes_setkey,
765  .encrypt = atmel_aes_ecb_encrypt,
766  .decrypt = atmel_aes_ecb_decrypt,
767  }
768 },
769 {
770  .cra_name = "cbc(aes)",
771  .cra_driver_name = "atmel-cbc-aes",
772  .cra_priority = 100,
773  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
774  .cra_blocksize = AES_BLOCK_SIZE,
775  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
776  .cra_alignmask = 0x0,
777  .cra_type = &crypto_ablkcipher_type,
778  .cra_module = THIS_MODULE,
779  .cra_init = atmel_aes_cra_init,
780  .cra_exit = atmel_aes_cra_exit,
781  .cra_u.ablkcipher = {
782  .min_keysize = AES_MIN_KEY_SIZE,
783  .max_keysize = AES_MAX_KEY_SIZE,
784  .ivsize = AES_BLOCK_SIZE,
785  .setkey = atmel_aes_setkey,
786  .encrypt = atmel_aes_cbc_encrypt,
787  .decrypt = atmel_aes_cbc_decrypt,
788  }
789 },
790 {
791  .cra_name = "ofb(aes)",
792  .cra_driver_name = "atmel-ofb-aes",
793  .cra_priority = 100,
794  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
795  .cra_blocksize = AES_BLOCK_SIZE,
796  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
797  .cra_alignmask = 0x0,
798  .cra_type = &crypto_ablkcipher_type,
799  .cra_module = THIS_MODULE,
800  .cra_init = atmel_aes_cra_init,
801  .cra_exit = atmel_aes_cra_exit,
802  .cra_u.ablkcipher = {
803  .min_keysize = AES_MIN_KEY_SIZE,
804  .max_keysize = AES_MAX_KEY_SIZE,
805  .ivsize = AES_BLOCK_SIZE,
806  .setkey = atmel_aes_setkey,
807  .encrypt = atmel_aes_ofb_encrypt,
808  .decrypt = atmel_aes_ofb_decrypt,
809  }
810 },
811 {
812  .cra_name = "cfb(aes)",
813  .cra_driver_name = "atmel-cfb-aes",
814  .cra_priority = 100,
815  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
816  .cra_blocksize = AES_BLOCK_SIZE,
817  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
818  .cra_alignmask = 0x0,
819  .cra_type = &crypto_ablkcipher_type,
820  .cra_module = THIS_MODULE,
821  .cra_init = atmel_aes_cra_init,
822  .cra_exit = atmel_aes_cra_exit,
823  .cra_u.ablkcipher = {
824  .min_keysize = AES_MIN_KEY_SIZE,
825  .max_keysize = AES_MAX_KEY_SIZE,
826  .ivsize = AES_BLOCK_SIZE,
827  .setkey = atmel_aes_setkey,
828  .encrypt = atmel_aes_cfb_encrypt,
829  .decrypt = atmel_aes_cfb_decrypt,
830  }
831 },
832 {
833  .cra_name = "cfb32(aes)",
834  .cra_driver_name = "atmel-cfb32-aes",
835  .cra_priority = 100,
836  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
837  .cra_blocksize = CFB32_BLOCK_SIZE,
838  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
839  .cra_alignmask = 0x0,
840  .cra_type = &crypto_ablkcipher_type,
841  .cra_module = THIS_MODULE,
842  .cra_init = atmel_aes_cra_init,
843  .cra_exit = atmel_aes_cra_exit,
844  .cra_u.ablkcipher = {
845  .min_keysize = AES_MIN_KEY_SIZE,
846  .max_keysize = AES_MAX_KEY_SIZE,
847  .ivsize = AES_BLOCK_SIZE,
848  .setkey = atmel_aes_setkey,
849  .encrypt = atmel_aes_cfb32_encrypt,
850  .decrypt = atmel_aes_cfb32_decrypt,
851  }
852 },
853 {
854  .cra_name = "cfb16(aes)",
855  .cra_driver_name = "atmel-cfb16-aes",
856  .cra_priority = 100,
857  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
858  .cra_blocksize = CFB16_BLOCK_SIZE,
859  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
860  .cra_alignmask = 0x0,
861  .cra_type = &crypto_ablkcipher_type,
862  .cra_module = THIS_MODULE,
863  .cra_init = atmel_aes_cra_init,
864  .cra_exit = atmel_aes_cra_exit,
865  .cra_u.ablkcipher = {
866  .min_keysize = AES_MIN_KEY_SIZE,
867  .max_keysize = AES_MAX_KEY_SIZE,
868  .ivsize = AES_BLOCK_SIZE,
869  .setkey = atmel_aes_setkey,
870  .encrypt = atmel_aes_cfb16_encrypt,
871  .decrypt = atmel_aes_cfb16_decrypt,
872  }
873 },
874 {
875  .cra_name = "cfb8(aes)",
876  .cra_driver_name = "atmel-cfb8-aes",
877  .cra_priority = 100,
878  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
879  .cra_blocksize = CFB8_BLOCK_SIZE,
880  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
881  .cra_alignmask = 0x0,
882  .cra_type = &crypto_ablkcipher_type,
883  .cra_module = THIS_MODULE,
884  .cra_init = atmel_aes_cra_init,
885  .cra_exit = atmel_aes_cra_exit,
886  .cra_u.ablkcipher = {
887  .min_keysize = AES_MIN_KEY_SIZE,
888  .max_keysize = AES_MAX_KEY_SIZE,
889  .ivsize = AES_BLOCK_SIZE,
890  .setkey = atmel_aes_setkey,
891  .encrypt = atmel_aes_cfb8_encrypt,
892  .decrypt = atmel_aes_cfb8_decrypt,
893  }
894 },
895 {
896  .cra_name = "ctr(aes)",
897  .cra_driver_name = "atmel-ctr-aes",
898  .cra_priority = 100,
899  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
900  .cra_blocksize = AES_BLOCK_SIZE,
901  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
902  .cra_alignmask = 0x0,
903  .cra_type = &crypto_ablkcipher_type,
904  .cra_module = THIS_MODULE,
905  .cra_init = atmel_aes_cra_init,
906  .cra_exit = atmel_aes_cra_exit,
907  .cra_u.ablkcipher = {
908  .min_keysize = AES_MIN_KEY_SIZE,
909  .max_keysize = AES_MAX_KEY_SIZE,
910  .ivsize = AES_BLOCK_SIZE,
911  .setkey = atmel_aes_setkey,
912  .encrypt = atmel_aes_ctr_encrypt,
913  .decrypt = atmel_aes_ctr_decrypt,
914  }
915 },
916 };
917 
918 static struct crypto_alg aes_cfb64_alg[] = {
919 {
920  .cra_name = "cfb64(aes)",
921  .cra_driver_name = "atmel-cfb64-aes",
922  .cra_priority = 100,
923  .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
924  .cra_blocksize = CFB64_BLOCK_SIZE,
925  .cra_ctxsize = sizeof(struct atmel_aes_ctx),
926  .cra_alignmask = 0x0,
927  .cra_type = &crypto_ablkcipher_type,
928  .cra_module = THIS_MODULE,
929  .cra_init = atmel_aes_cra_init,
930  .cra_exit = atmel_aes_cra_exit,
931  .cra_u.ablkcipher = {
932  .min_keysize = AES_MIN_KEY_SIZE,
933  .max_keysize = AES_MAX_KEY_SIZE,
934  .ivsize = AES_BLOCK_SIZE,
935  .setkey = atmel_aes_setkey,
936  .encrypt = atmel_aes_cfb64_encrypt,
937  .decrypt = atmel_aes_cfb64_decrypt,
938  }
939 },
940 };
941 
942 static void atmel_aes_queue_task(unsigned long data)
943 {
944  struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
945 
946  atmel_aes_handle_queue(dd, NULL);
947 }
948 
949 static void atmel_aes_done_task(unsigned long data)
950 {
951  struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
952  int err;
953 
954  if (!(dd->flags & AES_FLAGS_DMA)) {
955  atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
956  dd->bufcnt >> 2);
957 
958  if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
959  dd->buf_out, dd->bufcnt))
960  err = 0;
961  else
962  err = -EINVAL;
963 
964  goto cpu_end;
965  }
966 
967  err = atmel_aes_crypt_dma_stop(dd);
968 
969  err = dd->err ? : err;
970 
971  if (dd->total && !err) {
972  err = atmel_aes_crypt_dma_start(dd);
973  if (!err)
974  return; /* DMA started. Not finishing. */
975  }
976 
977 cpu_end:
978  atmel_aes_finish_req(dd, err);
979  atmel_aes_handle_queue(dd, NULL);
980 }
981 
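/*
 * Interrupt handler: mask the interrupts that fired and defer the real
 * work to the done tasklet; spurious interrupts are only reported.
 */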
982 static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
983 {
984  struct atmel_aes_dev *aes_dd = dev_id;
985  u32 reg;
986 
987  reg = atmel_aes_read(aes_dd, AES_ISR);
988  if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
989  atmel_aes_write(aes_dd, AES_IDR, reg);
990  if (AES_FLAGS_BUSY & aes_dd->flags)
991  tasklet_schedule(&aes_dd->done_task);
992  else
993  dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
994  return IRQ_HANDLED;
995  }
996 
997  return IRQ_NONE;
998 }
999 
1000 static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
1001 {
1002  int i;
1003 
1004  for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1005  crypto_unregister_alg(&aes_algs[i]);
1006  if (dd->hw_version >= 0x130)
1007  crypto_unregister_alg(&aes_cfb64_alg[0]);
1008 }
1009 
1010 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1011 {
1012  int err, i, j;
1013 
1014  for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1015  err = crypto_register_alg(&aes_algs[i]);
1016  if (err)
1017  goto err_aes_algs;
1018  }
1019 
1020  atmel_aes_hw_version_init(dd);
1021 
1022  if (dd->hw_version >= 0x130) {
1023  err = crypto_register_alg(&aes_cfb64_alg[0]);
1024  if (err)
1025  goto err_aes_cfb64_alg;
1026  }
1027 
1028  return 0;
1029 
1030 err_aes_cfb64_alg:
1031  i = ARRAY_SIZE(aes_algs);
1032 err_aes_algs:
1033  for (j = 0; j < i; j++)
1034  crypto_unregister_alg(&aes_algs[j]);
1035 
1036  return err;
1037 }
1038 
1039 static int __devinit atmel_aes_probe(struct platform_device *pdev)
1040 {
1041  struct atmel_aes_dev *aes_dd;
1042  struct aes_platform_data *pdata;
1043  struct device *dev = &pdev->dev;
1044  struct resource *aes_res;
1045  unsigned long aes_phys_size;
1046  int err;
1047 
1048  pdata = pdev->dev.platform_data;
1049  if (!pdata) {
1050  err = -ENXIO;
1051  goto aes_dd_err;
1052  }
1053 
1054  aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
1055  if (aes_dd == NULL) {
1056  dev_err(dev, "unable to alloc data struct.\n");
1057  err = -ENOMEM;
1058  goto aes_dd_err;
1059  }
1060 
1061  aes_dd->dev = dev;
1062 
1063  platform_set_drvdata(pdev, aes_dd);
1064 
1065  INIT_LIST_HEAD(&aes_dd->list);
1066 
1067  tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
1068  (unsigned long)aes_dd);
1069  tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
1070  (unsigned long)aes_dd);
1071 
1072  crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
1073 
1074  aes_dd->irq = -1;
1075 
1076  /* Get the base address */
1077  aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1078  if (!aes_res) {
1079  dev_err(dev, "no MEM resource info\n");
1080  err = -ENODEV;
1081  goto res_err;
1082  }
1083  aes_dd->phys_base = aes_res->start;
1084  aes_phys_size = resource_size(aes_res);
1085 
1086  /* Get the IRQ */
1087  aes_dd->irq = platform_get_irq(pdev, 0);
1088  if (aes_dd->irq < 0) {
1089  dev_err(dev, "no IRQ resource info\n");
1090  err = aes_dd->irq;
1091  goto aes_irq_err;
1092  }
1093 
1094  err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
1095  aes_dd);
1096  if (err) {
1097  dev_err(dev, "unable to request aes irq.\n");
1098  goto aes_irq_err;
1099  }
1100 
1101  /* Initializing the clock */
1102  aes_dd->iclk = clk_get(&pdev->dev, NULL);
1103  if (IS_ERR(aes_dd->iclk)) {
1104  dev_err(dev, "clock initialization failed.\n");
1105  err = PTR_ERR(aes_dd->iclk);
1106  goto clk_err;
1107  }
1108 
1109  aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
1110  if (!aes_dd->io_base) {
1111  dev_err(dev, "can't ioremap\n");
1112  err = -ENOMEM;
1113  goto aes_io_err;
1114  }
1115 
1116  err = atmel_aes_dma_init(aes_dd);
1117  if (err)
1118  goto err_aes_dma;
1119 
1120  spin_lock(&atmel_aes.lock);
1121  list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
1122  spin_unlock(&atmel_aes.lock);
1123 
1124  err = atmel_aes_register_algs(aes_dd);
1125  if (err)
1126  goto err_algs;
1127 
1128  dev_info(dev, "Atmel AES\n");
1129 
1130  return 0;
1131 
1132 err_algs:
1133  spin_lock(&atmel_aes.lock);
1134  list_del(&aes_dd->list);
1135  spin_unlock(&atmel_aes.lock);
1136  atmel_aes_dma_cleanup(aes_dd);
1137 err_aes_dma:
1138  iounmap(aes_dd->io_base);
1139 aes_io_err:
1140  clk_put(aes_dd->iclk);
1141 clk_err:
1142  free_irq(aes_dd->irq, aes_dd);
1143 aes_irq_err:
1144 res_err:
1145  tasklet_kill(&aes_dd->done_task);
1146  tasklet_kill(&aes_dd->queue_task);
1147  kfree(aes_dd);
1148  aes_dd = NULL;
1149 aes_dd_err:
1150  dev_err(dev, "initialization failed.\n");
1151 
1152  return err;
1153 }
1154 
1155 static int __devexit atmel_aes_remove(struct platform_device *pdev)
1156 {
1157  struct atmel_aes_dev *aes_dd;
1158 
1159  aes_dd = platform_get_drvdata(pdev);
1160  if (!aes_dd)
1161  return -ENODEV;
1162  spin_lock(&atmel_aes.lock);
1163  list_del(&aes_dd->list);
1164  spin_unlock(&atmel_aes.lock);
1165 
1166  atmel_aes_unregister_algs(aes_dd);
1167 
1168  tasklet_kill(&aes_dd->done_task);
1169  tasklet_kill(&aes_dd->queue_task);
1170 
1171  atmel_aes_dma_cleanup(aes_dd);
1172 
1173  iounmap(aes_dd->io_base);
1174 
1175  clk_put(aes_dd->iclk);
1176 
1177  if (aes_dd->irq > 0)
1178  free_irq(aes_dd->irq, aes_dd);
1179 
1180  kfree(aes_dd);
1181  aes_dd = NULL;
1182 
1183  return 0;
1184 }
1185 
1186 static struct platform_driver atmel_aes_driver = {
1187  .probe = atmel_aes_probe,
1188  .remove = __devexit_p(atmel_aes_remove),
1189  .driver = {
1190  .name = "atmel_aes",
1191  .owner = THIS_MODULE,
1192  },
1193 };
1194 
1195 module_platform_driver(atmel_aes_driver);
1196 
1197 MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
1198 MODULE_LICENSE("GPL v2");
1199 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
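/*
 * Illustrative sketch (not part of the driver): once "cbc(aes)" has been
 * registered above, another kernel module could reach this implementation
 * through the generic ablkcipher API roughly as below.  The key, IV,
 * scatterlists and the my_complete() callback are caller-supplied
 * placeholders, and the encrypt call may return -EINPROGRESS and finish
 * asynchronously via my_complete().
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, NULL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);		(handled by atmel_aes_cbc_encrypt)
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */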