Linux Kernel 3.7.1
geode-aes.c
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	/* Poll until the engine raises the pending bit or we time out */
	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}
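
/*
 * Note: do_crypt() passes src and dst through virt_to_phys(), so both
 * buffers must be physically contiguous and inside the kernel's linear
 * mapping; completion is busy-polled, bounded by AES_OP_TIMEOUT iterations.
 */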

static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}
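
/*
 * Illustration only (not part of the original driver): a minimal sketch of
 * how the rest of this file parameterizes geode_aes_crypt() for a single
 * 16-byte ECB block. The function and buffer names are hypothetical.
 */
#if 0	/* illustration, not driver code */
static void example_one_block_ecb(void)
{
	struct geode_aes_op op;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* all-zero demo key */
	u8 in[AES_MIN_BLOCK_SIZE] = { 0 };
	u8 out[AES_MIN_BLOCK_SIZE];

	memset(&op, 0, sizeof(op));
	memcpy(op.key, key, sizeof(key));
	op.keylen = AES_KEYSIZE_128;	/* only 128-bit keys run in hardware */
	op.src = in;
	op.dst = out;
	op.len = AES_MIN_BLOCK_SIZE;
	op.mode = AES_MODE_ECB;
	op.dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(&op);	/* returns the number of bytes processed */
}
#endif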

/* CRYPTO-API Functions */

static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
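
/*
 * Both setkey paths above follow the usual CRYPTO_ALG_NEED_FALLBACK
 * pattern: request flags are propagated down to the software fallback
 * before its setkey runs, and any result flags the fallback raises are
 * copied back so the caller sees them on the original tfm.
 */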

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"geode-aes",
	.cra_priority		=	300,
	.cra_alignmask		=	15,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_op),
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.cipher	=	{
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	geode_setkey_cip,
			.cia_encrypt		=	geode_encrypt,
			.cia_decrypt		=	geode_decrypt
		}
	}
};
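
/*
 * Illustration only: once geode_alg is registered, in-kernel users reach it
 * through the generic cipher API; "aes" resolves to this driver when its
 * priority wins. A minimal sketch, with error handling mostly elided:
 */
#if 0	/* illustration, not driver code */
static int example_cipher_user(void)
{
	struct crypto_cipher *tfm;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 in[AES_MIN_BLOCK_SIZE] = { 0 }, out[AES_MIN_BLOCK_SIZE];

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_cipher_setkey(tfm, key, sizeof(key));
	crypto_cipher_encrypt_one(tfm, out, in);	/* lands in geode_encrypt() */
	crypto_free_cipher(tfm);
	return 0;
}
#endif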

static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
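
/*
 * In both CBC paths above, op->iv points at the walk's IV buffer;
 * geode_aes_crypt() loads it into the engine before each operation and
 * reads the updated IV back afterwards, so chaining carries correctly
 * across successive walk chunks.
 */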

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-geode",
	.cra_priority		=	400,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_op),
	.cra_alignmask		=	15,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.blkcipher	=	{
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	geode_setkey_blk,
			.encrypt	=	geode_cbc_encrypt,
			.decrypt	=	geode_cbc_decrypt,
			.ivsize		=	AES_IV_LENGTH,
		}
	}
};
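
/*
 * Illustration only: a sketch of driving the registered "cbc(aes)"
 * transform through the 3.x blkcipher API. Assumes <linux/scatterlist.h>
 * for sg_init_one(); buffer names are hypothetical, error handling elided.
 */
#if 0	/* illustration, not driver code */
static int example_cbc_user(u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_IV_LENGTH] = { 0 };

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey(tfm, key, sizeof(key));
	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, len);

	crypto_blkcipher_encrypt(&desc, &sg, &sg, len);	/* in-place CBC */
	crypto_free_blkcipher(tfm);
	return 0;
}
#endif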

static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-geode",
	.cra_priority		=	400,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct geode_aes_op),
	.cra_alignmask		=	15,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.blkcipher	=	{
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	geode_setkey_blk,
			.encrypt	=	geode_ecb_encrypt,
			.decrypt	=	geode_ecb_decrypt,
		}
	}
};

static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}


static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = __devexit_p(geode_aes_remove)
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");