Linux Kernel 3.7.1
n2_core.c
1 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2  *
3  * Copyright (C) 2010, 2011 David S. Miller <[email protected]>
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/cpumask.h>
13 #include <linux/slab.h>
14 #include <linux/interrupt.h>
15 #include <linux/crypto.h>
16 #include <crypto/md5.h>
17 #include <crypto/sha.h>
18 #include <crypto/aes.h>
19 #include <crypto/des.h>
20 #include <linux/mutex.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>
23 
24 #include <crypto/internal/hash.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/algapi.h>
27 
28 #include <asm/hypervisor.h>
29 #include <asm/mdesc.h>
30 
31 #include "n2_core.h"
32 
33 #define DRV_MODULE_NAME "n2_crypto"
34 #define DRV_MODULE_VERSION "0.2"
35 #define DRV_MODULE_RELDATE "July 28, 2011"
36 
37 static char version[] __devinitdata =
38  DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39 
40 MODULE_AUTHOR("David S. Miller ([email protected])");
41 MODULE_DESCRIPTION("Niagara2 Crypto driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(DRV_MODULE_VERSION);
44 
45 #define N2_CRA_PRIORITY 200
46 
47 static DEFINE_MUTEX(spu_lock);
48 
49 struct spu_queue {
50  cpumask_t sharing;
51  unsigned long qhandle;
52 
53  spinlock_t lock;
54  u8 q_type;
55  void *q;
56  unsigned long head;
57  unsigned long tail;
58  struct list_head jobs;
59 
60  unsigned long devino;
61 
62  char irq_name[32];
63  unsigned int irq;
64 
65  struct list_head list;
66 };
67 
68 static struct spu_queue **cpu_to_cwq;
69 static struct spu_queue **cpu_to_mau;
70 
71 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72 {
73  if (q->q_type == HV_NCS_QTYPE_MAU) {
74  off += MAU_ENTRY_SIZE;
75  if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76  off = 0;
77  } else {
78  off += CWQ_ENTRY_SIZE;
79  if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80  off = 0;
81  }
82  return off;
83 }
84 
85 struct n2_request_common {
86  struct list_head entry;
87  unsigned int offset;
88 };
89 #define OFFSET_NOT_RUNNING (~(unsigned int)0)
90 
91 /* An async job request records the final tail value it used in
92  * n2_request_common->offset, test to see if that offset is in
93  * the range old_head, new_head, inclusive.
94  */
95 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96  unsigned long old_head, unsigned long new_head)
97 {
98  if (old_head <= new_head) {
99  if (offset > old_head && offset <= new_head)
100  return true;
101  } else {
102  if (offset > old_head || offset <= new_head)
103  return true;
104  }
105  return false;
106 }
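/* Illustrative sketch (hypothetical offsets, assuming the queue wraps at
 * 0x400 for the sake of the example): a job that recorded offset 0x40 is
 * considered finished once HEAD moves past it, even across the wrap:
 *
 *   job_finished(q, 0x40,  0x3c0, 0x80) -> true  (HEAD wrapped past 0x40)
 *   job_finished(q, 0x100, 0x3c0, 0x80) -> false (0x100 not yet reached)
 */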
107 
108 /* When the HEAD marker is unequal to the actual HEAD, we get
109  * a virtual device INO interrupt. We should process the
110  * completed CWQ entries and adjust the HEAD marker to clear
111  * the IRQ.
112  */
113 static irqreturn_t cwq_intr(int irq, void *dev_id)
114 {
115  unsigned long off, new_head, hv_ret;
116  struct spu_queue *q = dev_id;
117 
118  pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119  smp_processor_id(), q->qhandle);
120 
121  spin_lock(&q->lock);
122 
123  hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
124 
125  pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126  smp_processor_id(), new_head, hv_ret);
127 
128  for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129  /* XXX ... XXX */
130  }
131 
132  hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133  if (hv_ret == HV_EOK)
134  q->head = new_head;
135 
136  spin_unlock(&q->lock);
137 
138  return IRQ_HANDLED;
139 }
140 
141 static irqreturn_t mau_intr(int irq, void *dev_id)
142 {
143  struct spu_queue *q = dev_id;
144  unsigned long head, hv_ret;
145 
146  spin_lock(&q->lock);
147 
148  pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149  smp_processor_id(), q->qhandle);
150 
151  hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
152 
153  pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154  smp_processor_id(), head, hv_ret);
155 
155 
156  sun4v_ncs_sethead_marker(q->qhandle, head);
157 
158  spin_unlock(&q->lock);
159 
160  return IRQ_HANDLED;
161 }
162 
163 static void *spu_queue_next(struct spu_queue *q, void *cur)
164 {
165  return q->q + spu_next_offset(q, cur - q->q);
166 }
167 
168 static int spu_queue_num_free(struct spu_queue *q)
169 {
170  unsigned long head = q->head;
171  unsigned long tail = q->tail;
172  unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173  unsigned long diff;
174 
175  if (head > tail)
176  diff = head - tail;
177  else
178  diff = (end - tail) + head;
179 
180  return (diff / CWQ_ENTRY_SIZE) - 1;
181 }
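/* Worked example: when head == tail (empty ring), diff becomes
 * (end - tail) + head == end, so the function reports
 * CWQ_NUM_ENTRIES - 1 free slots; the trailing -1 keeps the tail from
 * ever catching up to the head.
 */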
182 
183 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184 {
185  int avail = spu_queue_num_free(q);
186 
187  if (avail >= num_entries)
188  return q->q + q->tail;
189 
190  return NULL;
191 }
192 
193 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
194 {
195  unsigned long hv_ret, new_tail;
196 
197  new_tail = spu_next_offset(q, last - q->q);
198 
199  hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200  if (hv_ret == HV_EOK)
201  q->tail = new_tail;
202  return hv_ret;
203 }
204 
205 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206  int enc_type, int auth_type,
207  unsigned int hash_len,
208  bool sfas, bool sob, bool eob, bool encrypt,
209  int opcode)
210 {
211  u64 word = (len - 1) & CONTROL_LEN;
212 
213  word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214  word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215  word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216  if (sfas)
217  word |= CONTROL_STORE_FINAL_AUTH_STATE;
218  if (sob)
219  word |= CONTROL_START_OF_BLOCK;
220  if (eob)
221  word |= CONTROL_END_OF_BLOCK;
222  if (encrypt)
223  word |= CONTROL_ENCRYPT;
224  if (hmac_key_len)
225  word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226  if (hash_len)
227  word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228 
229  return word;
230 }
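/* Example (mirrors the call made later in n2_do_async_digest()): an
 * auth-only pass over nbytes of data with no HMAC key builds its first
 * control word roughly as
 *
 *   control_word_base(nbytes, 0, 0, auth_type, digest_size,
 *                     false, true, false, false,
 *                     OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. start-of-block set here, with end-of-block OR'ed into the last
 * descriptor separately via CONTROL_END_OF_BLOCK.
 */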
231 
232 #if 0
233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
234 {
235  if (this_len >= 64 ||
236  qp->head != qp->tail)
237  return true;
238  return false;
239 }
240 #endif
241 
242 struct n2_ahash_alg {
243  struct list_head entry;
244  const char *hash_zero;
245  const u32 *hash_init;
246  u8 hw_op_hashsz;
247  u8 digest_size;
248  u8 auth_type;
249  u8 hmac_type;
250  struct ahash_alg alg;
251 };
252 
253 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
254 {
255  struct crypto_alg *alg = tfm->__crt_alg;
256  struct ahash_alg *ahash_alg;
257 
258  ahash_alg = container_of(alg, struct ahash_alg, halg.base);
259 
260  return container_of(ahash_alg, struct n2_ahash_alg, alg);
261 }
262 
263 struct n2_hmac_alg {
264  const char *child_alg;
265  struct n2_ahash_alg derived;
266 };
267 
268 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
269 {
270  struct crypto_alg *alg = tfm->__crt_alg;
271  struct ahash_alg *ahash_alg;
272 
273  ahash_alg = container_of(alg, struct ahash_alg, halg.base);
274 
275  return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
276 }
277 
278 struct n2_hash_ctx {
279  struct crypto_ahash *fallback_tfm;
280 };
281 
282 #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
283 
284 struct n2_hmac_ctx {
285  struct n2_hash_ctx base;
286 
287  struct crypto_shash *child_shash;
288 
289  int hash_key_len;
290  unsigned char hash_key[N2_HASH_KEY_MAX];
291 };
292 
293 struct n2_hash_req_ctx {
294  union {
295  struct md5_state md5;
296  struct sha1_state sha1;
297  struct sha256_state sha256;
298  } u;
299 
300  struct ahash_request fallback_req;
301 };
302 
303 static int n2_hash_async_init(struct ahash_request *req)
304 {
305  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
306  struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
307  struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
308 
309  ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
310  rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
311 
312  return crypto_ahash_init(&rctx->fallback_req);
313 }
314 
315 static int n2_hash_async_update(struct ahash_request *req)
316 {
317  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
318  struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319  struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
320 
321  ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
322  rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
323  rctx->fallback_req.nbytes = req->nbytes;
324  rctx->fallback_req.src = req->src;
325 
326  return crypto_ahash_update(&rctx->fallback_req);
327 }
328 
329 static int n2_hash_async_final(struct ahash_request *req)
330 {
331  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
332  struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
333  struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
334 
335  ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
336  rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
337  rctx->fallback_req.result = req->result;
338 
339  return crypto_ahash_final(&rctx->fallback_req);
340 }
341 
342 static int n2_hash_async_finup(struct ahash_request *req)
343 {
344  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
345  struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
346  struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
347 
348  ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
349  rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
350  rctx->fallback_req.nbytes = req->nbytes;
351  rctx->fallback_req.src = req->src;
352  rctx->fallback_req.result = req->result;
353 
354  return crypto_ahash_finup(&rctx->fallback_req);
355 }
356 
357 static int n2_hash_cra_init(struct crypto_tfm *tfm)
358 {
359  const char *fallback_driver_name = tfm->__crt_alg->cra_name;
360  struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
361  struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
362  struct crypto_ahash *fallback_tfm;
363  int err;
364 
365  fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
366  CRYPTO_ALG_NEED_FALLBACK);
367  if (IS_ERR(fallback_tfm)) {
368  pr_warning("Fallback driver '%s' could not be loaded!\n",
369  fallback_driver_name);
370  err = PTR_ERR(fallback_tfm);
371  goto out;
372  }
373 
374  crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
375  crypto_ahash_reqsize(fallback_tfm)));
376 
377  ctx->fallback_tfm = fallback_tfm;
378  return 0;
379 
380 out:
381  return err;
382 }
383 
384 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
385 {
386  struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
387  struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
388 
389  crypto_free_ahash(ctx->fallback_tfm);
390 }
391 
392 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
393 {
394  const char *fallback_driver_name = tfm->__crt_alg->cra_name;
395  struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
396  struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
397  struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
398  struct crypto_ahash *fallback_tfm;
399  struct crypto_shash *child_shash;
400  int err;
401 
402  fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
403  CRYPTO_ALG_NEED_FALLBACK);
404  if (IS_ERR(fallback_tfm)) {
405  pr_warning("Fallback driver '%s' could not be loaded!\n",
406  fallback_driver_name);
407  err = PTR_ERR(fallback_tfm);
408  goto out;
409  }
410 
411  child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
412  if (IS_ERR(child_shash)) {
413  pr_warning("Child shash '%s' could not be loaded!\n",
414  n2alg->child_alg);
415  err = PTR_ERR(child_shash);
416  goto out_free_fallback;
417  }
418 
419  crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
420  crypto_ahash_reqsize(fallback_tfm)));
421 
422  ctx->child_shash = child_shash;
423  ctx->base.fallback_tfm = fallback_tfm;
424  return 0;
425 
426 out_free_fallback:
427  crypto_free_ahash(fallback_tfm);
428 
429 out:
430  return err;
431 }
432 
433 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
434 {
435  struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
436  struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
437 
438  crypto_free_ahash(ctx->base.fallback_tfm);
439  crypto_free_shash(ctx->child_shash);
440 }
441 
442 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
443  unsigned int keylen)
444 {
445  struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
446  struct crypto_shash *child_shash = ctx->child_shash;
447  struct crypto_ahash *fallback_tfm;
448  struct {
449  struct shash_desc shash;
450  char ctx[crypto_shash_descsize(child_shash)];
451  } desc;
452  int err, bs, ds;
453 
454  fallback_tfm = ctx->base.fallback_tfm;
455  err = crypto_ahash_setkey(fallback_tfm, key, keylen);
456  if (err)
457  return err;
458 
459  desc.shash.tfm = child_shash;
460  desc.shash.flags = crypto_ahash_get_flags(tfm) &
461  CRYPTO_TFM_REQ_MAY_SLEEP;
462 
463  bs = crypto_shash_blocksize(child_shash);
464  ds = crypto_shash_digestsize(child_shash);
465  BUG_ON(ds > N2_HASH_KEY_MAX);
466  if (keylen > bs) {
467  err = crypto_shash_digest(&desc.shash, key, keylen,
468  ctx->hash_key);
469  if (err)
470  return err;
471  keylen = ds;
472  } else if (keylen <= N2_HASH_KEY_MAX)
473  memcpy(ctx->hash_key, key, keylen);
474 
475  ctx->hash_key_len = keylen;
476 
477  return err;
478 }
479 
480 static unsigned long wait_for_tail(struct spu_queue *qp)
481 {
482  unsigned long head, hv_ret;
483 
484  do {
485  hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
486  if (hv_ret != HV_EOK) {
487  pr_err("Hypervisor error on gethead\n");
488  break;
489  }
490  if (head == qp->tail) {
491  qp->head = head;
492  break;
493  }
494  } while (1);
495  return hv_ret;
496 }
497 
498 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
499  struct cwq_initial_entry *ent)
500 {
501  unsigned long hv_ret = spu_queue_submit(qp, ent);
502 
503  if (hv_ret == HV_EOK)
504  hv_ret = wait_for_tail(qp);
505 
506  return hv_ret;
507 }
508 
509 static int n2_do_async_digest(struct ahash_request *req,
510  unsigned int auth_type, unsigned int digest_size,
511  unsigned int result_size, void *hash_loc,
512  unsigned long auth_key, unsigned int auth_key_len)
513 {
514  struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
515  struct cwq_initial_entry *ent;
516  struct crypto_hash_walk walk;
517  struct spu_queue *qp;
518  unsigned long flags;
519  int err = -ENODEV;
520  int nbytes, cpu;
521 
522  /* The total effective length of the operation may not
523  * exceed 2^16.
524  */
525  if (unlikely(req->nbytes > (1 << 16))) {
526  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
527  struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
528 
529  ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
530  rctx->fallback_req.base.flags =
531  req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
532  rctx->fallback_req.nbytes = req->nbytes;
533  rctx->fallback_req.src = req->src;
534  rctx->fallback_req.result = req->result;
535 
536  return crypto_ahash_digest(&rctx->fallback_req);
537  }
538 
539  nbytes = crypto_hash_walk_first(req, &walk);
540 
541  cpu = get_cpu();
542  qp = cpu_to_cwq[cpu];
543  if (!qp)
544  goto out;
545 
546  spin_lock_irqsave(&qp->lock, flags);
547 
548  /* XXX can do better, improve this later by doing a by-hand scatterlist
549  * XXX walk, etc.
550  */
551  ent = qp->q + qp->tail;
552 
553  ent->control = control_word_base(nbytes, auth_key_len, 0,
554  auth_type, digest_size,
555  false, true, false, false,
556  OPCODE_INPLACE_BIT |
557  OPCODE_AUTH_MAC);
558  ent->src_addr = __pa(walk.data);
559  ent->auth_key_addr = auth_key;
560  ent->auth_iv_addr = __pa(hash_loc);
561  ent->final_auth_state_addr = 0UL;
562  ent->enc_key_addr = 0UL;
563  ent->enc_iv_addr = 0UL;
564  ent->dest_addr = __pa(hash_loc);
565 
566  nbytes = crypto_hash_walk_done(&walk, 0);
567  while (nbytes > 0) {
568  ent = spu_queue_next(qp, ent);
569 
570  ent->control = (nbytes - 1);
571  ent->src_addr = __pa(walk.data);
572  ent->auth_key_addr = 0UL;
573  ent->auth_iv_addr = 0UL;
574  ent->final_auth_state_addr = 0UL;
575  ent->enc_key_addr = 0UL;
576  ent->enc_iv_addr = 0UL;
577  ent->dest_addr = 0UL;
578 
579  nbytes = crypto_hash_walk_done(&walk, 0);
580  }
581  ent->control |= CONTROL_END_OF_BLOCK;
582 
583  if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
584  err = -EINVAL;
585  else
586  err = 0;
587 
588  spin_unlock_irqrestore(&qp->lock, flags);
589 
590  if (!err)
591  memcpy(req->result, hash_loc, result_size);
592 out:
593  put_cpu();
594 
595  return err;
596 }
597 
598 static int n2_hash_async_digest(struct ahash_request *req)
599 {
600  struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
601  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
602  int ds;
603 
604  ds = n2alg->digest_size;
605  if (unlikely(req->nbytes == 0)) {
606  memcpy(req->result, n2alg->hash_zero, ds);
607  return 0;
608  }
609  memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
610 
611  return n2_do_async_digest(req, n2alg->auth_type,
612  n2alg->hw_op_hashsz, ds,
613  &rctx->u, 0UL, 0);
614 }
615 
616 static int n2_hmac_async_digest(struct ahash_request *req)
617 {
618  struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
619  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
620  struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
621  struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
622  int ds;
623 
624  ds = n2alg->derived.digest_size;
625  if (unlikely(req->nbytes == 0) ||
626  unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
627  struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
628  struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
629 
630  ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
631  rctx->fallback_req.base.flags =
632  req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
633  rctx->fallback_req.nbytes = req->nbytes;
634  rctx->fallback_req.src = req->src;
635  rctx->fallback_req.result = req->result;
636 
637  return crypto_ahash_digest(&rctx->fallback_req);
638  }
639  memcpy(&rctx->u, n2alg->derived.hash_init,
640  n2alg->derived.hw_op_hashsz);
641 
642  return n2_do_async_digest(req, n2alg->derived.hmac_type,
643  n2alg->derived.hw_op_hashsz, ds,
644  &rctx->u,
645  __pa(&ctx->hash_key),
646  ctx->hash_key_len);
647 }
648 
649 struct n2_cipher_context {
650  int key_len;
651  int enc_type;
652  union {
653  u8 aes[AES_MAX_KEY_SIZE];
654  u8 des[DES_KEY_SIZE];
655  u8 des3[3 * DES_KEY_SIZE];
656  u8 arc4[258]; /* S-box, X, Y */
657  } key;
658 };
659 
660 #define N2_CHUNK_ARR_LEN 16
661 
662 struct n2_crypto_chunk {
663  struct list_head entry;
664  unsigned long iv_paddr : 44;
665  unsigned long arr_len : 20;
666  unsigned long dest_paddr;
667  unsigned long dest_final;
668  struct {
669  unsigned long src_paddr : 44;
670  unsigned long src_len : 20;
671  } arr[N2_CHUNK_ARR_LEN];
672 };
673 
674 struct n2_request_context {
675  struct ablkcipher_walk walk;
676  struct list_head chunk_list;
677  struct n2_crypto_chunk chunk;
678  u8 temp_iv[16];
679 };
680 
681 /* The SPU allows some level of flexibility for partial cipher blocks
682  * being specified in a descriptor.
683  *
684  * It merely requires that every descriptor's length field is at least
685  * as large as the cipher block size. This means that a cipher block
686  * can span at most 2 descriptors. However, this does not allow a
687  * partial block to span into the final descriptor as that would
688  * violate the rule (since every descriptor's length must be at least
689  * the block size). So, for example, assuming an 8 byte block size:
690  *
691  * 0xe --> 0xa --> 0x8
692  *
693  * is a valid length sequence, whereas:
694  *
695  * 0xe --> 0xb --> 0x7
696  *
697  * is not a valid sequence.
698  */
699 
700 struct n2_cipher_alg {
701  struct list_head entry;
702  u8 enc_type;
703  struct crypto_alg alg;
704 };
705 
706 static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
707 {
708  struct crypto_alg *alg = tfm->__crt_alg;
709 
710  return container_of(alg, struct n2_cipher_alg, alg);
711 }
712 
713 struct n2_cipher_request_context {
714  struct ablkcipher_walk walk;
715 };
716 
717 static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
718  unsigned int keylen)
719 {
720  struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
721  struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
722  struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
723 
724  ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
725 
726  switch (keylen) {
727  case AES_KEYSIZE_128:
728  ctx->enc_type |= ENC_TYPE_ALG_AES128;
729  break;
730  case AES_KEYSIZE_192:
731  ctx->enc_type |= ENC_TYPE_ALG_AES192;
732  break;
733  case AES_KEYSIZE_256:
734  ctx->enc_type |= ENC_TYPE_ALG_AES256;
735  break;
736  default:
737  crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
738  return -EINVAL;
739  }
740 
741  ctx->key_len = keylen;
742  memcpy(ctx->key.aes, key, keylen);
743  return 0;
744 }
745 
746 static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
747  unsigned int keylen)
748 {
749  struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
750  struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
751  struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
752  u32 tmp[DES_EXPKEY_WORDS];
753  int err;
754 
755  ctx->enc_type = n2alg->enc_type;
756 
757  if (keylen != DES_KEY_SIZE) {
758  crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
759  return -EINVAL;
760  }
761 
762  err = des_ekey(tmp, key);
763  if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
764  tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
765  return -EINVAL;
766  }
767 
768  ctx->key_len = keylen;
769  memcpy(ctx->key.des, key, keylen);
770  return 0;
771 }
772 
773 static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
774  unsigned int keylen)
775 {
776  struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
777  struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
778  struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
779 
780  ctx->enc_type = n2alg->enc_type;
781 
782  if (keylen != (3 * DES_KEY_SIZE)) {
783  crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
784  return -EINVAL;
785  }
786  ctx->key_len = keylen;
787  memcpy(ctx->key.des3, key, keylen);
788  return 0;
789 }
790 
791 static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
792  unsigned int keylen)
793 {
794  struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
795  struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
796  struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
797  u8 *s = ctx->key.arc4;
798  u8 *x = s + 256;
799  u8 *y = x + 1;
800  int i, j, k;
801 
802  ctx->enc_type = n2alg->enc_type;
803 
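 /* Standard RC4 key scheduling: initialize S to the identity
  * permutation, then swap entries driven by the key bytes. X and Y
  * (stored right after the 256-byte S-box) start at zero for the
  * stream generation performed by the hardware.
  */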
804  j = k = 0;
805  *x = 0;
806  *y = 0;
807  for (i = 0; i < 256; i++)
808  s[i] = i;
809  for (i = 0; i < 256; i++) {
810  u8 a = s[i];
811  j = (j + key[k] + a) & 0xff;
812  s[i] = s[j];
813  s[j] = a;
814  if (++k >= keylen)
815  k = 0;
816  }
817 
818  return 0;
819 }
820 
821 static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
822 {
823  int this_len = nbytes;
824 
825  this_len -= (nbytes & (block_size - 1));
826  return this_len > (1 << 16) ? (1 << 16) : this_len;
827 }
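/* Worked example (hypothetical sizes): with an 8-byte block size,
 * cipher_descriptor_len(0x1001c, 8) first drops the 4 trailing
 * partial-block bytes (0x1001c -> 0x10018) and then clamps to the
 * 64KB descriptor limit, returning 0x10000; the remainder is carried
 * into the next descriptor by n2_compute_chunks().
 */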
828 
829 static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
830  struct spu_queue *qp, bool encrypt)
831 {
832  struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
833  struct cwq_initial_entry *ent;
834  bool in_place;
835  int i;
836 
837  ent = spu_queue_alloc(qp, cp->arr_len);
838  if (!ent) {
839  pr_info("queue_alloc() of %d fails\n",
840  cp->arr_len);
841  return -EBUSY;
842  }
843 
844  in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
845 
846  ent->control = control_word_base(cp->arr[0].src_len,
847  0, ctx->enc_type, 0, 0,
848  false, true, false, encrypt,
849  OPCODE_ENCRYPT |
850  (in_place ? OPCODE_INPLACE_BIT : 0));
851  ent->src_addr = cp->arr[0].src_paddr;
852  ent->auth_key_addr = 0UL;
853  ent->auth_iv_addr = 0UL;
854  ent->final_auth_state_addr = 0UL;
855  ent->enc_key_addr = __pa(&ctx->key);
856  ent->enc_iv_addr = cp->iv_paddr;
857  ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
858 
859  for (i = 1; i < cp->arr_len; i++) {
860  ent = spu_queue_next(qp, ent);
861 
862  ent->control = cp->arr[i].src_len - 1;
863  ent->src_addr = cp->arr[i].src_paddr;
864  ent->auth_key_addr = 0UL;
865  ent->auth_iv_addr = 0UL;
866  ent->final_auth_state_addr = 0UL;
867  ent->enc_key_addr = 0UL;
868  ent->enc_iv_addr = 0UL;
869  ent->dest_addr = 0UL;
870  }
871  ent->control |= CONTROL_END_OF_BLOCK;
872 
873  return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
874 }
875 
876 static int n2_compute_chunks(struct ablkcipher_request *req)
877 {
878  struct n2_request_context *rctx = ablkcipher_request_ctx(req);
879  struct ablkcipher_walk *walk = &rctx->walk;
880  struct n2_crypto_chunk *chunk;
881  unsigned long dest_prev;
882  unsigned int tot_len;
883  bool prev_in_place;
884  int err, nbytes;
885 
886  ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
887  err = ablkcipher_walk_phys(req, walk);
888  if (err)
889  return err;
890 
891  INIT_LIST_HEAD(&rctx->chunk_list);
892 
893  chunk = &rctx->chunk;
894  INIT_LIST_HEAD(&chunk->entry);
895 
896  chunk->iv_paddr = 0UL;
897  chunk->arr_len = 0;
898  chunk->dest_paddr = 0UL;
899 
900  prev_in_place = false;
901  dest_prev = ~0UL;
902  tot_len = 0;
903 
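 /* Walk the scatterlists and coalesce physically contiguous spans into
  * chunks. A new chunk is started whenever the in-place property
  * changes, the destination stops being contiguous, the per-chunk
  * descriptor array fills up, or the running length would exceed the
  * 64KB limit of a single control word.
  */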
904  while ((nbytes = walk->nbytes) != 0) {
905  unsigned long dest_paddr, src_paddr;
906  bool in_place;
907  int this_len;
908 
909  src_paddr = (page_to_phys(walk->src.page) +
910  walk->src.offset);
911  dest_paddr = (page_to_phys(walk->dst.page) +
912  walk->dst.offset);
913  in_place = (src_paddr == dest_paddr);
914  this_len = cipher_descriptor_len(nbytes, walk->blocksize);
915 
916  if (chunk->arr_len != 0) {
917  if (in_place != prev_in_place ||
918  (!prev_in_place &&
919  dest_paddr != dest_prev) ||
920  chunk->arr_len == N2_CHUNK_ARR_LEN ||
921  tot_len + this_len > (1 << 16)) {
922  chunk->dest_final = dest_prev;
923  list_add_tail(&chunk->entry,
924  &rctx->chunk_list);
925  chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
926  if (!chunk) {
927  err = -ENOMEM;
928  break;
929  }
930  INIT_LIST_HEAD(&chunk->entry);
931  }
932  }
933  if (chunk->arr_len == 0) {
934  chunk->dest_paddr = dest_paddr;
935  tot_len = 0;
936  }
937  chunk->arr[chunk->arr_len].src_paddr = src_paddr;
938  chunk->arr[chunk->arr_len].src_len = this_len;
939  chunk->arr_len++;
940 
941  dest_prev = dest_paddr + this_len;
942  prev_in_place = in_place;
943  tot_len += this_len;
944 
945  err = ablkcipher_walk_done(req, walk, nbytes - this_len);
946  if (err)
947  break;
948  }
949  if (!err && chunk->arr_len != 0) {
950  chunk->dest_final = dest_prev;
951  list_add_tail(&chunk->entry, &rctx->chunk_list);
952  }
953 
954  return err;
955 }
956 
957 static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
958 {
959  struct n2_request_context *rctx = ablkcipher_request_ctx(req);
960  struct n2_crypto_chunk *c, *tmp;
961 
962  if (final_iv)
963  memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
964 
965  ablkcipher_walk_complete(&rctx->walk);
966  list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
967  list_del(&c->entry);
968  if (unlikely(c != &rctx->chunk))
969  kfree(c);
970  }
971 
972 }
973 
974 static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
975 {
976  struct n2_request_context *rctx = ablkcipher_request_ctx(req);
977  struct crypto_tfm *tfm = req->base.tfm;
978  int err = n2_compute_chunks(req);
979  struct n2_crypto_chunk *c, *tmp;
980  unsigned long flags, hv_ret;
981  struct spu_queue *qp;
982 
983  if (err)
984  return err;
985 
986  qp = cpu_to_cwq[get_cpu()];
987  err = -ENODEV;
988  if (!qp)
989  goto out;
990 
991  spin_lock_irqsave(&qp->lock, flags);
992 
993  list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
994  err = __n2_crypt_chunk(tfm, c, qp, encrypt);
995  if (err)
996  break;
997  list_del(&c->entry);
998  if (unlikely(c != &rctx->chunk))
999  kfree(c);
1000  }
1001  if (!err) {
1002  hv_ret = wait_for_tail(qp);
1003  if (hv_ret != HV_EOK)
1004  err = -EINVAL;
1005  }
1006 
1007  spin_unlock_irqrestore(&qp->lock, flags);
1008 
1009 out:
1010  put_cpu();
1011 
1012  n2_chunk_complete(req, NULL);
1013  return err;
1014 }
1015 
1016 static int n2_encrypt_ecb(struct ablkcipher_request *req)
1017 {
1018  return n2_do_ecb(req, true);
1019 }
1020 
1021 static int n2_decrypt_ecb(struct ablkcipher_request *req)
1022 {
1023  return n2_do_ecb(req, false);
1024 }
1025 
1026 static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
1027 {
1028  struct n2_request_context *rctx = ablkcipher_request_ctx(req);
1029  struct crypto_tfm *tfm = req->base.tfm;
1030  unsigned long flags, hv_ret, iv_paddr;
1031  int err = n2_compute_chunks(req);
1032  struct n2_crypto_chunk *c, *tmp;
1033  struct spu_queue *qp;
1034  void *final_iv_addr;
1035 
1036  final_iv_addr = NULL;
1037 
1038  if (err)
1039  return err;
1040 
1041  qp = cpu_to_cwq[get_cpu()];
1042  err = -ENODEV;
1043  if (!qp)
1044  goto out;
1045 
1046  spin_lock_irqsave(&qp->lock, flags);
1047 
1048  if (encrypt) {
1049  iv_paddr = __pa(rctx->walk.iv);
1050  list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1051  entry) {
1052  c->iv_paddr = iv_paddr;
1053  err = __n2_crypt_chunk(tfm, c, qp, true);
1054  if (err)
1055  break;
1056  iv_paddr = c->dest_final - rctx->walk.blocksize;
1057  list_del(&c->entry);
1058  if (unlikely(c != &rctx->chunk))
1059  kfree(c);
1060  }
1061  final_iv_addr = __va(iv_paddr);
1062  } else {
1063  list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1064  entry) {
1065  if (c == &rctx->chunk) {
1066  iv_paddr = __pa(rctx->walk.iv);
1067  } else {
1068  iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1069  tmp->arr[tmp->arr_len-1].src_len -
1070  rctx->walk.blocksize);
1071  }
1072  if (!final_iv_addr) {
1073  unsigned long pa;
1074 
1075  pa = (c->arr[c->arr_len-1].src_paddr +
1076  c->arr[c->arr_len-1].src_len -
1077  rctx->walk.blocksize);
1078  final_iv_addr = rctx->temp_iv;
1079  memcpy(rctx->temp_iv, __va(pa),
1080  rctx->walk.blocksize);
1081  }
1082  c->iv_paddr = iv_paddr;
1083  err = __n2_crypt_chunk(tfm, c, qp, false);
1084  if (err)
1085  break;
1086  list_del(&c->entry);
1087  if (unlikely(c != &rctx->chunk))
1088  kfree(c);
1089  }
1090  }
1091  if (!err) {
1092  hv_ret = wait_for_tail(qp);
1093  if (hv_ret != HV_EOK)
1094  err = -EINVAL;
1095  }
1096 
1097  spin_unlock_irqrestore(&qp->lock, flags);
1098 
1099 out:
1100  put_cpu();
1101 
1102  n2_chunk_complete(req, err ? NULL : final_iv_addr);
1103  return err;
1104 }
1105 
1106 static int n2_encrypt_chaining(struct ablkcipher_request *req)
1107 {
1108  return n2_do_chaining(req, true);
1109 }
1110 
1111 static int n2_decrypt_chaining(struct ablkcipher_request *req)
1112 {
1113  return n2_do_chaining(req, false);
1114 }
1115 
1116 struct n2_cipher_tmpl {
1117  const char *name;
1118  const char *drv_name;
1119  u8 block_size;
1120  u8 enc_type;
1121  struct ablkcipher_alg ablkcipher;
1122 };
1123 
1124 static const struct n2_cipher_tmpl cipher_tmpls[] = {
1125  /* ARC4: only ECB is supported (chaining bits ignored) */
1126  { .name = "ecb(arc4)",
1127  .drv_name = "ecb-arc4",
1128  .block_size = 1,
1129  .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1130  ENC_TYPE_CHAINING_ECB),
1131  .ablkcipher = {
1132  .min_keysize = 1,
1133  .max_keysize = 256,
1134  .setkey = n2_arc4_setkey,
1135  .encrypt = n2_encrypt_ecb,
1136  .decrypt = n2_decrypt_ecb,
1137  },
1138  },
1139 
1140  /* DES: ECB CBC and CFB are supported */
1141  { .name = "ecb(des)",
1142  .drv_name = "ecb-des",
1143  .block_size = DES_BLOCK_SIZE,
1144  .enc_type = (ENC_TYPE_ALG_DES |
1145  ENC_TYPE_CHAINING_ECB),
1146  .ablkcipher = {
1147  .min_keysize = DES_KEY_SIZE,
1148  .max_keysize = DES_KEY_SIZE,
1149  .setkey = n2_des_setkey,
1150  .encrypt = n2_encrypt_ecb,
1151  .decrypt = n2_decrypt_ecb,
1152  },
1153  },
1154  { .name = "cbc(des)",
1155  .drv_name = "cbc-des",
1156  .block_size = DES_BLOCK_SIZE,
1157  .enc_type = (ENC_TYPE_ALG_DES |
1158  ENC_TYPE_CHAINING_CBC),
1159  .ablkcipher = {
1160  .ivsize = DES_BLOCK_SIZE,
1161  .min_keysize = DES_KEY_SIZE,
1162  .max_keysize = DES_KEY_SIZE,
1163  .setkey = n2_des_setkey,
1164  .encrypt = n2_encrypt_chaining,
1165  .decrypt = n2_decrypt_chaining,
1166  },
1167  },
1168  { .name = "cfb(des)",
1169  .drv_name = "cfb-des",
1170  .block_size = DES_BLOCK_SIZE,
1171  .enc_type = (ENC_TYPE_ALG_DES |
1172  ENC_TYPE_CHAINING_CFB),
1173  .ablkcipher = {
1174  .min_keysize = DES_KEY_SIZE,
1175  .max_keysize = DES_KEY_SIZE,
1176  .setkey = n2_des_setkey,
1177  .encrypt = n2_encrypt_chaining,
1178  .decrypt = n2_decrypt_chaining,
1179  },
1180  },
1181 
1182  /* 3DES: ECB CBC and CFB are supported */
1183  { .name = "ecb(des3_ede)",
1184  .drv_name = "ecb-3des",
1185  .block_size = DES_BLOCK_SIZE,
1186  .enc_type = (ENC_TYPE_ALG_3DES |
1187  ENC_TYPE_CHAINING_ECB),
1188  .ablkcipher = {
1189  .min_keysize = 3 * DES_KEY_SIZE,
1190  .max_keysize = 3 * DES_KEY_SIZE,
1191  .setkey = n2_3des_setkey,
1192  .encrypt = n2_encrypt_ecb,
1193  .decrypt = n2_decrypt_ecb,
1194  },
1195  },
1196  { .name = "cbc(des3_ede)",
1197  .drv_name = "cbc-3des",
1198  .block_size = DES_BLOCK_SIZE,
1199  .enc_type = (ENC_TYPE_ALG_3DES |
1200  ENC_TYPE_CHAINING_CBC),
1201  .ablkcipher = {
1202  .ivsize = DES_BLOCK_SIZE,
1203  .min_keysize = 3 * DES_KEY_SIZE,
1204  .max_keysize = 3 * DES_KEY_SIZE,
1205  .setkey = n2_3des_setkey,
1206  .encrypt = n2_encrypt_chaining,
1207  .decrypt = n2_decrypt_chaining,
1208  },
1209  },
1210  { .name = "cfb(des3_ede)",
1211  .drv_name = "cfb-3des",
1212  .block_size = DES_BLOCK_SIZE,
1213  .enc_type = (ENC_TYPE_ALG_3DES |
1214  ENC_TYPE_CHAINING_CFB),
1215  .ablkcipher = {
1216  .min_keysize = 3 * DES_KEY_SIZE,
1217  .max_keysize = 3 * DES_KEY_SIZE,
1218  .setkey = n2_3des_setkey,
1219  .encrypt = n2_encrypt_chaining,
1220  .decrypt = n2_decrypt_chaining,
1221  },
1222  },
1223  /* AES: ECB CBC and CTR are supported */
1224  { .name = "ecb(aes)",
1225  .drv_name = "ecb-aes",
1226  .block_size = AES_BLOCK_SIZE,
1227  .enc_type = (ENC_TYPE_ALG_AES128 |
1228  ENC_TYPE_CHAINING_ECB),
1229  .ablkcipher = {
1230  .min_keysize = AES_MIN_KEY_SIZE,
1231  .max_keysize = AES_MAX_KEY_SIZE,
1232  .setkey = n2_aes_setkey,
1233  .encrypt = n2_encrypt_ecb,
1234  .decrypt = n2_decrypt_ecb,
1235  },
1236  },
1237  { .name = "cbc(aes)",
1238  .drv_name = "cbc-aes",
1239  .block_size = AES_BLOCK_SIZE,
1240  .enc_type = (ENC_TYPE_ALG_AES128 |
1241  ENC_TYPE_CHAINING_CBC),
1242  .ablkcipher = {
1243  .ivsize = AES_BLOCK_SIZE,
1244  .min_keysize = AES_MIN_KEY_SIZE,
1245  .max_keysize = AES_MAX_KEY_SIZE,
1246  .setkey = n2_aes_setkey,
1247  .encrypt = n2_encrypt_chaining,
1248  .decrypt = n2_decrypt_chaining,
1249  },
1250  },
1251  { .name = "ctr(aes)",
1252  .drv_name = "ctr-aes",
1253  .block_size = AES_BLOCK_SIZE,
1254  .enc_type = (ENC_TYPE_ALG_AES128 |
1255  ENC_TYPE_CHAINING_COUNTER),
1256  .ablkcipher = {
1257  .ivsize = AES_BLOCK_SIZE,
1258  .min_keysize = AES_MIN_KEY_SIZE,
1259  .max_keysize = AES_MAX_KEY_SIZE,
1260  .setkey = n2_aes_setkey,
1261  .encrypt = n2_encrypt_chaining,
1262  .decrypt = n2_encrypt_chaining,
1263  },
1264  },
1265 
1266 };
1267 #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1268 
1269 static LIST_HEAD(cipher_algs);
1270 
1271 struct n2_hash_tmpl {
1272  const char *name;
1273  const char *hash_zero;
1274  const u32 *hash_init;
1275  u8 hw_op_hashsz;
1276  u8 digest_size;
1277  u8 block_size;
1278  u8 auth_type;
1279  u8 hmac_type;
1280 };
1281 
1282 static const char md5_zero[MD5_DIGEST_SIZE] = {
1283  0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
1284  0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
1285 };
1286 static const u32 md5_init[MD5_HASH_WORDS] = {
1287  cpu_to_le32(0x67452301),
1288  cpu_to_le32(0xefcdab89),
1289  cpu_to_le32(0x98badcfe),
1290  cpu_to_le32(0x10325476),
1291 };
1292 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
1293  0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
1294  0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
1295  0x07, 0x09
1296 };
1297 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
1298  SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1299 };
1300 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
1301  0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
1302  0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
1303  0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
1304  0x1b, 0x78, 0x52, 0xb8, 0x55
1305 };
1306 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
1307  SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1308  SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1309 };
1310 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
1311  0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
1312  0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
1313  0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
1314  0x2f
1315 };
1316 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
1317  SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1318  SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1319 };
1320 
1321 static const struct n2_hash_tmpl hash_tmpls[] = {
1322  { .name = "md5",
1323  .hash_zero = md5_zero,
1324  .hash_init = md5_init,
1325  .auth_type = AUTH_TYPE_MD5,
1326  .hmac_type = AUTH_TYPE_HMAC_MD5,
1327  .hw_op_hashsz = MD5_DIGEST_SIZE,
1328  .digest_size = MD5_DIGEST_SIZE,
1329  .block_size = MD5_HMAC_BLOCK_SIZE },
1330  { .name = "sha1",
1331  .hash_zero = sha1_zero,
1332  .hash_init = sha1_init,
1333  .auth_type = AUTH_TYPE_SHA1,
1334  .hmac_type = AUTH_TYPE_HMAC_SHA1,
1335  .hw_op_hashsz = SHA1_DIGEST_SIZE,
1336  .digest_size = SHA1_DIGEST_SIZE,
1337  .block_size = SHA1_BLOCK_SIZE },
1338  { .name = "sha256",
1339  .hash_zero = sha256_zero,
1340  .hash_init = sha256_init,
1341  .auth_type = AUTH_TYPE_SHA256,
1342  .hmac_type = AUTH_TYPE_HMAC_SHA256,
1343  .hw_op_hashsz = SHA256_DIGEST_SIZE,
1344  .digest_size = SHA256_DIGEST_SIZE,
1345  .block_size = SHA256_BLOCK_SIZE },
1346  { .name = "sha224",
1347  .hash_zero = sha224_zero,
1348  .hash_init = sha224_init,
1349  .auth_type = AUTH_TYPE_SHA256,
1350  .hmac_type = AUTH_TYPE_RESERVED,
1351  .hw_op_hashsz = SHA256_DIGEST_SIZE,
1352  .digest_size = SHA224_DIGEST_SIZE,
1353  .block_size = SHA224_BLOCK_SIZE },
1354 };
1355 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1356 
1357 static LIST_HEAD(ahash_algs);
1358 static LIST_HEAD(hmac_algs);
1359 
1360 static int algs_registered;
1361 
1362 static void __n2_unregister_algs(void)
1363 {
1364  struct n2_cipher_alg *cipher, *cipher_tmp;
1365  struct n2_ahash_alg *alg, *alg_tmp;
1366  struct n2_hmac_alg *hmac, *hmac_tmp;
1367 
1368  list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1369  crypto_unregister_alg(&cipher->alg);
1370  list_del(&cipher->entry);
1371  kfree(cipher);
1372  }
1373  list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1374  crypto_unregister_ahash(&hmac->derived.alg);
1375  list_del(&hmac->derived.entry);
1376  kfree(hmac);
1377  }
1378  list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1379  crypto_unregister_ahash(&alg->alg);
1380  list_del(&alg->entry);
1381  kfree(alg);
1382  }
1383 }
1384 
1385 static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1386 {
1387  tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1388  return 0;
1389 }
1390 
1391 static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1392 {
1393  struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1394  struct crypto_alg *alg;
1395  int err;
1396 
1397  if (!p)
1398  return -ENOMEM;
1399 
1400  alg = &p->alg;
1401 
1402  snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1403  snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1404  alg->cra_priority = N2_CRA_PRIORITY;
1405  alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1406  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
1407  alg->cra_blocksize = tmpl->block_size;
1408  p->enc_type = tmpl->enc_type;
1409  alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1410  alg->cra_type = &crypto_ablkcipher_type;
1411  alg->cra_u.ablkcipher = tmpl->ablkcipher;
1412  alg->cra_init = n2_cipher_cra_init;
1413  alg->cra_module = THIS_MODULE;
1414 
1415  list_add(&p->entry, &cipher_algs);
1416  err = crypto_register_alg(alg);
1417  if (err) {
1418  pr_err("%s alg registration failed\n", alg->cra_name);
1419  list_del(&p->entry);
1420  kfree(p);
1421  } else {
1422  pr_info("%s alg registered\n", alg->cra_name);
1423  }
1424  return err;
1425 }
1426 
1427 static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1428 {
1429  struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1430  struct ahash_alg *ahash;
1431  struct crypto_alg *base;
1432  int err;
1433 
1434  if (!p)
1435  return -ENOMEM;
1436 
1437  p->child_alg = n2ahash->alg.halg.base.cra_name;
1438  memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1439  INIT_LIST_HEAD(&p->derived.entry);
1440 
1441  ahash = &p->derived.alg;
1442  ahash->digest = n2_hmac_async_digest;
1443  ahash->setkey = n2_hmac_async_setkey;
1444 
1445  base = &ahash->halg.base;
1446  snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1447  snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1448 
1449  base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1450  base->cra_init = n2_hmac_cra_init;
1451  base->cra_exit = n2_hmac_cra_exit;
1452 
1453  list_add(&p->derived.entry, &hmac_algs);
1454  err = crypto_register_ahash(ahash);
1455  if (err) {
1456  pr_err("%s alg registration failed\n", base->cra_name);
1457  list_del(&p->derived.entry);
1458  kfree(p);
1459  } else {
1460  pr_info("%s alg registered\n", base->cra_name);
1461  }
1462  return err;
1463 }
1464 
1465 static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1466 {
1467  struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1468  struct hash_alg_common *halg;
1469  struct crypto_alg *base;
1470  struct ahash_alg *ahash;
1471  int err;
1472 
1473  if (!p)
1474  return -ENOMEM;
1475 
1476  p->hash_zero = tmpl->hash_zero;
1477  p->hash_init = tmpl->hash_init;
1478  p->auth_type = tmpl->auth_type;
1479  p->hmac_type = tmpl->hmac_type;
1480  p->hw_op_hashsz = tmpl->hw_op_hashsz;
1481  p->digest_size = tmpl->digest_size;
1482 
1483  ahash = &p->alg;
1484  ahash->init = n2_hash_async_init;
1485  ahash->update = n2_hash_async_update;
1486  ahash->final = n2_hash_async_final;
1487  ahash->finup = n2_hash_async_finup;
1488  ahash->digest = n2_hash_async_digest;
1489 
1490  halg = &ahash->halg;
1491  halg->digestsize = tmpl->digest_size;
1492 
1493  base = &halg->base;
1494  snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1495  snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1496  base->cra_priority = N2_CRA_PRIORITY;
1497  base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
1498  CRYPTO_ALG_KERN_DRIVER_ONLY |
1499  CRYPTO_ALG_NEED_FALLBACK;
1500  base->cra_blocksize = tmpl->block_size;
1501  base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1502  base->cra_module = THIS_MODULE;
1503  base->cra_init = n2_hash_cra_init;
1504  base->cra_exit = n2_hash_cra_exit;
1505 
1506  list_add(&p->entry, &ahash_algs);
1507  err = crypto_register_ahash(ahash);
1508  if (err) {
1509  pr_err("%s alg registration failed\n", base->cra_name);
1510  list_del(&p->entry);
1511  kfree(p);
1512  } else {
1513  pr_info("%s alg registered\n", base->cra_name);
1514  }
1515  if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1516  err = __n2_register_one_hmac(p);
1517  return err;
1518 }
1519 
1520 static int __devinit n2_register_algs(void)
1521 {
1522  int i, err = 0;
1523 
1524  mutex_lock(&spu_lock);
1525  if (algs_registered++)
1526  goto out;
1527 
1528  for (i = 0; i < NUM_HASH_TMPLS; i++) {
1529  err = __n2_register_one_ahash(&hash_tmpls[i]);
1530  if (err) {
1531  __n2_unregister_algs();
1532  goto out;
1533  }
1534  }
1535  for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1536  err = __n2_register_one_cipher(&cipher_tmpls[i]);
1537  if (err) {
1538  __n2_unregister_algs();
1539  goto out;
1540  }
1541  }
1542 
1543 out:
1544  mutex_unlock(&spu_lock);
1545  return err;
1546 }
1547 
1548 static void __devexit n2_unregister_algs(void)
1549 {
1550  mutex_lock(&spu_lock);
1551  if (!--algs_registered)
1552  __n2_unregister_algs();
1553  mutex_unlock(&spu_lock);
1554 }
1555 
1556 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1557  * a devino. This isn't very useful to us because all of the
1558  * interrupts listed in the device_node have been translated to
1559  * Linux virtual IRQ cookie numbers.
1560  *
1561  * So we have to back-translate, going through the 'intr' and 'ino'
1562  * property tables of the n2cp MDESC node, matching it with the OF
1563  * 'interrupts' property entries, in order to figure out which
1564  * devino goes to which already-translated IRQ.
1565  */
1566 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1567  unsigned long dev_ino)
1568 {
1569  const unsigned int *dev_intrs;
1570  unsigned int intr;
1571  int i;
1572 
1573  for (i = 0; i < ip->num_intrs; i++) {
1574  if (ip->ino_table[i].ino == dev_ino)
1575  break;
1576  }
1577  if (i == ip->num_intrs)
1578  return -ENODEV;
1579 
1580  intr = ip->ino_table[i].intr;
1581 
1582  dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1583  if (!dev_intrs)
1584  return -ENODEV;
1585 
1586  for (i = 0; i < dev->archdata.num_irqs; i++) {
1587  if (dev_intrs[i] == intr)
1588  return i;
1589  }
1590 
1591  return -ENODEV;
1592 }
1593 
1594 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1595  const char *irq_name, struct spu_queue *p,
1596  irq_handler_t handler)
1597 {
1598  unsigned long herr;
1599  int index;
1600 
1601  herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1602  if (herr)
1603  return -EINVAL;
1604 
1605  index = find_devino_index(dev, ip, p->devino);
1606  if (index < 0)
1607  return index;
1608 
1609  p->irq = dev->archdata.irqs[index];
1610 
1611  sprintf(p->irq_name, "%s-%d", irq_name, index);
1612 
1613  return request_irq(p->irq, handler, 0, p->irq_name, p);
1614 }
1615 
1616 static struct kmem_cache *queue_cache[2];
1617 
1618 static void *new_queue(unsigned long q_type)
1619 {
1620  return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1621 }
1622 
1623 static void free_queue(void *p, unsigned long q_type)
1624 {
1625  return kmem_cache_free(queue_cache[q_type - 1], p);
1626 }
1627 
1628 static int queue_cache_init(void)
1629 {
1630  if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1631  queue_cache[HV_NCS_QTYPE_MAU - 1] =
1632  kmem_cache_create("mau_queue",
1633  (MAU_NUM_ENTRIES *
1634  MAU_ENTRY_SIZE),
1635  MAU_ENTRY_SIZE, 0, NULL);
1636  if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1637  return -ENOMEM;
1638 
1639  if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1640  queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1641  kmem_cache_create("cwq_queue",
1642  (CWQ_NUM_ENTRIES *
1643  CWQ_ENTRY_SIZE),
1644  CWQ_ENTRY_SIZE, 0, NULL);
1645  if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1646  kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1647  return -ENOMEM;
1648  }
1649  return 0;
1650 }
1651 
1652 static void queue_cache_destroy(void)
1653 {
1654  kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1655  kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1656 }
1657 
1658 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1659 {
1660  cpumask_var_t old_allowed;
1661  unsigned long hv_ret;
1662 
1663  if (cpumask_empty(&p->sharing))
1664  return -EINVAL;
1665 
1666  if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1667  return -ENOMEM;
1668 
1669  cpumask_copy(old_allowed, &current->cpus_allowed);
1670 
1671  set_cpus_allowed_ptr(current, &p->sharing);
1672 
1673  hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1674  CWQ_NUM_ENTRIES, &p->qhandle);
1675  if (!hv_ret)
1676  sun4v_ncs_sethead_marker(p->qhandle, 0);
1677 
1678  set_cpus_allowed_ptr(current, old_allowed);
1679 
1680  free_cpumask_var(old_allowed);
1681 
1682  return (hv_ret ? -EINVAL : 0);
1683 }
1684 
1685 static int spu_queue_setup(struct spu_queue *p)
1686 {
1687  int err;
1688 
1689  p->q = new_queue(p->q_type);
1690  if (!p->q)
1691  return -ENOMEM;
1692 
1693  err = spu_queue_register(p, p->q_type);
1694  if (err) {
1695  free_queue(p->q, p->q_type);
1696  p->q = NULL;
1697  }
1698 
1699  return err;
1700 }
1701 
1702 static void spu_queue_destroy(struct spu_queue *p)
1703 {
1704  unsigned long hv_ret;
1705 
1706  if (!p->q)
1707  return;
1708 
1709  hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1710 
1711  if (!hv_ret)
1712  free_queue(p->q, p->q_type);
1713 }
1714 
1715 static void spu_list_destroy(struct list_head *list)
1716 {
1717  struct spu_queue *p, *n;
1718 
1719  list_for_each_entry_safe(p, n, list, list) {
1720  int i;
1721 
1722  for (i = 0; i < NR_CPUS; i++) {
1723  if (cpu_to_cwq[i] == p)
1724  cpu_to_cwq[i] = NULL;
1725  }
1726 
1727  if (p->irq) {
1728  free_irq(p->irq, p);
1729  p->irq = 0;
1730  }
1731  spu_queue_destroy(p);
1732  list_del(&p->list);
1733  kfree(p);
1734  }
1735 }
1736 
1737 /* Walk the backward arcs of a CWQ 'exec-unit' node,
1738  * gathering cpu membership information.
1739  */
1740 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1741  struct platform_device *dev,
1742  u64 node, struct spu_queue *p,
1743  struct spu_queue **table)
1744 {
1745  u64 arc;
1746 
1747  mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1748  u64 tgt = mdesc_arc_target(mdesc, arc);
1749  const char *name = mdesc_node_name(mdesc, tgt);
1750  const u64 *id;
1751 
1752  if (strcmp(name, "cpu"))
1753  continue;
1754  id = mdesc_get_property(mdesc, tgt, "id", NULL);
1755  if (table[*id] != NULL) {
1756  dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
1757  dev->dev.of_node->full_name);
1758  return -EINVAL;
1759  }
1760  cpu_set(*id, p->sharing);
1761  table[*id] = p;
1762  }
1763  return 0;
1764 }
1765 
1766 /* Process an 'exec-unit' MDESC node of type 'cwq'. */
1767 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1768  struct platform_device *dev, struct mdesc_handle *mdesc,
1769  u64 node, const char *iname, unsigned long q_type,
1770  irq_handler_t handler, struct spu_queue **table)
1771 {
1772  struct spu_queue *p;
1773  int err;
1774 
1775  p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1776  if (!p) {
1777  dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
1778  dev->dev.of_node->full_name);
1779  return -ENOMEM;
1780  }
1781 
1782  cpus_clear(p->sharing);
1783  spin_lock_init(&p->lock);
1784  p->q_type = q_type;
1785  INIT_LIST_HEAD(&p->jobs);
1786  list_add(&p->list, list);
1787 
1788  err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1789  if (err)
1790  return err;
1791 
1792  err = spu_queue_setup(p);
1793  if (err)
1794  return err;
1795 
1796  return spu_map_ino(dev, ip, iname, p, handler);
1797 }
1798 
1799 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1800  struct spu_mdesc_info *ip, struct list_head *list,
1801  const char *exec_name, unsigned long q_type,
1802  irq_handler_t handler, struct spu_queue **table)
1803 {
1804  int err = 0;
1805  u64 node;
1806 
1807  mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1808  const char *type;
1809 
1810  type = mdesc_get_property(mdesc, node, "type", NULL);
1811  if (!type || strcmp(type, exec_name))
1812  continue;
1813 
1814  err = handle_exec_unit(ip, list, dev, mdesc, node,
1815  exec_name, q_type, handler, table);
1816  if (err) {
1817  spu_list_destroy(list);
1818  break;
1819  }
1820  }
1821 
1822  return err;
1823 }
1824 
1825 static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1826  struct spu_mdesc_info *ip)
1827 {
1828  const u64 *ino;
1829  int ino_len;
1830  int i;
1831 
1832  ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1833  if (!ino) {
1834  printk("NO 'ino'\n");
1835  return -ENODEV;
1836  }
1837 
1838  ip->num_intrs = ino_len / sizeof(u64);
1839  ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1840  ip->num_intrs),
1841  GFP_KERNEL);
1842  if (!ip->ino_table)
1843  return -ENOMEM;
1844 
1845  for (i = 0; i < ip->num_intrs; i++) {
1846  struct ino_blob *b = &ip->ino_table[i];
1847  b->intr = i + 1;
1848  b->ino = ino[i];
1849  }
1850 
1851  return 0;
1852 }
1853 
1854 static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1855  struct platform_device *dev,
1856  struct spu_mdesc_info *ip,
1857  const char *node_name)
1858 {
1859  const unsigned int *reg;
1860  u64 node;
1861 
1862  reg = of_get_property(dev->dev.of_node, "reg", NULL);
1863  if (!reg)
1864  return -ENODEV;
1865 
1866  mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1867  const char *name;
1868  const u64 *chdl;
1869 
1870  name = mdesc_get_property(mdesc, node, "name", NULL);
1871  if (!name || strcmp(name, node_name))
1872  continue;
1873  chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1874  if (!chdl || (*chdl != *reg))
1875  continue;
1876  ip->cfg_handle = *chdl;
1877  return get_irq_props(mdesc, node, ip);
1878  }
1879 
1880  return -ENODEV;
1881 }
1882 
1883 static unsigned long n2_spu_hvapi_major;
1884 static unsigned long n2_spu_hvapi_minor;
1885 
1886 static int __devinit n2_spu_hvapi_register(void)
1887 {
1888  int err;
1889 
1890  n2_spu_hvapi_major = 2;
1891  n2_spu_hvapi_minor = 0;
1892 
1893  err = sun4v_hvapi_register(HV_GRP_NCS,
1894  n2_spu_hvapi_major,
1895  &n2_spu_hvapi_minor);
1896 
1897  if (!err)
1898  pr_info("Registered NCS HVAPI version %lu.%lu\n",
1899  n2_spu_hvapi_major,
1900  n2_spu_hvapi_minor);
1901 
1902  return err;
1903 }
1904 
1905 static void n2_spu_hvapi_unregister(void)
1906 {
1907  sun4v_hvapi_unregister(HV_GRP_NCS);
1908 }
1909 
1910 static int global_ref;
1911 
1912 static int __devinit grab_global_resources(void)
1913 {
1914  int err = 0;
1915 
1916  mutex_lock(&spu_lock);
1917 
1918  if (global_ref++)
1919  goto out;
1920 
1921  err = n2_spu_hvapi_register();
1922  if (err)
1923  goto out;
1924 
1925  err = queue_cache_init();
1926  if (err)
1927  goto out_hvapi_release;
1928 
1929  err = -ENOMEM;
1930  cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1931  GFP_KERNEL);
1932  if (!cpu_to_cwq)
1933  goto out_queue_cache_destroy;
1934 
1935  cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1936  GFP_KERNEL);
1937  if (!cpu_to_mau)
1938  goto out_free_cwq_table;
1939 
1940  err = 0;
1941 
1942 out:
1943  if (err)
1944  global_ref--;
1945  mutex_unlock(&spu_lock);
1946  return err;
1947 
1948 out_free_cwq_table:
1949  kfree(cpu_to_cwq);
1950  cpu_to_cwq = NULL;
1951 
1952 out_queue_cache_destroy:
1953  queue_cache_destroy();
1954 
1955 out_hvapi_release:
1956  n2_spu_hvapi_unregister();
1957  goto out;
1958 }
1959 
1960 static void release_global_resources(void)
1961 {
1962  mutex_lock(&spu_lock);
1963  if (!--global_ref) {
1964  kfree(cpu_to_cwq);
1965  cpu_to_cwq = NULL;
1966 
1967  kfree(cpu_to_mau);
1968  cpu_to_mau = NULL;
1969 
1970  queue_cache_destroy();
1971  n2_spu_hvapi_unregister();
1972  }
1973  mutex_unlock(&spu_lock);
1974 }
1975 
1976 static struct n2_crypto * __devinit alloc_n2cp(void)
1977 {
1978  struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1979 
1980  if (np)
1981  INIT_LIST_HEAD(&np->cwq_list);
1982 
1983  return np;
1984 }
1985 
1986 static void free_n2cp(struct n2_crypto *np)
1987 {
1988  if (np->cwq_info.ino_table) {
1989  kfree(np->cwq_info.ino_table);
1990  np->cwq_info.ino_table = NULL;
1991  }
1992 
1993  kfree(np);
1994 }
1995 
1996 static void __devinit n2_spu_driver_version(void)
1997 {
1998  static int n2_spu_version_printed;
1999 
2000  if (n2_spu_version_printed++ == 0)
2001  pr_info("%s", version);
2002 }
2003 
2004 static int __devinit n2_crypto_probe(struct platform_device *dev)
2005 {
2006  struct mdesc_handle *mdesc;
2007  const char *full_name;
2008  struct n2_crypto *np;
2009  int err;
2010 
2011  n2_spu_driver_version();
2012 
2013  full_name = dev->dev.of_node->full_name;
2014  pr_info("Found N2CP at %s\n", full_name);
2015 
2016  np = alloc_n2cp();
2017  if (!np) {
2018  dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
2019  full_name);
2020  return -ENOMEM;
2021  }
2022 
2023  err = grab_global_resources();
2024  if (err) {
2025  dev_err(&dev->dev, "%s: Unable to grab "
2026  "global resources.\n", full_name);
2027  goto out_free_n2cp;
2028  }
2029 
2030  mdesc = mdesc_grab();
2031 
2032  if (!mdesc) {
2033  dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
2034  full_name);
2035  err = -ENODEV;
2036  goto out_free_global;
2037  }
2038  err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2039  if (err) {
2040  dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
2041  full_name);
2042  mdesc_release(mdesc);
2043  goto out_free_global;
2044  }
2045 
2046  err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2047  "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2048  cpu_to_cwq);
2049  mdesc_release(mdesc);
2050 
2051  if (err) {
2052  dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
2053  full_name);
2054  goto out_free_global;
2055  }
2056 
2057  err = n2_register_algs();
2058  if (err) {
2059  dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
2060  full_name);
2061  goto out_free_spu_list;
2062  }
2063 
2064  dev_set_drvdata(&dev->dev, np);
2065 
2066  return 0;
2067 
2068 out_free_spu_list:
2069  spu_list_destroy(&np->cwq_list);
2070 
2071 out_free_global:
2072  release_global_resources();
2073 
2074 out_free_n2cp:
2075  free_n2cp(np);
2076 
2077  return err;
2078 }
2079 
2080 static int __devexit n2_crypto_remove(struct platform_device *dev)
2081 {
2082  struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2083 
2084  n2_unregister_algs();
2085 
2086  spu_list_destroy(&np->cwq_list);
2087 
2088  release_global_resources();
2089 
2090  free_n2cp(np);
2091 
2092  return 0;
2093 }
2094 
2095 static struct n2_mau * __devinit alloc_ncp(void)
2096 {
2097  struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2098 
2099  if (mp)
2100  INIT_LIST_HEAD(&mp->mau_list);
2101 
2102  return mp;
2103 }
2104 
2105 static void free_ncp(struct n2_mau *mp)
2106 {
2107  if (mp->mau_info.ino_table) {
2108  kfree(mp->mau_info.ino_table);
2109  mp->mau_info.ino_table = NULL;
2110  }
2111 
2112  kfree(mp);
2113 }
2114 
2115 static int __devinit n2_mau_probe(struct platform_device *dev)
2116 {
2117  struct mdesc_handle *mdesc;
2118  const char *full_name;
2119  struct n2_mau *mp;
2120  int err;
2121 
2122  n2_spu_driver_version();
2123 
2124  full_name = dev->dev.of_node->full_name;
2125  pr_info("Found NCP at %s\n", full_name);
2126 
2127  mp = alloc_ncp();
2128  if (!mp) {
2129  dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
2130  full_name);
2131  return -ENOMEM;
2132  }
2133 
2134  err = grab_global_resources();
2135  if (err) {
2136  dev_err(&dev->dev, "%s: Unable to grab "
2137  "global resources.\n", full_name);
2138  goto out_free_ncp;
2139  }
2140 
2141  mdesc = mdesc_grab();
2142 
2143  if (!mdesc) {
2144  dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
2145  full_name);
2146  err = -ENODEV;
2147  goto out_free_global;
2148  }
2149 
2150  err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2151  if (err) {
2152  dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
2153  full_name);
2154  mdesc_release(mdesc);
2155  goto out_free_global;
2156  }
2157 
2158  err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2159  "mau", HV_NCS_QTYPE_MAU, mau_intr,
2160  cpu_to_mau);
2161  mdesc_release(mdesc);
2162 
2163  if (err) {
2164  dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
2165  full_name);
2166  goto out_free_global;
2167  }
2168 
2169  dev_set_drvdata(&dev->dev, mp);
2170 
2171  return 0;
2172 
2173 out_free_global:
2174  release_global_resources();
2175 
2176 out_free_ncp:
2177  free_ncp(mp);
2178 
2179  return err;
2180 }
2181 
2182 static int __devexit n2_mau_remove(struct platform_device *dev)
2183 {
2184  struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2185 
2186  spu_list_destroy(&mp->mau_list);
2187 
2188  release_global_resources();
2189 
2190  free_ncp(mp);
2191 
2192  return 0;
2193 }
2194 
2195 static struct of_device_id n2_crypto_match[] = {
2196  {
2197  .name = "n2cp",
2198  .compatible = "SUNW,n2-cwq",
2199  },
2200  {
2201  .name = "n2cp",
2202  .compatible = "SUNW,vf-cwq",
2203  },
2204  {
2205  .name = "n2cp",
2206  .compatible = "SUNW,kt-cwq",
2207  },
2208  {},
2209 };
2210 
2211 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2212 
2213 static struct platform_driver n2_crypto_driver = {
2214  .driver = {
2215  .name = "n2cp",
2216  .owner = THIS_MODULE,
2217  .of_match_table = n2_crypto_match,
2218  },
2219  .probe = n2_crypto_probe,
2220  .remove = __devexit_p(n2_crypto_remove),
2221 };
2222 
2223 static struct of_device_id n2_mau_match[] = {
2224  {
2225  .name = "ncp",
2226  .compatible = "SUNW,n2-mau",
2227  },
2228  {
2229  .name = "ncp",
2230  .compatible = "SUNW,vf-mau",
2231  },
2232  {
2233  .name = "ncp",
2234  .compatible = "SUNW,kt-mau",
2235  },
2236  {},
2237 };
2238 
2239 MODULE_DEVICE_TABLE(of, n2_mau_match);
2240 
2241 static struct platform_driver n2_mau_driver = {
2242  .driver = {
2243  .name = "ncp",
2244  .owner = THIS_MODULE,
2245  .of_match_table = n2_mau_match,
2246  },
2247  .probe = n2_mau_probe,
2248  .remove = __devexit_p(n2_mau_remove),
2249 };
2250 
2251 static int __init n2_init(void)
2252 {
2253  int err = platform_driver_register(&n2_crypto_driver);
2254 
2255  if (!err) {
2256  err = platform_driver_register(&n2_mau_driver);
2257  if (err)
2258  platform_driver_unregister(&n2_crypto_driver);
2259  }
2260  return err;
2261 }
2262 
2263 static void __exit n2_exit(void)
2264 {
2265  platform_driver_unregister(&n2_mau_driver);
2266  platform_driver_unregister(&n2_crypto_driver);
2267 }
2268 
2269 module_init(n2_init);
2270 module_exit(n2_exit);