/*
 * tpm_ibmvtpm.c — IBM virtual TPM driver, as shipped in Linux kernel 3.7.1.
 * (This copy was recovered from a generated documentation page.)
 */
1 /*
2  * Copyright (C) 2012 IBM Corporation
3  *
4  * Author: Ashley Lai <[email protected]>
5  *
6  * Maintained by: <[email protected]>
7  *
8  * Device driver for TCG/TCPA TPM (trusted platform module).
9  * Specifications at www.trustedcomputinggroup.org
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation, version 2 of the
14  * License.
15  *
16  */
17 
18 #include <linux/dma-mapping.h>
19 #include <linux/dmapool.h>
20 #include <linux/slab.h>
21 #include <asm/vio.h>
22 #include <asm/irq.h>
23 #include <linux/types.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/interrupt.h>
27 #include <linux/wait.h>
28 #include <asm/prom.h>
29 
30 #include "tpm.h"
31 #include "tpm_ibmvtpm.h"
32 
33 static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
34 
35 static struct vio_device_id tpm_ibmvtpm_device_table[] __devinitdata = {
36  { "IBM,vtpm", "IBM,vtpm"},
37  { "", "" }
38 };
39 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
40 
42 
53 static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
54 {
55  return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
56 }
57 
65 static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
66 {
67  struct tpm_chip *chip = dev_get_drvdata(dev);
68  if (chip)
69  return (struct ibmvtpm_dev *)chip->vendor.data;
70  return NULL;
71 }
72 
82 static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
83 {
84  struct ibmvtpm_dev *ibmvtpm;
85  u16 len;
86 
87  ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
88 
89  if (!ibmvtpm->rtce_buf) {
90  dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
91  return 0;
92  }
93 
94  wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
95 
96  if (count < ibmvtpm->crq_res.len) {
97  dev_err(ibmvtpm->dev,
98  "Invalid size in recv: count=%ld, crq_size=%d\n",
99  count, ibmvtpm->crq_res.len);
100  return -EIO;
101  }
102 
103  spin_lock(&ibmvtpm->rtce_lock);
104  memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
105  memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
106  ibmvtpm->crq_res.valid = 0;
107  ibmvtpm->crq_res.msg = 0;
108  len = ibmvtpm->crq_res.len;
109  ibmvtpm->crq_res.len = 0;
110  spin_unlock(&ibmvtpm->rtce_lock);
111  return len;
112 }
113 
123 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
124 {
125  struct ibmvtpm_dev *ibmvtpm;
126  struct ibmvtpm_crq crq;
127  u64 *word = (u64 *) &crq;
128  int rc;
129 
130  ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
131 
132  if (!ibmvtpm->rtce_buf) {
133  dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
134  return 0;
135  }
136 
137  if (count > ibmvtpm->rtce_size) {
138  dev_err(ibmvtpm->dev,
139  "Invalid size in send: count=%ld, rtce_size=%d\n",
140  count, ibmvtpm->rtce_size);
141  return -EIO;
142  }
143 
144  spin_lock(&ibmvtpm->rtce_lock);
145  memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
146  crq.valid = (u8)IBMVTPM_VALID_CMD;
147  crq.msg = (u8)VTPM_TPM_COMMAND;
148  crq.len = (u16)count;
149  crq.data = ibmvtpm->rtce_dma_handle;
150 
151  rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
152  if (rc != H_SUCCESS) {
153  dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
154  rc = 0;
155  } else
156  rc = count;
157 
158  spin_unlock(&ibmvtpm->rtce_lock);
159  return rc;
160 }
161 
/* The vTPM transport offers no way to abort an in-flight command, so
 * cancel is a no-op. */
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
}
166 
167 static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
168 {
169  return 0;
170 }
171 
180 static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
181 {
182  struct ibmvtpm_crq crq;
183  u64 *buf = (u64 *) &crq;
184  int rc;
185 
186  crq.valid = (u8)IBMVTPM_VALID_CMD;
187  crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
188 
189  rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
190  if (rc != H_SUCCESS)
191  dev_err(ibmvtpm->dev,
192  "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
193 
194  return rc;
195 }
196 
206 static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
207 {
208  struct ibmvtpm_crq crq;
209  u64 *buf = (u64 *) &crq;
210  int rc;
211 
212  crq.valid = (u8)IBMVTPM_VALID_CMD;
213  crq.msg = (u8)VTPM_GET_VERSION;
214 
215  rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
216  if (rc != H_SUCCESS)
217  dev_err(ibmvtpm->dev,
218  "ibmvtpm_crq_get_version failed rc=%d\n", rc);
219 
220  return rc;
221 }
222 
231 static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
232 {
233  int rc;
234 
235  rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
236  if (rc != H_SUCCESS)
237  dev_err(ibmvtpm->dev,
238  "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
239 
240  return rc;
241 }
242 
251 static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
252 {
253  int rc;
254 
255  rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
256  if (rc != H_SUCCESS)
257  dev_err(ibmvtpm->dev,
258  "ibmvtpm_crq_send_init failed rc=%d\n", rc);
259 
260  return rc;
261 }
262 
270 static int __devexit tpm_ibmvtpm_remove(struct vio_dev *vdev)
271 {
272  struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
273  int rc = 0;
274 
275  free_irq(vdev->irq, ibmvtpm);
276  tasklet_kill(&ibmvtpm->tasklet);
277 
278  do {
279  if (rc)
280  msleep(100);
281  rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
282  } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
283 
284  dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
286  free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
287 
288  if (ibmvtpm->rtce_buf) {
289  dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
290  ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
291  kfree(ibmvtpm->rtce_buf);
292  }
293 
294  tpm_remove_hardware(ibmvtpm->dev);
295 
296  kfree(ibmvtpm);
297 
298  return 0;
299 }
300 
308 static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
309 {
310  struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
311  return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
312 }
313 
321 static int tpm_ibmvtpm_suspend(struct device *dev)
322 {
323  struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
324  struct ibmvtpm_crq crq;
325  u64 *buf = (u64 *) &crq;
326  int rc = 0;
327 
328  crq.valid = (u8)IBMVTPM_VALID_CMD;
329  crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
330 
331  rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
332  if (rc != H_SUCCESS)
333  dev_err(ibmvtpm->dev,
334  "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
335 
336  return rc;
337 }
338 
347 static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
348 {
349  int rc = 0;
350 
351  do {
352  if (rc)
353  msleep(100);
354  rc = plpar_hcall_norets(H_FREE_CRQ,
355  ibmvtpm->vdev->unit_address);
356  } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
357 
358  memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
359  ibmvtpm->crq_queue.index = 0;
360 
361  return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
362  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
363 }
364 
372 static int tpm_ibmvtpm_resume(struct device *dev)
373 {
374  struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
375  unsigned long flags;
376  int rc = 0;
377 
378  do {
379  if (rc)
380  msleep(100);
381  rc = plpar_hcall_norets(H_ENABLE_CRQ,
382  ibmvtpm->vdev->unit_address);
383  } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
384 
385  if (rc) {
386  dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
387  return rc;
388  }
389 
390  spin_lock_irqsave(&ibmvtpm->lock, flags);
391  vio_disable_interrupts(ibmvtpm->vdev);
392  tasklet_schedule(&ibmvtpm->tasklet);
393  spin_unlock_irqrestore(&ibmvtpm->lock, flags);
394 
395  rc = ibmvtpm_crq_send_init(ibmvtpm);
396  if (rc)
397  dev_err(dev, "Error send_init rc=%d\n", rc);
398 
399  return rc;
400 }
401 
/* Character-device file operations, all delegated to the generic TPM
 * layer's open/read/write/release helpers. */
static const struct file_operations ibmvtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
410 
411 static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
412 static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
415 static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
416 static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
417  NULL);
420 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
421 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
422 
/* NULL-terminated attribute list grouped under the device's sysfs dir.
 * NOTE(review): dev_attr_enabled/active/caps/cancel are declared by
 * DEVICE_ATTR lines earlier in the full source. */
static struct attribute *ibmvtpm_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr, NULL,
};

static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };
437 
/* Vendor operations handed to the generic TPM core. req_complete_mask,
 * req_complete_val and req_canceled are 0 because tpm_ibmvtpm_status()
 * always reports 0 — there is no status register behind the CRQ. */
static const struct tpm_vendor_specific tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = 0,
	.attr_group = &ibmvtpm_attr_grp,
	.miscdev = { .fops = &ibmvtpm_ops, },
};
449 
/* Power-management hooks wired into the vio driver below. */
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
454 
/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm:	vtpm device struct
 *
 * Return: the queue entry at the current index if the hypervisor has
 * marked it as a response (VTPM_MSG_RES bit set), advancing and wrapping
 * the index; NULL when no response is pending.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/* Order the valid-flag read before the payload reads done
		 * by the caller. */
		rmb();
	} else
		crq = NULL;
	return crq;
}
475 
484 static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
485  struct ibmvtpm_dev *ibmvtpm)
486 {
487  int rc = 0;
488 
489  switch (crq->valid) {
490  case VALID_INIT_CRQ:
491  switch (crq->msg) {
492  case INIT_CRQ_RES:
493  dev_info(ibmvtpm->dev, "CRQ initialized\n");
494  rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
495  if (rc)
496  dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
497  return;
498  case INIT_CRQ_COMP_RES:
499  dev_info(ibmvtpm->dev,
500  "CRQ initialization completed\n");
501  return;
502  default:
503  dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
504  return;
505  }
506  return;
507  case IBMVTPM_VALID_CMD:
508  switch (crq->msg) {
510  if (crq->len <= 0) {
511  dev_err(ibmvtpm->dev, "Invalid rtce size\n");
512  return;
513  }
514  ibmvtpm->rtce_size = crq->len;
515  ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
516  GFP_KERNEL);
517  if (!ibmvtpm->rtce_buf) {
518  dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
519  return;
520  }
521 
522  ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
523  ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
525 
526  if (dma_mapping_error(ibmvtpm->dev,
527  ibmvtpm->rtce_dma_handle)) {
528  kfree(ibmvtpm->rtce_buf);
529  ibmvtpm->rtce_buf = NULL;
530  dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
531  }
532 
533  return;
535  ibmvtpm->vtpm_version = crq->data;
536  return;
538  ibmvtpm->crq_res.valid = crq->valid;
539  ibmvtpm->crq_res.msg = crq->msg;
540  ibmvtpm->crq_res.len = crq->len;
541  ibmvtpm->crq_res.data = crq->data;
543  return;
544  default:
545  return;
546  }
547  }
548  return;
549 }
550 
559 static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
560 {
561  struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
562  unsigned long flags;
563 
564  spin_lock_irqsave(&ibmvtpm->lock, flags);
565  vio_disable_interrupts(ibmvtpm->vdev);
566  tasklet_schedule(&ibmvtpm->tasklet);
567  spin_unlock_irqrestore(&ibmvtpm->lock, flags);
568 
569  return IRQ_HANDLED;
570 }
571 
579 static void ibmvtpm_tasklet(void *data)
580 {
581  struct ibmvtpm_dev *ibmvtpm = data;
582  struct ibmvtpm_crq *crq;
583  unsigned long flags;
584 
585  spin_lock_irqsave(&ibmvtpm->lock, flags);
586  while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
587  ibmvtpm_crq_process(crq, ibmvtpm);
588  crq->valid = 0;
589  wmb();
590  }
591 
592  vio_enable_interrupts(ibmvtpm->vdev);
593  spin_unlock_irqrestore(&ibmvtpm->lock, flags);
594 }
595 
605 static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
606  const struct vio_device_id *id)
607 {
608  struct ibmvtpm_dev *ibmvtpm;
609  struct device *dev = &vio_dev->dev;
610  struct ibmvtpm_crq_queue *crq_q;
611  struct tpm_chip *chip;
612  int rc = -ENOMEM, rc1;
613 
614  chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
615  if (!chip) {
616  dev_err(dev, "tpm_register_hardware failed\n");
617  return -ENODEV;
618  }
619 
620  ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
621  if (!ibmvtpm) {
622  dev_err(dev, "kzalloc for ibmvtpm failed\n");
623  goto cleanup;
624  }
625 
626  crq_q = &ibmvtpm->crq_queue;
627  crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
628  if (!crq_q->crq_addr) {
629  dev_err(dev, "Unable to allocate memory for crq_addr\n");
630  goto cleanup;
631  }
632 
633  crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
634  ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
637 
638  if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
639  dev_err(dev, "dma mapping failed\n");
640  goto cleanup;
641  }
642 
643  rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
644  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
645  if (rc == H_RESOURCE)
646  rc = ibmvtpm_reset_crq(ibmvtpm);
647 
648  if (rc) {
649  dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
650  goto reg_crq_cleanup;
651  }
652 
653  tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
654  (unsigned long)ibmvtpm);
655 
656  rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
657  tpm_ibmvtpm_driver_name, ibmvtpm);
658  if (rc) {
659  dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
660  goto init_irq_cleanup;
661  }
662 
663  rc = vio_enable_interrupts(vio_dev);
664  if (rc) {
665  dev_err(dev, "Error %d enabling interrupts\n", rc);
666  goto init_irq_cleanup;
667  }
668 
669  crq_q->index = 0;
670 
671  ibmvtpm->dev = dev;
672  ibmvtpm->vdev = vio_dev;
673  chip->vendor.data = (void *)ibmvtpm;
674 
675  spin_lock_init(&ibmvtpm->lock);
676  spin_lock_init(&ibmvtpm->rtce_lock);
677 
678  rc = ibmvtpm_crq_send_init(ibmvtpm);
679  if (rc)
680  goto init_irq_cleanup;
681 
682  rc = ibmvtpm_crq_get_version(ibmvtpm);
683  if (rc)
684  goto init_irq_cleanup;
685 
686  rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
687  if (rc)
688  goto init_irq_cleanup;
689 
690  return rc;
691 init_irq_cleanup:
692  tasklet_kill(&ibmvtpm->tasklet);
693  do {
694  rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
695  } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
696 reg_crq_cleanup:
699 cleanup:
700  if (ibmvtpm) {
701  if (crq_q->crq_addr)
702  free_page((unsigned long)crq_q->crq_addr);
703  kfree(ibmvtpm);
704  }
705 
706  tpm_remove_hardware(dev);
707 
708  return rc;
709 }
710 
/* VIO bus driver definition tying together probe/remove, the match
 * table, the DMA-size estimator and the PM hooks. */
static struct vio_driver ibmvtpm_driver = {
	.id_table = tpm_ibmvtpm_device_table,
	.probe = tpm_ibmvtpm_probe,
	.remove = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name = tpm_ibmvtpm_driver_name,
	.pm = &tpm_ibmvtpm_pm_ops,
};
719 
/**
 * ibmvtpm_module_init - Initialize this module
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}
731 
/**
 * ibmvtpm_module_exit - Tear down this module
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}
742 
743 module_init(ibmvtpm_module_init);
744 module_exit(ibmvtpm_module_exit);
745 
747 MODULE_DESCRIPTION("IBM vTPM Driver");
748 MODULE_VERSION("1.0");
749 MODULE_LICENSE("GPL");