#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/wait.h>
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
	{ "IBM,vtpm", "IBM,vtpm" },
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
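/*
 * tpm_ibmvtpm_recv error paths: bail out if the RTCE buffer has not been
 * set up yet, or if the caller's buffer is smaller than the queued response.
 */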
	dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
	if (count < ibmvtpm->crq_res.len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%ld, crq_size=%d\n",
			count, ibmvtpm->crq_res.len);
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
	dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
139 "Invalid size in send: count=%ld, rtce_size=%d\n",
	crq.len = (u16)count;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
	if (rc != H_SUCCESS) {
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
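/*
 * ibmvtpm_crq_get_rtce_size: ask the vTPM for its RTCE buffer size.  The
 * reply arrives as a CRQ and is handled in ibmvtpm_crq_process(), which
 * allocates and DMA-maps the buffer.
 */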
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)

	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)

	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
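/* tpm_ibmvtpm_get_desired_dma: tell the vio core how much DMA window space this device needs. */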
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
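/* tpm_ibmvtpm_suspend: notify the vTPM over the CRQ before the partition suspends. */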
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)

		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
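/*
 * tpm_ibmvtpm_resume: re-enable the CRQ (retrying while the hypervisor is
 * busy), schedule the tasklet to drain anything pending, and restart the
 * CRQ initialization handshake.
 */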
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
	vio_disable_interrupts(ibmvtpm->vdev);
	tasklet_schedule(&ibmvtpm->tasklet);
	spin_unlock_irqrestore(&ibmvtpm->lock, flags);
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);
static struct attribute *ibmvtpm_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr,
	NULL,
};

static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };
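/*
 * Chip callbacks for the TPM core: the CRQ-based I/O routines above, the
 * sysfs attribute group, and the misc-device fops.
 */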
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.attr_group = &ibmvtpm_attr_grp,
	.miscdev = { .fops = &ibmvtpm_ops, },
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
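/*
 * ibmvtpm_crq_process: dispatch one received CRQ.  Init-handshake messages
 * are acknowledged here; a response carrying the RTCE buffer size triggers
 * allocation and DMA mapping of that buffer.
 */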
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)

	switch (crq->valid) {
		rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
		if (rc)
			dev_err(ibmvtpm->dev,
				"Unable to send CRQ init complete rc=%d\n", rc);

		dev_info(ibmvtpm->dev,
			 "CRQ initialization completed\n");
		dev_err(ibmvtpm->dev,
			"Unknown crq message type: %d\n", crq->msg);
			dev_err(ibmvtpm->dev,
				"Failed to allocate memory for rtce buffer\n");

			dev_err(ibmvtpm->dev,
				"Failed to dma map rtce buffer\n");
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
	vio_disable_interrupts(ibmvtpm->vdev);
	tasklet_schedule(&ibmvtpm->tasklet);
	spin_unlock_irqrestore(&ibmvtpm->lock, flags);
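/*
 * ibmvtpm_tasklet: drain every pending CRQ through ibmvtpm_crq_process(),
 * then re-enable vio interrupts.
 */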
static void ibmvtpm_tasklet(void *data)

	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
	vio_enable_interrupts(ibmvtpm->vdev);
	spin_unlock_irqrestore(&ibmvtpm->lock, flags);
		dev_err(dev, "tpm_register_hardware failed\n");

		dev_err(dev, "kzalloc for ibmvtpm failed\n");

		dev_err(dev, "Unable to allocate memory for crq_addr\n");

		dev_err(dev, "dma mapping failed\n");
	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}
	tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
		     (unsigned long)ibmvtpm);

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}
	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}
	ibmvtpm->vdev = vio_dev;
	chip->vendor.data = (void *)ibmvtpm;
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
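/*
 * vio driver glue: device-table matching, probe/remove, the DMA window
 * estimate, and the suspend/resume PM ops.
 */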
	.id_table = tpm_ibmvtpm_device_table,
	.probe = tpm_ibmvtpm_probe,
	.remove = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name = tpm_ibmvtpm_driver_name,
	.pm = &tpm_ibmvtpm_pm_ops,
static int __init ibmvtpm_module_init(void)

static void __exit ibmvtpm_module_exit(void)