tpm_tis: add delay after aborting command
drivers/char/tpm/tpm_tis.c
1 /*
2 * Copyright (C) 2005, 2006 IBM Corporation
4 * Authors:
5 * Leendert van Doorn <leendert@watson.ibm.com>
6 * Kylene Hall <kjhall@us.ibm.com>
8 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
10 * Device driver for TCG/TCPA TPM (trusted platform module).
11 * Specifications at www.trustedcomputinggroup.org
13 * This device driver implements the TPM interface as defined in
14 * the TCG TPM Interface Spec version 1.2, revision 1.0.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation, version 2 of the
19 * License.
20 */
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/pnp.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/wait.h>
28 #include <linux/acpi.h>
29 #include <linux/freezer.h>
30 #include "tpm.h"
32 #define TPM_HEADER_SIZE 10
34 enum tis_access {
35 TPM_ACCESS_VALID = 0x80,
36 TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
37 TPM_ACCESS_REQUEST_PENDING = 0x04,
38 TPM_ACCESS_REQUEST_USE = 0x02,
39 };
41 enum tis_status {
42 TPM_STS_VALID = 0x80,
43 TPM_STS_COMMAND_READY = 0x40,
44 TPM_STS_GO = 0x20,
45 TPM_STS_DATA_AVAIL = 0x10,
46 TPM_STS_DATA_EXPECT = 0x08,
47 };
49 enum tis_int_flags {
50 TPM_GLOBAL_INT_ENABLE = 0x80000000,
51 TPM_INTF_BURST_COUNT_STATIC = 0x100,
52 TPM_INTF_CMD_READY_INT = 0x080,
53 TPM_INTF_INT_EDGE_FALLING = 0x040,
54 TPM_INTF_INT_EDGE_RISING = 0x020,
55 TPM_INTF_INT_LEVEL_LOW = 0x010,
56 TPM_INTF_INT_LEVEL_HIGH = 0x008,
57 TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
58 TPM_INTF_STS_VALID_INT = 0x002,
59 TPM_INTF_DATA_AVAIL_INT = 0x001,
60 };
62 enum tis_defaults {
63 TIS_MEM_BASE = 0xFED40000,
64 TIS_MEM_LEN = 0x5000,
65 TIS_SHORT_TIMEOUT = 750, /* ms */
66 TIS_LONG_TIMEOUT = 2000, /* 2 sec */
67 };
69 #define TPM_ACCESS(l) (0x0000 | ((l) << 12))
70 #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
71 #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
72 #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
73 #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
74 #define TPM_STS(l) (0x0018 | ((l) << 12))
75 #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
77 #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
78 #define TPM_RID(l) (0x0F04 | ((l) << 12))
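/*
 * Each locality owns a 4 KiB register page: (l) << 12 adds 0x1000 per
 * locality, so e.g. TPM_STS(0) is offset 0x0018 and TPM_STS(1) is offset
 * 0x1018 from the mapped base.
 */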
80 static LIST_HEAD(tis_chips);
81 static DEFINE_SPINLOCK(tis_lock);
83 #if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
84 static int is_itpm(struct pnp_dev *dev)
86 struct acpi_device *acpi = pnp_acpi_device(dev);
87 struct acpi_hardware_id *id;
89 list_for_each_entry(id, &acpi->pnp.ids, list) {
90 if (!strcmp("INTC0102", id->id))
91 return 1;
94 return 0;
96 #else
97 static inline int is_itpm(struct pnp_dev *dev)
99 return 0;
101 #endif
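/*
 * Returns the locality number (cached in chip->vendor.locality) if locality l
 * is currently active and the access register reads back as valid, or -1
 * otherwise.
 */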
103 static int check_locality(struct tpm_chip *chip, int l)
105 if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
106 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
107 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
108 return chip->vendor.locality = l;
110 return -1;
113 static void release_locality(struct tpm_chip *chip, int l, int force)
115 if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
116 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
117 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
118 iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
119 chip->vendor.iobase + TPM_ACCESS(l));
120 }
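/*
 * Ask for locality l by writing TPM_ACCESS_REQUEST_USE, then wait up to
 * timeout_a for the TPM to grant it: sleep on int_queue when an irq is in
 * use, otherwise poll check_locality() every TPM_TIMEOUT ms.
 */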
122 static int request_locality(struct tpm_chip *chip, int l)
124 unsigned long stop, timeout;
125 long rc;
127 if (check_locality(chip, l) >= 0)
128 return l;
130 iowrite8(TPM_ACCESS_REQUEST_USE,
131 chip->vendor.iobase + TPM_ACCESS(l));
133 stop = jiffies + chip->vendor.timeout_a;
135 if (chip->vendor.irq) {
136 again:
137 timeout = stop - jiffies;
138 if ((long)timeout <= 0)
139 return -1;
140 rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
141 (check_locality
142 (chip, l) >= 0),
143 timeout);
144 if (rc > 0)
145 return l;
146 if (rc == -ERESTARTSYS && freezing(current)) {
147 clear_thread_flag(TIF_SIGPENDING);
148 goto again;
150 } else {
151 /* wait for burstcount */
152 do {
153 if (check_locality(chip, l) >= 0)
154 return l;
155 msleep(TPM_TIMEOUT);
156 }
157 while (time_before(jiffies, stop));
158 }
159 return -1;
160 }
162 static u8 tpm_tis_status(struct tpm_chip *chip)
164 return ioread8(chip->vendor.iobase +
165 TPM_STS(chip->vendor.locality));
168 static void tpm_tis_ready(struct tpm_chip *chip)
170 /* this causes the current command to be aborted */
171 iowrite8(TPM_STS_COMMAND_READY,
172 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
173 }
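/*
 * The burst count lives in bytes 1 and 2 of the status register and says how
 * many bytes may be transferred to or from the FIFO before the next
 * handshake.  Returns -EBUSY if it stays zero past timeout_d.
 */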
175 static int get_burstcount(struct tpm_chip *chip)
177 unsigned long stop;
178 int burstcnt;
180 /* wait for burstcount */
181 /* which timeout value, spec has 2 answers (c & d) */
182 stop = jiffies + chip->vendor.timeout_d;
183 do {
184 burstcnt = ioread8(chip->vendor.iobase +
185 TPM_STS(chip->vendor.locality) + 1);
186 burstcnt += ioread8(chip->vendor.iobase +
187 TPM_STS(chip->vendor.locality) +
188 2) << 8;
189 if (burstcnt)
190 return burstcnt;
191 msleep(TPM_TIMEOUT);
192 } while (time_before(jiffies, stop));
193 return -EBUSY;
194 }
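/*
 * Wait until every bit in 'mask' is set in the status register, either by
 * sleeping on 'queue' when an irq is available or by polling every
 * TPM_TIMEOUT ms.  Returns 0 on success, -ETIME once 'timeout' expires.
 */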
196 static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
197 wait_queue_head_t *queue)
199 unsigned long stop;
200 long rc;
201 u8 status;
203 /* check current status */
204 status = tpm_tis_status(chip);
205 if ((status & mask) == mask)
206 return 0;
208 stop = jiffies + timeout;
210 if (chip->vendor.irq) {
211 again:
212 timeout = stop - jiffies;
213 if ((long)timeout <= 0)
214 return -ETIME;
215 rc = wait_event_interruptible_timeout(*queue,
216 ((tpm_tis_status
217 (chip) & mask) ==
218 mask), timeout);
219 if (rc > 0)
220 return 0;
221 if (rc == -ERESTARTSYS && freezing(current)) {
222 clear_thread_flag(TIF_SIGPENDING);
223 goto again;
225 } else {
226 do {
227 msleep(TPM_TIMEOUT);
228 status = tpm_tis_status(chip);
229 if ((status & mask) == mask)
230 return 0;
231 } while (time_before(jiffies, stop));
232 }
233 return -ETIME;
234 }
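/*
 * Pull up to 'count' bytes of the response out of the FIFO, one burst at a
 * time, for as long as the TPM keeps signalling TPM_STS_DATA_AVAIL.  The
 * return value is the number of bytes actually read and may be short on
 * timeout.
 */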
236 static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
238 int size = 0, burstcnt;
239 while (size < count &&
240 wait_for_stat(chip,
241 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
242 chip->vendor.timeout_c,
243 &chip->vendor.read_queue)
244 == 0) {
245 burstcnt = get_burstcount(chip);
246 for (; burstcnt > 0 && size < count; burstcnt--)
247 buf[size++] = ioread8(chip->vendor.iobase +
248 TPM_DATA_FIFO(chip->vendor.
249 locality));
251 return size;
252 }
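/*
 * A TPM 1.2 response begins with a 10-byte header: 2-byte tag, 4-byte
 * big-endian total length, 4-byte return code.  The length at bytes 2..5 is
 * what tpm_tis_recv() trusts below; e.g. bytes 00 00 00 0a there mean the
 * header is the entire reply.
 */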
254 static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
256 int size = 0;
257 int expected, status;
259 if (count < TPM_HEADER_SIZE) {
260 size = -EIO;
261 goto out;
264 /* read first 10 bytes, including tag, paramsize, and result */
265 if ((size =
266 recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
267 dev_err(chip->dev, "Unable to read header\n");
268 goto out;
271 expected = be32_to_cpu(*(__be32 *) (buf + 2));
272 if (expected > count) {
273 size = -EIO;
274 goto out;
277 if ((size +=
278 recv_data(chip, &buf[TPM_HEADER_SIZE],
279 expected - TPM_HEADER_SIZE)) < expected) {
280 dev_err(chip->dev, "Unable to read remainder of result\n");
281 size = -ETIME;
282 goto out;
285 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
286 &chip->vendor.int_queue);
287 status = tpm_tis_status(chip);
288 if (status & TPM_STS_DATA_AVAIL) { /* retry? */
289 dev_err(chip->dev, "Error left over data\n");
290 size = -EIO;
291 goto out;
294 out:
295 tpm_tis_ready(chip);
296 release_locality(chip, chip->vendor.locality, 0);
297 return size;
300 static int itpm;
301 module_param(itpm, bool, 0444);
302 MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
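/*
 * Some Intel integrated TPMs (iTPMs) fail to assert TPM_STS_DATA_EXPECT while
 * a command is being written; with 'itpm' set, tpm_tis_send_data() skips that
 * check instead of aborting the transfer with -EIO.
 */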
304 /*
305 * If interrupts are used (signaled by an irq set in the vendor structure)
306 * tpm.c can skip polling for the data to be available as the interrupt is
307 * waited for here
308 */
309 static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
311 int rc, status, burstcnt;
312 size_t count = 0;
314 if (request_locality(chip, 0) < 0)
315 return -EBUSY;
317 status = tpm_tis_status(chip);
318 if ((status & TPM_STS_COMMAND_READY) == 0) {
319 tpm_tis_ready(chip);
320 if (wait_for_stat
321 (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
322 &chip->vendor.int_queue) < 0) {
323 rc = -ETIME;
324 goto out_err;
328 while (count < len - 1) {
329 burstcnt = get_burstcount(chip);
330 for (; burstcnt > 0 && count < len - 1; burstcnt--) {
331 iowrite8(buf[count], chip->vendor.iobase +
332 TPM_DATA_FIFO(chip->vendor.locality));
333 count++;
336 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
337 &chip->vendor.int_queue);
338 status = tpm_tis_status(chip);
339 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
340 rc = -EIO;
341 goto out_err;
345 /* write last byte */
346 iowrite8(buf[count],
347 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
348 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
349 &chip->vendor.int_queue);
350 status = tpm_tis_status(chip);
351 if ((status & TPM_STS_DATA_EXPECT) != 0) {
352 rc = -EIO;
353 goto out_err;
356 return 0;
358 out_err:
359 tpm_tis_ready(chip);
360 release_locality(chip, chip->vendor.locality, 0);
361 return rc;
364 /*
365 * If interrupts are used (signaled by an irq set in the vendor structure)
366 * tpm.c can skip polling for the data to be available as the interrupt is
367 * waited for here
368 */
369 static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
371 int rc;
372 u32 ordinal;
374 rc = tpm_tis_send_data(chip, buf, len);
375 if (rc < 0)
376 return rc;
378 /* go and do it */
379 iowrite8(TPM_STS_GO,
380 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
382 if (chip->vendor.irq) {
383 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
384 if (wait_for_stat
385 (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
386 tpm_calc_ordinal_duration(chip, ordinal),
387 &chip->vendor.read_queue) < 0) {
388 rc = -ETIME;
389 goto out_err;
392 return len;
393 out_err:
394 tpm_tis_ready(chip);
395 release_locality(chip, chip->vendor.locality, 0);
396 return rc;
399 /*
400 * Early probing for iTPM with STS_DATA_EXPECT flaw.
401 * Try sending command without itpm flag set and if that
402 * fails, repeat with itpm flag set.
403 */
404 static int probe_itpm(struct tpm_chip *chip)
406 int rc = 0;
407 u8 cmd_getticks[] = {
408 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
409 0x00, 0x00, 0x00, 0xf1
410 };
411 size_t len = sizeof(cmd_getticks);
412 int rem_itpm = itpm;
414 itpm = 0;
416 rc = tpm_tis_send_data(chip, cmd_getticks, len);
417 if (rc == 0)
418 goto out;
420 tpm_tis_ready(chip);
421 release_locality(chip, chip->vendor.locality, 0);
423 itpm = 1;
425 rc = tpm_tis_send_data(chip, cmd_getticks, len);
426 if (rc == 0) {
427 dev_info(chip->dev, "Detected an iTPM.\n");
428 rc = 1;
429 } else
430 rc = -EFAULT;
432 out:
433 itpm = rem_itpm;
434 tpm_tis_ready(chip);
435 /* some TPMs need a break here otherwise they will not work
436 * correctly on the immediately subsequent command */
437 msleep(chip->vendor.timeout_b);
438 release_locality(chip, chip->vendor.locality, 0);
440 return rc;
443 static const struct file_operations tis_ops = {
444 .owner = THIS_MODULE,
445 .llseek = no_llseek,
446 .open = tpm_open,
447 .read = tpm_read,
448 .write = tpm_write,
449 .release = tpm_release,
450 };
452 static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
453 static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
454 static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
455 static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
456 static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
457 static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
458 NULL);
459 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
460 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
461 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
462 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
464 static struct attribute *tis_attrs[] = {
465 &dev_attr_pubek.attr,
466 &dev_attr_pcrs.attr,
467 &dev_attr_enabled.attr,
468 &dev_attr_active.attr,
469 &dev_attr_owned.attr,
470 &dev_attr_temp_deactivated.attr,
471 &dev_attr_caps.attr,
472 &dev_attr_cancel.attr,
473 &dev_attr_durations.attr,
474 &dev_attr_timeouts.attr, NULL,
475 };
477 static struct attribute_group tis_attr_grp = {
478 .attrs = tis_attrs
479 };
481 static struct tpm_vendor_specific tpm_tis = {
482 .status = tpm_tis_status,
483 .recv = tpm_tis_recv,
484 .send = tpm_tis_send,
485 .cancel = tpm_tis_ready,
486 .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
487 .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
488 .req_canceled = TPM_STS_COMMAND_READY,
489 .attr_group = &tis_attr_grp,
490 .miscdev = {
491 .fops = &tis_ops,},
492 };
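/*
 * This tpm_vendor_specific structure is what hooks the driver into the
 * generic tpm.c core: the core calls .send/.recv for command I/O, .cancel to
 * abort, and compares the .status byte against req_complete_mask/val and
 * req_canceled to decide when a command has finished or been aborted.
 */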
494 static irqreturn_t tis_int_probe(int irq, void *dev_id)
496 struct tpm_chip *chip = dev_id;
497 u32 interrupt;
499 interrupt = ioread32(chip->vendor.iobase +
500 TPM_INT_STATUS(chip->vendor.locality));
502 if (interrupt == 0)
503 return IRQ_NONE;
505 chip->vendor.probed_irq = irq;
507 /* Clear interrupts handled with TPM_EOI */
508 iowrite32(interrupt,
509 chip->vendor.iobase +
510 TPM_INT_STATUS(chip->vendor.locality));
511 return IRQ_HANDLED;
512 }
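/*
 * tis_int_probe() above is only installed while hunting for a working
 * interrupt line; it merely records the irq that fired in probed_irq.  The
 * real handler below acknowledges the interrupt and wakes the wait queue
 * matching the event that occurred.
 */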
514 static irqreturn_t tis_int_handler(int dummy, void *dev_id)
516 struct tpm_chip *chip = dev_id;
517 u32 interrupt;
518 int i;
520 interrupt = ioread32(chip->vendor.iobase +
521 TPM_INT_STATUS(chip->vendor.locality));
523 if (interrupt == 0)
524 return IRQ_NONE;
526 if (interrupt & TPM_INTF_DATA_AVAIL_INT)
527 wake_up_interruptible(&chip->vendor.read_queue);
528 if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
529 for (i = 0; i < 5; i++)
530 if (check_locality(chip, i) >= 0)
531 break;
532 if (interrupt &
533 (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
534 TPM_INTF_CMD_READY_INT))
535 wake_up_interruptible(&chip->vendor.int_queue);
537 /* Clear interrupts handled with TPM_EOI */
538 iowrite32(interrupt,
539 chip->vendor.iobase +
540 TPM_INT_STATUS(chip->vendor.locality));
541 ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
542 return IRQ_HANDLED;
545 static int interrupts = 1;
546 module_param(interrupts, bool, 0444);
547 MODULE_PARM_DESC(interrupts, "Enable interrupts");
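/*
 * If 'interrupts' is enabled but no irq is handed in by the caller (e.g. no
 * PNP irq resource), tpm_tis_init() below first tries the vector suggested by
 * the TPM_INT_VECTOR register and otherwise probes irqs 3 through 15 one by
 * one using tis_int_probe().
 */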
549 static int tpm_tis_init(struct device *dev, resource_size_t start,
550 resource_size_t len, unsigned int irq)
552 u32 vendor, intfcaps, intmask;
553 int rc, i, irq_s, irq_e;
554 struct tpm_chip *chip;
556 if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
557 return -ENODEV;
559 chip->vendor.iobase = ioremap(start, len);
560 if (!chip->vendor.iobase) {
561 rc = -EIO;
562 goto out_err;
565 /* Default timeouts */
566 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
567 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
568 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
569 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
571 if (request_locality(chip, 0) != 0) {
572 rc = -ENODEV;
573 goto out_err;
576 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
578 dev_info(dev,
579 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
580 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
582 if (!itpm) {
583 itpm = probe_itpm(chip);
584 if (itpm < 0) {
585 rc = -ENODEV;
586 goto out_err;
590 if (itpm)
591 dev_info(dev, "Intel iTPM workaround enabled\n");
594 /* Figure out the capabilities */
595 intfcaps =
596 ioread32(chip->vendor.iobase +
597 TPM_INTF_CAPS(chip->vendor.locality));
598 dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
599 intfcaps);
600 if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
601 dev_dbg(dev, "\tBurst Count Static\n");
602 if (intfcaps & TPM_INTF_CMD_READY_INT)
603 dev_dbg(dev, "\tCommand Ready Int Support\n");
604 if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
605 dev_dbg(dev, "\tInterrupt Edge Falling\n");
606 if (intfcaps & TPM_INTF_INT_EDGE_RISING)
607 dev_dbg(dev, "\tInterrupt Edge Rising\n");
608 if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
609 dev_dbg(dev, "\tInterrupt Level Low\n");
610 if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
611 dev_dbg(dev, "\tInterrupt Level High\n");
612 if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
613 dev_dbg(dev, "\tLocality Change Int Support\n");
614 if (intfcaps & TPM_INTF_STS_VALID_INT)
615 dev_dbg(dev, "\tSts Valid Int Support\n");
616 if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
617 dev_dbg(dev, "\tData Avail Int Support\n");
619 /* get the timeouts before testing for irqs */
620 if (tpm_get_timeouts(chip)) {
621 dev_err(dev, "Could not get TPM timeouts and durations\n");
622 rc = -ENODEV;
623 goto out_err;
626 if (tpm_do_selftest(chip)) {
627 dev_err(dev, "TPM self test failed\n");
628 rc = -ENODEV;
629 goto out_err;
632 /* INTERRUPT Setup */
633 init_waitqueue_head(&chip->vendor.read_queue);
634 init_waitqueue_head(&chip->vendor.int_queue);
636 intmask =
637 ioread32(chip->vendor.iobase +
638 TPM_INT_ENABLE(chip->vendor.locality));
640 intmask |= TPM_INTF_CMD_READY_INT
641 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
642 | TPM_INTF_STS_VALID_INT;
644 iowrite32(intmask,
645 chip->vendor.iobase +
646 TPM_INT_ENABLE(chip->vendor.locality));
647 if (interrupts)
648 chip->vendor.irq = irq;
649 if (interrupts && !chip->vendor.irq) {
650 irq_s =
651 ioread8(chip->vendor.iobase +
652 TPM_INT_VECTOR(chip->vendor.locality));
653 if (irq_s) {
654 irq_e = irq_s;
655 } else {
656 irq_s = 3;
657 irq_e = 15;
660 for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
661 iowrite8(i, chip->vendor.iobase +
662 TPM_INT_VECTOR(chip->vendor.locality));
663 if (request_irq
664 (i, tis_int_probe, IRQF_SHARED,
665 chip->vendor.miscdev.name, chip) != 0) {
666 dev_info(chip->dev,
667 "Unable to request irq: %d for probe\n",
669 continue;
672 /* Clear all existing */
673 iowrite32(ioread32
674 (chip->vendor.iobase +
675 TPM_INT_STATUS(chip->vendor.locality)),
676 chip->vendor.iobase +
677 TPM_INT_STATUS(chip->vendor.locality));
679 /* Turn on */
680 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
681 chip->vendor.iobase +
682 TPM_INT_ENABLE(chip->vendor.locality));
684 chip->vendor.probed_irq = 0;
686 /* Generate Interrupts */
687 tpm_gen_interrupt(chip);
689 chip->vendor.irq = chip->vendor.probed_irq;
691 /* free_irq will call into tis_int_probe;
692 clear all irqs we haven't seen while doing
693 tpm_gen_interrupt */
694 iowrite32(ioread32
695 (chip->vendor.iobase +
696 TPM_INT_STATUS(chip->vendor.locality)),
697 chip->vendor.iobase +
698 TPM_INT_STATUS(chip->vendor.locality));
700 /* Turn off */
701 iowrite32(intmask,
702 chip->vendor.iobase +
703 TPM_INT_ENABLE(chip->vendor.locality));
704 free_irq(i, chip);
707 if (chip->vendor.irq) {
708 iowrite8(chip->vendor.irq,
709 chip->vendor.iobase +
710 TPM_INT_VECTOR(chip->vendor.locality));
711 if (request_irq
712 (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
713 chip->vendor.miscdev.name, chip) != 0) {
714 dev_info(chip->dev,
715 "Unable to request irq: %d for use\n",
716 chip->vendor.irq);
717 chip->vendor.irq = 0;
718 } else {
719 /* Clear all existing */
720 iowrite32(ioread32
721 (chip->vendor.iobase +
722 TPM_INT_STATUS(chip->vendor.locality)),
723 chip->vendor.iobase +
724 TPM_INT_STATUS(chip->vendor.locality));
726 /* Turn on */
727 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
728 chip->vendor.iobase +
729 TPM_INT_ENABLE(chip->vendor.locality));
733 INIT_LIST_HEAD(&chip->vendor.list);
734 spin_lock(&tis_lock);
735 list_add(&chip->vendor.list, &tis_chips);
736 spin_unlock(&tis_lock);
739 return 0;
740 out_err:
741 if (chip->vendor.iobase)
742 iounmap(chip->vendor.iobase);
743 tpm_remove_hardware(chip->dev);
744 return rc;
747 static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
749 u32 intmask;
751 /* reenable interrupts that device may have lost or
752 BIOS/firmware may have disabled */
753 iowrite8(chip->vendor.irq, chip->vendor.iobase +
754 TPM_INT_VECTOR(chip->vendor.locality));
756 intmask =
757 ioread32(chip->vendor.iobase +
758 TPM_INT_ENABLE(chip->vendor.locality));
760 intmask |= TPM_INTF_CMD_READY_INT
761 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
762 | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
764 iowrite32(intmask,
765 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
769 #ifdef CONFIG_PNP
770 static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
771 const struct pnp_device_id *pnp_id)
773 resource_size_t start, len;
774 unsigned int irq = 0;
776 start = pnp_mem_start(pnp_dev, 0);
777 len = pnp_mem_len(pnp_dev, 0);
779 if (pnp_irq_valid(pnp_dev, 0))
780 irq = pnp_irq(pnp_dev, 0);
781 else
782 interrupts = 0;
784 if (is_itpm(pnp_dev))
785 itpm = 1;
787 return tpm_tis_init(&pnp_dev->dev, start, len, irq);
790 static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
792 return tpm_pm_suspend(&dev->dev, msg);
795 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
797 struct tpm_chip *chip = pnp_get_drvdata(dev);
798 int ret;
800 if (chip->vendor.irq)
801 tpm_tis_reenable_interrupts(chip);
803 ret = tpm_pm_resume(&dev->dev);
804 if (!ret)
805 tpm_do_selftest(chip);
807 return ret;
810 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
811 {"PNP0C31", 0}, /* TPM */
812 {"ATM1200", 0}, /* Atmel */
813 {"IFX0102", 0}, /* Infineon */
814 {"BCM0101", 0}, /* Broadcom */
815 {"BCM0102", 0}, /* Broadcom */
816 {"NSC1200", 0}, /* National */
817 {"ICO0102", 0}, /* Intel */
818 /* Add new here */
819 {"", 0}, /* User Specified */
820 {"", 0} /* Terminator */
822 MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
824 static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
826 struct tpm_chip *chip = pnp_get_drvdata(dev);
828 tpm_dev_vendor_release(chip);
830 kfree(chip);
834 static struct pnp_driver tis_pnp_driver = {
835 .name = "tpm_tis",
836 .id_table = tpm_pnp_tbl,
837 .probe = tpm_tis_pnp_init,
838 .suspend = tpm_tis_pnp_suspend,
839 .resume = tpm_tis_pnp_resume,
840 .remove = tpm_tis_pnp_remove,
841 };
843 #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
844 module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
845 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
846 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
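/*
 * The 'hid' parameter fills the "User Specified" slot of tpm_pnp_tbl so an
 * extra PNP/ACPI id can be matched at load time, e.g. (placeholder id):
 *
 *   modprobe tpm_tis hid=ABC0001
 */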
847 #endif
848 static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
850 return tpm_pm_suspend(&dev->dev, msg);
853 static int tpm_tis_resume(struct platform_device *dev)
855 struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
857 if (chip->vendor.irq)
858 tpm_tis_reenable_interrupts(chip);
860 return tpm_pm_resume(&dev->dev);
862 static struct platform_driver tis_drv = {
863 .driver = {
864 .name = "tpm_tis",
865 .owner = THIS_MODULE,
866 },
867 .suspend = tpm_tis_suspend,
868 .resume = tpm_tis_resume,
869 };
871 static struct platform_device *pdev;
873 static int force;
874 module_param(force, bool, 0444);
875 MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
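/*
 * With force=1 the PNP/ACPI probe path is bypassed: init_tis() below
 * registers a bare platform device and maps the registers at the default
 * TIS_MEM_BASE (0xFED40000).  For example, to force a probe with polling
 * instead of interrupts:
 *
 *   modprobe tpm_tis force=1 interrupts=0
 */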
876 static int __init init_tis(void)
878 int rc;
879 #ifdef CONFIG_PNP
880 if (!force)
881 return pnp_register_driver(&tis_pnp_driver);
882 #endif
884 rc = platform_driver_register(&tis_drv);
885 if (rc < 0)
886 return rc;
887 if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
888 return PTR_ERR(pdev);
889 if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
890 platform_device_unregister(pdev);
891 platform_driver_unregister(&tis_drv);
893 return rc;
896 static void __exit cleanup_tis(void)
898 struct tpm_vendor_specific *i, *j;
899 struct tpm_chip *chip;
900 spin_lock(&tis_lock);
901 list_for_each_entry_safe(i, j, &tis_chips, list) {
902 chip = to_tpm_chip(i);
903 tpm_remove_hardware(chip->dev);
904 iowrite32(~TPM_GLOBAL_INT_ENABLE &
905 ioread32(chip->vendor.iobase +
906 TPM_INT_ENABLE(chip->vendor.
907 locality)),
908 chip->vendor.iobase +
909 TPM_INT_ENABLE(chip->vendor.locality));
910 release_locality(chip, chip->vendor.locality, 1);
911 if (chip->vendor.irq)
912 free_irq(chip->vendor.irq, chip);
913 iounmap(i->iobase);
914 list_del(&i->list);
916 spin_unlock(&tis_lock);
917 #ifdef CONFIG_PNP
918 if (!force) {
919 pnp_unregister_driver(&tis_pnp_driver);
920 return;
922 #endif
923 platform_device_unregister(pdev);
924 platform_driver_unregister(&tis_drv);
927 module_init(init_tis);
928 module_exit(cleanup_tis);
929 MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
930 MODULE_DESCRIPTION("TPM Driver");
931 MODULE_VERSION("2.0");
932 MODULE_LICENSE("GPL");