Commit b5666502700855a1eb1a15482005b22478b9460e

Authored by Ashley Lai
Committed by Kent Yoder
1 parent 61d335dd27

drivers/char/tpm: remove tasklet and cleanup

This patch removes the tasklet and moves the wait queue into the
driver's private structure.  It also cleans up the response CRQ path:
the interrupt handler now processes response CRQs directly and wakes
the waiter once the response length has been recorded.

Signed-off-by: Ashley Lai <adlai@us.ibm.com>
Signed-off-by: Kent Yoder <key@linux.vnet.ibm.com>
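
The shape of the change may be easier to see as a condensed sketch. The
fragment below only illustrates the pattern the patch moves to (a per-device
wait queue woken from the interrupt handler, instead of a file-scope wait
queue plus a tasklet); the names ibmvtpm_dev_sketch, sketch_init, sketch_irq,
sketch_recv and the literal length value are placeholders for illustration,
not code taken from the driver.

/*
 * Sketch only: per-device wait queue woken from the IRQ handler.
 * Locking and CRQ-ring parsing are omitted.
 */
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/wait.h>

struct ibmvtpm_dev_sketch {
	wait_queue_head_t wq;	/* was a file-scope DECLARE_WAIT_QUEUE_HEAD(wq) */
	u16 res_len;		/* replaces the cached struct ibmvtpm_crq crq_res */
};

/* probe-time setup: the wait queue now lives in the per-device struct */
static void sketch_init(struct ibmvtpm_dev_sketch *vtpm)
{
	init_waitqueue_head(&vtpm->wq);
	vtpm->res_len = 0;
}

/* IRQ handler: handle response CRQs in place instead of scheduling a tasklet */
static irqreturn_t sketch_irq(int irq, void *instance)
{
	struct ibmvtpm_dev_sketch *vtpm = instance;

	/* ...drain the CRQ ring here; for a TPM command response: */
	vtpm->res_len = 42;			/* placeholder for the length reported by the CRQ */
	wake_up_interruptible(&vtpm->wq);	/* unblock the reader sleeping in recv */
	return IRQ_HANDLED;
}

/* recv path: sleep on the per-device queue until the IRQ posts a length */
static int sketch_recv(struct ibmvtpm_dev_sketch *vtpm)
{
	if (wait_event_interruptible(vtpm->wq, vtpm->res_len != 0))
		return -EINTR;	/* interrupted by a signal */
	return vtpm->res_len;
}

In the real driver, as the diff below shows, the interrupt handler walks the
CRQ ring with ibmvtpm_crq_get_next()/ibmvtpm_crq_process(), and the recv path
now checks the return value of wait_event_interruptible() so a signal yields
-EINTR instead of a read of stale data.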

Showing 2 changed files with 30 additions and 56 deletions

drivers/char/tpm/tpm_ibmvtpm.c
 /*
  * Copyright (C) 2012 IBM Corporation
  *
  * Author: Ashley Lai <adlai@us.ibm.com>
  *
  * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  *
  * Device driver for TCG/TCPA TPM (trusted platform module).
  * Specifications at www.trustedcomputinggroup.org
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation, version 2 of the
  * License.
  *
  */

 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
 #include <linux/slab.h>
 #include <asm/vio.h>
 #include <asm/irq.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
 #include <asm/prom.h>

 #include "tpm.h"
 #include "tpm_ibmvtpm.h"

 static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

 static struct vio_device_id tpm_ibmvtpm_device_table[] __devinitdata = {
 	{ "IBM,vtpm", "IBM,vtpm"},
 	{ "", "" }
 };
 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

-DECLARE_WAIT_QUEUE_HEAD(wq);
-
 /**
  * ibmvtpm_send_crq - Send a CRQ request
  * @vdev:	vio device struct
  * @w1:	first word
  * @w2:	second word
  *
  * Return value:
  *	0 -Sucess
  *	Non-zero - Failure
  */
 static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
 {
 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
 }

 /**
  * ibmvtpm_get_data - Retrieve ibm vtpm data
  * @dev:	device struct
  *
  * Return value:
  *	vtpm device struct
  */
 static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 	if (chip)
 		return (struct ibmvtpm_dev *)chip->vendor.data;
 	return NULL;
 }

 /**
  * tpm_ibmvtpm_recv - Receive data after send
  * @chip:	tpm chip struct
  * @buf:	buffer to read
  * count:	size of buffer
  *
  * Return value:
  *	Number of bytes read
  */
 static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm;
 	u16 len;
+	int sig;

 	ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;

 	if (!ibmvtpm->rtce_buf) {
 		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
 		return 0;
 	}

-	wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+	sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+	if (sig)
+		return -EINTR;

-	if (count < ibmvtpm->crq_res.len) {
+	len = ibmvtpm->res_len;
+
+	if (count < len) {
 		dev_err(ibmvtpm->dev,
 			"Invalid size in recv: count=%ld, crq_size=%d\n",
-			count, ibmvtpm->crq_res.len);
+			count, len);
 		return -EIO;
 	}

 	spin_lock(&ibmvtpm->rtce_lock);
-	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
-	memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
-	ibmvtpm->crq_res.valid = 0;
-	ibmvtpm->crq_res.msg = 0;
-	len = ibmvtpm->crq_res.len;
-	ibmvtpm->crq_res.len = 0;
+	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+	memset(ibmvtpm->rtce_buf, 0, len);
+	ibmvtpm->res_len = 0;
 	spin_unlock(&ibmvtpm->rtce_lock);
 	return len;
 }

 /**
  * tpm_ibmvtpm_send - Send tpm request
  * @chip:	tpm chip struct
  * @buf:	buffer contains data to send
  * count:	size of buffer
  *
  * Return value:
  *	Number of bytes sent
  */
 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm;
 	struct ibmvtpm_crq crq;
 	u64 *word = (u64 *) &crq;
 	int rc;

 	ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;

 	if (!ibmvtpm->rtce_buf) {
 		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
 		return 0;
 	}

 	if (count > ibmvtpm->rtce_size) {
 		dev_err(ibmvtpm->dev,
 			"Invalid size in send: count=%ld, rtce_size=%d\n",
 			count, ibmvtpm->rtce_size);
 		return -EIO;
 	}

 	spin_lock(&ibmvtpm->rtce_lock);
 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_TPM_COMMAND;
 	crq.len = (u16)count;
 	crq.data = ibmvtpm->rtce_dma_handle;

 	rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
 	if (rc != H_SUCCESS) {
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
 		rc = 0;
 	} else
 		rc = count;

 	spin_unlock(&ibmvtpm->rtce_lock);
 	return rc;
 }

 static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
 {
 	return;
 }

 static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
 {
 	return 0;
 }

 /**
  * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
  * @ibmvtpm:	vtpm device struct
  *
  * Return value:
  *	0 - Success
  *	Non-zero - Failure
  */
 static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
 {
 	struct ibmvtpm_crq crq;
 	u64 *buf = (u64 *) &crq;
 	int rc;

 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

 	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

 	return rc;
 }

 /**
  * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
  *			   - Note that this is vtpm version and not tpm version
  * @ibmvtpm:	vtpm device struct
  *
  * Return value:
  *	0 - Success
  *	Non-zero - Failure
  */
 static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
 {
 	struct ibmvtpm_crq crq;
 	u64 *buf = (u64 *) &crq;
 	int rc;

 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_GET_VERSION;

 	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

 	return rc;
 }

 /**
  * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
  * @ibmvtpm:	vtpm device struct
  *
  * Return value:
  *	0 - Success
  *	Non-zero - Failure
  */
 static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
 {
 	int rc;

 	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

 	return rc;
 }

 /**
  * ibmvtpm_crq_send_init - Send a CRQ initialize message
  * @ibmvtpm:	vtpm device struct
  *
  * Return value:
  *	0 - Success
  *	Non-zero - Failure
  */
 static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
 {
 	int rc;

 	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

 	return rc;
 }

 /**
  * tpm_ibmvtpm_remove - ibm vtpm remove entry point
  * @vdev:	vio device struct
  *
  * Return value:
  *	0
  */
 static int __devexit tpm_ibmvtpm_remove(struct vio_dev *vdev)
 {
 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
 	int rc = 0;

 	free_irq(vdev->irq, ibmvtpm);
-	tasklet_kill(&ibmvtpm->tasklet);

 	do {
 		if (rc)
 			msleep(100);
 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

 	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
 			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
 	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

 	if (ibmvtpm->rtce_buf) {
 		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
 				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
 		kfree(ibmvtpm->rtce_buf);
 	}

 	tpm_remove_hardware(ibmvtpm->dev);

 	kfree(ibmvtpm);

 	return 0;
 }

 /**
  * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
  * @vdev:	vio device struct
  *
  * Return value:
  *	Number of bytes the driver needs to DMA map
  */
 static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
 {
 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
 	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
 }

 /**
  * tpm_ibmvtpm_suspend - Suspend
  * @dev:	device struct
  *
  * Return value:
  *	0
  */
 static int tpm_ibmvtpm_suspend(struct device *dev)
 {
 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
 	struct ibmvtpm_crq crq;
 	u64 *buf = (u64 *) &crq;
 	int rc = 0;

 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

 	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

 	return rc;
 }

 /**
  * ibmvtpm_reset_crq - Reset CRQ
  * @ibmvtpm:	ibm vtpm struct
  *
  * Return value:
  *	0 - Success
  *	Non-zero - Failure
  */
 static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
 {
 	int rc = 0;

 	do {
 		if (rc)
 			msleep(100);
 		rc = plpar_hcall_norets(H_FREE_CRQ,
 					ibmvtpm->vdev->unit_address);
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

 	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
 	ibmvtpm->crq_queue.index = 0;

 	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
 				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
 }

 /**
  * tpm_ibmvtpm_resume - Resume from suspend
  * @dev:	device struct
  *
  * Return value:
  *	0
  */
 static int tpm_ibmvtpm_resume(struct device *dev)
 {
 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
-	unsigned long flags;
 	int rc = 0;

 	do {
 		if (rc)
 			msleep(100);
 		rc = plpar_hcall_norets(H_ENABLE_CRQ,
 					ibmvtpm->vdev->unit_address);
 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

 	if (rc) {
 		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
 		return rc;
 	}

-	spin_lock_irqsave(&ibmvtpm->lock, flags);
-	vio_disable_interrupts(ibmvtpm->vdev);
-	tasklet_schedule(&ibmvtpm->tasklet);
-	spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+	rc = vio_enable_interrupts(ibmvtpm->vdev);
+	if (rc) {
+		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+		return rc;
+	}

 	rc = ibmvtpm_crq_send_init(ibmvtpm);
 	if (rc)
 		dev_err(dev, "Error send_init rc=%d\n", rc);

 	return rc;
 }

 static const struct file_operations ibmvtpm_ops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
 	.open = tpm_open,
 	.read = tpm_read,
 	.write = tpm_write,
 	.release = tpm_release,
 };

 static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
 static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
 static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
 static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
 static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
 static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
 		   NULL);
 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

 static struct attribute *ibmvtpm_attrs[] = {
 	&dev_attr_pubek.attr,
 	&dev_attr_pcrs.attr,
 	&dev_attr_enabled.attr,
 	&dev_attr_active.attr,
 	&dev_attr_owned.attr,
 	&dev_attr_temp_deactivated.attr,
 	&dev_attr_caps.attr,
 	&dev_attr_cancel.attr,
 	&dev_attr_durations.attr,
 	&dev_attr_timeouts.attr, NULL,
 };

 static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };

 static const struct tpm_vendor_specific tpm_ibmvtpm = {
 	.recv = tpm_ibmvtpm_recv,
 	.send = tpm_ibmvtpm_send,
 	.cancel = tpm_ibmvtpm_cancel,
 	.status = tpm_ibmvtpm_status,
 	.req_complete_mask = 0,
 	.req_complete_val = 0,
 	.req_canceled = 0,
 	.attr_group = &ibmvtpm_attr_grp,
 	.miscdev = { .fops = &ibmvtpm_ops, },
 };

 static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
 	.suspend = tpm_ibmvtpm_suspend,
 	.resume = tpm_ibmvtpm_resume,
 };

 /**
  * ibmvtpm_crq_get_next - Get next responded crq
  * @ibmvtpm	vtpm device struct
  *
  * Return value:
  *	vtpm crq pointer
  */
 static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
 {
 	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
 	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

 	if (crq->valid & VTPM_MSG_RES) {
 		if (++crq_q->index == crq_q->num_entry)
 			crq_q->index = 0;
-		rmb();
+		smp_rmb();
 	} else
 		crq = NULL;
 	return crq;
 }

 /**
  * ibmvtpm_crq_process - Process responded crq
  * @crq	crq to be processed
  * @ibmvtpm	vtpm device struct
  *
  * Return value:
  *	Nothing
  */
 static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
 				struct ibmvtpm_dev *ibmvtpm)
 {
 	int rc = 0;

 	switch (crq->valid) {
 	case VALID_INIT_CRQ:
 		switch (crq->msg) {
 		case INIT_CRQ_RES:
 			dev_info(ibmvtpm->dev, "CRQ initialized\n");
 			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
 			if (rc)
 				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
 			return;
 		case INIT_CRQ_COMP_RES:
 			dev_info(ibmvtpm->dev,
 				 "CRQ initialization completed\n");
 			return;
 		default:
 			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
 			return;
 		}
 		return;
 	case IBMVTPM_VALID_CMD:
 		switch (crq->msg) {
 		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
 			if (crq->len <= 0) {
 				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
 				return;
 			}
 			ibmvtpm->rtce_size = crq->len;
 			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
 						    GFP_KERNEL);
 			if (!ibmvtpm->rtce_buf) {
 				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
 				return;
 			}

 			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
 				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
 				DMA_BIDIRECTIONAL);

 			if (dma_mapping_error(ibmvtpm->dev,
 					      ibmvtpm->rtce_dma_handle)) {
 				kfree(ibmvtpm->rtce_buf);
 				ibmvtpm->rtce_buf = NULL;
 				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
 			}

 			return;
 		case VTPM_GET_VERSION_RES:
 			ibmvtpm->vtpm_version = crq->data;
 			return;
 		case VTPM_TPM_COMMAND_RES:
-			ibmvtpm->crq_res.valid = crq->valid;
-			ibmvtpm->crq_res.msg = crq->msg;
-			ibmvtpm->crq_res.len = crq->len;
-			ibmvtpm->crq_res.data = crq->data;
-			wake_up_interruptible(&wq);
+			/* len of the data in rtce buffer */
+			ibmvtpm->res_len = crq->len;
+			wake_up_interruptible(&ibmvtpm->wq);
 			return;
 		default:
 			return;
 		}
 	}
 	return;
 }

 /**
  * ibmvtpm_interrupt -	Interrupt handler
  * @irq:		irq number to handle
  * @vtpm_instance:	vtpm that received interrupt
  *
  * Returns:
  *	IRQ_HANDLED
  **/
 static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
 {
 	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ibmvtpm->lock, flags);
-	vio_disable_interrupts(ibmvtpm->vdev);
-	tasklet_schedule(&ibmvtpm->tasklet);
-	spin_unlock_irqrestore(&ibmvtpm->lock, flags);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * ibmvtpm_tasklet - Interrupt handler tasklet
- * @data:	ibm vtpm device struct
- *
- * Returns:
- *	Nothing
- **/
-static void ibmvtpm_tasklet(void *data)
-{
-	struct ibmvtpm_dev *ibmvtpm = data;
 	struct ibmvtpm_crq *crq;
-	unsigned long flags;

-	spin_lock_irqsave(&ibmvtpm->lock, flags);
+	/* while loop is needed for initial setup (get version and
+	 * get rtce_size). There should be only one tpm request at any
+	 * given time.
+	 */
 	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
 		ibmvtpm_crq_process(crq, ibmvtpm);
 		crq->valid = 0;
-		wmb();
+		smp_wmb();
 	}

-	vio_enable_interrupts(ibmvtpm->vdev);
-	spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+	return IRQ_HANDLED;
 }

 /**
  * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
  * @vio_dev:	vio device struct
  * @id:		vio device id struct
  *
  * Return value:
  *	0 - Success
  *	Non-zero - Failure
  */
 static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 				   const struct vio_device_id *id)
 {
 	struct ibmvtpm_dev *ibmvtpm;
 	struct device *dev = &vio_dev->dev;
 	struct ibmvtpm_crq_queue *crq_q;
 	struct tpm_chip *chip;
 	int rc = -ENOMEM, rc1;

 	chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
 	if (!chip) {
 		dev_err(dev, "tpm_register_hardware failed\n");
 		return -ENODEV;
 	}

 	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
 	if (!ibmvtpm) {
 		dev_err(dev, "kzalloc for ibmvtpm failed\n");
 		goto cleanup;
 	}

 	crq_q = &ibmvtpm->crq_queue;
 	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
 	if (!crq_q->crq_addr) {
 		dev_err(dev, "Unable to allocate memory for crq_addr\n");
 		goto cleanup;
 	}

 	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
 	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
 						 CRQ_RES_BUF_SIZE,
 						 DMA_BIDIRECTIONAL);

 	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
 		dev_err(dev, "dma mapping failed\n");
 		goto cleanup;
 	}

 	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
 				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
 	if (rc == H_RESOURCE)
 		rc = ibmvtpm_reset_crq(ibmvtpm);

 	if (rc) {
 		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
 		goto reg_crq_cleanup;
 	}

-	tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
-		     (unsigned long)ibmvtpm);
-
 	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
 			 tpm_ibmvtpm_driver_name, ibmvtpm);
 	if (rc) {
 		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
 		goto init_irq_cleanup;
 	}

 	rc = vio_enable_interrupts(vio_dev);
 	if (rc) {
 		dev_err(dev, "Error %d enabling interrupts\n", rc);
 		goto init_irq_cleanup;
 	}

+	init_waitqueue_head(&ibmvtpm->wq);
+
 	crq_q->index = 0;

 	ibmvtpm->dev = dev;
 	ibmvtpm->vdev = vio_dev;
 	chip->vendor.data = (void *)ibmvtpm;

-	spin_lock_init(&ibmvtpm->lock);
 	spin_lock_init(&ibmvtpm->rtce_lock);

 	rc = ibmvtpm_crq_send_init(ibmvtpm);
 	if (rc)
 		goto init_irq_cleanup;

 	rc = ibmvtpm_crq_get_version(ibmvtpm);
 	if (rc)
 		goto init_irq_cleanup;

 	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
 	if (rc)
 		goto init_irq_cleanup;

 	return rc;
 init_irq_cleanup:
-	tasklet_kill(&ibmvtpm->tasklet);
 	do {
 		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
 	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
 reg_crq_cleanup:
 	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
 			 DMA_BIDIRECTIONAL);
 cleanup:
 	if (ibmvtpm) {
 		if (crq_q->crq_addr)
 			free_page((unsigned long)crq_q->crq_addr);
 		kfree(ibmvtpm);
 	}

 	tpm_remove_hardware(dev);

 	return rc;
 }

 static struct vio_driver ibmvtpm_driver = {
 	.id_table	 = tpm_ibmvtpm_device_table,
 	.probe		 = tpm_ibmvtpm_probe,
 	.remove		 = tpm_ibmvtpm_remove,
 	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
 	.name		 = tpm_ibmvtpm_driver_name,
 	.pm		 = &tpm_ibmvtpm_pm_ops,
 };

 /**
  * ibmvtpm_module_init - Initialize ibm vtpm module
  *
  * Return value:
  *	0 -Sucess
  *	Non-zero - Failure
  */
 static int __init ibmvtpm_module_init(void)
 {
 	return vio_register_driver(&ibmvtpm_driver);
 }

 /**
  * ibmvtpm_module_exit - Teardown ibm vtpm module
  *
  * Return value:
  *	Nothing
  */
 static void __exit ibmvtpm_module_exit(void)
 {
drivers/char/tpm/tpm_ibmvtpm.h
 /*
  * Copyright (C) 2012 IBM Corporation
  *
  * Author: Ashley Lai <adlai@us.ibm.com>
  *
  * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  *
  * Device driver for TCG/TCPA TPM (trusted platform module).
  * Specifications at www.trustedcomputinggroup.org
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation, version 2 of the
  * License.
  *
  */

 #ifndef __TPM_IBMVTPM_H__
 #define __TPM_IBMVTPM_H__

 /* vTPM Message Format 1 */
 struct ibmvtpm_crq {
 	u8 valid;
 	u8 msg;
 	u16 len;
 	u32 data;
 	u64 reserved;
 } __attribute__((packed, aligned(8)));

 struct ibmvtpm_crq_queue {
 	struct ibmvtpm_crq *crq_addr;
 	u32 index;
 	u32 num_entry;
 };

 struct ibmvtpm_dev {
 	struct device *dev;
 	struct vio_dev *vdev;
 	struct ibmvtpm_crq_queue crq_queue;
 	dma_addr_t crq_dma_handle;
-	spinlock_t lock;
-	struct tasklet_struct tasklet;
 	u32 rtce_size;
 	void __iomem *rtce_buf;
 	dma_addr_t rtce_dma_handle;
 	spinlock_t rtce_lock;
-	struct ibmvtpm_crq crq_res;
+	wait_queue_head_t wq;
+	u16 res_len;
 	u32 vtpm_version;
 };

 #define CRQ_RES_BUF_SIZE	PAGE_SIZE

 /* Initialize CRQ */
 #define INIT_CRQ_CMD		0xC001000000000000LL /* Init cmd */
 #define INIT_CRQ_COMP_CMD	0xC002000000000000LL /* Init complete cmd */
 #define INIT_CRQ_RES		0x01 /* Init respond */
 #define INIT_CRQ_COMP_RES	0x02 /* Init complete respond */
 #define VALID_INIT_CRQ		0xC0 /* Valid command for init crq */

 /* vTPM CRQ response is the message type | 0x80 */
 #define VTPM_MSG_RES		0x80
 #define IBMVTPM_VALID_CMD	0x80

 /* vTPM CRQ message types */
 #define VTPM_GET_VERSION		0x01
 #define VTPM_GET_VERSION_RES		(0x01 | VTPM_MSG_RES)

 #define VTPM_TPM_COMMAND		0x02
 #define VTPM_TPM_COMMAND_RES		(0x02 | VTPM_MSG_RES)

 #define VTPM_GET_RTCE_BUFFER_SIZE	0x03
 #define VTPM_GET_RTCE_BUFFER_SIZE_RES	(0x03 | VTPM_MSG_RES)

 #define VTPM_PREPARE_TO_SUSPEND		0x04
 #define VTPM_PREPARE_TO_SUSPEND_RES	(0x04 | VTPM_MSG_RES)

 #endif