Commit 86e4754ac8fde9a9c913571016bc31257aa2e195

Authored by Julia Lawall
Committed by Benjamin Herrenschmidt
1 parent f6d8c8bb1d

powerpc/pmac: Add missing unlocks in error path

In some error handling cases the lock is not unlocked.

A simplified version of the semantic patch that finds this problem is as
follows: (http://coccinelle.lip6.fr/)

// <smpl>
@r exists@
expression E1;
identifier f;
@@

f (...) { <+...
* spin_lock_irqsave (E1,...);
... when != E1
* return ...;
...+> }
// </smpl>

Signed-off-by: Julia Lawall <julia@diku.dk>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Showing 2 changed files with 4 additions and 1 deletion Inline Diff

drivers/macintosh/macio-adb.c
1 /* 1 /*
2 * Driver for the ADB controller in the Mac I/O (Hydra) chip. 2 * Driver for the ADB controller in the Mac I/O (Hydra) chip.
3 */ 3 */
4 #include <stdarg.h> 4 #include <stdarg.h>
5 #include <linux/types.h> 5 #include <linux/types.h>
6 #include <linux/errno.h> 6 #include <linux/errno.h>
7 #include <linux/kernel.h> 7 #include <linux/kernel.h>
8 #include <linux/delay.h> 8 #include <linux/delay.h>
9 #include <linux/spinlock.h> 9 #include <linux/spinlock.h>
10 #include <linux/interrupt.h> 10 #include <linux/interrupt.h>
11 #include <asm/prom.h> 11 #include <asm/prom.h>
12 #include <linux/adb.h> 12 #include <linux/adb.h>
13 #include <asm/io.h> 13 #include <asm/io.h>
14 #include <asm/pgtable.h> 14 #include <asm/pgtable.h>
15 #include <asm/hydra.h> 15 #include <asm/hydra.h>
16 #include <asm/irq.h> 16 #include <asm/irq.h>
17 #include <asm/system.h> 17 #include <asm/system.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/ioport.h> 19 #include <linux/ioport.h>
20 20
21 struct preg { 21 struct preg {
22 unsigned char r; 22 unsigned char r;
23 char pad[15]; 23 char pad[15];
24 }; 24 };
25 25
26 struct adb_regs { 26 struct adb_regs {
27 struct preg intr; 27 struct preg intr;
28 struct preg data[9]; 28 struct preg data[9];
29 struct preg intr_enb; 29 struct preg intr_enb;
30 struct preg dcount; 30 struct preg dcount;
31 struct preg error; 31 struct preg error;
32 struct preg ctrl; 32 struct preg ctrl;
33 struct preg autopoll; 33 struct preg autopoll;
34 struct preg active_hi; 34 struct preg active_hi;
35 struct preg active_lo; 35 struct preg active_lo;
36 struct preg test; 36 struct preg test;
37 }; 37 };
38 38
39 /* Bits in intr and intr_enb registers */ 39 /* Bits in intr and intr_enb registers */
40 #define DFB 1 /* data from bus */ 40 #define DFB 1 /* data from bus */
41 #define TAG 2 /* transfer access grant */ 41 #define TAG 2 /* transfer access grant */
42 42
43 /* Bits in dcount register */ 43 /* Bits in dcount register */
44 #define HMB 0x0f /* how many bytes */ 44 #define HMB 0x0f /* how many bytes */
45 #define APD 0x10 /* auto-poll data */ 45 #define APD 0x10 /* auto-poll data */
46 46
47 /* Bits in error register */ 47 /* Bits in error register */
48 #define NRE 1 /* no response error */ 48 #define NRE 1 /* no response error */
49 #define DLE 2 /* data lost error */ 49 #define DLE 2 /* data lost error */
50 50
51 /* Bits in ctrl register */ 51 /* Bits in ctrl register */
52 #define TAR 1 /* transfer access request */ 52 #define TAR 1 /* transfer access request */
53 #define DTB 2 /* data to bus */ 53 #define DTB 2 /* data to bus */
54 #define CRE 4 /* command response expected */ 54 #define CRE 4 /* command response expected */
55 #define ADB_RST 8 /* ADB reset */ 55 #define ADB_RST 8 /* ADB reset */
56 56
57 /* Bits in autopoll register */ 57 /* Bits in autopoll register */
58 #define APE 1 /* autopoll enable */ 58 #define APE 1 /* autopoll enable */
59 59
60 static volatile struct adb_regs __iomem *adb; 60 static volatile struct adb_regs __iomem *adb;
61 static struct adb_request *current_req, *last_req; 61 static struct adb_request *current_req, *last_req;
62 static DEFINE_SPINLOCK(macio_lock); 62 static DEFINE_SPINLOCK(macio_lock);
63 63
64 static int macio_probe(void); 64 static int macio_probe(void);
65 static int macio_init(void); 65 static int macio_init(void);
66 static irqreturn_t macio_adb_interrupt(int irq, void *arg); 66 static irqreturn_t macio_adb_interrupt(int irq, void *arg);
67 static int macio_send_request(struct adb_request *req, int sync); 67 static int macio_send_request(struct adb_request *req, int sync);
68 static int macio_adb_autopoll(int devs); 68 static int macio_adb_autopoll(int devs);
69 static void macio_adb_poll(void); 69 static void macio_adb_poll(void);
70 static int macio_adb_reset_bus(void); 70 static int macio_adb_reset_bus(void);
71 71
72 struct adb_driver macio_adb_driver = { 72 struct adb_driver macio_adb_driver = {
73 "MACIO", 73 "MACIO",
74 macio_probe, 74 macio_probe,
75 macio_init, 75 macio_init,
76 macio_send_request, 76 macio_send_request,
77 /*macio_write,*/ 77 /*macio_write,*/
78 macio_adb_autopoll, 78 macio_adb_autopoll,
79 macio_adb_poll, 79 macio_adb_poll,
80 macio_adb_reset_bus 80 macio_adb_reset_bus
81 }; 81 };
82 82
83 int macio_probe(void) 83 int macio_probe(void)
84 { 84 {
85 struct device_node *np; 85 struct device_node *np;
86 86
87 np = of_find_compatible_node(NULL, "adb", "chrp,adb0"); 87 np = of_find_compatible_node(NULL, "adb", "chrp,adb0");
88 if (np) { 88 if (np) {
89 of_node_put(np); 89 of_node_put(np);
90 return 0; 90 return 0;
91 } 91 }
92 return -ENODEV; 92 return -ENODEV;
93 } 93 }
94 94
95 int macio_init(void) 95 int macio_init(void)
96 { 96 {
97 struct device_node *adbs; 97 struct device_node *adbs;
98 struct resource r; 98 struct resource r;
99 unsigned int irq; 99 unsigned int irq;
100 100
101 adbs = of_find_compatible_node(NULL, "adb", "chrp,adb0"); 101 adbs = of_find_compatible_node(NULL, "adb", "chrp,adb0");
102 if (adbs == 0) 102 if (adbs == 0)
103 return -ENXIO; 103 return -ENXIO;
104 104
105 if (of_address_to_resource(adbs, 0, &r)) { 105 if (of_address_to_resource(adbs, 0, &r)) {
106 of_node_put(adbs); 106 of_node_put(adbs);
107 return -ENXIO; 107 return -ENXIO;
108 } 108 }
109 adb = ioremap(r.start, sizeof(struct adb_regs)); 109 adb = ioremap(r.start, sizeof(struct adb_regs));
110 110
111 out_8(&adb->ctrl.r, 0); 111 out_8(&adb->ctrl.r, 0);
112 out_8(&adb->intr.r, 0); 112 out_8(&adb->intr.r, 0);
113 out_8(&adb->error.r, 0); 113 out_8(&adb->error.r, 0);
114 out_8(&adb->active_hi.r, 0xff); /* for now, set all devices active */ 114 out_8(&adb->active_hi.r, 0xff); /* for now, set all devices active */
115 out_8(&adb->active_lo.r, 0xff); 115 out_8(&adb->active_lo.r, 0xff);
116 out_8(&adb->autopoll.r, APE); 116 out_8(&adb->autopoll.r, APE);
117 117
118 irq = irq_of_parse_and_map(adbs, 0); 118 irq = irq_of_parse_and_map(adbs, 0);
119 of_node_put(adbs); 119 of_node_put(adbs);
120 if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) { 120 if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) {
121 printk(KERN_ERR "ADB: can't get irq %d\n", irq); 121 printk(KERN_ERR "ADB: can't get irq %d\n", irq);
122 return -EAGAIN; 122 return -EAGAIN;
123 } 123 }
124 out_8(&adb->intr_enb.r, DFB | TAG); 124 out_8(&adb->intr_enb.r, DFB | TAG);
125 125
126 printk("adb: mac-io driver 1.0 for unified ADB\n"); 126 printk("adb: mac-io driver 1.0 for unified ADB\n");
127 127
128 return 0; 128 return 0;
129 } 129 }
130 130
131 static int macio_adb_autopoll(int devs) 131 static int macio_adb_autopoll(int devs)
132 { 132 {
133 unsigned long flags; 133 unsigned long flags;
134 134
135 spin_lock_irqsave(&macio_lock, flags); 135 spin_lock_irqsave(&macio_lock, flags);
136 out_8(&adb->active_hi.r, devs >> 8); 136 out_8(&adb->active_hi.r, devs >> 8);
137 out_8(&adb->active_lo.r, devs); 137 out_8(&adb->active_lo.r, devs);
138 out_8(&adb->autopoll.r, devs? APE: 0); 138 out_8(&adb->autopoll.r, devs? APE: 0);
139 spin_unlock_irqrestore(&macio_lock, flags); 139 spin_unlock_irqrestore(&macio_lock, flags);
140 return 0; 140 return 0;
141 } 141 }
142 142
143 static int macio_adb_reset_bus(void) 143 static int macio_adb_reset_bus(void)
144 { 144 {
145 unsigned long flags; 145 unsigned long flags;
146 int timeout = 1000000; 146 int timeout = 1000000;
147 147
148 /* Hrm... we may want to not lock interrupts for so 148 /* Hrm... we may want to not lock interrupts for so
149 * long ... oh well, who uses that chip anyway ? :) 149 * long ... oh well, who uses that chip anyway ? :)
150 * That function will be seldomly used during boot 150 * That function will be seldomly used during boot
151 * on rare machines, so... 151 * on rare machines, so...
152 */ 152 */
153 spin_lock_irqsave(&macio_lock, flags); 153 spin_lock_irqsave(&macio_lock, flags);
154 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | ADB_RST); 154 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | ADB_RST);
155 while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) { 155 while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
156 if (--timeout == 0) { 156 if (--timeout == 0) {
157 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST); 157 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
158 spin_unlock_irqrestore(&macio_lock, flags);
158 return -1; 159 return -1;
159 } 160 }
160 } 161 }
161 spin_unlock_irqrestore(&macio_lock, flags); 162 spin_unlock_irqrestore(&macio_lock, flags);
162 return 0; 163 return 0;
163 } 164 }
164 165
165 /* Send an ADB command */ 166 /* Send an ADB command */
166 static int macio_send_request(struct adb_request *req, int sync) 167 static int macio_send_request(struct adb_request *req, int sync)
167 { 168 {
168 unsigned long flags; 169 unsigned long flags;
169 int i; 170 int i;
170 171
171 if (req->data[0] != ADB_PACKET) 172 if (req->data[0] != ADB_PACKET)
172 return -EINVAL; 173 return -EINVAL;
173 174
174 for (i = 0; i < req->nbytes - 1; ++i) 175 for (i = 0; i < req->nbytes - 1; ++i)
175 req->data[i] = req->data[i+1]; 176 req->data[i] = req->data[i+1];
176 --req->nbytes; 177 --req->nbytes;
177 178
178 req->next = NULL; 179 req->next = NULL;
179 req->sent = 0; 180 req->sent = 0;
180 req->complete = 0; 181 req->complete = 0;
181 req->reply_len = 0; 182 req->reply_len = 0;
182 183
183 spin_lock_irqsave(&macio_lock, flags); 184 spin_lock_irqsave(&macio_lock, flags);
184 if (current_req != 0) { 185 if (current_req != 0) {
185 last_req->next = req; 186 last_req->next = req;
186 last_req = req; 187 last_req = req;
187 } else { 188 } else {
188 current_req = last_req = req; 189 current_req = last_req = req;
189 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR); 190 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
190 } 191 }
191 spin_unlock_irqrestore(&macio_lock, flags); 192 spin_unlock_irqrestore(&macio_lock, flags);
192 193
193 if (sync) { 194 if (sync) {
194 while (!req->complete) 195 while (!req->complete)
195 macio_adb_poll(); 196 macio_adb_poll();
196 } 197 }
197 198
198 return 0; 199 return 0;
199 } 200 }
200 201
201 static irqreturn_t macio_adb_interrupt(int irq, void *arg) 202 static irqreturn_t macio_adb_interrupt(int irq, void *arg)
202 { 203 {
203 int i, n, err; 204 int i, n, err;
204 struct adb_request *req = NULL; 205 struct adb_request *req = NULL;
205 unsigned char ibuf[16]; 206 unsigned char ibuf[16];
206 int ibuf_len = 0; 207 int ibuf_len = 0;
207 int complete = 0; 208 int complete = 0;
208 int autopoll = 0; 209 int autopoll = 0;
209 int handled = 0; 210 int handled = 0;
210 211
211 spin_lock(&macio_lock); 212 spin_lock(&macio_lock);
212 if (in_8(&adb->intr.r) & TAG) { 213 if (in_8(&adb->intr.r) & TAG) {
213 handled = 1; 214 handled = 1;
214 if ((req = current_req) != 0) { 215 if ((req = current_req) != 0) {
215 /* put the current request in */ 216 /* put the current request in */
216 for (i = 0; i < req->nbytes; ++i) 217 for (i = 0; i < req->nbytes; ++i)
217 out_8(&adb->data[i].r, req->data[i]); 218 out_8(&adb->data[i].r, req->data[i]);
218 out_8(&adb->dcount.r, req->nbytes & HMB); 219 out_8(&adb->dcount.r, req->nbytes & HMB);
219 req->sent = 1; 220 req->sent = 1;
220 if (req->reply_expected) { 221 if (req->reply_expected) {
221 out_8(&adb->ctrl.r, DTB + CRE); 222 out_8(&adb->ctrl.r, DTB + CRE);
222 } else { 223 } else {
223 out_8(&adb->ctrl.r, DTB); 224 out_8(&adb->ctrl.r, DTB);
224 current_req = req->next; 225 current_req = req->next;
225 complete = 1; 226 complete = 1;
226 if (current_req) 227 if (current_req)
227 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR); 228 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
228 } 229 }
229 } 230 }
230 out_8(&adb->intr.r, 0); 231 out_8(&adb->intr.r, 0);
231 } 232 }
232 233
233 if (in_8(&adb->intr.r) & DFB) { 234 if (in_8(&adb->intr.r) & DFB) {
234 handled = 1; 235 handled = 1;
235 err = in_8(&adb->error.r); 236 err = in_8(&adb->error.r);
236 if (current_req && current_req->sent) { 237 if (current_req && current_req->sent) {
237 /* this is the response to a command */ 238 /* this is the response to a command */
238 req = current_req; 239 req = current_req;
239 if (err == 0) { 240 if (err == 0) {
240 req->reply_len = in_8(&adb->dcount.r) & HMB; 241 req->reply_len = in_8(&adb->dcount.r) & HMB;
241 for (i = 0; i < req->reply_len; ++i) 242 for (i = 0; i < req->reply_len; ++i)
242 req->reply[i] = in_8(&adb->data[i].r); 243 req->reply[i] = in_8(&adb->data[i].r);
243 } 244 }
244 current_req = req->next; 245 current_req = req->next;
245 complete = 1; 246 complete = 1;
246 if (current_req) 247 if (current_req)
247 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR); 248 out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
248 } else if (err == 0) { 249 } else if (err == 0) {
249 /* autopoll data */ 250 /* autopoll data */
250 n = in_8(&adb->dcount.r) & HMB; 251 n = in_8(&adb->dcount.r) & HMB;
251 for (i = 0; i < n; ++i) 252 for (i = 0; i < n; ++i)
252 ibuf[i] = in_8(&adb->data[i].r); 253 ibuf[i] = in_8(&adb->data[i].r);
253 ibuf_len = n; 254 ibuf_len = n;
254 autopoll = (in_8(&adb->dcount.r) & APD) != 0; 255 autopoll = (in_8(&adb->dcount.r) & APD) != 0;
255 } 256 }
256 out_8(&adb->error.r, 0); 257 out_8(&adb->error.r, 0);
257 out_8(&adb->intr.r, 0); 258 out_8(&adb->intr.r, 0);
258 } 259 }
259 spin_unlock(&macio_lock); 260 spin_unlock(&macio_lock);
260 if (complete && req) { 261 if (complete && req) {
261 void (*done)(struct adb_request *) = req->done; 262 void (*done)(struct adb_request *) = req->done;
262 mb(); 263 mb();
263 req->complete = 1; 264 req->complete = 1;
264 /* Here, we assume that if the request has a done member, the 265 /* Here, we assume that if the request has a done member, the
265 * struct request will survive to setting req->complete to 1 266 * struct request will survive to setting req->complete to 1
266 */ 267 */
267 if (done) 268 if (done)
268 (*done)(req); 269 (*done)(req);
269 } 270 }
270 if (ibuf_len) 271 if (ibuf_len)
271 adb_input(ibuf, ibuf_len, autopoll); 272 adb_input(ibuf, ibuf_len, autopoll);
272 273
273 return IRQ_RETVAL(handled); 274 return IRQ_RETVAL(handled);
274 } 275 }
275 276
276 static void macio_adb_poll(void) 277 static void macio_adb_poll(void)
277 { 278 {
278 unsigned long flags; 279 unsigned long flags;
279 280
280 local_irq_save(flags); 281 local_irq_save(flags);
281 if (in_8(&adb->intr.r) != 0) 282 if (in_8(&adb->intr.r) != 0)
282 macio_adb_interrupt(0, NULL); 283 macio_adb_interrupt(0, NULL);
283 local_irq_restore(flags); 284 local_irq_restore(flags);
284 } 285 }
285 286
drivers/macintosh/smu.c
1 /* 1 /*
2 * PowerMac G5 SMU driver 2 * PowerMac G5 SMU driver
3 * 3 *
4 * Copyright 2004 J. Mayer <l_indien@magic.fr> 4 * Copyright 2004 J. Mayer <l_indien@magic.fr>
5 * Copyright 2005 Benjamin Herrenschmidt, IBM Corp. 5 * Copyright 2005 Benjamin Herrenschmidt, IBM Corp.
6 * 6 *
7 * Released under the term of the GNU GPL v2. 7 * Released under the term of the GNU GPL v2.
8 */ 8 */
9 9
10 /* 10 /*
11 * TODO: 11 * TODO:
12 * - maybe add timeout to commands ? 12 * - maybe add timeout to commands ?
13 * - blocking version of time functions 13 * - blocking version of time functions
14 * - polling version of i2c commands (including timer that works with 14 * - polling version of i2c commands (including timer that works with
15 * interrupts off) 15 * interrupts off)
16 * - maybe avoid some data copies with i2c by directly using the smu cmd 16 * - maybe avoid some data copies with i2c by directly using the smu cmd
17 * buffer and a lower level internal interface 17 * buffer and a lower level internal interface
18 * - understand SMU -> CPU events and implement reception of them via 18 * - understand SMU -> CPU events and implement reception of them via
19 * the userland interface 19 * the userland interface
20 */ 20 */
21 21
22 #include <linux/smp_lock.h> 22 #include <linux/smp_lock.h>
23 #include <linux/types.h> 23 #include <linux/types.h>
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/device.h> 25 #include <linux/device.h>
26 #include <linux/dmapool.h> 26 #include <linux/dmapool.h>
27 #include <linux/bootmem.h> 27 #include <linux/bootmem.h>
28 #include <linux/vmalloc.h> 28 #include <linux/vmalloc.h>
29 #include <linux/highmem.h> 29 #include <linux/highmem.h>
30 #include <linux/jiffies.h> 30 #include <linux/jiffies.h>
31 #include <linux/interrupt.h> 31 #include <linux/interrupt.h>
32 #include <linux/rtc.h> 32 #include <linux/rtc.h>
33 #include <linux/completion.h> 33 #include <linux/completion.h>
34 #include <linux/miscdevice.h> 34 #include <linux/miscdevice.h>
35 #include <linux/delay.h> 35 #include <linux/delay.h>
36 #include <linux/sysdev.h> 36 #include <linux/sysdev.h>
37 #include <linux/poll.h> 37 #include <linux/poll.h>
38 #include <linux/mutex.h> 38 #include <linux/mutex.h>
39 #include <linux/of_device.h> 39 #include <linux/of_device.h>
40 #include <linux/of_platform.h> 40 #include <linux/of_platform.h>
41 #include <linux/slab.h> 41 #include <linux/slab.h>
42 42
43 #include <asm/byteorder.h> 43 #include <asm/byteorder.h>
44 #include <asm/io.h> 44 #include <asm/io.h>
45 #include <asm/prom.h> 45 #include <asm/prom.h>
46 #include <asm/machdep.h> 46 #include <asm/machdep.h>
47 #include <asm/pmac_feature.h> 47 #include <asm/pmac_feature.h>
48 #include <asm/smu.h> 48 #include <asm/smu.h>
49 #include <asm/sections.h> 49 #include <asm/sections.h>
50 #include <asm/abs_addr.h> 50 #include <asm/abs_addr.h>
51 #include <asm/uaccess.h> 51 #include <asm/uaccess.h>
52 52
53 #define VERSION "0.7" 53 #define VERSION "0.7"
54 #define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp." 54 #define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
55 55
56 #undef DEBUG_SMU 56 #undef DEBUG_SMU
57 57
58 #ifdef DEBUG_SMU 58 #ifdef DEBUG_SMU
59 #define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0) 59 #define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
60 #else 60 #else
61 #define DPRINTK(fmt, args...) do { } while (0) 61 #define DPRINTK(fmt, args...) do { } while (0)
62 #endif 62 #endif
63 63
64 /* 64 /*
65 * This is the command buffer passed to the SMU hardware 65 * This is the command buffer passed to the SMU hardware
66 */ 66 */
67 #define SMU_MAX_DATA 254 67 #define SMU_MAX_DATA 254
68 68
69 struct smu_cmd_buf { 69 struct smu_cmd_buf {
70 u8 cmd; 70 u8 cmd;
71 u8 length; 71 u8 length;
72 u8 data[SMU_MAX_DATA]; 72 u8 data[SMU_MAX_DATA];
73 }; 73 };
74 74
75 struct smu_device { 75 struct smu_device {
76 spinlock_t lock; 76 spinlock_t lock;
77 struct device_node *of_node; 77 struct device_node *of_node;
78 struct of_device *of_dev; 78 struct of_device *of_dev;
79 int doorbell; /* doorbell gpio */ 79 int doorbell; /* doorbell gpio */
80 u32 __iomem *db_buf; /* doorbell buffer */ 80 u32 __iomem *db_buf; /* doorbell buffer */
81 struct device_node *db_node; 81 struct device_node *db_node;
82 unsigned int db_irq; 82 unsigned int db_irq;
83 int msg; 83 int msg;
84 struct device_node *msg_node; 84 struct device_node *msg_node;
85 unsigned int msg_irq; 85 unsigned int msg_irq;
86 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */ 86 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */
87 u32 cmd_buf_abs; /* command buffer absolute */ 87 u32 cmd_buf_abs; /* command buffer absolute */
88 struct list_head cmd_list; 88 struct list_head cmd_list;
89 struct smu_cmd *cmd_cur; /* pending command */ 89 struct smu_cmd *cmd_cur; /* pending command */
90 int broken_nap; 90 int broken_nap;
91 struct list_head cmd_i2c_list; 91 struct list_head cmd_i2c_list;
92 struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */ 92 struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
93 struct timer_list i2c_timer; 93 struct timer_list i2c_timer;
94 }; 94 };
95 95
96 /* 96 /*
97 * I don't think there will ever be more than one SMU, so 97 * I don't think there will ever be more than one SMU, so
98 * for now, just hard code that 98 * for now, just hard code that
99 */ 99 */
100 static struct smu_device *smu; 100 static struct smu_device *smu;
101 static DEFINE_MUTEX(smu_part_access); 101 static DEFINE_MUTEX(smu_part_access);
102 static int smu_irq_inited; 102 static int smu_irq_inited;
103 103
104 static void smu_i2c_retry(unsigned long data); 104 static void smu_i2c_retry(unsigned long data);
105 105
106 /* 106 /*
107 * SMU driver low level stuff 107 * SMU driver low level stuff
108 */ 108 */
109 109
110 static void smu_start_cmd(void) 110 static void smu_start_cmd(void)
111 { 111 {
112 unsigned long faddr, fend; 112 unsigned long faddr, fend;
113 struct smu_cmd *cmd; 113 struct smu_cmd *cmd;
114 114
115 if (list_empty(&smu->cmd_list)) 115 if (list_empty(&smu->cmd_list))
116 return; 116 return;
117 117
118 /* Fetch first command in queue */ 118 /* Fetch first command in queue */
119 cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link); 119 cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
120 smu->cmd_cur = cmd; 120 smu->cmd_cur = cmd;
121 list_del(&cmd->link); 121 list_del(&cmd->link);
122 122
123 DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd, 123 DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
124 cmd->data_len); 124 cmd->data_len);
125 DPRINTK("SMU: data buffer: %02x %02x %02x %02x %02x %02x %02x %02x\n", 125 DPRINTK("SMU: data buffer: %02x %02x %02x %02x %02x %02x %02x %02x\n",
126 ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1], 126 ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
127 ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3], 127 ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3],
128 ((u8 *)cmd->data_buf)[4], ((u8 *)cmd->data_buf)[5], 128 ((u8 *)cmd->data_buf)[4], ((u8 *)cmd->data_buf)[5],
129 ((u8 *)cmd->data_buf)[6], ((u8 *)cmd->data_buf)[7]); 129 ((u8 *)cmd->data_buf)[6], ((u8 *)cmd->data_buf)[7]);
130 130
131 /* Fill the SMU command buffer */ 131 /* Fill the SMU command buffer */
132 smu->cmd_buf->cmd = cmd->cmd; 132 smu->cmd_buf->cmd = cmd->cmd;
133 smu->cmd_buf->length = cmd->data_len; 133 smu->cmd_buf->length = cmd->data_len;
134 memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len); 134 memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
135 135
136 /* Flush command and data to RAM */ 136 /* Flush command and data to RAM */
137 faddr = (unsigned long)smu->cmd_buf; 137 faddr = (unsigned long)smu->cmd_buf;
138 fend = faddr + smu->cmd_buf->length + 2; 138 fend = faddr + smu->cmd_buf->length + 2;
139 flush_inval_dcache_range(faddr, fend); 139 flush_inval_dcache_range(faddr, fend);
140 140
141 141
142 /* We also disable NAP mode for the duration of the command 142 /* We also disable NAP mode for the duration of the command
143 * on U3 based machines. 143 * on U3 based machines.
144 * This is slightly racy as it can be written back to 1 by a sysctl 144 * This is slightly racy as it can be written back to 1 by a sysctl
145 * but that never happens in practice. There seem to be an issue with 145 * but that never happens in practice. There seem to be an issue with
146 * U3 based machines such as the iMac G5 where napping for the 146 * U3 based machines such as the iMac G5 where napping for the
147 * whole duration of the command prevents the SMU from fetching it 147 * whole duration of the command prevents the SMU from fetching it
148 * from memory. This might be related to the strange i2c based 148 * from memory. This might be related to the strange i2c based
149 * mechanism the SMU uses to access memory. 149 * mechanism the SMU uses to access memory.
150 */ 150 */
151 if (smu->broken_nap) 151 if (smu->broken_nap)
152 powersave_nap = 0; 152 powersave_nap = 0;
153 153
154 /* This isn't exactly a DMA mapping here, I suspect 154 /* This isn't exactly a DMA mapping here, I suspect
155 * the SMU is actually communicating with us via i2c to the 155 * the SMU is actually communicating with us via i2c to the
156 * northbridge or the CPU to access RAM. 156 * northbridge or the CPU to access RAM.
157 */ 157 */
158 writel(smu->cmd_buf_abs, smu->db_buf); 158 writel(smu->cmd_buf_abs, smu->db_buf);
159 159
160 /* Ring the SMU doorbell */ 160 /* Ring the SMU doorbell */
161 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4); 161 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
162 } 162 }
163 163
164 164
165 static irqreturn_t smu_db_intr(int irq, void *arg) 165 static irqreturn_t smu_db_intr(int irq, void *arg)
166 { 166 {
167 unsigned long flags; 167 unsigned long flags;
168 struct smu_cmd *cmd; 168 struct smu_cmd *cmd;
169 void (*done)(struct smu_cmd *cmd, void *misc) = NULL; 169 void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
170 void *misc = NULL; 170 void *misc = NULL;
171 u8 gpio; 171 u8 gpio;
172 int rc = 0; 172 int rc = 0;
173 173
174 /* SMU completed the command, well, we hope, let's make sure 174 /* SMU completed the command, well, we hope, let's make sure
175 * of it 175 * of it
176 */ 176 */
177 spin_lock_irqsave(&smu->lock, flags); 177 spin_lock_irqsave(&smu->lock, flags);
178 178
179 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell); 179 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
180 if ((gpio & 7) != 7) { 180 if ((gpio & 7) != 7) {
181 spin_unlock_irqrestore(&smu->lock, flags); 181 spin_unlock_irqrestore(&smu->lock, flags);
182 return IRQ_HANDLED; 182 return IRQ_HANDLED;
183 } 183 }
184 184
185 cmd = smu->cmd_cur; 185 cmd = smu->cmd_cur;
186 smu->cmd_cur = NULL; 186 smu->cmd_cur = NULL;
187 if (cmd == NULL) 187 if (cmd == NULL)
188 goto bail; 188 goto bail;
189 189
190 if (rc == 0) { 190 if (rc == 0) {
191 unsigned long faddr; 191 unsigned long faddr;
192 int reply_len; 192 int reply_len;
193 u8 ack; 193 u8 ack;
194 194
195 /* CPU might have brought back the cache line, so we need 195 /* CPU might have brought back the cache line, so we need
196 * to flush again before peeking at the SMU response. We 196 * to flush again before peeking at the SMU response. We
197 * flush the entire buffer for now as we haven't read the 197 * flush the entire buffer for now as we haven't read the
198 * reply length (it's only 2 cache lines anyway) 198 * reply length (it's only 2 cache lines anyway)
199 */ 199 */
200 faddr = (unsigned long)smu->cmd_buf; 200 faddr = (unsigned long)smu->cmd_buf;
201 flush_inval_dcache_range(faddr, faddr + 256); 201 flush_inval_dcache_range(faddr, faddr + 256);
202 202
203 /* Now check ack */ 203 /* Now check ack */
204 ack = (~cmd->cmd) & 0xff; 204 ack = (~cmd->cmd) & 0xff;
205 if (ack != smu->cmd_buf->cmd) { 205 if (ack != smu->cmd_buf->cmd) {
206 DPRINTK("SMU: incorrect ack, want %x got %x\n", 206 DPRINTK("SMU: incorrect ack, want %x got %x\n",
207 ack, smu->cmd_buf->cmd); 207 ack, smu->cmd_buf->cmd);
208 rc = -EIO; 208 rc = -EIO;
209 } 209 }
210 reply_len = rc == 0 ? smu->cmd_buf->length : 0; 210 reply_len = rc == 0 ? smu->cmd_buf->length : 0;
211 DPRINTK("SMU: reply len: %d\n", reply_len); 211 DPRINTK("SMU: reply len: %d\n", reply_len);
212 if (reply_len > cmd->reply_len) { 212 if (reply_len > cmd->reply_len) {
213 printk(KERN_WARNING "SMU: reply buffer too small," 213 printk(KERN_WARNING "SMU: reply buffer too small,"
214 "got %d bytes for a %d bytes buffer\n", 214 "got %d bytes for a %d bytes buffer\n",
215 reply_len, cmd->reply_len); 215 reply_len, cmd->reply_len);
216 reply_len = cmd->reply_len; 216 reply_len = cmd->reply_len;
217 } 217 }
218 cmd->reply_len = reply_len; 218 cmd->reply_len = reply_len;
219 if (cmd->reply_buf && reply_len) 219 if (cmd->reply_buf && reply_len)
220 memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len); 220 memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
221 } 221 }
222 222
223 /* Now complete the command. Write status last in order as we lost 223 /* Now complete the command. Write status last in order as we lost
224 * ownership of the command structure as soon as it's no longer -1 224 * ownership of the command structure as soon as it's no longer -1
225 */ 225 */
226 done = cmd->done; 226 done = cmd->done;
227 misc = cmd->misc; 227 misc = cmd->misc;
228 mb(); 228 mb();
229 cmd->status = rc; 229 cmd->status = rc;
230 230
231 /* Re-enable NAP mode */ 231 /* Re-enable NAP mode */
232 if (smu->broken_nap) 232 if (smu->broken_nap)
233 powersave_nap = 1; 233 powersave_nap = 1;
234 bail: 234 bail:
235 /* Start next command if any */ 235 /* Start next command if any */
236 smu_start_cmd(); 236 smu_start_cmd();
237 spin_unlock_irqrestore(&smu->lock, flags); 237 spin_unlock_irqrestore(&smu->lock, flags);
238 238
239 /* Call command completion handler if any */ 239 /* Call command completion handler if any */
240 if (done) 240 if (done)
241 done(cmd, misc); 241 done(cmd, misc);
242 242
243 /* It's an edge interrupt, nothing to do */ 243 /* It's an edge interrupt, nothing to do */
244 return IRQ_HANDLED; 244 return IRQ_HANDLED;
245 } 245 }
246 246
247 247
248 static irqreturn_t smu_msg_intr(int irq, void *arg) 248 static irqreturn_t smu_msg_intr(int irq, void *arg)
249 { 249 {
250 /* I don't quite know what to do with this one, we seem to never 250 /* I don't quite know what to do with this one, we seem to never
251 * receive it, so I suspect we have to arm it someway in the SMU 251 * receive it, so I suspect we have to arm it someway in the SMU
252 * to start getting events that way. 252 * to start getting events that way.
253 */ 253 */
254 254
255 printk(KERN_INFO "SMU: message interrupt !\n"); 255 printk(KERN_INFO "SMU: message interrupt !\n");
256 256
257 /* It's an edge interrupt, nothing to do */ 257 /* It's an edge interrupt, nothing to do */
258 return IRQ_HANDLED; 258 return IRQ_HANDLED;
259 } 259 }
260 260
261 261
262 /* 262 /*
263 * Queued command management. 263 * Queued command management.
264 * 264 *
265 */ 265 */
266 266
267 int smu_queue_cmd(struct smu_cmd *cmd) 267 int smu_queue_cmd(struct smu_cmd *cmd)
268 { 268 {
269 unsigned long flags; 269 unsigned long flags;
270 270
271 if (smu == NULL) 271 if (smu == NULL)
272 return -ENODEV; 272 return -ENODEV;
273 if (cmd->data_len > SMU_MAX_DATA || 273 if (cmd->data_len > SMU_MAX_DATA ||
274 cmd->reply_len > SMU_MAX_DATA) 274 cmd->reply_len > SMU_MAX_DATA)
275 return -EINVAL; 275 return -EINVAL;
276 276
277 cmd->status = 1; 277 cmd->status = 1;
278 spin_lock_irqsave(&smu->lock, flags); 278 spin_lock_irqsave(&smu->lock, flags);
279 list_add_tail(&cmd->link, &smu->cmd_list); 279 list_add_tail(&cmd->link, &smu->cmd_list);
280 if (smu->cmd_cur == NULL) 280 if (smu->cmd_cur == NULL)
281 smu_start_cmd(); 281 smu_start_cmd();
282 spin_unlock_irqrestore(&smu->lock, flags); 282 spin_unlock_irqrestore(&smu->lock, flags);
283 283
284 /* Workaround for early calls when irq isn't available */ 284 /* Workaround for early calls when irq isn't available */
285 if (!smu_irq_inited || smu->db_irq == NO_IRQ) 285 if (!smu_irq_inited || smu->db_irq == NO_IRQ)
286 smu_spinwait_cmd(cmd); 286 smu_spinwait_cmd(cmd);
287 287
288 return 0; 288 return 0;
289 } 289 }
290 EXPORT_SYMBOL(smu_queue_cmd); 290 EXPORT_SYMBOL(smu_queue_cmd);
291 291
292 292
293 int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command, 293 int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
294 unsigned int data_len, 294 unsigned int data_len,
295 void (*done)(struct smu_cmd *cmd, void *misc), 295 void (*done)(struct smu_cmd *cmd, void *misc),
296 void *misc, ...) 296 void *misc, ...)
297 { 297 {
298 struct smu_cmd *cmd = &scmd->cmd; 298 struct smu_cmd *cmd = &scmd->cmd;
299 va_list list; 299 va_list list;
300 int i; 300 int i;
301 301
302 if (data_len > sizeof(scmd->buffer)) 302 if (data_len > sizeof(scmd->buffer))
303 return -EINVAL; 303 return -EINVAL;
304 304
305 memset(scmd, 0, sizeof(*scmd)); 305 memset(scmd, 0, sizeof(*scmd));
306 cmd->cmd = command; 306 cmd->cmd = command;
307 cmd->data_len = data_len; 307 cmd->data_len = data_len;
308 cmd->data_buf = scmd->buffer; 308 cmd->data_buf = scmd->buffer;
309 cmd->reply_len = sizeof(scmd->buffer); 309 cmd->reply_len = sizeof(scmd->buffer);
310 cmd->reply_buf = scmd->buffer; 310 cmd->reply_buf = scmd->buffer;
311 cmd->done = done; 311 cmd->done = done;
312 cmd->misc = misc; 312 cmd->misc = misc;
313 313
314 va_start(list, misc); 314 va_start(list, misc);
315 for (i = 0; i < data_len; ++i) 315 for (i = 0; i < data_len; ++i)
316 scmd->buffer[i] = (u8)va_arg(list, int); 316 scmd->buffer[i] = (u8)va_arg(list, int);
317 va_end(list); 317 va_end(list);
318 318
319 return smu_queue_cmd(cmd); 319 return smu_queue_cmd(cmd);
320 } 320 }
321 EXPORT_SYMBOL(smu_queue_simple); 321 EXPORT_SYMBOL(smu_queue_simple);
322 322
323 323
324 void smu_poll(void) 324 void smu_poll(void)
325 { 325 {
326 u8 gpio; 326 u8 gpio;
327 327
328 if (smu == NULL) 328 if (smu == NULL)
329 return; 329 return;
330 330
331 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell); 331 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
332 if ((gpio & 7) == 7) 332 if ((gpio & 7) == 7)
333 smu_db_intr(smu->db_irq, smu); 333 smu_db_intr(smu->db_irq, smu);
334 } 334 }
335 EXPORT_SYMBOL(smu_poll); 335 EXPORT_SYMBOL(smu_poll);
336 336
337 337
/*
 * Generic completion callback: treats 'misc' as a struct completion
 * and signals it.  Suitable as the 'done' hook of any smu_cmd.
 */
void smu_done_complete(struct smu_cmd *cmd, void *misc)
{
	complete((struct completion *)misc);
}
EXPORT_SYMBOL(smu_done_complete);
345 345
346 346
347 void smu_spinwait_cmd(struct smu_cmd *cmd) 347 void smu_spinwait_cmd(struct smu_cmd *cmd)
348 { 348 {
349 while(cmd->status == 1) 349 while(cmd->status == 1)
350 smu_poll(); 350 smu_poll();
351 } 351 }
352 EXPORT_SYMBOL(smu_spinwait_cmd); 352 EXPORT_SYMBOL(smu_spinwait_cmd);
353 353
354 354
355 /* RTC low level commands */ 355 /* RTC low level commands */
/* Decode a packed-BCD byte to its binary value (e.g. 0x25 -> 25).
 * Despite the historical name, the result is plain binary, not hex.
 */
static inline int bcd2hex (int n)
{
	int tens = (n >> 4) & 0xf;
	int units = n & 0xf;

	return tens * 10 + units;
}
360 360
361 361
/* Encode a binary value 0..99 as a packed-BCD byte (e.g. 25 -> 0x25). */
static inline int hex2bcd (int n)
{
	int tens = n / 10;
	int units = n % 10;

	return (tens << 4) + units;
}
366 366
367 367
368 static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, 368 static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
369 struct rtc_time *time) 369 struct rtc_time *time)
370 { 370 {
371 cmd_buf->cmd = 0x8e; 371 cmd_buf->cmd = 0x8e;
372 cmd_buf->length = 8; 372 cmd_buf->length = 8;
373 cmd_buf->data[0] = 0x80; 373 cmd_buf->data[0] = 0x80;
374 cmd_buf->data[1] = hex2bcd(time->tm_sec); 374 cmd_buf->data[1] = hex2bcd(time->tm_sec);
375 cmd_buf->data[2] = hex2bcd(time->tm_min); 375 cmd_buf->data[2] = hex2bcd(time->tm_min);
376 cmd_buf->data[3] = hex2bcd(time->tm_hour); 376 cmd_buf->data[3] = hex2bcd(time->tm_hour);
377 cmd_buf->data[4] = time->tm_wday; 377 cmd_buf->data[4] = time->tm_wday;
378 cmd_buf->data[5] = hex2bcd(time->tm_mday); 378 cmd_buf->data[5] = hex2bcd(time->tm_mday);
379 cmd_buf->data[6] = hex2bcd(time->tm_mon) + 1; 379 cmd_buf->data[6] = hex2bcd(time->tm_mon) + 1;
380 cmd_buf->data[7] = hex2bcd(time->tm_year - 100); 380 cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
381 } 381 }
382 382
383 383
384 int smu_get_rtc_time(struct rtc_time *time, int spinwait) 384 int smu_get_rtc_time(struct rtc_time *time, int spinwait)
385 { 385 {
386 struct smu_simple_cmd cmd; 386 struct smu_simple_cmd cmd;
387 int rc; 387 int rc;
388 388
389 if (smu == NULL) 389 if (smu == NULL)
390 return -ENODEV; 390 return -ENODEV;
391 391
392 memset(time, 0, sizeof(struct rtc_time)); 392 memset(time, 0, sizeof(struct rtc_time));
393 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL, 393 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
394 SMU_CMD_RTC_GET_DATETIME); 394 SMU_CMD_RTC_GET_DATETIME);
395 if (rc) 395 if (rc)
396 return rc; 396 return rc;
397 smu_spinwait_simple(&cmd); 397 smu_spinwait_simple(&cmd);
398 398
399 time->tm_sec = bcd2hex(cmd.buffer[0]); 399 time->tm_sec = bcd2hex(cmd.buffer[0]);
400 time->tm_min = bcd2hex(cmd.buffer[1]); 400 time->tm_min = bcd2hex(cmd.buffer[1]);
401 time->tm_hour = bcd2hex(cmd.buffer[2]); 401 time->tm_hour = bcd2hex(cmd.buffer[2]);
402 time->tm_wday = bcd2hex(cmd.buffer[3]); 402 time->tm_wday = bcd2hex(cmd.buffer[3]);
403 time->tm_mday = bcd2hex(cmd.buffer[4]); 403 time->tm_mday = bcd2hex(cmd.buffer[4]);
404 time->tm_mon = bcd2hex(cmd.buffer[5]) - 1; 404 time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
405 time->tm_year = bcd2hex(cmd.buffer[6]) + 100; 405 time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
406 406
407 return 0; 407 return 0;
408 } 408 }
409 409
410 410
411 int smu_set_rtc_time(struct rtc_time *time, int spinwait) 411 int smu_set_rtc_time(struct rtc_time *time, int spinwait)
412 { 412 {
413 struct smu_simple_cmd cmd; 413 struct smu_simple_cmd cmd;
414 int rc; 414 int rc;
415 415
416 if (smu == NULL) 416 if (smu == NULL)
417 return -ENODEV; 417 return -ENODEV;
418 418
419 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL, 419 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
420 SMU_CMD_RTC_SET_DATETIME, 420 SMU_CMD_RTC_SET_DATETIME,
421 hex2bcd(time->tm_sec), 421 hex2bcd(time->tm_sec),
422 hex2bcd(time->tm_min), 422 hex2bcd(time->tm_min),
423 hex2bcd(time->tm_hour), 423 hex2bcd(time->tm_hour),
424 time->tm_wday, 424 time->tm_wday,
425 hex2bcd(time->tm_mday), 425 hex2bcd(time->tm_mday),
426 hex2bcd(time->tm_mon) + 1, 426 hex2bcd(time->tm_mon) + 1,
427 hex2bcd(time->tm_year - 100)); 427 hex2bcd(time->tm_year - 100));
428 if (rc) 428 if (rc)
429 return rc; 429 return rc;
430 smu_spinwait_simple(&cmd); 430 smu_spinwait_simple(&cmd);
431 431
432 return 0; 432 return 0;
433 } 433 }
434 434
435 435
436 void smu_shutdown(void) 436 void smu_shutdown(void)
437 { 437 {
438 struct smu_simple_cmd cmd; 438 struct smu_simple_cmd cmd;
439 439
440 if (smu == NULL) 440 if (smu == NULL)
441 return; 441 return;
442 442
443 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL, 443 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
444 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0)) 444 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
445 return; 445 return;
446 smu_spinwait_simple(&cmd); 446 smu_spinwait_simple(&cmd);
447 for (;;) 447 for (;;)
448 ; 448 ;
449 } 449 }
450 450
451 451
452 void smu_restart(void) 452 void smu_restart(void)
453 { 453 {
454 struct smu_simple_cmd cmd; 454 struct smu_simple_cmd cmd;
455 455
456 if (smu == NULL) 456 if (smu == NULL)
457 return; 457 return;
458 458
459 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL, 459 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
460 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0)) 460 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
461 return; 461 return;
462 smu_spinwait_simple(&cmd); 462 smu_spinwait_simple(&cmd);
463 for (;;) 463 for (;;)
464 ; 464 ;
465 } 465 }
466 466
467 467
468 int smu_present(void) 468 int smu_present(void)
469 { 469 {
470 return smu != NULL; 470 return smu != NULL;
471 } 471 }
472 EXPORT_SYMBOL(smu_present); 472 EXPORT_SYMBOL(smu_present);
473 473
474 474
/*
 * Early SMU probe: locate the "smu" device-tree node, allocate the
 * global smu_device from bootmem and wire up the doorbell/message
 * GPIOs and the doorbell buffer mapping.  Runs before interrupts are
 * available; smu_late_init() (core_initcall) finishes the irq setup.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released through the goto chain at the bottom.
 */
int __init smu_init (void)
{
	struct device_node *np;
	const u32 *data;
	int ret = 0;

	np = of_find_node_by_type(NULL, "smu");
	if (np == NULL)
		return -ENODEV;

	printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR);

	/* The command buffer must have been reserved by early boot code
	 * before the linear mapping was set up
	 */
	if (smu_cmdbuf_abs == 0) {
		printk(KERN_ERR "SMU: Command buffer not allocated !\n");
		ret = -EINVAL;
		goto fail_np;
	}

	/* NOTE(review): no NULL check -- alloc_bootmem() traditionally
	 * panics instead of returning NULL; confirm for this kernel.
	 */
	smu = alloc_bootmem(sizeof(struct smu_device));

	spin_lock_init(&smu->lock);
	INIT_LIST_HEAD(&smu->cmd_list);
	INIT_LIST_HEAD(&smu->cmd_i2c_list);
	smu->of_node = np;
	smu->db_irq = NO_IRQ;
	smu->msg_irq = NO_IRQ;

	/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
	 * 32 bits value safely
	 */
	smu->cmd_buf_abs = (u32)smu_cmdbuf_abs;
	smu->cmd_buf = (struct smu_cmd_buf *)abs_to_virt(smu_cmdbuf_abs);

	/* The doorbell GPIO is mandatory */
	smu->db_node = of_find_node_by_name(NULL, "smu-doorbell");
	if (smu->db_node == NULL) {
		printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
		ret = -ENXIO;
		goto fail_bootmem;
	}
	data = of_get_property(smu->db_node, "reg", NULL);
	if (data == NULL) {
		printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
		ret = -ENXIO;
		goto fail_db_node;
	}

	/* Current setup has one doorbell GPIO that does both doorbell
	 * and ack. GPIOs are at 0x50, best would be to find that out
	 * in the device-tree though.
	 */
	smu->doorbell = *data;
	if (smu->doorbell < 0x50)
		smu->doorbell += 0x50;

	/* Now look for the smu-interrupt GPIO.  This one is optional:
	 * any failure simply leaves smu->msg_node NULL.  The do/while(0)
	 * is a break-able block, not a loop.
	 */
	do {
		smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt");
		if (smu->msg_node == NULL)
			break;
		data = of_get_property(smu->msg_node, "reg", NULL);
		if (data == NULL) {
			of_node_put(smu->msg_node);
			smu->msg_node = NULL;
			break;
		}
		smu->msg = *data;
		if (smu->msg < 0x50)
			smu->msg += 0x50;
	} while(0);

	/* Doorbell buffer is currently hard-coded, I didn't find a proper
	 * device-tree entry giving the address. Best would probably to use
	 * an offset for K2 base though, but let's do it that way for now.
	 */
	smu->db_buf = ioremap(0x8000860c, 0x1000);
	if (smu->db_buf == NULL) {
		printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n");
		ret = -ENXIO;
		goto fail_msg_node;
	}

	/* U3 has an issue with NAP mode when issuing SMU commands */
	smu->broken_nap = pmac_get_uninorth_variant() < 4;
	if (smu->broken_nap)
		printk(KERN_INFO "SMU: using NAP mode workaround\n");

	sys_ctrler = SYS_CTRLER_SMU;
	return 0;

	/* Error unwind: release resources in reverse acquisition order */
fail_msg_node:
	if (smu->msg_node)
		of_node_put(smu->msg_node);
fail_db_node:
	of_node_put(smu->db_node);
fail_bootmem:
	free_bootmem((unsigned long)smu, sizeof(struct smu_device));
	smu = NULL;
fail_np:
	of_node_put(np);
	return ret;
}
576 576
577 577
/*
 * Second-stage SMU init, run as a core_initcall once the interrupt
 * subsystem is up: arm the i2c retry timer and map + request the
 * doorbell and message interrupts.  Any irq failure is non-fatal --
 * the irq is reset to NO_IRQ and the driver falls back to polled
 * operation (see the spin-wait workaround in smu_queue_cmd()).
 */
static int smu_late_init(void)
{
	if (!smu)
		return 0;

	/* Timer used to retry failed i2c transactions after 5ms */
	init_timer(&smu->i2c_timer);
	smu->i2c_timer.function = smu_i2c_retry;
	smu->i2c_timer.data = (unsigned long)smu;

	if (smu->db_node) {
		smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
		if (smu->db_irq == NO_IRQ)
			printk(KERN_ERR "smu: failed to map irq for node %s\n",
			       smu->db_node->full_name);
	}
	if (smu->msg_node) {
		smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0);
		if (smu->msg_irq == NO_IRQ)
			printk(KERN_ERR "smu: failed to map irq for node %s\n",
			       smu->msg_node->full_name);
	}

	/*
	 * Try to request the interrupts
	 */

	if (smu->db_irq != NO_IRQ) {
		if (request_irq(smu->db_irq, smu_db_intr,
				IRQF_SHARED, "SMU doorbell", smu) < 0) {
			printk(KERN_WARNING "SMU: can't "
			       "request interrupt %d\n",
			       smu->db_irq);
			smu->db_irq = NO_IRQ;
		}
	}

	if (smu->msg_irq != NO_IRQ) {
		if (request_irq(smu->msg_irq, smu_msg_intr,
				IRQF_SHARED, "SMU message", smu) < 0) {
			printk(KERN_WARNING "SMU: can't "
			       "request interrupt %d\n",
			       smu->msg_irq);
			smu->msg_irq = NO_IRQ;
		}
	}

	/* Tell smu_queue_cmd() it no longer needs to spin-wait */
	smu_irq_inited = 1;
	return 0;
}
/* This has to be before arch_initcall as the low i2c stuff relies on the
 * above having been done before we reach arch_initcalls
 */
core_initcall(smu_late_init);
631 631
632 /* 632 /*
633 * sysfs visibility 633 * sysfs visibility
634 */ 634 */
635 635
636 static void smu_expose_childs(struct work_struct *unused) 636 static void smu_expose_childs(struct work_struct *unused)
637 { 637 {
638 struct device_node *np; 638 struct device_node *np;
639 639
640 for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) 640 for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;)
641 if (of_device_is_compatible(np, "smu-sensors")) 641 if (of_device_is_compatible(np, "smu-sensors"))
642 of_platform_device_create(np, "smu-sensors", 642 of_platform_device_create(np, "smu-sensors",
643 &smu->of_dev->dev); 643 &smu->of_dev->dev);
644 } 644 }
645 645
646 static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs); 646 static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
647 647
648 static int smu_platform_probe(struct of_device* dev, 648 static int smu_platform_probe(struct of_device* dev,
649 const struct of_device_id *match) 649 const struct of_device_id *match)
650 { 650 {
651 if (!smu) 651 if (!smu)
652 return -ENODEV; 652 return -ENODEV;
653 smu->of_dev = dev; 653 smu->of_dev = dev;
654 654
655 /* 655 /*
656 * Ok, we are matched, now expose all i2c busses. We have to defer 656 * Ok, we are matched, now expose all i2c busses. We have to defer
657 * that unfortunately or it would deadlock inside the device model 657 * that unfortunately or it would deadlock inside the device model
658 */ 658 */
659 schedule_work(&smu_expose_childs_work); 659 schedule_work(&smu_expose_childs_work);
660 660
661 return 0; 661 return 0;
662 } 662 }
663 663
/* Match any device-tree node of type "smu" */
static const struct of_device_id smu_platform_match[] =
{
	{
		.type = "smu",
	},
	{},
};

/* Platform driver binding the SMU node; probe defers child creation */
static struct of_platform_driver smu_of_platform_driver =
{
	.name = "smu",
	.match_table = smu_platform_match,
	.probe = smu_platform_probe,
};
678 678
679 static int __init smu_init_sysfs(void) 679 static int __init smu_init_sysfs(void)
680 { 680 {
681 /* 681 /*
682 * Due to sysfs bogosity, a sysdev is not a real device, so 682 * Due to sysfs bogosity, a sysdev is not a real device, so
683 * we should in fact create both if we want sysdev semantics 683 * we should in fact create both if we want sysdev semantics
684 * for power management. 684 * for power management.
685 * For now, we don't power manage machines with an SMU chip, 685 * For now, we don't power manage machines with an SMU chip,
686 * I'm a bit too far from figuring out how that works with those 686 * I'm a bit too far from figuring out how that works with those
687 * new chipsets, but that will come back and bite us 687 * new chipsets, but that will come back and bite us
688 */ 688 */
689 of_register_platform_driver(&smu_of_platform_driver); 689 of_register_platform_driver(&smu_of_platform_driver);
690 return 0; 690 return 0;
691 } 691 }
692 692
693 device_initcall(smu_init_sysfs); 693 device_initcall(smu_init_sysfs);
694 694
695 struct of_device *smu_get_ofdev(void) 695 struct of_device *smu_get_ofdev(void)
696 { 696 {
697 if (!smu) 697 if (!smu)
698 return NULL; 698 return NULL;
699 return smu->of_dev; 699 return smu->of_dev;
700 } 700 }
701 701
702 EXPORT_SYMBOL_GPL(smu_get_ofdev); 702 EXPORT_SYMBOL_GPL(smu_get_ofdev);
703 703
704 /* 704 /*
705 * i2c interface 705 * i2c interface
706 */ 706 */
707 707
708 static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail) 708 static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
709 { 709 {
710 void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done; 710 void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
711 void *misc = cmd->misc; 711 void *misc = cmd->misc;
712 unsigned long flags; 712 unsigned long flags;
713 713
714 /* Check for read case */ 714 /* Check for read case */
715 if (!fail && cmd->read) { 715 if (!fail && cmd->read) {
716 if (cmd->pdata[0] < 1) 716 if (cmd->pdata[0] < 1)
717 fail = 1; 717 fail = 1;
718 else 718 else
719 memcpy(cmd->info.data, &cmd->pdata[1], 719 memcpy(cmd->info.data, &cmd->pdata[1],
720 cmd->info.datalen); 720 cmd->info.datalen);
721 } 721 }
722 722
723 DPRINTK("SMU: completing, success: %d\n", !fail); 723 DPRINTK("SMU: completing, success: %d\n", !fail);
724 724
725 /* Update status and mark no pending i2c command with lock 725 /* Update status and mark no pending i2c command with lock
726 * held so nobody comes in while we dequeue an eventual 726 * held so nobody comes in while we dequeue an eventual
727 * pending next i2c command 727 * pending next i2c command
728 */ 728 */
729 spin_lock_irqsave(&smu->lock, flags); 729 spin_lock_irqsave(&smu->lock, flags);
730 smu->cmd_i2c_cur = NULL; 730 smu->cmd_i2c_cur = NULL;
731 wmb(); 731 wmb();
732 cmd->status = fail ? -EIO : 0; 732 cmd->status = fail ? -EIO : 0;
733 733
734 /* Is there another i2c command waiting ? */ 734 /* Is there another i2c command waiting ? */
735 if (!list_empty(&smu->cmd_i2c_list)) { 735 if (!list_empty(&smu->cmd_i2c_list)) {
736 struct smu_i2c_cmd *newcmd; 736 struct smu_i2c_cmd *newcmd;
737 737
738 /* Fetch it, new current, remove from list */ 738 /* Fetch it, new current, remove from list */
739 newcmd = list_entry(smu->cmd_i2c_list.next, 739 newcmd = list_entry(smu->cmd_i2c_list.next,
740 struct smu_i2c_cmd, link); 740 struct smu_i2c_cmd, link);
741 smu->cmd_i2c_cur = newcmd; 741 smu->cmd_i2c_cur = newcmd;
742 list_del(&cmd->link); 742 list_del(&cmd->link);
743 743
744 /* Queue with low level smu */ 744 /* Queue with low level smu */
745 list_add_tail(&cmd->scmd.link, &smu->cmd_list); 745 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
746 if (smu->cmd_cur == NULL) 746 if (smu->cmd_cur == NULL)
747 smu_start_cmd(); 747 smu_start_cmd();
748 } 748 }
749 spin_unlock_irqrestore(&smu->lock, flags); 749 spin_unlock_irqrestore(&smu->lock, flags);
750 750
751 /* Call command completion handler if any */ 751 /* Call command completion handler if any */
752 if (done) 752 if (done)
753 done(cmd, misc); 753 done(cmd, misc);
754 754
755 } 755 }
756 756
757 757
/*
 * Retry the in-flight i2c command.  Invoked either as the i2c_timer
 * callback or directly (with data == 0) before irqs are initialized;
 * 'data' is unused -- the command is taken from the global
 * smu->cmd_i2c_cur, which callers guarantee is still current.
 */
static void smu_i2c_retry(unsigned long data)
{
	struct smu_i2c_cmd *cmd = smu->cmd_i2c_cur;

	DPRINTK("SMU: i2c failure, requeuing...\n");

	/* requeue command simply by resetting reply_len */
	cmd->pdata[0] = 0xff;
	cmd->scmd.reply_len = sizeof(cmd->pdata);
	smu_queue_cmd(&cmd->scmd);
}
769 769
770 770
/*
 * Low-level completion handler for the smu_cmd carrying an i2c
 * transaction.  Implements the two-stage i2c state machine: stage 0
 * issues the transfer, stage 1 polls the SMU for the status byte.
 * Failures are retried via smu_i2c_retry() until cmd->retries runs
 * out, with 5ms between attempts.
 */
static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
{
	struct smu_i2c_cmd *cmd = misc;
	int fail = 0;

	DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
		cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);

	/* Check for possible status: negative smu status is a transport
	 * error; otherwise the first reply byte encodes the i2c result
	 * (stage 1 reads treat values >= 0x80 as an SMU-reported error)
	 */
	if (scmd->status < 0)
		fail = 1;
	else if (cmd->read) {
		if (cmd->stage == 0)
			fail = cmd->pdata[0] != 0;
		else
			fail = cmd->pdata[0] >= 0x80;
	} else {
		fail = cmd->pdata[0] != 0;
	}

	/* Handle failures by requeuing command, after 5ms interval
	 */
	if (fail && --cmd->retries > 0) {
		DPRINTK("SMU: i2c failure, starting timer...\n");
		BUG_ON(cmd != smu->cmd_i2c_cur);
		/* Timers can't fire before irq init: busy-wait instead */
		if (!smu_irq_inited) {
			mdelay(5);
			smu_i2c_retry(0);
			return;
		}
		mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5));
		return;
	}

	/* If failure or stage 1, command is complete */
	if (fail || cmd->stage != 0) {
		smu_i2c_complete_command(cmd, fail);
		return;
	}

	DPRINTK("SMU: going to stage 1\n");

	/* Ok, initial command complete, now poll status: reuse the same
	 * scmd with a 1-byte payload and fresh retry budget
	 */
	scmd->reply_buf = cmd->pdata;
	scmd->reply_len = sizeof(cmd->pdata);
	scmd->data_buf = cmd->pdata;
	scmd->data_len = 1;
	cmd->pdata[0] = 0;
	cmd->stage = 1;
	cmd->retries = 20;
	smu_queue_cmd(scmd);
}
823 823
824 824
/*
 * Queue an i2c transaction to the SMU.  Fills in the underlying
 * smu_cmd, sanitizes the transfer "info" fields by transfer type, and
 * either starts the command (when no i2c command is in flight) or
 * parks it on cmd_i2c_list.  Completion is reported asynchronously
 * through cmd->done via smu_i2c_complete_command().
 * Returns 0 on queue success, -ENODEV / -EINVAL on bad state or args.
 */
int smu_queue_i2c(struct smu_i2c_cmd *cmd)
{
	unsigned long flags;

	if (smu == NULL)
		return -ENODEV;

	/* Fill most fields of scmd */
	cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
	cmd->scmd.done = smu_i2c_low_completion;
	cmd->scmd.misc = cmd;
	cmd->scmd.reply_buf = cmd->pdata;
	cmd->scmd.reply_len = sizeof(cmd->pdata);
	cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
	cmd->scmd.status = 1;
	cmd->stage = 0;
	cmd->pdata[0] = 0xff;
	cmd->retries = 20;
	cmd->status = 1;

	/* Check transfer type, sanitize some "info" fields
	 * based on transfer type and do more checking
	 */
	cmd->info.caddr = cmd->info.devaddr;
	cmd->read = cmd->info.devaddr & 0x01;	/* LSB of devaddr = read bit */
	switch(cmd->info.type) {
	case SMU_I2C_TRANSFER_SIMPLE:
		memset(&cmd->info.sublen, 0, 4);
		break;
	case SMU_I2C_TRANSFER_COMBINED:
		cmd->info.devaddr &= 0xfe;
		/* fallthrough: combined shares the subaddress check below */
	case SMU_I2C_TRANSFER_STDSUB:
		if (cmd->info.sublen > 3)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Finish setting up command based on transfer direction
	 */
	if (cmd->read) {
		if (cmd->info.datalen > SMU_I2C_READ_MAX)
			return -EINVAL;
		memset(cmd->info.data, 0xff, cmd->info.datalen);
		cmd->scmd.data_len = 9;
	} else {
		if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
			return -EINVAL;
		cmd->scmd.data_len = 9 + cmd->info.datalen;
	}

	DPRINTK("SMU: i2c enqueuing command\n");
	DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
		cmd->read ? "read" : "write", cmd->info.datalen,
		cmd->info.bus, cmd->info.caddr,
		cmd->info.subaddr[0], cmd->info.type);


	/* Enqueue command in i2c list, and if empty, enqueue also in
	 * main command list
	 */
	spin_lock_irqsave(&smu->lock, flags);
	if (smu->cmd_i2c_cur == NULL) {
		smu->cmd_i2c_cur = cmd;
		list_add_tail(&cmd->scmd.link, &smu->cmd_list);
		if (smu->cmd_cur == NULL)
			smu_start_cmd();
	} else
		list_add_tail(&cmd->link, &smu->cmd_i2c_list);
	spin_unlock_irqrestore(&smu->lock, flags);

	return 0;
}
899 899
900 /* 900 /*
901 * Handling of "partitions" 901 * Handling of "partitions"
902 */ 902 */
903 903
904 static int smu_read_datablock(u8 *dest, unsigned int addr, unsigned int len) 904 static int smu_read_datablock(u8 *dest, unsigned int addr, unsigned int len)
905 { 905 {
906 DECLARE_COMPLETION_ONSTACK(comp); 906 DECLARE_COMPLETION_ONSTACK(comp);
907 unsigned int chunk; 907 unsigned int chunk;
908 struct smu_cmd cmd; 908 struct smu_cmd cmd;
909 int rc; 909 int rc;
910 u8 params[8]; 910 u8 params[8];
911 911
912 /* We currently use a chunk size of 0xe. We could check the 912 /* We currently use a chunk size of 0xe. We could check the
913 * SMU firmware version and use bigger sizes though 913 * SMU firmware version and use bigger sizes though
914 */ 914 */
915 chunk = 0xe; 915 chunk = 0xe;
916 916
917 while (len) { 917 while (len) {
918 unsigned int clen = min(len, chunk); 918 unsigned int clen = min(len, chunk);
919 919
920 cmd.cmd = SMU_CMD_MISC_ee_COMMAND; 920 cmd.cmd = SMU_CMD_MISC_ee_COMMAND;
921 cmd.data_len = 7; 921 cmd.data_len = 7;
922 cmd.data_buf = params; 922 cmd.data_buf = params;
923 cmd.reply_len = chunk; 923 cmd.reply_len = chunk;
924 cmd.reply_buf = dest; 924 cmd.reply_buf = dest;
925 cmd.done = smu_done_complete; 925 cmd.done = smu_done_complete;
926 cmd.misc = &comp; 926 cmd.misc = &comp;
927 params[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC; 927 params[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC;
928 params[1] = 0x4; 928 params[1] = 0x4;
929 *((u32 *)&params[2]) = addr; 929 *((u32 *)&params[2]) = addr;
930 params[6] = clen; 930 params[6] = clen;
931 931
932 rc = smu_queue_cmd(&cmd); 932 rc = smu_queue_cmd(&cmd);
933 if (rc) 933 if (rc)
934 return rc; 934 return rc;
935 wait_for_completion(&comp); 935 wait_for_completion(&comp);
936 if (cmd.status != 0) 936 if (cmd.status != 0)
937 return rc; 937 return rc;
938 if (cmd.reply_len != clen) { 938 if (cmd.reply_len != clen) {
939 printk(KERN_DEBUG "SMU: short read in " 939 printk(KERN_DEBUG "SMU: short read in "
940 "smu_read_datablock, got: %d, want: %d\n", 940 "smu_read_datablock, got: %d, want: %d\n",
941 cmd.reply_len, clen); 941 cmd.reply_len, clen);
942 return -EIO; 942 return -EIO;
943 } 943 }
944 len -= clen; 944 len -= clen;
945 addr += clen; 945 addr += clen;
946 dest += clen; 946 dest += clen;
947 } 947 }
948 return 0; 948 return 0;
949 } 949 }
950 950
/*
 * Extract "sdb" partition 'id' from the SMU and cache it as a
 * device-tree property on the SMU node so later lookups are cheap.
 *
 * Returns a pointer to the partition header inside the property
 * buffer, or NULL on any failure.  Caller is expected to hold
 * smu_part_access.  The property (and thus the returned header) is
 * never freed once added to the device-tree, so the pointer stays
 * valid for the life of the system.
 */
static struct smu_sdbp_header *smu_create_sdb_partition(int id)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	struct smu_simple_cmd cmd;
	unsigned int addr, len, tlen;
	struct smu_sdbp_header *hdr;
	struct property *prop;

	/* First query the partition info */
	DPRINTK("SMU: Query partition infos ... (irq=%d)\n", smu->db_irq);
	smu_queue_simple(&cmd, SMU_CMD_PARTITION_COMMAND, 2,
			 smu_done_complete, &comp,
			 SMU_CMD_PARTITION_LATEST, id);
	wait_for_completion(&comp);
	DPRINTK("SMU: done, status: %d, reply_len: %d\n",
		cmd.cmd.status, cmd.cmd.reply_len);

	/* Partition doesn't exist (or other error) */
	if (cmd.cmd.status != 0 || cmd.cmd.reply_len != 6)
		return NULL;

	/* Fetch address and length from reply */
	addr = *((u16 *)cmd.buffer);
	len = cmd.buffer[3] << 2;
	/* Calculate total length to allocate, including the 17 bytes
	 * for "sdb-partition-XX" that we append at the end of the buffer
	 */
	tlen = sizeof(struct property) + len + 18;

	/* Single allocation: property struct, then the partition data,
	 * then the property name at the very end. */
	prop = kzalloc(tlen, GFP_KERNEL);
	if (prop == NULL)
		return NULL;
	hdr = (struct smu_sdbp_header *)(prop + 1);
	prop->name = ((char *)prop) + tlen - 18;
	sprintf(prop->name, "sdb-partition-%02x", id);
	prop->length = len;
	prop->value = hdr;
	prop->next = NULL;

	/* Read the datablock */
	if (smu_read_datablock((u8 *)hdr, addr, len)) {
		printk(KERN_DEBUG "SMU: datablock read failed while reading "
		       "partition %02x !\n", id);
		goto failure;
	}

	/* Got it, check a few things and create the property */
	if (hdr->id != id) {
		printk(KERN_DEBUG "SMU: Reading partition %02x and got "
		       "%02x !\n", id, hdr->id);
		goto failure;
	}
	if (prom_add_property(smu->of_node, prop)) {
		printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x "
		       "property !\n", id);
		goto failure;
	}

	return hdr;
 failure:
	kfree(prop);
	return NULL;
}
1014 1014
1015 /* Note: Only allowed to return error code in pointers (using ERR_PTR) 1015 /* Note: Only allowed to return error code in pointers (using ERR_PTR)
1016 * when interruptible is 1 1016 * when interruptible is 1
1017 */ 1017 */
1018 const struct smu_sdbp_header *__smu_get_sdb_partition(int id, 1018 const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
1019 unsigned int *size, int interruptible) 1019 unsigned int *size, int interruptible)
1020 { 1020 {
1021 char pname[32]; 1021 char pname[32];
1022 const struct smu_sdbp_header *part; 1022 const struct smu_sdbp_header *part;
1023 1023
1024 if (!smu) 1024 if (!smu)
1025 return NULL; 1025 return NULL;
1026 1026
1027 sprintf(pname, "sdb-partition-%02x", id); 1027 sprintf(pname, "sdb-partition-%02x", id);
1028 1028
1029 DPRINTK("smu_get_sdb_partition(%02x)\n", id); 1029 DPRINTK("smu_get_sdb_partition(%02x)\n", id);
1030 1030
1031 if (interruptible) { 1031 if (interruptible) {
1032 int rc; 1032 int rc;
1033 rc = mutex_lock_interruptible(&smu_part_access); 1033 rc = mutex_lock_interruptible(&smu_part_access);
1034 if (rc) 1034 if (rc)
1035 return ERR_PTR(rc); 1035 return ERR_PTR(rc);
1036 } else 1036 } else
1037 mutex_lock(&smu_part_access); 1037 mutex_lock(&smu_part_access);
1038 1038
1039 part = of_get_property(smu->of_node, pname, size); 1039 part = of_get_property(smu->of_node, pname, size);
1040 if (part == NULL) { 1040 if (part == NULL) {
1041 DPRINTK("trying to extract from SMU ...\n"); 1041 DPRINTK("trying to extract from SMU ...\n");
1042 part = smu_create_sdb_partition(id); 1042 part = smu_create_sdb_partition(id);
1043 if (part != NULL && size) 1043 if (part != NULL && size)
1044 *size = part->len << 2; 1044 *size = part->len << 2;
1045 } 1045 }
1046 mutex_unlock(&smu_part_access); 1046 mutex_unlock(&smu_part_access);
1047 return part; 1047 return part;
1048 } 1048 }
1049 1049
/* Exported non-interruptible lookup of an SMU "sdb" partition; may
 * sleep on the partition mutex.  Returns NULL if absent. */
const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size)
{
	return __smu_get_sdb_partition(id, size, 0);
}
EXPORT_SYMBOL(smu_get_sdb_partition);
1055 1055
1056 1056
1057 /* 1057 /*
1058 * Userland driver interface 1058 * Userland driver interface
1059 */ 1059 */
1060 1060
1061 1061
1062 static LIST_HEAD(smu_clist); 1062 static LIST_HEAD(smu_clist);
1063 static DEFINE_SPINLOCK(smu_clist_lock); 1063 static DEFINE_SPINLOCK(smu_clist_lock);
1064 1064
1065 enum smu_file_mode { 1065 enum smu_file_mode {
1066 smu_file_commands, 1066 smu_file_commands,
1067 smu_file_events, 1067 smu_file_events,
1068 smu_file_closing 1068 smu_file_closing
1069 }; 1069 };
1070 1070
1071 struct smu_private 1071 struct smu_private
1072 { 1072 {
1073 struct list_head list; 1073 struct list_head list;
1074 enum smu_file_mode mode; 1074 enum smu_file_mode mode;
1075 int busy; 1075 int busy;
1076 struct smu_cmd cmd; 1076 struct smu_cmd cmd;
1077 spinlock_t lock; 1077 spinlock_t lock;
1078 wait_queue_head_t wait; 1078 wait_queue_head_t wait;
1079 u8 buffer[SMU_MAX_DATA]; 1079 u8 buffer[SMU_MAX_DATA];
1080 }; 1080 };
1081 1081
1082 1082
1083 static int smu_open(struct inode *inode, struct file *file) 1083 static int smu_open(struct inode *inode, struct file *file)
1084 { 1084 {
1085 struct smu_private *pp; 1085 struct smu_private *pp;
1086 unsigned long flags; 1086 unsigned long flags;
1087 1087
1088 pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL); 1088 pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
1089 if (pp == 0) 1089 if (pp == 0)
1090 return -ENOMEM; 1090 return -ENOMEM;
1091 spin_lock_init(&pp->lock); 1091 spin_lock_init(&pp->lock);
1092 pp->mode = smu_file_commands; 1092 pp->mode = smu_file_commands;
1093 init_waitqueue_head(&pp->wait); 1093 init_waitqueue_head(&pp->wait);
1094 1094
1095 lock_kernel(); 1095 lock_kernel();
1096 spin_lock_irqsave(&smu_clist_lock, flags); 1096 spin_lock_irqsave(&smu_clist_lock, flags);
1097 list_add(&pp->list, &smu_clist); 1097 list_add(&pp->list, &smu_clist);
1098 spin_unlock_irqrestore(&smu_clist_lock, flags); 1098 spin_unlock_irqrestore(&smu_clist_lock, flags);
1099 file->private_data = pp; 1099 file->private_data = pp;
1100 unlock_kernel(); 1100 unlock_kernel();
1101 1101
1102 return 0; 1102 return 0;
1103 } 1103 }
1104 1104
1105 1105
/* Completion callback for user-queued commands: 'misc' carries the
 * owning client; wake all readers/pollers sleeping on it.  'cmd' is
 * unused here (status is inspected by the woken reader). */
static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
{
	struct smu_private *pp = misc;

	wake_up_all(&pp->wait);
}
1112 1112
1113 1113
1114 static ssize_t smu_write(struct file *file, const char __user *buf, 1114 static ssize_t smu_write(struct file *file, const char __user *buf,
1115 size_t count, loff_t *ppos) 1115 size_t count, loff_t *ppos)
1116 { 1116 {
1117 struct smu_private *pp = file->private_data; 1117 struct smu_private *pp = file->private_data;
1118 unsigned long flags; 1118 unsigned long flags;
1119 struct smu_user_cmd_hdr hdr; 1119 struct smu_user_cmd_hdr hdr;
1120 int rc = 0; 1120 int rc = 0;
1121 1121
1122 if (pp->busy) 1122 if (pp->busy)
1123 return -EBUSY; 1123 return -EBUSY;
1124 else if (copy_from_user(&hdr, buf, sizeof(hdr))) 1124 else if (copy_from_user(&hdr, buf, sizeof(hdr)))
1125 return -EFAULT; 1125 return -EFAULT;
1126 else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) { 1126 else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
1127 pp->mode = smu_file_events; 1127 pp->mode = smu_file_events;
1128 return 0; 1128 return 0;
1129 } else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) { 1129 } else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) {
1130 const struct smu_sdbp_header *part; 1130 const struct smu_sdbp_header *part;
1131 part = __smu_get_sdb_partition(hdr.cmd, NULL, 1); 1131 part = __smu_get_sdb_partition(hdr.cmd, NULL, 1);
1132 if (part == NULL) 1132 if (part == NULL)
1133 return -EINVAL; 1133 return -EINVAL;
1134 else if (IS_ERR(part)) 1134 else if (IS_ERR(part))
1135 return PTR_ERR(part); 1135 return PTR_ERR(part);
1136 return 0; 1136 return 0;
1137 } else if (hdr.cmdtype != SMU_CMDTYPE_SMU) 1137 } else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
1138 return -EINVAL; 1138 return -EINVAL;
1139 else if (pp->mode != smu_file_commands) 1139 else if (pp->mode != smu_file_commands)
1140 return -EBADFD; 1140 return -EBADFD;
1141 else if (hdr.data_len > SMU_MAX_DATA) 1141 else if (hdr.data_len > SMU_MAX_DATA)
1142 return -EINVAL; 1142 return -EINVAL;
1143 1143
1144 spin_lock_irqsave(&pp->lock, flags); 1144 spin_lock_irqsave(&pp->lock, flags);
1145 if (pp->busy) { 1145 if (pp->busy) {
1146 spin_unlock_irqrestore(&pp->lock, flags); 1146 spin_unlock_irqrestore(&pp->lock, flags);
1147 return -EBUSY; 1147 return -EBUSY;
1148 } 1148 }
1149 pp->busy = 1; 1149 pp->busy = 1;
1150 pp->cmd.status = 1; 1150 pp->cmd.status = 1;
1151 spin_unlock_irqrestore(&pp->lock, flags); 1151 spin_unlock_irqrestore(&pp->lock, flags);
1152 1152
1153 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) { 1153 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
1154 pp->busy = 0; 1154 pp->busy = 0;
1155 return -EFAULT; 1155 return -EFAULT;
1156 } 1156 }
1157 1157
1158 pp->cmd.cmd = hdr.cmd; 1158 pp->cmd.cmd = hdr.cmd;
1159 pp->cmd.data_len = hdr.data_len; 1159 pp->cmd.data_len = hdr.data_len;
1160 pp->cmd.reply_len = SMU_MAX_DATA; 1160 pp->cmd.reply_len = SMU_MAX_DATA;
1161 pp->cmd.data_buf = pp->buffer; 1161 pp->cmd.data_buf = pp->buffer;
1162 pp->cmd.reply_buf = pp->buffer; 1162 pp->cmd.reply_buf = pp->buffer;
1163 pp->cmd.done = smu_user_cmd_done; 1163 pp->cmd.done = smu_user_cmd_done;
1164 pp->cmd.misc = pp; 1164 pp->cmd.misc = pp;
1165 rc = smu_queue_cmd(&pp->cmd); 1165 rc = smu_queue_cmd(&pp->cmd);
1166 if (rc < 0) 1166 if (rc < 0)
1167 return rc; 1167 return rc;
1168 return count; 1168 return count;
1169 } 1169 }
1170 1170
1171 1171
/*
 * Return the reply of the command queued by smu_write(), blocking
 * until completion unless O_NONBLOCK is set.  The data copied out is
 * a struct smu_user_reply_hdr followed by the reply bytes, truncated
 * to 'count' if need be.
 */
static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
				char __user *buf, size_t count)
{
	DECLARE_WAITQUEUE(wait, current);
	struct smu_user_reply_hdr hdr;
	unsigned long flags;
	int size, rc = 0;

	if (!pp->busy)
		return 0;
	if (count < sizeof(struct smu_user_reply_hdr))
		return -EOVERFLOW;
	spin_lock_irqsave(&pp->lock, flags);
	if (pp->cmd.status == 1) {
		/* Command still in flight */
		if (file->f_flags & O_NONBLOCK) {
			/* must drop the lock before bailing out */
			spin_unlock_irqrestore(&pp->lock, flags);
			return -EAGAIN;
		}
		/* Sleep until smu_user_cmd_done() wakes us or a signal
		 * arrives; the lock is released only around schedule(). */
		add_wait_queue(&pp->wait, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			rc = 0;
			if (pp->cmd.status != 1)
				break;
			rc = -ERESTARTSYS;
			if (signal_pending(current))
				break;
			spin_unlock_irqrestore(&pp->lock, flags);
			schedule();
			spin_lock_irqsave(&pp->lock, flags);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pp->wait, &wait);
	}
	spin_unlock_irqrestore(&pp->lock, flags);
	if (rc)
		return rc;
	/* On command failure, only the header (with the status) goes back */
	if (pp->cmd.status != 0)
		pp->cmd.reply_len = 0;
	size = sizeof(hdr) + pp->cmd.reply_len;
	if (count < size)
		size = count;
	rc = size;
	hdr.status = pp->cmd.status;
	hdr.reply_len = pp->cmd.reply_len;
	if (copy_to_user(buf, &hdr, sizeof(hdr)))
		return -EFAULT;
	size -= sizeof(hdr);
	if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
		return -EFAULT;
	/* Reply consumed, the command slot is free again */
	pp->busy = 0;

	return rc;
}
1224 1226
1225 1227
/* Event-mode read -- not implemented yet; sleep briefly so a userland
 * read loop doesn't busy-spin, then report "no data". */
static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
			       char __user *buf, size_t count)
{
	/* Not implemented */
	msleep_interruptible(1000);
	return 0;
}
1233 1235
1234 1236
1235 static ssize_t smu_read(struct file *file, char __user *buf, 1237 static ssize_t smu_read(struct file *file, char __user *buf,
1236 size_t count, loff_t *ppos) 1238 size_t count, loff_t *ppos)
1237 { 1239 {
1238 struct smu_private *pp = file->private_data; 1240 struct smu_private *pp = file->private_data;
1239 1241
1240 if (pp->mode == smu_file_commands) 1242 if (pp->mode == smu_file_commands)
1241 return smu_read_command(file, pp, buf, count); 1243 return smu_read_command(file, pp, buf, count);
1242 if (pp->mode == smu_file_events) 1244 if (pp->mode == smu_file_events)
1243 return smu_read_events(file, pp, buf, count); 1245 return smu_read_events(file, pp, buf, count);
1244 1246
1245 return -EBADFD; 1247 return -EBADFD;
1246 } 1248 }
1247 1249
1248 static unsigned int smu_fpoll(struct file *file, poll_table *wait) 1250 static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1249 { 1251 {
1250 struct smu_private *pp = file->private_data; 1252 struct smu_private *pp = file->private_data;
1251 unsigned int mask = 0; 1253 unsigned int mask = 0;
1252 unsigned long flags; 1254 unsigned long flags;
1253 1255
1254 if (pp == 0) 1256 if (pp == 0)
1255 return 0; 1257 return 0;
1256 1258
1257 if (pp->mode == smu_file_commands) { 1259 if (pp->mode == smu_file_commands) {
1258 poll_wait(file, &pp->wait, wait); 1260 poll_wait(file, &pp->wait, wait);
1259 1261
1260 spin_lock_irqsave(&pp->lock, flags); 1262 spin_lock_irqsave(&pp->lock, flags);
1261 if (pp->busy && pp->cmd.status != 1) 1263 if (pp->busy && pp->cmd.status != 1)
1262 mask |= POLLIN; 1264 mask |= POLLIN;
1263 spin_unlock_irqrestore(&pp->lock, flags); 1265 spin_unlock_irqrestore(&pp->lock, flags);
1264 } if (pp->mode == smu_file_events) { 1266 } if (pp->mode == smu_file_events) {
1265 /* Not yet implemented */ 1267 /* Not yet implemented */
1266 } 1268 }
1267 return mask; 1269 return mask;
1268 } 1270 }
1269 1271
/*
 * Close a client handle.  Any in-flight command must complete first:
 * the SMU core still references pp->cmd (and the completion callback
 * references pp), so we wait uninterruptibly before freeing.
 */
static int smu_release(struct inode *inode, struct file *file)
{
	struct smu_private *pp = file->private_data;
	unsigned long flags;
	unsigned int busy;

	if (pp == 0)
		return 0;

	file->private_data = NULL;

	/* Mark file as closing to avoid races with new request */
	spin_lock_irqsave(&pp->lock, flags);
	pp->mode = smu_file_closing;
	busy = pp->busy;

	/* Wait for any pending request to complete */
	if (busy && pp->cmd.status == 1) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(&pp->wait, &wait);
		for (;;) {
			/* TASK_UNINTERRUPTIBLE: freeing pp while the
			 * command is live would be a use-after-free */
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (pp->cmd.status != 1)
				break;
			/* drop the lock only around schedule() */
			spin_unlock_irqrestore(&pp->lock, flags);
			schedule();
			spin_lock_irqsave(&pp->lock, flags);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pp->wait, &wait);
	}
	spin_unlock_irqrestore(&pp->lock, flags);

	/* Unlink from the global client list and free */
	spin_lock_irqsave(&smu_clist_lock, flags);
	list_del(&pp->list);
	spin_unlock_irqrestore(&smu_clist_lock, flags);
	kfree(pp);

	return 0;
}
1311 1313
1312 1314
/* File operations backing the /dev/smu misc device. */
static const struct file_operations smu_device_fops = {
	.llseek		= no_llseek,
	.read		= smu_read,
	.write		= smu_write,
	.poll		= smu_fpoll,
	.open		= smu_open,
	.release	= smu_release,
};
1321 1323
1322 static struct miscdevice pmu_device = { 1324 static struct miscdevice pmu_device = {
1323 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops 1325 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1324 }; 1326 };
1325 1327
1326 static int smu_device_init(void) 1328 static int smu_device_init(void)
1327 { 1329 {
1328 if (!smu) 1330 if (!smu)
1329 return -ENODEV; 1331 return -ENODEV;
1330 if (misc_register(&pmu_device) < 0) 1332 if (misc_register(&pmu_device) < 0)
1331 printk(KERN_ERR "via-pmu: cannot register misc device.\n"); 1333 printk(KERN_ERR "via-pmu: cannot register misc device.\n");
1332 return 0; 1334 return 0;
1333 } 1335 }
1334 device_initcall(smu_device_init); 1336 device_initcall(smu_device_init);
1335 1337