Commit adf094931ffb25ef4b381559918f1a34181a5273
Committed by
Greg Kroah-Hartman
1 parent
238c6d5483
Exists in
master
and in
39 other branches
PM: Simplify the new suspend/hibernation framework for devices
PM: Simplify the new suspend/hibernation framework for devices Following the discussion at the Kernel Summit, simplify the new device PM framework by merging 'struct pm_ops' and 'struct pm_ext_ops' and removing pointers to 'struct pm_ext_ops' from 'struct platform_driver' and 'struct pci_driver'. After this change, the suspend/hibernation callbacks will only reside in 'struct device_driver' as well as at the bus type/ device class/device type level. Accordingly, PCI and platform device drivers are now expected to put their suspend/hibernation callbacks into the 'struct device_driver' embedded in 'struct pci_driver' or 'struct platform_driver', respectively. Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl> Acked-by: Pavel Machek <pavel@suse.cz> Cc: Jesse Barnes <jbarnes@virtuousgeek.org> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Showing 8 changed files with 119 additions and 151 deletions Inline Diff
drivers/base/platform.c
1 | /* | 1 | /* |
2 | * platform.c - platform 'pseudo' bus for legacy devices | 2 | * platform.c - platform 'pseudo' bus for legacy devices |
3 | * | 3 | * |
4 | * Copyright (c) 2002-3 Patrick Mochel | 4 | * Copyright (c) 2002-3 Patrick Mochel |
5 | * Copyright (c) 2002-3 Open Source Development Labs | 5 | * Copyright (c) 2002-3 Open Source Development Labs |
6 | * | 6 | * |
7 | * This file is released under the GPLv2 | 7 | * This file is released under the GPLv2 |
8 | * | 8 | * |
9 | * Please see Documentation/driver-model/platform.txt for more | 9 | * Please see Documentation/driver-model/platform.txt for more |
10 | * information. | 10 | * information. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/dma-mapping.h> | 16 | #include <linux/dma-mapping.h> |
17 | #include <linux/bootmem.h> | 17 | #include <linux/bootmem.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
21 | #include "base.h" | 21 | #include "base.h" |
22 | 22 | ||
/* Map an embedded struct device_driver back to its containing platform_driver. */
#define to_platform_driver(drv)	(container_of((drv), \
				 struct platform_driver, driver))
25 | 25 | ||
26 | struct device platform_bus = { | 26 | struct device platform_bus = { |
27 | .bus_id = "platform", | 27 | .bus_id = "platform", |
28 | }; | 28 | }; |
29 | EXPORT_SYMBOL_GPL(platform_bus); | 29 | EXPORT_SYMBOL_GPL(platform_bus); |
30 | 30 | ||
31 | /** | 31 | /** |
32 | * platform_get_resource - get a resource for a device | 32 | * platform_get_resource - get a resource for a device |
33 | * @dev: platform device | 33 | * @dev: platform device |
34 | * @type: resource type | 34 | * @type: resource type |
35 | * @num: resource index | 35 | * @num: resource index |
36 | */ | 36 | */ |
37 | struct resource *platform_get_resource(struct platform_device *dev, | 37 | struct resource *platform_get_resource(struct platform_device *dev, |
38 | unsigned int type, unsigned int num) | 38 | unsigned int type, unsigned int num) |
39 | { | 39 | { |
40 | int i; | 40 | int i; |
41 | 41 | ||
42 | for (i = 0; i < dev->num_resources; i++) { | 42 | for (i = 0; i < dev->num_resources; i++) { |
43 | struct resource *r = &dev->resource[i]; | 43 | struct resource *r = &dev->resource[i]; |
44 | 44 | ||
45 | if (type == resource_type(r) && num-- == 0) | 45 | if (type == resource_type(r) && num-- == 0) |
46 | return r; | 46 | return r; |
47 | } | 47 | } |
48 | return NULL; | 48 | return NULL; |
49 | } | 49 | } |
50 | EXPORT_SYMBOL_GPL(platform_get_resource); | 50 | EXPORT_SYMBOL_GPL(platform_get_resource); |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * platform_get_irq - get an IRQ for a device | 53 | * platform_get_irq - get an IRQ for a device |
54 | * @dev: platform device | 54 | * @dev: platform device |
55 | * @num: IRQ number index | 55 | * @num: IRQ number index |
56 | */ | 56 | */ |
57 | int platform_get_irq(struct platform_device *dev, unsigned int num) | 57 | int platform_get_irq(struct platform_device *dev, unsigned int num) |
58 | { | 58 | { |
59 | struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); | 59 | struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); |
60 | 60 | ||
61 | return r ? r->start : -ENXIO; | 61 | return r ? r->start : -ENXIO; |
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(platform_get_irq); | 63 | EXPORT_SYMBOL_GPL(platform_get_irq); |
64 | 64 | ||
65 | /** | 65 | /** |
66 | * platform_get_resource_byname - get a resource for a device by name | 66 | * platform_get_resource_byname - get a resource for a device by name |
67 | * @dev: platform device | 67 | * @dev: platform device |
68 | * @type: resource type | 68 | * @type: resource type |
69 | * @name: resource name | 69 | * @name: resource name |
70 | */ | 70 | */ |
71 | struct resource *platform_get_resource_byname(struct platform_device *dev, | 71 | struct resource *platform_get_resource_byname(struct platform_device *dev, |
72 | unsigned int type, char *name) | 72 | unsigned int type, char *name) |
73 | { | 73 | { |
74 | int i; | 74 | int i; |
75 | 75 | ||
76 | for (i = 0; i < dev->num_resources; i++) { | 76 | for (i = 0; i < dev->num_resources; i++) { |
77 | struct resource *r = &dev->resource[i]; | 77 | struct resource *r = &dev->resource[i]; |
78 | 78 | ||
79 | if (type == resource_type(r) && !strcmp(r->name, name)) | 79 | if (type == resource_type(r) && !strcmp(r->name, name)) |
80 | return r; | 80 | return r; |
81 | } | 81 | } |
82 | return NULL; | 82 | return NULL; |
83 | } | 83 | } |
84 | EXPORT_SYMBOL_GPL(platform_get_resource_byname); | 84 | EXPORT_SYMBOL_GPL(platform_get_resource_byname); |
85 | 85 | ||
86 | /** | 86 | /** |
87 | * platform_get_irq - get an IRQ for a device | 87 | * platform_get_irq - get an IRQ for a device |
88 | * @dev: platform device | 88 | * @dev: platform device |
89 | * @name: IRQ name | 89 | * @name: IRQ name |
90 | */ | 90 | */ |
91 | int platform_get_irq_byname(struct platform_device *dev, char *name) | 91 | int platform_get_irq_byname(struct platform_device *dev, char *name) |
92 | { | 92 | { |
93 | struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, | 93 | struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, |
94 | name); | 94 | name); |
95 | 95 | ||
96 | return r ? r->start : -ENXIO; | 96 | return r ? r->start : -ENXIO; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL_GPL(platform_get_irq_byname); | 98 | EXPORT_SYMBOL_GPL(platform_get_irq_byname); |
99 | 99 | ||
/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Registers all @num devices in @devs.  On the first failure, every
 * device registered so far is unregistered again, so the call either
 * registers all devices or none.  Returns 0 on success or the error
 * from the failing platform_device_register() call.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			/* roll back everything registered before the failure */
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
121 | 121 | ||
122 | struct platform_object { | 122 | struct platform_object { |
123 | struct platform_device pdev; | 123 | struct platform_device pdev; |
124 | char name[1]; | 124 | char name[1]; |
125 | }; | 125 | }; |
126 | 126 | ||
127 | /** | 127 | /** |
128 | * platform_device_put | 128 | * platform_device_put |
129 | * @pdev: platform device to free | 129 | * @pdev: platform device to free |
130 | * | 130 | * |
131 | * Free all memory associated with a platform device. This function must | 131 | * Free all memory associated with a platform device. This function must |
132 | * _only_ be externally called in error cases. All other usage is a bug. | 132 | * _only_ be externally called in error cases. All other usage is a bug. |
133 | */ | 133 | */ |
134 | void platform_device_put(struct platform_device *pdev) | 134 | void platform_device_put(struct platform_device *pdev) |
135 | { | 135 | { |
136 | if (pdev) | 136 | if (pdev) |
137 | put_device(&pdev->dev); | 137 | put_device(&pdev->dev); |
138 | } | 138 | } |
139 | EXPORT_SYMBOL_GPL(platform_device_put); | 139 | EXPORT_SYMBOL_GPL(platform_device_put); |
140 | 140 | ||
141 | static void platform_device_release(struct device *dev) | 141 | static void platform_device_release(struct device *dev) |
142 | { | 142 | { |
143 | struct platform_object *pa = container_of(dev, struct platform_object, | 143 | struct platform_object *pa = container_of(dev, struct platform_object, |
144 | pdev.dev); | 144 | pdev.dev); |
145 | 145 | ||
146 | kfree(pa->pdev.dev.platform_data); | 146 | kfree(pa->pdev.dev.platform_data); |
147 | kfree(pa->pdev.resource); | 147 | kfree(pa->pdev.resource); |
148 | kfree(pa); | 148 | kfree(pa); |
149 | } | 149 | } |
150 | 150 | ||
151 | /** | 151 | /** |
152 | * platform_device_alloc | 152 | * platform_device_alloc |
153 | * @name: base name of the device we're adding | 153 | * @name: base name of the device we're adding |
154 | * @id: instance id | 154 | * @id: instance id |
155 | * | 155 | * |
156 | * Create a platform device object which can have other objects attached | 156 | * Create a platform device object which can have other objects attached |
157 | * to it, and which will have attached objects freed when it is released. | 157 | * to it, and which will have attached objects freed when it is released. |
158 | */ | 158 | */ |
159 | struct platform_device *platform_device_alloc(const char *name, int id) | 159 | struct platform_device *platform_device_alloc(const char *name, int id) |
160 | { | 160 | { |
161 | struct platform_object *pa; | 161 | struct platform_object *pa; |
162 | 162 | ||
163 | pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); | 163 | pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); |
164 | if (pa) { | 164 | if (pa) { |
165 | strcpy(pa->name, name); | 165 | strcpy(pa->name, name); |
166 | pa->pdev.name = pa->name; | 166 | pa->pdev.name = pa->name; |
167 | pa->pdev.id = id; | 167 | pa->pdev.id = id; |
168 | device_initialize(&pa->pdev.dev); | 168 | device_initialize(&pa->pdev.dev); |
169 | pa->pdev.dev.release = platform_device_release; | 169 | pa->pdev.dev.release = platform_device_release; |
170 | } | 170 | } |
171 | 171 | ||
172 | return pa ? &pa->pdev : NULL; | 172 | return pa ? &pa->pdev : NULL; |
173 | } | 173 | } |
174 | EXPORT_SYMBOL_GPL(platform_device_alloc); | 174 | EXPORT_SYMBOL_GPL(platform_device_alloc); |
175 | 175 | ||
176 | /** | 176 | /** |
177 | * platform_device_add_resources | 177 | * platform_device_add_resources |
178 | * @pdev: platform device allocated by platform_device_alloc to add resources to | 178 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
179 | * @res: set of resources that needs to be allocated for the device | 179 | * @res: set of resources that needs to be allocated for the device |
180 | * @num: number of resources | 180 | * @num: number of resources |
181 | * | 181 | * |
182 | * Add a copy of the resources to the platform device. The memory | 182 | * Add a copy of the resources to the platform device. The memory |
183 | * associated with the resources will be freed when the platform device is | 183 | * associated with the resources will be freed when the platform device is |
184 | * released. | 184 | * released. |
185 | */ | 185 | */ |
186 | int platform_device_add_resources(struct platform_device *pdev, | 186 | int platform_device_add_resources(struct platform_device *pdev, |
187 | struct resource *res, unsigned int num) | 187 | struct resource *res, unsigned int num) |
188 | { | 188 | { |
189 | struct resource *r; | 189 | struct resource *r; |
190 | 190 | ||
191 | r = kmalloc(sizeof(struct resource) * num, GFP_KERNEL); | 191 | r = kmalloc(sizeof(struct resource) * num, GFP_KERNEL); |
192 | if (r) { | 192 | if (r) { |
193 | memcpy(r, res, sizeof(struct resource) * num); | 193 | memcpy(r, res, sizeof(struct resource) * num); |
194 | pdev->resource = r; | 194 | pdev->resource = r; |
195 | pdev->num_resources = num; | 195 | pdev->num_resources = num; |
196 | } | 196 | } |
197 | return r ? 0 : -ENOMEM; | 197 | return r ? 0 : -ENOMEM; |
198 | } | 198 | } |
199 | EXPORT_SYMBOL_GPL(platform_device_add_resources); | 199 | EXPORT_SYMBOL_GPL(platform_device_add_resources); |
200 | 200 | ||
201 | /** | 201 | /** |
202 | * platform_device_add_data | 202 | * platform_device_add_data |
203 | * @pdev: platform device allocated by platform_device_alloc to add resources to | 203 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
204 | * @data: platform specific data for this platform device | 204 | * @data: platform specific data for this platform device |
205 | * @size: size of platform specific data | 205 | * @size: size of platform specific data |
206 | * | 206 | * |
207 | * Add a copy of platform specific data to the platform device's | 207 | * Add a copy of platform specific data to the platform device's |
208 | * platform_data pointer. The memory associated with the platform data | 208 | * platform_data pointer. The memory associated with the platform data |
209 | * will be freed when the platform device is released. | 209 | * will be freed when the platform device is released. |
210 | */ | 210 | */ |
211 | int platform_device_add_data(struct platform_device *pdev, const void *data, | 211 | int platform_device_add_data(struct platform_device *pdev, const void *data, |
212 | size_t size) | 212 | size_t size) |
213 | { | 213 | { |
214 | void *d; | 214 | void *d; |
215 | 215 | ||
216 | d = kmalloc(size, GFP_KERNEL); | 216 | d = kmalloc(size, GFP_KERNEL); |
217 | if (d) { | 217 | if (d) { |
218 | memcpy(d, data, size); | 218 | memcpy(d, data, size); |
219 | pdev->dev.platform_data = d; | 219 | pdev->dev.platform_data = d; |
220 | } | 220 | } |
221 | return d ? 0 : -ENOMEM; | 221 | return d ? 0 : -ENOMEM; |
222 | } | 222 | } |
223 | EXPORT_SYMBOL_GPL(platform_device_add_data); | 223 | EXPORT_SYMBOL_GPL(platform_device_add_data); |
224 | 224 | ||
225 | /** | 225 | /** |
226 | * platform_device_add - add a platform device to device hierarchy | 226 | * platform_device_add - add a platform device to device hierarchy |
227 | * @pdev: platform device we're adding | 227 | * @pdev: platform device we're adding |
228 | * | 228 | * |
229 | * This is part 2 of platform_device_register(), though may be called | 229 | * This is part 2 of platform_device_register(), though may be called |
230 | * separately _iff_ pdev was allocated by platform_device_alloc(). | 230 | * separately _iff_ pdev was allocated by platform_device_alloc(). |
231 | */ | 231 | */ |
232 | int platform_device_add(struct platform_device *pdev) | 232 | int platform_device_add(struct platform_device *pdev) |
233 | { | 233 | { |
234 | int i, ret = 0; | 234 | int i, ret = 0; |
235 | 235 | ||
236 | if (!pdev) | 236 | if (!pdev) |
237 | return -EINVAL; | 237 | return -EINVAL; |
238 | 238 | ||
239 | if (!pdev->dev.parent) | 239 | if (!pdev->dev.parent) |
240 | pdev->dev.parent = &platform_bus; | 240 | pdev->dev.parent = &platform_bus; |
241 | 241 | ||
242 | pdev->dev.bus = &platform_bus_type; | 242 | pdev->dev.bus = &platform_bus_type; |
243 | 243 | ||
244 | if (pdev->id != -1) | 244 | if (pdev->id != -1) |
245 | snprintf(pdev->dev.bus_id, BUS_ID_SIZE, "%s.%d", pdev->name, | 245 | snprintf(pdev->dev.bus_id, BUS_ID_SIZE, "%s.%d", pdev->name, |
246 | pdev->id); | 246 | pdev->id); |
247 | else | 247 | else |
248 | strlcpy(pdev->dev.bus_id, pdev->name, BUS_ID_SIZE); | 248 | strlcpy(pdev->dev.bus_id, pdev->name, BUS_ID_SIZE); |
249 | 249 | ||
250 | for (i = 0; i < pdev->num_resources; i++) { | 250 | for (i = 0; i < pdev->num_resources; i++) { |
251 | struct resource *p, *r = &pdev->resource[i]; | 251 | struct resource *p, *r = &pdev->resource[i]; |
252 | 252 | ||
253 | if (r->name == NULL) | 253 | if (r->name == NULL) |
254 | r->name = pdev->dev.bus_id; | 254 | r->name = pdev->dev.bus_id; |
255 | 255 | ||
256 | p = r->parent; | 256 | p = r->parent; |
257 | if (!p) { | 257 | if (!p) { |
258 | if (resource_type(r) == IORESOURCE_MEM) | 258 | if (resource_type(r) == IORESOURCE_MEM) |
259 | p = &iomem_resource; | 259 | p = &iomem_resource; |
260 | else if (resource_type(r) == IORESOURCE_IO) | 260 | else if (resource_type(r) == IORESOURCE_IO) |
261 | p = &ioport_resource; | 261 | p = &ioport_resource; |
262 | } | 262 | } |
263 | 263 | ||
264 | if (p && insert_resource(p, r)) { | 264 | if (p && insert_resource(p, r)) { |
265 | printk(KERN_ERR | 265 | printk(KERN_ERR |
266 | "%s: failed to claim resource %d\n", | 266 | "%s: failed to claim resource %d\n", |
267 | pdev->dev.bus_id, i); | 267 | pdev->dev.bus_id, i); |
268 | ret = -EBUSY; | 268 | ret = -EBUSY; |
269 | goto failed; | 269 | goto failed; |
270 | } | 270 | } |
271 | } | 271 | } |
272 | 272 | ||
273 | pr_debug("Registering platform device '%s'. Parent at %s\n", | 273 | pr_debug("Registering platform device '%s'. Parent at %s\n", |
274 | pdev->dev.bus_id, pdev->dev.parent->bus_id); | 274 | pdev->dev.bus_id, pdev->dev.parent->bus_id); |
275 | 275 | ||
276 | ret = device_add(&pdev->dev); | 276 | ret = device_add(&pdev->dev); |
277 | if (ret == 0) | 277 | if (ret == 0) |
278 | return ret; | 278 | return ret; |
279 | 279 | ||
280 | failed: | 280 | failed: |
281 | while (--i >= 0) { | 281 | while (--i >= 0) { |
282 | struct resource *r = &pdev->resource[i]; | 282 | struct resource *r = &pdev->resource[i]; |
283 | unsigned long type = resource_type(r); | 283 | unsigned long type = resource_type(r); |
284 | 284 | ||
285 | if (type == IORESOURCE_MEM || type == IORESOURCE_IO) | 285 | if (type == IORESOURCE_MEM || type == IORESOURCE_IO) |
286 | release_resource(r); | 286 | release_resource(r); |
287 | } | 287 | } |
288 | 288 | ||
289 | return ret; | 289 | return ret; |
290 | } | 290 | } |
291 | EXPORT_SYMBOL_GPL(platform_device_add); | 291 | EXPORT_SYMBOL_GPL(platform_device_add); |
292 | 292 | ||
293 | /** | 293 | /** |
294 | * platform_device_del - remove a platform-level device | 294 | * platform_device_del - remove a platform-level device |
295 | * @pdev: platform device we're removing | 295 | * @pdev: platform device we're removing |
296 | * | 296 | * |
297 | * Note that this function will also release all memory- and port-based | 297 | * Note that this function will also release all memory- and port-based |
298 | * resources owned by the device (@dev->resource). This function must | 298 | * resources owned by the device (@dev->resource). This function must |
299 | * _only_ be externally called in error cases. All other usage is a bug. | 299 | * _only_ be externally called in error cases. All other usage is a bug. |
300 | */ | 300 | */ |
301 | void platform_device_del(struct platform_device *pdev) | 301 | void platform_device_del(struct platform_device *pdev) |
302 | { | 302 | { |
303 | int i; | 303 | int i; |
304 | 304 | ||
305 | if (pdev) { | 305 | if (pdev) { |
306 | device_del(&pdev->dev); | 306 | device_del(&pdev->dev); |
307 | 307 | ||
308 | for (i = 0; i < pdev->num_resources; i++) { | 308 | for (i = 0; i < pdev->num_resources; i++) { |
309 | struct resource *r = &pdev->resource[i]; | 309 | struct resource *r = &pdev->resource[i]; |
310 | unsigned long type = resource_type(r); | 310 | unsigned long type = resource_type(r); |
311 | 311 | ||
312 | if (type == IORESOURCE_MEM || type == IORESOURCE_IO) | 312 | if (type == IORESOURCE_MEM || type == IORESOURCE_IO) |
313 | release_resource(r); | 313 | release_resource(r); |
314 | } | 314 | } |
315 | } | 315 | } |
316 | } | 316 | } |
317 | EXPORT_SYMBOL_GPL(platform_device_del); | 317 | EXPORT_SYMBOL_GPL(platform_device_del); |
318 | 318 | ||
319 | /** | 319 | /** |
320 | * platform_device_register - add a platform-level device | 320 | * platform_device_register - add a platform-level device |
321 | * @pdev: platform device we're adding | 321 | * @pdev: platform device we're adding |
322 | */ | 322 | */ |
323 | int platform_device_register(struct platform_device *pdev) | 323 | int platform_device_register(struct platform_device *pdev) |
324 | { | 324 | { |
325 | device_initialize(&pdev->dev); | 325 | device_initialize(&pdev->dev); |
326 | return platform_device_add(pdev); | 326 | return platform_device_add(pdev); |
327 | } | 327 | } |
328 | EXPORT_SYMBOL_GPL(platform_device_register); | 328 | EXPORT_SYMBOL_GPL(platform_device_register); |
329 | 329 | ||
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps.  First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
344 | 344 | ||
345 | /** | 345 | /** |
346 | * platform_device_register_simple | 346 | * platform_device_register_simple |
347 | * @name: base name of the device we're adding | 347 | * @name: base name of the device we're adding |
348 | * @id: instance id | 348 | * @id: instance id |
349 | * @res: set of resources that needs to be allocated for the device | 349 | * @res: set of resources that needs to be allocated for the device |
350 | * @num: number of resources | 350 | * @num: number of resources |
351 | * | 351 | * |
352 | * This function creates a simple platform device that requires minimal | 352 | * This function creates a simple platform device that requires minimal |
353 | * resource and memory management. Canned release function freeing memory | 353 | * resource and memory management. Canned release function freeing memory |
354 | * allocated for the device allows drivers using such devices to be | 354 | * allocated for the device allows drivers using such devices to be |
355 | * unloaded without waiting for the last reference to the device to be | 355 | * unloaded without waiting for the last reference to the device to be |
356 | * dropped. | 356 | * dropped. |
357 | * | 357 | * |
358 | * This interface is primarily intended for use with legacy drivers which | 358 | * This interface is primarily intended for use with legacy drivers which |
359 | * probe hardware directly. Because such drivers create sysfs device nodes | 359 | * probe hardware directly. Because such drivers create sysfs device nodes |
360 | * themselves, rather than letting system infrastructure handle such device | 360 | * themselves, rather than letting system infrastructure handle such device |
361 | * enumeration tasks, they don't fully conform to the Linux driver model. | 361 | * enumeration tasks, they don't fully conform to the Linux driver model. |
362 | * In particular, when such drivers are built as modules, they can't be | 362 | * In particular, when such drivers are built as modules, they can't be |
363 | * "hotplugged". | 363 | * "hotplugged". |
364 | */ | 364 | */ |
365 | struct platform_device *platform_device_register_simple(const char *name, | 365 | struct platform_device *platform_device_register_simple(const char *name, |
366 | int id, | 366 | int id, |
367 | struct resource *res, | 367 | struct resource *res, |
368 | unsigned int num) | 368 | unsigned int num) |
369 | { | 369 | { |
370 | struct platform_device *pdev; | 370 | struct platform_device *pdev; |
371 | int retval; | 371 | int retval; |
372 | 372 | ||
373 | pdev = platform_device_alloc(name, id); | 373 | pdev = platform_device_alloc(name, id); |
374 | if (!pdev) { | 374 | if (!pdev) { |
375 | retval = -ENOMEM; | 375 | retval = -ENOMEM; |
376 | goto error; | 376 | goto error; |
377 | } | 377 | } |
378 | 378 | ||
379 | if (num) { | 379 | if (num) { |
380 | retval = platform_device_add_resources(pdev, res, num); | 380 | retval = platform_device_add_resources(pdev, res, num); |
381 | if (retval) | 381 | if (retval) |
382 | goto error; | 382 | goto error; |
383 | } | 383 | } |
384 | 384 | ||
385 | retval = platform_device_add(pdev); | 385 | retval = platform_device_add(pdev); |
386 | if (retval) | 386 | if (retval) |
387 | goto error; | 387 | goto error; |
388 | 388 | ||
389 | return pdev; | 389 | return pdev; |
390 | 390 | ||
391 | error: | 391 | error: |
392 | platform_device_put(pdev); | 392 | platform_device_put(pdev); |
393 | return ERR_PTR(retval); | 393 | return ERR_PTR(retval); |
394 | } | 394 | } |
395 | EXPORT_SYMBOL_GPL(platform_device_register_simple); | 395 | EXPORT_SYMBOL_GPL(platform_device_register_simple); |
396 | 396 | ||
397 | /** | 397 | /** |
398 | * platform_device_register_data | 398 | * platform_device_register_data |
399 | * @parent: parent device for the device we're adding | 399 | * @parent: parent device for the device we're adding |
400 | * @name: base name of the device we're adding | 400 | * @name: base name of the device we're adding |
401 | * @id: instance id | 401 | * @id: instance id |
402 | * @data: platform specific data for this platform device | 402 | * @data: platform specific data for this platform device |
403 | * @size: size of platform specific data | 403 | * @size: size of platform specific data |
404 | * | 404 | * |
405 | * This function creates a simple platform device that requires minimal | 405 | * This function creates a simple platform device that requires minimal |
406 | * resource and memory management. Canned release function freeing memory | 406 | * resource and memory management. Canned release function freeing memory |
407 | * allocated for the device allows drivers using such devices to be | 407 | * allocated for the device allows drivers using such devices to be |
408 | * unloaded without waiting for the last reference to the device to be | 408 | * unloaded without waiting for the last reference to the device to be |
409 | * dropped. | 409 | * dropped. |
410 | */ | 410 | */ |
411 | struct platform_device *platform_device_register_data( | 411 | struct platform_device *platform_device_register_data( |
412 | struct device *parent, | 412 | struct device *parent, |
413 | const char *name, int id, | 413 | const char *name, int id, |
414 | const void *data, size_t size) | 414 | const void *data, size_t size) |
415 | { | 415 | { |
416 | struct platform_device *pdev; | 416 | struct platform_device *pdev; |
417 | int retval; | 417 | int retval; |
418 | 418 | ||
419 | pdev = platform_device_alloc(name, id); | 419 | pdev = platform_device_alloc(name, id); |
420 | if (!pdev) { | 420 | if (!pdev) { |
421 | retval = -ENOMEM; | 421 | retval = -ENOMEM; |
422 | goto error; | 422 | goto error; |
423 | } | 423 | } |
424 | 424 | ||
425 | pdev->dev.parent = parent; | 425 | pdev->dev.parent = parent; |
426 | 426 | ||
427 | if (size) { | 427 | if (size) { |
428 | retval = platform_device_add_data(pdev, data, size); | 428 | retval = platform_device_add_data(pdev, data, size); |
429 | if (retval) | 429 | if (retval) |
430 | goto error; | 430 | goto error; |
431 | } | 431 | } |
432 | 432 | ||
433 | retval = platform_device_add(pdev); | 433 | retval = platform_device_add(pdev); |
434 | if (retval) | 434 | if (retval) |
435 | goto error; | 435 | goto error; |
436 | 436 | ||
437 | return pdev; | 437 | return pdev; |
438 | 438 | ||
439 | error: | 439 | error: |
440 | platform_device_put(pdev); | 440 | platform_device_put(pdev); |
441 | return ERR_PTR(retval); | 441 | return ERR_PTR(retval); |
442 | } | 442 | } |
443 | 443 | ||
444 | static int platform_drv_probe(struct device *_dev) | 444 | static int platform_drv_probe(struct device *_dev) |
445 | { | 445 | { |
446 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 446 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
447 | struct platform_device *dev = to_platform_device(_dev); | 447 | struct platform_device *dev = to_platform_device(_dev); |
448 | 448 | ||
449 | return drv->probe(dev); | 449 | return drv->probe(dev); |
450 | } | 450 | } |
451 | 451 | ||
452 | static int platform_drv_probe_fail(struct device *_dev) | 452 | static int platform_drv_probe_fail(struct device *_dev) |
453 | { | 453 | { |
454 | return -ENXIO; | 454 | return -ENXIO; |
455 | } | 455 | } |
456 | 456 | ||
457 | static int platform_drv_remove(struct device *_dev) | 457 | static int platform_drv_remove(struct device *_dev) |
458 | { | 458 | { |
459 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 459 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
460 | struct platform_device *dev = to_platform_device(_dev); | 460 | struct platform_device *dev = to_platform_device(_dev); |
461 | 461 | ||
462 | return drv->remove(dev); | 462 | return drv->remove(dev); |
463 | } | 463 | } |
464 | 464 | ||
465 | static void platform_drv_shutdown(struct device *_dev) | 465 | static void platform_drv_shutdown(struct device *_dev) |
466 | { | 466 | { |
467 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 467 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
468 | struct platform_device *dev = to_platform_device(_dev); | 468 | struct platform_device *dev = to_platform_device(_dev); |
469 | 469 | ||
470 | drv->shutdown(dev); | 470 | drv->shutdown(dev); |
471 | } | 471 | } |
472 | 472 | ||
473 | static int platform_drv_suspend(struct device *_dev, pm_message_t state) | 473 | static int platform_drv_suspend(struct device *_dev, pm_message_t state) |
474 | { | 474 | { |
475 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 475 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
476 | struct platform_device *dev = to_platform_device(_dev); | 476 | struct platform_device *dev = to_platform_device(_dev); |
477 | 477 | ||
478 | return drv->suspend(dev, state); | 478 | return drv->suspend(dev, state); |
479 | } | 479 | } |
480 | 480 | ||
481 | static int platform_drv_resume(struct device *_dev) | 481 | static int platform_drv_resume(struct device *_dev) |
482 | { | 482 | { |
483 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 483 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
484 | struct platform_device *dev = to_platform_device(_dev); | 484 | struct platform_device *dev = to_platform_device(_dev); |
485 | 485 | ||
486 | return drv->resume(dev); | 486 | return drv->resume(dev); |
487 | } | 487 | } |
488 | 488 | ||
489 | /** | 489 | /** |
490 | * platform_driver_register | 490 | * platform_driver_register |
491 | * @drv: platform driver structure | 491 | * @drv: platform driver structure |
492 | */ | 492 | */ |
493 | int platform_driver_register(struct platform_driver *drv) | 493 | int platform_driver_register(struct platform_driver *drv) |
494 | { | 494 | { |
495 | drv->driver.bus = &platform_bus_type; | 495 | drv->driver.bus = &platform_bus_type; |
496 | if (drv->probe) | 496 | if (drv->probe) |
497 | drv->driver.probe = platform_drv_probe; | 497 | drv->driver.probe = platform_drv_probe; |
498 | if (drv->remove) | 498 | if (drv->remove) |
499 | drv->driver.remove = platform_drv_remove; | 499 | drv->driver.remove = platform_drv_remove; |
500 | if (drv->shutdown) | 500 | if (drv->shutdown) |
501 | drv->driver.shutdown = platform_drv_shutdown; | 501 | drv->driver.shutdown = platform_drv_shutdown; |
502 | if (drv->suspend) | 502 | if (drv->suspend) |
503 | drv->driver.suspend = platform_drv_suspend; | 503 | drv->driver.suspend = platform_drv_suspend; |
504 | if (drv->resume) | 504 | if (drv->resume) |
505 | drv->driver.resume = platform_drv_resume; | 505 | drv->driver.resume = platform_drv_resume; |
506 | if (drv->pm) | ||
507 | drv->driver.pm = &drv->pm->base; | ||
508 | return driver_register(&drv->driver); | 506 | return driver_register(&drv->driver); |
509 | } | 507 | } |
510 | EXPORT_SYMBOL_GPL(platform_driver_register); | 508 | EXPORT_SYMBOL_GPL(platform_driver_register); |
511 | 509 | ||
512 | /** | 510 | /** |
513 | * platform_driver_unregister | 511 | * platform_driver_unregister |
514 | * @drv: platform driver structure | 512 | * @drv: platform driver structure |
515 | */ | 513 | */ |
516 | void platform_driver_unregister(struct platform_driver *drv) | 514 | void platform_driver_unregister(struct platform_driver *drv) |
517 | { | 515 | { |
518 | driver_unregister(&drv->driver); | 516 | driver_unregister(&drv->driver); |
519 | } | 517 | } |
520 | EXPORT_SYMBOL_GPL(platform_driver_unregister); | 518 | EXPORT_SYMBOL_GPL(platform_driver_unregister); |
521 | 519 | ||
522 | /** | 520 | /** |
523 | * platform_driver_probe - register driver for non-hotpluggable device | 521 | * platform_driver_probe - register driver for non-hotpluggable device |
524 | * @drv: platform driver structure | 522 | * @drv: platform driver structure |
525 | * @probe: the driver probe routine, probably from an __init section | 523 | * @probe: the driver probe routine, probably from an __init section |
526 | * | 524 | * |
527 | * Use this instead of platform_driver_register() when you know the device | 525 | * Use this instead of platform_driver_register() when you know the device |
528 | * is not hotpluggable and has already been registered, and you want to | 526 | * is not hotpluggable and has already been registered, and you want to |
529 | * remove its run-once probe() infrastructure from memory after the driver | 527 | * remove its run-once probe() infrastructure from memory after the driver |
530 | * has bound to the device. | 528 | * has bound to the device. |
531 | * | 529 | * |
532 | * One typical use for this would be with drivers for controllers integrated | 530 | * One typical use for this would be with drivers for controllers integrated |
533 | * into system-on-chip processors, where the controller devices have been | 531 | * into system-on-chip processors, where the controller devices have been |
534 | * configured as part of board setup. | 532 | * configured as part of board setup. |
535 | * | 533 | * |
536 | * Returns zero if the driver registered and bound to a device, else returns | 534 | * Returns zero if the driver registered and bound to a device, else returns |
537 | * a negative error code and with the driver not registered. | 535 | * a negative error code and with the driver not registered. |
538 | */ | 536 | */ |
539 | int __init_or_module platform_driver_probe(struct platform_driver *drv, | 537 | int __init_or_module platform_driver_probe(struct platform_driver *drv, |
540 | int (*probe)(struct platform_device *)) | 538 | int (*probe)(struct platform_device *)) |
541 | { | 539 | { |
542 | int retval, code; | 540 | int retval, code; |
543 | 541 | ||
544 | /* temporary section violation during probe() */ | 542 | /* temporary section violation during probe() */ |
545 | drv->probe = probe; | 543 | drv->probe = probe; |
546 | retval = code = platform_driver_register(drv); | 544 | retval = code = platform_driver_register(drv); |
547 | 545 | ||
548 | /* Fixup that section violation, being paranoid about code scanning | 546 | /* Fixup that section violation, being paranoid about code scanning |
549 | * the list of drivers in order to probe new devices. Check to see | 547 | * the list of drivers in order to probe new devices. Check to see |
550 | * if the probe was successful, and make sure any forced probes of | 548 | * if the probe was successful, and make sure any forced probes of |
551 | * new devices fail. | 549 | * new devices fail. |
552 | */ | 550 | */ |
553 | spin_lock(&platform_bus_type.p->klist_drivers.k_lock); | 551 | spin_lock(&platform_bus_type.p->klist_drivers.k_lock); |
554 | drv->probe = NULL; | 552 | drv->probe = NULL; |
555 | if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) | 553 | if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) |
556 | retval = -ENODEV; | 554 | retval = -ENODEV; |
557 | drv->driver.probe = platform_drv_probe_fail; | 555 | drv->driver.probe = platform_drv_probe_fail; |
558 | spin_unlock(&platform_bus_type.p->klist_drivers.k_lock); | 556 | spin_unlock(&platform_bus_type.p->klist_drivers.k_lock); |
559 | 557 | ||
560 | if (code != retval) | 558 | if (code != retval) |
561 | platform_driver_unregister(drv); | 559 | platform_driver_unregister(drv); |
562 | return retval; | 560 | return retval; |
563 | } | 561 | } |
564 | EXPORT_SYMBOL_GPL(platform_driver_probe); | 562 | EXPORT_SYMBOL_GPL(platform_driver_probe); |
565 | 563 | ||
566 | /* modalias support enables more hands-off userspace setup: | 564 | /* modalias support enables more hands-off userspace setup: |
567 | * (a) environment variable lets new-style hotplug events work once system is | 565 | * (a) environment variable lets new-style hotplug events work once system is |
568 | * fully running: "modprobe $MODALIAS" | 566 | * fully running: "modprobe $MODALIAS" |
569 | * (b) sysfs attribute lets new-style coldplug recover from hotplug events | 567 | * (b) sysfs attribute lets new-style coldplug recover from hotplug events |
570 | * mishandled before system is fully running: "modprobe $(cat modalias)" | 568 | * mishandled before system is fully running: "modprobe $(cat modalias)" |
571 | */ | 569 | */ |
572 | static ssize_t modalias_show(struct device *dev, struct device_attribute *a, | 570 | static ssize_t modalias_show(struct device *dev, struct device_attribute *a, |
573 | char *buf) | 571 | char *buf) |
574 | { | 572 | { |
575 | struct platform_device *pdev = to_platform_device(dev); | 573 | struct platform_device *pdev = to_platform_device(dev); |
576 | int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); | 574 | int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); |
577 | 575 | ||
578 | return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; | 576 | return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; |
579 | } | 577 | } |
580 | 578 | ||
581 | static struct device_attribute platform_dev_attrs[] = { | 579 | static struct device_attribute platform_dev_attrs[] = { |
582 | __ATTR_RO(modalias), | 580 | __ATTR_RO(modalias), |
583 | __ATTR_NULL, | 581 | __ATTR_NULL, |
584 | }; | 582 | }; |
585 | 583 | ||
586 | static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) | 584 | static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) |
587 | { | 585 | { |
588 | struct platform_device *pdev = to_platform_device(dev); | 586 | struct platform_device *pdev = to_platform_device(dev); |
589 | 587 | ||
590 | add_uevent_var(env, "MODALIAS=platform:%s", pdev->name); | 588 | add_uevent_var(env, "MODALIAS=platform:%s", pdev->name); |
591 | return 0; | 589 | return 0; |
592 | } | 590 | } |
593 | 591 | ||
594 | /** | 592 | /** |
595 | * platform_match - bind platform device to platform driver. | 593 | * platform_match - bind platform device to platform driver. |
596 | * @dev: device. | 594 | * @dev: device. |
597 | * @drv: driver. | 595 | * @drv: driver. |
598 | * | 596 | * |
599 | * Platform device IDs are assumed to be encoded like this: | 597 | * Platform device IDs are assumed to be encoded like this: |
600 | * "<name><instance>", where <name> is a short description of the type of | 598 | * "<name><instance>", where <name> is a short description of the type of |
601 | * device, like "pci" or "floppy", and <instance> is the enumerated | 599 | * device, like "pci" or "floppy", and <instance> is the enumerated |
602 | * instance of the device, like '0' or '42'. Driver IDs are simply | 600 | * instance of the device, like '0' or '42'. Driver IDs are simply |
603 | * "<name>". So, extract the <name> from the platform_device structure, | 601 | * "<name>". So, extract the <name> from the platform_device structure, |
604 | * and compare it against the name of the driver. Return whether they match | 602 | * and compare it against the name of the driver. Return whether they match |
605 | * or not. | 603 | * or not. |
606 | */ | 604 | */ |
607 | static int platform_match(struct device *dev, struct device_driver *drv) | 605 | static int platform_match(struct device *dev, struct device_driver *drv) |
608 | { | 606 | { |
609 | struct platform_device *pdev; | 607 | struct platform_device *pdev; |
610 | 608 | ||
611 | pdev = container_of(dev, struct platform_device, dev); | 609 | pdev = container_of(dev, struct platform_device, dev); |
612 | return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); | 610 | return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); |
613 | } | 611 | } |
614 | 612 | ||
615 | #ifdef CONFIG_PM_SLEEP | 613 | #ifdef CONFIG_PM_SLEEP |
616 | 614 | ||
617 | static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) | 615 | static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) |
618 | { | 616 | { |
619 | int ret = 0; | 617 | int ret = 0; |
620 | 618 | ||
621 | if (dev->driver && dev->driver->suspend) | 619 | if (dev->driver && dev->driver->suspend) |
622 | ret = dev->driver->suspend(dev, mesg); | 620 | ret = dev->driver->suspend(dev, mesg); |
623 | 621 | ||
624 | return ret; | 622 | return ret; |
625 | } | 623 | } |
626 | 624 | ||
627 | static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg) | 625 | static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg) |
628 | { | 626 | { |
629 | struct platform_driver *drv = to_platform_driver(dev->driver); | 627 | struct platform_driver *drv = to_platform_driver(dev->driver); |
630 | struct platform_device *pdev; | 628 | struct platform_device *pdev; |
631 | int ret = 0; | 629 | int ret = 0; |
632 | 630 | ||
633 | pdev = container_of(dev, struct platform_device, dev); | 631 | pdev = container_of(dev, struct platform_device, dev); |
634 | if (dev->driver && drv->suspend_late) | 632 | if (dev->driver && drv->suspend_late) |
635 | ret = drv->suspend_late(pdev, mesg); | 633 | ret = drv->suspend_late(pdev, mesg); |
636 | 634 | ||
637 | return ret; | 635 | return ret; |
638 | } | 636 | } |
639 | 637 | ||
640 | static int platform_legacy_resume_early(struct device *dev) | 638 | static int platform_legacy_resume_early(struct device *dev) |
641 | { | 639 | { |
642 | struct platform_driver *drv = to_platform_driver(dev->driver); | 640 | struct platform_driver *drv = to_platform_driver(dev->driver); |
643 | struct platform_device *pdev; | 641 | struct platform_device *pdev; |
644 | int ret = 0; | 642 | int ret = 0; |
645 | 643 | ||
646 | pdev = container_of(dev, struct platform_device, dev); | 644 | pdev = container_of(dev, struct platform_device, dev); |
647 | if (dev->driver && drv->resume_early) | 645 | if (dev->driver && drv->resume_early) |
648 | ret = drv->resume_early(pdev); | 646 | ret = drv->resume_early(pdev); |
649 | 647 | ||
650 | return ret; | 648 | return ret; |
651 | } | 649 | } |
652 | 650 | ||
653 | static int platform_legacy_resume(struct device *dev) | 651 | static int platform_legacy_resume(struct device *dev) |
654 | { | 652 | { |
655 | int ret = 0; | 653 | int ret = 0; |
656 | 654 | ||
657 | if (dev->driver && dev->driver->resume) | 655 | if (dev->driver && dev->driver->resume) |
658 | ret = dev->driver->resume(dev); | 656 | ret = dev->driver->resume(dev); |
659 | 657 | ||
660 | return ret; | 658 | return ret; |
661 | } | 659 | } |
662 | 660 | ||
663 | static int platform_pm_prepare(struct device *dev) | 661 | static int platform_pm_prepare(struct device *dev) |
664 | { | 662 | { |
665 | struct device_driver *drv = dev->driver; | 663 | struct device_driver *drv = dev->driver; |
666 | int ret = 0; | 664 | int ret = 0; |
667 | 665 | ||
668 | if (drv && drv->pm && drv->pm->prepare) | 666 | if (drv && drv->pm && drv->pm->prepare) |
669 | ret = drv->pm->prepare(dev); | 667 | ret = drv->pm->prepare(dev); |
670 | 668 | ||
671 | return ret; | 669 | return ret; |
672 | } | 670 | } |
673 | 671 | ||
674 | static void platform_pm_complete(struct device *dev) | 672 | static void platform_pm_complete(struct device *dev) |
675 | { | 673 | { |
676 | struct device_driver *drv = dev->driver; | 674 | struct device_driver *drv = dev->driver; |
677 | 675 | ||
678 | if (drv && drv->pm && drv->pm->complete) | 676 | if (drv && drv->pm && drv->pm->complete) |
679 | drv->pm->complete(dev); | 677 | drv->pm->complete(dev); |
680 | } | 678 | } |
681 | 679 | ||
682 | #ifdef CONFIG_SUSPEND | 680 | #ifdef CONFIG_SUSPEND |
683 | 681 | ||
684 | static int platform_pm_suspend(struct device *dev) | 682 | static int platform_pm_suspend(struct device *dev) |
685 | { | 683 | { |
686 | struct device_driver *drv = dev->driver; | 684 | struct device_driver *drv = dev->driver; |
687 | int ret = 0; | 685 | int ret = 0; |
688 | 686 | ||
689 | if (drv && drv->pm) { | 687 | if (!drv) |
688 | return 0; | ||
689 | |||
690 | if (drv->pm) { | ||
690 | if (drv->pm->suspend) | 691 | if (drv->pm->suspend) |
691 | ret = drv->pm->suspend(dev); | 692 | ret = drv->pm->suspend(dev); |
692 | } else { | 693 | } else { |
693 | ret = platform_legacy_suspend(dev, PMSG_SUSPEND); | 694 | ret = platform_legacy_suspend(dev, PMSG_SUSPEND); |
694 | } | 695 | } |
695 | 696 | ||
696 | return ret; | 697 | return ret; |
697 | } | 698 | } |
698 | 699 | ||
699 | static int platform_pm_suspend_noirq(struct device *dev) | 700 | static int platform_pm_suspend_noirq(struct device *dev) |
700 | { | 701 | { |
701 | struct platform_driver *pdrv; | 702 | struct device_driver *drv = dev->driver; |
702 | int ret = 0; | 703 | int ret = 0; |
703 | 704 | ||
704 | if (!dev->driver) | 705 | if (!drv) |
705 | return 0; | 706 | return 0; |
706 | 707 | ||
707 | pdrv = to_platform_driver(dev->driver); | 708 | if (drv->pm) { |
708 | if (pdrv->pm) { | 709 | if (drv->pm->suspend_noirq) |
709 | if (pdrv->pm->suspend_noirq) | 710 | ret = drv->pm->suspend_noirq(dev); |
710 | ret = pdrv->pm->suspend_noirq(dev); | ||
711 | } else { | 711 | } else { |
712 | ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND); | 712 | ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND); |
713 | } | 713 | } |
714 | 714 | ||
715 | return ret; | 715 | return ret; |
716 | } | 716 | } |
717 | 717 | ||
718 | static int platform_pm_resume(struct device *dev) | 718 | static int platform_pm_resume(struct device *dev) |
719 | { | 719 | { |
720 | struct device_driver *drv = dev->driver; | 720 | struct device_driver *drv = dev->driver; |
721 | int ret = 0; | 721 | int ret = 0; |
722 | 722 | ||
723 | if (drv && drv->pm) { | 723 | if (!drv) |
724 | return 0; | ||
725 | |||
726 | if (drv->pm) { | ||
724 | if (drv->pm->resume) | 727 | if (drv->pm->resume) |
725 | ret = drv->pm->resume(dev); | 728 | ret = drv->pm->resume(dev); |
726 | } else { | 729 | } else { |
727 | ret = platform_legacy_resume(dev); | 730 | ret = platform_legacy_resume(dev); |
728 | } | 731 | } |
729 | 732 | ||
730 | return ret; | 733 | return ret; |
731 | } | 734 | } |
732 | 735 | ||
733 | static int platform_pm_resume_noirq(struct device *dev) | 736 | static int platform_pm_resume_noirq(struct device *dev) |
734 | { | 737 | { |
735 | struct platform_driver *pdrv; | 738 | struct device_driver *drv = dev->driver; |
736 | int ret = 0; | 739 | int ret = 0; |
737 | 740 | ||
738 | if (!dev->driver) | 741 | if (!drv) |
739 | return 0; | 742 | return 0; |
740 | 743 | ||
741 | pdrv = to_platform_driver(dev->driver); | 744 | if (drv->pm) { |
742 | if (pdrv->pm) { | 745 | if (drv->pm->resume_noirq) |
743 | if (pdrv->pm->resume_noirq) | 746 | ret = drv->pm->resume_noirq(dev); |
744 | ret = pdrv->pm->resume_noirq(dev); | ||
745 | } else { | 747 | } else { |
746 | ret = platform_legacy_resume_early(dev); | 748 | ret = platform_legacy_resume_early(dev); |
747 | } | 749 | } |
748 | 750 | ||
749 | return ret; | 751 | return ret; |
750 | } | 752 | } |
751 | 753 | ||
752 | #else /* !CONFIG_SUSPEND */ | 754 | #else /* !CONFIG_SUSPEND */ |
753 | 755 | ||
754 | #define platform_pm_suspend NULL | 756 | #define platform_pm_suspend NULL |
755 | #define platform_pm_resume NULL | 757 | #define platform_pm_resume NULL |
756 | #define platform_pm_suspend_noirq NULL | 758 | #define platform_pm_suspend_noirq NULL |
757 | #define platform_pm_resume_noirq NULL | 759 | #define platform_pm_resume_noirq NULL |
758 | 760 | ||
759 | #endif /* !CONFIG_SUSPEND */ | 761 | #endif /* !CONFIG_SUSPEND */ |
760 | 762 | ||
761 | #ifdef CONFIG_HIBERNATION | 763 | #ifdef CONFIG_HIBERNATION |
762 | 764 | ||
763 | static int platform_pm_freeze(struct device *dev) | 765 | static int platform_pm_freeze(struct device *dev) |
764 | { | 766 | { |
765 | struct device_driver *drv = dev->driver; | 767 | struct device_driver *drv = dev->driver; |
766 | int ret = 0; | 768 | int ret = 0; |
767 | 769 | ||
768 | if (!drv) | 770 | if (!drv) |
769 | return 0; | 771 | return 0; |
770 | 772 | ||
771 | if (drv->pm) { | 773 | if (drv->pm) { |
772 | if (drv->pm->freeze) | 774 | if (drv->pm->freeze) |
773 | ret = drv->pm->freeze(dev); | 775 | ret = drv->pm->freeze(dev); |
774 | } else { | 776 | } else { |
775 | ret = platform_legacy_suspend(dev, PMSG_FREEZE); | 777 | ret = platform_legacy_suspend(dev, PMSG_FREEZE); |
776 | } | 778 | } |
777 | 779 | ||
778 | return ret; | 780 | return ret; |
779 | } | 781 | } |
780 | 782 | ||
781 | static int platform_pm_freeze_noirq(struct device *dev) | 783 | static int platform_pm_freeze_noirq(struct device *dev) |
782 | { | 784 | { |
783 | struct platform_driver *pdrv; | 785 | struct device_driver *drv = dev->driver; |
784 | int ret = 0; | 786 | int ret = 0; |
785 | 787 | ||
786 | if (!dev->driver) | 788 | if (!drv) |
787 | return 0; | 789 | return 0; |
788 | 790 | ||
789 | pdrv = to_platform_driver(dev->driver); | 791 | if (drv->pm) { |
790 | if (pdrv->pm) { | 792 | if (drv->pm->freeze_noirq) |
791 | if (pdrv->pm->freeze_noirq) | 793 | ret = drv->pm->freeze_noirq(dev); |
792 | ret = pdrv->pm->freeze_noirq(dev); | ||
793 | } else { | 794 | } else { |
794 | ret = platform_legacy_suspend_late(dev, PMSG_FREEZE); | 795 | ret = platform_legacy_suspend_late(dev, PMSG_FREEZE); |
795 | } | 796 | } |
796 | 797 | ||
797 | return ret; | 798 | return ret; |
798 | } | 799 | } |
799 | 800 | ||
800 | static int platform_pm_thaw(struct device *dev) | 801 | static int platform_pm_thaw(struct device *dev) |
801 | { | 802 | { |
802 | struct device_driver *drv = dev->driver; | 803 | struct device_driver *drv = dev->driver; |
803 | int ret = 0; | 804 | int ret = 0; |
804 | 805 | ||
805 | if (drv && drv->pm) { | 806 | if (!drv) |
807 | return 0; | ||
808 | |||
809 | if (drv->pm) { | ||
806 | if (drv->pm->thaw) | 810 | if (drv->pm->thaw) |
807 | ret = drv->pm->thaw(dev); | 811 | ret = drv->pm->thaw(dev); |
808 | } else { | 812 | } else { |
809 | ret = platform_legacy_resume(dev); | 813 | ret = platform_legacy_resume(dev); |
810 | } | 814 | } |
811 | 815 | ||
812 | return ret; | 816 | return ret; |
813 | } | 817 | } |
814 | 818 | ||
815 | static int platform_pm_thaw_noirq(struct device *dev) | 819 | static int platform_pm_thaw_noirq(struct device *dev) |
816 | { | 820 | { |
817 | struct platform_driver *pdrv; | 821 | struct device_driver *drv = dev->driver; |
818 | int ret = 0; | 822 | int ret = 0; |
819 | 823 | ||
820 | if (!dev->driver) | 824 | if (!drv) |
821 | return 0; | 825 | return 0; |
822 | 826 | ||
823 | pdrv = to_platform_driver(dev->driver); | 827 | if (drv->pm) { |
824 | if (pdrv->pm) { | 828 | if (drv->pm->thaw_noirq) |
825 | if (pdrv->pm->thaw_noirq) | 829 | ret = drv->pm->thaw_noirq(dev); |
826 | ret = pdrv->pm->thaw_noirq(dev); | ||
827 | } else { | 830 | } else { |
828 | ret = platform_legacy_resume_early(dev); | 831 | ret = platform_legacy_resume_early(dev); |
829 | } | 832 | } |
830 | 833 | ||
831 | return ret; | 834 | return ret; |
832 | } | 835 | } |
833 | 836 | ||
834 | static int platform_pm_poweroff(struct device *dev) | 837 | static int platform_pm_poweroff(struct device *dev) |
835 | { | 838 | { |
836 | struct device_driver *drv = dev->driver; | 839 | struct device_driver *drv = dev->driver; |
837 | int ret = 0; | 840 | int ret = 0; |
838 | 841 | ||
839 | if (drv && drv->pm) { | 842 | if (!drv) |
843 | return 0; | ||
844 | |||
845 | if (drv->pm) { | ||
840 | if (drv->pm->poweroff) | 846 | if (drv->pm->poweroff) |
841 | ret = drv->pm->poweroff(dev); | 847 | ret = drv->pm->poweroff(dev); |
842 | } else { | 848 | } else { |
843 | ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); | 849 | ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); |
844 | } | 850 | } |
845 | 851 | ||
846 | return ret; | 852 | return ret; |
847 | } | 853 | } |
848 | 854 | ||
849 | static int platform_pm_poweroff_noirq(struct device *dev) | 855 | static int platform_pm_poweroff_noirq(struct device *dev) |
850 | { | 856 | { |
851 | struct platform_driver *pdrv; | 857 | struct device_driver *drv = dev->driver; |
852 | int ret = 0; | 858 | int ret = 0; |
853 | 859 | ||
854 | if (!dev->driver) | 860 | if (!drv) |
855 | return 0; | 861 | return 0; |
856 | 862 | ||
857 | pdrv = to_platform_driver(dev->driver); | 863 | if (drv->pm) { |
858 | if (pdrv->pm) { | 864 | if (drv->pm->poweroff_noirq) |
859 | if (pdrv->pm->poweroff_noirq) | 865 | ret = drv->pm->poweroff_noirq(dev); |
860 | ret = pdrv->pm->poweroff_noirq(dev); | ||
861 | } else { | 866 | } else { |
862 | ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE); | 867 | ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE); |
863 | } | 868 | } |
864 | 869 | ||
865 | return ret; | 870 | return ret; |
866 | } | 871 | } |
867 | 872 | ||
868 | static int platform_pm_restore(struct device *dev) | 873 | static int platform_pm_restore(struct device *dev) |
869 | { | 874 | { |
870 | struct device_driver *drv = dev->driver; | 875 | struct device_driver *drv = dev->driver; |
871 | int ret = 0; | 876 | int ret = 0; |
872 | 877 | ||
873 | if (drv && drv->pm) { | 878 | if (!drv) |
879 | return 0; | ||
880 | |||
881 | if (drv->pm) { | ||
874 | if (drv->pm->restore) | 882 | if (drv->pm->restore) |
875 | ret = drv->pm->restore(dev); | 883 | ret = drv->pm->restore(dev); |
876 | } else { | 884 | } else { |
877 | ret = platform_legacy_resume(dev); | 885 | ret = platform_legacy_resume(dev); |
878 | } | 886 | } |
879 | 887 | ||
880 | return ret; | 888 | return ret; |
881 | } | 889 | } |
882 | 890 | ||
883 | static int platform_pm_restore_noirq(struct device *dev) | 891 | static int platform_pm_restore_noirq(struct device *dev) |
884 | { | 892 | { |
885 | struct platform_driver *pdrv; | 893 | struct device_driver *drv = dev->driver; |
886 | int ret = 0; | 894 | int ret = 0; |
887 | 895 | ||
888 | if (!dev->driver) | 896 | if (!drv) |
889 | return 0; | 897 | return 0; |
890 | 898 | ||
891 | pdrv = to_platform_driver(dev->driver); | 899 | if (drv->pm) { |
892 | if (pdrv->pm) { | 900 | if (drv->pm->restore_noirq) |
893 | if (pdrv->pm->restore_noirq) | 901 | ret = drv->pm->restore_noirq(dev); |
894 | ret = pdrv->pm->restore_noirq(dev); | ||
895 | } else { | 902 | } else { |
896 | ret = platform_legacy_resume_early(dev); | 903 | ret = platform_legacy_resume_early(dev); |
897 | } | 904 | } |
898 | 905 | ||
899 | return ret; | 906 | return ret; |
900 | } | 907 | } |
901 | 908 | ||
902 | #else /* !CONFIG_HIBERNATION */ | 909 | #else /* !CONFIG_HIBERNATION */ |
903 | 910 | ||
904 | #define platform_pm_freeze NULL | 911 | #define platform_pm_freeze NULL |
905 | #define platform_pm_thaw NULL | 912 | #define platform_pm_thaw NULL |
906 | #define platform_pm_poweroff NULL | 913 | #define platform_pm_poweroff NULL |
907 | #define platform_pm_restore NULL | 914 | #define platform_pm_restore NULL |
908 | #define platform_pm_freeze_noirq NULL | 915 | #define platform_pm_freeze_noirq NULL |
909 | #define platform_pm_thaw_noirq NULL | 916 | #define platform_pm_thaw_noirq NULL |
910 | #define platform_pm_poweroff_noirq NULL | 917 | #define platform_pm_poweroff_noirq NULL |
911 | #define platform_pm_restore_noirq NULL | 918 | #define platform_pm_restore_noirq NULL |
912 | 919 | ||
913 | #endif /* !CONFIG_HIBERNATION */ | 920 | #endif /* !CONFIG_HIBERNATION */ |
914 | 921 | ||
915 | static struct pm_ext_ops platform_pm_ops = { | 922 | static struct dev_pm_ops platform_dev_pm_ops = { |
916 | .base = { | 923 | .prepare = platform_pm_prepare, |
917 | .prepare = platform_pm_prepare, | 924 | .complete = platform_pm_complete, |
918 | .complete = platform_pm_complete, | 925 | .suspend = platform_pm_suspend, |
919 | .suspend = platform_pm_suspend, | 926 | .resume = platform_pm_resume, |
920 | .resume = platform_pm_resume, | 927 | .freeze = platform_pm_freeze, |
921 | .freeze = platform_pm_freeze, | 928 | .thaw = platform_pm_thaw, |
922 | .thaw = platform_pm_thaw, | 929 | .poweroff = platform_pm_poweroff, |
923 | .poweroff = platform_pm_poweroff, | 930 | .restore = platform_pm_restore, |
924 | .restore = platform_pm_restore, | ||
925 | }, | ||
926 | .suspend_noirq = platform_pm_suspend_noirq, | 931 | .suspend_noirq = platform_pm_suspend_noirq, |
927 | .resume_noirq = platform_pm_resume_noirq, | 932 | .resume_noirq = platform_pm_resume_noirq, |
928 | .freeze_noirq = platform_pm_freeze_noirq, | 933 | .freeze_noirq = platform_pm_freeze_noirq, |
929 | .thaw_noirq = platform_pm_thaw_noirq, | 934 | .thaw_noirq = platform_pm_thaw_noirq, |
930 | .poweroff_noirq = platform_pm_poweroff_noirq, | 935 | .poweroff_noirq = platform_pm_poweroff_noirq, |
931 | .restore_noirq = platform_pm_restore_noirq, | 936 | .restore_noirq = platform_pm_restore_noirq, |
932 | }; | 937 | }; |
933 | 938 | ||
934 | #define PLATFORM_PM_OPS_PTR &platform_pm_ops | 939 | #define PLATFORM_PM_OPS_PTR (&platform_dev_pm_ops) |
935 | 940 | ||
936 | #else /* !CONFIG_PM_SLEEP */ | 941 | #else /* !CONFIG_PM_SLEEP */ |
937 | 942 | ||
938 | #define PLATFORM_PM_OPS_PTR NULL | 943 | #define PLATFORM_PM_OPS_PTR NULL |
939 | 944 | ||
940 | #endif /* !CONFIG_PM_SLEEP */ | 945 | #endif /* !CONFIG_PM_SLEEP */ |
941 | 946 | ||
942 | struct bus_type platform_bus_type = { | 947 | struct bus_type platform_bus_type = { |
943 | .name = "platform", | 948 | .name = "platform", |
944 | .dev_attrs = platform_dev_attrs, | 949 | .dev_attrs = platform_dev_attrs, |
945 | .match = platform_match, | 950 | .match = platform_match, |
946 | .uevent = platform_uevent, | 951 | .uevent = platform_uevent, |
947 | .pm = PLATFORM_PM_OPS_PTR, | 952 | .pm = PLATFORM_PM_OPS_PTR, |
948 | }; | 953 | }; |
949 | EXPORT_SYMBOL_GPL(platform_bus_type); | 954 | EXPORT_SYMBOL_GPL(platform_bus_type); |
950 | 955 | ||
951 | int __init platform_bus_init(void) | 956 | int __init platform_bus_init(void) |
952 | { | 957 | { |
953 | int error; | 958 | int error; |
954 | 959 | ||
955 | error = device_register(&platform_bus); | 960 | error = device_register(&platform_bus); |
956 | if (error) | 961 | if (error) |
957 | return error; | 962 | return error; |
958 | error = bus_register(&platform_bus_type); | 963 | error = bus_register(&platform_bus_type); |
959 | if (error) | 964 | if (error) |
960 | device_unregister(&platform_bus); | 965 | device_unregister(&platform_bus); |
961 | return error; | 966 | return error; |
962 | } | 967 | } |
963 | 968 | ||
964 | #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK | 969 | #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK |
965 | u64 dma_get_required_mask(struct device *dev) | 970 | u64 dma_get_required_mask(struct device *dev) |
966 | { | 971 | { |
967 | u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); | 972 | u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); |
968 | u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); | 973 | u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); |
969 | u64 mask; | 974 | u64 mask; |
970 | 975 | ||
971 | if (!high_totalram) { | 976 | if (!high_totalram) { |
972 | /* convert to mask just covering totalram */ | 977 | /* convert to mask just covering totalram */ |
973 | low_totalram = (1 << (fls(low_totalram) - 1)); | 978 | low_totalram = (1 << (fls(low_totalram) - 1)); |
974 | low_totalram += low_totalram - 1; | 979 | low_totalram += low_totalram - 1; |
975 | mask = low_totalram; | 980 | mask = low_totalram; |
drivers/base/power/main.c
1 | /* | 1 | /* |
2 | * drivers/base/power/main.c - Where the driver meets power management. | 2 | * drivers/base/power/main.c - Where the driver meets power management. |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Patrick Mochel | 4 | * Copyright (c) 2003 Patrick Mochel |
5 | * Copyright (c) 2003 Open Source Development Lab | 5 | * Copyright (c) 2003 Open Source Development Lab |
6 | * | 6 | * |
7 | * This file is released under the GPLv2 | 7 | * This file is released under the GPLv2 |
8 | * | 8 | * |
9 | * | 9 | * |
10 | * The driver model core calls device_pm_add() when a device is registered. | 10 | * The driver model core calls device_pm_add() when a device is registered. |
11 | * This will intialize the embedded device_pm_info object in the device | 11 | * This will intialize the embedded device_pm_info object in the device |
12 | * and add it to the list of power-controlled devices. sysfs entries for | 12 | * and add it to the list of power-controlled devices. sysfs entries for |
13 | * controlling device power management will also be added. | 13 | * controlling device power management will also be added. |
14 | * | 14 | * |
15 | * A separate list is used for keeping track of power info, because the power | 15 | * A separate list is used for keeping track of power info, because the power |
16 | * domain dependencies may differ from the ancestral dependencies that the | 16 | * domain dependencies may differ from the ancestral dependencies that the |
17 | * subsystem list maintains. | 17 | * subsystem list maintains. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/resume-trace.h> | 24 | #include <linux/resume-trace.h> |
25 | #include <linux/rwsem.h> | 25 | #include <linux/rwsem.h> |
26 | 26 | ||
27 | #include "../base.h" | 27 | #include "../base.h" |
28 | #include "power.h" | 28 | #include "power.h" |
29 | 29 | ||
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mutex.
 */

/* All devices known to the PM core, in depth-first (parents-first) order. */
LIST_HEAD(dpm_list);

/* Protects dpm_list and transition_started. */
static DEFINE_MUTEX(dpm_list_mtx);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;
49 | 49 | ||
/**
 * device_pm_lock - lock the list of active devices used by the PM core
 *
 * Acquires dpm_list_mtx, serializing against device_pm_add()/remove()
 * and the dpm_* transition walkers.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
57 | 57 | ||
/**
 * device_pm_unlock - unlock the list of active devices used by the PM core
 *
 * Counterpart of device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
65 | 65 | ||
/**
 * device_pm_add - add a device to the list of active devices
 * @dev: Device to be added to the list
 *
 * Appends @dev to the tail of dpm_list, which keeps the list in
 * depth-first order (children always after their parents).  Warns if the
 * device tree state looks unsafe: a parent already heading into suspend,
 * or a parentless device showing up mid-transition.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		/* A sleeping parent cannot service its new child. */
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev->parent->bus_id);
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
92 | 92 | ||
/**
 * device_pm_remove - remove a device from the list of active devices
 * @dev: Device to be removed from the list
 *
 * This function also removes the device's PM-related sysfs attributes.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	/* list_del_init() so a later list_empty() test on the entry is safe. */
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
}
108 | 108 | ||
/**
 * pm_op - execute the PM operation appropriate for given PM event
 * @dev: Device.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Dispatches on @state.event and invokes the matching callback from @ops
 * if the driver supplied one; a missing callback is silently treated as
 * success.  Returns 0 on success, the callback's error code on failure,
 * or -EINVAL for an event this kernel configuration does not handle.
 */
static int pm_op(struct device *dev, struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
167 | 168 | ||
/**
 * pm_noirq_op - execute the PM operation appropriate for given PM event
 * @dev: Device.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The operation is executed with interrupts disabled by the only remaining
 * functional CPU in the system.
 *
 * Same dispatch scheme as pm_op(), but invoking the *_noirq variants of
 * the callbacks.  Returns 0 on success, the callback's error code on
 * failure, or -EINVAL for an unhandled event.
 */
static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
230 | 231 | ||
231 | static char *pm_verb(int event) | 232 | static char *pm_verb(int event) |
232 | { | 233 | { |
233 | switch (event) { | 234 | switch (event) { |
234 | case PM_EVENT_SUSPEND: | 235 | case PM_EVENT_SUSPEND: |
235 | return "suspend"; | 236 | return "suspend"; |
236 | case PM_EVENT_RESUME: | 237 | case PM_EVENT_RESUME: |
237 | return "resume"; | 238 | return "resume"; |
238 | case PM_EVENT_FREEZE: | 239 | case PM_EVENT_FREEZE: |
239 | return "freeze"; | 240 | return "freeze"; |
240 | case PM_EVENT_QUIESCE: | 241 | case PM_EVENT_QUIESCE: |
241 | return "quiesce"; | 242 | return "quiesce"; |
242 | case PM_EVENT_HIBERNATE: | 243 | case PM_EVENT_HIBERNATE: |
243 | return "hibernate"; | 244 | return "hibernate"; |
244 | case PM_EVENT_THAW: | 245 | case PM_EVENT_THAW: |
245 | return "thaw"; | 246 | return "thaw"; |
246 | case PM_EVENT_RESTORE: | 247 | case PM_EVENT_RESTORE: |
247 | return "restore"; | 248 | return "restore"; |
248 | case PM_EVENT_RECOVER: | 249 | case PM_EVENT_RECOVER: |
249 | return "recover"; | 250 | return "recover"; |
250 | default: | 251 | default: |
251 | return "(unknown PM event)"; | 252 | return "(unknown PM event)"; |
252 | } | 253 | } |
253 | } | 254 | } |
254 | 255 | ||
/*
 * pm_dev_dbg - emit a debug line "<info><verb>[, may wakeup]" for @dev.
 * The wakeup suffix is only shown for sleep-class events on devices
 * configured as wakeup sources.
 */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
261 | 262 | ||
/*
 * pm_dev_err - log a PM callback failure for @dev, naming the failed
 * transition verb, the phase suffix in @info, and the error code.
 */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
268 | 269 | ||
269 | /*------------------------- Resume routines -------------------------*/ | 270 | /*------------------------- Resume routines -------------------------*/ |
270 | 271 | ||
/**
 * resume_device_noirq - Power on one device (early resume).
 * @dev: Device.
 * @state: PM transition of the system being carried out.
 *
 * Must be called with interrupts disabled.
 *
 * Only bus-level callbacks exist for this phase: the new-style
 * dev_pm_ops dispatch is preferred, falling back to the legacy
 * ->resume_early() hook.  A device without a bus is a no-op.
 */
static int resume_device_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (!dev->bus)
		goto End;

	if (dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	} else if (dev->bus->resume_early) {
		pm_dev_dbg(dev, state, "legacy EARLY ");
		error = dev->bus->resume_early(dev);
	}
 End:
	TRACE_RESUME(error);
	return error;
}
299 | 300 | ||
/**
 * dpm_power_up - Power on all regular (non-sysdev) devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "noirq resume" callback for all devices marked
 * as DPM_OFF_IRQ.
 *
 * Must be called with interrupts disabled and only one CPU running.
 *
 * Errors are reported but do not stop the walk; every eligible device
 * is still attempted and downgraded to DPM_OFF.
 */
static void dpm_power_up(pm_message_t state)
{
	struct device *dev;

	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = resume_device_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
}
323 | 324 | ||
/**
 * device_power_up - Turn on all devices that need special attention.
 * @state: PM transition of the system being carried out.
 *
 * Power on system devices, then devices that required we shut them down
 * with interrupts disabled.
 *
 * Must be called with interrupts disabled.
 */
void device_power_up(pm_message_t state)
{
	/* sysdevs first: regular devices may depend on them. */
	sysdev_resume();
	dpm_power_up(state);
}
EXPORT_SYMBOL_GPL(device_power_up);
339 | 340 | ||
/**
 * resume_device - Restore state for one device.
 * @dev: Device.
 * @state: PM transition of the system being carried out.
 *
 * Runs the resume callbacks in bus -> type -> class order; at each level
 * the new dev_pm_ops dispatch is preferred over the legacy ->resume()
 * hook.  The first level to report an error short-circuits the rest.
 */
static int resume_device(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Serialize against other PM operations on this device. */
	down(&dev->sem);

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->resume(dev);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		} else if (dev->type->resume) {
			pm_dev_dbg(dev, state, "legacy type ");
			error = dev->type->resume(dev);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->resume(dev);
		}
	}
 End:
	up(&dev->sem);

	TRACE_RESUME(error);
	return error;
}
393 | 394 | ||
/**
 * dpm_resume - Resume every device.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices the status of
 * which indicates that they are inactive.
 *
 * Processed devices are moved onto a private list so the loop over
 * dpm_list terminates even though entries are taken off it; dpm_list_mtx
 * is dropped around the callback because resume handlers may sleep.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Parentless-device registration is allowed again from here on. */
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		/* Hold a reference so dev survives the unlocked window. */
		get_device(dev);
		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = resume_device(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		/* Entry may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
434 | 435 | ||
/**
 * complete_device - Complete a PM transition for given device
 * @dev: Device.
 * @state: PM transition of the system being carried out.
 *
 * Invokes the optional ->complete() callbacks in class -> type -> bus
 * order, i.e. the reverse of the bus -> type -> class order used by
 * resume_device().  Callbacks return void, so no errors propagate.
 */
static void complete_device(struct device *dev, pm_message_t state)
{
	down(&dev->sem);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	up(&dev->sem);
}
461 | 462 | ||
/**
 * dpm_complete - Complete a PM transition for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices that are not marked
 * as DPM_ON.
 *
 * Walks dpm_list from the tail (children before parents), using the same
 * private-list/unlock-around-callback protocol as dpm_resume().
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		/* Hold a reference so dev survives the unlocked window. */
		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			complete_device(dev, state);

			mutex_lock(&dpm_list_mtx);
		}
		/* Entry may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
494 | 495 | ||
/**
 * device_resume - Restore state of each device in system.
 * @state: PM transition of the system being carried out.
 *
 * Resume all the devices, unlock them all, and allow new
 * devices to be registered once again.
 */
void device_resume(pm_message_t state)
{
	/* Resume callbacks sleep; never call this from atomic context. */
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(device_resume);
509 | 510 | ||
510 | 511 | ||
511 | /*------------------------- Suspend routines -------------------------*/ | 512 | /*------------------------- Suspend routines -------------------------*/ |
512 | 513 | ||
513 | /** | 514 | /** |
514 | * resume_event - return a PM message representing the resume event | 515 | * resume_event - return a PM message representing the resume event |
515 | * corresponding to given sleep state. | 516 | * corresponding to given sleep state. |
516 | * @sleep_state: PM message representing a sleep state. | 517 | * @sleep_state: PM message representing a sleep state. |
517 | */ | 518 | */ |
518 | static pm_message_t resume_event(pm_message_t sleep_state) | 519 | static pm_message_t resume_event(pm_message_t sleep_state) |
519 | { | 520 | { |
520 | switch (sleep_state.event) { | 521 | switch (sleep_state.event) { |
521 | case PM_EVENT_SUSPEND: | 522 | case PM_EVENT_SUSPEND: |
522 | return PMSG_RESUME; | 523 | return PMSG_RESUME; |
523 | case PM_EVENT_FREEZE: | 524 | case PM_EVENT_FREEZE: |
524 | case PM_EVENT_QUIESCE: | 525 | case PM_EVENT_QUIESCE: |
525 | return PMSG_RECOVER; | 526 | return PMSG_RECOVER; |
526 | case PM_EVENT_HIBERNATE: | 527 | case PM_EVENT_HIBERNATE: |
527 | return PMSG_RESTORE; | 528 | return PMSG_RESTORE; |
528 | } | 529 | } |
529 | return PMSG_ON; | 530 | return PMSG_ON; |
530 | } | 531 | } |
531 | 532 | ||
/**
 * suspend_device_noirq - Shut down one device (late suspend).
 * @dev: Device.
 * @state: PM transition of the system being carried out.
 *
 * This is called with interrupts off and only a single CPU running.
 *
 * Only bus-level callbacks exist for this phase; the dev_pm_ops dispatch
 * is preferred over the legacy ->suspend_late() hook.  A device without
 * a bus succeeds trivially.
 */
static int suspend_device_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (!dev->bus)
		return 0;

	if (dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	} else if (dev->bus->suspend_late) {
		pm_dev_dbg(dev, state, "legacy LATE ");
		error = dev->bus->suspend_late(dev, state);
		suspend_report_result(dev->bus->suspend_late, error);
	}
	return error;
}
556 | 557 | ||
/**
 * device_power_down - Shut down special devices.
 * @state: PM transition of the system being carried out.
 *
 * Power down devices that require interrupts to be disabled.
 * Then power down system devices.
 *
 * Must be called with interrupts disabled and only one CPU running.
 *
 * Returns 0 on success; on failure, devices already powered down are
 * powered back up via dpm_power_up() with the matching resume event.
 */
int device_power_down(pm_message_t state)
{
	struct device *dev;
	int error = 0;

	/* Reverse list order: children are shut down before their parents. */
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = suspend_device_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	if (!error)
		error = sysdev_suspend(state);
	if (error)
		/* Roll back everything marked DPM_OFF_IRQ above. */
		dpm_power_up(resume_event(state));
	return error;
}
EXPORT_SYMBOL_GPL(device_power_down);
586 | 587 | ||
587 | /** | 588 | /** |
588 | * suspend_device - Save state of one device. | 589 | * suspend_device - Save state of one device. |
589 | * @dev: Device. | 590 | * @dev: Device. |
590 | * @state: PM transition of the system being carried out. | 591 | * @state: PM transition of the system being carried out. |
591 | */ | 592 | */ |
592 | static int suspend_device(struct device *dev, pm_message_t state) | 593 | static int suspend_device(struct device *dev, pm_message_t state) |
593 | { | 594 | { |
594 | int error = 0; | 595 | int error = 0; |
595 | 596 | ||
596 | down(&dev->sem); | 597 | down(&dev->sem); |
597 | 598 | ||
598 | if (dev->class) { | 599 | if (dev->class) { |
599 | if (dev->class->pm) { | 600 | if (dev->class->pm) { |
600 | pm_dev_dbg(dev, state, "class "); | 601 | pm_dev_dbg(dev, state, "class "); |
601 | error = pm_op(dev, dev->class->pm, state); | 602 | error = pm_op(dev, dev->class->pm, state); |
602 | } else if (dev->class->suspend) { | 603 | } else if (dev->class->suspend) { |
603 | pm_dev_dbg(dev, state, "legacy class "); | 604 | pm_dev_dbg(dev, state, "legacy class "); |
604 | error = dev->class->suspend(dev, state); | 605 | error = dev->class->suspend(dev, state); |
605 | suspend_report_result(dev->class->suspend, error); | 606 | suspend_report_result(dev->class->suspend, error); |
606 | } | 607 | } |
607 | if (error) | 608 | if (error) |
608 | goto End; | 609 | goto End; |
609 | } | 610 | } |
610 | 611 | ||
611 | if (dev->type) { | 612 | if (dev->type) { |
612 | if (dev->type->pm) { | 613 | if (dev->type->pm) { |
613 | pm_dev_dbg(dev, state, "type "); | 614 | pm_dev_dbg(dev, state, "type "); |
614 | error = pm_op(dev, dev->type->pm, state); | 615 | error = pm_op(dev, dev->type->pm, state); |
615 | } else if (dev->type->suspend) { | 616 | } else if (dev->type->suspend) { |
616 | pm_dev_dbg(dev, state, "legacy type "); | 617 | pm_dev_dbg(dev, state, "legacy type "); |
617 | error = dev->type->suspend(dev, state); | 618 | error = dev->type->suspend(dev, state); |
618 | suspend_report_result(dev->type->suspend, error); | 619 | suspend_report_result(dev->type->suspend, error); |
619 | } | 620 | } |
620 | if (error) | 621 | if (error) |
621 | goto End; | 622 | goto End; |
622 | } | 623 | } |
623 | 624 | ||
624 | if (dev->bus) { | 625 | if (dev->bus) { |
625 | if (dev->bus->pm) { | 626 | if (dev->bus->pm) { |
626 | pm_dev_dbg(dev, state, ""); | 627 | pm_dev_dbg(dev, state, ""); |
627 | error = pm_op(dev, &dev->bus->pm->base, state); | 628 | error = pm_op(dev, dev->bus->pm, state); |
628 | } else if (dev->bus->suspend) { | 629 | } else if (dev->bus->suspend) { |
629 | pm_dev_dbg(dev, state, "legacy "); | 630 | pm_dev_dbg(dev, state, "legacy "); |
630 | error = dev->bus->suspend(dev, state); | 631 | error = dev->bus->suspend(dev, state); |
631 | suspend_report_result(dev->bus->suspend, error); | 632 | suspend_report_result(dev->bus->suspend, error); |
632 | } | 633 | } |
633 | } | 634 | } |
634 | End: | 635 | End: |
635 | up(&dev->sem); | 636 | up(&dev->sem); |
636 | 637 | ||
637 | return error; | 638 | return error; |
638 | } | 639 | } |
639 | 640 | ||
640 | /** | 641 | /** |
641 | * dpm_suspend - Suspend every device. | 642 | * dpm_suspend - Suspend every device. |
642 | * @state: PM transition of the system being carried out. | 643 | * @state: PM transition of the system being carried out. |
643 | * | 644 | * |
644 | * Execute the appropriate "suspend" callbacks for all devices. | 645 | * Execute the appropriate "suspend" callbacks for all devices. |
645 | */ | 646 | */ |
646 | static int dpm_suspend(pm_message_t state) | 647 | static int dpm_suspend(pm_message_t state) |
647 | { | 648 | { |
648 | struct list_head list; | 649 | struct list_head list; |
649 | int error = 0; | 650 | int error = 0; |
650 | 651 | ||
651 | INIT_LIST_HEAD(&list); | 652 | INIT_LIST_HEAD(&list); |
652 | mutex_lock(&dpm_list_mtx); | 653 | mutex_lock(&dpm_list_mtx); |
653 | while (!list_empty(&dpm_list)) { | 654 | while (!list_empty(&dpm_list)) { |
654 | struct device *dev = to_device(dpm_list.prev); | 655 | struct device *dev = to_device(dpm_list.prev); |
655 | 656 | ||
656 | get_device(dev); | 657 | get_device(dev); |
657 | mutex_unlock(&dpm_list_mtx); | 658 | mutex_unlock(&dpm_list_mtx); |
658 | 659 | ||
659 | error = suspend_device(dev, state); | 660 | error = suspend_device(dev, state); |
660 | 661 | ||
661 | mutex_lock(&dpm_list_mtx); | 662 | mutex_lock(&dpm_list_mtx); |
662 | if (error) { | 663 | if (error) { |
663 | pm_dev_err(dev, state, "", error); | 664 | pm_dev_err(dev, state, "", error); |
664 | put_device(dev); | 665 | put_device(dev); |
665 | break; | 666 | break; |
666 | } | 667 | } |
667 | dev->power.status = DPM_OFF; | 668 | dev->power.status = DPM_OFF; |
668 | if (!list_empty(&dev->power.entry)) | 669 | if (!list_empty(&dev->power.entry)) |
669 | list_move(&dev->power.entry, &list); | 670 | list_move(&dev->power.entry, &list); |
670 | put_device(dev); | 671 | put_device(dev); |
671 | } | 672 | } |
672 | list_splice(&list, dpm_list.prev); | 673 | list_splice(&list, dpm_list.prev); |
673 | mutex_unlock(&dpm_list_mtx); | 674 | mutex_unlock(&dpm_list_mtx); |
674 | return error; | 675 | return error; |
675 | } | 676 | } |
676 | 677 | ||
677 | /** | 678 | /** |
678 | * prepare_device - Execute the ->prepare() callback(s) for given device. | 679 | * prepare_device - Execute the ->prepare() callback(s) for given device. |
679 | * @dev: Device. | 680 | * @dev: Device. |
680 | * @state: PM transition of the system being carried out. | 681 | * @state: PM transition of the system being carried out. |
681 | */ | 682 | */ |
682 | static int prepare_device(struct device *dev, pm_message_t state) | 683 | static int prepare_device(struct device *dev, pm_message_t state) |
683 | { | 684 | { |
684 | int error = 0; | 685 | int error = 0; |
685 | 686 | ||
686 | down(&dev->sem); | 687 | down(&dev->sem); |
687 | 688 | ||
688 | if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) { | 689 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { |
689 | pm_dev_dbg(dev, state, "preparing "); | 690 | pm_dev_dbg(dev, state, "preparing "); |
690 | error = dev->bus->pm->base.prepare(dev); | 691 | error = dev->bus->pm->prepare(dev); |
691 | suspend_report_result(dev->bus->pm->base.prepare, error); | 692 | suspend_report_result(dev->bus->pm->prepare, error); |
692 | if (error) | 693 | if (error) |
693 | goto End; | 694 | goto End; |
694 | } | 695 | } |
695 | 696 | ||
696 | if (dev->type && dev->type->pm && dev->type->pm->prepare) { | 697 | if (dev->type && dev->type->pm && dev->type->pm->prepare) { |
697 | pm_dev_dbg(dev, state, "preparing type "); | 698 | pm_dev_dbg(dev, state, "preparing type "); |
698 | error = dev->type->pm->prepare(dev); | 699 | error = dev->type->pm->prepare(dev); |
699 | suspend_report_result(dev->type->pm->prepare, error); | 700 | suspend_report_result(dev->type->pm->prepare, error); |
700 | if (error) | 701 | if (error) |
701 | goto End; | 702 | goto End; |
702 | } | 703 | } |
703 | 704 | ||
704 | if (dev->class && dev->class->pm && dev->class->pm->prepare) { | 705 | if (dev->class && dev->class->pm && dev->class->pm->prepare) { |
705 | pm_dev_dbg(dev, state, "preparing class "); | 706 | pm_dev_dbg(dev, state, "preparing class "); |
706 | error = dev->class->pm->prepare(dev); | 707 | error = dev->class->pm->prepare(dev); |
707 | suspend_report_result(dev->class->pm->prepare, error); | 708 | suspend_report_result(dev->class->pm->prepare, error); |
708 | } | 709 | } |
709 | End: | 710 | End: |
710 | up(&dev->sem); | 711 | up(&dev->sem); |
711 | 712 | ||
712 | return error; | 713 | return error; |
713 | } | 714 | } |
714 | 715 | ||
715 | /** | 716 | /** |
716 | * dpm_prepare - Prepare all devices for a PM transition. | 717 | * dpm_prepare - Prepare all devices for a PM transition. |
717 | * @state: PM transition of the system being carried out. | 718 | * @state: PM transition of the system being carried out. |
718 | * | 719 | * |
719 | * Execute the ->prepare() callback for all devices. | 720 | * Execute the ->prepare() callback for all devices. |
720 | */ | 721 | */ |
721 | static int dpm_prepare(pm_message_t state) | 722 | static int dpm_prepare(pm_message_t state) |
722 | { | 723 | { |
723 | struct list_head list; | 724 | struct list_head list; |
724 | int error = 0; | 725 | int error = 0; |
725 | 726 | ||
726 | INIT_LIST_HEAD(&list); | 727 | INIT_LIST_HEAD(&list); |
727 | mutex_lock(&dpm_list_mtx); | 728 | mutex_lock(&dpm_list_mtx); |
728 | transition_started = true; | 729 | transition_started = true; |
729 | while (!list_empty(&dpm_list)) { | 730 | while (!list_empty(&dpm_list)) { |
730 | struct device *dev = to_device(dpm_list.next); | 731 | struct device *dev = to_device(dpm_list.next); |
731 | 732 | ||
732 | get_device(dev); | 733 | get_device(dev); |
733 | dev->power.status = DPM_PREPARING; | 734 | dev->power.status = DPM_PREPARING; |
734 | mutex_unlock(&dpm_list_mtx); | 735 | mutex_unlock(&dpm_list_mtx); |
735 | 736 | ||
736 | error = prepare_device(dev, state); | 737 | error = prepare_device(dev, state); |
737 | 738 | ||
738 | mutex_lock(&dpm_list_mtx); | 739 | mutex_lock(&dpm_list_mtx); |
739 | if (error) { | 740 | if (error) { |
740 | dev->power.status = DPM_ON; | 741 | dev->power.status = DPM_ON; |
741 | if (error == -EAGAIN) { | 742 | if (error == -EAGAIN) { |
742 | put_device(dev); | 743 | put_device(dev); |
743 | continue; | 744 | continue; |
744 | } | 745 | } |
745 | printk(KERN_ERR "PM: Failed to prepare device %s " | 746 | printk(KERN_ERR "PM: Failed to prepare device %s " |
746 | "for power transition: error %d\n", | 747 | "for power transition: error %d\n", |
747 | kobject_name(&dev->kobj), error); | 748 | kobject_name(&dev->kobj), error); |
748 | put_device(dev); | 749 | put_device(dev); |
749 | break; | 750 | break; |
750 | } | 751 | } |
751 | dev->power.status = DPM_SUSPENDING; | 752 | dev->power.status = DPM_SUSPENDING; |
752 | if (!list_empty(&dev->power.entry)) | 753 | if (!list_empty(&dev->power.entry)) |
753 | list_move_tail(&dev->power.entry, &list); | 754 | list_move_tail(&dev->power.entry, &list); |
754 | put_device(dev); | 755 | put_device(dev); |
755 | } | 756 | } |
756 | list_splice(&list, &dpm_list); | 757 | list_splice(&list, &dpm_list); |
757 | mutex_unlock(&dpm_list_mtx); | 758 | mutex_unlock(&dpm_list_mtx); |
758 | return error; | 759 | return error; |
759 | } | 760 | } |
760 | 761 | ||
761 | /** | 762 | /** |
762 | * device_suspend - Save state and stop all devices in system. | 763 | * device_suspend - Save state and stop all devices in system. |
763 | * @state: PM transition of the system being carried out. | 764 | * @state: PM transition of the system being carried out. |
764 | * | 765 | * |
765 | * Prepare and suspend all devices. | 766 | * Prepare and suspend all devices. |
766 | */ | 767 | */ |
767 | int device_suspend(pm_message_t state) | 768 | int device_suspend(pm_message_t state) |
768 | { | 769 | { |
769 | int error; | 770 | int error; |
770 | 771 | ||
771 | might_sleep(); | 772 | might_sleep(); |
772 | error = dpm_prepare(state); | 773 | error = dpm_prepare(state); |
773 | if (!error) | 774 | if (!error) |
774 | error = dpm_suspend(state); | 775 | error = dpm_suspend(state); |
775 | return error; | 776 | return error; |
776 | } | 777 | } |
777 | EXPORT_SYMBOL_GPL(device_suspend); | 778 | EXPORT_SYMBOL_GPL(device_suspend); |
778 | 779 | ||
779 | void __suspend_report_result(const char *function, void *fn, int ret) | 780 | void __suspend_report_result(const char *function, void *fn, int ret) |
780 | { | 781 | { |
781 | if (ret) | 782 | if (ret) |
782 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); | 783 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); |
783 | } | 784 | } |
784 | EXPORT_SYMBOL_GPL(__suspend_report_result); | 785 | EXPORT_SYMBOL_GPL(__suspend_report_result); |
785 | 786 |
drivers/pci/pci-driver.c
1 | /* | 1 | /* |
2 | * drivers/pci/pci-driver.c | 2 | * drivers/pci/pci-driver.c |
3 | * | 3 | * |
4 | * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com> | 4 | * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com> |
5 | * (C) Copyright 2007 Novell Inc. | 5 | * (C) Copyright 2007 Novell Inc. |
6 | * | 6 | * |
7 | * Released under the GPL v2 only. | 7 | * Released under the GPL v2 only. |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/mempolicy.h> | 15 | #include <linux/mempolicy.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include "pci.h" | 19 | #include "pci.h" |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * Dynamic device IDs are disabled for !CONFIG_HOTPLUG | 22 | * Dynamic device IDs are disabled for !CONFIG_HOTPLUG |
23 | */ | 23 | */ |
24 | 24 | ||
25 | struct pci_dynid { | 25 | struct pci_dynid { |
26 | struct list_head node; | 26 | struct list_head node; |
27 | struct pci_device_id id; | 27 | struct pci_device_id id; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | #ifdef CONFIG_HOTPLUG | 30 | #ifdef CONFIG_HOTPLUG |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * store_new_id - add a new PCI device ID to this driver and re-probe devices | 33 | * store_new_id - add a new PCI device ID to this driver and re-probe devices |
34 | * @driver: target device driver | 34 | * @driver: target device driver |
35 | * @buf: buffer for scanning device ID data | 35 | * @buf: buffer for scanning device ID data |
36 | * @count: input size | 36 | * @count: input size |
37 | * | 37 | * |
38 | * Adds a new dynamic pci device ID to this driver, | 38 | * Adds a new dynamic pci device ID to this driver, |
39 | * and causes the driver to probe for all devices again. | 39 | * and causes the driver to probe for all devices again. |
40 | */ | 40 | */ |
41 | static ssize_t | 41 | static ssize_t |
42 | store_new_id(struct device_driver *driver, const char *buf, size_t count) | 42 | store_new_id(struct device_driver *driver, const char *buf, size_t count) |
43 | { | 43 | { |
44 | struct pci_dynid *dynid; | 44 | struct pci_dynid *dynid; |
45 | struct pci_driver *pdrv = to_pci_driver(driver); | 45 | struct pci_driver *pdrv = to_pci_driver(driver); |
46 | const struct pci_device_id *ids = pdrv->id_table; | 46 | const struct pci_device_id *ids = pdrv->id_table; |
47 | __u32 vendor, device, subvendor=PCI_ANY_ID, | 47 | __u32 vendor, device, subvendor=PCI_ANY_ID, |
48 | subdevice=PCI_ANY_ID, class=0, class_mask=0; | 48 | subdevice=PCI_ANY_ID, class=0, class_mask=0; |
49 | unsigned long driver_data=0; | 49 | unsigned long driver_data=0; |
50 | int fields=0; | 50 | int fields=0; |
51 | int retval; | 51 | int retval; |
52 | 52 | ||
53 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", | 53 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", |
54 | &vendor, &device, &subvendor, &subdevice, | 54 | &vendor, &device, &subvendor, &subdevice, |
55 | &class, &class_mask, &driver_data); | 55 | &class, &class_mask, &driver_data); |
56 | if (fields < 2) | 56 | if (fields < 2) |
57 | return -EINVAL; | 57 | return -EINVAL; |
58 | 58 | ||
59 | /* Only accept driver_data values that match an existing id_table | 59 | /* Only accept driver_data values that match an existing id_table |
60 | entry */ | 60 | entry */ |
61 | retval = -EINVAL; | 61 | retval = -EINVAL; |
62 | while (ids->vendor || ids->subvendor || ids->class_mask) { | 62 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
63 | if (driver_data == ids->driver_data) { | 63 | if (driver_data == ids->driver_data) { |
64 | retval = 0; | 64 | retval = 0; |
65 | break; | 65 | break; |
66 | } | 66 | } |
67 | ids++; | 67 | ids++; |
68 | } | 68 | } |
69 | if (retval) /* No match */ | 69 | if (retval) /* No match */ |
70 | return retval; | 70 | return retval; |
71 | 71 | ||
72 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | 72 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); |
73 | if (!dynid) | 73 | if (!dynid) |
74 | return -ENOMEM; | 74 | return -ENOMEM; |
75 | 75 | ||
76 | dynid->id.vendor = vendor; | 76 | dynid->id.vendor = vendor; |
77 | dynid->id.device = device; | 77 | dynid->id.device = device; |
78 | dynid->id.subvendor = subvendor; | 78 | dynid->id.subvendor = subvendor; |
79 | dynid->id.subdevice = subdevice; | 79 | dynid->id.subdevice = subdevice; |
80 | dynid->id.class = class; | 80 | dynid->id.class = class; |
81 | dynid->id.class_mask = class_mask; | 81 | dynid->id.class_mask = class_mask; |
82 | dynid->id.driver_data = driver_data; | 82 | dynid->id.driver_data = driver_data; |
83 | 83 | ||
84 | spin_lock(&pdrv->dynids.lock); | 84 | spin_lock(&pdrv->dynids.lock); |
85 | list_add_tail(&dynid->node, &pdrv->dynids.list); | 85 | list_add_tail(&dynid->node, &pdrv->dynids.list); |
86 | spin_unlock(&pdrv->dynids.lock); | 86 | spin_unlock(&pdrv->dynids.lock); |
87 | 87 | ||
88 | if (get_driver(&pdrv->driver)) { | 88 | if (get_driver(&pdrv->driver)) { |
89 | retval = driver_attach(&pdrv->driver); | 89 | retval = driver_attach(&pdrv->driver); |
90 | put_driver(&pdrv->driver); | 90 | put_driver(&pdrv->driver); |
91 | } | 91 | } |
92 | 92 | ||
93 | if (retval) | 93 | if (retval) |
94 | return retval; | 94 | return retval; |
95 | return count; | 95 | return count; |
96 | } | 96 | } |
97 | static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); | 97 | static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); |
98 | 98 | ||
99 | static void | 99 | static void |
100 | pci_free_dynids(struct pci_driver *drv) | 100 | pci_free_dynids(struct pci_driver *drv) |
101 | { | 101 | { |
102 | struct pci_dynid *dynid, *n; | 102 | struct pci_dynid *dynid, *n; |
103 | 103 | ||
104 | spin_lock(&drv->dynids.lock); | 104 | spin_lock(&drv->dynids.lock); |
105 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { | 105 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { |
106 | list_del(&dynid->node); | 106 | list_del(&dynid->node); |
107 | kfree(dynid); | 107 | kfree(dynid); |
108 | } | 108 | } |
109 | spin_unlock(&drv->dynids.lock); | 109 | spin_unlock(&drv->dynids.lock); |
110 | } | 110 | } |
111 | 111 | ||
112 | static int | 112 | static int |
113 | pci_create_newid_file(struct pci_driver *drv) | 113 | pci_create_newid_file(struct pci_driver *drv) |
114 | { | 114 | { |
115 | int error = 0; | 115 | int error = 0; |
116 | if (drv->probe != NULL) | 116 | if (drv->probe != NULL) |
117 | error = driver_create_file(&drv->driver, &driver_attr_new_id); | 117 | error = driver_create_file(&drv->driver, &driver_attr_new_id); |
118 | return error; | 118 | return error; |
119 | } | 119 | } |
120 | 120 | ||
121 | static void pci_remove_newid_file(struct pci_driver *drv) | 121 | static void pci_remove_newid_file(struct pci_driver *drv) |
122 | { | 122 | { |
123 | driver_remove_file(&drv->driver, &driver_attr_new_id); | 123 | driver_remove_file(&drv->driver, &driver_attr_new_id); |
124 | } | 124 | } |
125 | #else /* !CONFIG_HOTPLUG */ | 125 | #else /* !CONFIG_HOTPLUG */ |
126 | static inline void pci_free_dynids(struct pci_driver *drv) {} | 126 | static inline void pci_free_dynids(struct pci_driver *drv) {} |
127 | static inline int pci_create_newid_file(struct pci_driver *drv) | 127 | static inline int pci_create_newid_file(struct pci_driver *drv) |
128 | { | 128 | { |
129 | return 0; | 129 | return 0; |
130 | } | 130 | } |
131 | static inline void pci_remove_newid_file(struct pci_driver *drv) {} | 131 | static inline void pci_remove_newid_file(struct pci_driver *drv) {} |
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * pci_match_id - See if a pci device matches a given pci_id table | 135 | * pci_match_id - See if a pci device matches a given pci_id table |
136 | * @ids: array of PCI device id structures to search in | 136 | * @ids: array of PCI device id structures to search in |
137 | * @dev: the PCI device structure to match against. | 137 | * @dev: the PCI device structure to match against. |
138 | * | 138 | * |
139 | * Used by a driver to check whether a PCI device present in the | 139 | * Used by a driver to check whether a PCI device present in the |
140 | * system is in its list of supported devices. Returns the matching | 140 | * system is in its list of supported devices. Returns the matching |
141 | * pci_device_id structure or %NULL if there is no match. | 141 | * pci_device_id structure or %NULL if there is no match. |
142 | * | 142 | * |
143 | * Deprecated, don't use this as it will not catch any dynamic ids | 143 | * Deprecated, don't use this as it will not catch any dynamic ids |
144 | * that a driver might want to check for. | 144 | * that a driver might want to check for. |
145 | */ | 145 | */ |
146 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, | 146 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, |
147 | struct pci_dev *dev) | 147 | struct pci_dev *dev) |
148 | { | 148 | { |
149 | if (ids) { | 149 | if (ids) { |
150 | while (ids->vendor || ids->subvendor || ids->class_mask) { | 150 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
151 | if (pci_match_one_device(ids, dev)) | 151 | if (pci_match_one_device(ids, dev)) |
152 | return ids; | 152 | return ids; |
153 | ids++; | 153 | ids++; |
154 | } | 154 | } |
155 | } | 155 | } |
156 | return NULL; | 156 | return NULL; |
157 | } | 157 | } |
158 | 158 | ||
159 | /** | 159 | /** |
160 | * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure | 160 | * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure |
161 | * @drv: the PCI driver to match against | 161 | * @drv: the PCI driver to match against |
162 | * @dev: the PCI device structure to match against | 162 | * @dev: the PCI device structure to match against |
163 | * | 163 | * |
164 | * Used by a driver to check whether a PCI device present in the | 164 | * Used by a driver to check whether a PCI device present in the |
165 | * system is in its list of supported devices. Returns the matching | 165 | * system is in its list of supported devices. Returns the matching |
166 | * pci_device_id structure or %NULL if there is no match. | 166 | * pci_device_id structure or %NULL if there is no match. |
167 | */ | 167 | */ |
168 | static const struct pci_device_id *pci_match_device(struct pci_driver *drv, | 168 | static const struct pci_device_id *pci_match_device(struct pci_driver *drv, |
169 | struct pci_dev *dev) | 169 | struct pci_dev *dev) |
170 | { | 170 | { |
171 | struct pci_dynid *dynid; | 171 | struct pci_dynid *dynid; |
172 | 172 | ||
173 | /* Look at the dynamic ids first, before the static ones */ | 173 | /* Look at the dynamic ids first, before the static ones */ |
174 | spin_lock(&drv->dynids.lock); | 174 | spin_lock(&drv->dynids.lock); |
175 | list_for_each_entry(dynid, &drv->dynids.list, node) { | 175 | list_for_each_entry(dynid, &drv->dynids.list, node) { |
176 | if (pci_match_one_device(&dynid->id, dev)) { | 176 | if (pci_match_one_device(&dynid->id, dev)) { |
177 | spin_unlock(&drv->dynids.lock); | 177 | spin_unlock(&drv->dynids.lock); |
178 | return &dynid->id; | 178 | return &dynid->id; |
179 | } | 179 | } |
180 | } | 180 | } |
181 | spin_unlock(&drv->dynids.lock); | 181 | spin_unlock(&drv->dynids.lock); |
182 | 182 | ||
183 | return pci_match_id(drv->id_table, dev); | 183 | return pci_match_id(drv->id_table, dev); |
184 | } | 184 | } |
185 | 185 | ||
186 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | 186 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, |
187 | const struct pci_device_id *id) | 187 | const struct pci_device_id *id) |
188 | { | 188 | { |
189 | int error; | 189 | int error; |
190 | #ifdef CONFIG_NUMA | 190 | #ifdef CONFIG_NUMA |
191 | /* Execute driver initialization on node where the | 191 | /* Execute driver initialization on node where the |
192 | device's bus is attached to. This way the driver likely | 192 | device's bus is attached to. This way the driver likely |
193 | allocates its local memory on the right node without | 193 | allocates its local memory on the right node without |
194 | any need to change it. */ | 194 | any need to change it. */ |
195 | struct mempolicy *oldpol; | 195 | struct mempolicy *oldpol; |
196 | cpumask_t oldmask = current->cpus_allowed; | 196 | cpumask_t oldmask = current->cpus_allowed; |
197 | int node = dev_to_node(&dev->dev); | 197 | int node = dev_to_node(&dev->dev); |
198 | 198 | ||
199 | if (node >= 0) { | 199 | if (node >= 0) { |
200 | node_to_cpumask_ptr(nodecpumask, node); | 200 | node_to_cpumask_ptr(nodecpumask, node); |
201 | set_cpus_allowed_ptr(current, nodecpumask); | 201 | set_cpus_allowed_ptr(current, nodecpumask); |
202 | } | 202 | } |
203 | /* And set default memory allocation policy */ | 203 | /* And set default memory allocation policy */ |
204 | oldpol = current->mempolicy; | 204 | oldpol = current->mempolicy; |
205 | current->mempolicy = NULL; /* fall back to system default policy */ | 205 | current->mempolicy = NULL; /* fall back to system default policy */ |
206 | #endif | 206 | #endif |
207 | error = drv->probe(dev, id); | 207 | error = drv->probe(dev, id); |
208 | #ifdef CONFIG_NUMA | 208 | #ifdef CONFIG_NUMA |
209 | set_cpus_allowed_ptr(current, &oldmask); | 209 | set_cpus_allowed_ptr(current, &oldmask); |
210 | current->mempolicy = oldpol; | 210 | current->mempolicy = oldpol; |
211 | #endif | 211 | #endif |
212 | return error; | 212 | return error; |
213 | } | 213 | } |
214 | 214 | ||
215 | /** | 215 | /** |
216 | * __pci_device_probe() | 216 | * __pci_device_probe() |
217 | * @drv: driver to call to check if it wants the PCI device | 217 | * @drv: driver to call to check if it wants the PCI device |
218 | * @pci_dev: PCI device being probed | 218 | * @pci_dev: PCI device being probed |
219 | * | 219 | * |
220 | * returns 0 on success, else error. | 220 | * returns 0 on success, else error. |
221 | * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. | 221 | * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. |
222 | */ | 222 | */ |
223 | static int | 223 | static int |
224 | __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) | 224 | __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) |
225 | { | 225 | { |
226 | const struct pci_device_id *id; | 226 | const struct pci_device_id *id; |
227 | int error = 0; | 227 | int error = 0; |
228 | 228 | ||
229 | if (!pci_dev->driver && drv->probe) { | 229 | if (!pci_dev->driver && drv->probe) { |
230 | error = -ENODEV; | 230 | error = -ENODEV; |
231 | 231 | ||
232 | id = pci_match_device(drv, pci_dev); | 232 | id = pci_match_device(drv, pci_dev); |
233 | if (id) | 233 | if (id) |
234 | error = pci_call_probe(drv, pci_dev, id); | 234 | error = pci_call_probe(drv, pci_dev, id); |
235 | if (error >= 0) { | 235 | if (error >= 0) { |
236 | pci_dev->driver = drv; | 236 | pci_dev->driver = drv; |
237 | error = 0; | 237 | error = 0; |
238 | } | 238 | } |
239 | } | 239 | } |
240 | return error; | 240 | return error; |
241 | } | 241 | } |
242 | 242 | ||
243 | static int pci_device_probe(struct device * dev) | 243 | static int pci_device_probe(struct device * dev) |
244 | { | 244 | { |
245 | int error = 0; | 245 | int error = 0; |
246 | struct pci_driver *drv; | 246 | struct pci_driver *drv; |
247 | struct pci_dev *pci_dev; | 247 | struct pci_dev *pci_dev; |
248 | 248 | ||
249 | drv = to_pci_driver(dev->driver); | 249 | drv = to_pci_driver(dev->driver); |
250 | pci_dev = to_pci_dev(dev); | 250 | pci_dev = to_pci_dev(dev); |
251 | pci_dev_get(pci_dev); | 251 | pci_dev_get(pci_dev); |
252 | error = __pci_device_probe(drv, pci_dev); | 252 | error = __pci_device_probe(drv, pci_dev); |
253 | if (error) | 253 | if (error) |
254 | pci_dev_put(pci_dev); | 254 | pci_dev_put(pci_dev); |
255 | 255 | ||
256 | return error; | 256 | return error; |
257 | } | 257 | } |
258 | 258 | ||
259 | static int pci_device_remove(struct device * dev) | 259 | static int pci_device_remove(struct device * dev) |
260 | { | 260 | { |
261 | struct pci_dev * pci_dev = to_pci_dev(dev); | 261 | struct pci_dev * pci_dev = to_pci_dev(dev); |
262 | struct pci_driver * drv = pci_dev->driver; | 262 | struct pci_driver * drv = pci_dev->driver; |
263 | 263 | ||
264 | if (drv) { | 264 | if (drv) { |
265 | if (drv->remove) | 265 | if (drv->remove) |
266 | drv->remove(pci_dev); | 266 | drv->remove(pci_dev); |
267 | pci_dev->driver = NULL; | 267 | pci_dev->driver = NULL; |
268 | } | 268 | } |
269 | 269 | ||
270 | /* | 270 | /* |
271 | * If the device is still on, set the power state as "unknown", | 271 | * If the device is still on, set the power state as "unknown", |
272 | * since it might change by the next time we load the driver. | 272 | * since it might change by the next time we load the driver. |
273 | */ | 273 | */ |
274 | if (pci_dev->current_state == PCI_D0) | 274 | if (pci_dev->current_state == PCI_D0) |
275 | pci_dev->current_state = PCI_UNKNOWN; | 275 | pci_dev->current_state = PCI_UNKNOWN; |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * We would love to complain here if pci_dev->is_enabled is set, that | 278 | * We would love to complain here if pci_dev->is_enabled is set, that |
279 | * the driver should have called pci_disable_device(), but the | 279 | * the driver should have called pci_disable_device(), but the |
280 | * unfortunate fact is there are too many odd BIOS and bridge setups | 280 | * unfortunate fact is there are too many odd BIOS and bridge setups |
281 | * that don't like drivers doing that all of the time. | 281 | * that don't like drivers doing that all of the time. |
282 | * Oh well, we can dream of sane hardware when we sleep, no matter how | 282 | * Oh well, we can dream of sane hardware when we sleep, no matter how |
283 | * horrible the crap we have to deal with is when we are awake... | 283 | * horrible the crap we have to deal with is when we are awake... |
284 | */ | 284 | */ |
285 | 285 | ||
286 | pci_dev_put(pci_dev); | 286 | pci_dev_put(pci_dev); |
287 | return 0; | 287 | return 0; |
288 | } | 288 | } |
289 | 289 | ||
290 | static void pci_device_shutdown(struct device *dev) | 290 | static void pci_device_shutdown(struct device *dev) |
291 | { | 291 | { |
292 | struct pci_dev *pci_dev = to_pci_dev(dev); | 292 | struct pci_dev *pci_dev = to_pci_dev(dev); |
293 | struct pci_driver *drv = pci_dev->driver; | 293 | struct pci_driver *drv = pci_dev->driver; |
294 | 294 | ||
295 | if (drv && drv->shutdown) | 295 | if (drv && drv->shutdown) |
296 | drv->shutdown(pci_dev); | 296 | drv->shutdown(pci_dev); |
297 | pci_msi_shutdown(pci_dev); | 297 | pci_msi_shutdown(pci_dev); |
298 | pci_msix_shutdown(pci_dev); | 298 | pci_msix_shutdown(pci_dev); |
299 | } | 299 | } |
300 | 300 | ||
301 | #ifdef CONFIG_PM_SLEEP | 301 | #ifdef CONFIG_PM_SLEEP |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * Default "suspend" method for devices that have no driver provided suspend, | 304 | * Default "suspend" method for devices that have no driver provided suspend, |
305 | * or not even a driver at all. | 305 | * or not even a driver at all. |
306 | */ | 306 | */ |
307 | static void pci_default_pm_suspend(struct pci_dev *pci_dev) | 307 | static void pci_default_pm_suspend(struct pci_dev *pci_dev) |
308 | { | 308 | { |
309 | pci_save_state(pci_dev); | 309 | pci_save_state(pci_dev); |
310 | /* | 310 | /* |
311 | * mark its power state as "unknown", since we don't know if | 311 | * mark its power state as "unknown", since we don't know if |
312 | * e.g. the BIOS will change its device state when we suspend. | 312 | * e.g. the BIOS will change its device state when we suspend. |
313 | */ | 313 | */ |
314 | if (pci_dev->current_state == PCI_D0) | 314 | if (pci_dev->current_state == PCI_D0) |
315 | pci_dev->current_state = PCI_UNKNOWN; | 315 | pci_dev->current_state = PCI_UNKNOWN; |
316 | } | 316 | } |
317 | 317 | ||
318 | /* | 318 | /* |
319 | * Default "resume" method for devices that have no driver provided resume, | 319 | * Default "resume" method for devices that have no driver provided resume, |
320 | * or not even a driver at all. | 320 | * or not even a driver at all. |
321 | */ | 321 | */ |
322 | static int pci_default_pm_resume(struct pci_dev *pci_dev) | 322 | static int pci_default_pm_resume(struct pci_dev *pci_dev) |
323 | { | 323 | { |
324 | int retval = 0; | 324 | int retval = 0; |
325 | 325 | ||
326 | /* restore the PCI config space */ | 326 | /* restore the PCI config space */ |
327 | pci_restore_state(pci_dev); | 327 | pci_restore_state(pci_dev); |
328 | /* if the device was enabled before suspend, reenable */ | 328 | /* if the device was enabled before suspend, reenable */ |
329 | retval = pci_reenable_device(pci_dev); | 329 | retval = pci_reenable_device(pci_dev); |
330 | /* | 330 | /* |
331 | * if the device was busmaster before the suspend, make it busmaster | 331 | * if the device was busmaster before the suspend, make it busmaster |
332 | * again | 332 | * again |
333 | */ | 333 | */ |
334 | if (pci_dev->is_busmaster) | 334 | if (pci_dev->is_busmaster) |
335 | pci_set_master(pci_dev); | 335 | pci_set_master(pci_dev); |
336 | 336 | ||
337 | return retval; | 337 | return retval; |
338 | } | 338 | } |
339 | 339 | ||
340 | static int pci_legacy_suspend(struct device *dev, pm_message_t state) | 340 | static int pci_legacy_suspend(struct device *dev, pm_message_t state) |
341 | { | 341 | { |
342 | struct pci_dev * pci_dev = to_pci_dev(dev); | 342 | struct pci_dev * pci_dev = to_pci_dev(dev); |
343 | struct pci_driver * drv = pci_dev->driver; | 343 | struct pci_driver * drv = pci_dev->driver; |
344 | int i = 0; | 344 | int i = 0; |
345 | 345 | ||
346 | if (drv && drv->suspend) { | 346 | if (drv && drv->suspend) { |
347 | i = drv->suspend(pci_dev, state); | 347 | i = drv->suspend(pci_dev, state); |
348 | suspend_report_result(drv->suspend, i); | 348 | suspend_report_result(drv->suspend, i); |
349 | } else { | 349 | } else { |
350 | pci_default_pm_suspend(pci_dev); | 350 | pci_default_pm_suspend(pci_dev); |
351 | } | 351 | } |
352 | return i; | 352 | return i; |
353 | } | 353 | } |
354 | 354 | ||
355 | static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) | 355 | static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) |
356 | { | 356 | { |
357 | struct pci_dev * pci_dev = to_pci_dev(dev); | 357 | struct pci_dev * pci_dev = to_pci_dev(dev); |
358 | struct pci_driver * drv = pci_dev->driver; | 358 | struct pci_driver * drv = pci_dev->driver; |
359 | int i = 0; | 359 | int i = 0; |
360 | 360 | ||
361 | if (drv && drv->suspend_late) { | 361 | if (drv && drv->suspend_late) { |
362 | i = drv->suspend_late(pci_dev, state); | 362 | i = drv->suspend_late(pci_dev, state); |
363 | suspend_report_result(drv->suspend_late, i); | 363 | suspend_report_result(drv->suspend_late, i); |
364 | } | 364 | } |
365 | return i; | 365 | return i; |
366 | } | 366 | } |
367 | 367 | ||
368 | static int pci_legacy_resume(struct device *dev) | 368 | static int pci_legacy_resume(struct device *dev) |
369 | { | 369 | { |
370 | int error; | 370 | int error; |
371 | struct pci_dev * pci_dev = to_pci_dev(dev); | 371 | struct pci_dev * pci_dev = to_pci_dev(dev); |
372 | struct pci_driver * drv = pci_dev->driver; | 372 | struct pci_driver * drv = pci_dev->driver; |
373 | 373 | ||
374 | if (drv && drv->resume) | 374 | if (drv && drv->resume) |
375 | error = drv->resume(pci_dev); | 375 | error = drv->resume(pci_dev); |
376 | else | 376 | else |
377 | error = pci_default_pm_resume(pci_dev); | 377 | error = pci_default_pm_resume(pci_dev); |
378 | return error; | 378 | return error; |
379 | } | 379 | } |
380 | 380 | ||
381 | static int pci_legacy_resume_early(struct device *dev) | 381 | static int pci_legacy_resume_early(struct device *dev) |
382 | { | 382 | { |
383 | int error = 0; | 383 | int error = 0; |
384 | struct pci_dev * pci_dev = to_pci_dev(dev); | 384 | struct pci_dev * pci_dev = to_pci_dev(dev); |
385 | struct pci_driver * drv = pci_dev->driver; | 385 | struct pci_driver * drv = pci_dev->driver; |
386 | 386 | ||
387 | if (drv && drv->resume_early) | 387 | if (drv && drv->resume_early) |
388 | error = drv->resume_early(pci_dev); | 388 | error = drv->resume_early(pci_dev); |
389 | return error; | 389 | return error; |
390 | } | 390 | } |
391 | 391 | ||
392 | static int pci_pm_prepare(struct device *dev) | 392 | static int pci_pm_prepare(struct device *dev) |
393 | { | 393 | { |
394 | struct device_driver *drv = dev->driver; | 394 | struct device_driver *drv = dev->driver; |
395 | int error = 0; | 395 | int error = 0; |
396 | 396 | ||
397 | if (drv && drv->pm && drv->pm->prepare) | 397 | if (drv && drv->pm && drv->pm->prepare) |
398 | error = drv->pm->prepare(dev); | 398 | error = drv->pm->prepare(dev); |
399 | 399 | ||
400 | return error; | 400 | return error; |
401 | } | 401 | } |
402 | 402 | ||
403 | static void pci_pm_complete(struct device *dev) | 403 | static void pci_pm_complete(struct device *dev) |
404 | { | 404 | { |
405 | struct device_driver *drv = dev->driver; | 405 | struct device_driver *drv = dev->driver; |
406 | 406 | ||
407 | if (drv && drv->pm && drv->pm->complete) | 407 | if (drv && drv->pm && drv->pm->complete) |
408 | drv->pm->complete(dev); | 408 | drv->pm->complete(dev); |
409 | } | 409 | } |
410 | 410 | ||
411 | #ifdef CONFIG_SUSPEND | 411 | #ifdef CONFIG_SUSPEND |
412 | 412 | ||
413 | static int pci_pm_suspend(struct device *dev) | 413 | static int pci_pm_suspend(struct device *dev) |
414 | { | 414 | { |
415 | struct pci_dev *pci_dev = to_pci_dev(dev); | 415 | struct pci_dev *pci_dev = to_pci_dev(dev); |
416 | struct device_driver *drv = dev->driver; | 416 | struct device_driver *drv = dev->driver; |
417 | int error = 0; | 417 | int error = 0; |
418 | 418 | ||
419 | if (drv && drv->pm) { | 419 | if (drv && drv->pm) { |
420 | if (drv->pm->suspend) { | 420 | if (drv->pm->suspend) { |
421 | error = drv->pm->suspend(dev); | 421 | error = drv->pm->suspend(dev); |
422 | suspend_report_result(drv->pm->suspend, error); | 422 | suspend_report_result(drv->pm->suspend, error); |
423 | } else { | 423 | } else { |
424 | pci_default_pm_suspend(pci_dev); | 424 | pci_default_pm_suspend(pci_dev); |
425 | } | 425 | } |
426 | } else { | 426 | } else { |
427 | error = pci_legacy_suspend(dev, PMSG_SUSPEND); | 427 | error = pci_legacy_suspend(dev, PMSG_SUSPEND); |
428 | } | 428 | } |
429 | pci_fixup_device(pci_fixup_suspend, pci_dev); | 429 | pci_fixup_device(pci_fixup_suspend, pci_dev); |
430 | 430 | ||
431 | return error; | 431 | return error; |
432 | } | 432 | } |
433 | 433 | ||
434 | static int pci_pm_suspend_noirq(struct device *dev) | 434 | static int pci_pm_suspend_noirq(struct device *dev) |
435 | { | 435 | { |
436 | struct pci_dev *pci_dev = to_pci_dev(dev); | 436 | struct device_driver *drv = dev->driver; |
437 | struct pci_driver *drv = pci_dev->driver; | ||
438 | int error = 0; | 437 | int error = 0; |
439 | 438 | ||
440 | if (drv && drv->pm) { | 439 | if (drv && drv->pm) { |
441 | if (drv->pm->suspend_noirq) { | 440 | if (drv->pm->suspend_noirq) { |
442 | error = drv->pm->suspend_noirq(dev); | 441 | error = drv->pm->suspend_noirq(dev); |
443 | suspend_report_result(drv->pm->suspend_noirq, error); | 442 | suspend_report_result(drv->pm->suspend_noirq, error); |
444 | } | 443 | } |
445 | } else { | 444 | } else { |
446 | error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); | 445 | error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); |
447 | } | 446 | } |
448 | 447 | ||
449 | return error; | 448 | return error; |
450 | } | 449 | } |
451 | 450 | ||
452 | static int pci_pm_resume(struct device *dev) | 451 | static int pci_pm_resume(struct device *dev) |
453 | { | 452 | { |
454 | struct pci_dev *pci_dev = to_pci_dev(dev); | 453 | struct pci_dev *pci_dev = to_pci_dev(dev); |
455 | struct device_driver *drv = dev->driver; | 454 | struct device_driver *drv = dev->driver; |
456 | int error; | 455 | int error; |
457 | 456 | ||
458 | pci_fixup_device(pci_fixup_resume, pci_dev); | 457 | pci_fixup_device(pci_fixup_resume, pci_dev); |
459 | 458 | ||
460 | if (drv && drv->pm) { | 459 | if (drv && drv->pm) { |
461 | error = drv->pm->resume ? drv->pm->resume(dev) : | 460 | error = drv->pm->resume ? drv->pm->resume(dev) : |
462 | pci_default_pm_resume(pci_dev); | 461 | pci_default_pm_resume(pci_dev); |
463 | } else { | 462 | } else { |
464 | error = pci_legacy_resume(dev); | 463 | error = pci_legacy_resume(dev); |
465 | } | 464 | } |
466 | 465 | ||
467 | return error; | 466 | return error; |
468 | } | 467 | } |
469 | 468 | ||
470 | static int pci_pm_resume_noirq(struct device *dev) | 469 | static int pci_pm_resume_noirq(struct device *dev) |
471 | { | 470 | { |
472 | struct pci_dev *pci_dev = to_pci_dev(dev); | 471 | struct device_driver *drv = dev->driver; |
473 | struct pci_driver *drv = pci_dev->driver; | ||
474 | int error = 0; | 472 | int error = 0; |
475 | 473 | ||
476 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 474 | pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev)); |
477 | 475 | ||
478 | if (drv && drv->pm) { | 476 | if (drv && drv->pm) { |
479 | if (drv->pm->resume_noirq) | 477 | if (drv->pm->resume_noirq) |
480 | error = drv->pm->resume_noirq(dev); | 478 | error = drv->pm->resume_noirq(dev); |
481 | } else { | 479 | } else { |
482 | error = pci_legacy_resume_early(dev); | 480 | error = pci_legacy_resume_early(dev); |
483 | } | 481 | } |
484 | 482 | ||
485 | return error; | 483 | return error; |
486 | } | 484 | } |
487 | 485 | ||
488 | #else /* !CONFIG_SUSPEND */ | 486 | #else /* !CONFIG_SUSPEND */ |
489 | 487 | ||
490 | #define pci_pm_suspend NULL | 488 | #define pci_pm_suspend NULL |
491 | #define pci_pm_suspend_noirq NULL | 489 | #define pci_pm_suspend_noirq NULL |
492 | #define pci_pm_resume NULL | 490 | #define pci_pm_resume NULL |
493 | #define pci_pm_resume_noirq NULL | 491 | #define pci_pm_resume_noirq NULL |
494 | 492 | ||
495 | #endif /* !CONFIG_SUSPEND */ | 493 | #endif /* !CONFIG_SUSPEND */ |
496 | 494 | ||
497 | #ifdef CONFIG_HIBERNATION | 495 | #ifdef CONFIG_HIBERNATION |
498 | 496 | ||
499 | static int pci_pm_freeze(struct device *dev) | 497 | static int pci_pm_freeze(struct device *dev) |
500 | { | 498 | { |
501 | struct pci_dev *pci_dev = to_pci_dev(dev); | 499 | struct pci_dev *pci_dev = to_pci_dev(dev); |
502 | struct device_driver *drv = dev->driver; | 500 | struct device_driver *drv = dev->driver; |
503 | int error = 0; | 501 | int error = 0; |
504 | 502 | ||
505 | if (drv && drv->pm) { | 503 | if (drv && drv->pm) { |
506 | if (drv->pm->freeze) { | 504 | if (drv->pm->freeze) { |
507 | error = drv->pm->freeze(dev); | 505 | error = drv->pm->freeze(dev); |
508 | suspend_report_result(drv->pm->freeze, error); | 506 | suspend_report_result(drv->pm->freeze, error); |
509 | } else { | 507 | } else { |
510 | pci_default_pm_suspend(pci_dev); | 508 | pci_default_pm_suspend(pci_dev); |
511 | } | 509 | } |
512 | } else { | 510 | } else { |
513 | error = pci_legacy_suspend(dev, PMSG_FREEZE); | 511 | error = pci_legacy_suspend(dev, PMSG_FREEZE); |
514 | pci_fixup_device(pci_fixup_suspend, pci_dev); | 512 | pci_fixup_device(pci_fixup_suspend, pci_dev); |
515 | } | 513 | } |
516 | 514 | ||
517 | return error; | 515 | return error; |
518 | } | 516 | } |
519 | 517 | ||
520 | static int pci_pm_freeze_noirq(struct device *dev) | 518 | static int pci_pm_freeze_noirq(struct device *dev) |
521 | { | 519 | { |
522 | struct pci_dev *pci_dev = to_pci_dev(dev); | 520 | struct device_driver *drv = dev->driver; |
523 | struct pci_driver *drv = pci_dev->driver; | ||
524 | int error = 0; | 521 | int error = 0; |
525 | 522 | ||
526 | if (drv && drv->pm) { | 523 | if (drv && drv->pm) { |
527 | if (drv->pm->freeze_noirq) { | 524 | if (drv->pm->freeze_noirq) { |
528 | error = drv->pm->freeze_noirq(dev); | 525 | error = drv->pm->freeze_noirq(dev); |
529 | suspend_report_result(drv->pm->freeze_noirq, error); | 526 | suspend_report_result(drv->pm->freeze_noirq, error); |
530 | } | 527 | } |
531 | } else { | 528 | } else { |
532 | error = pci_legacy_suspend_late(dev, PMSG_FREEZE); | 529 | error = pci_legacy_suspend_late(dev, PMSG_FREEZE); |
533 | } | 530 | } |
534 | 531 | ||
535 | return error; | 532 | return error; |
536 | } | 533 | } |
537 | 534 | ||
538 | static int pci_pm_thaw(struct device *dev) | 535 | static int pci_pm_thaw(struct device *dev) |
539 | { | 536 | { |
540 | struct device_driver *drv = dev->driver; | 537 | struct device_driver *drv = dev->driver; |
541 | int error = 0; | 538 | int error = 0; |
542 | 539 | ||
543 | if (drv && drv->pm) { | 540 | if (drv && drv->pm) { |
544 | if (drv->pm->thaw) | 541 | if (drv->pm->thaw) |
545 | error = drv->pm->thaw(dev); | 542 | error = drv->pm->thaw(dev); |
546 | } else { | 543 | } else { |
547 | pci_fixup_device(pci_fixup_resume, to_pci_dev(dev)); | 544 | pci_fixup_device(pci_fixup_resume, to_pci_dev(dev)); |
548 | error = pci_legacy_resume(dev); | 545 | error = pci_legacy_resume(dev); |
549 | } | 546 | } |
550 | 547 | ||
551 | return error; | 548 | return error; |
552 | } | 549 | } |
553 | 550 | ||
554 | static int pci_pm_thaw_noirq(struct device *dev) | 551 | static int pci_pm_thaw_noirq(struct device *dev) |
555 | { | 552 | { |
556 | struct pci_dev *pci_dev = to_pci_dev(dev); | 553 | struct device_driver *drv = dev->driver; |
557 | struct pci_driver *drv = pci_dev->driver; | ||
558 | int error = 0; | 554 | int error = 0; |
559 | 555 | ||
560 | if (drv && drv->pm) { | 556 | if (drv && drv->pm) { |
561 | if (drv->pm->thaw_noirq) | 557 | if (drv->pm->thaw_noirq) |
562 | error = drv->pm->thaw_noirq(dev); | 558 | error = drv->pm->thaw_noirq(dev); |
563 | } else { | 559 | } else { |
564 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 560 | pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev)); |
565 | error = pci_legacy_resume_early(dev); | 561 | error = pci_legacy_resume_early(dev); |
566 | } | 562 | } |
567 | 563 | ||
568 | return error; | 564 | return error; |
569 | } | 565 | } |
570 | 566 | ||
571 | static int pci_pm_poweroff(struct device *dev) | 567 | static int pci_pm_poweroff(struct device *dev) |
572 | { | 568 | { |
573 | struct device_driver *drv = dev->driver; | 569 | struct device_driver *drv = dev->driver; |
574 | int error = 0; | 570 | int error = 0; |
575 | 571 | ||
576 | pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); | 572 | pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); |
577 | 573 | ||
578 | if (drv && drv->pm) { | 574 | if (drv && drv->pm) { |
579 | if (drv->pm->poweroff) { | 575 | if (drv->pm->poweroff) { |
580 | error = drv->pm->poweroff(dev); | 576 | error = drv->pm->poweroff(dev); |
581 | suspend_report_result(drv->pm->poweroff, error); | 577 | suspend_report_result(drv->pm->poweroff, error); |
582 | } | 578 | } |
583 | } else { | 579 | } else { |
584 | error = pci_legacy_suspend(dev, PMSG_HIBERNATE); | 580 | error = pci_legacy_suspend(dev, PMSG_HIBERNATE); |
585 | } | 581 | } |
586 | 582 | ||
587 | return error; | 583 | return error; |
588 | } | 584 | } |
589 | 585 | ||
590 | static int pci_pm_poweroff_noirq(struct device *dev) | 586 | static int pci_pm_poweroff_noirq(struct device *dev) |
591 | { | 587 | { |
592 | struct pci_dev *pci_dev = to_pci_dev(dev); | 588 | struct device_driver *drv = dev->driver; |
593 | struct pci_driver *drv = pci_dev->driver; | ||
594 | int error = 0; | 589 | int error = 0; |
595 | 590 | ||
596 | if (drv && drv->pm) { | 591 | if (drv && drv->pm) { |
597 | if (drv->pm->poweroff_noirq) { | 592 | if (drv->pm->poweroff_noirq) { |
598 | error = drv->pm->poweroff_noirq(dev); | 593 | error = drv->pm->poweroff_noirq(dev); |
599 | suspend_report_result(drv->pm->poweroff_noirq, error); | 594 | suspend_report_result(drv->pm->poweroff_noirq, error); |
600 | } | 595 | } |
601 | } else { | 596 | } else { |
602 | error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); | 597 | error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); |
603 | } | 598 | } |
604 | 599 | ||
605 | return error; | 600 | return error; |
606 | } | 601 | } |
607 | 602 | ||
608 | static int pci_pm_restore(struct device *dev) | 603 | static int pci_pm_restore(struct device *dev) |
609 | { | 604 | { |
610 | struct pci_dev *pci_dev = to_pci_dev(dev); | 605 | struct pci_dev *pci_dev = to_pci_dev(dev); |
611 | struct device_driver *drv = dev->driver; | 606 | struct device_driver *drv = dev->driver; |
612 | int error; | 607 | int error; |
613 | 608 | ||
614 | if (drv && drv->pm) { | 609 | if (drv && drv->pm) { |
615 | error = drv->pm->restore ? drv->pm->restore(dev) : | 610 | error = drv->pm->restore ? drv->pm->restore(dev) : |
616 | pci_default_pm_resume(pci_dev); | 611 | pci_default_pm_resume(pci_dev); |
617 | } else { | 612 | } else { |
618 | error = pci_legacy_resume(dev); | 613 | error = pci_legacy_resume(dev); |
619 | } | 614 | } |
620 | pci_fixup_device(pci_fixup_resume, pci_dev); | 615 | pci_fixup_device(pci_fixup_resume, pci_dev); |
621 | 616 | ||
622 | return error; | 617 | return error; |
623 | } | 618 | } |
624 | 619 | ||
625 | static int pci_pm_restore_noirq(struct device *dev) | 620 | static int pci_pm_restore_noirq(struct device *dev) |
626 | { | 621 | { |
627 | struct pci_dev *pci_dev = to_pci_dev(dev); | 622 | struct pci_dev *pci_dev = to_pci_dev(dev); |
628 | struct pci_driver *drv = pci_dev->driver; | 623 | struct device_driver *drv = dev->driver; |
629 | int error = 0; | 624 | int error = 0; |
630 | 625 | ||
631 | pci_fixup_device(pci_fixup_resume, pci_dev); | 626 | pci_fixup_device(pci_fixup_resume, pci_dev); |
632 | 627 | ||
633 | if (drv && drv->pm) { | 628 | if (drv && drv->pm) { |
634 | if (drv->pm->restore_noirq) | 629 | if (drv->pm->restore_noirq) |
635 | error = drv->pm->restore_noirq(dev); | 630 | error = drv->pm->restore_noirq(dev); |
636 | } else { | 631 | } else { |
637 | error = pci_legacy_resume_early(dev); | 632 | error = pci_legacy_resume_early(dev); |
638 | } | 633 | } |
639 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 634 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
640 | 635 | ||
641 | return error; | 636 | return error; |
642 | } | 637 | } |
643 | 638 | ||
644 | #else /* !CONFIG_HIBERNATION */ | 639 | #else /* !CONFIG_HIBERNATION */ |
645 | 640 | ||
646 | #define pci_pm_freeze NULL | 641 | #define pci_pm_freeze NULL |
647 | #define pci_pm_freeze_noirq NULL | 642 | #define pci_pm_freeze_noirq NULL |
648 | #define pci_pm_thaw NULL | 643 | #define pci_pm_thaw NULL |
649 | #define pci_pm_thaw_noirq NULL | 644 | #define pci_pm_thaw_noirq NULL |
650 | #define pci_pm_poweroff NULL | 645 | #define pci_pm_poweroff NULL |
651 | #define pci_pm_poweroff_noirq NULL | 646 | #define pci_pm_poweroff_noirq NULL |
652 | #define pci_pm_restore NULL | 647 | #define pci_pm_restore NULL |
653 | #define pci_pm_restore_noirq NULL | 648 | #define pci_pm_restore_noirq NULL |
654 | 649 | ||
655 | #endif /* !CONFIG_HIBERNATION */ | 650 | #endif /* !CONFIG_HIBERNATION */ |
656 | 651 | ||
657 | struct pm_ext_ops pci_pm_ops = { | 652 | struct dev_pm_ops pci_dev_pm_ops = { |
658 | .base = { | 653 | .prepare = pci_pm_prepare, |
659 | .prepare = pci_pm_prepare, | 654 | .complete = pci_pm_complete, |
660 | .complete = pci_pm_complete, | 655 | .suspend = pci_pm_suspend, |
661 | .suspend = pci_pm_suspend, | 656 | .resume = pci_pm_resume, |
662 | .resume = pci_pm_resume, | 657 | .freeze = pci_pm_freeze, |
663 | .freeze = pci_pm_freeze, | 658 | .thaw = pci_pm_thaw, |
664 | .thaw = pci_pm_thaw, | 659 | .poweroff = pci_pm_poweroff, |
665 | .poweroff = pci_pm_poweroff, | 660 | .restore = pci_pm_restore, |
666 | .restore = pci_pm_restore, | ||
667 | }, | ||
668 | .suspend_noirq = pci_pm_suspend_noirq, | 661 | .suspend_noirq = pci_pm_suspend_noirq, |
669 | .resume_noirq = pci_pm_resume_noirq, | 662 | .resume_noirq = pci_pm_resume_noirq, |
670 | .freeze_noirq = pci_pm_freeze_noirq, | 663 | .freeze_noirq = pci_pm_freeze_noirq, |
671 | .thaw_noirq = pci_pm_thaw_noirq, | 664 | .thaw_noirq = pci_pm_thaw_noirq, |
672 | .poweroff_noirq = pci_pm_poweroff_noirq, | 665 | .poweroff_noirq = pci_pm_poweroff_noirq, |
673 | .restore_noirq = pci_pm_restore_noirq, | 666 | .restore_noirq = pci_pm_restore_noirq, |
674 | }; | 667 | }; |
675 | 668 | ||
676 | #define PCI_PM_OPS_PTR &pci_pm_ops | 669 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) |
677 | 670 | ||
678 | #else /* !CONFIG_PM_SLEEP */ | 671 | #else /* !CONFIG_PM_SLEEP */ |
679 | 672 | ||
680 | #define PCI_PM_OPS_PTR NULL | 673 | #define PCI_PM_OPS_PTR NULL |
681 | 674 | ||
682 | #endif /* !CONFIG_PM_SLEEP */ | 675 | #endif /* !CONFIG_PM_SLEEP */ |
683 | 676 | ||
684 | /** | 677 | /** |
685 | * __pci_register_driver - register a new pci driver | 678 | * __pci_register_driver - register a new pci driver |
686 | * @drv: the driver structure to register | 679 | * @drv: the driver structure to register |
687 | * @owner: owner module of drv | 680 | * @owner: owner module of drv |
688 | * @mod_name: module name string | 681 | * @mod_name: module name string |
689 | * | 682 | * |
690 | * Adds the driver structure to the list of registered drivers. | 683 | * Adds the driver structure to the list of registered drivers. |
691 | * Returns a negative value on error, otherwise 0. | 684 | * Returns a negative value on error, otherwise 0. |
692 | * If no error occurred, the driver remains registered even if | 685 | * If no error occurred, the driver remains registered even if |
693 | * no device was claimed during registration. | 686 | * no device was claimed during registration. |
694 | */ | 687 | */ |
695 | int __pci_register_driver(struct pci_driver *drv, struct module *owner, | 688 | int __pci_register_driver(struct pci_driver *drv, struct module *owner, |
696 | const char *mod_name) | 689 | const char *mod_name) |
697 | { | 690 | { |
698 | int error; | 691 | int error; |
699 | 692 | ||
700 | /* initialize common driver fields */ | 693 | /* initialize common driver fields */ |
701 | drv->driver.name = drv->name; | 694 | drv->driver.name = drv->name; |
702 | drv->driver.bus = &pci_bus_type; | 695 | drv->driver.bus = &pci_bus_type; |
703 | drv->driver.owner = owner; | 696 | drv->driver.owner = owner; |
704 | drv->driver.mod_name = mod_name; | 697 | drv->driver.mod_name = mod_name; |
705 | |||
706 | if (drv->pm) | ||
707 | drv->driver.pm = &drv->pm->base; | ||
708 | 698 | ||
709 | spin_lock_init(&drv->dynids.lock); | 699 | spin_lock_init(&drv->dynids.lock); |
710 | INIT_LIST_HEAD(&drv->dynids.list); | 700 | INIT_LIST_HEAD(&drv->dynids.list); |
711 | 701 | ||
712 | /* register with core */ | 702 | /* register with core */ |
713 | error = driver_register(&drv->driver); | 703 | error = driver_register(&drv->driver); |
714 | if (error) | 704 | if (error) |
715 | return error; | 705 | return error; |
716 | 706 | ||
717 | error = pci_create_newid_file(drv); | 707 | error = pci_create_newid_file(drv); |
718 | if (error) | 708 | if (error) |
719 | driver_unregister(&drv->driver); | 709 | driver_unregister(&drv->driver); |
720 | 710 | ||
721 | return error; | 711 | return error; |
722 | } | 712 | } |
723 | 713 | ||
724 | /** | 714 | /** |
725 | * pci_unregister_driver - unregister a pci driver | 715 | * pci_unregister_driver - unregister a pci driver |
726 | * @drv: the driver structure to unregister | 716 | * @drv: the driver structure to unregister |
727 | * | 717 | * |
728 | * Deletes the driver structure from the list of registered PCI drivers, | 718 | * Deletes the driver structure from the list of registered PCI drivers, |
729 | * gives it a chance to clean up by calling its remove() function for | 719 | * gives it a chance to clean up by calling its remove() function for |
730 | * each device it was responsible for, and marks those devices as | 720 | * each device it was responsible for, and marks those devices as |
731 | * driverless. | 721 | * driverless. |
732 | */ | 722 | */ |
733 | 723 | ||
734 | void | 724 | void |
735 | pci_unregister_driver(struct pci_driver *drv) | 725 | pci_unregister_driver(struct pci_driver *drv) |
736 | { | 726 | { |
737 | pci_remove_newid_file(drv); | 727 | pci_remove_newid_file(drv); |
738 | driver_unregister(&drv->driver); | 728 | driver_unregister(&drv->driver); |
739 | pci_free_dynids(drv); | 729 | pci_free_dynids(drv); |
740 | } | 730 | } |
741 | 731 | ||
742 | static struct pci_driver pci_compat_driver = { | 732 | static struct pci_driver pci_compat_driver = { |
743 | .name = "compat" | 733 | .name = "compat" |
744 | }; | 734 | }; |
745 | 735 | ||
746 | /** | 736 | /** |
747 | * pci_dev_driver - get the pci_driver of a device | 737 | * pci_dev_driver - get the pci_driver of a device |
748 | * @dev: the device to query | 738 | * @dev: the device to query |
749 | * | 739 | * |
750 | * Returns the appropriate pci_driver structure or %NULL if there is no | 740 | * Returns the appropriate pci_driver structure or %NULL if there is no |
751 | * registered driver for the device. | 741 | * registered driver for the device. |
752 | */ | 742 | */ |
753 | struct pci_driver * | 743 | struct pci_driver * |
754 | pci_dev_driver(const struct pci_dev *dev) | 744 | pci_dev_driver(const struct pci_dev *dev) |
755 | { | 745 | { |
756 | if (dev->driver) | 746 | if (dev->driver) |
757 | return dev->driver; | 747 | return dev->driver; |
758 | else { | 748 | else { |
759 | int i; | 749 | int i; |
760 | for(i=0; i<=PCI_ROM_RESOURCE; i++) | 750 | for(i=0; i<=PCI_ROM_RESOURCE; i++) |
761 | if (dev->resource[i].flags & IORESOURCE_BUSY) | 751 | if (dev->resource[i].flags & IORESOURCE_BUSY) |
762 | return &pci_compat_driver; | 752 | return &pci_compat_driver; |
763 | } | 753 | } |
764 | return NULL; | 754 | return NULL; |
765 | } | 755 | } |
766 | 756 | ||
767 | /** | 757 | /** |
768 | * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure | 758 | * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure |
769 | * @dev: the PCI device structure to match against | 759 | * @dev: the PCI device structure to match against |
770 | * @drv: the device driver to search for matching PCI device id structures | 760 | * @drv: the device driver to search for matching PCI device id structures |
771 | * | 761 | * |
772 | * Used by a driver to check whether a PCI device present in the | 762 | * Used by a driver to check whether a PCI device present in the |
773 | * system is in its list of supported devices. Returns the matching | 763 | * system is in its list of supported devices. Returns the matching |
774 | * pci_device_id structure or %NULL if there is no match. | 764 | * pci_device_id structure or %NULL if there is no match. |
775 | */ | 765 | */ |
776 | static int pci_bus_match(struct device *dev, struct device_driver *drv) | 766 | static int pci_bus_match(struct device *dev, struct device_driver *drv) |
777 | { | 767 | { |
778 | struct pci_dev *pci_dev = to_pci_dev(dev); | 768 | struct pci_dev *pci_dev = to_pci_dev(dev); |
779 | struct pci_driver *pci_drv = to_pci_driver(drv); | 769 | struct pci_driver *pci_drv = to_pci_driver(drv); |
780 | const struct pci_device_id *found_id; | 770 | const struct pci_device_id *found_id; |
781 | 771 | ||
782 | found_id = pci_match_device(pci_drv, pci_dev); | 772 | found_id = pci_match_device(pci_drv, pci_dev); |
783 | if (found_id) | 773 | if (found_id) |
784 | return 1; | 774 | return 1; |
785 | 775 | ||
786 | return 0; | 776 | return 0; |
787 | } | 777 | } |
788 | 778 | ||
789 | /** | 779 | /** |
790 | * pci_dev_get - increments the reference count of the pci device structure | 780 | * pci_dev_get - increments the reference count of the pci device structure |
791 | * @dev: the device being referenced | 781 | * @dev: the device being referenced |
792 | * | 782 | * |
793 | * Each live reference to a device should be refcounted. | 783 | * Each live reference to a device should be refcounted. |
794 | * | 784 | * |
795 | * Drivers for PCI devices should normally record such references in | 785 | * Drivers for PCI devices should normally record such references in |
796 | * their probe() methods, when they bind to a device, and release | 786 | * their probe() methods, when they bind to a device, and release |
797 | * them by calling pci_dev_put(), in their disconnect() methods. | 787 | * them by calling pci_dev_put(), in their disconnect() methods. |
798 | * | 788 | * |
799 | * A pointer to the device with the incremented reference counter is returned. | 789 | * A pointer to the device with the incremented reference counter is returned. |
800 | */ | 790 | */ |
801 | struct pci_dev *pci_dev_get(struct pci_dev *dev) | 791 | struct pci_dev *pci_dev_get(struct pci_dev *dev) |
802 | { | 792 | { |
803 | if (dev) | 793 | if (dev) |
804 | get_device(&dev->dev); | 794 | get_device(&dev->dev); |
805 | return dev; | 795 | return dev; |
806 | } | 796 | } |
807 | 797 | ||
808 | /** | 798 | /** |
809 | * pci_dev_put - release a use of the pci device structure | 799 | * pci_dev_put - release a use of the pci device structure |
810 | * @dev: device that's been disconnected | 800 | * @dev: device that's been disconnected |
811 | * | 801 | * |
812 | * Must be called when a user of a device is finished with it. When the last | 802 | * Must be called when a user of a device is finished with it. When the last |
813 | * user of the device calls this function, the memory of the device is freed. | 803 | * user of the device calls this function, the memory of the device is freed. |
814 | */ | 804 | */ |
815 | void pci_dev_put(struct pci_dev *dev) | 805 | void pci_dev_put(struct pci_dev *dev) |
816 | { | 806 | { |
817 | if (dev) | 807 | if (dev) |
818 | put_device(&dev->dev); | 808 | put_device(&dev->dev); |
819 | } | 809 | } |
820 | 810 | ||
821 | #ifndef CONFIG_HOTPLUG | 811 | #ifndef CONFIG_HOTPLUG |
822 | int pci_uevent(struct device *dev, struct kobj_uevent_env *env) | 812 | int pci_uevent(struct device *dev, struct kobj_uevent_env *env) |
823 | { | 813 | { |
824 | return -ENODEV; | 814 | return -ENODEV; |
825 | } | 815 | } |
826 | #endif | 816 | #endif |
827 | 817 | ||
828 | struct bus_type pci_bus_type = { | 818 | struct bus_type pci_bus_type = { |
829 | .name = "pci", | 819 | .name = "pci", |
830 | .match = pci_bus_match, | 820 | .match = pci_bus_match, |
831 | .uevent = pci_uevent, | 821 | .uevent = pci_uevent, |
832 | .probe = pci_device_probe, | 822 | .probe = pci_device_probe, |
833 | .remove = pci_device_remove, | 823 | .remove = pci_device_remove, |
834 | .shutdown = pci_device_shutdown, | 824 | .shutdown = pci_device_shutdown, |
835 | .dev_attrs = pci_dev_attrs, | 825 | .dev_attrs = pci_dev_attrs, |
836 | .pm = PCI_PM_OPS_PTR, | 826 | .pm = PCI_PM_OPS_PTR, |
837 | }; | 827 | }; |
838 | 828 | ||
839 | static int __init pci_driver_init(void) | 829 | static int __init pci_driver_init(void) |
840 | { | 830 | { |
841 | return bus_register(&pci_bus_type); | 831 | return bus_register(&pci_bus_type); |
842 | } | 832 | } |
843 | 833 | ||
844 | postcore_initcall(pci_driver_init); | 834 | postcore_initcall(pci_driver_init); |
845 | 835 | ||
846 | EXPORT_SYMBOL(pci_match_id); | 836 | EXPORT_SYMBOL(pci_match_id); |
847 | EXPORT_SYMBOL(__pci_register_driver); | 837 | EXPORT_SYMBOL(__pci_register_driver); |
848 | EXPORT_SYMBOL(pci_unregister_driver); | 838 | EXPORT_SYMBOL(pci_unregister_driver); |
849 | EXPORT_SYMBOL(pci_dev_driver); | 839 | EXPORT_SYMBOL(pci_dev_driver); |
850 | EXPORT_SYMBOL(pci_bus_type); | 840 | EXPORT_SYMBOL(pci_bus_type); |
851 | EXPORT_SYMBOL(pci_dev_get); | 841 | EXPORT_SYMBOL(pci_dev_get); |
852 | EXPORT_SYMBOL(pci_dev_put); | 842 | EXPORT_SYMBOL(pci_dev_put); |
853 | 843 |
drivers/usb/core/usb.c
1 | /* | 1 | /* |
2 | * drivers/usb/core/usb.c | 2 | * drivers/usb/core/usb.c |
3 | * | 3 | * |
4 | * (C) Copyright Linus Torvalds 1999 | 4 | * (C) Copyright Linus Torvalds 1999 |
5 | * (C) Copyright Johannes Erdfelt 1999-2001 | 5 | * (C) Copyright Johannes Erdfelt 1999-2001 |
6 | * (C) Copyright Andreas Gal 1999 | 6 | * (C) Copyright Andreas Gal 1999 |
7 | * (C) Copyright Gregory P. Smith 1999 | 7 | * (C) Copyright Gregory P. Smith 1999 |
8 | * (C) Copyright Deti Fliegl 1999 (new USB architecture) | 8 | * (C) Copyright Deti Fliegl 1999 (new USB architecture) |
9 | * (C) Copyright Randy Dunlap 2000 | 9 | * (C) Copyright Randy Dunlap 2000 |
10 | * (C) Copyright David Brownell 2000-2004 | 10 | * (C) Copyright David Brownell 2000-2004 |
11 | * (C) Copyright Yggdrasil Computing, Inc. 2000 | 11 | * (C) Copyright Yggdrasil Computing, Inc. 2000 |
12 | * (usb_device_id matching changes by Adam J. Richter) | 12 | * (usb_device_id matching changes by Adam J. Richter) |
13 | * (C) Copyright Greg Kroah-Hartman 2002-2003 | 13 | * (C) Copyright Greg Kroah-Hartman 2002-2003 |
14 | * | 14 | * |
15 | * NOTE! This is not actually a driver at all, rather this is | 15 | * NOTE! This is not actually a driver at all, rather this is |
16 | * just a collection of helper routines that implement the | 16 | * just a collection of helper routines that implement the |
17 | * generic USB things that the real drivers can use.. | 17 | * generic USB things that the real drivers can use.. |
18 | * | 18 | * |
19 | * Think of this as a "USB library" rather than anything else. | 19 | * Think of this as a "USB library" rather than anything else. |
20 | * It should be considered a slave, with no callbacks. Callbacks | 20 | * It should be considered a slave, with no callbacks. Callbacks |
21 | * are evil. | 21 | * are evil. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 25 | #include <linux/moduleparam.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/bitops.h> | 27 | #include <linux/bitops.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/interrupt.h> /* for in_interrupt() */ | 29 | #include <linux/interrupt.h> /* for in_interrupt() */ |
30 | #include <linux/kmod.h> | 30 | #include <linux/kmod.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
33 | #include <linux/errno.h> | 33 | #include <linux/errno.h> |
34 | #include <linux/usb.h> | 34 | #include <linux/usb.h> |
35 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | 37 | ||
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
40 | #include <linux/mm.h> | 40 | #include <linux/mm.h> |
41 | #include <linux/dma-mapping.h> | 41 | #include <linux/dma-mapping.h> |
42 | 42 | ||
43 | #include "hcd.h" | 43 | #include "hcd.h" |
44 | #include "usb.h" | 44 | #include "usb.h" |
45 | 45 | ||
46 | 46 | ||
47 | const char *usbcore_name = "usbcore"; | 47 | const char *usbcore_name = "usbcore"; |
48 | 48 | ||
49 | static int nousb; /* Disable USB when built into kernel image */ | 49 | static int nousb; /* Disable USB when built into kernel image */ |
50 | 50 | ||
51 | /* Workqueue for autosuspend and for remote wakeup of root hubs */ | 51 | /* Workqueue for autosuspend and for remote wakeup of root hubs */ |
52 | struct workqueue_struct *ksuspend_usb_wq; | 52 | struct workqueue_struct *ksuspend_usb_wq; |
53 | 53 | ||
54 | #ifdef CONFIG_USB_SUSPEND | 54 | #ifdef CONFIG_USB_SUSPEND |
55 | static int usb_autosuspend_delay = 2; /* Default delay value, | 55 | static int usb_autosuspend_delay = 2; /* Default delay value, |
56 | * in seconds */ | 56 | * in seconds */ |
57 | module_param_named(autosuspend, usb_autosuspend_delay, int, 0644); | 57 | module_param_named(autosuspend, usb_autosuspend_delay, int, 0644); |
58 | MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); | 58 | MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); |
59 | 59 | ||
60 | #else | 60 | #else |
61 | #define usb_autosuspend_delay 0 | 61 | #define usb_autosuspend_delay 0 |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | 64 | ||
65 | /** | 65 | /** |
66 | * usb_ifnum_to_if - get the interface object with a given interface number | 66 | * usb_ifnum_to_if - get the interface object with a given interface number |
67 | * @dev: the device whose current configuration is considered | 67 | * @dev: the device whose current configuration is considered |
68 | * @ifnum: the desired interface | 68 | * @ifnum: the desired interface |
69 | * | 69 | * |
70 | * This walks the device descriptor for the currently active configuration | 70 | * This walks the device descriptor for the currently active configuration |
71 | * and returns a pointer to the interface with that particular interface | 71 | * and returns a pointer to the interface with that particular interface |
72 | * number, or null. | 72 | * number, or null. |
73 | * | 73 | * |
74 | * Note that configuration descriptors are not required to assign interface | 74 | * Note that configuration descriptors are not required to assign interface |
75 | * numbers sequentially, so that it would be incorrect to assume that | 75 | * numbers sequentially, so that it would be incorrect to assume that |
76 | * the first interface in that descriptor corresponds to interface zero. | 76 | * the first interface in that descriptor corresponds to interface zero. |
77 | * This routine helps device drivers avoid such mistakes. | 77 | * This routine helps device drivers avoid such mistakes. |
78 | * However, you should make sure that you do the right thing with any | 78 | * However, you should make sure that you do the right thing with any |
79 | * alternate settings available for this interfaces. | 79 | * alternate settings available for this interfaces. |
80 | * | 80 | * |
81 | * Don't call this function unless you are bound to one of the interfaces | 81 | * Don't call this function unless you are bound to one of the interfaces |
82 | * on this device or you have locked the device! | 82 | * on this device or you have locked the device! |
83 | */ | 83 | */ |
84 | struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, | 84 | struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, |
85 | unsigned ifnum) | 85 | unsigned ifnum) |
86 | { | 86 | { |
87 | struct usb_host_config *config = dev->actconfig; | 87 | struct usb_host_config *config = dev->actconfig; |
88 | int i; | 88 | int i; |
89 | 89 | ||
90 | if (!config) | 90 | if (!config) |
91 | return NULL; | 91 | return NULL; |
92 | for (i = 0; i < config->desc.bNumInterfaces; i++) | 92 | for (i = 0; i < config->desc.bNumInterfaces; i++) |
93 | if (config->interface[i]->altsetting[0] | 93 | if (config->interface[i]->altsetting[0] |
94 | .desc.bInterfaceNumber == ifnum) | 94 | .desc.bInterfaceNumber == ifnum) |
95 | return config->interface[i]; | 95 | return config->interface[i]; |
96 | 96 | ||
97 | return NULL; | 97 | return NULL; |
98 | } | 98 | } |
99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); | 99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. | 102 | * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. |
103 | * @intf: the interface containing the altsetting in question | 103 | * @intf: the interface containing the altsetting in question |
104 | * @altnum: the desired alternate setting number | 104 | * @altnum: the desired alternate setting number |
105 | * | 105 | * |
106 | * This searches the altsetting array of the specified interface for | 106 | * This searches the altsetting array of the specified interface for |
107 | * an entry with the correct bAlternateSetting value and returns a pointer | 107 | * an entry with the correct bAlternateSetting value and returns a pointer |
108 | * to that entry, or null. | 108 | * to that entry, or null. |
109 | * | 109 | * |
110 | * Note that altsettings need not be stored sequentially by number, so | 110 | * Note that altsettings need not be stored sequentially by number, so |
111 | * it would be incorrect to assume that the first altsetting entry in | 111 | * it would be incorrect to assume that the first altsetting entry in |
112 | * the array corresponds to altsetting zero. This routine helps device | 112 | * the array corresponds to altsetting zero. This routine helps device |
113 | * drivers avoid such mistakes. | 113 | * drivers avoid such mistakes. |
114 | * | 114 | * |
115 | * Don't call this function unless you are bound to the intf interface | 115 | * Don't call this function unless you are bound to the intf interface |
116 | * or you have locked the device! | 116 | * or you have locked the device! |
117 | */ | 117 | */ |
118 | struct usb_host_interface *usb_altnum_to_altsetting( | 118 | struct usb_host_interface *usb_altnum_to_altsetting( |
119 | const struct usb_interface *intf, | 119 | const struct usb_interface *intf, |
120 | unsigned int altnum) | 120 | unsigned int altnum) |
121 | { | 121 | { |
122 | int i; | 122 | int i; |
123 | 123 | ||
124 | for (i = 0; i < intf->num_altsetting; i++) { | 124 | for (i = 0; i < intf->num_altsetting; i++) { |
125 | if (intf->altsetting[i].desc.bAlternateSetting == altnum) | 125 | if (intf->altsetting[i].desc.bAlternateSetting == altnum) |
126 | return &intf->altsetting[i]; | 126 | return &intf->altsetting[i]; |
127 | } | 127 | } |
128 | return NULL; | 128 | return NULL; |
129 | } | 129 | } |
130 | EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); | 130 | EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); |
131 | 131 | ||
132 | struct find_interface_arg { | 132 | struct find_interface_arg { |
133 | int minor; | 133 | int minor; |
134 | struct usb_interface *interface; | 134 | struct usb_interface *interface; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | static int __find_interface(struct device *dev, void *data) | 137 | static int __find_interface(struct device *dev, void *data) |
138 | { | 138 | { |
139 | struct find_interface_arg *arg = data; | 139 | struct find_interface_arg *arg = data; |
140 | struct usb_interface *intf; | 140 | struct usb_interface *intf; |
141 | 141 | ||
142 | /* can't look at usb devices, only interfaces */ | 142 | /* can't look at usb devices, only interfaces */ |
143 | if (is_usb_device(dev)) | 143 | if (is_usb_device(dev)) |
144 | return 0; | 144 | return 0; |
145 | 145 | ||
146 | intf = to_usb_interface(dev); | 146 | intf = to_usb_interface(dev); |
147 | if (intf->minor != -1 && intf->minor == arg->minor) { | 147 | if (intf->minor != -1 && intf->minor == arg->minor) { |
148 | arg->interface = intf; | 148 | arg->interface = intf; |
149 | return 1; | 149 | return 1; |
150 | } | 150 | } |
151 | return 0; | 151 | return 0; |
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * usb_find_interface - find usb_interface pointer for driver and device | 155 | * usb_find_interface - find usb_interface pointer for driver and device |
156 | * @drv: the driver whose current configuration is considered | 156 | * @drv: the driver whose current configuration is considered |
157 | * @minor: the minor number of the desired device | 157 | * @minor: the minor number of the desired device |
158 | * | 158 | * |
159 | * This walks the driver device list and returns a pointer to the interface | 159 | * This walks the driver device list and returns a pointer to the interface |
160 | * with the matching minor. Note, this only works for devices that share the | 160 | * with the matching minor. Note, this only works for devices that share the |
161 | * USB major number. | 161 | * USB major number. |
162 | */ | 162 | */ |
163 | struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) | 163 | struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) |
164 | { | 164 | { |
165 | struct find_interface_arg argb; | 165 | struct find_interface_arg argb; |
166 | int retval; | 166 | int retval; |
167 | 167 | ||
168 | argb.minor = minor; | 168 | argb.minor = minor; |
169 | argb.interface = NULL; | 169 | argb.interface = NULL; |
170 | /* eat the error, it will be in argb.interface */ | 170 | /* eat the error, it will be in argb.interface */ |
171 | retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb, | 171 | retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb, |
172 | __find_interface); | 172 | __find_interface); |
173 | return argb.interface; | 173 | return argb.interface; |
174 | } | 174 | } |
175 | EXPORT_SYMBOL_GPL(usb_find_interface); | 175 | EXPORT_SYMBOL_GPL(usb_find_interface); |
176 | 176 | ||
177 | /** | 177 | /** |
178 | * usb_release_dev - free a usb device structure when all users of it are finished. | 178 | * usb_release_dev - free a usb device structure when all users of it are finished. |
179 | * @dev: device that's been disconnected | 179 | * @dev: device that's been disconnected |
180 | * | 180 | * |
181 | * Will be called only by the device core when all users of this usb device are | 181 | * Will be called only by the device core when all users of this usb device are |
182 | * done. | 182 | * done. |
183 | */ | 183 | */ |
184 | static void usb_release_dev(struct device *dev) | 184 | static void usb_release_dev(struct device *dev) |
185 | { | 185 | { |
186 | struct usb_device *udev; | 186 | struct usb_device *udev; |
187 | 187 | ||
188 | udev = to_usb_device(dev); | 188 | udev = to_usb_device(dev); |
189 | 189 | ||
190 | usb_destroy_configuration(udev); | 190 | usb_destroy_configuration(udev); |
191 | usb_put_hcd(bus_to_hcd(udev->bus)); | 191 | usb_put_hcd(bus_to_hcd(udev->bus)); |
192 | kfree(udev->product); | 192 | kfree(udev->product); |
193 | kfree(udev->manufacturer); | 193 | kfree(udev->manufacturer); |
194 | kfree(udev->serial); | 194 | kfree(udev->serial); |
195 | kfree(udev); | 195 | kfree(udev); |
196 | } | 196 | } |
197 | 197 | ||
198 | #ifdef CONFIG_HOTPLUG | 198 | #ifdef CONFIG_HOTPLUG |
199 | static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | 199 | static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) |
200 | { | 200 | { |
201 | struct usb_device *usb_dev; | 201 | struct usb_device *usb_dev; |
202 | 202 | ||
203 | usb_dev = to_usb_device(dev); | 203 | usb_dev = to_usb_device(dev); |
204 | 204 | ||
205 | if (add_uevent_var(env, "BUSNUM=%03d", usb_dev->bus->busnum)) | 205 | if (add_uevent_var(env, "BUSNUM=%03d", usb_dev->bus->busnum)) |
206 | return -ENOMEM; | 206 | return -ENOMEM; |
207 | 207 | ||
208 | if (add_uevent_var(env, "DEVNUM=%03d", usb_dev->devnum)) | 208 | if (add_uevent_var(env, "DEVNUM=%03d", usb_dev->devnum)) |
209 | return -ENOMEM; | 209 | return -ENOMEM; |
210 | 210 | ||
211 | return 0; | 211 | return 0; |
212 | } | 212 | } |
213 | 213 | ||
214 | #else | 214 | #else |
215 | 215 | ||
216 | static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | 216 | static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) |
217 | { | 217 | { |
218 | return -ENODEV; | 218 | return -ENODEV; |
219 | } | 219 | } |
220 | #endif /* CONFIG_HOTPLUG */ | 220 | #endif /* CONFIG_HOTPLUG */ |
221 | 221 | ||
222 | #ifdef CONFIG_PM | 222 | #ifdef CONFIG_PM |
223 | 223 | ||
224 | static int ksuspend_usb_init(void) | 224 | static int ksuspend_usb_init(void) |
225 | { | 225 | { |
226 | /* This workqueue is supposed to be both freezable and | 226 | /* This workqueue is supposed to be both freezable and |
227 | * singlethreaded. Its job doesn't justify running on more | 227 | * singlethreaded. Its job doesn't justify running on more |
228 | * than one CPU. | 228 | * than one CPU. |
229 | */ | 229 | */ |
230 | ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); | 230 | ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); |
231 | if (!ksuspend_usb_wq) | 231 | if (!ksuspend_usb_wq) |
232 | return -ENOMEM; | 232 | return -ENOMEM; |
233 | return 0; | 233 | return 0; |
234 | } | 234 | } |
235 | 235 | ||
236 | static void ksuspend_usb_cleanup(void) | 236 | static void ksuspend_usb_cleanup(void) |
237 | { | 237 | { |
238 | destroy_workqueue(ksuspend_usb_wq); | 238 | destroy_workqueue(ksuspend_usb_wq); |
239 | } | 239 | } |
240 | 240 | ||
241 | /* USB device Power-Management thunks. | 241 | /* USB device Power-Management thunks. |
242 | * There's no need to distinguish here between quiescing a USB device | 242 | * There's no need to distinguish here between quiescing a USB device |
243 | * and powering it down; the generic_suspend() routine takes care of | 243 | * and powering it down; the generic_suspend() routine takes care of |
244 | * it by skipping the usb_port_suspend() call for a quiesce. And for | 244 | * it by skipping the usb_port_suspend() call for a quiesce. And for |
245 | * USB interfaces there's no difference at all. | 245 | * USB interfaces there's no difference at all. |
246 | */ | 246 | */ |
247 | 247 | ||
248 | static int usb_dev_prepare(struct device *dev) | 248 | static int usb_dev_prepare(struct device *dev) |
249 | { | 249 | { |
250 | return 0; /* Implement eventually? */ | 250 | return 0; /* Implement eventually? */ |
251 | } | 251 | } |
252 | 252 | ||
253 | static void usb_dev_complete(struct device *dev) | 253 | static void usb_dev_complete(struct device *dev) |
254 | { | 254 | { |
255 | /* Currently used only for rebinding interfaces */ | 255 | /* Currently used only for rebinding interfaces */ |
256 | usb_resume(dev); /* Implement eventually? */ | 256 | usb_resume(dev); /* Implement eventually? */ |
257 | } | 257 | } |
258 | 258 | ||
259 | static int usb_dev_suspend(struct device *dev) | 259 | static int usb_dev_suspend(struct device *dev) |
260 | { | 260 | { |
261 | return usb_suspend(dev, PMSG_SUSPEND); | 261 | return usb_suspend(dev, PMSG_SUSPEND); |
262 | } | 262 | } |
263 | 263 | ||
264 | static int usb_dev_resume(struct device *dev) | 264 | static int usb_dev_resume(struct device *dev) |
265 | { | 265 | { |
266 | return usb_resume(dev); | 266 | return usb_resume(dev); |
267 | } | 267 | } |
268 | 268 | ||
269 | static int usb_dev_freeze(struct device *dev) | 269 | static int usb_dev_freeze(struct device *dev) |
270 | { | 270 | { |
271 | return usb_suspend(dev, PMSG_FREEZE); | 271 | return usb_suspend(dev, PMSG_FREEZE); |
272 | } | 272 | } |
273 | 273 | ||
274 | static int usb_dev_thaw(struct device *dev) | 274 | static int usb_dev_thaw(struct device *dev) |
275 | { | 275 | { |
276 | return usb_resume(dev); | 276 | return usb_resume(dev); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int usb_dev_poweroff(struct device *dev) | 279 | static int usb_dev_poweroff(struct device *dev) |
280 | { | 280 | { |
281 | return usb_suspend(dev, PMSG_HIBERNATE); | 281 | return usb_suspend(dev, PMSG_HIBERNATE); |
282 | } | 282 | } |
283 | 283 | ||
284 | static int usb_dev_restore(struct device *dev) | 284 | static int usb_dev_restore(struct device *dev) |
285 | { | 285 | { |
286 | return usb_resume(dev); | 286 | return usb_resume(dev); |
287 | } | 287 | } |
288 | 288 | ||
289 | static struct pm_ops usb_device_pm_ops = { | 289 | static struct dev_pm_ops usb_device_pm_ops = { |
290 | .prepare = usb_dev_prepare, | 290 | .prepare = usb_dev_prepare, |
291 | .complete = usb_dev_complete, | 291 | .complete = usb_dev_complete, |
292 | .suspend = usb_dev_suspend, | 292 | .suspend = usb_dev_suspend, |
293 | .resume = usb_dev_resume, | 293 | .resume = usb_dev_resume, |
294 | .freeze = usb_dev_freeze, | 294 | .freeze = usb_dev_freeze, |
295 | .thaw = usb_dev_thaw, | 295 | .thaw = usb_dev_thaw, |
296 | .poweroff = usb_dev_poweroff, | 296 | .poweroff = usb_dev_poweroff, |
297 | .restore = usb_dev_restore, | 297 | .restore = usb_dev_restore, |
298 | }; | 298 | }; |
299 | 299 | ||
300 | #else | 300 | #else |
301 | 301 | ||
302 | #define ksuspend_usb_init() 0 | 302 | #define ksuspend_usb_init() 0 |
303 | #define ksuspend_usb_cleanup() do {} while (0) | 303 | #define ksuspend_usb_cleanup() do {} while (0) |
304 | #define usb_device_pm_ops (*(struct pm_ops *)0) | 304 | #define usb_device_pm_ops (*(struct dev_pm_ops *)0) |
305 | 305 | ||
306 | #endif /* CONFIG_PM */ | 306 | #endif /* CONFIG_PM */ |
307 | 307 | ||
308 | struct device_type usb_device_type = { | 308 | struct device_type usb_device_type = { |
309 | .name = "usb_device", | 309 | .name = "usb_device", |
310 | .release = usb_release_dev, | 310 | .release = usb_release_dev, |
311 | .uevent = usb_dev_uevent, | 311 | .uevent = usb_dev_uevent, |
312 | .pm = &usb_device_pm_ops, | 312 | .pm = &usb_device_pm_ops, |
313 | }; | 313 | }; |
314 | 314 | ||
315 | 315 | ||
316 | /* Returns 1 if @usb_bus is WUSB, 0 otherwise */ | 316 | /* Returns 1 if @usb_bus is WUSB, 0 otherwise */ |
317 | static unsigned usb_bus_is_wusb(struct usb_bus *bus) | 317 | static unsigned usb_bus_is_wusb(struct usb_bus *bus) |
318 | { | 318 | { |
319 | struct usb_hcd *hcd = container_of(bus, struct usb_hcd, self); | 319 | struct usb_hcd *hcd = container_of(bus, struct usb_hcd, self); |
320 | return hcd->wireless; | 320 | return hcd->wireless; |
321 | } | 321 | } |
322 | 322 | ||
323 | 323 | ||
324 | /** | 324 | /** |
325 | * usb_alloc_dev - usb device constructor (usbcore-internal) | 325 | * usb_alloc_dev - usb device constructor (usbcore-internal) |
326 | * @parent: hub to which device is connected; null to allocate a root hub | 326 | * @parent: hub to which device is connected; null to allocate a root hub |
327 | * @bus: bus used to access the device | 327 | * @bus: bus used to access the device |
328 | * @port1: one-based index of port; ignored for root hubs | 328 | * @port1: one-based index of port; ignored for root hubs |
329 | * Context: !in_interrupt() | 329 | * Context: !in_interrupt() |
330 | * | 330 | * |
331 | * Only hub drivers (including virtual root hub drivers for host | 331 | * Only hub drivers (including virtual root hub drivers for host |
332 | * controllers) should ever call this. | 332 | * controllers) should ever call this. |
333 | * | 333 | * |
334 | * This call may not be used in a non-sleeping context. | 334 | * This call may not be used in a non-sleeping context. |
335 | */ | 335 | */ |
336 | struct usb_device *usb_alloc_dev(struct usb_device *parent, | 336 | struct usb_device *usb_alloc_dev(struct usb_device *parent, |
337 | struct usb_bus *bus, unsigned port1) | 337 | struct usb_bus *bus, unsigned port1) |
338 | { | 338 | { |
339 | struct usb_device *dev; | 339 | struct usb_device *dev; |
340 | struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self); | 340 | struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self); |
341 | unsigned root_hub = 0; | 341 | unsigned root_hub = 0; |
342 | 342 | ||
343 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 343 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
344 | if (!dev) | 344 | if (!dev) |
345 | return NULL; | 345 | return NULL; |
346 | 346 | ||
347 | if (!usb_get_hcd(bus_to_hcd(bus))) { | 347 | if (!usb_get_hcd(bus_to_hcd(bus))) { |
348 | kfree(dev); | 348 | kfree(dev); |
349 | return NULL; | 349 | return NULL; |
350 | } | 350 | } |
351 | 351 | ||
352 | device_initialize(&dev->dev); | 352 | device_initialize(&dev->dev); |
353 | dev->dev.bus = &usb_bus_type; | 353 | dev->dev.bus = &usb_bus_type; |
354 | dev->dev.type = &usb_device_type; | 354 | dev->dev.type = &usb_device_type; |
355 | dev->dev.groups = usb_device_groups; | 355 | dev->dev.groups = usb_device_groups; |
356 | dev->dev.dma_mask = bus->controller->dma_mask; | 356 | dev->dev.dma_mask = bus->controller->dma_mask; |
357 | set_dev_node(&dev->dev, dev_to_node(bus->controller)); | 357 | set_dev_node(&dev->dev, dev_to_node(bus->controller)); |
358 | dev->state = USB_STATE_ATTACHED; | 358 | dev->state = USB_STATE_ATTACHED; |
359 | atomic_set(&dev->urbnum, 0); | 359 | atomic_set(&dev->urbnum, 0); |
360 | 360 | ||
361 | INIT_LIST_HEAD(&dev->ep0.urb_list); | 361 | INIT_LIST_HEAD(&dev->ep0.urb_list); |
362 | dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; | 362 | dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; |
363 | dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; | 363 | dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; |
364 | /* ep0 maxpacket comes later, from device descriptor */ | 364 | /* ep0 maxpacket comes later, from device descriptor */ |
365 | usb_enable_endpoint(dev, &dev->ep0); | 365 | usb_enable_endpoint(dev, &dev->ep0); |
366 | dev->can_submit = 1; | 366 | dev->can_submit = 1; |
367 | 367 | ||
368 | /* Save readable and stable topology id, distinguishing devices | 368 | /* Save readable and stable topology id, distinguishing devices |
369 | * by location for diagnostics, tools, driver model, etc. The | 369 | * by location for diagnostics, tools, driver model, etc. The |
370 | * string is a path along hub ports, from the root. Each device's | 370 | * string is a path along hub ports, from the root. Each device's |
371 | * dev->devpath will be stable until USB is re-cabled, and hubs | 371 | * dev->devpath will be stable until USB is re-cabled, and hubs |
372 | * are often labeled with these port numbers. The name isn't | 372 | * are often labeled with these port numbers. The name isn't |
373 | * as stable: bus->busnum changes easily from modprobe order, | 373 | * as stable: bus->busnum changes easily from modprobe order, |
374 | * cardbus or pci hotplugging, and so on. | 374 | * cardbus or pci hotplugging, and so on. |
375 | */ | 375 | */ |
376 | if (unlikely(!parent)) { | 376 | if (unlikely(!parent)) { |
377 | dev->devpath[0] = '0'; | 377 | dev->devpath[0] = '0'; |
378 | 378 | ||
379 | dev->dev.parent = bus->controller; | 379 | dev->dev.parent = bus->controller; |
380 | dev_set_name(&dev->dev, "usb%d", bus->busnum); | 380 | dev_set_name(&dev->dev, "usb%d", bus->busnum); |
381 | root_hub = 1; | 381 | root_hub = 1; |
382 | } else { | 382 | } else { |
383 | /* match any labeling on the hubs; it's one-based */ | 383 | /* match any labeling on the hubs; it's one-based */ |
384 | if (parent->devpath[0] == '0') | 384 | if (parent->devpath[0] == '0') |
385 | snprintf(dev->devpath, sizeof dev->devpath, | 385 | snprintf(dev->devpath, sizeof dev->devpath, |
386 | "%d", port1); | 386 | "%d", port1); |
387 | else | 387 | else |
388 | snprintf(dev->devpath, sizeof dev->devpath, | 388 | snprintf(dev->devpath, sizeof dev->devpath, |
389 | "%s.%d", parent->devpath, port1); | 389 | "%s.%d", parent->devpath, port1); |
390 | 390 | ||
391 | dev->dev.parent = &parent->dev; | 391 | dev->dev.parent = &parent->dev; |
392 | dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); | 392 | dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); |
393 | 393 | ||
394 | /* hub driver sets up TT records */ | 394 | /* hub driver sets up TT records */ |
395 | } | 395 | } |
396 | 396 | ||
397 | dev->portnum = port1; | 397 | dev->portnum = port1; |
398 | dev->bus = bus; | 398 | dev->bus = bus; |
399 | dev->parent = parent; | 399 | dev->parent = parent; |
400 | INIT_LIST_HEAD(&dev->filelist); | 400 | INIT_LIST_HEAD(&dev->filelist); |
401 | 401 | ||
402 | #ifdef CONFIG_PM | 402 | #ifdef CONFIG_PM |
403 | mutex_init(&dev->pm_mutex); | 403 | mutex_init(&dev->pm_mutex); |
404 | INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); | 404 | INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); |
405 | dev->autosuspend_delay = usb_autosuspend_delay * HZ; | 405 | dev->autosuspend_delay = usb_autosuspend_delay * HZ; |
406 | dev->connect_time = jiffies; | 406 | dev->connect_time = jiffies; |
407 | dev->active_duration = -jiffies; | 407 | dev->active_duration = -jiffies; |
408 | #endif | 408 | #endif |
409 | if (root_hub) /* Root hub always ok [and always wired] */ | 409 | if (root_hub) /* Root hub always ok [and always wired] */ |
410 | dev->authorized = 1; | 410 | dev->authorized = 1; |
411 | else { | 411 | else { |
412 | dev->authorized = usb_hcd->authorized_default; | 412 | dev->authorized = usb_hcd->authorized_default; |
413 | dev->wusb = usb_bus_is_wusb(bus)? 1 : 0; | 413 | dev->wusb = usb_bus_is_wusb(bus)? 1 : 0; |
414 | } | 414 | } |
415 | return dev; | 415 | return dev; |
416 | } | 416 | } |
417 | 417 | ||
418 | /** | 418 | /** |
419 | * usb_get_dev - increments the reference count of the usb device structure | 419 | * usb_get_dev - increments the reference count of the usb device structure |
420 | * @dev: the device being referenced | 420 | * @dev: the device being referenced |
421 | * | 421 | * |
422 | * Each live reference to a device should be refcounted. | 422 | * Each live reference to a device should be refcounted. |
423 | * | 423 | * |
424 | * Drivers for USB interfaces should normally record such references in | 424 | * Drivers for USB interfaces should normally record such references in |
425 | * their probe() methods, when they bind to an interface, and release | 425 | * their probe() methods, when they bind to an interface, and release |
426 | * them by calling usb_put_dev(), in their disconnect() methods. | 426 | * them by calling usb_put_dev(), in their disconnect() methods. |
427 | * | 427 | * |
428 | * A pointer to the device with the incremented reference counter is returned. | 428 | * A pointer to the device with the incremented reference counter is returned. |
429 | */ | 429 | */ |
430 | struct usb_device *usb_get_dev(struct usb_device *dev) | 430 | struct usb_device *usb_get_dev(struct usb_device *dev) |
431 | { | 431 | { |
432 | if (dev) | 432 | if (dev) |
433 | get_device(&dev->dev); | 433 | get_device(&dev->dev); |
434 | return dev; | 434 | return dev; |
435 | } | 435 | } |
436 | EXPORT_SYMBOL_GPL(usb_get_dev); | 436 | EXPORT_SYMBOL_GPL(usb_get_dev); |
437 | 437 | ||
438 | /** | 438 | /** |
439 | * usb_put_dev - release a use of the usb device structure | 439 | * usb_put_dev - release a use of the usb device structure |
440 | * @dev: device that's been disconnected | 440 | * @dev: device that's been disconnected |
441 | * | 441 | * |
442 | * Must be called when a user of a device is finished with it. When the last | 442 | * Must be called when a user of a device is finished with it. When the last |
443 | * user of the device calls this function, the memory of the device is freed. | 443 | * user of the device calls this function, the memory of the device is freed. |
444 | */ | 444 | */ |
445 | void usb_put_dev(struct usb_device *dev) | 445 | void usb_put_dev(struct usb_device *dev) |
446 | { | 446 | { |
447 | if (dev) | 447 | if (dev) |
448 | put_device(&dev->dev); | 448 | put_device(&dev->dev); |
449 | } | 449 | } |
450 | EXPORT_SYMBOL_GPL(usb_put_dev); | 450 | EXPORT_SYMBOL_GPL(usb_put_dev); |
451 | 451 | ||
452 | /** | 452 | /** |
453 | * usb_get_intf - increments the reference count of the usb interface structure | 453 | * usb_get_intf - increments the reference count of the usb interface structure |
454 | * @intf: the interface being referenced | 454 | * @intf: the interface being referenced |
455 | * | 455 | * |
456 | * Each live reference to a interface must be refcounted. | 456 | * Each live reference to a interface must be refcounted. |
457 | * | 457 | * |
458 | * Drivers for USB interfaces should normally record such references in | 458 | * Drivers for USB interfaces should normally record such references in |
459 | * their probe() methods, when they bind to an interface, and release | 459 | * their probe() methods, when they bind to an interface, and release |
460 | * them by calling usb_put_intf(), in their disconnect() methods. | 460 | * them by calling usb_put_intf(), in their disconnect() methods. |
461 | * | 461 | * |
462 | * A pointer to the interface with the incremented reference counter is | 462 | * A pointer to the interface with the incremented reference counter is |
463 | * returned. | 463 | * returned. |
464 | */ | 464 | */ |
465 | struct usb_interface *usb_get_intf(struct usb_interface *intf) | 465 | struct usb_interface *usb_get_intf(struct usb_interface *intf) |
466 | { | 466 | { |
467 | if (intf) | 467 | if (intf) |
468 | get_device(&intf->dev); | 468 | get_device(&intf->dev); |
469 | return intf; | 469 | return intf; |
470 | } | 470 | } |
471 | EXPORT_SYMBOL_GPL(usb_get_intf); | 471 | EXPORT_SYMBOL_GPL(usb_get_intf); |
472 | 472 | ||
473 | /** | 473 | /** |
474 | * usb_put_intf - release a use of the usb interface structure | 474 | * usb_put_intf - release a use of the usb interface structure |
475 | * @intf: interface that's been decremented | 475 | * @intf: interface that's been decremented |
476 | * | 476 | * |
477 | * Must be called when a user of an interface is finished with it. When the | 477 | * Must be called when a user of an interface is finished with it. When the |
478 | * last user of the interface calls this function, the memory of the interface | 478 | * last user of the interface calls this function, the memory of the interface |
479 | * is freed. | 479 | * is freed. |
480 | */ | 480 | */ |
481 | void usb_put_intf(struct usb_interface *intf) | 481 | void usb_put_intf(struct usb_interface *intf) |
482 | { | 482 | { |
483 | if (intf) | 483 | if (intf) |
484 | put_device(&intf->dev); | 484 | put_device(&intf->dev); |
485 | } | 485 | } |
486 | EXPORT_SYMBOL_GPL(usb_put_intf); | 486 | EXPORT_SYMBOL_GPL(usb_put_intf); |
487 | 487 | ||
/* USB device locking
 *
 * USB devices and interfaces are locked using the semaphore in their
 * embedded struct device.  The hub driver guarantees that whenever a
 * device is connected or disconnected, drivers are called with the
 * USB device locked as well as their particular interface.
 *
 * Complications arise when several devices are to be locked at the same
 * time.  Only hub-aware drivers that are part of usbcore ever have to
 * do this; nobody else needs to worry about it.  The rule for locking
 * is simple:
 *
 *	When locking both a device and its parent, always lock the
 *	parent first.
 */
503 | 503 | ||
504 | /** | 504 | /** |
505 | * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure | 505 | * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure |
506 | * @udev: device that's being locked | 506 | * @udev: device that's being locked |
507 | * @iface: interface bound to the driver making the request (optional) | 507 | * @iface: interface bound to the driver making the request (optional) |
508 | * | 508 | * |
509 | * Attempts to acquire the device lock, but fails if the device is | 509 | * Attempts to acquire the device lock, but fails if the device is |
510 | * NOTATTACHED or SUSPENDED, or if iface is specified and the interface | 510 | * NOTATTACHED or SUSPENDED, or if iface is specified and the interface |
511 | * is neither BINDING nor BOUND. Rather than sleeping to wait for the | 511 | * is neither BINDING nor BOUND. Rather than sleeping to wait for the |
512 | * lock, the routine polls repeatedly. This is to prevent deadlock with | 512 | * lock, the routine polls repeatedly. This is to prevent deadlock with |
513 | * disconnect; in some drivers (such as usb-storage) the disconnect() | 513 | * disconnect; in some drivers (such as usb-storage) the disconnect() |
514 | * or suspend() method will block waiting for a device reset to complete. | 514 | * or suspend() method will block waiting for a device reset to complete. |
515 | * | 515 | * |
516 | * Returns a negative error code for failure, otherwise 1 or 0 to indicate | 516 | * Returns a negative error code for failure, otherwise 1 or 0 to indicate |
517 | * that the device will or will not have to be unlocked. (0 can be | 517 | * that the device will or will not have to be unlocked. (0 can be |
518 | * returned when an interface is given and is BINDING, because in that | 518 | * returned when an interface is given and is BINDING, because in that |
519 | * case the driver already owns the device lock.) | 519 | * case the driver already owns the device lock.) |
520 | */ | 520 | */ |
521 | int usb_lock_device_for_reset(struct usb_device *udev, | 521 | int usb_lock_device_for_reset(struct usb_device *udev, |
522 | const struct usb_interface *iface) | 522 | const struct usb_interface *iface) |
523 | { | 523 | { |
524 | unsigned long jiffies_expire = jiffies + HZ; | 524 | unsigned long jiffies_expire = jiffies + HZ; |
525 | 525 | ||
526 | if (udev->state == USB_STATE_NOTATTACHED) | 526 | if (udev->state == USB_STATE_NOTATTACHED) |
527 | return -ENODEV; | 527 | return -ENODEV; |
528 | if (udev->state == USB_STATE_SUSPENDED) | 528 | if (udev->state == USB_STATE_SUSPENDED) |
529 | return -EHOSTUNREACH; | 529 | return -EHOSTUNREACH; |
530 | if (iface) { | 530 | if (iface) { |
531 | switch (iface->condition) { | 531 | switch (iface->condition) { |
532 | case USB_INTERFACE_BINDING: | 532 | case USB_INTERFACE_BINDING: |
533 | return 0; | 533 | return 0; |
534 | case USB_INTERFACE_BOUND: | 534 | case USB_INTERFACE_BOUND: |
535 | break; | 535 | break; |
536 | default: | 536 | default: |
537 | return -EINTR; | 537 | return -EINTR; |
538 | } | 538 | } |
539 | } | 539 | } |
540 | 540 | ||
541 | while (usb_trylock_device(udev) != 0) { | 541 | while (usb_trylock_device(udev) != 0) { |
542 | 542 | ||
543 | /* If we can't acquire the lock after waiting one second, | 543 | /* If we can't acquire the lock after waiting one second, |
544 | * we're probably deadlocked */ | 544 | * we're probably deadlocked */ |
545 | if (time_after(jiffies, jiffies_expire)) | 545 | if (time_after(jiffies, jiffies_expire)) |
546 | return -EBUSY; | 546 | return -EBUSY; |
547 | 547 | ||
548 | msleep(15); | 548 | msleep(15); |
549 | if (udev->state == USB_STATE_NOTATTACHED) | 549 | if (udev->state == USB_STATE_NOTATTACHED) |
550 | return -ENODEV; | 550 | return -ENODEV; |
551 | if (udev->state == USB_STATE_SUSPENDED) | 551 | if (udev->state == USB_STATE_SUSPENDED) |
552 | return -EHOSTUNREACH; | 552 | return -EHOSTUNREACH; |
553 | if (iface && iface->condition != USB_INTERFACE_BOUND) | 553 | if (iface && iface->condition != USB_INTERFACE_BOUND) |
554 | return -EINTR; | 554 | return -EINTR; |
555 | } | 555 | } |
556 | return 1; | 556 | return 1; |
557 | } | 557 | } |
558 | EXPORT_SYMBOL_GPL(usb_lock_device_for_reset); | 558 | EXPORT_SYMBOL_GPL(usb_lock_device_for_reset); |
559 | 559 | ||
560 | static struct usb_device *match_device(struct usb_device *dev, | 560 | static struct usb_device *match_device(struct usb_device *dev, |
561 | u16 vendor_id, u16 product_id) | 561 | u16 vendor_id, u16 product_id) |
562 | { | 562 | { |
563 | struct usb_device *ret_dev = NULL; | 563 | struct usb_device *ret_dev = NULL; |
564 | int child; | 564 | int child; |
565 | 565 | ||
566 | dev_dbg(&dev->dev, "check for vendor %04x, product %04x ...\n", | 566 | dev_dbg(&dev->dev, "check for vendor %04x, product %04x ...\n", |
567 | le16_to_cpu(dev->descriptor.idVendor), | 567 | le16_to_cpu(dev->descriptor.idVendor), |
568 | le16_to_cpu(dev->descriptor.idProduct)); | 568 | le16_to_cpu(dev->descriptor.idProduct)); |
569 | 569 | ||
570 | /* see if this device matches */ | 570 | /* see if this device matches */ |
571 | if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) && | 571 | if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) && |
572 | (product_id == le16_to_cpu(dev->descriptor.idProduct))) { | 572 | (product_id == le16_to_cpu(dev->descriptor.idProduct))) { |
573 | dev_dbg(&dev->dev, "matched this device!\n"); | 573 | dev_dbg(&dev->dev, "matched this device!\n"); |
574 | ret_dev = usb_get_dev(dev); | 574 | ret_dev = usb_get_dev(dev); |
575 | goto exit; | 575 | goto exit; |
576 | } | 576 | } |
577 | 577 | ||
578 | /* look through all of the children of this device */ | 578 | /* look through all of the children of this device */ |
579 | for (child = 0; child < dev->maxchild; ++child) { | 579 | for (child = 0; child < dev->maxchild; ++child) { |
580 | if (dev->children[child]) { | 580 | if (dev->children[child]) { |
581 | usb_lock_device(dev->children[child]); | 581 | usb_lock_device(dev->children[child]); |
582 | ret_dev = match_device(dev->children[child], | 582 | ret_dev = match_device(dev->children[child], |
583 | vendor_id, product_id); | 583 | vendor_id, product_id); |
584 | usb_unlock_device(dev->children[child]); | 584 | usb_unlock_device(dev->children[child]); |
585 | if (ret_dev) | 585 | if (ret_dev) |
586 | goto exit; | 586 | goto exit; |
587 | } | 587 | } |
588 | } | 588 | } |
589 | exit: | 589 | exit: |
590 | return ret_dev; | 590 | return ret_dev; |
591 | } | 591 | } |
592 | 592 | ||
593 | /** | 593 | /** |
594 | * usb_find_device - find a specific usb device in the system | 594 | * usb_find_device - find a specific usb device in the system |
595 | * @vendor_id: the vendor id of the device to find | 595 | * @vendor_id: the vendor id of the device to find |
596 | * @product_id: the product id of the device to find | 596 | * @product_id: the product id of the device to find |
597 | * | 597 | * |
598 | * Returns a pointer to a struct usb_device if such a specified usb | 598 | * Returns a pointer to a struct usb_device if such a specified usb |
599 | * device is present in the system currently. The usage count of the | 599 | * device is present in the system currently. The usage count of the |
600 | * device will be incremented if a device is found. Make sure to call | 600 | * device will be incremented if a device is found. Make sure to call |
601 | * usb_put_dev() when the caller is finished with the device. | 601 | * usb_put_dev() when the caller is finished with the device. |
602 | * | 602 | * |
603 | * If a device with the specified vendor and product id is not found, | 603 | * If a device with the specified vendor and product id is not found, |
604 | * NULL is returned. | 604 | * NULL is returned. |
605 | */ | 605 | */ |
606 | struct usb_device *usb_find_device(u16 vendor_id, u16 product_id) | 606 | struct usb_device *usb_find_device(u16 vendor_id, u16 product_id) |
607 | { | 607 | { |
608 | struct list_head *buslist; | 608 | struct list_head *buslist; |
609 | struct usb_bus *bus; | 609 | struct usb_bus *bus; |
610 | struct usb_device *dev = NULL; | 610 | struct usb_device *dev = NULL; |
611 | 611 | ||
612 | mutex_lock(&usb_bus_list_lock); | 612 | mutex_lock(&usb_bus_list_lock); |
613 | for (buslist = usb_bus_list.next; | 613 | for (buslist = usb_bus_list.next; |
614 | buslist != &usb_bus_list; | 614 | buslist != &usb_bus_list; |
615 | buslist = buslist->next) { | 615 | buslist = buslist->next) { |
616 | bus = container_of(buslist, struct usb_bus, bus_list); | 616 | bus = container_of(buslist, struct usb_bus, bus_list); |
617 | if (!bus->root_hub) | 617 | if (!bus->root_hub) |
618 | continue; | 618 | continue; |
619 | usb_lock_device(bus->root_hub); | 619 | usb_lock_device(bus->root_hub); |
620 | dev = match_device(bus->root_hub, vendor_id, product_id); | 620 | dev = match_device(bus->root_hub, vendor_id, product_id); |
621 | usb_unlock_device(bus->root_hub); | 621 | usb_unlock_device(bus->root_hub); |
622 | if (dev) | 622 | if (dev) |
623 | goto exit; | 623 | goto exit; |
624 | } | 624 | } |
625 | exit: | 625 | exit: |
626 | mutex_unlock(&usb_bus_list_lock); | 626 | mutex_unlock(&usb_bus_list_lock); |
627 | return dev; | 627 | return dev; |
628 | } | 628 | } |
629 | 629 | ||
/**
 * usb_get_current_frame_number - return current bus frame number
 * @dev: the device whose bus is being queried
 *
 * Returns the current frame number for the USB host controller
 * used with the given USB device.  This can be used when scheduling
 * isochronous requests.
 *
 * Note that different kinds of host controller have different
 * "scheduling horizons".  While one type might support scheduling only
 * 32 frames into the future, others could support scheduling up to
 * 1024 frames into the future.
 */
int usb_get_current_frame_number(struct usb_device *dev)
{
	/* Thin wrapper: delegate straight to the HCD layer. */
	return usb_hcd_get_frame_number(dev);
}
EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
648 | 648 | ||
649 | /*-------------------------------------------------------------------*/ | 649 | /*-------------------------------------------------------------------*/ |
650 | /* | 650 | /* |
651 | * __usb_get_extra_descriptor() finds a descriptor of specific type in the | 651 | * __usb_get_extra_descriptor() finds a descriptor of specific type in the |
652 | * extra field of the interface and endpoint descriptor structs. | 652 | * extra field of the interface and endpoint descriptor structs. |
653 | */ | 653 | */ |
654 | 654 | ||
655 | int __usb_get_extra_descriptor(char *buffer, unsigned size, | 655 | int __usb_get_extra_descriptor(char *buffer, unsigned size, |
656 | unsigned char type, void **ptr) | 656 | unsigned char type, void **ptr) |
657 | { | 657 | { |
658 | struct usb_descriptor_header *header; | 658 | struct usb_descriptor_header *header; |
659 | 659 | ||
660 | while (size >= sizeof(struct usb_descriptor_header)) { | 660 | while (size >= sizeof(struct usb_descriptor_header)) { |
661 | header = (struct usb_descriptor_header *)buffer; | 661 | header = (struct usb_descriptor_header *)buffer; |
662 | 662 | ||
663 | if (header->bLength < 2) { | 663 | if (header->bLength < 2) { |
664 | printk(KERN_ERR | 664 | printk(KERN_ERR |
665 | "%s: bogus descriptor, type %d length %d\n", | 665 | "%s: bogus descriptor, type %d length %d\n", |
666 | usbcore_name, | 666 | usbcore_name, |
667 | header->bDescriptorType, | 667 | header->bDescriptorType, |
668 | header->bLength); | 668 | header->bLength); |
669 | return -1; | 669 | return -1; |
670 | } | 670 | } |
671 | 671 | ||
672 | if (header->bDescriptorType == type) { | 672 | if (header->bDescriptorType == type) { |
673 | *ptr = header; | 673 | *ptr = header; |
674 | return 0; | 674 | return 0; |
675 | } | 675 | } |
676 | 676 | ||
677 | buffer += header->bLength; | 677 | buffer += header->bLength; |
678 | size -= header->bLength; | 678 | size -= header->bLength; |
679 | } | 679 | } |
680 | return -1; | 680 | return -1; |
681 | } | 681 | } |
682 | EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); | 682 | EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); |
683 | 683 | ||
684 | /** | 684 | /** |
685 | * usb_buffer_alloc - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP | 685 | * usb_buffer_alloc - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP |
686 | * @dev: device the buffer will be used with | 686 | * @dev: device the buffer will be used with |
687 | * @size: requested buffer size | 687 | * @size: requested buffer size |
688 | * @mem_flags: affect whether allocation may block | 688 | * @mem_flags: affect whether allocation may block |
689 | * @dma: used to return DMA address of buffer | 689 | * @dma: used to return DMA address of buffer |
690 | * | 690 | * |
691 | * Return value is either null (indicating no buffer could be allocated), or | 691 | * Return value is either null (indicating no buffer could be allocated), or |
692 | * the cpu-space pointer to a buffer that may be used to perform DMA to the | 692 | * the cpu-space pointer to a buffer that may be used to perform DMA to the |
693 | * specified device. Such cpu-space buffers are returned along with the DMA | 693 | * specified device. Such cpu-space buffers are returned along with the DMA |
694 | * address (through the pointer provided). | 694 | * address (through the pointer provided). |
695 | * | 695 | * |
696 | * These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags | 696 | * These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags |
697 | * to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU | 697 | * to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU |
698 | * hardware during URB completion/resubmit. The implementation varies between | 698 | * hardware during URB completion/resubmit. The implementation varies between |
699 | * platforms, depending on details of how DMA will work to this device. | 699 | * platforms, depending on details of how DMA will work to this device. |
700 | * Using these buffers also eliminates cacheline sharing problems on | 700 | * Using these buffers also eliminates cacheline sharing problems on |
701 | * architectures where CPU caches are not DMA-coherent. On systems without | 701 | * architectures where CPU caches are not DMA-coherent. On systems without |
702 | * bus-snooping caches, these buffers are uncached. | 702 | * bus-snooping caches, these buffers are uncached. |
703 | * | 703 | * |
704 | * When the buffer is no longer used, free it with usb_buffer_free(). | 704 | * When the buffer is no longer used, free it with usb_buffer_free(). |
705 | */ | 705 | */ |
706 | void *usb_buffer_alloc(struct usb_device *dev, size_t size, gfp_t mem_flags, | 706 | void *usb_buffer_alloc(struct usb_device *dev, size_t size, gfp_t mem_flags, |
707 | dma_addr_t *dma) | 707 | dma_addr_t *dma) |
708 | { | 708 | { |
709 | if (!dev || !dev->bus) | 709 | if (!dev || !dev->bus) |
710 | return NULL; | 710 | return NULL; |
711 | return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); | 711 | return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); |
712 | } | 712 | } |
713 | EXPORT_SYMBOL_GPL(usb_buffer_alloc); | 713 | EXPORT_SYMBOL_GPL(usb_buffer_alloc); |
714 | 714 | ||
715 | /** | 715 | /** |
716 | * usb_buffer_free - free memory allocated with usb_buffer_alloc() | 716 | * usb_buffer_free - free memory allocated with usb_buffer_alloc() |
717 | * @dev: device the buffer was used with | 717 | * @dev: device the buffer was used with |
718 | * @size: requested buffer size | 718 | * @size: requested buffer size |
719 | * @addr: CPU address of buffer | 719 | * @addr: CPU address of buffer |
720 | * @dma: DMA address of buffer | 720 | * @dma: DMA address of buffer |
721 | * | 721 | * |
722 | * This reclaims an I/O buffer, letting it be reused. The memory must have | 722 | * This reclaims an I/O buffer, letting it be reused. The memory must have |
723 | * been allocated using usb_buffer_alloc(), and the parameters must match | 723 | * been allocated using usb_buffer_alloc(), and the parameters must match |
724 | * those provided in that allocation request. | 724 | * those provided in that allocation request. |
725 | */ | 725 | */ |
726 | void usb_buffer_free(struct usb_device *dev, size_t size, void *addr, | 726 | void usb_buffer_free(struct usb_device *dev, size_t size, void *addr, |
727 | dma_addr_t dma) | 727 | dma_addr_t dma) |
728 | { | 728 | { |
729 | if (!dev || !dev->bus) | 729 | if (!dev || !dev->bus) |
730 | return; | 730 | return; |
731 | if (!addr) | 731 | if (!addr) |
732 | return; | 732 | return; |
733 | hcd_buffer_free(dev->bus, size, addr, dma); | 733 | hcd_buffer_free(dev->bus, size, addr, dma); |
734 | } | 734 | } |
735 | EXPORT_SYMBOL_GPL(usb_buffer_free); | 735 | EXPORT_SYMBOL_GPL(usb_buffer_free); |
736 | 736 | ||
/**
 * usb_buffer_map - create DMA mapping(s) for an urb
 * @urb: urb whose transfer_buffer/setup_packet will be mapped
 *
 * Return value is either null (indicating no buffer could be mapped), or
 * the parameter.  URB_NO_TRANSFER_DMA_MAP and URB_NO_SETUP_DMA_MAP are
 * added to urb->transfer_flags if the operation succeeds.  If the device
 * is connected to this system through a non-DMA controller, this operation
 * always succeeds.
 *
 * This call would normally be used for an urb which is reused, perhaps
 * as the target of a large periodic transfer, with usb_buffer_dmasync()
 * calls to synchronize memory and dma state.
 *
 * Reverse the effect of this call with usb_buffer_unmap().
 */
/* NOTE: compiled out (#if 0) — no in-tree users; kept for reference. */
#if 0
struct urb *usb_buffer_map(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	/* Need an urb bound to a device whose bus has a controller. */
	if (!urb
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->controller))
		return NULL;

	if (controller->dma_mask) {
		urb->transfer_dma = dma_map_single(controller,
			urb->transfer_buffer, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		/* control transfers also carry an 8-byte setup packet */
		if (usb_pipecontrol(urb->pipe))
			urb->setup_dma = dma_map_single(controller,
					urb->setup_packet,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
	/* FIXME generic api broken like pci, can't report errors */
	/* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */
	} else
		/* non-DMA controller: mark the address as unusable */
		urb->transfer_dma = ~0;
	urb->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP
				| URB_NO_SETUP_DMA_MAP);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_buffer_map);
#endif  /*  0  */
785 | 785 | ||
/* XXX DISABLED, no users currently.  If you wish to re-enable this
 * XXX please determine whether the sync is to transfer ownership of
 * XXX the buffer from device to cpu or vice verse, and thusly use the
 * XXX appropriate _for_{cpu,device}() method.  -DaveM
 */
#if 0

/**
 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
 * @urb: urb whose transfer_buffer/setup_packet will be synchronized
 */
void usb_buffer_dmasync(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	/* Only sync urbs that were actually DMA-mapped (see usb_buffer_map). */
	if (!urb
			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->controller))
		return;

	if (controller->dma_mask) {
		dma_sync_single(controller,
			urb->transfer_dma, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		/* control transfers also carry an 8-byte setup packet */
		if (usb_pipecontrol(urb->pipe))
			dma_sync_single(controller,
					urb->setup_dma,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
	}
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync);
#endif
823 | 823 | ||
/**
 * usb_buffer_unmap - free DMA mapping(s) for an urb
 * @urb: urb whose transfer_buffer will be unmapped
 *
 * Reverses the effect of usb_buffer_map().
 */
/* NOTE: compiled out (#if 0) — no in-tree users; kept for reference. */
#if 0
void usb_buffer_unmap(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	/* Only unmap urbs that were actually DMA-mapped (see usb_buffer_map). */
	if (!urb
			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->controller))
		return;

	if (controller->dma_mask) {
		dma_unmap_single(controller,
			urb->transfer_dma, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		/* control transfers also carry an 8-byte setup packet */
		if (usb_pipecontrol(urb->pipe))
			dma_unmap_single(controller,
					urb->setup_dma,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
	}
	/* mapping is gone; clear the flags so a future map can succeed */
	urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP
				| URB_NO_SETUP_DMA_MAP);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif  /*  0  */
859 | 859 | ||
860 | /** | 860 | /** |
861 | * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint | 861 | * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint |
862 | * @dev: device to which the scatterlist will be mapped | 862 | * @dev: device to which the scatterlist will be mapped |
863 | * @is_in: mapping transfer direction | 863 | * @is_in: mapping transfer direction |
864 | * @sg: the scatterlist to map | 864 | * @sg: the scatterlist to map |
865 | * @nents: the number of entries in the scatterlist | 865 | * @nents: the number of entries in the scatterlist |
866 | * | 866 | * |
867 | * Return value is either < 0 (indicating no buffers could be mapped), or | 867 | * Return value is either < 0 (indicating no buffers could be mapped), or |
868 | * the number of DMA mapping array entries in the scatterlist. | 868 | * the number of DMA mapping array entries in the scatterlist. |
869 | * | 869 | * |
870 | * The caller is responsible for placing the resulting DMA addresses from | 870 | * The caller is responsible for placing the resulting DMA addresses from |
871 | * the scatterlist into URB transfer buffer pointers, and for setting the | 871 | * the scatterlist into URB transfer buffer pointers, and for setting the |
872 | * URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs. | 872 | * URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs. |
873 | * | 873 | * |
874 | * Top I/O rates come from queuing URBs, instead of waiting for each one | 874 | * Top I/O rates come from queuing URBs, instead of waiting for each one |
875 | * to complete before starting the next I/O. This is particularly easy | 875 | * to complete before starting the next I/O. This is particularly easy |
876 | * to do with scatterlists. Just allocate and submit one URB for each DMA | 876 | * to do with scatterlists. Just allocate and submit one URB for each DMA |
877 | * mapping entry returned, stopping on the first error or when all succeed. | 877 | * mapping entry returned, stopping on the first error or when all succeed. |
878 | * Better yet, use the usb_sg_*() calls, which do that (and more) for you. | 878 | * Better yet, use the usb_sg_*() calls, which do that (and more) for you. |
879 | * | 879 | * |
880 | * This call would normally be used when translating scatterlist requests, | 880 | * This call would normally be used when translating scatterlist requests, |
881 | * rather than usb_buffer_map(), since on some hardware (with IOMMUs) it | 881 | * rather than usb_buffer_map(), since on some hardware (with IOMMUs) it |
882 | * may be able to coalesce mappings for improved I/O efficiency. | 882 | * may be able to coalesce mappings for improved I/O efficiency. |
883 | * | 883 | * |
884 | * Reverse the effect of this call with usb_buffer_unmap_sg(). | 884 | * Reverse the effect of this call with usb_buffer_unmap_sg(). |
885 | */ | 885 | */ |
886 | int usb_buffer_map_sg(const struct usb_device *dev, int is_in, | 886 | int usb_buffer_map_sg(const struct usb_device *dev, int is_in, |
887 | struct scatterlist *sg, int nents) | 887 | struct scatterlist *sg, int nents) |
888 | { | 888 | { |
889 | struct usb_bus *bus; | 889 | struct usb_bus *bus; |
890 | struct device *controller; | 890 | struct device *controller; |
891 | 891 | ||
892 | if (!dev | 892 | if (!dev |
893 | || !(bus = dev->bus) | 893 | || !(bus = dev->bus) |
894 | || !(controller = bus->controller) | 894 | || !(controller = bus->controller) |
895 | || !controller->dma_mask) | 895 | || !controller->dma_mask) |
896 | return -1; | 896 | return -1; |
897 | 897 | ||
898 | /* FIXME generic api broken like pci, can't report errors */ | 898 | /* FIXME generic api broken like pci, can't report errors */ |
899 | return dma_map_sg(controller, sg, nents, | 899 | return dma_map_sg(controller, sg, nents, |
900 | is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); | 900 | is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
901 | } | 901 | } |
902 | EXPORT_SYMBOL_GPL(usb_buffer_map_sg); | 902 | EXPORT_SYMBOL_GPL(usb_buffer_map_sg); |
903 | 903 | ||
/* XXX DISABLED, no users currently.  If you wish to re-enable this
 * XXX please determine whether the sync is to transfer ownership of
 * XXX the buffer from device to cpu or vice verse, and thusly use the
 * XXX appropriate _for_{cpu,device}() method.  -DaveM
 */
#if 0

/**
 * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s)
 * @dev: device to which the scatterlist will be mapped
 * @is_in: mapping transfer direction
 * @sg: the scatterlist to synchronize
 * @n_hw_ents: the positive return value from usb_buffer_map_sg
 *
 * Use this when you are re-using a scatterlist's data buffers for
 * another USB request.
 */
void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
			   struct scatterlist *sg, int n_hw_ents)
{
	struct usb_bus *bus;
	struct device *controller;

	/* Nothing to sync unless the device's host controller does DMA. */
	if (!dev
			|| !(bus = dev->bus)
			|| !(controller = bus->controller)
			|| !controller->dma_mask)
		return;

	dma_sync_sg(controller, sg, n_hw_ents,
			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif
938 | 938 | ||
939 | /** | 939 | /** |
940 | * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist | 940 | * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist |
941 | * @dev: device to which the scatterlist will be mapped | 941 | * @dev: device to which the scatterlist will be mapped |
942 | * @is_in: mapping transfer direction | 942 | * @is_in: mapping transfer direction |
943 | * @sg: the scatterlist to unmap | 943 | * @sg: the scatterlist to unmap |
944 | * @n_hw_ents: the positive return value from usb_buffer_map_sg | 944 | * @n_hw_ents: the positive return value from usb_buffer_map_sg |
945 | * | 945 | * |
946 | * Reverses the effect of usb_buffer_map_sg(). | 946 | * Reverses the effect of usb_buffer_map_sg(). |
947 | */ | 947 | */ |
948 | void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, | 948 | void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, |
949 | struct scatterlist *sg, int n_hw_ents) | 949 | struct scatterlist *sg, int n_hw_ents) |
950 | { | 950 | { |
951 | struct usb_bus *bus; | 951 | struct usb_bus *bus; |
952 | struct device *controller; | 952 | struct device *controller; |
953 | 953 | ||
954 | if (!dev | 954 | if (!dev |
955 | || !(bus = dev->bus) | 955 | || !(bus = dev->bus) |
956 | || !(controller = bus->controller) | 956 | || !(controller = bus->controller) |
957 | || !controller->dma_mask) | 957 | || !controller->dma_mask) |
958 | return; | 958 | return; |
959 | 959 | ||
960 | dma_unmap_sg(controller, sg, n_hw_ents, | 960 | dma_unmap_sg(controller, sg, n_hw_ents, |
961 | is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); | 961 | is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
962 | } | 962 | } |
963 | EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg); | 963 | EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg); |
964 | 964 | ||
965 | /* format to disable USB on kernel command line is: nousb */ | 965 | /* format to disable USB on kernel command line is: nousb */ |
966 | __module_param_call("", nousb, param_set_bool, param_get_bool, &nousb, 0444); | 966 | __module_param_call("", nousb, param_set_bool, param_get_bool, &nousb, 0444); |
967 | 967 | ||
/*
 * for external read access to <nousb>
 *
 * Returns nonzero when USB support was disabled on the kernel command
 * line via the "nousb" option (see the __module_param_call above).
 */
int usb_disabled(void)
{
	return nousb;
}
EXPORT_SYMBOL_GPL(usb_disabled);
976 | 976 | ||
/*
 * Init
 *
 * Brings up usbcore in stages; each successful step is unwound in
 * reverse order via the goto chain below if a later step fails.
 */
static int __init usb_init(void)
{
	int retval;
	/* "nousb" on the kernel command line disables all of usbcore. */
	if (nousb) {
		pr_info("%s: USB support disabled\n", usbcore_name);
		return 0;
	}

	retval = ksuspend_usb_init();
	if (retval)
		goto out;
	retval = bus_register(&usb_bus_type);
	if (retval)
		goto bus_register_failed;
	retval = usb_host_init();
	if (retval)
		goto host_init_failed;
	retval = usb_major_init();
	if (retval)
		goto major_init_failed;
	retval = usb_register(&usbfs_driver);
	if (retval)
		goto driver_register_failed;
	retval = usb_devio_init();
	if (retval)
		goto usb_devio_init_failed;
	retval = usbfs_init();
	if (retval)
		goto fs_init_failed;
	retval = usb_hub_init();
	if (retval)
		goto hub_init_failed;
	retval = usb_register_device_driver(&usb_generic_driver, THIS_MODULE);
	if (!retval)
		goto out;	/* complete success */

	/* Error unwind: fall through the cleanups in reverse init order. */
	usb_hub_cleanup();
hub_init_failed:
	usbfs_cleanup();
fs_init_failed:
	usb_devio_cleanup();
usb_devio_init_failed:
	usb_deregister(&usbfs_driver);
driver_register_failed:
	usb_major_cleanup();
major_init_failed:
	usb_host_cleanup();
host_init_failed:
	bus_unregister(&usb_bus_type);
bus_register_failed:
	ksuspend_usb_cleanup();
out:
	return retval;
}
1034 | 1034 | ||
/*
 * Cleanup
 *
 * Tears usbcore down in the reverse order of usb_init().
 */
static void __exit usb_exit(void)
{
	/* This will matter if shutdown/reboot does exitcalls. */
	if (nousb)
		return;

	usb_deregister_device_driver(&usb_generic_driver);
	usb_major_cleanup();
	usbfs_cleanup();
	usb_deregister(&usbfs_driver);
	usb_devio_cleanup();
	usb_hub_cleanup();
	usb_host_cleanup();
	bus_unregister(&usb_bus_type);
	ksuspend_usb_cleanup();
}
1054 | 1054 | ||
1055 | subsys_initcall(usb_init); | 1055 | subsys_initcall(usb_init); |
1056 | module_exit(usb_exit); | 1056 | module_exit(usb_exit); |
1057 | MODULE_LICENSE("GPL"); | 1057 | MODULE_LICENSE("GPL"); |
1058 | 1058 |
include/linux/device.h
1 | /* | 1 | /* |
2 | * device.h - generic, centralized driver model | 2 | * device.h - generic, centralized driver model |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> | 4 | * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> |
5 | * Copyright (c) 2004-2007 Greg Kroah-Hartman <gregkh@suse.de> | 5 | * Copyright (c) 2004-2007 Greg Kroah-Hartman <gregkh@suse.de> |
6 | * | 6 | * |
7 | * This file is released under the GPLv2 | 7 | * This file is released under the GPLv2 |
8 | * | 8 | * |
9 | * See Documentation/driver-model/ for more information. | 9 | * See Documentation/driver-model/ for more information. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef _DEVICE_H_ | 12 | #ifndef _DEVICE_H_ |
13 | #define _DEVICE_H_ | 13 | #define _DEVICE_H_ |
14 | 14 | ||
15 | #include <linux/ioport.h> | 15 | #include <linux/ioport.h> |
16 | #include <linux/kobject.h> | 16 | #include <linux/kobject.h> |
17 | #include <linux/klist.h> | 17 | #include <linux/klist.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/lockdep.h> | 19 | #include <linux/lockdep.h> |
20 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/semaphore.h> | 24 | #include <linux/semaphore.h> |
25 | #include <asm/atomic.h> | 25 | #include <asm/atomic.h> |
26 | #include <asm/device.h> | 26 | #include <asm/device.h> |
27 | 27 | ||
28 | #define BUS_ID_SIZE 20 | 28 | #define BUS_ID_SIZE 20 |
29 | 29 | ||
30 | struct device; | 30 | struct device; |
31 | struct device_driver; | 31 | struct device_driver; |
32 | struct driver_private; | 32 | struct driver_private; |
33 | struct class; | 33 | struct class; |
34 | struct class_private; | 34 | struct class_private; |
35 | struct bus_type; | 35 | struct bus_type; |
36 | struct bus_type_private; | 36 | struct bus_type_private; |
37 | 37 | ||
/*
 * struct bus_attribute - sysfs attribute attached to a bus
 * @attr: underlying sysfs attribute (name and mode)
 * @show: read callback for the sysfs file
 * @store: write callback for the sysfs file
 */
struct bus_attribute {
	struct attribute attr;
	ssize_t (*show)(struct bus_type *bus, char *buf);
	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
};
43 | 43 | ||
44 | #define BUS_ATTR(_name, _mode, _show, _store) \ | 44 | #define BUS_ATTR(_name, _mode, _show, _store) \ |
45 | struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) | 45 | struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) |
46 | 46 | ||
47 | extern int __must_check bus_create_file(struct bus_type *, | 47 | extern int __must_check bus_create_file(struct bus_type *, |
48 | struct bus_attribute *); | 48 | struct bus_attribute *); |
49 | extern void bus_remove_file(struct bus_type *, struct bus_attribute *); | 49 | extern void bus_remove_file(struct bus_type *, struct bus_attribute *); |
50 | 50 | ||
/*
 * struct bus_type - a bus that devices and drivers attach to (e.g. PCI)
 * @name: bus name
 * @bus_attrs: default attributes of the bus itself
 * @dev_attrs: default attributes of devices on the bus
 * @drv_attrs: default attributes of drivers on the bus
 * @match: returns nonzero when @drv can handle @dev
 * @uevent: fills in environment variables for a device uevent
 * @probe: bind a driver to a newly matched device
 * @remove: unbind a driver from a device
 * @shutdown: quiesce a device at system shutdown
 * @suspend, @suspend_late, @resume_early, @resume: legacy per-bus
 *	suspend/resume callbacks (superseded by @pm for new code)
 * @pm: power management operations of this bus type (struct dev_pm_ops)
 * @p: driver-core private data; do not touch from bus implementations
 */
struct bus_type {
	const char *name;
	struct bus_attribute *bus_attrs;
	struct device_attribute *dev_attrs;
	struct driver_attribute *drv_attrs;

	int (*match)(struct device *dev, struct device_driver *drv);
	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
	int (*probe)(struct device *dev);
	int (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);

	int (*suspend)(struct device *dev, pm_message_t state);
	int (*suspend_late)(struct device *dev, pm_message_t state);
	int (*resume_early)(struct device *dev);
	int (*resume)(struct device *dev);

	struct dev_pm_ops *pm;

	struct bus_type_private *p;
};
72 | 72 | ||
73 | extern int __must_check bus_register(struct bus_type *bus); | 73 | extern int __must_check bus_register(struct bus_type *bus); |
74 | extern void bus_unregister(struct bus_type *bus); | 74 | extern void bus_unregister(struct bus_type *bus); |
75 | 75 | ||
76 | extern int __must_check bus_rescan_devices(struct bus_type *bus); | 76 | extern int __must_check bus_rescan_devices(struct bus_type *bus); |
77 | 77 | ||
78 | /* iterator helpers for buses */ | 78 | /* iterator helpers for buses */ |
79 | 79 | ||
80 | int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, | 80 | int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, |
81 | int (*fn)(struct device *dev, void *data)); | 81 | int (*fn)(struct device *dev, void *data)); |
82 | struct device *bus_find_device(struct bus_type *bus, struct device *start, | 82 | struct device *bus_find_device(struct bus_type *bus, struct device *start, |
83 | void *data, | 83 | void *data, |
84 | int (*match)(struct device *dev, void *data)); | 84 | int (*match)(struct device *dev, void *data)); |
85 | struct device *bus_find_device_by_name(struct bus_type *bus, | 85 | struct device *bus_find_device_by_name(struct bus_type *bus, |
86 | struct device *start, | 86 | struct device *start, |
87 | const char *name); | 87 | const char *name); |
88 | 88 | ||
89 | int __must_check bus_for_each_drv(struct bus_type *bus, | 89 | int __must_check bus_for_each_drv(struct bus_type *bus, |
90 | struct device_driver *start, void *data, | 90 | struct device_driver *start, void *data, |
91 | int (*fn)(struct device_driver *, void *)); | 91 | int (*fn)(struct device_driver *, void *)); |
92 | 92 | ||
93 | void bus_sort_breadthfirst(struct bus_type *bus, | 93 | void bus_sort_breadthfirst(struct bus_type *bus, |
94 | int (*compare)(const struct device *a, | 94 | int (*compare)(const struct device *a, |
95 | const struct device *b)); | 95 | const struct device *b)); |
96 | /* | 96 | /* |
97 | * Bus notifiers: Get notified of addition/removal of devices | 97 | * Bus notifiers: Get notified of addition/removal of devices |
98 | * and binding/unbinding of drivers to devices. | 98 | * and binding/unbinding of drivers to devices. |
99 | * In the long run, it should be a replacement for the platform | 99 | * In the long run, it should be a replacement for the platform |
100 | * notify hooks. | 100 | * notify hooks. |
101 | */ | 101 | */ |
102 | struct notifier_block; | 102 | struct notifier_block; |
103 | 103 | ||
104 | extern int bus_register_notifier(struct bus_type *bus, | 104 | extern int bus_register_notifier(struct bus_type *bus, |
105 | struct notifier_block *nb); | 105 | struct notifier_block *nb); |
106 | extern int bus_unregister_notifier(struct bus_type *bus, | 106 | extern int bus_unregister_notifier(struct bus_type *bus, |
107 | struct notifier_block *nb); | 107 | struct notifier_block *nb); |
108 | 108 | ||
109 | /* All 4 notifers below get called with the target struct device * | 109 | /* All 4 notifers below get called with the target struct device * |
110 | * as an argument. Note that those functions are likely to be called | 110 | * as an argument. Note that those functions are likely to be called |
111 | * with the device semaphore held in the core, so be careful. | 111 | * with the device semaphore held in the core, so be careful. |
112 | */ | 112 | */ |
113 | #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ | 113 | #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ |
114 | #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ | 114 | #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ |
115 | #define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */ | 115 | #define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */ |
116 | #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be | 116 | #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be |
117 | unbound */ | 117 | unbound */ |
118 | 118 | ||
119 | extern struct kset *bus_get_kset(struct bus_type *bus); | 119 | extern struct kset *bus_get_kset(struct bus_type *bus); |
120 | extern struct klist *bus_get_device_klist(struct bus_type *bus); | 120 | extern struct klist *bus_get_device_klist(struct bus_type *bus); |
121 | 121 | ||
/*
 * struct device_driver - basic device driver structure
 * @name: driver name, used when matching devices to drivers
 * @bus: the bus this driver's devices live on
 * @owner: owning module
 * @mod_name: used for built-in modules
 * @probe: called to bind this driver to a device
 * @remove: called when the device is removed or the driver is unbound
 * @shutdown: called at system shutdown
 * @suspend, @resume: legacy power-management callbacks (superseded by @pm)
 * @groups: default attribute groups created in sysfs for the driver
 * @pm: power management operations (struct dev_pm_ops); per the driver
 *	core PM rework, PCI/platform drivers put their suspend/hibernation
 *	callbacks here
 * @p: driver-core private data; drivers must not touch this
 */
struct device_driver {
	const char *name;
	struct bus_type *bus;

	struct module *owner;
	const char *mod_name;	/* used for built-in modules */

	int (*probe) (struct device *dev);
	int (*remove) (struct device *dev);
	void (*shutdown) (struct device *dev);
	int (*suspend) (struct device *dev, pm_message_t state);
	int (*resume) (struct device *dev);
	struct attribute_group **groups;

	struct dev_pm_ops *pm;

	struct driver_private *p;
};
140 | 140 | ||
141 | 141 | ||
142 | extern int __must_check driver_register(struct device_driver *drv); | 142 | extern int __must_check driver_register(struct device_driver *drv); |
143 | extern void driver_unregister(struct device_driver *drv); | 143 | extern void driver_unregister(struct device_driver *drv); |
144 | 144 | ||
145 | extern struct device_driver *get_driver(struct device_driver *drv); | 145 | extern struct device_driver *get_driver(struct device_driver *drv); |
146 | extern void put_driver(struct device_driver *drv); | 146 | extern void put_driver(struct device_driver *drv); |
147 | extern struct device_driver *driver_find(const char *name, | 147 | extern struct device_driver *driver_find(const char *name, |
148 | struct bus_type *bus); | 148 | struct bus_type *bus); |
149 | extern int driver_probe_done(void); | 149 | extern int driver_probe_done(void); |
150 | 150 | ||
151 | /* sysfs interface for exporting driver attributes */ | 151 | /* sysfs interface for exporting driver attributes */ |
152 | 152 | ||
153 | struct driver_attribute { | 153 | struct driver_attribute { |
154 | struct attribute attr; | 154 | struct attribute attr; |
155 | ssize_t (*show)(struct device_driver *driver, char *buf); | 155 | ssize_t (*show)(struct device_driver *driver, char *buf); |
156 | ssize_t (*store)(struct device_driver *driver, const char *buf, | 156 | ssize_t (*store)(struct device_driver *driver, const char *buf, |
157 | size_t count); | 157 | size_t count); |
158 | }; | 158 | }; |
159 | 159 | ||
160 | #define DRIVER_ATTR(_name, _mode, _show, _store) \ | 160 | #define DRIVER_ATTR(_name, _mode, _show, _store) \ |
161 | struct driver_attribute driver_attr_##_name = \ | 161 | struct driver_attribute driver_attr_##_name = \ |
162 | __ATTR(_name, _mode, _show, _store) | 162 | __ATTR(_name, _mode, _show, _store) |
163 | 163 | ||
164 | extern int __must_check driver_create_file(struct device_driver *driver, | 164 | extern int __must_check driver_create_file(struct device_driver *driver, |
165 | struct driver_attribute *attr); | 165 | struct driver_attribute *attr); |
166 | extern void driver_remove_file(struct device_driver *driver, | 166 | extern void driver_remove_file(struct device_driver *driver, |
167 | struct driver_attribute *attr); | 167 | struct driver_attribute *attr); |
168 | 168 | ||
169 | extern int __must_check driver_add_kobj(struct device_driver *drv, | 169 | extern int __must_check driver_add_kobj(struct device_driver *drv, |
170 | struct kobject *kobj, | 170 | struct kobject *kobj, |
171 | const char *fmt, ...); | 171 | const char *fmt, ...); |
172 | 172 | ||
173 | extern int __must_check driver_for_each_device(struct device_driver *drv, | 173 | extern int __must_check driver_for_each_device(struct device_driver *drv, |
174 | struct device *start, | 174 | struct device *start, |
175 | void *data, | 175 | void *data, |
176 | int (*fn)(struct device *dev, | 176 | int (*fn)(struct device *dev, |
177 | void *)); | 177 | void *)); |
178 | struct device *driver_find_device(struct device_driver *drv, | 178 | struct device *driver_find_device(struct device_driver *drv, |
179 | struct device *start, void *data, | 179 | struct device *start, void *data, |
180 | int (*match)(struct device *dev, void *data)); | 180 | int (*match)(struct device *dev, void *data)); |
181 | 181 | ||
/*
 * device classes
 *
 * struct class - a higher-level ("what it does") grouping of devices
 * @name: class name, shown in sysfs
 * @owner: owning module
 * @class_attrs: default attributes of the class
 * @dev_attrs: default attributes of devices in this class
 * @dev_kobj: kobject associated with the class's devices
 *	(NOTE(review): exact role not visible here — see class.c)
 * @dev_uevent: fills in environment variables for a device uevent
 * @class_release: release the class itself
 * @dev_release: release a device of this class
 * @suspend, @resume: legacy power-management callbacks (superseded by @pm)
 * @pm: power management operations (struct dev_pm_ops)
 * @p: driver-core private data
 */
struct class {
	const char *name;
	struct module *owner;

	struct class_attribute *class_attrs;
	struct device_attribute *dev_attrs;
	struct kobject *dev_kobj;

	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);

	void (*class_release)(struct class *class);
	void (*dev_release)(struct device *dev);

	int (*suspend)(struct device *dev, pm_message_t state);
	int (*resume)(struct device *dev);

	struct dev_pm_ops *pm;
	struct class_private *p;
};
204 | 204 | ||
/*
 * struct class_dev_iter - iterator state for walking a class's devices
 * @ki: underlying klist iterator
 * @type: device_type filter passed to class_dev_iter_init()
 *	(presumably restricts iteration to devices of that type —
 *	confirm against class_dev_iter_next() in class.c)
 */
struct class_dev_iter {
	struct klist_iter ki;
	const struct device_type *type;
};
209 | 209 | ||
210 | extern struct kobject *sysfs_dev_block_kobj; | 210 | extern struct kobject *sysfs_dev_block_kobj; |
211 | extern struct kobject *sysfs_dev_char_kobj; | 211 | extern struct kobject *sysfs_dev_char_kobj; |
212 | extern int __must_check __class_register(struct class *class, | 212 | extern int __must_check __class_register(struct class *class, |
213 | struct lock_class_key *key); | 213 | struct lock_class_key *key); |
214 | extern void class_unregister(struct class *class); | 214 | extern void class_unregister(struct class *class); |
215 | 215 | ||
216 | /* This is a #define to keep the compiler from merging different | 216 | /* This is a #define to keep the compiler from merging different |
217 | * instances of the __key variable */ | 217 | * instances of the __key variable */ |
218 | #define class_register(class) \ | 218 | #define class_register(class) \ |
219 | ({ \ | 219 | ({ \ |
220 | static struct lock_class_key __key; \ | 220 | static struct lock_class_key __key; \ |
221 | __class_register(class, &__key); \ | 221 | __class_register(class, &__key); \ |
222 | }) | 222 | }) |
223 | 223 | ||
224 | extern void class_dev_iter_init(struct class_dev_iter *iter, | 224 | extern void class_dev_iter_init(struct class_dev_iter *iter, |
225 | struct class *class, | 225 | struct class *class, |
226 | struct device *start, | 226 | struct device *start, |
227 | const struct device_type *type); | 227 | const struct device_type *type); |
228 | extern struct device *class_dev_iter_next(struct class_dev_iter *iter); | 228 | extern struct device *class_dev_iter_next(struct class_dev_iter *iter); |
229 | extern void class_dev_iter_exit(struct class_dev_iter *iter); | 229 | extern void class_dev_iter_exit(struct class_dev_iter *iter); |
230 | 230 | ||
231 | extern int class_for_each_device(struct class *class, struct device *start, | 231 | extern int class_for_each_device(struct class *class, struct device *start, |
232 | void *data, | 232 | void *data, |
233 | int (*fn)(struct device *dev, void *data)); | 233 | int (*fn)(struct device *dev, void *data)); |
234 | extern struct device *class_find_device(struct class *class, | 234 | extern struct device *class_find_device(struct class *class, |
235 | struct device *start, void *data, | 235 | struct device *start, void *data, |
236 | int (*match)(struct device *, void *)); | 236 | int (*match)(struct device *, void *)); |
237 | 237 | ||
/*
 * struct class_attribute - sysfs attribute attached to a class
 * @attr: underlying sysfs attribute (name and mode)
 * @show: read callback for the sysfs file
 * @store: write callback for the sysfs file
 */
struct class_attribute {
	struct attribute attr;
	ssize_t (*show)(struct class *class, char *buf);
	ssize_t (*store)(struct class *class, const char *buf, size_t count);
};
243 | 243 | ||
244 | #define CLASS_ATTR(_name, _mode, _show, _store) \ | 244 | #define CLASS_ATTR(_name, _mode, _show, _store) \ |
245 | struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) | 245 | struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) |
246 | 246 | ||
247 | extern int __must_check class_create_file(struct class *class, | 247 | extern int __must_check class_create_file(struct class *class, |
248 | const struct class_attribute *attr); | 248 | const struct class_attribute *attr); |
249 | extern void class_remove_file(struct class *class, | 249 | extern void class_remove_file(struct class *class, |
250 | const struct class_attribute *attr); | 250 | const struct class_attribute *attr); |
251 | 251 | ||
/*
 * struct class_interface - observe devices entering/leaving a class
 * @node: list linkage (managed by the driver core)
 * @class: the class being watched
 * @add_dev: callback invoked per device (by name, when a device is
 *	added to the class — confirm against class_interface_register())
 * @remove_dev: callback invoked per device (by name, when a device
 *	leaves the class)
 */
struct class_interface {
	struct list_head node;
	struct class *class;

	int (*add_dev) (struct device *, struct class_interface *);
	void (*remove_dev) (struct device *, struct class_interface *);
};
259 | 259 | ||
260 | extern int __must_check class_interface_register(struct class_interface *); | 260 | extern int __must_check class_interface_register(struct class_interface *); |
261 | extern void class_interface_unregister(struct class_interface *); | 261 | extern void class_interface_unregister(struct class_interface *); |
262 | 262 | ||
263 | extern struct class * __must_check __class_create(struct module *owner, | 263 | extern struct class * __must_check __class_create(struct module *owner, |
264 | const char *name, | 264 | const char *name, |
265 | struct lock_class_key *key); | 265 | struct lock_class_key *key); |
266 | extern void class_destroy(struct class *cls); | 266 | extern void class_destroy(struct class *cls); |
267 | 267 | ||
268 | /* This is a #define to keep the compiler from merging different | 268 | /* This is a #define to keep the compiler from merging different |
269 | * instances of the __key variable */ | 269 | * instances of the __key variable */ |
270 | #define class_create(owner, name) \ | 270 | #define class_create(owner, name) \ |
271 | ({ \ | 271 | ({ \ |
272 | static struct lock_class_key __key; \ | 272 | static struct lock_class_key __key; \ |
273 | __class_create(owner, name, &__key); \ | 273 | __class_create(owner, name, &__key); \ |
274 | }) | 274 | }) |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * The type of device, "struct device" is embedded in. A class | 277 | * The type of device, "struct device" is embedded in. A class |
278 | * or bus can contain devices of different types | 278 | * or bus can contain devices of different types |
279 | * like "partitions" and "disks", "mouse" and "event". | 279 | * like "partitions" and "disks", "mouse" and "event". |
280 | * This identifies the device type and carries type-specific | 280 | * This identifies the device type and carries type-specific |
281 | * information, equivalent to the kobj_type of a kobject. | 281 | * information, equivalent to the kobj_type of a kobject. |
282 | * If "name" is specified, the uevent will contain it in | 282 | * If "name" is specified, the uevent will contain it in |
283 | * the DEVTYPE variable. | 283 | * the DEVTYPE variable. |
284 | */ | 284 | */ |
285 | struct device_type { | 285 | struct device_type { |
286 | const char *name; | 286 | const char *name; |
287 | struct attribute_group **groups; | 287 | struct attribute_group **groups; |
288 | int (*uevent)(struct device *dev, struct kobj_uevent_env *env); | 288 | int (*uevent)(struct device *dev, struct kobj_uevent_env *env); |
289 | void (*release)(struct device *dev); | 289 | void (*release)(struct device *dev); |
290 | 290 | ||
291 | int (*suspend)(struct device *dev, pm_message_t state); | 291 | int (*suspend)(struct device *dev, pm_message_t state); |
292 | int (*resume)(struct device *dev); | 292 | int (*resume)(struct device *dev); |
293 | 293 | ||
294 | struct pm_ops *pm; | 294 | struct dev_pm_ops *pm; |
295 | }; | 295 | }; |
296 | 296 | ||
297 | /* interface for exporting device attributes */ | 297 | /* interface for exporting device attributes */ |
298 | struct device_attribute { | 298 | struct device_attribute { |
299 | struct attribute attr; | 299 | struct attribute attr; |
300 | ssize_t (*show)(struct device *dev, struct device_attribute *attr, | 300 | ssize_t (*show)(struct device *dev, struct device_attribute *attr, |
301 | char *buf); | 301 | char *buf); |
302 | ssize_t (*store)(struct device *dev, struct device_attribute *attr, | 302 | ssize_t (*store)(struct device *dev, struct device_attribute *attr, |
303 | const char *buf, size_t count); | 303 | const char *buf, size_t count); |
304 | }; | 304 | }; |
305 | 305 | ||
306 | #define DEVICE_ATTR(_name, _mode, _show, _store) \ | 306 | #define DEVICE_ATTR(_name, _mode, _show, _store) \ |
307 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) | 307 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) |
308 | 308 | ||
309 | extern int __must_check device_create_file(struct device *device, | 309 | extern int __must_check device_create_file(struct device *device, |
310 | struct device_attribute *entry); | 310 | struct device_attribute *entry); |
311 | extern void device_remove_file(struct device *dev, | 311 | extern void device_remove_file(struct device *dev, |
312 | struct device_attribute *attr); | 312 | struct device_attribute *attr); |
313 | extern int __must_check device_create_bin_file(struct device *dev, | 313 | extern int __must_check device_create_bin_file(struct device *dev, |
314 | struct bin_attribute *attr); | 314 | struct bin_attribute *attr); |
315 | extern void device_remove_bin_file(struct device *dev, | 315 | extern void device_remove_bin_file(struct device *dev, |
316 | struct bin_attribute *attr); | 316 | struct bin_attribute *attr); |
317 | extern int device_schedule_callback_owner(struct device *dev, | 317 | extern int device_schedule_callback_owner(struct device *dev, |
318 | void (*func)(struct device *dev), struct module *owner); | 318 | void (*func)(struct device *dev), struct module *owner); |
319 | 319 | ||
320 | /* This is a macro to avoid include problems with THIS_MODULE */ | 320 | /* This is a macro to avoid include problems with THIS_MODULE */ |
321 | #define device_schedule_callback(dev, func) \ | 321 | #define device_schedule_callback(dev, func) \ |
322 | device_schedule_callback_owner(dev, func, THIS_MODULE) | 322 | device_schedule_callback_owner(dev, func, THIS_MODULE) |
323 | 323 | ||
324 | /* device resource management */ | 324 | /* device resource management */ |
325 | typedef void (*dr_release_t)(struct device *dev, void *res); | 325 | typedef void (*dr_release_t)(struct device *dev, void *res); |
326 | typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); | 326 | typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); |
327 | 327 | ||
328 | #ifdef CONFIG_DEBUG_DEVRES | 328 | #ifdef CONFIG_DEBUG_DEVRES |
329 | extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, | 329 | extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, |
330 | const char *name); | 330 | const char *name); |
331 | #define devres_alloc(release, size, gfp) \ | 331 | #define devres_alloc(release, size, gfp) \ |
332 | __devres_alloc(release, size, gfp, #release) | 332 | __devres_alloc(release, size, gfp, #release) |
333 | #else | 333 | #else |
334 | extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); | 334 | extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); |
335 | #endif | 335 | #endif |
336 | extern void devres_free(void *res); | 336 | extern void devres_free(void *res); |
337 | extern void devres_add(struct device *dev, void *res); | 337 | extern void devres_add(struct device *dev, void *res); |
338 | extern void *devres_find(struct device *dev, dr_release_t release, | 338 | extern void *devres_find(struct device *dev, dr_release_t release, |
339 | dr_match_t match, void *match_data); | 339 | dr_match_t match, void *match_data); |
340 | extern void *devres_get(struct device *dev, void *new_res, | 340 | extern void *devres_get(struct device *dev, void *new_res, |
341 | dr_match_t match, void *match_data); | 341 | dr_match_t match, void *match_data); |
342 | extern void *devres_remove(struct device *dev, dr_release_t release, | 342 | extern void *devres_remove(struct device *dev, dr_release_t release, |
343 | dr_match_t match, void *match_data); | 343 | dr_match_t match, void *match_data); |
344 | extern int devres_destroy(struct device *dev, dr_release_t release, | 344 | extern int devres_destroy(struct device *dev, dr_release_t release, |
345 | dr_match_t match, void *match_data); | 345 | dr_match_t match, void *match_data); |
346 | 346 | ||
347 | /* devres group */ | 347 | /* devres group */ |
348 | extern void * __must_check devres_open_group(struct device *dev, void *id, | 348 | extern void * __must_check devres_open_group(struct device *dev, void *id, |
349 | gfp_t gfp); | 349 | gfp_t gfp); |
350 | extern void devres_close_group(struct device *dev, void *id); | 350 | extern void devres_close_group(struct device *dev, void *id); |
351 | extern void devres_remove_group(struct device *dev, void *id); | 351 | extern void devres_remove_group(struct device *dev, void *id); |
352 | extern int devres_release_group(struct device *dev, void *id); | 352 | extern int devres_release_group(struct device *dev, void *id); |
353 | 353 | ||
354 | /* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */ | 354 | /* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */ |
355 | extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); | 355 | extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); |
356 | extern void devm_kfree(struct device *dev, void *p); | 356 | extern void devm_kfree(struct device *dev, void *p); |
357 | 357 | ||
358 | struct device_dma_parameters { | 358 | struct device_dma_parameters { |
359 | /* | 359 | /* |
360 | * a low level driver may set these to teach IOMMU code about | 360 | * a low level driver may set these to teach IOMMU code about |
361 | * sg limitations. | 361 | * sg limitations. |
362 | */ | 362 | */ |
363 | unsigned int max_segment_size; | 363 | unsigned int max_segment_size; |
364 | unsigned long segment_boundary_mask; | 364 | unsigned long segment_boundary_mask; |
365 | }; | 365 | }; |
366 | 366 | ||
367 | struct device { | 367 | struct device { |
368 | struct klist klist_children; | 368 | struct klist klist_children; |
369 | struct klist_node knode_parent; /* node in sibling list */ | 369 | struct klist_node knode_parent; /* node in sibling list */ |
370 | struct klist_node knode_driver; | 370 | struct klist_node knode_driver; |
371 | struct klist_node knode_bus; | 371 | struct klist_node knode_bus; |
372 | struct device *parent; | 372 | struct device *parent; |
373 | 373 | ||
374 | struct kobject kobj; | 374 | struct kobject kobj; |
375 | char bus_id[BUS_ID_SIZE]; /* position on parent bus */ | 375 | char bus_id[BUS_ID_SIZE]; /* position on parent bus */ |
376 | const char *init_name; /* initial name of the device */ | 376 | const char *init_name; /* initial name of the device */ |
377 | struct device_type *type; | 377 | struct device_type *type; |
378 | unsigned uevent_suppress:1; | 378 | unsigned uevent_suppress:1; |
379 | 379 | ||
380 | struct semaphore sem; /* semaphore to synchronize calls to | 380 | struct semaphore sem; /* semaphore to synchronize calls to |
381 | * its driver. | 381 | * its driver. |
382 | */ | 382 | */ |
383 | 383 | ||
384 | struct bus_type *bus; /* type of bus device is on */ | 384 | struct bus_type *bus; /* type of bus device is on */ |
385 | struct device_driver *driver; /* which driver has allocated this | 385 | struct device_driver *driver; /* which driver has allocated this |
386 | device */ | 386 | device */ |
387 | void *driver_data; /* data private to the driver */ | 387 | void *driver_data; /* data private to the driver */ |
388 | void *platform_data; /* Platform specific data, device | 388 | void *platform_data; /* Platform specific data, device |
389 | core doesn't touch it */ | 389 | core doesn't touch it */ |
390 | struct dev_pm_info power; | 390 | struct dev_pm_info power; |
391 | 391 | ||
392 | #ifdef CONFIG_NUMA | 392 | #ifdef CONFIG_NUMA |
393 | int numa_node; /* NUMA node this device is close to */ | 393 | int numa_node; /* NUMA node this device is close to */ |
394 | #endif | 394 | #endif |
395 | u64 *dma_mask; /* dma mask (if dma'able device) */ | 395 | u64 *dma_mask; /* dma mask (if dma'able device) */ |
396 | u64 coherent_dma_mask;/* Like dma_mask, but for | 396 | u64 coherent_dma_mask;/* Like dma_mask, but for |
397 | alloc_coherent mappings as | 397 | alloc_coherent mappings as |
398 | not all hardware supports | 398 | not all hardware supports |
399 | 64 bit addresses for consistent | 399 | 64 bit addresses for consistent |
400 | allocations such descriptors. */ | 400 | allocations such descriptors. */ |
401 | 401 | ||
402 | struct device_dma_parameters *dma_parms; | 402 | struct device_dma_parameters *dma_parms; |
403 | 403 | ||
404 | struct list_head dma_pools; /* dma pools (if dma'ble) */ | 404 | struct list_head dma_pools; /* dma pools (if dma'ble) */ |
405 | 405 | ||
406 | struct dma_coherent_mem *dma_mem; /* internal for coherent mem | 406 | struct dma_coherent_mem *dma_mem; /* internal for coherent mem |
407 | override */ | 407 | override */ |
408 | /* arch specific additions */ | 408 | /* arch specific additions */ |
409 | struct dev_archdata archdata; | 409 | struct dev_archdata archdata; |
410 | 410 | ||
411 | spinlock_t devres_lock; | 411 | spinlock_t devres_lock; |
412 | struct list_head devres_head; | 412 | struct list_head devres_head; |
413 | 413 | ||
414 | struct klist_node knode_class; | 414 | struct klist_node knode_class; |
415 | struct class *class; | 415 | struct class *class; |
416 | dev_t devt; /* dev_t, creates the sysfs "dev" */ | 416 | dev_t devt; /* dev_t, creates the sysfs "dev" */ |
417 | struct attribute_group **groups; /* optional groups */ | 417 | struct attribute_group **groups; /* optional groups */ |
418 | 418 | ||
419 | void (*release)(struct device *dev); | 419 | void (*release)(struct device *dev); |
420 | }; | 420 | }; |
421 | 421 | ||
422 | /* Get the wakeup routines, which depend on struct device */ | 422 | /* Get the wakeup routines, which depend on struct device */ |
423 | #include <linux/pm_wakeup.h> | 423 | #include <linux/pm_wakeup.h> |
424 | 424 | ||
425 | static inline const char *dev_name(const struct device *dev) | 425 | static inline const char *dev_name(const struct device *dev) |
426 | { | 426 | { |
427 | /* will be changed into kobject_name(&dev->kobj) in the near future */ | 427 | /* will be changed into kobject_name(&dev->kobj) in the near future */ |
428 | return dev->bus_id; | 428 | return dev->bus_id; |
429 | } | 429 | } |
430 | 430 | ||
431 | extern int dev_set_name(struct device *dev, const char *name, ...) | 431 | extern int dev_set_name(struct device *dev, const char *name, ...) |
432 | __attribute__((format(printf, 2, 3))); | 432 | __attribute__((format(printf, 2, 3))); |
433 | 433 | ||
434 | #ifdef CONFIG_NUMA | 434 | #ifdef CONFIG_NUMA |
435 | static inline int dev_to_node(struct device *dev) | 435 | static inline int dev_to_node(struct device *dev) |
436 | { | 436 | { |
437 | return dev->numa_node; | 437 | return dev->numa_node; |
438 | } | 438 | } |
439 | static inline void set_dev_node(struct device *dev, int node) | 439 | static inline void set_dev_node(struct device *dev, int node) |
440 | { | 440 | { |
441 | dev->numa_node = node; | 441 | dev->numa_node = node; |
442 | } | 442 | } |
443 | #else | 443 | #else |
444 | static inline int dev_to_node(struct device *dev) | 444 | static inline int dev_to_node(struct device *dev) |
445 | { | 445 | { |
446 | return -1; | 446 | return -1; |
447 | } | 447 | } |
448 | static inline void set_dev_node(struct device *dev, int node) | 448 | static inline void set_dev_node(struct device *dev, int node) |
449 | { | 449 | { |
450 | } | 450 | } |
451 | #endif | 451 | #endif |
452 | 452 | ||
453 | static inline void *dev_get_drvdata(const struct device *dev) | 453 | static inline void *dev_get_drvdata(const struct device *dev) |
454 | { | 454 | { |
455 | return dev->driver_data; | 455 | return dev->driver_data; |
456 | } | 456 | } |
457 | 457 | ||
458 | static inline void dev_set_drvdata(struct device *dev, void *data) | 458 | static inline void dev_set_drvdata(struct device *dev, void *data) |
459 | { | 459 | { |
460 | dev->driver_data = data; | 460 | dev->driver_data = data; |
461 | } | 461 | } |
462 | 462 | ||
463 | static inline int device_is_registered(struct device *dev) | 463 | static inline int device_is_registered(struct device *dev) |
464 | { | 464 | { |
465 | return dev->kobj.state_in_sysfs; | 465 | return dev->kobj.state_in_sysfs; |
466 | } | 466 | } |
467 | 467 | ||
468 | void driver_init(void); | 468 | void driver_init(void); |
469 | 469 | ||
470 | /* | 470 | /* |
471 | * High level routines for use by the bus drivers | 471 | * High level routines for use by the bus drivers |
472 | */ | 472 | */ |
473 | extern int __must_check device_register(struct device *dev); | 473 | extern int __must_check device_register(struct device *dev); |
474 | extern void device_unregister(struct device *dev); | 474 | extern void device_unregister(struct device *dev); |
475 | extern void device_initialize(struct device *dev); | 475 | extern void device_initialize(struct device *dev); |
476 | extern int __must_check device_add(struct device *dev); | 476 | extern int __must_check device_add(struct device *dev); |
477 | extern void device_del(struct device *dev); | 477 | extern void device_del(struct device *dev); |
478 | extern int device_for_each_child(struct device *dev, void *data, | 478 | extern int device_for_each_child(struct device *dev, void *data, |
479 | int (*fn)(struct device *dev, void *data)); | 479 | int (*fn)(struct device *dev, void *data)); |
480 | extern struct device *device_find_child(struct device *dev, void *data, | 480 | extern struct device *device_find_child(struct device *dev, void *data, |
481 | int (*match)(struct device *dev, void *data)); | 481 | int (*match)(struct device *dev, void *data)); |
482 | extern int device_rename(struct device *dev, char *new_name); | 482 | extern int device_rename(struct device *dev, char *new_name); |
483 | extern int device_move(struct device *dev, struct device *new_parent); | 483 | extern int device_move(struct device *dev, struct device *new_parent); |
484 | 484 | ||
485 | /* | 485 | /* |
486 | * Manual binding of a device to driver. See drivers/base/bus.c | 486 | * Manual binding of a device to driver. See drivers/base/bus.c |
487 | * for information on use. | 487 | * for information on use. |
488 | */ | 488 | */ |
489 | extern int __must_check device_bind_driver(struct device *dev); | 489 | extern int __must_check device_bind_driver(struct device *dev); |
490 | extern void device_release_driver(struct device *dev); | 490 | extern void device_release_driver(struct device *dev); |
491 | extern int __must_check device_attach(struct device *dev); | 491 | extern int __must_check device_attach(struct device *dev); |
492 | extern int __must_check driver_attach(struct device_driver *drv); | 492 | extern int __must_check driver_attach(struct device_driver *drv); |
493 | extern int __must_check device_reprobe(struct device *dev); | 493 | extern int __must_check device_reprobe(struct device *dev); |
494 | 494 | ||
495 | /* | 495 | /* |
496 | * Easy functions for dynamically creating devices on the fly | 496 | * Easy functions for dynamically creating devices on the fly |
497 | */ | 497 | */ |
498 | extern struct device *device_create_vargs(struct class *cls, | 498 | extern struct device *device_create_vargs(struct class *cls, |
499 | struct device *parent, | 499 | struct device *parent, |
500 | dev_t devt, | 500 | dev_t devt, |
501 | void *drvdata, | 501 | void *drvdata, |
502 | const char *fmt, | 502 | const char *fmt, |
503 | va_list vargs); | 503 | va_list vargs); |
504 | extern struct device *device_create(struct class *cls, struct device *parent, | 504 | extern struct device *device_create(struct class *cls, struct device *parent, |
505 | dev_t devt, void *drvdata, | 505 | dev_t devt, void *drvdata, |
506 | const char *fmt, ...) | 506 | const char *fmt, ...) |
507 | __attribute__((format(printf, 5, 6))); | 507 | __attribute__((format(printf, 5, 6))); |
508 | extern void device_destroy(struct class *cls, dev_t devt); | 508 | extern void device_destroy(struct class *cls, dev_t devt); |
509 | 509 | ||
510 | /* | 510 | /* |
511 | * Platform "fixup" functions - allow the platform to have their say | 511 | * Platform "fixup" functions - allow the platform to have their say |
512 | * about devices and actions that the general device layer doesn't | 512 | * about devices and actions that the general device layer doesn't |
513 | * know about. | 513 | * know about. |
514 | */ | 514 | */ |
515 | /* Notify platform of device discovery */ | 515 | /* Notify platform of device discovery */ |
516 | extern int (*platform_notify)(struct device *dev); | 516 | extern int (*platform_notify)(struct device *dev); |
517 | 517 | ||
518 | extern int (*platform_notify_remove)(struct device *dev); | 518 | extern int (*platform_notify_remove)(struct device *dev); |
519 | 519 | ||
520 | 520 | ||
521 | /** | 521 | /** |
522 | * get_device - atomically increment the reference count for the device. | 522 | * get_device - atomically increment the reference count for the device. |
523 | * | 523 | * |
524 | */ | 524 | */ |
525 | extern struct device *get_device(struct device *dev); | 525 | extern struct device *get_device(struct device *dev); |
526 | extern void put_device(struct device *dev); | 526 | extern void put_device(struct device *dev); |
527 | 527 | ||
528 | 528 | ||
529 | /* drivers/base/power/shutdown.c */ | 529 | /* drivers/base/power/shutdown.c */ |
530 | extern void device_shutdown(void); | 530 | extern void device_shutdown(void); |
531 | 531 | ||
532 | /* drivers/base/sys.c */ | 532 | /* drivers/base/sys.c */ |
533 | extern void sysdev_shutdown(void); | 533 | extern void sysdev_shutdown(void); |
534 | 534 | ||
535 | /* debugging and troubleshooting/diagnostic helpers. */ | 535 | /* debugging and troubleshooting/diagnostic helpers. */ |
536 | extern const char *dev_driver_string(const struct device *dev); | 536 | extern const char *dev_driver_string(const struct device *dev); |
537 | #define dev_printk(level, dev, format, arg...) \ | 537 | #define dev_printk(level, dev, format, arg...) \ |
538 | printk(level "%s %s: " format , dev_driver_string(dev) , \ | 538 | printk(level "%s %s: " format , dev_driver_string(dev) , \ |
539 | dev_name(dev) , ## arg) | 539 | dev_name(dev) , ## arg) |
540 | 540 | ||
541 | #define dev_emerg(dev, format, arg...) \ | 541 | #define dev_emerg(dev, format, arg...) \ |
542 | dev_printk(KERN_EMERG , dev , format , ## arg) | 542 | dev_printk(KERN_EMERG , dev , format , ## arg) |
543 | #define dev_alert(dev, format, arg...) \ | 543 | #define dev_alert(dev, format, arg...) \ |
544 | dev_printk(KERN_ALERT , dev , format , ## arg) | 544 | dev_printk(KERN_ALERT , dev , format , ## arg) |
545 | #define dev_crit(dev, format, arg...) \ | 545 | #define dev_crit(dev, format, arg...) \ |
546 | dev_printk(KERN_CRIT , dev , format , ## arg) | 546 | dev_printk(KERN_CRIT , dev , format , ## arg) |
547 | #define dev_err(dev, format, arg...) \ | 547 | #define dev_err(dev, format, arg...) \ |
548 | dev_printk(KERN_ERR , dev , format , ## arg) | 548 | dev_printk(KERN_ERR , dev , format , ## arg) |
549 | #define dev_warn(dev, format, arg...) \ | 549 | #define dev_warn(dev, format, arg...) \ |
550 | dev_printk(KERN_WARNING , dev , format , ## arg) | 550 | dev_printk(KERN_WARNING , dev , format , ## arg) |
551 | #define dev_notice(dev, format, arg...) \ | 551 | #define dev_notice(dev, format, arg...) \ |
552 | dev_printk(KERN_NOTICE , dev , format , ## arg) | 552 | dev_printk(KERN_NOTICE , dev , format , ## arg) |
553 | #define dev_info(dev, format, arg...) \ | 553 | #define dev_info(dev, format, arg...) \ |
554 | dev_printk(KERN_INFO , dev , format , ## arg) | 554 | dev_printk(KERN_INFO , dev , format , ## arg) |
555 | 555 | ||
556 | #if defined(CONFIG_DYNAMIC_PRINTK_DEBUG) | 556 | #if defined(CONFIG_DYNAMIC_PRINTK_DEBUG) |
557 | #define dev_dbg(dev, format, ...) do { \ | 557 | #define dev_dbg(dev, format, ...) do { \ |
558 | dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ | 558 | dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ |
559 | } while (0) | 559 | } while (0) |
560 | #elif defined(DEBUG) | 560 | #elif defined(DEBUG) |
561 | #define dev_dbg(dev, format, arg...) \ | 561 | #define dev_dbg(dev, format, arg...) \ |
562 | dev_printk(KERN_DEBUG , dev , format , ## arg) | 562 | dev_printk(KERN_DEBUG , dev , format , ## arg) |
563 | #else | 563 | #else |
564 | #define dev_dbg(dev, format, arg...) \ | 564 | #define dev_dbg(dev, format, arg...) \ |
565 | ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) | 565 | ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) |
566 | #endif | 566 | #endif |
567 | 567 | ||
568 | #ifdef VERBOSE_DEBUG | 568 | #ifdef VERBOSE_DEBUG |
569 | #define dev_vdbg dev_dbg | 569 | #define dev_vdbg dev_dbg |
570 | #else | 570 | #else |
571 | 571 | ||
572 | #define dev_vdbg(dev, format, arg...) \ | 572 | #define dev_vdbg(dev, format, arg...) \ |
573 | ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) | 573 | ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) |
574 | #endif | 574 | #endif |
575 | 575 | ||
576 | /* | 576 | /* |
577 | * dev_WARN() acts like dev_printk(), but with the key difference | 577 | * dev_WARN() acts like dev_printk(), but with the key difference |
578 | * of using a WARN/WARN_ON to get the message out, including the | 578 | * of using a WARN/WARN_ON to get the message out, including the |
579 | * file/line information and a backtrace. | 579 | * file/line information and a backtrace. |
580 | */ | 580 | */ |
581 | #define dev_WARN(dev, format, arg...) \ | 581 | #define dev_WARN(dev, format, arg...) \ |
582 | WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); | 582 | WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); |
583 | 583 | ||
584 | /* Create alias, so I can be autoloaded. */ | 584 | /* Create alias, so I can be autoloaded. */ |
585 | #define MODULE_ALIAS_CHARDEV(major,minor) \ | 585 | #define MODULE_ALIAS_CHARDEV(major,minor) \ |
586 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) | 586 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) |
587 | #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ | 587 | #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ |
588 | MODULE_ALIAS("char-major-" __stringify(major) "-*") | 588 | MODULE_ALIAS("char-major-" __stringify(major) "-*") |
589 | #endif /* _DEVICE_H_ */ | 589 | #endif /* _DEVICE_H_ */ |
590 | 590 |
include/linux/pci.h
1 | /* | 1 | /* |
2 | * pci.h | 2 | * pci.h |
3 | * | 3 | * |
4 | * PCI defines and function prototypes | 4 | * PCI defines and function prototypes |
5 | * Copyright 1994, Drew Eckhardt | 5 | * Copyright 1994, Drew Eckhardt |
6 | * Copyright 1997--1999 Martin Mares <mj@ucw.cz> | 6 | * Copyright 1997--1999 Martin Mares <mj@ucw.cz> |
7 | * | 7 | * |
8 | * For more information, please consult the following manuals (look at | 8 | * For more information, please consult the following manuals (look at |
9 | * http://www.pcisig.com/ for how to get them): | 9 | * http://www.pcisig.com/ for how to get them): |
10 | * | 10 | * |
11 | * PCI BIOS Specification | 11 | * PCI BIOS Specification |
12 | * PCI Local Bus Specification | 12 | * PCI Local Bus Specification |
13 | * PCI to PCI Bridge Specification | 13 | * PCI to PCI Bridge Specification |
14 | * PCI System Design Guide | 14 | * PCI System Design Guide |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef LINUX_PCI_H | 17 | #ifndef LINUX_PCI_H |
18 | #define LINUX_PCI_H | 18 | #define LINUX_PCI_H |
19 | 19 | ||
20 | #include <linux/pci_regs.h> /* The pci register defines */ | 20 | #include <linux/pci_regs.h> /* The pci register defines */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * The PCI interface treats multi-function devices as independent | 23 | * The PCI interface treats multi-function devices as independent |
24 | * devices. The slot/function address of each device is encoded | 24 | * devices. The slot/function address of each device is encoded |
25 | * in a single byte as follows: | 25 | * in a single byte as follows: |
26 | * | 26 | * |
27 | * 7:3 = slot | 27 | * 7:3 = slot |
28 | * 2:0 = function | 28 | * 2:0 = function |
29 | */ | 29 | */ |
30 | #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) | 30 | #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) |
31 | #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) | 31 | #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) |
32 | #define PCI_FUNC(devfn) ((devfn) & 0x07) | 32 | #define PCI_FUNC(devfn) ((devfn) & 0x07) |
33 | 33 | ||
34 | /* Ioctls for /proc/bus/pci/X/Y nodes. */ | 34 | /* Ioctls for /proc/bus/pci/X/Y nodes. */ |
35 | #define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) | 35 | #define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) |
36 | #define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ | 36 | #define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ |
37 | #define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ | 37 | #define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ |
38 | #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ | 38 | #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ |
39 | #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ | 39 | #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ |
40 | 40 | ||
41 | #ifdef __KERNEL__ | 41 | #ifdef __KERNEL__ |
42 | 42 | ||
43 | #include <linux/mod_devicetable.h> | 43 | #include <linux/mod_devicetable.h> |
44 | 44 | ||
45 | #include <linux/types.h> | 45 | #include <linux/types.h> |
46 | #include <linux/init.h> | 46 | #include <linux/init.h> |
47 | #include <linux/ioport.h> | 47 | #include <linux/ioport.h> |
48 | #include <linux/list.h> | 48 | #include <linux/list.h> |
49 | #include <linux/compiler.h> | 49 | #include <linux/compiler.h> |
50 | #include <linux/errno.h> | 50 | #include <linux/errno.h> |
51 | #include <linux/kobject.h> | 51 | #include <linux/kobject.h> |
52 | #include <asm/atomic.h> | 52 | #include <asm/atomic.h> |
53 | #include <linux/device.h> | 53 | #include <linux/device.h> |
54 | #include <linux/io.h> | 54 | #include <linux/io.h> |
55 | 55 | ||
56 | /* Include the ID list */ | 56 | /* Include the ID list */ |
57 | #include <linux/pci_ids.h> | 57 | #include <linux/pci_ids.h> |
58 | 58 | ||
59 | /* pci_slot represents a physical slot */ | 59 | /* pci_slot represents a physical slot */ |
60 | struct pci_slot { | 60 | struct pci_slot { |
61 | struct pci_bus *bus; /* The bus this slot is on */ | 61 | struct pci_bus *bus; /* The bus this slot is on */ |
62 | struct list_head list; /* node in list of slots on this bus */ | 62 | struct list_head list; /* node in list of slots on this bus */ |
63 | struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ | 63 | struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ |
64 | unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ | 64 | unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ |
65 | struct kobject kobj; | 65 | struct kobject kobj; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static inline const char *pci_slot_name(const struct pci_slot *slot) | 68 | static inline const char *pci_slot_name(const struct pci_slot *slot) |
69 | { | 69 | { |
70 | return kobject_name(&slot->kobj); | 70 | return kobject_name(&slot->kobj); |
71 | } | 71 | } |
72 | 72 | ||
73 | /* File state for mmap()s on /proc/bus/pci/X/Y */ | 73 | /* File state for mmap()s on /proc/bus/pci/X/Y */ |
74 | enum pci_mmap_state { | 74 | enum pci_mmap_state { |
75 | pci_mmap_io, | 75 | pci_mmap_io, |
76 | pci_mmap_mem | 76 | pci_mmap_mem |
77 | }; | 77 | }; |
78 | 78 | ||
79 | /* This defines the direction arg to the DMA mapping routines. */ | 79 | /* This defines the direction arg to the DMA mapping routines. */ |
80 | #define PCI_DMA_BIDIRECTIONAL 0 | 80 | #define PCI_DMA_BIDIRECTIONAL 0 |
81 | #define PCI_DMA_TODEVICE 1 | 81 | #define PCI_DMA_TODEVICE 1 |
82 | #define PCI_DMA_FROMDEVICE 2 | 82 | #define PCI_DMA_FROMDEVICE 2 |
83 | #define PCI_DMA_NONE 3 | 83 | #define PCI_DMA_NONE 3 |
84 | 84 | ||
85 | #define DEVICE_COUNT_RESOURCE 12 | 85 | #define DEVICE_COUNT_RESOURCE 12 |
86 | 86 | ||
87 | typedef int __bitwise pci_power_t; | 87 | typedef int __bitwise pci_power_t; |
88 | 88 | ||
89 | #define PCI_D0 ((pci_power_t __force) 0) | 89 | #define PCI_D0 ((pci_power_t __force) 0) |
90 | #define PCI_D1 ((pci_power_t __force) 1) | 90 | #define PCI_D1 ((pci_power_t __force) 1) |
91 | #define PCI_D2 ((pci_power_t __force) 2) | 91 | #define PCI_D2 ((pci_power_t __force) 2) |
92 | #define PCI_D3hot ((pci_power_t __force) 3) | 92 | #define PCI_D3hot ((pci_power_t __force) 3) |
93 | #define PCI_D3cold ((pci_power_t __force) 4) | 93 | #define PCI_D3cold ((pci_power_t __force) 4) |
94 | #define PCI_UNKNOWN ((pci_power_t __force) 5) | 94 | #define PCI_UNKNOWN ((pci_power_t __force) 5) |
95 | #define PCI_POWER_ERROR ((pci_power_t __force) -1) | 95 | #define PCI_POWER_ERROR ((pci_power_t __force) -1) |
96 | 96 | ||
97 | /** The pci_channel state describes connectivity between the CPU and | 97 | /** The pci_channel state describes connectivity between the CPU and |
98 | * the pci device. If some PCI bus between here and the pci device | 98 | * the pci device. If some PCI bus between here and the pci device |
99 | * has crashed or locked up, this info is reflected here. | 99 | * has crashed or locked up, this info is reflected here. |
100 | */ | 100 | */ |
101 | typedef unsigned int __bitwise pci_channel_state_t; | 101 | typedef unsigned int __bitwise pci_channel_state_t; |
102 | 102 | ||
103 | enum pci_channel_state { | 103 | enum pci_channel_state { |
104 | /* I/O channel is in normal state */ | 104 | /* I/O channel is in normal state */ |
105 | pci_channel_io_normal = (__force pci_channel_state_t) 1, | 105 | pci_channel_io_normal = (__force pci_channel_state_t) 1, |
106 | 106 | ||
107 | /* I/O to channel is blocked */ | 107 | /* I/O to channel is blocked */ |
108 | pci_channel_io_frozen = (__force pci_channel_state_t) 2, | 108 | pci_channel_io_frozen = (__force pci_channel_state_t) 2, |
109 | 109 | ||
110 | /* PCI card is dead */ | 110 | /* PCI card is dead */ |
111 | pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, | 111 | pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | typedef unsigned int __bitwise pcie_reset_state_t; | 114 | typedef unsigned int __bitwise pcie_reset_state_t; |
115 | 115 | ||
116 | enum pcie_reset_state { | 116 | enum pcie_reset_state { |
117 | /* Reset is NOT asserted (Use to deassert reset) */ | 117 | /* Reset is NOT asserted (Use to deassert reset) */ |
118 | pcie_deassert_reset = (__force pcie_reset_state_t) 1, | 118 | pcie_deassert_reset = (__force pcie_reset_state_t) 1, |
119 | 119 | ||
120 | /* Use #PERST to reset PCI-E device */ | 120 | /* Use #PERST to reset PCI-E device */ |
121 | pcie_warm_reset = (__force pcie_reset_state_t) 2, | 121 | pcie_warm_reset = (__force pcie_reset_state_t) 2, |
122 | 122 | ||
123 | /* Use PCI-E Hot Reset to reset device */ | 123 | /* Use PCI-E Hot Reset to reset device */ |
124 | pcie_hot_reset = (__force pcie_reset_state_t) 3 | 124 | pcie_hot_reset = (__force pcie_reset_state_t) 3 |
125 | }; | 125 | }; |
126 | 126 | ||
127 | typedef unsigned short __bitwise pci_dev_flags_t; | 127 | typedef unsigned short __bitwise pci_dev_flags_t; |
128 | enum pci_dev_flags { | 128 | enum pci_dev_flags { |
129 | /* INTX_DISABLE in PCI_COMMAND register disables MSI | 129 | /* INTX_DISABLE in PCI_COMMAND register disables MSI |
130 | * generation too. | 130 | * generation too. |
131 | */ | 131 | */ |
132 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, | 132 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, |
133 | /* Device configuration is irrevocably lost if disabled into D3 */ | 133 | /* Device configuration is irrevocably lost if disabled into D3 */ |
134 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, | 134 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, |
135 | }; | 135 | }; |
136 | 136 | ||
137 | enum pci_irq_reroute_variant { | 137 | enum pci_irq_reroute_variant { |
138 | INTEL_IRQ_REROUTE_VARIANT = 1, | 138 | INTEL_IRQ_REROUTE_VARIANT = 1, |
139 | MAX_IRQ_REROUTE_VARIANTS = 3 | 139 | MAX_IRQ_REROUTE_VARIANTS = 3 |
140 | }; | 140 | }; |
141 | 141 | ||
142 | typedef unsigned short __bitwise pci_bus_flags_t; | 142 | typedef unsigned short __bitwise pci_bus_flags_t; |
143 | enum pci_bus_flags { | 143 | enum pci_bus_flags { |
144 | PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, | 144 | PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, |
145 | PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, | 145 | PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, |
146 | }; | 146 | }; |
147 | 147 | ||
148 | struct pci_cap_saved_state { | 148 | struct pci_cap_saved_state { |
149 | struct hlist_node next; | 149 | struct hlist_node next; |
150 | char cap_nr; | 150 | char cap_nr; |
151 | u32 data[0]; | 151 | u32 data[0]; |
152 | }; | 152 | }; |
153 | 153 | ||
154 | struct pcie_link_state; | 154 | struct pcie_link_state; |
155 | struct pci_vpd; | 155 | struct pci_vpd; |
156 | 156 | ||
157 | /* | 157 | /* |
158 | * The pci_dev structure is used to describe PCI devices. | 158 | * The pci_dev structure is used to describe PCI devices. |
159 | */ | 159 | */ |
160 | struct pci_dev { | 160 | struct pci_dev { |
161 | struct list_head bus_list; /* node in per-bus list */ | 161 | struct list_head bus_list; /* node in per-bus list */ |
162 | struct pci_bus *bus; /* bus this device is on */ | 162 | struct pci_bus *bus; /* bus this device is on */ |
163 | struct pci_bus *subordinate; /* bus this device bridges to */ | 163 | struct pci_bus *subordinate; /* bus this device bridges to */ |
164 | 164 | ||
165 | void *sysdata; /* hook for sys-specific extension */ | 165 | void *sysdata; /* hook for sys-specific extension */ |
166 | struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ | 166 | struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ |
167 | struct pci_slot *slot; /* Physical slot this device is in */ | 167 | struct pci_slot *slot; /* Physical slot this device is in */ |
168 | 168 | ||
169 | unsigned int devfn; /* encoded device & function index */ | 169 | unsigned int devfn; /* encoded device & function index */ |
170 | unsigned short vendor; | 170 | unsigned short vendor; |
171 | unsigned short device; | 171 | unsigned short device; |
172 | unsigned short subsystem_vendor; | 172 | unsigned short subsystem_vendor; |
173 | unsigned short subsystem_device; | 173 | unsigned short subsystem_device; |
174 | unsigned int class; /* 3 bytes: (base,sub,prog-if) */ | 174 | unsigned int class; /* 3 bytes: (base,sub,prog-if) */ |
175 | u8 revision; /* PCI revision, low byte of class word */ | 175 | u8 revision; /* PCI revision, low byte of class word */ |
176 | u8 hdr_type; /* PCI header type (`multi' flag masked out) */ | 176 | u8 hdr_type; /* PCI header type (`multi' flag masked out) */ |
177 | u8 pcie_type; /* PCI-E device/port type */ | 177 | u8 pcie_type; /* PCI-E device/port type */ |
178 | u8 rom_base_reg; /* which config register controls the ROM */ | 178 | u8 rom_base_reg; /* which config register controls the ROM */ |
179 | u8 pin; /* which interrupt pin this device uses */ | 179 | u8 pin; /* which interrupt pin this device uses */ |
180 | 180 | ||
181 | struct pci_driver *driver; /* which driver has allocated this device */ | 181 | struct pci_driver *driver; /* which driver has allocated this device */ |
182 | u64 dma_mask; /* Mask of the bits of bus address this | 182 | u64 dma_mask; /* Mask of the bits of bus address this |
183 | device implements. Normally this is | 183 | device implements. Normally this is |
184 | 0xffffffff. You only need to change | 184 | 0xffffffff. You only need to change |
185 | this if your device has broken DMA | 185 | this if your device has broken DMA |
186 | or supports 64-bit transfers. */ | 186 | or supports 64-bit transfers. */ |
187 | 187 | ||
188 | struct device_dma_parameters dma_parms; | 188 | struct device_dma_parameters dma_parms; |
189 | 189 | ||
190 | pci_power_t current_state; /* Current operating state. In ACPI-speak, | 190 | pci_power_t current_state; /* Current operating state. In ACPI-speak, |
191 | this is D0-D3, D0 being fully functional, | 191 | this is D0-D3, D0 being fully functional, |
192 | and D3 being off. */ | 192 | and D3 being off. */ |
193 | int pm_cap; /* PM capability offset in the | 193 | int pm_cap; /* PM capability offset in the |
194 | configuration space */ | 194 | configuration space */ |
195 | unsigned int pme_support:5; /* Bitmask of states from which PME# | 195 | unsigned int pme_support:5; /* Bitmask of states from which PME# |
196 | can be generated */ | 196 | can be generated */ |
197 | unsigned int d1_support:1; /* Low power state D1 is supported */ | 197 | unsigned int d1_support:1; /* Low power state D1 is supported */ |
198 | unsigned int d2_support:1; /* Low power state D2 is supported */ | 198 | unsigned int d2_support:1; /* Low power state D2 is supported */ |
199 | unsigned int no_d1d2:1; /* Only allow D0 and D3 */ | 199 | unsigned int no_d1d2:1; /* Only allow D0 and D3 */ |
200 | 200 | ||
201 | #ifdef CONFIG_PCIEASPM | 201 | #ifdef CONFIG_PCIEASPM |
202 | struct pcie_link_state *link_state; /* ASPM link state. */ | 202 | struct pcie_link_state *link_state; /* ASPM link state. */ |
203 | #endif | 203 | #endif |
204 | 204 | ||
205 | pci_channel_state_t error_state; /* current connectivity state */ | 205 | pci_channel_state_t error_state; /* current connectivity state */ |
206 | struct device dev; /* Generic device interface */ | 206 | struct device dev; /* Generic device interface */ |
207 | 207 | ||
208 | int cfg_size; /* Size of configuration space */ | 208 | int cfg_size; /* Size of configuration space */ |
209 | 209 | ||
210 | /* | 210 | /* |
211 | * Instead of touching interrupt line and base address registers | 211 | * Instead of touching interrupt line and base address registers |
212 | * directly, use the values stored here. They might be different! | 212 | * directly, use the values stored here. They might be different! |
213 | */ | 213 | */ |
214 | unsigned int irq; | 214 | unsigned int irq; |
215 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ | 215 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ |
216 | 216 | ||
217 | /* These fields are used by common fixups */ | 217 | /* These fields are used by common fixups */ |
218 | unsigned int transparent:1; /* Transparent PCI bridge */ | 218 | unsigned int transparent:1; /* Transparent PCI bridge */ |
219 | unsigned int multifunction:1;/* Part of multi-function device */ | 219 | unsigned int multifunction:1;/* Part of multi-function device */ |
220 | /* keep track of device state */ | 220 | /* keep track of device state */ |
221 | unsigned int is_added:1; | 221 | unsigned int is_added:1; |
222 | unsigned int is_busmaster:1; /* device is busmaster */ | 222 | unsigned int is_busmaster:1; /* device is busmaster */ |
223 | unsigned int no_msi:1; /* device may not use msi */ | 223 | unsigned int no_msi:1; /* device may not use msi */ |
224 | unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ | 224 | unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ |
225 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ | 225 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ |
226 | unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ | 226 | unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ |
227 | unsigned int msi_enabled:1; | 227 | unsigned int msi_enabled:1; |
228 | unsigned int msix_enabled:1; | 228 | unsigned int msix_enabled:1; |
229 | unsigned int ari_enabled:1; /* ARI forwarding */ | 229 | unsigned int ari_enabled:1; /* ARI forwarding */ |
230 | unsigned int is_managed:1; | 230 | unsigned int is_managed:1; |
231 | unsigned int is_pcie:1; | 231 | unsigned int is_pcie:1; |
232 | pci_dev_flags_t dev_flags; | 232 | pci_dev_flags_t dev_flags; |
233 | atomic_t enable_cnt; /* pci_enable_device has been called */ | 233 | atomic_t enable_cnt; /* pci_enable_device has been called */ |
234 | 234 | ||
235 | u32 saved_config_space[16]; /* config space saved at suspend time */ | 235 | u32 saved_config_space[16]; /* config space saved at suspend time */ |
236 | struct hlist_head saved_cap_space; | 236 | struct hlist_head saved_cap_space; |
237 | struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ | 237 | struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ |
238 | int rom_attr_enabled; /* has display of the rom attribute been enabled? */ | 238 | int rom_attr_enabled; /* has display of the rom attribute been enabled? */ |
239 | struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ | 239 | struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ |
240 | struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ | 240 | struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ |
241 | #ifdef CONFIG_PCI_MSI | 241 | #ifdef CONFIG_PCI_MSI |
242 | struct list_head msi_list; | 242 | struct list_head msi_list; |
243 | #endif | 243 | #endif |
244 | struct pci_vpd *vpd; | 244 | struct pci_vpd *vpd; |
245 | }; | 245 | }; |
246 | 246 | ||
247 | extern struct pci_dev *alloc_pci_dev(void); | 247 | extern struct pci_dev *alloc_pci_dev(void); |
248 | 248 | ||
249 | #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) | 249 | #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) |
250 | #define to_pci_dev(n) container_of(n, struct pci_dev, dev) | 250 | #define to_pci_dev(n) container_of(n, struct pci_dev, dev) |
251 | #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) | 251 | #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) |
252 | 252 | ||
253 | static inline int pci_channel_offline(struct pci_dev *pdev) | 253 | static inline int pci_channel_offline(struct pci_dev *pdev) |
254 | { | 254 | { |
255 | return (pdev->error_state != pci_channel_io_normal); | 255 | return (pdev->error_state != pci_channel_io_normal); |
256 | } | 256 | } |
257 | 257 | ||
258 | static inline struct pci_cap_saved_state *pci_find_saved_cap( | 258 | static inline struct pci_cap_saved_state *pci_find_saved_cap( |
259 | struct pci_dev *pci_dev, char cap) | 259 | struct pci_dev *pci_dev, char cap) |
260 | { | 260 | { |
261 | struct pci_cap_saved_state *tmp; | 261 | struct pci_cap_saved_state *tmp; |
262 | struct hlist_node *pos; | 262 | struct hlist_node *pos; |
263 | 263 | ||
264 | hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { | 264 | hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { |
265 | if (tmp->cap_nr == cap) | 265 | if (tmp->cap_nr == cap) |
266 | return tmp; | 266 | return tmp; |
267 | } | 267 | } |
268 | return NULL; | 268 | return NULL; |
269 | } | 269 | } |
270 | 270 | ||
271 | static inline void pci_add_saved_cap(struct pci_dev *pci_dev, | 271 | static inline void pci_add_saved_cap(struct pci_dev *pci_dev, |
272 | struct pci_cap_saved_state *new_cap) | 272 | struct pci_cap_saved_state *new_cap) |
273 | { | 273 | { |
274 | hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); | 274 | hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); |
275 | } | 275 | } |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * For PCI devices, the region numbers are assigned this way: | 278 | * For PCI devices, the region numbers are assigned this way: |
279 | * | 279 | * |
280 | * 0-5 standard PCI regions | 280 | * 0-5 standard PCI regions |
281 | * 6 expansion ROM | 281 | * 6 expansion ROM |
282 | * 7-10 bridges: address space assigned to buses behind the bridge | 282 | * 7-10 bridges: address space assigned to buses behind the bridge |
283 | */ | 283 | */ |
284 | 284 | ||
285 | #define PCI_ROM_RESOURCE 6 | 285 | #define PCI_ROM_RESOURCE 6 |
286 | #define PCI_BRIDGE_RESOURCES 7 | 286 | #define PCI_BRIDGE_RESOURCES 7 |
287 | #define PCI_NUM_RESOURCES 11 | 287 | #define PCI_NUM_RESOURCES 11 |
288 | 288 | ||
289 | #ifndef PCI_BUS_NUM_RESOURCES | 289 | #ifndef PCI_BUS_NUM_RESOURCES |
290 | #define PCI_BUS_NUM_RESOURCES 16 | 290 | #define PCI_BUS_NUM_RESOURCES 16 |
291 | #endif | 291 | #endif |
292 | 292 | ||
293 | #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ | 293 | #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ |
294 | 294 | ||
295 | struct pci_bus { | 295 | struct pci_bus { |
296 | struct list_head node; /* node in list of buses */ | 296 | struct list_head node; /* node in list of buses */ |
297 | struct pci_bus *parent; /* parent bus this bridge is on */ | 297 | struct pci_bus *parent; /* parent bus this bridge is on */ |
298 | struct list_head children; /* list of child buses */ | 298 | struct list_head children; /* list of child buses */ |
299 | struct list_head devices; /* list of devices on this bus */ | 299 | struct list_head devices; /* list of devices on this bus */ |
300 | struct pci_dev *self; /* bridge device as seen by parent */ | 300 | struct pci_dev *self; /* bridge device as seen by parent */ |
301 | struct list_head slots; /* list of slots on this bus */ | 301 | struct list_head slots; /* list of slots on this bus */ |
302 | struct resource *resource[PCI_BUS_NUM_RESOURCES]; | 302 | struct resource *resource[PCI_BUS_NUM_RESOURCES]; |
303 | /* address space routed to this bus */ | 303 | /* address space routed to this bus */ |
304 | 304 | ||
305 | struct pci_ops *ops; /* configuration access functions */ | 305 | struct pci_ops *ops; /* configuration access functions */ |
306 | void *sysdata; /* hook for sys-specific extension */ | 306 | void *sysdata; /* hook for sys-specific extension */ |
307 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ | 307 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ |
308 | 308 | ||
309 | unsigned char number; /* bus number */ | 309 | unsigned char number; /* bus number */ |
310 | unsigned char primary; /* number of primary bridge */ | 310 | unsigned char primary; /* number of primary bridge */ |
311 | unsigned char secondary; /* number of secondary bridge */ | 311 | unsigned char secondary; /* number of secondary bridge */ |
312 | unsigned char subordinate; /* max number of subordinate buses */ | 312 | unsigned char subordinate; /* max number of subordinate buses */ |
313 | 313 | ||
314 | char name[48]; | 314 | char name[48]; |
315 | 315 | ||
316 | unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ | 316 | unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ |
317 | pci_bus_flags_t bus_flags; /* Inherited by child busses */ | 317 | pci_bus_flags_t bus_flags; /* Inherited by child busses */ |
318 | struct device *bridge; | 318 | struct device *bridge; |
319 | struct device dev; | 319 | struct device dev; |
320 | struct bin_attribute *legacy_io; /* legacy I/O for this bus */ | 320 | struct bin_attribute *legacy_io; /* legacy I/O for this bus */ |
321 | struct bin_attribute *legacy_mem; /* legacy mem */ | 321 | struct bin_attribute *legacy_mem; /* legacy mem */ |
322 | unsigned int is_added:1; | 322 | unsigned int is_added:1; |
323 | }; | 323 | }; |
324 | 324 | ||
325 | #define pci_bus_b(n) list_entry(n, struct pci_bus, node) | 325 | #define pci_bus_b(n) list_entry(n, struct pci_bus, node) |
326 | #define to_pci_bus(n) container_of(n, struct pci_bus, dev) | 326 | #define to_pci_bus(n) container_of(n, struct pci_bus, dev) |
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Error values that may be returned by PCI functions. | 329 | * Error values that may be returned by PCI functions. |
330 | */ | 330 | */ |
331 | #define PCIBIOS_SUCCESSFUL 0x00 | 331 | #define PCIBIOS_SUCCESSFUL 0x00 |
332 | #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 | 332 | #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 |
333 | #define PCIBIOS_BAD_VENDOR_ID 0x83 | 333 | #define PCIBIOS_BAD_VENDOR_ID 0x83 |
334 | #define PCIBIOS_DEVICE_NOT_FOUND 0x86 | 334 | #define PCIBIOS_DEVICE_NOT_FOUND 0x86 |
335 | #define PCIBIOS_BAD_REGISTER_NUMBER 0x87 | 335 | #define PCIBIOS_BAD_REGISTER_NUMBER 0x87 |
336 | #define PCIBIOS_SET_FAILED 0x88 | 336 | #define PCIBIOS_SET_FAILED 0x88 |
337 | #define PCIBIOS_BUFFER_TOO_SMALL 0x89 | 337 | #define PCIBIOS_BUFFER_TOO_SMALL 0x89 |
338 | 338 | ||
339 | /* Low-level architecture-dependent routines */ | 339 | /* Low-level architecture-dependent routines */ |
340 | 340 | ||
341 | struct pci_ops { | 341 | struct pci_ops { |
342 | int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); | 342 | int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); |
343 | int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); | 343 | int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); |
344 | }; | 344 | }; |
345 | 345 | ||
346 | /* | 346 | /* |
347 | * ACPI needs to be able to access PCI config space before we've done a | 347 | * ACPI needs to be able to access PCI config space before we've done a |
348 | * PCI bus scan and created pci_bus structures. | 348 | * PCI bus scan and created pci_bus structures. |
349 | */ | 349 | */ |
350 | extern int raw_pci_read(unsigned int domain, unsigned int bus, | 350 | extern int raw_pci_read(unsigned int domain, unsigned int bus, |
351 | unsigned int devfn, int reg, int len, u32 *val); | 351 | unsigned int devfn, int reg, int len, u32 *val); |
352 | extern int raw_pci_write(unsigned int domain, unsigned int bus, | 352 | extern int raw_pci_write(unsigned int domain, unsigned int bus, |
353 | unsigned int devfn, int reg, int len, u32 val); | 353 | unsigned int devfn, int reg, int len, u32 val); |
354 | 354 | ||
355 | struct pci_bus_region { | 355 | struct pci_bus_region { |
356 | resource_size_t start; | 356 | resource_size_t start; |
357 | resource_size_t end; | 357 | resource_size_t end; |
358 | }; | 358 | }; |
359 | 359 | ||
360 | struct pci_dynids { | 360 | struct pci_dynids { |
361 | spinlock_t lock; /* protects list, index */ | 361 | spinlock_t lock; /* protects list, index */ |
362 | struct list_head list; /* for IDs added at runtime */ | 362 | struct list_head list; /* for IDs added at runtime */ |
363 | }; | 363 | }; |
364 | 364 | ||
365 | /* ---------------------------------------------------------------- */ | 365 | /* ---------------------------------------------------------------- */ |
366 | /** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides | 366 | /** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides |
367 | * a set of callbacks in struct pci_error_handlers, then that device driver | 367 | * a set of callbacks in struct pci_error_handlers, then that device driver |
368 | * will be notified of PCI bus errors, and will be driven to recovery | 368 | * will be notified of PCI bus errors, and will be driven to recovery |
369 | * when an error occurs. | 369 | * when an error occurs. |
370 | */ | 370 | */ |
371 | 371 | ||
372 | typedef unsigned int __bitwise pci_ers_result_t; | 372 | typedef unsigned int __bitwise pci_ers_result_t; |
373 | 373 | ||
374 | enum pci_ers_result { | 374 | enum pci_ers_result { |
375 | /* no result/none/not supported in device driver */ | 375 | /* no result/none/not supported in device driver */ |
376 | PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, | 376 | PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, |
377 | 377 | ||
378 | /* Device driver can recover without slot reset */ | 378 | /* Device driver can recover without slot reset */ |
379 | PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, | 379 | PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, |
380 | 380 | ||
381 | /* Device driver wants slot to be reset. */ | 381 | /* Device driver wants slot to be reset. */ |
382 | PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, | 382 | PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, |
383 | 383 | ||
384 | /* Device has completely failed, is unrecoverable */ | 384 | /* Device has completely failed, is unrecoverable */ |
385 | PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, | 385 | PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, |
386 | 386 | ||
387 | /* Device driver is fully recovered and operational */ | 387 | /* Device driver is fully recovered and operational */ |
388 | PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, | 388 | PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, |
389 | }; | 389 | }; |
390 | 390 | ||
391 | /* PCI bus error event callbacks */ | 391 | /* PCI bus error event callbacks */ |
392 | struct pci_error_handlers { | 392 | struct pci_error_handlers { |
393 | /* PCI bus error detected on this device */ | 393 | /* PCI bus error detected on this device */ |
394 | pci_ers_result_t (*error_detected)(struct pci_dev *dev, | 394 | pci_ers_result_t (*error_detected)(struct pci_dev *dev, |
395 | enum pci_channel_state error); | 395 | enum pci_channel_state error); |
396 | 396 | ||
397 | /* MMIO has been re-enabled, but not DMA */ | 397 | /* MMIO has been re-enabled, but not DMA */ |
398 | pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); | 398 | pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); |
399 | 399 | ||
400 | /* PCI Express link has been reset */ | 400 | /* PCI Express link has been reset */ |
401 | pci_ers_result_t (*link_reset)(struct pci_dev *dev); | 401 | pci_ers_result_t (*link_reset)(struct pci_dev *dev); |
402 | 402 | ||
403 | /* PCI slot has been reset */ | 403 | /* PCI slot has been reset */ |
404 | pci_ers_result_t (*slot_reset)(struct pci_dev *dev); | 404 | pci_ers_result_t (*slot_reset)(struct pci_dev *dev); |
405 | 405 | ||
406 | /* Device driver may resume normal operations */ | 406 | /* Device driver may resume normal operations */ |
407 | void (*resume)(struct pci_dev *dev); | 407 | void (*resume)(struct pci_dev *dev); |
408 | }; | 408 | }; |
409 | 409 | ||
410 | /* ---------------------------------------------------------------- */ | 410 | /* ---------------------------------------------------------------- */ |
411 | 411 | ||
412 | struct module; | 412 | struct module; |
413 | struct pci_driver { | 413 | struct pci_driver { |
414 | struct list_head node; | 414 | struct list_head node; |
415 | char *name; | 415 | char *name; |
416 | const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ | 416 | const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ |
417 | int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ | 417 | int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ |
418 | void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ | 418 | void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ |
419 | int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ | 419 | int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ |
420 | int (*suspend_late) (struct pci_dev *dev, pm_message_t state); | 420 | int (*suspend_late) (struct pci_dev *dev, pm_message_t state); |
421 | int (*resume_early) (struct pci_dev *dev); | 421 | int (*resume_early) (struct pci_dev *dev); |
422 | int (*resume) (struct pci_dev *dev); /* Device woken up */ | 422 | int (*resume) (struct pci_dev *dev); /* Device woken up */ |
423 | void (*shutdown) (struct pci_dev *dev); | 423 | void (*shutdown) (struct pci_dev *dev); |
424 | struct pm_ext_ops *pm; | ||
425 | struct pci_error_handlers *err_handler; | 424 | struct pci_error_handlers *err_handler; |
426 | struct device_driver driver; | 425 | struct device_driver driver; |
427 | struct pci_dynids dynids; | 426 | struct pci_dynids dynids; |
428 | }; | 427 | }; |
429 | 428 | ||
430 | #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) | 429 | #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) |
431 | 430 | ||
432 | /** | 431 | /** |
433 | * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table | 432 | * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table |
434 | * @_table: device table name | 433 | * @_table: device table name |
435 | * | 434 | * |
436 | * This macro is used to create a struct pci_device_id array (a device table) | 435 | * This macro is used to create a struct pci_device_id array (a device table) |
437 | * in a generic manner. | 436 | * in a generic manner. |
438 | */ | 437 | */ |
439 | #define DEFINE_PCI_DEVICE_TABLE(_table) \ | 438 | #define DEFINE_PCI_DEVICE_TABLE(_table) \ |
440 | const struct pci_device_id _table[] __devinitconst | 439 | const struct pci_device_id _table[] __devinitconst |
441 | 440 | ||
442 | /** | 441 | /** |
443 | * PCI_DEVICE - macro used to describe a specific pci device | 442 | * PCI_DEVICE - macro used to describe a specific pci device |
444 | * @vend: the 16 bit PCI Vendor ID | 443 | * @vend: the 16 bit PCI Vendor ID |
445 | * @dev: the 16 bit PCI Device ID | 444 | * @dev: the 16 bit PCI Device ID |
446 | * | 445 | * |
447 | * This macro is used to create a struct pci_device_id that matches a | 446 | * This macro is used to create a struct pci_device_id that matches a |
448 | * specific device. The subvendor and subdevice fields will be set to | 447 | * specific device. The subvendor and subdevice fields will be set to |
449 | * PCI_ANY_ID. | 448 | * PCI_ANY_ID. |
450 | */ | 449 | */ |
451 | #define PCI_DEVICE(vend,dev) \ | 450 | #define PCI_DEVICE(vend,dev) \ |
452 | .vendor = (vend), .device = (dev), \ | 451 | .vendor = (vend), .device = (dev), \ |
453 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID | 452 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID |
454 | 453 | ||
455 | /** | 454 | /** |
456 | * PCI_DEVICE_CLASS - macro used to describe a specific pci device class | 455 | * PCI_DEVICE_CLASS - macro used to describe a specific pci device class |
457 | * @dev_class: the class, subclass, prog-if triple for this device | 456 | * @dev_class: the class, subclass, prog-if triple for this device |
458 | * @dev_class_mask: the class mask for this device | 457 | * @dev_class_mask: the class mask for this device |
459 | * | 458 | * |
460 | * This macro is used to create a struct pci_device_id that matches a | 459 | * This macro is used to create a struct pci_device_id that matches a |
461 | * specific PCI class. The vendor, device, subvendor, and subdevice | 460 | * specific PCI class. The vendor, device, subvendor, and subdevice |
462 | * fields will be set to PCI_ANY_ID. | 461 | * fields will be set to PCI_ANY_ID. |
463 | */ | 462 | */ |
464 | #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ | 463 | #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ |
465 | .class = (dev_class), .class_mask = (dev_class_mask), \ | 464 | .class = (dev_class), .class_mask = (dev_class_mask), \ |
466 | .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ | 465 | .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ |
467 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID | 466 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID |
468 | 467 | ||
469 | /** | 468 | /** |
470 | * PCI_VDEVICE - macro used to describe a specific pci device in short form | 469 | * PCI_VDEVICE - macro used to describe a specific pci device in short form |
471 | * @vendor: the vendor name | 470 | * @vendor: the vendor name |
472 | * @device: the 16 bit PCI Device ID | 471 | * @device: the 16 bit PCI Device ID |
473 | * | 472 | * |
474 | * This macro is used to create a struct pci_device_id that matches a | 473 | * This macro is used to create a struct pci_device_id that matches a |
475 | * specific PCI device. The subvendor, and subdevice fields will be set | 474 | * specific PCI device. The subvendor, and subdevice fields will be set |
476 | * to PCI_ANY_ID. The macro allows the next field to follow as the device | 475 | * to PCI_ANY_ID. The macro allows the next field to follow as the device |
477 | * private data. | 476 | * private data. |
478 | */ | 477 | */ |
479 | 478 | ||
480 | #define PCI_VDEVICE(vendor, device) \ | 479 | #define PCI_VDEVICE(vendor, device) \ |
481 | PCI_VENDOR_ID_##vendor, (device), \ | 480 | PCI_VENDOR_ID_##vendor, (device), \ |
482 | PCI_ANY_ID, PCI_ANY_ID, 0, 0 | 481 | PCI_ANY_ID, PCI_ANY_ID, 0, 0 |
483 | 482 | ||
484 | /* these external functions are only available when PCI support is enabled */ | 483 | /* these external functions are only available when PCI support is enabled */ |
485 | #ifdef CONFIG_PCI | 484 | #ifdef CONFIG_PCI |
486 | 485 | ||
487 | extern struct bus_type pci_bus_type; | 486 | extern struct bus_type pci_bus_type; |
488 | 487 | ||
489 | /* Do NOT directly access these two variables, unless you are arch specific pci | 488 | /* Do NOT directly access these two variables, unless you are arch specific pci |
490 | * code, or pci core code. */ | 489 | * code, or pci core code. */ |
491 | extern struct list_head pci_root_buses; /* list of all known PCI buses */ | 490 | extern struct list_head pci_root_buses; /* list of all known PCI buses */ |
492 | /* Some device drivers need know if pci is initiated */ | 491 | /* Some device drivers need know if pci is initiated */ |
493 | extern int no_pci_devices(void); | 492 | extern int no_pci_devices(void); |
494 | 493 | ||
495 | void pcibios_fixup_bus(struct pci_bus *); | 494 | void pcibios_fixup_bus(struct pci_bus *); |
496 | int __must_check pcibios_enable_device(struct pci_dev *, int mask); | 495 | int __must_check pcibios_enable_device(struct pci_dev *, int mask); |
497 | char *pcibios_setup(char *str); | 496 | char *pcibios_setup(char *str); |
498 | 497 | ||
499 | /* Used only when drivers/pci/setup.c is used */ | 498 | /* Used only when drivers/pci/setup.c is used */ |
500 | void pcibios_align_resource(void *, struct resource *, resource_size_t, | 499 | void pcibios_align_resource(void *, struct resource *, resource_size_t, |
501 | resource_size_t); | 500 | resource_size_t); |
502 | void pcibios_update_irq(struct pci_dev *, int irq); | 501 | void pcibios_update_irq(struct pci_dev *, int irq); |
503 | 502 | ||
504 | /* Generic PCI functions used internally */ | 503 | /* Generic PCI functions used internally */ |
505 | 504 | ||
506 | extern struct pci_bus *pci_find_bus(int domain, int busnr); | 505 | extern struct pci_bus *pci_find_bus(int domain, int busnr); |
507 | void pci_bus_add_devices(struct pci_bus *bus); | 506 | void pci_bus_add_devices(struct pci_bus *bus); |
508 | struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, | 507 | struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, |
509 | struct pci_ops *ops, void *sysdata); | 508 | struct pci_ops *ops, void *sysdata); |
510 | static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, | 509 | static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, |
511 | void *sysdata) | 510 | void *sysdata) |
512 | { | 511 | { |
513 | struct pci_bus *root_bus; | 512 | struct pci_bus *root_bus; |
514 | root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata); | 513 | root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata); |
515 | if (root_bus) | 514 | if (root_bus) |
516 | pci_bus_add_devices(root_bus); | 515 | pci_bus_add_devices(root_bus); |
517 | return root_bus; | 516 | return root_bus; |
518 | } | 517 | } |
519 | struct pci_bus *pci_create_bus(struct device *parent, int bus, | 518 | struct pci_bus *pci_create_bus(struct device *parent, int bus, |
520 | struct pci_ops *ops, void *sysdata); | 519 | struct pci_ops *ops, void *sysdata); |
521 | struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, | 520 | struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, |
522 | int busnr); | 521 | int busnr); |
523 | struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, | 522 | struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, |
524 | const char *name, | 523 | const char *name, |
525 | struct hotplug_slot *hotplug); | 524 | struct hotplug_slot *hotplug); |
526 | void pci_destroy_slot(struct pci_slot *slot); | 525 | void pci_destroy_slot(struct pci_slot *slot); |
527 | void pci_renumber_slot(struct pci_slot *slot, int slot_nr); | 526 | void pci_renumber_slot(struct pci_slot *slot, int slot_nr); |
528 | int pci_scan_slot(struct pci_bus *bus, int devfn); | 527 | int pci_scan_slot(struct pci_bus *bus, int devfn); |
529 | struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); | 528 | struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); |
530 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); | 529 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); |
531 | unsigned int pci_scan_child_bus(struct pci_bus *bus); | 530 | unsigned int pci_scan_child_bus(struct pci_bus *bus); |
532 | int __must_check pci_bus_add_device(struct pci_dev *dev); | 531 | int __must_check pci_bus_add_device(struct pci_dev *dev); |
533 | void pci_read_bridge_bases(struct pci_bus *child); | 532 | void pci_read_bridge_bases(struct pci_bus *child); |
534 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, | 533 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, |
535 | struct resource *res); | 534 | struct resource *res); |
536 | int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); | 535 | int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); |
537 | extern struct pci_dev *pci_dev_get(struct pci_dev *dev); | 536 | extern struct pci_dev *pci_dev_get(struct pci_dev *dev); |
538 | extern void pci_dev_put(struct pci_dev *dev); | 537 | extern void pci_dev_put(struct pci_dev *dev); |
539 | extern void pci_remove_bus(struct pci_bus *b); | 538 | extern void pci_remove_bus(struct pci_bus *b); |
540 | extern void pci_remove_bus_device(struct pci_dev *dev); | 539 | extern void pci_remove_bus_device(struct pci_dev *dev); |
541 | extern void pci_stop_bus_device(struct pci_dev *dev); | 540 | extern void pci_stop_bus_device(struct pci_dev *dev); |
542 | void pci_setup_cardbus(struct pci_bus *bus); | 541 | void pci_setup_cardbus(struct pci_bus *bus); |
543 | extern void pci_sort_breadthfirst(void); | 542 | extern void pci_sort_breadthfirst(void); |
544 | 543 | ||
545 | /* Generic PCI functions exported to card drivers */ | 544 | /* Generic PCI functions exported to card drivers */ |
546 | 545 | ||
547 | #ifdef CONFIG_PCI_LEGACY | 546 | #ifdef CONFIG_PCI_LEGACY |
548 | struct pci_dev __deprecated *pci_find_device(unsigned int vendor, | 547 | struct pci_dev __deprecated *pci_find_device(unsigned int vendor, |
549 | unsigned int device, | 548 | unsigned int device, |
550 | struct pci_dev *from); | 549 | struct pci_dev *from); |
551 | struct pci_dev __deprecated *pci_find_slot(unsigned int bus, | 550 | struct pci_dev __deprecated *pci_find_slot(unsigned int bus, |
552 | unsigned int devfn); | 551 | unsigned int devfn); |
553 | #endif /* CONFIG_PCI_LEGACY */ | 552 | #endif /* CONFIG_PCI_LEGACY */ |
554 | 553 | ||
555 | enum pci_lost_interrupt_reason { | 554 | enum pci_lost_interrupt_reason { |
556 | PCI_LOST_IRQ_NO_INFORMATION = 0, | 555 | PCI_LOST_IRQ_NO_INFORMATION = 0, |
557 | PCI_LOST_IRQ_DISABLE_MSI, | 556 | PCI_LOST_IRQ_DISABLE_MSI, |
558 | PCI_LOST_IRQ_DISABLE_MSIX, | 557 | PCI_LOST_IRQ_DISABLE_MSIX, |
559 | PCI_LOST_IRQ_DISABLE_ACPI, | 558 | PCI_LOST_IRQ_DISABLE_ACPI, |
560 | }; | 559 | }; |
561 | enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); | 560 | enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); |
562 | int pci_find_capability(struct pci_dev *dev, int cap); | 561 | int pci_find_capability(struct pci_dev *dev, int cap); |
563 | int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); | 562 | int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); |
564 | int pci_find_ext_capability(struct pci_dev *dev, int cap); | 563 | int pci_find_ext_capability(struct pci_dev *dev, int cap); |
565 | int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); | 564 | int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); |
566 | int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); | 565 | int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); |
567 | struct pci_bus *pci_find_next_bus(const struct pci_bus *from); | 566 | struct pci_bus *pci_find_next_bus(const struct pci_bus *from); |
568 | 567 | ||
569 | struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, | 568 | struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, |
570 | struct pci_dev *from); | 569 | struct pci_dev *from); |
571 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, | 570 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, |
572 | unsigned int ss_vendor, unsigned int ss_device, | 571 | unsigned int ss_vendor, unsigned int ss_device, |
573 | struct pci_dev *from); | 572 | struct pci_dev *from); |
574 | struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); | 573 | struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); |
575 | struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); | 574 | struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); |
576 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); | 575 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); |
577 | int pci_dev_present(const struct pci_device_id *ids); | 576 | int pci_dev_present(const struct pci_device_id *ids); |
578 | 577 | ||
579 | int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, | 578 | int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, |
580 | int where, u8 *val); | 579 | int where, u8 *val); |
581 | int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, | 580 | int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, |
582 | int where, u16 *val); | 581 | int where, u16 *val); |
583 | int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn, | 582 | int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn, |
584 | int where, u32 *val); | 583 | int where, u32 *val); |
585 | int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, | 584 | int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, |
586 | int where, u8 val); | 585 | int where, u8 val); |
587 | int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, | 586 | int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, |
588 | int where, u16 val); | 587 | int where, u16 val); |
589 | int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, | 588 | int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, |
590 | int where, u32 val); | 589 | int where, u32 val); |
591 | 590 | ||
/*
 * Per-device configuration space accessors.
 *
 * Thin wrappers that forward to the bus-level
 * pci_bus_{read,write}_config_{byte,word,dword}() helpers using the
 * device's parent bus and devfn.  The return value is the bus
 * helper's status code (0 on success, a PCIBIOS_* error otherwise).
 */
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{
	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val)
{
	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
					u32 *val)
{
	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
{
	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val)
{
	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
					 u32 val)
{
	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
618 | 617 | ||
619 | int __must_check pci_enable_device(struct pci_dev *dev); | 618 | int __must_check pci_enable_device(struct pci_dev *dev); |
620 | int __must_check pci_enable_device_io(struct pci_dev *dev); | 619 | int __must_check pci_enable_device_io(struct pci_dev *dev); |
621 | int __must_check pci_enable_device_mem(struct pci_dev *dev); | 620 | int __must_check pci_enable_device_mem(struct pci_dev *dev); |
622 | int __must_check pci_reenable_device(struct pci_dev *); | 621 | int __must_check pci_reenable_device(struct pci_dev *); |
623 | int __must_check pcim_enable_device(struct pci_dev *pdev); | 622 | int __must_check pcim_enable_device(struct pci_dev *pdev); |
624 | void pcim_pin_device(struct pci_dev *pdev); | 623 | void pcim_pin_device(struct pci_dev *pdev); |
625 | 624 | ||
/* Nonzero when the device uses managed resources, i.e. it was enabled
 * via pcim_enable_device() rather than pci_enable_device(). */
static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}
630 | 629 | ||
631 | void pci_disable_device(struct pci_dev *dev); | 630 | void pci_disable_device(struct pci_dev *dev); |
632 | void pci_set_master(struct pci_dev *dev); | 631 | void pci_set_master(struct pci_dev *dev); |
633 | int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); | 632 | int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); |
634 | #define HAVE_PCI_SET_MWI | 633 | #define HAVE_PCI_SET_MWI |
635 | int __must_check pci_set_mwi(struct pci_dev *dev); | 634 | int __must_check pci_set_mwi(struct pci_dev *dev); |
636 | int pci_try_set_mwi(struct pci_dev *dev); | 635 | int pci_try_set_mwi(struct pci_dev *dev); |
637 | void pci_clear_mwi(struct pci_dev *dev); | 636 | void pci_clear_mwi(struct pci_dev *dev); |
638 | void pci_intx(struct pci_dev *dev, int enable); | 637 | void pci_intx(struct pci_dev *dev, int enable); |
639 | void pci_msi_off(struct pci_dev *dev); | 638 | void pci_msi_off(struct pci_dev *dev); |
640 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); | 639 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); |
641 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); | 640 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); |
642 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); | 641 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); |
643 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); | 642 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); |
644 | int pcix_get_max_mmrbc(struct pci_dev *dev); | 643 | int pcix_get_max_mmrbc(struct pci_dev *dev); |
645 | int pcix_get_mmrbc(struct pci_dev *dev); | 644 | int pcix_get_mmrbc(struct pci_dev *dev); |
646 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); | 645 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); |
647 | int pcie_get_readrq(struct pci_dev *dev); | 646 | int pcie_get_readrq(struct pci_dev *dev); |
648 | int pcie_set_readrq(struct pci_dev *dev, int rq); | 647 | int pcie_set_readrq(struct pci_dev *dev, int rq); |
649 | int pci_reset_function(struct pci_dev *dev); | 648 | int pci_reset_function(struct pci_dev *dev); |
650 | int pci_execute_reset_function(struct pci_dev *dev); | 649 | int pci_execute_reset_function(struct pci_dev *dev); |
651 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); | 650 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); |
652 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 651 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
653 | int pci_select_bars(struct pci_dev *dev, unsigned long flags); | 652 | int pci_select_bars(struct pci_dev *dev, unsigned long flags); |
654 | 653 | ||
655 | /* ROM control related routines */ | 654 | /* ROM control related routines */ |
656 | int pci_enable_rom(struct pci_dev *pdev); | 655 | int pci_enable_rom(struct pci_dev *pdev); |
657 | void pci_disable_rom(struct pci_dev *pdev); | 656 | void pci_disable_rom(struct pci_dev *pdev); |
658 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); | 657 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); |
659 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); | 658 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); |
660 | size_t pci_get_rom_size(void __iomem *rom, size_t size); | 659 | size_t pci_get_rom_size(void __iomem *rom, size_t size); |
661 | 660 | ||
662 | /* Power management related routines */ | 661 | /* Power management related routines */ |
663 | int pci_save_state(struct pci_dev *dev); | 662 | int pci_save_state(struct pci_dev *dev); |
664 | int pci_restore_state(struct pci_dev *dev); | 663 | int pci_restore_state(struct pci_dev *dev); |
665 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); | 664 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); |
666 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); | 665 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); |
667 | bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); | 666 | bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); |
668 | void pci_pme_active(struct pci_dev *dev, bool enable); | 667 | void pci_pme_active(struct pci_dev *dev, bool enable); |
669 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); | 668 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); |
670 | int pci_wake_from_d3(struct pci_dev *dev, bool enable); | 669 | int pci_wake_from_d3(struct pci_dev *dev, bool enable); |
671 | pci_power_t pci_target_state(struct pci_dev *dev); | 670 | pci_power_t pci_target_state(struct pci_dev *dev); |
672 | int pci_prepare_to_sleep(struct pci_dev *dev); | 671 | int pci_prepare_to_sleep(struct pci_dev *dev); |
673 | int pci_back_from_sleep(struct pci_dev *dev); | 672 | int pci_back_from_sleep(struct pci_dev *dev); |
674 | 673 | ||
675 | /* Functions for PCI Hotplug drivers to use */ | 674 | /* Functions for PCI Hotplug drivers to use */ |
676 | int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); | 675 | int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); |
677 | 676 | ||
678 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ | 677 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ |
679 | void pci_bus_assign_resources(struct pci_bus *bus); | 678 | void pci_bus_assign_resources(struct pci_bus *bus); |
680 | void pci_bus_size_bridges(struct pci_bus *bus); | 679 | void pci_bus_size_bridges(struct pci_bus *bus); |
681 | int pci_claim_resource(struct pci_dev *, int); | 680 | int pci_claim_resource(struct pci_dev *, int); |
682 | void pci_assign_unassigned_resources(void); | 681 | void pci_assign_unassigned_resources(void); |
683 | void pdev_enable_device(struct pci_dev *); | 682 | void pdev_enable_device(struct pci_dev *); |
684 | void pdev_sort_resources(struct pci_dev *, struct resource_list *); | 683 | void pdev_sort_resources(struct pci_dev *, struct resource_list *); |
685 | int pci_enable_resources(struct pci_dev *, int mask); | 684 | int pci_enable_resources(struct pci_dev *, int mask); |
686 | void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), | 685 | void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), |
687 | int (*)(struct pci_dev *, u8, u8)); | 686 | int (*)(struct pci_dev *, u8, u8)); |
688 | #define HAVE_PCI_REQ_REGIONS 2 | 687 | #define HAVE_PCI_REQ_REGIONS 2 |
689 | int __must_check pci_request_regions(struct pci_dev *, const char *); | 688 | int __must_check pci_request_regions(struct pci_dev *, const char *); |
690 | void pci_release_regions(struct pci_dev *); | 689 | void pci_release_regions(struct pci_dev *); |
691 | int __must_check pci_request_region(struct pci_dev *, int, const char *); | 690 | int __must_check pci_request_region(struct pci_dev *, int, const char *); |
692 | void pci_release_region(struct pci_dev *, int); | 691 | void pci_release_region(struct pci_dev *, int); |
693 | int pci_request_selected_regions(struct pci_dev *, int, const char *); | 692 | int pci_request_selected_regions(struct pci_dev *, int, const char *); |
694 | void pci_release_selected_regions(struct pci_dev *, int); | 693 | void pci_release_selected_regions(struct pci_dev *, int); |
695 | 694 | ||
696 | /* drivers/pci/bus.c */ | 695 | /* drivers/pci/bus.c */ |
697 | int __must_check pci_bus_alloc_resource(struct pci_bus *bus, | 696 | int __must_check pci_bus_alloc_resource(struct pci_bus *bus, |
698 | struct resource *res, resource_size_t size, | 697 | struct resource *res, resource_size_t size, |
699 | resource_size_t align, resource_size_t min, | 698 | resource_size_t align, resource_size_t min, |
700 | unsigned int type_mask, | 699 | unsigned int type_mask, |
701 | void (*alignf)(void *, struct resource *, | 700 | void (*alignf)(void *, struct resource *, |
702 | resource_size_t, resource_size_t), | 701 | resource_size_t, resource_size_t), |
703 | void *alignf_data); | 702 | void *alignf_data); |
704 | void pci_enable_bridges(struct pci_bus *bus); | 703 | void pci_enable_bridges(struct pci_bus *bus); |
705 | 704 | ||
706 | /* Proper probing supporting hot-pluggable devices */ | 705 | /* Proper probing supporting hot-pluggable devices */ |
707 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, | 706 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, |
708 | const char *mod_name); | 707 | const char *mod_name); |
709 | 708 | ||
710 | /* | 709 | /* |
711 | * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded | 710 | * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded |
712 | */ | 711 | */ |
713 | #define pci_register_driver(driver) \ | 712 | #define pci_register_driver(driver) \ |
714 | __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) | 713 | __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) |
715 | 714 | ||
716 | void pci_unregister_driver(struct pci_driver *dev); | 715 | void pci_unregister_driver(struct pci_driver *dev); |
717 | void pci_remove_behind_bridge(struct pci_dev *dev); | 716 | void pci_remove_behind_bridge(struct pci_dev *dev); |
718 | struct pci_driver *pci_dev_driver(const struct pci_dev *dev); | 717 | struct pci_driver *pci_dev_driver(const struct pci_dev *dev); |
719 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, | 718 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, |
720 | struct pci_dev *dev); | 719 | struct pci_dev *dev); |
721 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | 720 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, |
722 | int pass); | 721 | int pass); |
723 | 722 | ||
724 | void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), | 723 | void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), |
725 | void *userdata); | 724 | void *userdata); |
726 | int pci_cfg_space_size_ext(struct pci_dev *dev); | 725 | int pci_cfg_space_size_ext(struct pci_dev *dev); |
727 | int pci_cfg_space_size(struct pci_dev *dev); | 726 | int pci_cfg_space_size(struct pci_dev *dev); |
728 | unsigned char pci_bus_max_busnr(struct pci_bus *bus); | 727 | unsigned char pci_bus_max_busnr(struct pci_bus *bus); |
729 | 728 | ||
730 | /* kmem_cache style wrapper around pci_alloc_consistent() */ | 729 | /* kmem_cache style wrapper around pci_alloc_consistent() */ |
731 | 730 | ||
732 | #include <linux/dmapool.h> | 731 | #include <linux/dmapool.h> |
733 | 732 | ||
734 | #define pci_pool dma_pool | 733 | #define pci_pool dma_pool |
735 | #define pci_pool_create(name, pdev, size, align, allocation) \ | 734 | #define pci_pool_create(name, pdev, size, align, allocation) \ |
736 | dma_pool_create(name, &pdev->dev, size, align, allocation) | 735 | dma_pool_create(name, &pdev->dev, size, align, allocation) |
737 | #define pci_pool_destroy(pool) dma_pool_destroy(pool) | 736 | #define pci_pool_destroy(pool) dma_pool_destroy(pool) |
738 | #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) | 737 | #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) |
739 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) | 738 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) |
740 | 739 | ||
741 | enum pci_dma_burst_strategy { | 740 | enum pci_dma_burst_strategy { |
742 | PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, | 741 | PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, |
743 | strategy_parameter is N/A */ | 742 | strategy_parameter is N/A */ |
744 | PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter | 743 | PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter |
745 | byte boundaries */ | 744 | byte boundaries */ |
746 | PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of | 745 | PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of |
747 | strategy_parameter byte boundaries */ | 746 | strategy_parameter byte boundaries */ |
748 | }; | 747 | }; |
749 | 748 | ||
750 | struct msix_entry { | 749 | struct msix_entry { |
751 | u32 vector; /* kernel uses to write allocated vector */ | 750 | u32 vector; /* kernel uses to write allocated vector */ |
752 | u16 entry; /* driver uses to specify entry, OS writes */ | 751 | u16 entry; /* driver uses to specify entry, OS writes */ |
753 | }; | 752 | }; |
754 | 753 | ||
755 | 754 | ||
#ifndef CONFIG_PCI_MSI
/*
 * CONFIG_PCI_MSI disabled: provide no-op stubs so drivers can call the
 * MSI/MSI-X API unconditionally.  The enable functions report failure
 * (-1); the shutdown/disable/restore stubs do nothing.
 */
static inline int pci_enable_msi(struct pci_dev *dev)
{
	return -1;
}

static inline void pci_msi_shutdown(struct pci_dev *dev)
{ }
static inline void pci_disable_msi(struct pci_dev *dev)
{ }

static inline int pci_enable_msix(struct pci_dev *dev,
				  struct msix_entry *entries, int nvec)
{
	return -1;
}

static inline void pci_msix_shutdown(struct pci_dev *dev)
{ }
static inline void pci_disable_msix(struct pci_dev *dev)
{ }

static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{ }

static inline void pci_restore_msi_state(struct pci_dev *dev)
{ }
#else
/* Full implementations are provided when MSI support is built in. */
extern int pci_enable_msi(struct pci_dev *dev);
extern void pci_msi_shutdown(struct pci_dev *dev);
extern void pci_disable_msi(struct pci_dev *dev);
extern int pci_enable_msix(struct pci_dev *dev,
			   struct msix_entry *entries, int nvec);
extern void pci_msix_shutdown(struct pci_dev *dev);
extern void pci_disable_msix(struct pci_dev *dev);
extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
extern void pci_restore_msi_state(struct pci_dev *dev);
#endif
794 | 793 | ||
795 | #ifdef CONFIG_HT_IRQ | 794 | #ifdef CONFIG_HT_IRQ |
796 | /* The functions a driver should call */ | 795 | /* The functions a driver should call */ |
797 | int ht_create_irq(struct pci_dev *dev, int idx); | 796 | int ht_create_irq(struct pci_dev *dev, int idx); |
798 | void ht_destroy_irq(unsigned int irq); | 797 | void ht_destroy_irq(unsigned int irq); |
799 | #endif /* CONFIG_HT_IRQ */ | 798 | #endif /* CONFIG_HT_IRQ */ |
800 | 799 | ||
801 | extern void pci_block_user_cfg_access(struct pci_dev *dev); | 800 | extern void pci_block_user_cfg_access(struct pci_dev *dev); |
802 | extern void pci_unblock_user_cfg_access(struct pci_dev *dev); | 801 | extern void pci_unblock_user_cfg_access(struct pci_dev *dev); |
803 | 802 | ||
804 | /* | 803 | /* |
805 | * PCI domain support. Sometimes called PCI segment (eg by ACPI), | 804 | * PCI domain support. Sometimes called PCI segment (eg by ACPI), |
806 | * a PCI domain is defined to be a set of PCI busses which share | 805 | * a PCI domain is defined to be a set of PCI busses which share |
807 | * configuration space. | 806 | * configuration space. |
808 | */ | 807 | */ |
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
#else
/*
 * Single-domain configuration: every bus is in domain 0, and both
 * pci_domain_nr() and pci_proc_domain() trivially return 0.
 */
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus)
{
	return 0;
}

static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}
#endif /* CONFIG_PCI_DOMAINS */
823 | 822 | ||
824 | #else /* CONFIG_PCI is not enabled */ | 823 | #else /* CONFIG_PCI is not enabled */ |
825 | 824 | ||
826 | /* | 825 | /* |
827 | * If the system does not have PCI, clearly these return errors. Define | 826 | * If the system does not have PCI, clearly these return errors. Define |
828 | * these as simple inline functions to avoid hair in drivers. | 827 | * these as simple inline functions to avoid hair in drivers. |
829 | */ | 828 | */ |
830 | 829 | ||
831 | #define _PCI_NOP(o, s, t) \ | 830 | #define _PCI_NOP(o, s, t) \ |
832 | static inline int pci_##o##_config_##s(struct pci_dev *dev, \ | 831 | static inline int pci_##o##_config_##s(struct pci_dev *dev, \ |
833 | int where, t val) \ | 832 | int where, t val) \ |
834 | { return PCIBIOS_FUNC_NOT_SUPPORTED; } | 833 | { return PCIBIOS_FUNC_NOT_SUPPORTED; } |
835 | 834 | ||
836 | #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ | 835 | #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ |
837 | _PCI_NOP(o, word, u16 x) \ | 836 | _PCI_NOP(o, word, u16 x) \ |
838 | _PCI_NOP(o, dword, u32 x) | 837 | _PCI_NOP(o, dword, u32 x) |
839 | _PCI_NOP_ALL(read, *) | 838 | _PCI_NOP_ALL(read, *) |
840 | _PCI_NOP_ALL(write,) | 839 | _PCI_NOP_ALL(write,) |
841 | 840 | ||
842 | static inline struct pci_dev *pci_find_device(unsigned int vendor, | 841 | static inline struct pci_dev *pci_find_device(unsigned int vendor, |
843 | unsigned int device, | 842 | unsigned int device, |
844 | struct pci_dev *from) | 843 | struct pci_dev *from) |
845 | { | 844 | { |
846 | return NULL; | 845 | return NULL; |
847 | } | 846 | } |
848 | 847 | ||
849 | static inline struct pci_dev *pci_find_slot(unsigned int bus, | 848 | static inline struct pci_dev *pci_find_slot(unsigned int bus, |
850 | unsigned int devfn) | 849 | unsigned int devfn) |
851 | { | 850 | { |
852 | return NULL; | 851 | return NULL; |
853 | } | 852 | } |
854 | 853 | ||
855 | static inline struct pci_dev *pci_get_device(unsigned int vendor, | 854 | static inline struct pci_dev *pci_get_device(unsigned int vendor, |
856 | unsigned int device, | 855 | unsigned int device, |
857 | struct pci_dev *from) | 856 | struct pci_dev *from) |
858 | { | 857 | { |
859 | return NULL; | 858 | return NULL; |
860 | } | 859 | } |
861 | 860 | ||
862 | static inline struct pci_dev *pci_get_subsys(unsigned int vendor, | 861 | static inline struct pci_dev *pci_get_subsys(unsigned int vendor, |
863 | unsigned int device, | 862 | unsigned int device, |
864 | unsigned int ss_vendor, | 863 | unsigned int ss_vendor, |
865 | unsigned int ss_device, | 864 | unsigned int ss_device, |
866 | struct pci_dev *from) | 865 | struct pci_dev *from) |
867 | { | 866 | { |
868 | return NULL; | 867 | return NULL; |
869 | } | 868 | } |
870 | 869 | ||
871 | static inline struct pci_dev *pci_get_class(unsigned int class, | 870 | static inline struct pci_dev *pci_get_class(unsigned int class, |
872 | struct pci_dev *from) | 871 | struct pci_dev *from) |
873 | { | 872 | { |
874 | return NULL; | 873 | return NULL; |
875 | } | 874 | } |
876 | 875 | ||
877 | #define pci_dev_present(ids) (0) | 876 | #define pci_dev_present(ids) (0) |
878 | #define no_pci_devices() (1) | 877 | #define no_pci_devices() (1) |
879 | #define pci_dev_put(dev) do { } while (0) | 878 | #define pci_dev_put(dev) do { } while (0) |
880 | 879 | ||
881 | static inline void pci_set_master(struct pci_dev *dev) | 880 | static inline void pci_set_master(struct pci_dev *dev) |
882 | { } | 881 | { } |
883 | 882 | ||
884 | static inline int pci_enable_device(struct pci_dev *dev) | 883 | static inline int pci_enable_device(struct pci_dev *dev) |
885 | { | 884 | { |
886 | return -EIO; | 885 | return -EIO; |
887 | } | 886 | } |
888 | 887 | ||
889 | static inline void pci_disable_device(struct pci_dev *dev) | 888 | static inline void pci_disable_device(struct pci_dev *dev) |
890 | { } | 889 | { } |
891 | 890 | ||
892 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | 891 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
893 | { | 892 | { |
894 | return -EIO; | 893 | return -EIO; |
895 | } | 894 | } |
896 | 895 | ||
897 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | 896 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) |
898 | { | 897 | { |
899 | return -EIO; | 898 | return -EIO; |
900 | } | 899 | } |
901 | 900 | ||
902 | static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, | 901 | static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, |
903 | unsigned int size) | 902 | unsigned int size) |
904 | { | 903 | { |
905 | return -EIO; | 904 | return -EIO; |
906 | } | 905 | } |
907 | 906 | ||
908 | static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, | 907 | static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, |
909 | unsigned long mask) | 908 | unsigned long mask) |
910 | { | 909 | { |
911 | return -EIO; | 910 | return -EIO; |
912 | } | 911 | } |
913 | 912 | ||
914 | static inline int pci_assign_resource(struct pci_dev *dev, int i) | 913 | static inline int pci_assign_resource(struct pci_dev *dev, int i) |
915 | { | 914 | { |
916 | return -EBUSY; | 915 | return -EBUSY; |
917 | } | 916 | } |
918 | 917 | ||
919 | static inline int __pci_register_driver(struct pci_driver *drv, | 918 | static inline int __pci_register_driver(struct pci_driver *drv, |
920 | struct module *owner) | 919 | struct module *owner) |
921 | { | 920 | { |
922 | return 0; | 921 | return 0; |
923 | } | 922 | } |
924 | 923 | ||
925 | static inline int pci_register_driver(struct pci_driver *drv) | 924 | static inline int pci_register_driver(struct pci_driver *drv) |
926 | { | 925 | { |
927 | return 0; | 926 | return 0; |
928 | } | 927 | } |
929 | 928 | ||
930 | static inline void pci_unregister_driver(struct pci_driver *drv) | 929 | static inline void pci_unregister_driver(struct pci_driver *drv) |
931 | { } | 930 | { } |
932 | 931 | ||
933 | static inline int pci_find_capability(struct pci_dev *dev, int cap) | 932 | static inline int pci_find_capability(struct pci_dev *dev, int cap) |
934 | { | 933 | { |
935 | return 0; | 934 | return 0; |
936 | } | 935 | } |
937 | 936 | ||
938 | static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, | 937 | static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, |
939 | int cap) | 938 | int cap) |
940 | { | 939 | { |
941 | return 0; | 940 | return 0; |
942 | } | 941 | } |
943 | 942 | ||
944 | static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) | 943 | static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) |
945 | { | 944 | { |
946 | return 0; | 945 | return 0; |
947 | } | 946 | } |
948 | 947 | ||
949 | /* Power management related routines */ | 948 | /* Power management related routines */ |
950 | static inline int pci_save_state(struct pci_dev *dev) | 949 | static inline int pci_save_state(struct pci_dev *dev) |
951 | { | 950 | { |
952 | return 0; | 951 | return 0; |
953 | } | 952 | } |
954 | 953 | ||
955 | static inline int pci_restore_state(struct pci_dev *dev) | 954 | static inline int pci_restore_state(struct pci_dev *dev) |
956 | { | 955 | { |
957 | return 0; | 956 | return 0; |
958 | } | 957 | } |
959 | 958 | ||
960 | static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | 959 | static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
961 | { | 960 | { |
962 | return 0; | 961 | return 0; |
963 | } | 962 | } |
964 | 963 | ||
965 | static inline pci_power_t pci_choose_state(struct pci_dev *dev, | 964 | static inline pci_power_t pci_choose_state(struct pci_dev *dev, |
966 | pm_message_t state) | 965 | pm_message_t state) |
967 | { | 966 | { |
968 | return PCI_D0; | 967 | return PCI_D0; |
969 | } | 968 | } |
970 | 969 | ||
971 | static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, | 970 | static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
972 | int enable) | 971 | int enable) |
973 | { | 972 | { |
974 | return 0; | 973 | return 0; |
975 | } | 974 | } |
976 | 975 | ||
977 | static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) | 976 | static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) |
978 | { | 977 | { |
979 | return -EIO; | 978 | return -EIO; |
980 | } | 979 | } |
981 | 980 | ||
982 | static inline void pci_release_regions(struct pci_dev *dev) | 981 | static inline void pci_release_regions(struct pci_dev *dev) |
983 | { } | 982 | { } |
984 | 983 | ||
985 | #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) | 984 | #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) |
986 | 985 | ||
987 | static inline void pci_block_user_cfg_access(struct pci_dev *dev) | 986 | static inline void pci_block_user_cfg_access(struct pci_dev *dev) |
988 | { } | 987 | { } |
989 | 988 | ||
990 | static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) | 989 | static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) |
991 | { } | 990 | { } |
992 | 991 | ||
993 | static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) | 992 | static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) |
994 | { return NULL; } | 993 | { return NULL; } |
995 | 994 | ||
996 | static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, | 995 | static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, |
997 | unsigned int devfn) | 996 | unsigned int devfn) |
998 | { return NULL; } | 997 | { return NULL; } |
999 | 998 | ||
1000 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | 999 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, |
1001 | unsigned int devfn) | 1000 | unsigned int devfn) |
1002 | { return NULL; } | 1001 | { return NULL; } |
1003 | 1002 | ||
1004 | #endif /* CONFIG_PCI */ | 1003 | #endif /* CONFIG_PCI */ |
1005 | 1004 | ||
1006 | /* Include architecture-dependent settings and functions */ | 1005 | /* Include architecture-dependent settings and functions */ |
1007 | 1006 | ||
1008 | #include <asm/pci.h> | 1007 | #include <asm/pci.h> |
1009 | 1008 | ||
1010 | /* these helpers provide future and backwards compatibility | 1009 | /* these helpers provide future and backwards compatibility |
1011 | * for accessing popular PCI BAR info */ | 1010 | * for accessing popular PCI BAR info */ |
1012 | #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) | 1011 | #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) |
1013 | #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) | 1012 | #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) |
1014 | #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) | 1013 | #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) |
1015 | #define pci_resource_len(dev,bar) \ | 1014 | #define pci_resource_len(dev,bar) \ |
1016 | ((pci_resource_start((dev), (bar)) == 0 && \ | 1015 | ((pci_resource_start((dev), (bar)) == 0 && \ |
1017 | pci_resource_end((dev), (bar)) == \ | 1016 | pci_resource_end((dev), (bar)) == \ |
1018 | pci_resource_start((dev), (bar))) ? 0 : \ | 1017 | pci_resource_start((dev), (bar))) ? 0 : \ |
1019 | \ | 1018 | \ |
1020 | (pci_resource_end((dev), (bar)) - \ | 1019 | (pci_resource_end((dev), (bar)) - \ |
1021 | pci_resource_start((dev), (bar)) + 1)) | 1020 | pci_resource_start((dev), (bar)) + 1)) |
1022 | 1021 | ||
1023 | /* Similar to the helpers above, these manipulate per-pci_dev | 1022 | /* Similar to the helpers above, these manipulate per-pci_dev |
1024 | * driver-specific data. They are really just a wrapper around | 1023 | * driver-specific data. They are really just a wrapper around |
1025 | * the generic device structure functions of these calls. | 1024 | * the generic device structure functions of these calls. |
1026 | */ | 1025 | */ |
1027 | static inline void *pci_get_drvdata(struct pci_dev *pdev) | 1026 | static inline void *pci_get_drvdata(struct pci_dev *pdev) |
1028 | { | 1027 | { |
1029 | return dev_get_drvdata(&pdev->dev); | 1028 | return dev_get_drvdata(&pdev->dev); |
1030 | } | 1029 | } |
1031 | 1030 | ||
1032 | static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) | 1031 | static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) |
1033 | { | 1032 | { |
1034 | dev_set_drvdata(&pdev->dev, data); | 1033 | dev_set_drvdata(&pdev->dev, data); |
1035 | } | 1034 | } |
1036 | 1035 | ||
1037 | /* If you want to know what to call your pci_dev, ask this function. | 1036 | /* If you want to know what to call your pci_dev, ask this function. |
1038 | * Again, it's a wrapper around the generic device. | 1037 | * Again, it's a wrapper around the generic device. |
1039 | */ | 1038 | */ |
1040 | static inline const char *pci_name(struct pci_dev *pdev) | 1039 | static inline const char *pci_name(struct pci_dev *pdev) |
1041 | { | 1040 | { |
1042 | return dev_name(&pdev->dev); | 1041 | return dev_name(&pdev->dev); |
1043 | } | 1042 | } |
1044 | 1043 | ||
1045 | 1044 | ||
1046 | /* Some archs don't want to expose struct resource to userland as-is | 1045 | /* Some archs don't want to expose struct resource to userland as-is |
1047 | * in sysfs and /proc | 1046 | * in sysfs and /proc |
1048 | */ | 1047 | */ |
1049 | #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER | 1048 | #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER |
1050 | static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, | 1049 | static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, |
1051 | const struct resource *rsrc, resource_size_t *start, | 1050 | const struct resource *rsrc, resource_size_t *start, |
1052 | resource_size_t *end) | 1051 | resource_size_t *end) |
1053 | { | 1052 | { |
1054 | *start = rsrc->start; | 1053 | *start = rsrc->start; |
1055 | *end = rsrc->end; | 1054 | *end = rsrc->end; |
1056 | } | 1055 | } |
1057 | #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ | 1056 | #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ |
1058 | 1057 | ||
1059 | 1058 | ||
1060 | /* | 1059 | /* |
1061 | * The world is not perfect and supplies us with broken PCI devices. | 1060 | * The world is not perfect and supplies us with broken PCI devices. |
1062 | * For at least a part of these bugs we need a work-around, so both | 1061 | * For at least a part of these bugs we need a work-around, so both |
1063 | * generic (drivers/pci/quirks.c) and per-architecture code can define | 1062 | * generic (drivers/pci/quirks.c) and per-architecture code can define |
1064 | * fixup hooks to be called for particular buggy devices. | 1063 | * fixup hooks to be called for particular buggy devices. |
1065 | */ | 1064 | */ |
1066 | 1065 | ||
1067 | struct pci_fixup { | 1066 | struct pci_fixup { |
1068 | u16 vendor, device; /* You can use PCI_ANY_ID here of course */ | 1067 | u16 vendor, device; /* You can use PCI_ANY_ID here of course */ |
1069 | void (*hook)(struct pci_dev *dev); | 1068 | void (*hook)(struct pci_dev *dev); |
1070 | }; | 1069 | }; |
1071 | 1070 | ||
1072 | enum pci_fixup_pass { | 1071 | enum pci_fixup_pass { |
1073 | pci_fixup_early, /* Before probing BARs */ | 1072 | pci_fixup_early, /* Before probing BARs */ |
1074 | pci_fixup_header, /* After reading configuration header */ | 1073 | pci_fixup_header, /* After reading configuration header */ |
1075 | pci_fixup_final, /* Final phase of device fixups */ | 1074 | pci_fixup_final, /* Final phase of device fixups */ |
1076 | pci_fixup_enable, /* pci_enable_device() time */ | 1075 | pci_fixup_enable, /* pci_enable_device() time */ |
1077 | pci_fixup_resume, /* pci_device_resume() */ | 1076 | pci_fixup_resume, /* pci_device_resume() */ |
1078 | pci_fixup_suspend, /* pci_device_suspend */ | 1077 | pci_fixup_suspend, /* pci_device_suspend */ |
1079 | pci_fixup_resume_early, /* pci_device_resume_early() */ | 1078 | pci_fixup_resume_early, /* pci_device_resume_early() */ |
1080 | }; | 1079 | }; |
1081 | 1080 | ||
1082 | /* Anonymous variables would be nice... */ | 1081 | /* Anonymous variables would be nice... */ |
1083 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ | 1082 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ |
1084 | static const struct pci_fixup __pci_fixup_##name __used \ | 1083 | static const struct pci_fixup __pci_fixup_##name __used \ |
1085 | __attribute__((__section__(#section))) = { vendor, device, hook }; | 1084 | __attribute__((__section__(#section))) = { vendor, device, hook }; |
1086 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ | 1085 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ |
1087 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 1086 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
1088 | vendor##device##hook, vendor, device, hook) | 1087 | vendor##device##hook, vendor, device, hook) |
1089 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ | 1088 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ |
1090 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ | 1089 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ |
1091 | vendor##device##hook, vendor, device, hook) | 1090 | vendor##device##hook, vendor, device, hook) |
1092 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ | 1091 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ |
1093 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ | 1092 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ |
1094 | vendor##device##hook, vendor, device, hook) | 1093 | vendor##device##hook, vendor, device, hook) |
1095 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ | 1094 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ |
1096 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ | 1095 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ |
1097 | vendor##device##hook, vendor, device, hook) | 1096 | vendor##device##hook, vendor, device, hook) |
1098 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ | 1097 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ |
1099 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1098 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1100 | resume##vendor##device##hook, vendor, device, hook) | 1099 | resume##vendor##device##hook, vendor, device, hook) |
1101 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ | 1100 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ |
1102 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1101 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1103 | resume_early##vendor##device##hook, vendor, device, hook) | 1102 | resume_early##vendor##device##hook, vendor, device, hook) |
1104 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ | 1103 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ |
1105 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1104 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1106 | suspend##vendor##device##hook, vendor, device, hook) | 1105 | suspend##vendor##device##hook, vendor, device, hook) |
1107 | 1106 | ||
1108 | 1107 | ||
1109 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); | 1108 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); |
1110 | 1109 | ||
1111 | void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); | 1110 | void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); |
1112 | void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); | 1111 | void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); |
1113 | void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); | 1112 | void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); |
1114 | int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name); | 1113 | int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name); |
1115 | int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask, | 1114 | int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask, |
1116 | const char *name); | 1115 | const char *name); |
1117 | void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask); | 1116 | void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask); |
1118 | 1117 | ||
1119 | extern int pci_pci_problems; | 1118 | extern int pci_pci_problems; |
1120 | #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ | 1119 | #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ |
1121 | #define PCIPCI_TRITON 2 | 1120 | #define PCIPCI_TRITON 2 |
1122 | #define PCIPCI_NATOMA 4 | 1121 | #define PCIPCI_NATOMA 4 |
1123 | #define PCIPCI_VIAETBF 8 | 1122 | #define PCIPCI_VIAETBF 8 |
1124 | #define PCIPCI_VSFX 16 | 1123 | #define PCIPCI_VSFX 16 |
1125 | #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ | 1124 | #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ |
1126 | #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ | 1125 | #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ |
1127 | 1126 | ||
1128 | extern unsigned long pci_cardbus_io_size; | 1127 | extern unsigned long pci_cardbus_io_size; |
1129 | extern unsigned long pci_cardbus_mem_size; | 1128 | extern unsigned long pci_cardbus_mem_size; |
1130 | 1129 | ||
1131 | int pcibios_add_platform_entries(struct pci_dev *dev); | 1130 | int pcibios_add_platform_entries(struct pci_dev *dev); |
1132 | void pcibios_disable_device(struct pci_dev *dev); | 1131 | void pcibios_disable_device(struct pci_dev *dev); |
1133 | int pcibios_set_pcie_reset_state(struct pci_dev *dev, | 1132 | int pcibios_set_pcie_reset_state(struct pci_dev *dev, |
1134 | enum pcie_reset_state state); | 1133 | enum pcie_reset_state state); |
1135 | 1134 | ||
1136 | #ifdef CONFIG_PCI_MMCONFIG | 1135 | #ifdef CONFIG_PCI_MMCONFIG |
1137 | extern void __init pci_mmcfg_early_init(void); | 1136 | extern void __init pci_mmcfg_early_init(void); |
1138 | extern void __init pci_mmcfg_late_init(void); | 1137 | extern void __init pci_mmcfg_late_init(void); |
1139 | #else | 1138 | #else |
1140 | static inline void pci_mmcfg_early_init(void) { } | 1139 | static inline void pci_mmcfg_early_init(void) { } |
1141 | static inline void pci_mmcfg_late_init(void) { } | 1140 | static inline void pci_mmcfg_late_init(void) { } |
1142 | #endif | 1141 | #endif |
1143 | 1142 | ||
1144 | #ifdef CONFIG_HAS_IOMEM | 1143 | #ifdef CONFIG_HAS_IOMEM |
1145 | static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) | 1144 | static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) |
1146 | { | 1145 | { |
1147 | /* | 1146 | /* |
1148 | * Make sure the BAR is actually a memory resource, not an IO resource | 1147 | * Make sure the BAR is actually a memory resource, not an IO resource |
1149 | */ | 1148 | */ |
1150 | if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { | 1149 | if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { |
1151 | WARN_ON(1); | 1150 | WARN_ON(1); |
1152 | return NULL; | 1151 | return NULL; |
1153 | } | 1152 | } |
1154 | return ioremap_nocache(pci_resource_start(pdev, bar), | 1153 | return ioremap_nocache(pci_resource_start(pdev, bar), |
1155 | pci_resource_len(pdev, bar)); | 1154 | pci_resource_len(pdev, bar)); |
1156 | } | 1155 | } |
1157 | #endif | 1156 | #endif |
1158 | 1157 | ||
1159 | #endif /* __KERNEL__ */ | 1158 | #endif /* __KERNEL__ */ |
1160 | #endif /* LINUX_PCI_H */ | 1159 | #endif /* LINUX_PCI_H */ |
1161 | 1160 |
include/linux/platform_device.h
1 | /* | 1 | /* |
2 | * platform_device.h - generic, centralized driver model | 2 | * platform_device.h - generic, centralized driver model |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> | 4 | * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> |
5 | * | 5 | * |
6 | * This file is released under the GPLv2 | 6 | * This file is released under the GPLv2 |
7 | * | 7 | * |
8 | * See Documentation/driver-model/ for more information. | 8 | * See Documentation/driver-model/ for more information. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _PLATFORM_DEVICE_H_ | 11 | #ifndef _PLATFORM_DEVICE_H_ |
12 | #define _PLATFORM_DEVICE_H_ | 12 | #define _PLATFORM_DEVICE_H_ |
13 | 13 | ||
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | 15 | ||
16 | struct platform_device { | 16 | struct platform_device { |
17 | const char * name; | 17 | const char * name; |
18 | int id; | 18 | int id; |
19 | struct device dev; | 19 | struct device dev; |
20 | u32 num_resources; | 20 | u32 num_resources; |
21 | struct resource * resource; | 21 | struct resource * resource; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | #define to_platform_device(x) container_of((x), struct platform_device, dev) | 24 | #define to_platform_device(x) container_of((x), struct platform_device, dev) |
25 | 25 | ||
26 | extern int platform_device_register(struct platform_device *); | 26 | extern int platform_device_register(struct platform_device *); |
27 | extern void platform_device_unregister(struct platform_device *); | 27 | extern void platform_device_unregister(struct platform_device *); |
28 | 28 | ||
29 | extern struct bus_type platform_bus_type; | 29 | extern struct bus_type platform_bus_type; |
30 | extern struct device platform_bus; | 30 | extern struct device platform_bus; |
31 | 31 | ||
32 | extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); | 32 | extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); |
33 | extern int platform_get_irq(struct platform_device *, unsigned int); | 33 | extern int platform_get_irq(struct platform_device *, unsigned int); |
34 | extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, char *); | 34 | extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, char *); |
35 | extern int platform_get_irq_byname(struct platform_device *, char *); | 35 | extern int platform_get_irq_byname(struct platform_device *, char *); |
36 | extern int platform_add_devices(struct platform_device **, int); | 36 | extern int platform_add_devices(struct platform_device **, int); |
37 | 37 | ||
38 | extern struct platform_device *platform_device_register_simple(const char *, int id, | 38 | extern struct platform_device *platform_device_register_simple(const char *, int id, |
39 | struct resource *, unsigned int); | 39 | struct resource *, unsigned int); |
40 | extern struct platform_device *platform_device_register_data(struct device *, | 40 | extern struct platform_device *platform_device_register_data(struct device *, |
41 | const char *, int, const void *, size_t); | 41 | const char *, int, const void *, size_t); |
42 | 42 | ||
43 | extern struct platform_device *platform_device_alloc(const char *name, int id); | 43 | extern struct platform_device *platform_device_alloc(const char *name, int id); |
44 | extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); | 44 | extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); |
45 | extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); | 45 | extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); |
46 | extern int platform_device_add(struct platform_device *pdev); | 46 | extern int platform_device_add(struct platform_device *pdev); |
47 | extern void platform_device_del(struct platform_device *pdev); | 47 | extern void platform_device_del(struct platform_device *pdev); |
48 | extern void platform_device_put(struct platform_device *pdev); | 48 | extern void platform_device_put(struct platform_device *pdev); |
49 | 49 | ||
50 | struct platform_driver { | 50 | struct platform_driver { |
51 | int (*probe)(struct platform_device *); | 51 | int (*probe)(struct platform_device *); |
52 | int (*remove)(struct platform_device *); | 52 | int (*remove)(struct platform_device *); |
53 | void (*shutdown)(struct platform_device *); | 53 | void (*shutdown)(struct platform_device *); |
54 | int (*suspend)(struct platform_device *, pm_message_t state); | 54 | int (*suspend)(struct platform_device *, pm_message_t state); |
55 | int (*suspend_late)(struct platform_device *, pm_message_t state); | 55 | int (*suspend_late)(struct platform_device *, pm_message_t state); |
56 | int (*resume_early)(struct platform_device *); | 56 | int (*resume_early)(struct platform_device *); |
57 | int (*resume)(struct platform_device *); | 57 | int (*resume)(struct platform_device *); |
58 | struct pm_ext_ops *pm; | ||
59 | struct device_driver driver; | 58 | struct device_driver driver; |
60 | }; | 59 | }; |
61 | 60 | ||
62 | extern int platform_driver_register(struct platform_driver *); | 61 | extern int platform_driver_register(struct platform_driver *); |
63 | extern void platform_driver_unregister(struct platform_driver *); | 62 | extern void platform_driver_unregister(struct platform_driver *); |
64 | 63 | ||
65 | /* non-hotpluggable platform devices may use this so that probe() and | 64 | /* non-hotpluggable platform devices may use this so that probe() and |
66 | * its support may live in __init sections, conserving runtime memory. | 65 | * its support may live in __init sections, conserving runtime memory. |
67 | */ | 66 | */ |
68 | extern int platform_driver_probe(struct platform_driver *driver, | 67 | extern int platform_driver_probe(struct platform_driver *driver, |
69 | int (*probe)(struct platform_device *)); | 68 | int (*probe)(struct platform_device *)); |
70 | 69 | ||
71 | #define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev) | 70 | #define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev) |
72 | #define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data)) | 71 | #define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data)) |
73 | 72 | ||
74 | #endif /* _PLATFORM_DEVICE_H_ */ | 73 | #endif /* _PLATFORM_DEVICE_H_ */ |
75 | 74 |
include/linux/pm.h
1 | /* | 1 | /* |
2 | * pm.h - Power management interface | 2 | * pm.h - Power management interface |
3 | * | 3 | * |
4 | * Copyright (C) 2000 Andrew Henroid | 4 | * Copyright (C) 2000 Andrew Henroid |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #ifndef _LINUX_PM_H | 21 | #ifndef _LINUX_PM_H |
22 | #define _LINUX_PM_H | 22 | #define _LINUX_PM_H |
23 | 23 | ||
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * Callbacks for platform drivers to implement. | 27 | * Callbacks for platform drivers to implement. |
28 | */ | 28 | */ |
29 | extern void (*pm_idle)(void); | 29 | extern void (*pm_idle)(void); |
30 | extern void (*pm_power_off)(void); | 30 | extern void (*pm_power_off)(void); |
31 | extern void (*pm_power_off_prepare)(void); | 31 | extern void (*pm_power_off_prepare)(void); |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * Device power management | 34 | * Device power management |
35 | */ | 35 | */ |
36 | 36 | ||
37 | struct device; | 37 | struct device; |
38 | 38 | ||
39 | typedef struct pm_message { | 39 | typedef struct pm_message { |
40 | int event; | 40 | int event; |
41 | } pm_message_t; | 41 | } pm_message_t; |
42 | 42 | ||
43 | /** | 43 | /** |
44 | * struct pm_ops - device PM callbacks | 44 | * struct dev_pm_ops - device PM callbacks |
45 | * | 45 | * |
46 | * Several driver power state transitions are externally visible, affecting | 46 | * Several driver power state transitions are externally visible, affecting |
47 | * the state of pending I/O queues and (for drivers that touch hardware) | 47 | * the state of pending I/O queues and (for drivers that touch hardware) |
48 | * interrupts, wakeups, DMA, and other hardware state. There may also be | 48 | * interrupts, wakeups, DMA, and other hardware state. There may also be |
49 | * internal transitions to various low power modes, which are transparent | 49 | * internal transitions to various low power modes, which are transparent |
50 | * to the rest of the driver stack (such as a driver that's ON gating off | 50 | * to the rest of the driver stack (such as a driver that's ON gating off |
51 | * clocks which are not in active use). | 51 | * clocks which are not in active use). |
52 | * | 52 | * |
53 | * The externally visible transitions are handled with the help of the following | 53 | * The externally visible transitions are handled with the help of the following |
54 | * callbacks included in this structure: | 54 | * callbacks included in this structure: |
55 | * | 55 | * |
56 | * @prepare: Prepare the device for the upcoming transition, but do NOT change | 56 | * @prepare: Prepare the device for the upcoming transition, but do NOT change |
57 | * its hardware state. Prevent new children of the device from being | 57 | * its hardware state. Prevent new children of the device from being |
58 | * registered after @prepare() returns (the driver's subsystem and | 58 | * registered after @prepare() returns (the driver's subsystem and |
59 | * generally the rest of the kernel is supposed to prevent new calls to the | 59 | * generally the rest of the kernel is supposed to prevent new calls to the |
60 | * probe method from being made too once @prepare() has succeeded). If | 60 | * probe method from being made too once @prepare() has succeeded). If |
61 | * @prepare() detects a situation it cannot handle (e.g. registration of a | 61 | * @prepare() detects a situation it cannot handle (e.g. registration of a |
62 | * child already in progress), it may return -EAGAIN, so that the PM core | 62 | * child already in progress), it may return -EAGAIN, so that the PM core |
63 | * can execute it once again (e.g. after the new child has been registered) | 63 | * can execute it once again (e.g. after the new child has been registered) |
64 | * to recover from the race condition. This method is executed for all | 64 | * to recover from the race condition. This method is executed for all |
65 | * kinds of suspend transitions and is followed by one of the suspend | 65 | * kinds of suspend transitions and is followed by one of the suspend |
66 | * callbacks: @suspend(), @freeze(), or @poweroff(). | 66 | * callbacks: @suspend(), @freeze(), or @poweroff(). |
67 | * The PM core executes @prepare() for all devices before starting to | 67 | * The PM core executes @prepare() for all devices before starting to |
68 | * execute suspend callbacks for any of them, so drivers may assume all of | 68 | * execute suspend callbacks for any of them, so drivers may assume all of |
69 | * the other devices to be present and functional while @prepare() is being | 69 | * the other devices to be present and functional while @prepare() is being |
70 | * executed. In particular, it is safe to make GFP_KERNEL memory | 70 | * executed. In particular, it is safe to make GFP_KERNEL memory |
71 | * allocations from within @prepare(). However, drivers may NOT assume | 71 | * allocations from within @prepare(). However, drivers may NOT assume |
72 | * anything about the availability of the user space at that time and it | 72 | * anything about the availability of the user space at that time and it |
73 | * is not correct to request firmware from within @prepare() (it's too | 73 | * is not correct to request firmware from within @prepare() (it's too |
74 | * late to do that). [To work around this limitation, drivers may | 74 | * late to do that). [To work around this limitation, drivers may |
75 | * register suspend and hibernation notifiers that are executed before the | 75 | * register suspend and hibernation notifiers that are executed before the |
76 | * freezing of tasks.] | 76 | * freezing of tasks.] |
77 | * | 77 | * |
78 | * @complete: Undo the changes made by @prepare(). This method is executed for | 78 | * @complete: Undo the changes made by @prepare(). This method is executed for |
79 | * all kinds of resume transitions, following one of the resume callbacks: | 79 | * all kinds of resume transitions, following one of the resume callbacks: |
80 | * @resume(), @thaw(), @restore(). Also called if the state transition | 80 | * @resume(), @thaw(), @restore(). Also called if the state transition |
81 | * fails before the driver's suspend callback (@suspend(), @freeze(), | 81 | * fails before the driver's suspend callback (@suspend(), @freeze(), |
82 | * @poweroff()) can be executed (e.g. if the suspend callback fails for one | 82 | * @poweroff()) can be executed (e.g. if the suspend callback fails for one |
83 | * of the other devices that the PM core has unsuccessfully attempted to | 83 | * of the other devices that the PM core has unsuccessfully attempted to |
84 | * suspend earlier). | 84 | * suspend earlier). |
85 | * The PM core executes @complete() after it has executed the appropriate | 85 | * The PM core executes @complete() after it has executed the appropriate |
86 | * resume callback for all devices. | 86 | * resume callback for all devices. |
87 | * | 87 | * |
88 | * @suspend: Executed before putting the system into a sleep state in which the | 88 | * @suspend: Executed before putting the system into a sleep state in which the |
89 | * contents of main memory are preserved. Quiesce the device, put it into | 89 | * contents of main memory are preserved. Quiesce the device, put it into |
90 | * a low power state appropriate for the upcoming system state (such as | 90 | * a low power state appropriate for the upcoming system state (such as |
91 | * PCI_D3hot), and enable wakeup events as appropriate. | 91 | * PCI_D3hot), and enable wakeup events as appropriate. |
92 | * | 92 | * |
93 | * @resume: Executed after waking the system up from a sleep state in which the | 93 | * @resume: Executed after waking the system up from a sleep state in which the |
94 | * contents of main memory were preserved. Put the device into the | 94 | * contents of main memory were preserved. Put the device into the |
95 | * appropriate state, according to the information saved in memory by the | 95 | * appropriate state, according to the information saved in memory by the |
96 | * preceding @suspend(). The driver starts working again, responding to | 96 | * preceding @suspend(). The driver starts working again, responding to |
97 | * hardware events and software requests. The hardware may have gone | 97 | * hardware events and software requests. The hardware may have gone |
98 | * through a power-off reset, or it may have maintained state from the | 98 | * through a power-off reset, or it may have maintained state from the |
99 | * previous suspend() which the driver may rely on while resuming. On most | 99 | * previous suspend() which the driver may rely on while resuming. On most |
100 | * platforms, there are no restrictions on availability of resources like | 100 | * platforms, there are no restrictions on availability of resources like |
101 | * clocks during @resume(). | 101 | * clocks during @resume(). |
102 | * | 102 | * |
103 | * @freeze: Hibernation-specific, executed before creating a hibernation image. | 103 | * @freeze: Hibernation-specific, executed before creating a hibernation image. |
104 | * Quiesce operations so that a consistent image can be created, but do NOT | 104 | * Quiesce operations so that a consistent image can be created, but do NOT |
105 | * otherwise put the device into a low power device state and do NOT emit | 105 | * otherwise put the device into a low power device state and do NOT emit |
106 | * system wakeup events. Save in main memory the device settings to be | 106 | * system wakeup events. Save in main memory the device settings to be |
107 | * used by @restore() during the subsequent resume from hibernation or by | 107 | * used by @restore() during the subsequent resume from hibernation or by |
108 | * the subsequent @thaw(), if the creation of the image or the restoration | 108 | * the subsequent @thaw(), if the creation of the image or the restoration |
109 | * of main memory contents from it fails. | 109 | * of main memory contents from it fails. |
110 | * | 110 | * |
111 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR | 111 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR |
112 | * if the creation of the image fails. Also executed after a failing | 112 | * if the creation of the image fails. Also executed after a failing |
113 | * attempt to restore the contents of main memory from such an image. | 113 | * attempt to restore the contents of main memory from such an image. |
114 | * Undo the changes made by the preceding @freeze(), so the device can be | 114 | * Undo the changes made by the preceding @freeze(), so the device can be |
115 | * operated in the same way as immediately before the call to @freeze(). | 115 | * operated in the same way as immediately before the call to @freeze(). |
116 | * | 116 | * |
117 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. | 117 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. |
118 | * Quiesce the device, put it into a low power state appropriate for the | 118 | * Quiesce the device, put it into a low power state appropriate for the |
119 | * upcoming system state (such as PCI_D3hot), and enable wakeup events as | 119 | * upcoming system state (such as PCI_D3hot), and enable wakeup events as |
120 | * appropriate. | 120 | * appropriate. |
121 | * | 121 | * |
122 | * @restore: Hibernation-specific, executed after restoring the contents of main | 122 | * @restore: Hibernation-specific, executed after restoring the contents of main |
123 | * memory from a hibernation image. Driver starts working again, | 123 | * memory from a hibernation image. Driver starts working again, |
124 | * responding to hardware events and software requests. Drivers may NOT | 124 | * responding to hardware events and software requests. Drivers may NOT |
125 | * make ANY assumptions about the hardware state right prior to @restore(). | 125 | * make ANY assumptions about the hardware state right prior to @restore(). |
126 | * On most platforms, there are no restrictions on availability of | 126 | * On most platforms, there are no restrictions on availability of |
127 | * resources like clocks during @restore(). | 127 | * resources like clocks during @restore(). |
128 | * | 128 | * |
129 | * All of the above callbacks, except for @complete(), return error codes. | ||
130 | * However, the error codes returned by the resume operations, @resume(), | ||
131 | * @thaw(), and @restore(), do not cause the PM core to abort the resume | ||
132 | * transition during which they are returned. The error codes returned in | ||
133 | * those cases are only printed by the PM core to the system logs for debugging | ||
134 | * purposes. Still, it is recommended that drivers only return error codes | ||
135 | * from their resume methods in case of an unrecoverable failure (i.e. when the | ||
136 | * device being handled refuses to resume and becomes unusable) to allow us to | ||
137 | * modify the PM core in the future, so that it can avoid attempting to handle | ||
138 | * devices that failed to resume and their children. | ||
139 | * | ||
140 | * It is allowed to unregister devices while the above callbacks are being | ||
141 | * executed. However, it is not allowed to unregister a device from within any | ||
142 | * of its own callbacks. | ||
143 | */ | ||
144 | |||
145 | struct pm_ops { | ||
146 | int (*prepare)(struct device *dev); | ||
147 | void (*complete)(struct device *dev); | ||
148 | int (*suspend)(struct device *dev); | ||
149 | int (*resume)(struct device *dev); | ||
150 | int (*freeze)(struct device *dev); | ||
151 | int (*thaw)(struct device *dev); | ||
152 | int (*poweroff)(struct device *dev); | ||
153 | int (*restore)(struct device *dev); | ||
154 | }; | ||
155 | |||
156 | /** | ||
157 | * struct pm_ext_ops - extended device PM callbacks | ||
158 | * | ||
159 | * Some devices require certain operations related to suspend and hibernation | ||
160 | * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below | ||
161 | * is defined, adding callbacks to be executed with interrupts disabled to | ||
162 | * 'struct pm_ops'. | ||
163 | * | ||
164 | * The following callbacks included in 'struct pm_ext_ops' are executed with | ||
165 | * the nonboot CPUs switched off and with interrupts disabled on the only | ||
166 | * functional CPU. They also are executed with the PM core list of devices | ||
167 | * locked, so they must NOT unregister any devices. | ||
168 | * | ||
169 | * @suspend_noirq: Complete the operations of ->suspend() by carrying out any | 129 | * @suspend_noirq: Complete the operations of ->suspend() by carrying out any |
170 | * actions required for suspending the device that need interrupts to be | 130 | * actions required for suspending the device that need interrupts to be |
171 | * disabled | 131 | * disabled |
172 | * | 132 | * |
173 | * @resume_noirq: Prepare for the execution of ->resume() by carrying out any | 133 | * @resume_noirq: Prepare for the execution of ->resume() by carrying out any |
174 | * actions required for resuming the device that need interrupts to be | 134 | * actions required for resuming the device that need interrupts to be |
175 | * disabled | 135 | * disabled |
176 | * | 136 | * |
177 | * @freeze_noirq: Complete the operations of ->freeze() by carrying out any | 137 | * @freeze_noirq: Complete the operations of ->freeze() by carrying out any |
178 | * actions required for freezing the device that need interrupts to be | 138 | * actions required for freezing the device that need interrupts to be |
179 | * disabled | 139 | * disabled |
180 | * | 140 | * |
181 | * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any | 141 | * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any |
182 | * actions required for thawing the device that need interrupts to be | 142 | * actions required for thawing the device that need interrupts to be |
183 | * disabled | 143 | * disabled |
184 | * | 144 | * |
185 | * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any | 145 | * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any |
186 | * actions required for handling the device that need interrupts to be | 146 | * actions required for handling the device that need interrupts to be |
187 | * disabled | 147 | * disabled |
188 | * | 148 | * |
189 | * @restore_noirq: Prepare for the execution of ->restore() by carrying out any | 149 | * @restore_noirq: Prepare for the execution of ->restore() by carrying out any |
190 | * actions required for restoring the operations of the device that need | 150 | * actions required for restoring the operations of the device that need |
191 | * interrupts to be disabled | 151 | * interrupts to be disabled |
192 | * | 152 | * |
193 | * All of the above callbacks return error codes, but the error codes returned | 153 | * All of the above callbacks, except for @complete(), return error codes. |
194 | * by the resume operations, @resume_noirq(), @thaw_noirq(), and | 154 | * However, the error codes returned by the resume operations, @resume(), |
195 | * @restore_noirq(), do not cause the PM core to abort the resume transition | 155 | * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do |
196 | * during which they are returned. The error codes returned in that cases are | 156 | * not cause the PM core to abort the resume transition during which they are |
197 | * only printed by the PM core to the system logs for debugging purposes. | 157 | * returned. The error codes returned in that cases are only printed by the PM |
198 | * Still, as stated above, it is recommended that drivers only return error | 158 | * core to the system logs for debugging purposes. Still, it is recommended |
199 | * codes from their resume methods if the device being handled fails to resume | 159 | * that drivers only return error codes from their resume methods in case of an |
200 | * and is not usable any more. | 160 | * unrecoverable failure (i.e. when the device being handled refuses to resume |
161 | * and becomes unusable) to allow us to modify the PM core in the future, so | ||
162 | * that it can avoid attempting to handle devices that failed to resume and | ||
163 | * their children. | ||
164 | * | ||
165 | * It is allowed to unregister devices while the above callbacks are being | ||
166 | * executed. However, it is not allowed to unregister a device from within any | ||
167 | * of its own callbacks. | ||
201 | */ | 168 | */ |
202 | 169 | ||
203 | struct pm_ext_ops { | 170 | struct dev_pm_ops { |
204 | struct pm_ops base; | 171 | int (*prepare)(struct device *dev); |
172 | void (*complete)(struct device *dev); | ||
173 | int (*suspend)(struct device *dev); | ||
174 | int (*resume)(struct device *dev); | ||
175 | int (*freeze)(struct device *dev); | ||
176 | int (*thaw)(struct device *dev); | ||
177 | int (*poweroff)(struct device *dev); | ||
178 | int (*restore)(struct device *dev); | ||
205 | int (*suspend_noirq)(struct device *dev); | 179 | int (*suspend_noirq)(struct device *dev); |
206 | int (*resume_noirq)(struct device *dev); | 180 | int (*resume_noirq)(struct device *dev); |
207 | int (*freeze_noirq)(struct device *dev); | 181 | int (*freeze_noirq)(struct device *dev); |
208 | int (*thaw_noirq)(struct device *dev); | 182 | int (*thaw_noirq)(struct device *dev); |
209 | int (*poweroff_noirq)(struct device *dev); | 183 | int (*poweroff_noirq)(struct device *dev); |
210 | int (*restore_noirq)(struct device *dev); | 184 | int (*restore_noirq)(struct device *dev); |
211 | }; | 185 | }; |
212 | 186 | ||
213 | /** | 187 | /** |
214 | * PM_EVENT_ messages | 188 | * PM_EVENT_ messages |
215 | * | 189 | * |
216 | * The following PM_EVENT_ messages are defined for the internal use of the PM | 190 | * The following PM_EVENT_ messages are defined for the internal use of the PM |
217 | * core, in order to provide a mechanism allowing the high level suspend and | 191 | * core, in order to provide a mechanism allowing the high level suspend and |
218 | * hibernation code to convey the necessary information to the device PM core | 192 | * hibernation code to convey the necessary information to the device PM core |
219 | * code: | 193 | * code: |
220 | * | 194 | * |
221 | * ON No transition. | 195 | * ON No transition. |
222 | * | 196 | * |
223 | * FREEZE System is going to hibernate, call ->prepare() and ->freeze() | 197 | * FREEZE System is going to hibernate, call ->prepare() and ->freeze() |
224 | * for all devices. | 198 | * for all devices. |
225 | * | 199 | * |
226 | * SUSPEND System is going to suspend, call ->prepare() and ->suspend() | 200 | * SUSPEND System is going to suspend, call ->prepare() and ->suspend() |
227 | * for all devices. | 201 | * for all devices. |
228 | * | 202 | * |
229 | * HIBERNATE Hibernation image has been saved, call ->prepare() and | 203 | * HIBERNATE Hibernation image has been saved, call ->prepare() and |
230 | * ->poweroff() for all devices. | 204 | * ->poweroff() for all devices. |
231 | * | 205 | * |
232 | * QUIESCE Contents of main memory are going to be restored from a (loaded) | 206 | * QUIESCE Contents of main memory are going to be restored from a (loaded) |
233 | * hibernation image, call ->prepare() and ->freeze() for all | 207 | * hibernation image, call ->prepare() and ->freeze() for all |
234 | * devices. | 208 | * devices. |
235 | * | 209 | * |
236 | * RESUME System is resuming, call ->resume() and ->complete() for all | 210 | * RESUME System is resuming, call ->resume() and ->complete() for all |
237 | * devices. | 211 | * devices. |
238 | * | 212 | * |
239 | * THAW Hibernation image has been created, call ->thaw() and | 213 | * THAW Hibernation image has been created, call ->thaw() and |
240 | * ->complete() for all devices. | 214 | * ->complete() for all devices. |
241 | * | 215 | * |
242 | * RESTORE Contents of main memory have been restored from a hibernation | 216 | * RESTORE Contents of main memory have been restored from a hibernation |
243 | * image, call ->restore() and ->complete() for all devices. | 217 | * image, call ->restore() and ->complete() for all devices. |
244 | * | 218 | * |
245 | * RECOVER Creation of a hibernation image or restoration of the main | 219 | * RECOVER Creation of a hibernation image or restoration of the main |
246 | * memory contents from a hibernation image has failed, call | 220 | * memory contents from a hibernation image has failed, call |
247 | * ->thaw() and ->complete() for all devices. | 221 | * ->thaw() and ->complete() for all devices. |
248 | * | 222 | * |
249 | * The following PM_EVENT_ messages are defined for internal use by | 223 | * The following PM_EVENT_ messages are defined for internal use by |
250 | * kernel subsystems. They are never issued by the PM core. | 224 | * kernel subsystems. They are never issued by the PM core. |
251 | * | 225 | * |
252 | * USER_SUSPEND Manual selective suspend was issued by userspace. | 226 | * USER_SUSPEND Manual selective suspend was issued by userspace. |
253 | * | 227 | * |
254 | * USER_RESUME Manual selective resume was issued by userspace. | 228 | * USER_RESUME Manual selective resume was issued by userspace. |
255 | * | 229 | * |
256 | * REMOTE_WAKEUP Remote-wakeup request was received from the device. | 230 | * REMOTE_WAKEUP Remote-wakeup request was received from the device. |
257 | * | 231 | * |
258 | * AUTO_SUSPEND Automatic (device idle) runtime suspend was | 232 | * AUTO_SUSPEND Automatic (device idle) runtime suspend was |
259 | * initiated by the subsystem. | 233 | * initiated by the subsystem. |
260 | * | 234 | * |
261 | * AUTO_RESUME Automatic (device needed) runtime resume was | 235 | * AUTO_RESUME Automatic (device needed) runtime resume was |
262 | * requested by a driver. | 236 | * requested by a driver. |
263 | */ | 237 | */ |
264 | 238 | ||
265 | #define PM_EVENT_ON 0x0000 | 239 | #define PM_EVENT_ON 0x0000 |
266 | #define PM_EVENT_FREEZE 0x0001 | 240 | #define PM_EVENT_FREEZE 0x0001 |
267 | #define PM_EVENT_SUSPEND 0x0002 | 241 | #define PM_EVENT_SUSPEND 0x0002 |
268 | #define PM_EVENT_HIBERNATE 0x0004 | 242 | #define PM_EVENT_HIBERNATE 0x0004 |
269 | #define PM_EVENT_QUIESCE 0x0008 | 243 | #define PM_EVENT_QUIESCE 0x0008 |
270 | #define PM_EVENT_RESUME 0x0010 | 244 | #define PM_EVENT_RESUME 0x0010 |
271 | #define PM_EVENT_THAW 0x0020 | 245 | #define PM_EVENT_THAW 0x0020 |
272 | #define PM_EVENT_RESTORE 0x0040 | 246 | #define PM_EVENT_RESTORE 0x0040 |
273 | #define PM_EVENT_RECOVER 0x0080 | 247 | #define PM_EVENT_RECOVER 0x0080 |
274 | #define PM_EVENT_USER 0x0100 | 248 | #define PM_EVENT_USER 0x0100 |
275 | #define PM_EVENT_REMOTE 0x0200 | 249 | #define PM_EVENT_REMOTE 0x0200 |
276 | #define PM_EVENT_AUTO 0x0400 | 250 | #define PM_EVENT_AUTO 0x0400 |
277 | 251 | ||
278 | #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) | 252 | #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) |
279 | #define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) | 253 | #define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) |
280 | #define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) | 254 | #define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) |
281 | #define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME) | 255 | #define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME) |
282 | #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) | 256 | #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) |
283 | #define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) | 257 | #define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) |
284 | 258 | ||
285 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) | 259 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) |
286 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) | 260 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) |
287 | #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) | 261 | #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) |
288 | #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) | 262 | #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) |
289 | #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) | 263 | #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) |
290 | #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) | 264 | #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) |
291 | #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) | 265 | #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) |
292 | #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) | 266 | #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) |
293 | #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) | 267 | #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) |
294 | #define PMSG_USER_SUSPEND ((struct pm_message) \ | 268 | #define PMSG_USER_SUSPEND ((struct pm_message) \ |
295 | { .event = PM_EVENT_USER_SUSPEND, }) | 269 | { .event = PM_EVENT_USER_SUSPEND, }) |
296 | #define PMSG_USER_RESUME ((struct pm_message) \ | 270 | #define PMSG_USER_RESUME ((struct pm_message) \ |
297 | { .event = PM_EVENT_USER_RESUME, }) | 271 | { .event = PM_EVENT_USER_RESUME, }) |
298 | #define PMSG_REMOTE_RESUME ((struct pm_message) \ | 272 | #define PMSG_REMOTE_RESUME ((struct pm_message) \ |
299 | { .event = PM_EVENT_REMOTE_WAKEUP, }) | 273 | { .event = PM_EVENT_REMOTE_WAKEUP, }) |
300 | #define PMSG_AUTO_SUSPEND ((struct pm_message) \ | 274 | #define PMSG_AUTO_SUSPEND ((struct pm_message) \ |
301 | { .event = PM_EVENT_AUTO_SUSPEND, }) | 275 | { .event = PM_EVENT_AUTO_SUSPEND, }) |
302 | #define PMSG_AUTO_RESUME ((struct pm_message) \ | 276 | #define PMSG_AUTO_RESUME ((struct pm_message) \ |
303 | { .event = PM_EVENT_AUTO_RESUME, }) | 277 | { .event = PM_EVENT_AUTO_RESUME, }) |
304 | 278 | ||
305 | /** | 279 | /** |
306 | * Device power management states | 280 | * Device power management states |
307 | * | 281 | * |
308 | * These state labels are used internally by the PM core to indicate the current | 282 | * These state labels are used internally by the PM core to indicate the current |
309 | * status of a device with respect to the PM core operations. | 283 | * status of a device with respect to the PM core operations. |
310 | * | 284 | * |
311 | * DPM_ON Device is regarded as operational. Set this way | 285 | * DPM_ON Device is regarded as operational. Set this way |
312 | * initially and when ->complete() is about to be called. | 286 | * initially and when ->complete() is about to be called. |
313 | * Also set when ->prepare() fails. | 287 | * Also set when ->prepare() fails. |
314 | * | 288 | * |
315 | * DPM_PREPARING Device is going to be prepared for a PM transition. Set | 289 | * DPM_PREPARING Device is going to be prepared for a PM transition. Set |
316 | * when ->prepare() is about to be called. | 290 | * when ->prepare() is about to be called. |
317 | * | 291 | * |
318 | * DPM_RESUMING Device is going to be resumed. Set when ->resume(), | 292 | * DPM_RESUMING Device is going to be resumed. Set when ->resume(), |
319 | * ->thaw(), or ->restore() is about to be called. | 293 | * ->thaw(), or ->restore() is about to be called. |
320 | * | 294 | * |
321 | * DPM_SUSPENDING Device has been prepared for a power transition. Set | 295 | * DPM_SUSPENDING Device has been prepared for a power transition. Set |
322 | * when ->prepare() has just succeeded. | 296 | * when ->prepare() has just succeeded. |
323 | * | 297 | * |
324 | * DPM_OFF Device is regarded as inactive. Set immediately after | 298 | * DPM_OFF Device is regarded as inactive. Set immediately after |
325 | * ->suspend(), ->freeze(), or ->poweroff() has succeeded. | 299 | * ->suspend(), ->freeze(), or ->poweroff() has succeeded. |
326 | * Also set when ->resume_noirq(), ->thaw_noirq(), or | 300 | * Also set when ->resume_noirq(), ->thaw_noirq(), or |
327 | * ->restore_noirq() is about to be called. | 301 | * ->restore_noirq() is about to be called. |
328 | * | 302 | * |
329 | * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after | 303 | * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after |
330 | * ->suspend_noirq(), ->freeze_noirq(), or | 304 | * ->suspend_noirq(), ->freeze_noirq(), or |
331 | * ->poweroff_noirq() has just succeeded. | 305 | * ->poweroff_noirq() has just succeeded. |
332 | */ | 306 | */ |
333 | 307 | ||
334 | enum dpm_state { | 308 | enum dpm_state { |
335 | DPM_INVALID, | 309 | DPM_INVALID, |
336 | DPM_ON, | 310 | DPM_ON, |
337 | DPM_PREPARING, | 311 | DPM_PREPARING, |
338 | DPM_RESUMING, | 312 | DPM_RESUMING, |
339 | DPM_SUSPENDING, | 313 | DPM_SUSPENDING, |
340 | DPM_OFF, | 314 | DPM_OFF, |
341 | DPM_OFF_IRQ, | 315 | DPM_OFF_IRQ, |
342 | }; | 316 | }; |
343 | 317 | ||
344 | struct dev_pm_info { | 318 | struct dev_pm_info { |
345 | pm_message_t power_state; | 319 | pm_message_t power_state; |
346 | unsigned can_wakeup:1; | 320 | unsigned can_wakeup:1; |
347 | unsigned should_wakeup:1; | 321 | unsigned should_wakeup:1; |
348 | enum dpm_state status; /* Owned by the PM core */ | 322 | enum dpm_state status; /* Owned by the PM core */ |
349 | #ifdef CONFIG_PM_SLEEP | 323 | #ifdef CONFIG_PM_SLEEP |
350 | struct list_head entry; | 324 | struct list_head entry; |
351 | #endif | 325 | #endif |
352 | }; | 326 | }; |
353 | 327 | ||
354 | /* | 328 | /* |
355 | * The PM_EVENT_ messages are also used by drivers implementing the legacy | 329 | * The PM_EVENT_ messages are also used by drivers implementing the legacy |
356 | * suspend framework, based on the ->suspend() and ->resume() callbacks common | 330 | * suspend framework, based on the ->suspend() and ->resume() callbacks common |
357 | * for suspend and hibernation transitions, according to the rules below. | 331 | * for suspend and hibernation transitions, according to the rules below. |
358 | */ | 332 | */ |
359 | 333 | ||
360 | /* Necessary, because several drivers use PM_EVENT_PRETHAW */ | 334 | /* Necessary, because several drivers use PM_EVENT_PRETHAW */ |
361 | #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE | 335 | #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE |
362 | 336 | ||
/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 *		and software requests.  The hardware may have gone through
 *		a power-off reset, or it may have maintained state from the
 *		previous suspend() which the driver will rely on while
 *		resuming.  On most platforms, there are no restrictions on
 *		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend().  All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.)  Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 *		the upcoming system state (such as PCI_D3hot), and enable
 *		wakeup events as appropriate.
 *
 * HIBERNATE	Enter a low power device state appropriate for the hibernation
 *		state (eg. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 *		but do NOT otherwise enter a low power device state, and do
 *		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 *		the system from a snapshot taken after an earlier FREEZE.
 *		Some drivers will need to reset their hardware state instead
 *		of preserving it, to ensure that it's never mistaken for the
 *		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY.  They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
407 | 381 | ||
408 | #ifdef CONFIG_PM_SLEEP | 382 | #ifdef CONFIG_PM_SLEEP |
409 | extern void device_pm_lock(void); | 383 | extern void device_pm_lock(void); |
410 | extern void device_power_up(pm_message_t state); | 384 | extern void device_power_up(pm_message_t state); |
411 | extern void device_resume(pm_message_t state); | 385 | extern void device_resume(pm_message_t state); |
412 | 386 | ||
413 | extern void device_pm_unlock(void); | 387 | extern void device_pm_unlock(void); |
414 | extern int device_power_down(pm_message_t state); | 388 | extern int device_power_down(pm_message_t state); |
415 | extern int device_suspend(pm_message_t state); | 389 | extern int device_suspend(pm_message_t state); |
416 | extern int device_prepare_suspend(pm_message_t state); | 390 | extern int device_prepare_suspend(pm_message_t state); |
417 | 391 | ||
418 | extern void __suspend_report_result(const char *function, void *fn, int ret); | 392 | extern void __suspend_report_result(const char *function, void *fn, int ret); |
419 | 393 | ||
420 | #define suspend_report_result(fn, ret) \ | 394 | #define suspend_report_result(fn, ret) \ |
421 | do { \ | 395 | do { \ |
422 | __suspend_report_result(__func__, fn, ret); \ | 396 | __suspend_report_result(__func__, fn, ret); \ |
423 | } while (0) | 397 | } while (0) |
424 | 398 | ||
425 | #else /* !CONFIG_PM_SLEEP */ | 399 | #else /* !CONFIG_PM_SLEEP */ |
426 | 400 | ||
427 | static inline int device_suspend(pm_message_t state) | 401 | static inline int device_suspend(pm_message_t state) |
428 | { | 402 | { |
429 | return 0; | 403 | return 0; |
430 | } | 404 | } |
431 | 405 | ||
432 | #define suspend_report_result(fn, ret) do {} while (0) | 406 | #define suspend_report_result(fn, ret) do {} while (0) |