Commit 3a83f992490f8235661b768e53bd5f14915420ac
1 parent: 7b1998116b
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
ACPI: Eliminate the DEVICE_ACPI_HANDLE() macro
Since DEVICE_ACPI_HANDLE() is now literally identical to ACPI_HANDLE(), replace it with the latter everywhere and drop its definition from include/linux/acpi.h.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
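A rough sketch of the relationship the message refers to (illustrative only, not the verbatim header; the real definitions live in include/linux/acpi.h): the macro being dropped had been reduced to a plain alias of the one that survives, so call sites can switch with no behavioural change.

    /* illustrative sketch -- the removed macro was just an alias */
    #define DEVICE_ACPI_HANDLE(dev)   ACPI_HANDLE(dev)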
Showing 21 changed files with 42 additions and 44 deletions
- drivers/acpi/device_pm.c
- drivers/gpu/drm/i915/intel_acpi.c
- drivers/gpu/drm/i915/intel_opregion.c
- drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
- drivers/gpu/drm/nouveau/nouveau_acpi.c
- drivers/gpu/drm/radeon/radeon_acpi.c
- drivers/gpu/drm/radeon/radeon_atpx_handler.c
- drivers/gpu/drm/radeon/radeon_bios.c
- drivers/ide/ide-acpi.c
- drivers/pci/hotplug/acpi_pcihp.c
- drivers/pci/hotplug/pciehp_acpi.c
- drivers/pci/ioapic.c
- drivers/pci/pci-acpi.c
- drivers/pci/pci-label.c
- drivers/platform/x86/apple-gmux.c
- drivers/pnp/pnpacpi/core.c
- drivers/usb/core/hub.c
- drivers/usb/core/usb-acpi.c
- drivers/xen/pci.c
- include/linux/acpi.h
- include/linux/pci-acpi.h
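Every file in the list above receives the same mechanical substitution shown in the hunks below for drivers/acpi/device_pm.c; a minimal before/after sketch (using a generic struct device pointer dev rather than any specific call site from the commit):

    /* before this commit */
    acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
    /* after this commit */
    acpi_handle handle = ACPI_HANDLE(dev);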
drivers/acpi/device_pm.c
1 | /* | 1 | /* |
2 | * drivers/acpi/device_pm.c - ACPI device power management routines. | 2 | * drivers/acpi/device_pm.c - ACPI device power management routines. |
3 | * | 3 | * |
4 | * Copyright (C) 2012, Intel Corp. | 4 | * Copyright (C) 2012, Intel Corp. |
5 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 5 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
6 | * | 6 | * |
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as published | 10 | * it under the terms of the GNU General Public License version 2 as published |
11 | * by the Free Software Foundation. | 11 | * by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, but | 13 | * This program is distributed in the hope that it will be useful, but |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * General Public License for more details. | 16 | * General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License along | 18 | * You should have received a copy of the GNU General Public License along |
19 | * with this program; if not, write to the Free Software Foundation, Inc., | 19 | * with this program; if not, write to the Free Software Foundation, Inc., |
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | 20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
21 | * | 21 | * |
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/acpi.h> | 25 | #include <linux/acpi.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/pm_qos.h> | 28 | #include <linux/pm_qos.h> |
29 | #include <linux/pm_runtime.h> | 29 | #include <linux/pm_runtime.h> |
30 | 30 | ||
31 | #include "internal.h" | 31 | #include "internal.h" |
32 | 32 | ||
33 | #define _COMPONENT ACPI_POWER_COMPONENT | 33 | #define _COMPONENT ACPI_POWER_COMPONENT |
34 | ACPI_MODULE_NAME("device_pm"); | 34 | ACPI_MODULE_NAME("device_pm"); |
35 | 35 | ||
36 | /** | 36 | /** |
37 | * acpi_power_state_string - String representation of ACPI device power state. | 37 | * acpi_power_state_string - String representation of ACPI device power state. |
38 | * @state: ACPI device power state to return the string representation of. | 38 | * @state: ACPI device power state to return the string representation of. |
39 | */ | 39 | */ |
40 | const char *acpi_power_state_string(int state) | 40 | const char *acpi_power_state_string(int state) |
41 | { | 41 | { |
42 | switch (state) { | 42 | switch (state) { |
43 | case ACPI_STATE_D0: | 43 | case ACPI_STATE_D0: |
44 | return "D0"; | 44 | return "D0"; |
45 | case ACPI_STATE_D1: | 45 | case ACPI_STATE_D1: |
46 | return "D1"; | 46 | return "D1"; |
47 | case ACPI_STATE_D2: | 47 | case ACPI_STATE_D2: |
48 | return "D2"; | 48 | return "D2"; |
49 | case ACPI_STATE_D3_HOT: | 49 | case ACPI_STATE_D3_HOT: |
50 | return "D3hot"; | 50 | return "D3hot"; |
51 | case ACPI_STATE_D3_COLD: | 51 | case ACPI_STATE_D3_COLD: |
52 | return "D3cold"; | 52 | return "D3cold"; |
53 | default: | 53 | default: |
54 | return "(unknown)"; | 54 | return "(unknown)"; |
55 | } | 55 | } |
56 | } | 56 | } |
57 | 57 | ||
58 | /** | 58 | /** |
59 | * acpi_device_get_power - Get power state of an ACPI device. | 59 | * acpi_device_get_power - Get power state of an ACPI device. |
60 | * @device: Device to get the power state of. | 60 | * @device: Device to get the power state of. |
61 | * @state: Place to store the power state of the device. | 61 | * @state: Place to store the power state of the device. |
62 | * | 62 | * |
63 | * This function does not update the device's power.state field, but it may | 63 | * This function does not update the device's power.state field, but it may |
64 | * update its parent's power.state field (when the parent's power state is | 64 | * update its parent's power.state field (when the parent's power state is |
65 | * unknown and the device's power state turns out to be D0). | 65 | * unknown and the device's power state turns out to be D0). |
66 | */ | 66 | */ |
67 | int acpi_device_get_power(struct acpi_device *device, int *state) | 67 | int acpi_device_get_power(struct acpi_device *device, int *state) |
68 | { | 68 | { |
69 | int result = ACPI_STATE_UNKNOWN; | 69 | int result = ACPI_STATE_UNKNOWN; |
70 | 70 | ||
71 | if (!device || !state) | 71 | if (!device || !state) |
72 | return -EINVAL; | 72 | return -EINVAL; |
73 | 73 | ||
74 | if (!device->flags.power_manageable) { | 74 | if (!device->flags.power_manageable) { |
75 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ | 75 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ |
76 | *state = device->parent ? | 76 | *state = device->parent ? |
77 | device->parent->power.state : ACPI_STATE_D0; | 77 | device->parent->power.state : ACPI_STATE_D0; |
78 | goto out; | 78 | goto out; |
79 | } | 79 | } |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Get the device's power state from power resources settings and _PSC, | 82 | * Get the device's power state from power resources settings and _PSC, |
83 | * if available. | 83 | * if available. |
84 | */ | 84 | */ |
85 | if (device->power.flags.power_resources) { | 85 | if (device->power.flags.power_resources) { |
86 | int error = acpi_power_get_inferred_state(device, &result); | 86 | int error = acpi_power_get_inferred_state(device, &result); |
87 | if (error) | 87 | if (error) |
88 | return error; | 88 | return error; |
89 | } | 89 | } |
90 | if (device->power.flags.explicit_get) { | 90 | if (device->power.flags.explicit_get) { |
91 | acpi_handle handle = device->handle; | 91 | acpi_handle handle = device->handle; |
92 | unsigned long long psc; | 92 | unsigned long long psc; |
93 | acpi_status status; | 93 | acpi_status status; |
94 | 94 | ||
95 | status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc); | 95 | status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc); |
96 | if (ACPI_FAILURE(status)) | 96 | if (ACPI_FAILURE(status)) |
97 | return -ENODEV; | 97 | return -ENODEV; |
98 | 98 | ||
99 | /* | 99 | /* |
100 | * The power resources settings may indicate a power state | 100 | * The power resources settings may indicate a power state |
101 | * shallower than the actual power state of the device. | 101 | * shallower than the actual power state of the device. |
102 | * | 102 | * |
103 | * Moreover, on systems predating ACPI 4.0, if the device | 103 | * Moreover, on systems predating ACPI 4.0, if the device |
104 | * doesn't depend on any power resources and _PSC returns 3, | 104 | * doesn't depend on any power resources and _PSC returns 3, |
105 | * that means "power off". We need to maintain compatibility | 105 | * that means "power off". We need to maintain compatibility |
106 | * with those systems. | 106 | * with those systems. |
107 | */ | 107 | */ |
108 | if (psc > result && psc < ACPI_STATE_D3_COLD) | 108 | if (psc > result && psc < ACPI_STATE_D3_COLD) |
109 | result = psc; | 109 | result = psc; |
110 | else if (result == ACPI_STATE_UNKNOWN) | 110 | else if (result == ACPI_STATE_UNKNOWN) |
111 | result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc; | 111 | result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* | 114 | /* |
115 | * If we were unsure about the device parent's power state up to this | 115 | * If we were unsure about the device parent's power state up to this |
116 | * point, the fact that the device is in D0 implies that the parent has | 116 | * point, the fact that the device is in D0 implies that the parent has |
117 | * to be in D0 too, except if ignore_parent is set. | 117 | * to be in D0 too, except if ignore_parent is set. |
118 | */ | 118 | */ |
119 | if (!device->power.flags.ignore_parent && device->parent | 119 | if (!device->power.flags.ignore_parent && device->parent |
120 | && device->parent->power.state == ACPI_STATE_UNKNOWN | 120 | && device->parent->power.state == ACPI_STATE_UNKNOWN |
121 | && result == ACPI_STATE_D0) | 121 | && result == ACPI_STATE_D0) |
122 | device->parent->power.state = ACPI_STATE_D0; | 122 | device->parent->power.state = ACPI_STATE_D0; |
123 | 123 | ||
124 | *state = result; | 124 | *state = result; |
125 | 125 | ||
126 | out: | 126 | out: |
127 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", | 127 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", |
128 | device->pnp.bus_id, acpi_power_state_string(*state))); | 128 | device->pnp.bus_id, acpi_power_state_string(*state))); |
129 | 129 | ||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) | 133 | static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) |
134 | { | 134 | { |
135 | if (adev->power.states[state].flags.explicit_set) { | 135 | if (adev->power.states[state].flags.explicit_set) { |
136 | char method[5] = { '_', 'P', 'S', '0' + state, '\0' }; | 136 | char method[5] = { '_', 'P', 'S', '0' + state, '\0' }; |
137 | acpi_status status; | 137 | acpi_status status; |
138 | 138 | ||
139 | status = acpi_evaluate_object(adev->handle, method, NULL, NULL); | 139 | status = acpi_evaluate_object(adev->handle, method, NULL, NULL); |
140 | if (ACPI_FAILURE(status)) | 140 | if (ACPI_FAILURE(status)) |
141 | return -ENODEV; | 141 | return -ENODEV; |
142 | } | 142 | } |
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | /** | 146 | /** |
147 | * acpi_device_set_power - Set power state of an ACPI device. | 147 | * acpi_device_set_power - Set power state of an ACPI device. |
148 | * @device: Device to set the power state of. | 148 | * @device: Device to set the power state of. |
149 | * @state: New power state to set. | 149 | * @state: New power state to set. |
150 | * | 150 | * |
151 | * Callers must ensure that the device is power manageable before using this | 151 | * Callers must ensure that the device is power manageable before using this |
152 | * function. | 152 | * function. |
153 | */ | 153 | */ |
154 | int acpi_device_set_power(struct acpi_device *device, int state) | 154 | int acpi_device_set_power(struct acpi_device *device, int state) |
155 | { | 155 | { |
156 | int result = 0; | 156 | int result = 0; |
157 | bool cut_power = false; | 157 | bool cut_power = false; |
158 | 158 | ||
159 | if (!device || !device->flags.power_manageable | 159 | if (!device || !device->flags.power_manageable |
160 | || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) | 160 | || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) |
161 | return -EINVAL; | 161 | return -EINVAL; |
162 | 162 | ||
163 | /* Make sure this is a valid target state */ | 163 | /* Make sure this is a valid target state */ |
164 | 164 | ||
165 | if (state == device->power.state) { | 165 | if (state == device->power.state) { |
166 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n", | 166 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n", |
167 | device->pnp.bus_id, | 167 | device->pnp.bus_id, |
168 | acpi_power_state_string(state))); | 168 | acpi_power_state_string(state))); |
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | if (!device->power.states[state].flags.valid) { | 172 | if (!device->power.states[state].flags.valid) { |
173 | dev_warn(&device->dev, "Power state %s not supported\n", | 173 | dev_warn(&device->dev, "Power state %s not supported\n", |
174 | acpi_power_state_string(state)); | 174 | acpi_power_state_string(state)); |
175 | return -ENODEV; | 175 | return -ENODEV; |
176 | } | 176 | } |
177 | if (!device->power.flags.ignore_parent && | 177 | if (!device->power.flags.ignore_parent && |
178 | device->parent && (state < device->parent->power.state)) { | 178 | device->parent && (state < device->parent->power.state)) { |
179 | dev_warn(&device->dev, | 179 | dev_warn(&device->dev, |
180 | "Cannot transition to power state %s for parent in %s\n", | 180 | "Cannot transition to power state %s for parent in %s\n", |
181 | acpi_power_state_string(state), | 181 | acpi_power_state_string(state), |
182 | acpi_power_state_string(device->parent->power.state)); | 182 | acpi_power_state_string(device->parent->power.state)); |
183 | return -ENODEV; | 183 | return -ENODEV; |
184 | } | 184 | } |
185 | 185 | ||
186 | /* For D3cold we should first transition into D3hot. */ | 186 | /* For D3cold we should first transition into D3hot. */ |
187 | if (state == ACPI_STATE_D3_COLD | 187 | if (state == ACPI_STATE_D3_COLD |
188 | && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) { | 188 | && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) { |
189 | state = ACPI_STATE_D3_HOT; | 189 | state = ACPI_STATE_D3_HOT; |
190 | cut_power = true; | 190 | cut_power = true; |
191 | } | 191 | } |
192 | 192 | ||
193 | if (state < device->power.state && state != ACPI_STATE_D0 | 193 | if (state < device->power.state && state != ACPI_STATE_D0 |
194 | && device->power.state >= ACPI_STATE_D3_HOT) { | 194 | && device->power.state >= ACPI_STATE_D3_HOT) { |
195 | dev_warn(&device->dev, | 195 | dev_warn(&device->dev, |
196 | "Cannot transition to non-D0 state from D3\n"); | 196 | "Cannot transition to non-D0 state from D3\n"); |
197 | return -ENODEV; | 197 | return -ENODEV; |
198 | } | 198 | } |
199 | 199 | ||
200 | /* | 200 | /* |
201 | * Transition Power | 201 | * Transition Power |
202 | * ---------------- | 202 | * ---------------- |
203 | * In accordance with the ACPI specification first apply power (via | 203 | * In accordance with the ACPI specification first apply power (via |
204 | * power resources) and then evalute _PSx. | 204 | * power resources) and then evalute _PSx. |
205 | */ | 205 | */ |
206 | if (device->power.flags.power_resources) { | 206 | if (device->power.flags.power_resources) { |
207 | result = acpi_power_transition(device, state); | 207 | result = acpi_power_transition(device, state); |
208 | if (result) | 208 | if (result) |
209 | goto end; | 209 | goto end; |
210 | } | 210 | } |
211 | result = acpi_dev_pm_explicit_set(device, state); | 211 | result = acpi_dev_pm_explicit_set(device, state); |
212 | if (result) | 212 | if (result) |
213 | goto end; | 213 | goto end; |
214 | 214 | ||
215 | if (cut_power) { | 215 | if (cut_power) { |
216 | device->power.state = state; | 216 | device->power.state = state; |
217 | state = ACPI_STATE_D3_COLD; | 217 | state = ACPI_STATE_D3_COLD; |
218 | result = acpi_power_transition(device, state); | 218 | result = acpi_power_transition(device, state); |
219 | } | 219 | } |
220 | 220 | ||
221 | end: | 221 | end: |
222 | if (result) { | 222 | if (result) { |
223 | dev_warn(&device->dev, "Failed to change power state to %s\n", | 223 | dev_warn(&device->dev, "Failed to change power state to %s\n", |
224 | acpi_power_state_string(state)); | 224 | acpi_power_state_string(state)); |
225 | } else { | 225 | } else { |
226 | device->power.state = state; | 226 | device->power.state = state; |
227 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 227 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
228 | "Device [%s] transitioned to %s\n", | 228 | "Device [%s] transitioned to %s\n", |
229 | device->pnp.bus_id, | 229 | device->pnp.bus_id, |
230 | acpi_power_state_string(state))); | 230 | acpi_power_state_string(state))); |
231 | } | 231 | } |
232 | 232 | ||
233 | return result; | 233 | return result; |
234 | } | 234 | } |
235 | EXPORT_SYMBOL(acpi_device_set_power); | 235 | EXPORT_SYMBOL(acpi_device_set_power); |
236 | 236 | ||
237 | int acpi_bus_set_power(acpi_handle handle, int state) | 237 | int acpi_bus_set_power(acpi_handle handle, int state) |
238 | { | 238 | { |
239 | struct acpi_device *device; | 239 | struct acpi_device *device; |
240 | int result; | 240 | int result; |
241 | 241 | ||
242 | result = acpi_bus_get_device(handle, &device); | 242 | result = acpi_bus_get_device(handle, &device); |
243 | if (result) | 243 | if (result) |
244 | return result; | 244 | return result; |
245 | 245 | ||
246 | return acpi_device_set_power(device, state); | 246 | return acpi_device_set_power(device, state); |
247 | } | 247 | } |
248 | EXPORT_SYMBOL(acpi_bus_set_power); | 248 | EXPORT_SYMBOL(acpi_bus_set_power); |
249 | 249 | ||
250 | int acpi_bus_init_power(struct acpi_device *device) | 250 | int acpi_bus_init_power(struct acpi_device *device) |
251 | { | 251 | { |
252 | int state; | 252 | int state; |
253 | int result; | 253 | int result; |
254 | 254 | ||
255 | if (!device) | 255 | if (!device) |
256 | return -EINVAL; | 256 | return -EINVAL; |
257 | 257 | ||
258 | device->power.state = ACPI_STATE_UNKNOWN; | 258 | device->power.state = ACPI_STATE_UNKNOWN; |
259 | 259 | ||
260 | result = acpi_device_get_power(device, &state); | 260 | result = acpi_device_get_power(device, &state); |
261 | if (result) | 261 | if (result) |
262 | return result; | 262 | return result; |
263 | 263 | ||
264 | if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) { | 264 | if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) { |
265 | result = acpi_power_on_resources(device, state); | 265 | result = acpi_power_on_resources(device, state); |
266 | if (result) | 266 | if (result) |
267 | return result; | 267 | return result; |
268 | 268 | ||
269 | result = acpi_dev_pm_explicit_set(device, state); | 269 | result = acpi_dev_pm_explicit_set(device, state); |
270 | if (result) | 270 | if (result) |
271 | return result; | 271 | return result; |
272 | } else if (state == ACPI_STATE_UNKNOWN) { | 272 | } else if (state == ACPI_STATE_UNKNOWN) { |
273 | /* | 273 | /* |
274 | * No power resources and missing _PSC? Cross fingers and make | 274 | * No power resources and missing _PSC? Cross fingers and make |
275 | * it D0 in hope that this is what the BIOS put the device into. | 275 | * it D0 in hope that this is what the BIOS put the device into. |
276 | * [We tried to force D0 here by executing _PS0, but that broke | 276 | * [We tried to force D0 here by executing _PS0, but that broke |
277 | * Toshiba P870-303 in a nasty way.] | 277 | * Toshiba P870-303 in a nasty way.] |
278 | */ | 278 | */ |
279 | state = ACPI_STATE_D0; | 279 | state = ACPI_STATE_D0; |
280 | } | 280 | } |
281 | device->power.state = state; | 281 | device->power.state = state; |
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | /** | 285 | /** |
286 | * acpi_device_fix_up_power - Force device with missing _PSC into D0. | 286 | * acpi_device_fix_up_power - Force device with missing _PSC into D0. |
287 | * @device: Device object whose power state is to be fixed up. | 287 | * @device: Device object whose power state is to be fixed up. |
288 | * | 288 | * |
289 | * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, | 289 | * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, |
290 | * are assumed to be put into D0 by the BIOS. However, in some cases that may | 290 | * are assumed to be put into D0 by the BIOS. However, in some cases that may |
291 | * not be the case and this function should be used then. | 291 | * not be the case and this function should be used then. |
292 | */ | 292 | */ |
293 | int acpi_device_fix_up_power(struct acpi_device *device) | 293 | int acpi_device_fix_up_power(struct acpi_device *device) |
294 | { | 294 | { |
295 | int ret = 0; | 295 | int ret = 0; |
296 | 296 | ||
297 | if (!device->power.flags.power_resources | 297 | if (!device->power.flags.power_resources |
298 | && !device->power.flags.explicit_get | 298 | && !device->power.flags.explicit_get |
299 | && device->power.state == ACPI_STATE_D0) | 299 | && device->power.state == ACPI_STATE_D0) |
300 | ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); | 300 | ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); |
301 | 301 | ||
302 | return ret; | 302 | return ret; |
303 | } | 303 | } |
304 | 304 | ||
305 | int acpi_bus_update_power(acpi_handle handle, int *state_p) | 305 | int acpi_bus_update_power(acpi_handle handle, int *state_p) |
306 | { | 306 | { |
307 | struct acpi_device *device; | 307 | struct acpi_device *device; |
308 | int state; | 308 | int state; |
309 | int result; | 309 | int result; |
310 | 310 | ||
311 | result = acpi_bus_get_device(handle, &device); | 311 | result = acpi_bus_get_device(handle, &device); |
312 | if (result) | 312 | if (result) |
313 | return result; | 313 | return result; |
314 | 314 | ||
315 | result = acpi_device_get_power(device, &state); | 315 | result = acpi_device_get_power(device, &state); |
316 | if (result) | 316 | if (result) |
317 | return result; | 317 | return result; |
318 | 318 | ||
319 | if (state == ACPI_STATE_UNKNOWN) { | 319 | if (state == ACPI_STATE_UNKNOWN) { |
320 | state = ACPI_STATE_D0; | 320 | state = ACPI_STATE_D0; |
321 | result = acpi_device_set_power(device, state); | 321 | result = acpi_device_set_power(device, state); |
322 | if (result) | 322 | if (result) |
323 | return result; | 323 | return result; |
324 | } else { | 324 | } else { |
325 | if (device->power.flags.power_resources) { | 325 | if (device->power.flags.power_resources) { |
326 | /* | 326 | /* |
327 | * We don't need to really switch the state, bu we need | 327 | * We don't need to really switch the state, bu we need |
328 | * to update the power resources' reference counters. | 328 | * to update the power resources' reference counters. |
329 | */ | 329 | */ |
330 | result = acpi_power_transition(device, state); | 330 | result = acpi_power_transition(device, state); |
331 | if (result) | 331 | if (result) |
332 | return result; | 332 | return result; |
333 | } | 333 | } |
334 | device->power.state = state; | 334 | device->power.state = state; |
335 | } | 335 | } |
336 | if (state_p) | 336 | if (state_p) |
337 | *state_p = state; | 337 | *state_p = state; |
338 | 338 | ||
339 | return 0; | 339 | return 0; |
340 | } | 340 | } |
341 | EXPORT_SYMBOL_GPL(acpi_bus_update_power); | 341 | EXPORT_SYMBOL_GPL(acpi_bus_update_power); |
342 | 342 | ||
343 | bool acpi_bus_power_manageable(acpi_handle handle) | 343 | bool acpi_bus_power_manageable(acpi_handle handle) |
344 | { | 344 | { |
345 | struct acpi_device *device; | 345 | struct acpi_device *device; |
346 | int result; | 346 | int result; |
347 | 347 | ||
348 | result = acpi_bus_get_device(handle, &device); | 348 | result = acpi_bus_get_device(handle, &device); |
349 | return result ? false : device->flags.power_manageable; | 349 | return result ? false : device->flags.power_manageable; |
350 | } | 350 | } |
351 | EXPORT_SYMBOL(acpi_bus_power_manageable); | 351 | EXPORT_SYMBOL(acpi_bus_power_manageable); |
352 | 352 | ||
353 | #ifdef CONFIG_PM | 353 | #ifdef CONFIG_PM |
354 | static DEFINE_MUTEX(acpi_pm_notifier_lock); | 354 | static DEFINE_MUTEX(acpi_pm_notifier_lock); |
355 | 355 | ||
356 | /** | 356 | /** |
357 | * acpi_add_pm_notifier - Register PM notifier for given ACPI device. | 357 | * acpi_add_pm_notifier - Register PM notifier for given ACPI device. |
358 | * @adev: ACPI device to add the notifier for. | 358 | * @adev: ACPI device to add the notifier for. |
359 | * @context: Context information to pass to the notifier routine. | 359 | * @context: Context information to pass to the notifier routine. |
360 | * | 360 | * |
361 | * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of | 361 | * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of |
362 | * PM wakeup events. For example, wakeup events may be generated for bridges | 362 | * PM wakeup events. For example, wakeup events may be generated for bridges |
363 | * if one of the devices below the bridge is signaling wakeup, even if the | 363 | * if one of the devices below the bridge is signaling wakeup, even if the |
364 | * bridge itself doesn't have a wakeup GPE associated with it. | 364 | * bridge itself doesn't have a wakeup GPE associated with it. |
365 | */ | 365 | */ |
366 | acpi_status acpi_add_pm_notifier(struct acpi_device *adev, | 366 | acpi_status acpi_add_pm_notifier(struct acpi_device *adev, |
367 | acpi_notify_handler handler, void *context) | 367 | acpi_notify_handler handler, void *context) |
368 | { | 368 | { |
369 | acpi_status status = AE_ALREADY_EXISTS; | 369 | acpi_status status = AE_ALREADY_EXISTS; |
370 | 370 | ||
371 | mutex_lock(&acpi_pm_notifier_lock); | 371 | mutex_lock(&acpi_pm_notifier_lock); |
372 | 372 | ||
373 | if (adev->wakeup.flags.notifier_present) | 373 | if (adev->wakeup.flags.notifier_present) |
374 | goto out; | 374 | goto out; |
375 | 375 | ||
376 | status = acpi_install_notify_handler(adev->handle, | 376 | status = acpi_install_notify_handler(adev->handle, |
377 | ACPI_SYSTEM_NOTIFY, | 377 | ACPI_SYSTEM_NOTIFY, |
378 | handler, context); | 378 | handler, context); |
379 | if (ACPI_FAILURE(status)) | 379 | if (ACPI_FAILURE(status)) |
380 | goto out; | 380 | goto out; |
381 | 381 | ||
382 | adev->wakeup.flags.notifier_present = true; | 382 | adev->wakeup.flags.notifier_present = true; |
383 | 383 | ||
384 | out: | 384 | out: |
385 | mutex_unlock(&acpi_pm_notifier_lock); | 385 | mutex_unlock(&acpi_pm_notifier_lock); |
386 | return status; | 386 | return status; |
387 | } | 387 | } |
388 | 388 | ||
389 | /** | 389 | /** |
390 | * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. | 390 | * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. |
391 | * @adev: ACPI device to remove the notifier from. | 391 | * @adev: ACPI device to remove the notifier from. |
392 | */ | 392 | */ |
393 | acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | 393 | acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, |
394 | acpi_notify_handler handler) | 394 | acpi_notify_handler handler) |
395 | { | 395 | { |
396 | acpi_status status = AE_BAD_PARAMETER; | 396 | acpi_status status = AE_BAD_PARAMETER; |
397 | 397 | ||
398 | mutex_lock(&acpi_pm_notifier_lock); | 398 | mutex_lock(&acpi_pm_notifier_lock); |
399 | 399 | ||
400 | if (!adev->wakeup.flags.notifier_present) | 400 | if (!adev->wakeup.flags.notifier_present) |
401 | goto out; | 401 | goto out; |
402 | 402 | ||
403 | status = acpi_remove_notify_handler(adev->handle, | 403 | status = acpi_remove_notify_handler(adev->handle, |
404 | ACPI_SYSTEM_NOTIFY, | 404 | ACPI_SYSTEM_NOTIFY, |
405 | handler); | 405 | handler); |
406 | if (ACPI_FAILURE(status)) | 406 | if (ACPI_FAILURE(status)) |
407 | goto out; | 407 | goto out; |
408 | 408 | ||
409 | adev->wakeup.flags.notifier_present = false; | 409 | adev->wakeup.flags.notifier_present = false; |
410 | 410 | ||
411 | out: | 411 | out: |
412 | mutex_unlock(&acpi_pm_notifier_lock); | 412 | mutex_unlock(&acpi_pm_notifier_lock); |
413 | return status; | 413 | return status; |
414 | } | 414 | } |
415 | 415 | ||
416 | bool acpi_bus_can_wakeup(acpi_handle handle) | 416 | bool acpi_bus_can_wakeup(acpi_handle handle) |
417 | { | 417 | { |
418 | struct acpi_device *device; | 418 | struct acpi_device *device; |
419 | int result; | 419 | int result; |
420 | 420 | ||
421 | result = acpi_bus_get_device(handle, &device); | 421 | result = acpi_bus_get_device(handle, &device); |
422 | return result ? false : device->wakeup.flags.valid; | 422 | return result ? false : device->wakeup.flags.valid; |
423 | } | 423 | } |
424 | EXPORT_SYMBOL(acpi_bus_can_wakeup); | 424 | EXPORT_SYMBOL(acpi_bus_can_wakeup); |
425 | 425 | ||
426 | /** | 426 | /** |
427 | * acpi_dev_pm_get_state - Get preferred power state of ACPI device. | 427 | * acpi_dev_pm_get_state - Get preferred power state of ACPI device. |
428 | * @dev: Device whose preferred target power state to return. | 428 | * @dev: Device whose preferred target power state to return. |
429 | * @adev: ACPI device node corresponding to @dev. | 429 | * @adev: ACPI device node corresponding to @dev. |
430 | * @target_state: System state to match the resultant device state. | 430 | * @target_state: System state to match the resultant device state. |
431 | * @d_min_p: Location to store the highest power state available to the device. | 431 | * @d_min_p: Location to store the highest power state available to the device. |
432 | * @d_max_p: Location to store the lowest power state available to the device. | 432 | * @d_max_p: Location to store the lowest power state available to the device. |
433 | * | 433 | * |
434 | * Find the lowest power (highest number) and highest power (lowest number) ACPI | 434 | * Find the lowest power (highest number) and highest power (lowest number) ACPI |
435 | * device power states that the device can be in while the system is in the | 435 | * device power states that the device can be in while the system is in the |
436 | * state represented by @target_state. Store the integer numbers representing | 436 | * state represented by @target_state. Store the integer numbers representing |
437 | * those stats in the memory locations pointed to by @d_max_p and @d_min_p, | 437 | * those stats in the memory locations pointed to by @d_max_p and @d_min_p, |
438 | * respectively. | 438 | * respectively. |
439 | * | 439 | * |
440 | * Callers must ensure that @dev and @adev are valid pointers and that @adev | 440 | * Callers must ensure that @dev and @adev are valid pointers and that @adev |
441 | * actually corresponds to @dev before using this function. | 441 | * actually corresponds to @dev before using this function. |
442 | * | 442 | * |
443 | * Returns 0 on success or -ENODATA when one of the ACPI methods fails or | 443 | * Returns 0 on success or -ENODATA when one of the ACPI methods fails or |
444 | * returns a value that doesn't make sense. The memory locations pointed to by | 444 | * returns a value that doesn't make sense. The memory locations pointed to by |
445 | * @d_max_p and @d_min_p are only modified on success. | 445 | * @d_max_p and @d_min_p are only modified on success. |
446 | */ | 446 | */ |
447 | static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, | 447 | static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, |
448 | u32 target_state, int *d_min_p, int *d_max_p) | 448 | u32 target_state, int *d_min_p, int *d_max_p) |
449 | { | 449 | { |
450 | char method[] = { '_', 'S', '0' + target_state, 'D', '\0' }; | 450 | char method[] = { '_', 'S', '0' + target_state, 'D', '\0' }; |
451 | acpi_handle handle = adev->handle; | 451 | acpi_handle handle = adev->handle; |
452 | unsigned long long ret; | 452 | unsigned long long ret; |
453 | int d_min, d_max; | 453 | int d_min, d_max; |
454 | bool wakeup = false; | 454 | bool wakeup = false; |
455 | acpi_status status; | 455 | acpi_status status; |
456 | 456 | ||
457 | /* | 457 | /* |
458 | * If the system state is S0, the lowest power state the device can be | 458 | * If the system state is S0, the lowest power state the device can be |
459 | * in is D3cold, unless the device has _S0W and is supposed to signal | 459 | * in is D3cold, unless the device has _S0W and is supposed to signal |
460 | * wakeup, in which case the return value of _S0W has to be used as the | 460 | * wakeup, in which case the return value of _S0W has to be used as the |
461 | * lowest power state available to the device. | 461 | * lowest power state available to the device. |
462 | */ | 462 | */ |
463 | d_min = ACPI_STATE_D0; | 463 | d_min = ACPI_STATE_D0; |
464 | d_max = ACPI_STATE_D3_COLD; | 464 | d_max = ACPI_STATE_D3_COLD; |
465 | 465 | ||
466 | /* | 466 | /* |
467 | * If present, _SxD methods return the minimum D-state (highest power | 467 | * If present, _SxD methods return the minimum D-state (highest power |
468 | * state) we can use for the corresponding S-states. Otherwise, the | 468 | * state) we can use for the corresponding S-states. Otherwise, the |
469 | * minimum D-state is D0 (ACPI 3.x). | 469 | * minimum D-state is D0 (ACPI 3.x). |
470 | */ | 470 | */ |
471 | if (target_state > ACPI_STATE_S0) { | 471 | if (target_state > ACPI_STATE_S0) { |
472 | /* | 472 | /* |
473 | * We rely on acpi_evaluate_integer() not clobbering the integer | 473 | * We rely on acpi_evaluate_integer() not clobbering the integer |
474 | * provided if AE_NOT_FOUND is returned. | 474 | * provided if AE_NOT_FOUND is returned. |
475 | */ | 475 | */ |
476 | ret = d_min; | 476 | ret = d_min; |
477 | status = acpi_evaluate_integer(handle, method, NULL, &ret); | 477 | status = acpi_evaluate_integer(handle, method, NULL, &ret); |
478 | if ((ACPI_FAILURE(status) && status != AE_NOT_FOUND) | 478 | if ((ACPI_FAILURE(status) && status != AE_NOT_FOUND) |
479 | || ret > ACPI_STATE_D3_COLD) | 479 | || ret > ACPI_STATE_D3_COLD) |
480 | return -ENODATA; | 480 | return -ENODATA; |
481 | 481 | ||
482 | /* | 482 | /* |
483 | * We need to handle legacy systems where D3hot and D3cold are | 483 | * We need to handle legacy systems where D3hot and D3cold are |
484 | * the same and 3 is returned in both cases, so fall back to | 484 | * the same and 3 is returned in both cases, so fall back to |
485 | * D3cold if D3hot is not a valid state. | 485 | * D3cold if D3hot is not a valid state. |
486 | */ | 486 | */ |
487 | if (!adev->power.states[ret].flags.valid) { | 487 | if (!adev->power.states[ret].flags.valid) { |
488 | if (ret == ACPI_STATE_D3_HOT) | 488 | if (ret == ACPI_STATE_D3_HOT) |
489 | ret = ACPI_STATE_D3_COLD; | 489 | ret = ACPI_STATE_D3_COLD; |
490 | else | 490 | else |
491 | return -ENODATA; | 491 | return -ENODATA; |
492 | } | 492 | } |
493 | d_min = ret; | 493 | d_min = ret; |
494 | wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid | 494 | wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid |
495 | && adev->wakeup.sleep_state >= target_state; | 495 | && adev->wakeup.sleep_state >= target_state; |
496 | } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) != | 496 | } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) != |
497 | PM_QOS_FLAGS_NONE) { | 497 | PM_QOS_FLAGS_NONE) { |
498 | wakeup = adev->wakeup.flags.valid; | 498 | wakeup = adev->wakeup.flags.valid; |
499 | } | 499 | } |
500 | 500 | ||
501 | /* | 501 | /* |
502 | * If _PRW says we can wake up the system from the target sleep state, | 502 | * If _PRW says we can wake up the system from the target sleep state, |
503 | * the D-state returned by _SxD is sufficient for that (we assume a | 503 | * the D-state returned by _SxD is sufficient for that (we assume a |
504 | * wakeup-aware driver if wake is set). Still, if _SxW exists | 504 | * wakeup-aware driver if wake is set). Still, if _SxW exists |
505 | * (ACPI 3.x), it should return the maximum (lowest power) D-state that | 505 | * (ACPI 3.x), it should return the maximum (lowest power) D-state that |
506 | * can wake the system. _S0W may be valid, too. | 506 | * can wake the system. _S0W may be valid, too. |
507 | */ | 507 | */ |
508 | if (wakeup) { | 508 | if (wakeup) { |
509 | method[3] = 'W'; | 509 | method[3] = 'W'; |
510 | status = acpi_evaluate_integer(handle, method, NULL, &ret); | 510 | status = acpi_evaluate_integer(handle, method, NULL, &ret); |
511 | if (status == AE_NOT_FOUND) { | 511 | if (status == AE_NOT_FOUND) { |
512 | if (target_state > ACPI_STATE_S0) | 512 | if (target_state > ACPI_STATE_S0) |
513 | d_max = d_min; | 513 | d_max = d_min; |
514 | } else if (ACPI_SUCCESS(status) && ret <= ACPI_STATE_D3_COLD) { | 514 | } else if (ACPI_SUCCESS(status) && ret <= ACPI_STATE_D3_COLD) { |
515 | /* Fall back to D3cold if ret is not a valid state. */ | 515 | /* Fall back to D3cold if ret is not a valid state. */ |
516 | if (!adev->power.states[ret].flags.valid) | 516 | if (!adev->power.states[ret].flags.valid) |
517 | ret = ACPI_STATE_D3_COLD; | 517 | ret = ACPI_STATE_D3_COLD; |
518 | 518 | ||
519 | d_max = ret > d_min ? ret : d_min; | 519 | d_max = ret > d_min ? ret : d_min; |
520 | } else { | 520 | } else { |
521 | return -ENODATA; | 521 | return -ENODATA; |
522 | } | 522 | } |
523 | } | 523 | } |
524 | 524 | ||
525 | if (d_min_p) | 525 | if (d_min_p) |
526 | *d_min_p = d_min; | 526 | *d_min_p = d_min; |
527 | 527 | ||
528 | if (d_max_p) | 528 | if (d_max_p) |
529 | *d_max_p = d_max; | 529 | *d_max_p = d_max; |
530 | 530 | ||
531 | return 0; | 531 | return 0; |
532 | } | 532 | } |
533 | 533 | ||
534 | /** | 534 | /** |
535 | * acpi_pm_device_sleep_state - Get preferred power state of ACPI device. | 535 | * acpi_pm_device_sleep_state - Get preferred power state of ACPI device. |
536 | * @dev: Device whose preferred target power state to return. | 536 | * @dev: Device whose preferred target power state to return. |
537 | * @d_min_p: Location to store the upper limit of the allowed states range. | 537 | * @d_min_p: Location to store the upper limit of the allowed states range. |
538 | * @d_max_in: Deepest low-power state to take into consideration. | 538 | * @d_max_in: Deepest low-power state to take into consideration. |
539 | * Return value: Preferred power state of the device on success, -ENODEV | 539 | * Return value: Preferred power state of the device on success, -ENODEV |
540 | * if there's no 'struct acpi_device' for @dev, -EINVAL if @d_max_in is | 540 | * if there's no 'struct acpi_device' for @dev, -EINVAL if @d_max_in is |
541 | * incorrect, or -ENODATA on ACPI method failure. | 541 | * incorrect, or -ENODATA on ACPI method failure. |
542 | * | 542 | * |
543 | * The caller must ensure that @dev is valid before using this function. | 543 | * The caller must ensure that @dev is valid before using this function. |
544 | */ | 544 | */ |
545 | int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) | 545 | int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) |
546 | { | 546 | { |
547 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | 547 | acpi_handle handle = ACPI_HANDLE(dev); |
548 | struct acpi_device *adev; | 548 | struct acpi_device *adev; |
549 | int ret, d_min, d_max; | 549 | int ret, d_min, d_max; |
550 | 550 | ||
551 | if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD) | 551 | if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD) |
552 | return -EINVAL; | 552 | return -EINVAL; |
553 | 553 | ||
554 | if (d_max_in > ACPI_STATE_D3_HOT) { | 554 | if (d_max_in > ACPI_STATE_D3_HOT) { |
555 | enum pm_qos_flags_status stat; | 555 | enum pm_qos_flags_status stat; |
556 | 556 | ||
557 | stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); | 557 | stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); |
558 | if (stat == PM_QOS_FLAGS_ALL) | 558 | if (stat == PM_QOS_FLAGS_ALL) |
559 | d_max_in = ACPI_STATE_D3_HOT; | 559 | d_max_in = ACPI_STATE_D3_HOT; |
560 | } | 560 | } |
561 | 561 | ||
562 | if (!handle || acpi_bus_get_device(handle, &adev)) { | 562 | if (!handle || acpi_bus_get_device(handle, &adev)) { |
563 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); | 563 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); |
564 | return -ENODEV; | 564 | return -ENODEV; |
565 | } | 565 | } |
566 | 566 | ||
567 | ret = acpi_dev_pm_get_state(dev, adev, acpi_target_system_state(), | 567 | ret = acpi_dev_pm_get_state(dev, adev, acpi_target_system_state(), |
568 | &d_min, &d_max); | 568 | &d_min, &d_max); |
569 | if (ret) | 569 | if (ret) |
570 | return ret; | 570 | return ret; |
571 | 571 | ||
572 | if (d_max_in < d_min) | 572 | if (d_max_in < d_min) |
573 | return -EINVAL; | 573 | return -EINVAL; |
574 | 574 | ||
575 | if (d_max > d_max_in) { | 575 | if (d_max > d_max_in) { |
576 | for (d_max = d_max_in; d_max > d_min; d_max--) { | 576 | for (d_max = d_max_in; d_max > d_min; d_max--) { |
577 | if (adev->power.states[d_max].flags.valid) | 577 | if (adev->power.states[d_max].flags.valid) |
578 | break; | 578 | break; |
579 | } | 579 | } |
580 | } | 580 | } |
581 | 581 | ||
582 | if (d_min_p) | 582 | if (d_min_p) |
583 | *d_min_p = d_min; | 583 | *d_min_p = d_min; |
584 | 584 | ||
585 | return d_max; | 585 | return d_max; |
586 | } | 586 | } |
587 | EXPORT_SYMBOL(acpi_pm_device_sleep_state); | 587 | EXPORT_SYMBOL(acpi_pm_device_sleep_state); |
588 | 588 | ||
589 | #ifdef CONFIG_PM_RUNTIME | 589 | #ifdef CONFIG_PM_RUNTIME |
590 | /** | 590 | /** |
591 | * acpi_wakeup_device - Wakeup notification handler for ACPI devices. | 591 | * acpi_wakeup_device - Wakeup notification handler for ACPI devices. |
592 | * @handle: ACPI handle of the device the notification is for. | 592 | * @handle: ACPI handle of the device the notification is for. |
593 | * @event: Type of the signaled event. | 593 | * @event: Type of the signaled event. |
594 | * @context: Device corresponding to @handle. | 594 | * @context: Device corresponding to @handle. |
595 | */ | 595 | */ |
596 | static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context) | 596 | static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context) |
597 | { | 597 | { |
598 | struct device *dev = context; | 598 | struct device *dev = context; |
599 | 599 | ||
600 | if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) { | 600 | if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) { |
601 | pm_wakeup_event(dev, 0); | 601 | pm_wakeup_event(dev, 0); |
602 | pm_runtime_resume(dev); | 602 | pm_runtime_resume(dev); |
603 | } | 603 | } |
604 | } | 604 | } |
605 | 605 | ||
606 | /** | 606 | /** |
607 | * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device. | 607 | * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device. |
608 | * @adev: ACPI device to enable/disable the remote wakeup for. | 608 | * @adev: ACPI device to enable/disable the remote wakeup for. |
609 | * @enable: Whether to enable or disable the wakeup functionality. | 609 | * @enable: Whether to enable or disable the wakeup functionality. |
610 | * | 610 | * |
611 | * Enable/disable the GPE associated with @adev so that it can generate | 611 | * Enable/disable the GPE associated with @adev so that it can generate |
612 | * wakeup signals for the device in response to external (remote) events and | 612 | * wakeup signals for the device in response to external (remote) events and |
613 | * enable/disable device wakeup power. | 613 | * enable/disable device wakeup power. |
614 | * | 614 | * |
615 | * Callers must ensure that @adev is a valid ACPI device node before executing | 615 | * Callers must ensure that @adev is a valid ACPI device node before executing |
616 | * this function. | 616 | * this function. |
617 | */ | 617 | */ |
618 | int __acpi_device_run_wake(struct acpi_device *adev, bool enable) | 618 | int __acpi_device_run_wake(struct acpi_device *adev, bool enable) |
619 | { | 619 | { |
620 | struct acpi_device_wakeup *wakeup = &adev->wakeup; | 620 | struct acpi_device_wakeup *wakeup = &adev->wakeup; |
621 | 621 | ||
622 | if (enable) { | 622 | if (enable) { |
623 | acpi_status res; | 623 | acpi_status res; |
624 | int error; | 624 | int error; |
625 | 625 | ||
626 | error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0); | 626 | error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0); |
627 | if (error) | 627 | if (error) |
628 | return error; | 628 | return error; |
629 | 629 | ||
630 | res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); | 630 | res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); |
631 | if (ACPI_FAILURE(res)) { | 631 | if (ACPI_FAILURE(res)) { |
632 | acpi_disable_wakeup_device_power(adev); | 632 | acpi_disable_wakeup_device_power(adev); |
633 | return -EIO; | 633 | return -EIO; |
634 | } | 634 | } |
635 | } else { | 635 | } else { |
636 | acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); | 636 | acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); |
637 | acpi_disable_wakeup_device_power(adev); | 637 | acpi_disable_wakeup_device_power(adev); |
638 | } | 638 | } |
639 | return 0; | 639 | return 0; |
640 | } | 640 | } |
641 | 641 | ||
642 | /** | 642 | /** |
643 | * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device. | 643 | * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device. |
644 | * @dev: Device to enable/disable the platform to wake up. | 644 | * @dev: Device to enable/disable the platform to wake up. |
645 | * @enable: Whether to enable or disable the wakeup functionality. | 645 | * @enable: Whether to enable or disable the wakeup functionality. |
646 | */ | 646 | */ |
647 | int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) | 647 | int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) |
648 | { | 648 | { |
649 | struct acpi_device *adev; | 649 | struct acpi_device *adev; |
650 | acpi_handle handle; | 650 | acpi_handle handle; |
651 | 651 | ||
652 | if (!device_run_wake(phys_dev)) | 652 | if (!device_run_wake(phys_dev)) |
653 | return -EINVAL; | 653 | return -EINVAL; |
654 | 654 | ||
655 | handle = DEVICE_ACPI_HANDLE(phys_dev); | 655 | handle = ACPI_HANDLE(phys_dev); |
656 | if (!handle || acpi_bus_get_device(handle, &adev)) { | 656 | if (!handle || acpi_bus_get_device(handle, &adev)) { |
657 | dev_dbg(phys_dev, "ACPI handle without context in %s!\n", | 657 | dev_dbg(phys_dev, "ACPI handle without context in %s!\n", |
658 | __func__); | 658 | __func__); |
659 | return -ENODEV; | 659 | return -ENODEV; |
660 | } | 660 | } |
661 | 661 | ||
662 | return __acpi_device_run_wake(adev, enable); | 662 | return __acpi_device_run_wake(adev, enable); |
663 | } | 663 | } |
664 | EXPORT_SYMBOL(acpi_pm_device_run_wake); | 664 | EXPORT_SYMBOL(acpi_pm_device_run_wake); |
665 | #else | 665 | #else |
666 | static inline void acpi_wakeup_device(acpi_handle handle, u32 event, | 666 | static inline void acpi_wakeup_device(acpi_handle handle, u32 event, |
667 | void *context) {} | 667 | void *context) {} |
668 | #endif /* CONFIG_PM_RUNTIME */ | 668 | #endif /* CONFIG_PM_RUNTIME */ |
669 | 669 | ||
670 | #ifdef CONFIG_PM_SLEEP | 670 | #ifdef CONFIG_PM_SLEEP |
671 | /** | 671 | /** |
672 | * __acpi_device_sleep_wake - Enable or disable device to wake up the system. | 672 | * __acpi_device_sleep_wake - Enable or disable device to wake up the system. |
673 | * @dev: Device to enable/desible to wake up the system. | 673 | * @dev: Device to enable/desible to wake up the system. |
674 | * @target_state: System state the device is supposed to wake up from. | 674 | * @target_state: System state the device is supposed to wake up from. |
675 | * @enable: Whether to enable or disable @dev to wake up the system. | 675 | * @enable: Whether to enable or disable @dev to wake up the system. |
676 | */ | 676 | */ |
677 | int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state, | 677 | int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state, |
678 | bool enable) | 678 | bool enable) |
679 | { | 679 | { |
680 | return enable ? | 680 | return enable ? |
681 | acpi_enable_wakeup_device_power(adev, target_state) : | 681 | acpi_enable_wakeup_device_power(adev, target_state) : |
682 | acpi_disable_wakeup_device_power(adev); | 682 | acpi_disable_wakeup_device_power(adev); |
683 | } | 683 | } |
684 | 684 | ||
685 | /** | 685 | /** |
686 | * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system. | 686 | * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system. |
687 | * @dev: Device to enable/desible to wake up the system from sleep states. | 687 | * @dev: Device to enable/desible to wake up the system from sleep states. |
688 | * @enable: Whether to enable or disable @dev to wake up the system. | 688 | * @enable: Whether to enable or disable @dev to wake up the system. |
689 | */ | 689 | */ |
690 | int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | 690 | int acpi_pm_device_sleep_wake(struct device *dev, bool enable) |
691 | { | 691 | { |
692 | acpi_handle handle; | 692 | acpi_handle handle; |
693 | struct acpi_device *adev; | 693 | struct acpi_device *adev; |
694 | int error; | 694 | int error; |
695 | 695 | ||
696 | if (!device_can_wakeup(dev)) | 696 | if (!device_can_wakeup(dev)) |
697 | return -EINVAL; | 697 | return -EINVAL; |
698 | 698 | ||
699 | handle = DEVICE_ACPI_HANDLE(dev); | 699 | handle = ACPI_HANDLE(dev); |
700 | if (!handle || acpi_bus_get_device(handle, &adev)) { | 700 | if (!handle || acpi_bus_get_device(handle, &adev)) { |
701 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); | 701 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); |
702 | return -ENODEV; | 702 | return -ENODEV; |
703 | } | 703 | } |
704 | 704 | ||
705 | error = __acpi_device_sleep_wake(adev, acpi_target_system_state(), | 705 | error = __acpi_device_sleep_wake(adev, acpi_target_system_state(), |
706 | enable); | 706 | enable); |
707 | if (!error) | 707 | if (!error) |
708 | dev_info(dev, "System wakeup %s by ACPI\n", | 708 | dev_info(dev, "System wakeup %s by ACPI\n", |
709 | enable ? "enabled" : "disabled"); | 709 | enable ? "enabled" : "disabled"); |
710 | 710 | ||
711 | return error; | 711 | return error; |
712 | } | 712 | } |
713 | #endif /* CONFIG_PM_SLEEP */ | 713 | #endif /* CONFIG_PM_SLEEP */ |
714 | 714 | ||
715 | /** | 715 | /** |
716 | * acpi_dev_pm_get_node - Get ACPI device node for the given physical device. | 716 | * acpi_dev_pm_get_node - Get ACPI device node for the given physical device. |
717 | * @dev: Device to get the ACPI node for. | 717 | * @dev: Device to get the ACPI node for. |
718 | */ | 718 | */ |
719 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | 719 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev) |
720 | { | 720 | { |
721 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | 721 | acpi_handle handle = ACPI_HANDLE(dev); |
722 | struct acpi_device *adev; | 722 | struct acpi_device *adev; |
723 | 723 | ||
724 | return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL; | 724 | return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL; |
725 | } | 725 | } |
726 | 726 | ||
727 | /** | 727 | /** |
728 | * acpi_dev_pm_low_power - Put ACPI device into a low-power state. | 728 | * acpi_dev_pm_low_power - Put ACPI device into a low-power state. |
729 | * @dev: Device to put into a low-power state. | 729 | * @dev: Device to put into a low-power state. |
730 | * @adev: ACPI device node corresponding to @dev. | 730 | * @adev: ACPI device node corresponding to @dev. |
731 | * @system_state: System state to choose the device state for. | 731 | * @system_state: System state to choose the device state for. |
732 | */ | 732 | */ |
733 | static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev, | 733 | static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev, |
734 | u32 system_state) | 734 | u32 system_state) |
735 | { | 735 | { |
736 | int ret, state; | 736 | int ret, state; |
737 | 737 | ||
738 | if (!acpi_device_power_manageable(adev)) | 738 | if (!acpi_device_power_manageable(adev)) |
739 | return 0; | 739 | return 0; |
740 | 740 | ||
741 | ret = acpi_dev_pm_get_state(dev, adev, system_state, NULL, &state); | 741 | ret = acpi_dev_pm_get_state(dev, adev, system_state, NULL, &state); |
742 | return ret ? ret : acpi_device_set_power(adev, state); | 742 | return ret ? ret : acpi_device_set_power(adev, state); |
743 | } | 743 | } |
744 | 744 | ||
745 | /** | 745 | /** |
746 | * acpi_dev_pm_full_power - Put ACPI device into the full-power state. | 746 | * acpi_dev_pm_full_power - Put ACPI device into the full-power state. |
747 | * @adev: ACPI device node to put into the full-power state. | 747 | * @adev: ACPI device node to put into the full-power state. |
748 | */ | 748 | */ |
749 | static int acpi_dev_pm_full_power(struct acpi_device *adev) | 749 | static int acpi_dev_pm_full_power(struct acpi_device *adev) |
750 | { | 750 | { |
751 | return acpi_device_power_manageable(adev) ? | 751 | return acpi_device_power_manageable(adev) ? |
752 | acpi_device_set_power(adev, ACPI_STATE_D0) : 0; | 752 | acpi_device_set_power(adev, ACPI_STATE_D0) : 0; |
753 | } | 753 | } |
754 | 754 | ||
755 | #ifdef CONFIG_PM_RUNTIME | 755 | #ifdef CONFIG_PM_RUNTIME |
756 | /** | 756 | /** |
757 | * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI. | 757 | * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI. |
758 | * @dev: Device to put into a low-power state. | 758 | * @dev: Device to put into a low-power state. |
759 | * | 759 | * |
760 | * Put the given device into a runtime low-power state using the standard ACPI | 760 | * Put the given device into a runtime low-power state using the standard ACPI |
761 | * mechanism. Set up remote wakeup if desired, choose the state to put the | 761 | * mechanism. Set up remote wakeup if desired, choose the state to put the |
762 | * device into (this checks if remote wakeup is expected to work too), and set | 762 | * device into (this checks if remote wakeup is expected to work too), and set |
763 | * the power state of the device. | 763 | * the power state of the device. |
764 | */ | 764 | */ |
765 | int acpi_dev_runtime_suspend(struct device *dev) | 765 | int acpi_dev_runtime_suspend(struct device *dev) |
766 | { | 766 | { |
767 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | 767 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); |
768 | bool remote_wakeup; | 768 | bool remote_wakeup; |
769 | int error; | 769 | int error; |
770 | 770 | ||
771 | if (!adev) | 771 | if (!adev) |
772 | return 0; | 772 | return 0; |
773 | 773 | ||
774 | remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) > | 774 | remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) > |
775 | PM_QOS_FLAGS_NONE; | 775 | PM_QOS_FLAGS_NONE; |
776 | error = __acpi_device_run_wake(adev, remote_wakeup); | 776 | error = __acpi_device_run_wake(adev, remote_wakeup); |
777 | if (remote_wakeup && error) | 777 | if (remote_wakeup && error) |
778 | return -EAGAIN; | 778 | return -EAGAIN; |
779 | 779 | ||
780 | error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | 780 | error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); |
781 | if (error) | 781 | if (error) |
782 | __acpi_device_run_wake(adev, false); | 782 | __acpi_device_run_wake(adev, false); |
783 | 783 | ||
784 | return error; | 784 | return error; |
785 | } | 785 | } |
786 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); | 786 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); |
787 | 787 | ||
788 | /** | 788 | /** |
789 | * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. | 789 | * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. |
790 | * @dev: Device to put into the full-power state. | 790 | * @dev: Device to put into the full-power state. |
791 | * | 791 | * |
792 | * Put the given device into the full-power state using the standard ACPI | 792 | * Put the given device into the full-power state using the standard ACPI |
793 | * mechanism at run time. Set the power state of the device to ACPI D0 and | 793 | * mechanism at run time. Set the power state of the device to ACPI D0 and |
794 | * disable remote wakeup. | 794 | * disable remote wakeup. |
795 | */ | 795 | */ |
796 | int acpi_dev_runtime_resume(struct device *dev) | 796 | int acpi_dev_runtime_resume(struct device *dev) |
797 | { | 797 | { |
798 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | 798 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); |
799 | int error; | 799 | int error; |
800 | 800 | ||
801 | if (!adev) | 801 | if (!adev) |
802 | return 0; | 802 | return 0; |
803 | 803 | ||
804 | error = acpi_dev_pm_full_power(adev); | 804 | error = acpi_dev_pm_full_power(adev); |
805 | __acpi_device_run_wake(adev, false); | 805 | __acpi_device_run_wake(adev, false); |
806 | return error; | 806 | return error; |
807 | } | 807 | } |
808 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); | 808 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); |
809 | 809 | ||
810 | /** | 810 | /** |
811 | * acpi_subsys_runtime_suspend - Suspend device using ACPI. | 811 | * acpi_subsys_runtime_suspend - Suspend device using ACPI. |
812 | * @dev: Device to suspend. | 812 | * @dev: Device to suspend. |
813 | * | 813 | * |
814 | * Carry out the generic runtime suspend procedure for @dev and use ACPI to put | 814 | * Carry out the generic runtime suspend procedure for @dev and use ACPI to put |
815 | * it into a runtime low-power state. | 815 | * it into a runtime low-power state. |
816 | */ | 816 | */ |
817 | int acpi_subsys_runtime_suspend(struct device *dev) | 817 | int acpi_subsys_runtime_suspend(struct device *dev) |
818 | { | 818 | { |
819 | int ret = pm_generic_runtime_suspend(dev); | 819 | int ret = pm_generic_runtime_suspend(dev); |
820 | return ret ? ret : acpi_dev_runtime_suspend(dev); | 820 | return ret ? ret : acpi_dev_runtime_suspend(dev); |
821 | } | 821 | } |
822 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); | 822 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); |
823 | 823 | ||
824 | /** | 824 | /** |
825 | * acpi_subsys_runtime_resume - Resume device using ACPI. | 825 | * acpi_subsys_runtime_resume - Resume device using ACPI. |
826 | * @dev: Device to Resume. | 826 | * @dev: Device to Resume. |
827 | * | 827 | * |
828 | * Use ACPI to put the given device into the full-power state and carry out the | 828 | * Use ACPI to put the given device into the full-power state and carry out the |
829 | * generic runtime resume procedure for it. | 829 | * generic runtime resume procedure for it. |
830 | */ | 830 | */ |
831 | int acpi_subsys_runtime_resume(struct device *dev) | 831 | int acpi_subsys_runtime_resume(struct device *dev) |
832 | { | 832 | { |
833 | int ret = acpi_dev_runtime_resume(dev); | 833 | int ret = acpi_dev_runtime_resume(dev); |
834 | return ret ? ret : pm_generic_runtime_resume(dev); | 834 | return ret ? ret : pm_generic_runtime_resume(dev); |
835 | } | 835 | } |
836 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); | 836 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); |
837 | #endif /* CONFIG_PM_RUNTIME */ | 837 | #endif /* CONFIG_PM_RUNTIME */ |
838 | 838 | ||
839 | #ifdef CONFIG_PM_SLEEP | 839 | #ifdef CONFIG_PM_SLEEP |
840 | /** | 840 | /** |
841 | * acpi_dev_suspend_late - Put device into a low-power state using ACPI. | 841 | * acpi_dev_suspend_late - Put device into a low-power state using ACPI. |
842 | * @dev: Device to put into a low-power state. | 842 | * @dev: Device to put into a low-power state. |
843 | * | 843 | * |
844 | * Put the given device into a low-power state during system transition to a | 844 | * Put the given device into a low-power state during system transition to a |
845 | * sleep state using the standard ACPI mechanism. Set up system wakeup if | 845 | * sleep state using the standard ACPI mechanism. Set up system wakeup if |
846 | * desired, choose the state to put the device into (this checks if system | 846 | * desired, choose the state to put the device into (this checks if system |
847 | * wakeup is expected to work too), and set the power state of the device. | 847 | * wakeup is expected to work too), and set the power state of the device. |
848 | */ | 848 | */ |
849 | int acpi_dev_suspend_late(struct device *dev) | 849 | int acpi_dev_suspend_late(struct device *dev) |
850 | { | 850 | { |
851 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | 851 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); |
852 | u32 target_state; | 852 | u32 target_state; |
853 | bool wakeup; | 853 | bool wakeup; |
854 | int error; | 854 | int error; |
855 | 855 | ||
856 | if (!adev) | 856 | if (!adev) |
857 | return 0; | 857 | return 0; |
858 | 858 | ||
859 | target_state = acpi_target_system_state(); | 859 | target_state = acpi_target_system_state(); |
860 | wakeup = device_may_wakeup(dev); | 860 | wakeup = device_may_wakeup(dev); |
861 | error = __acpi_device_sleep_wake(adev, target_state, wakeup); | 861 | error = __acpi_device_sleep_wake(adev, target_state, wakeup); |
862 | if (wakeup && error) | 862 | if (wakeup && error) |
863 | return error; | 863 | return error; |
864 | 864 | ||
865 | error = acpi_dev_pm_low_power(dev, adev, target_state); | 865 | error = acpi_dev_pm_low_power(dev, adev, target_state); |
866 | if (error) | 866 | if (error) |
867 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); | 867 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); |
868 | 868 | ||
869 | return error; | 869 | return error; |
870 | } | 870 | } |
871 | EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); | 871 | EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); |
872 | 872 | ||
873 | /** | 873 | /** |
874 | * acpi_dev_resume_early - Put device into the full-power state using ACPI. | 874 | * acpi_dev_resume_early - Put device into the full-power state using ACPI. |
875 | * @dev: Device to put into the full-power state. | 875 | * @dev: Device to put into the full-power state. |
876 | * | 876 | * |
877 | * Put the given device into the full-power state using the standard ACPI | 877 | * Put the given device into the full-power state using the standard ACPI |
878 | * mechanism during system transition to the working state. Set the power | 878 | * mechanism during system transition to the working state. Set the power |
879 | * state of the device to ACPI D0 and disable remote wakeup. | 879 | * state of the device to ACPI D0 and disable remote wakeup. |
880 | */ | 880 | */ |
881 | int acpi_dev_resume_early(struct device *dev) | 881 | int acpi_dev_resume_early(struct device *dev) |
882 | { | 882 | { |
883 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | 883 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); |
884 | int error; | 884 | int error; |
885 | 885 | ||
886 | if (!adev) | 886 | if (!adev) |
887 | return 0; | 887 | return 0; |
888 | 888 | ||
889 | error = acpi_dev_pm_full_power(adev); | 889 | error = acpi_dev_pm_full_power(adev); |
890 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); | 890 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); |
891 | return error; | 891 | return error; |
892 | } | 892 | } |
893 | EXPORT_SYMBOL_GPL(acpi_dev_resume_early); | 893 | EXPORT_SYMBOL_GPL(acpi_dev_resume_early); |
894 | 894 | ||
895 | /** | 895 | /** |
896 | * acpi_subsys_prepare - Prepare device for system transition to a sleep state. | 896 | * acpi_subsys_prepare - Prepare device for system transition to a sleep state. |
897 | * @dev: Device to prepare. | 897 | * @dev: Device to prepare. |
898 | */ | 898 | */ |
899 | int acpi_subsys_prepare(struct device *dev) | 899 | int acpi_subsys_prepare(struct device *dev) |
900 | { | 900 | { |
901 | /* | 901 | /* |
902 | * Follow PCI and resume devices suspended at run time before running | 902 | * Follow PCI and resume devices suspended at run time before running |
903 | * their system suspend callbacks. | 903 | * their system suspend callbacks. |
904 | */ | 904 | */ |
905 | pm_runtime_resume(dev); | 905 | pm_runtime_resume(dev); |
906 | return pm_generic_prepare(dev); | 906 | return pm_generic_prepare(dev); |
907 | } | 907 | } |
908 | EXPORT_SYMBOL_GPL(acpi_subsys_prepare); | 908 | EXPORT_SYMBOL_GPL(acpi_subsys_prepare); |
909 | 909 | ||
910 | /** | 910 | /** |
911 | * acpi_subsys_suspend_late - Suspend device using ACPI. | 911 | * acpi_subsys_suspend_late - Suspend device using ACPI. |
912 | * @dev: Device to suspend. | 912 | * @dev: Device to suspend. |
913 | * | 913 | * |
914 | * Carry out the generic late suspend procedure for @dev and use ACPI to put | 914 | * Carry out the generic late suspend procedure for @dev and use ACPI to put |
915 | * it into a low-power state during system transition into a sleep state. | 915 | * it into a low-power state during system transition into a sleep state. |
916 | */ | 916 | */ |
917 | int acpi_subsys_suspend_late(struct device *dev) | 917 | int acpi_subsys_suspend_late(struct device *dev) |
918 | { | 918 | { |
919 | int ret = pm_generic_suspend_late(dev); | 919 | int ret = pm_generic_suspend_late(dev); |
920 | return ret ? ret : acpi_dev_suspend_late(dev); | 920 | return ret ? ret : acpi_dev_suspend_late(dev); |
921 | } | 921 | } |
922 | EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); | 922 | EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); |
923 | 923 | ||
924 | /** | 924 | /** |
925 | * acpi_subsys_resume_early - Resume device using ACPI. | 925 | * acpi_subsys_resume_early - Resume device using ACPI. |
926 | * @dev: Device to Resume. | 926 | * @dev: Device to Resume. |
927 | * | 927 | * |
928 | * Use ACPI to put the given device into the full-power state and carry out the | 928 | * Use ACPI to put the given device into the full-power state and carry out the |
929 | * generic early resume procedure for it during system transition into the | 929 | * generic early resume procedure for it during system transition into the |
930 | * working state. | 930 | * working state. |
931 | */ | 931 | */ |
932 | int acpi_subsys_resume_early(struct device *dev) | 932 | int acpi_subsys_resume_early(struct device *dev) |
933 | { | 933 | { |
934 | int ret = acpi_dev_resume_early(dev); | 934 | int ret = acpi_dev_resume_early(dev); |
935 | return ret ? ret : pm_generic_resume_early(dev); | 935 | return ret ? ret : pm_generic_resume_early(dev); |
936 | } | 936 | } |
937 | EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); | 937 | EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); |
938 | #endif /* CONFIG_PM_SLEEP */ | 938 | #endif /* CONFIG_PM_SLEEP */ |
939 | 939 | ||
940 | static struct dev_pm_domain acpi_general_pm_domain = { | 940 | static struct dev_pm_domain acpi_general_pm_domain = { |
941 | .ops = { | 941 | .ops = { |
942 | #ifdef CONFIG_PM_RUNTIME | 942 | #ifdef CONFIG_PM_RUNTIME |
943 | .runtime_suspend = acpi_subsys_runtime_suspend, | 943 | .runtime_suspend = acpi_subsys_runtime_suspend, |
944 | .runtime_resume = acpi_subsys_runtime_resume, | 944 | .runtime_resume = acpi_subsys_runtime_resume, |
945 | #endif | 945 | #endif |
946 | #ifdef CONFIG_PM_SLEEP | 946 | #ifdef CONFIG_PM_SLEEP |
947 | .prepare = acpi_subsys_prepare, | 947 | .prepare = acpi_subsys_prepare, |
948 | .suspend_late = acpi_subsys_suspend_late, | 948 | .suspend_late = acpi_subsys_suspend_late, |
949 | .resume_early = acpi_subsys_resume_early, | 949 | .resume_early = acpi_subsys_resume_early, |
950 | .poweroff_late = acpi_subsys_suspend_late, | 950 | .poweroff_late = acpi_subsys_suspend_late, |
951 | .restore_early = acpi_subsys_resume_early, | 951 | .restore_early = acpi_subsys_resume_early, |
952 | #endif | 952 | #endif |
953 | }, | 953 | }, |
954 | }; | 954 | }; |
955 | 955 | ||
956 | /** | 956 | /** |
957 | * acpi_dev_pm_attach - Prepare device for ACPI power management. | 957 | * acpi_dev_pm_attach - Prepare device for ACPI power management. |
958 | * @dev: Device to prepare. | 958 | * @dev: Device to prepare. |
959 | * @power_on: Whether or not to power on the device. | 959 | * @power_on: Whether or not to power on the device. |
960 | * | 960 | * |
961 | * If @dev has a valid ACPI handle that has a valid struct acpi_device object | 961 | * If @dev has a valid ACPI handle that has a valid struct acpi_device object |
962 | * attached to it, install a wakeup notification handler for the device and | 962 | * attached to it, install a wakeup notification handler for the device and |
963 | * add it to the general ACPI PM domain. If @power_on is set, the device will | 963 | * add it to the general ACPI PM domain. If @power_on is set, the device will |
964 | * be put into the ACPI D0 state before the function returns. | 964 | * be put into the ACPI D0 state before the function returns. |
965 | * | 965 | * |
966 | * This assumes that the @dev's bus type uses generic power management callbacks | 966 | * This assumes that the @dev's bus type uses generic power management callbacks |
967 | * (or doesn't use any power management callbacks at all). | 967 | * (or doesn't use any power management callbacks at all). |
968 | * | 968 | * |
969 | * Callers must ensure proper synchronization of this function with power | 969 | * Callers must ensure proper synchronization of this function with power |
970 | * management callbacks. | 970 | * management callbacks. |
971 | */ | 971 | */ |
972 | int acpi_dev_pm_attach(struct device *dev, bool power_on) | 972 | int acpi_dev_pm_attach(struct device *dev, bool power_on) |
973 | { | 973 | { |
974 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | 974 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); |
975 | 975 | ||
976 | if (!adev) | 976 | if (!adev) |
977 | return -ENODEV; | 977 | return -ENODEV; |
978 | 978 | ||
979 | if (dev->pm_domain) | 979 | if (dev->pm_domain) |
980 | return -EEXIST; | 980 | return -EEXIST; |
981 | 981 | ||
982 | acpi_add_pm_notifier(adev, acpi_wakeup_device, dev); | 982 | acpi_add_pm_notifier(adev, acpi_wakeup_device, dev); |
983 | dev->pm_domain = &acpi_general_pm_domain; | 983 | dev->pm_domain = &acpi_general_pm_domain; |
984 | if (power_on) { | 984 | if (power_on) { |
985 | acpi_dev_pm_full_power(adev); | 985 | acpi_dev_pm_full_power(adev); |
986 | __acpi_device_run_wake(adev, false); | 986 | __acpi_device_run_wake(adev, false); |
987 | } | 987 | } |
988 | return 0; | 988 | return 0; |
989 | } | 989 | } |
990 | EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); | 990 | EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); |
991 | 991 | ||
992 | /** | 992 | /** |
993 | * acpi_dev_pm_detach - Remove ACPI power management from the device. | 993 | * acpi_dev_pm_detach - Remove ACPI power management from the device. |
994 | * @dev: Device to take care of. | 994 | * @dev: Device to take care of. |
995 | * @power_off: Whether or not to try to remove power from the device. | 995 | * @power_off: Whether or not to try to remove power from the device. |
996 | * | 996 | * |
997 | * Remove the device from the general ACPI PM domain and remove its wakeup | 997 | * Remove the device from the general ACPI PM domain and remove its wakeup |
998 | * notifier. If @power_off is set, additionally remove power from the device if | 998 | * notifier. If @power_off is set, additionally remove power from the device if |
999 | * possible. | 999 | * possible. |
1000 | * | 1000 | * |
1001 | * Callers must ensure proper synchronization of this function with power | 1001 | * Callers must ensure proper synchronization of this function with power |
1002 | * management callbacks. | 1002 | * management callbacks. |
1003 | */ | 1003 | */ |
1004 | void acpi_dev_pm_detach(struct device *dev, bool power_off) | 1004 | void acpi_dev_pm_detach(struct device *dev, bool power_off) |
1005 | { | 1005 | { |
1006 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | 1006 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); |
1007 | 1007 | ||
1008 | if (adev && dev->pm_domain == &acpi_general_pm_domain) { | 1008 | if (adev && dev->pm_domain == &acpi_general_pm_domain) { |
1009 | dev->pm_domain = NULL; | 1009 | dev->pm_domain = NULL; |
1010 | acpi_remove_pm_notifier(adev, acpi_wakeup_device); | 1010 | acpi_remove_pm_notifier(adev, acpi_wakeup_device); |
1011 | if (power_off) { | 1011 | if (power_off) { |
1012 | /* | 1012 | /* |
1013 | * If the device's PM QoS resume latency limit or flags | 1013 | * If the device's PM QoS resume latency limit or flags |
1014 | * have been exposed to user space, they have to be | 1014 | * have been exposed to user space, they have to be |
1015 | * hidden at this point, so that they don't affect the | 1015 | * hidden at this point, so that they don't affect the |
1016 | * choice of the low-power state to put the device into. | 1016 | * choice of the low-power state to put the device into. |
1017 | */ | 1017 | */ |
1018 | dev_pm_qos_hide_latency_limit(dev); | 1018 | dev_pm_qos_hide_latency_limit(dev); |
1019 | dev_pm_qos_hide_flags(dev); | 1019 | dev_pm_qos_hide_flags(dev); |
1020 | __acpi_device_run_wake(adev, false); | 1020 | __acpi_device_run_wake(adev, false); |
1021 | acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | 1021 | acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); |
1022 | } | 1022 | } |
1023 | } | 1023 | } |
1024 | } | 1024 | } |
1025 | EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); | 1025 | EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); |
1026 | #endif /* CONFIG_PM */ | 1026 | #endif /* CONFIG_PM */ |
1027 | 1027 |
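Note (editor's annotation, not part of the commit): acpi_general_pm_domain and the acpi_dev_pm_attach()/acpi_dev_pm_detach() helpers above are meant to be consumed by bus and driver code. As a point of reference only, here is a minimal sketch of how a hypothetical platform driver could hook into the domain; the foo_* names are invented for illustration and do not exist in the tree.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Join the general ACPI PM domain and power the device up to D0. */
	ret = acpi_dev_pm_attach(&pdev->dev, true);
	if (ret < 0 && ret != -ENODEV)
		return ret;	/* e.g. -EEXIST: another PM domain is already set */

	pm_runtime_enable(&pdev->dev);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	/* Leave the domain and, if possible, remove power from the device. */
	acpi_dev_pm_detach(&pdev->dev, true);
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= { .name = "foo" },
	.probe	= foo_probe,
	.remove	= foo_remove,
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

A driver attached this way has the runtime suspend/resume and system sleep callbacks of acpi_general_pm_domain invoked on its behalf, so it normally does not need ACPI-specific dev_pm_ops of its own.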
drivers/gpu/drm/i915/intel_acpi.c
1 | /* | 1 | /* |
2 | * Intel ACPI functions | 2 | * Intel ACPI functions |
3 | * | 3 | * |
4 | * _DSM related code stolen from nouveau_acpi.c. | 4 | * _DSM related code stolen from nouveau_acpi.c. |
5 | */ | 5 | */ |
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/acpi.h> | 7 | #include <linux/acpi.h> |
8 | #include <linux/vga_switcheroo.h> | 8 | #include <linux/vga_switcheroo.h> |
9 | #include <acpi/acpi_drivers.h> | 9 | #include <acpi/acpi_drivers.h> |
10 | 10 | ||
11 | #include <drm/drmP.h> | 11 | #include <drm/drmP.h> |
12 | #include "i915_drv.h" | 12 | #include "i915_drv.h" |
13 | 13 | ||
14 | #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ | 14 | #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ |
15 | 15 | ||
16 | #define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ | 16 | #define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ |
17 | #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ | 17 | #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ |
18 | 18 | ||
19 | static struct intel_dsm_priv { | 19 | static struct intel_dsm_priv { |
20 | acpi_handle dhandle; | 20 | acpi_handle dhandle; |
21 | } intel_dsm_priv; | 21 | } intel_dsm_priv; |
22 | 22 | ||
23 | static const u8 intel_dsm_guid[] = { | 23 | static const u8 intel_dsm_guid[] = { |
24 | 0xd3, 0x73, 0xd8, 0x7e, | 24 | 0xd3, 0x73, 0xd8, 0x7e, |
25 | 0xd0, 0xc2, | 25 | 0xd0, 0xc2, |
26 | 0x4f, 0x4e, | 26 | 0x4f, 0x4e, |
27 | 0xa8, 0x54, | 27 | 0xa8, 0x54, |
28 | 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c | 28 | 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static int intel_dsm(acpi_handle handle, int func) | 31 | static int intel_dsm(acpi_handle handle, int func) |
32 | { | 32 | { |
33 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 33 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
34 | struct acpi_object_list input; | 34 | struct acpi_object_list input; |
35 | union acpi_object params[4]; | 35 | union acpi_object params[4]; |
36 | union acpi_object *obj; | 36 | union acpi_object *obj; |
37 | u32 result; | 37 | u32 result; |
38 | int ret = 0; | 38 | int ret = 0; |
39 | 39 | ||
40 | input.count = 4; | 40 | input.count = 4; |
41 | input.pointer = params; | 41 | input.pointer = params; |
42 | params[0].type = ACPI_TYPE_BUFFER; | 42 | params[0].type = ACPI_TYPE_BUFFER; |
43 | params[0].buffer.length = sizeof(intel_dsm_guid); | 43 | params[0].buffer.length = sizeof(intel_dsm_guid); |
44 | params[0].buffer.pointer = (char *)intel_dsm_guid; | 44 | params[0].buffer.pointer = (char *)intel_dsm_guid; |
45 | params[1].type = ACPI_TYPE_INTEGER; | 45 | params[1].type = ACPI_TYPE_INTEGER; |
46 | params[1].integer.value = INTEL_DSM_REVISION_ID; | 46 | params[1].integer.value = INTEL_DSM_REVISION_ID; |
47 | params[2].type = ACPI_TYPE_INTEGER; | 47 | params[2].type = ACPI_TYPE_INTEGER; |
48 | params[2].integer.value = func; | 48 | params[2].integer.value = func; |
49 | params[3].type = ACPI_TYPE_PACKAGE; | 49 | params[3].type = ACPI_TYPE_PACKAGE; |
50 | params[3].package.count = 0; | 50 | params[3].package.count = 0; |
51 | params[3].package.elements = NULL; | 51 | params[3].package.elements = NULL; |
52 | 52 | ||
53 | ret = acpi_evaluate_object(handle, "_DSM", &input, &output); | 53 | ret = acpi_evaluate_object(handle, "_DSM", &input, &output); |
54 | if (ret) { | 54 | if (ret) { |
55 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | 55 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); |
56 | return ret; | 56 | return ret; |
57 | } | 57 | } |
58 | 58 | ||
59 | obj = (union acpi_object *)output.pointer; | 59 | obj = (union acpi_object *)output.pointer; |
60 | 60 | ||
61 | result = 0; | 61 | result = 0; |
62 | switch (obj->type) { | 62 | switch (obj->type) { |
63 | case ACPI_TYPE_INTEGER: | 63 | case ACPI_TYPE_INTEGER: |
64 | result = obj->integer.value; | 64 | result = obj->integer.value; |
65 | break; | 65 | break; |
66 | 66 | ||
67 | case ACPI_TYPE_BUFFER: | 67 | case ACPI_TYPE_BUFFER: |
68 | if (obj->buffer.length == 4) { | 68 | if (obj->buffer.length == 4) { |
69 | result = (obj->buffer.pointer[0] | | 69 | result = (obj->buffer.pointer[0] | |
70 | (obj->buffer.pointer[1] << 8) | | 70 | (obj->buffer.pointer[1] << 8) | |
71 | (obj->buffer.pointer[2] << 16) | | 71 | (obj->buffer.pointer[2] << 16) | |
72 | (obj->buffer.pointer[3] << 24)); | 72 | (obj->buffer.pointer[3] << 24)); |
73 | break; | 73 | break; |
74 | } | 74 | } |
75 | default: | 75 | default: |
76 | ret = -EINVAL; | 76 | ret = -EINVAL; |
77 | break; | 77 | break; |
78 | } | 78 | } |
79 | if (result == 0x80000002) | 79 | if (result == 0x80000002) |
80 | ret = -ENODEV; | 80 | ret = -ENODEV; |
81 | 81 | ||
82 | kfree(output.pointer); | 82 | kfree(output.pointer); |
83 | return ret; | 83 | return ret; |
84 | } | 84 | } |
85 | 85 | ||
86 | static char *intel_dsm_port_name(u8 id) | 86 | static char *intel_dsm_port_name(u8 id) |
87 | { | 87 | { |
88 | switch (id) { | 88 | switch (id) { |
89 | case 0: | 89 | case 0: |
90 | return "Reserved"; | 90 | return "Reserved"; |
91 | case 1: | 91 | case 1: |
92 | return "Analog VGA"; | 92 | return "Analog VGA"; |
93 | case 2: | 93 | case 2: |
94 | return "LVDS"; | 94 | return "LVDS"; |
95 | case 3: | 95 | case 3: |
96 | return "Reserved"; | 96 | return "Reserved"; |
97 | case 4: | 97 | case 4: |
98 | return "HDMI/DVI_B"; | 98 | return "HDMI/DVI_B"; |
99 | case 5: | 99 | case 5: |
100 | return "HDMI/DVI_C"; | 100 | return "HDMI/DVI_C"; |
101 | case 6: | 101 | case 6: |
102 | return "HDMI/DVI_D"; | 102 | return "HDMI/DVI_D"; |
103 | case 7: | 103 | case 7: |
104 | return "DisplayPort_A"; | 104 | return "DisplayPort_A"; |
105 | case 8: | 105 | case 8: |
106 | return "DisplayPort_B"; | 106 | return "DisplayPort_B"; |
107 | case 9: | 107 | case 9: |
108 | return "DisplayPort_C"; | 108 | return "DisplayPort_C"; |
109 | case 0xa: | 109 | case 0xa: |
110 | return "DisplayPort_D"; | 110 | return "DisplayPort_D"; |
111 | case 0xb: | 111 | case 0xb: |
112 | case 0xc: | 112 | case 0xc: |
113 | case 0xd: | 113 | case 0xd: |
114 | return "Reserved"; | 114 | return "Reserved"; |
115 | case 0xe: | 115 | case 0xe: |
116 | return "WiDi"; | 116 | return "WiDi"; |
117 | default: | 117 | default: |
118 | return "bad type"; | 118 | return "bad type"; |
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | static char *intel_dsm_mux_type(u8 type) | 122 | static char *intel_dsm_mux_type(u8 type) |
123 | { | 123 | { |
124 | switch (type) { | 124 | switch (type) { |
125 | case 0: | 125 | case 0: |
126 | return "unknown"; | 126 | return "unknown"; |
127 | case 1: | 127 | case 1: |
128 | return "No MUX, iGPU only"; | 128 | return "No MUX, iGPU only"; |
129 | case 2: | 129 | case 2: |
130 | return "No MUX, dGPU only"; | 130 | return "No MUX, dGPU only"; |
131 | case 3: | 131 | case 3: |
132 | return "MUXed between iGPU and dGPU"; | 132 | return "MUXed between iGPU and dGPU"; |
133 | default: | 133 | default: |
134 | return "bad type"; | 134 | return "bad type"; |
135 | } | 135 | } |
136 | } | 136 | } |
137 | 137 | ||
138 | static void intel_dsm_platform_mux_info(void) | 138 | static void intel_dsm_platform_mux_info(void) |
139 | { | 139 | { |
140 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 140 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
141 | struct acpi_object_list input; | 141 | struct acpi_object_list input; |
142 | union acpi_object params[4]; | 142 | union acpi_object params[4]; |
143 | union acpi_object *pkg; | 143 | union acpi_object *pkg; |
144 | int i, ret; | 144 | int i, ret; |
145 | 145 | ||
146 | input.count = 4; | 146 | input.count = 4; |
147 | input.pointer = params; | 147 | input.pointer = params; |
148 | params[0].type = ACPI_TYPE_BUFFER; | 148 | params[0].type = ACPI_TYPE_BUFFER; |
149 | params[0].buffer.length = sizeof(intel_dsm_guid); | 149 | params[0].buffer.length = sizeof(intel_dsm_guid); |
150 | params[0].buffer.pointer = (char *)intel_dsm_guid; | 150 | params[0].buffer.pointer = (char *)intel_dsm_guid; |
151 | params[1].type = ACPI_TYPE_INTEGER; | 151 | params[1].type = ACPI_TYPE_INTEGER; |
152 | params[1].integer.value = INTEL_DSM_REVISION_ID; | 152 | params[1].integer.value = INTEL_DSM_REVISION_ID; |
153 | params[2].type = ACPI_TYPE_INTEGER; | 153 | params[2].type = ACPI_TYPE_INTEGER; |
154 | params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; | 154 | params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; |
155 | params[3].type = ACPI_TYPE_PACKAGE; | 155 | params[3].type = ACPI_TYPE_PACKAGE; |
156 | params[3].package.count = 0; | 156 | params[3].package.count = 0; |
157 | params[3].package.elements = NULL; | 157 | params[3].package.elements = NULL; |
158 | 158 | ||
159 | ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, | 159 | ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, |
160 | &output); | 160 | &output); |
161 | if (ret) { | 161 | if (ret) { |
162 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | 162 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); |
163 | goto out; | 163 | goto out; |
164 | } | 164 | } |
165 | 165 | ||
166 | pkg = (union acpi_object *)output.pointer; | 166 | pkg = (union acpi_object *)output.pointer; |
167 | 167 | ||
168 | if (pkg->type == ACPI_TYPE_PACKAGE) { | 168 | if (pkg->type == ACPI_TYPE_PACKAGE) { |
169 | union acpi_object *connector_count = &pkg->package.elements[0]; | 169 | union acpi_object *connector_count = &pkg->package.elements[0]; |
170 | DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", | 170 | DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", |
171 | (unsigned long long)connector_count->integer.value); | 171 | (unsigned long long)connector_count->integer.value); |
172 | for (i = 1; i < pkg->package.count; i++) { | 172 | for (i = 1; i < pkg->package.count; i++) { |
173 | union acpi_object *obj = &pkg->package.elements[i]; | 173 | union acpi_object *obj = &pkg->package.elements[i]; |
174 | union acpi_object *connector_id = | 174 | union acpi_object *connector_id = |
175 | &obj->package.elements[0]; | 175 | &obj->package.elements[0]; |
176 | union acpi_object *info = &obj->package.elements[1]; | 176 | union acpi_object *info = &obj->package.elements[1]; |
177 | DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", | 177 | DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", |
178 | (unsigned long long)connector_id->integer.value); | 178 | (unsigned long long)connector_id->integer.value); |
179 | DRM_DEBUG_DRIVER(" port id: %s\n", | 179 | DRM_DEBUG_DRIVER(" port id: %s\n", |
180 | intel_dsm_port_name(info->buffer.pointer[0])); | 180 | intel_dsm_port_name(info->buffer.pointer[0])); |
181 | DRM_DEBUG_DRIVER(" display mux info: %s\n", | 181 | DRM_DEBUG_DRIVER(" display mux info: %s\n", |
182 | intel_dsm_mux_type(info->buffer.pointer[1])); | 182 | intel_dsm_mux_type(info->buffer.pointer[1])); |
183 | DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", | 183 | DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", |
184 | intel_dsm_mux_type(info->buffer.pointer[2])); | 184 | intel_dsm_mux_type(info->buffer.pointer[2])); |
185 | DRM_DEBUG_DRIVER(" hpd mux info: %s\n", | 185 | DRM_DEBUG_DRIVER(" hpd mux info: %s\n", |
186 | intel_dsm_mux_type(info->buffer.pointer[3])); | 186 | intel_dsm_mux_type(info->buffer.pointer[3])); |
187 | } | 187 | } |
188 | } | 188 | } |
189 | 189 | ||
190 | out: | 190 | out: |
191 | kfree(output.pointer); | 191 | kfree(output.pointer); |
192 | } | 192 | } |
193 | 193 | ||
194 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) | 194 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) |
195 | { | 195 | { |
196 | acpi_handle dhandle; | 196 | acpi_handle dhandle; |
197 | int ret; | 197 | int ret; |
198 | 198 | ||
199 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 199 | dhandle = ACPI_HANDLE(&pdev->dev); |
200 | if (!dhandle) | 200 | if (!dhandle) |
201 | return false; | 201 | return false; |
202 | 202 | ||
203 | if (!acpi_has_method(dhandle, "_DSM")) { | 203 | if (!acpi_has_method(dhandle, "_DSM")) { |
204 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); | 204 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); |
205 | return false; | 205 | return false; |
206 | } | 206 | } |
207 | 207 | ||
208 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS); | 208 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS); |
209 | if (ret < 0) { | 209 | if (ret < 0) { |
210 | DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); | 210 | DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); |
211 | return false; | 211 | return false; |
212 | } | 212 | } |
213 | 213 | ||
214 | intel_dsm_priv.dhandle = dhandle; | 214 | intel_dsm_priv.dhandle = dhandle; |
215 | 215 | ||
216 | intel_dsm_platform_mux_info(); | 216 | intel_dsm_platform_mux_info(); |
217 | return true; | 217 | return true; |
218 | } | 218 | } |
219 | 219 | ||
220 | static bool intel_dsm_detect(void) | 220 | static bool intel_dsm_detect(void) |
221 | { | 221 | { |
222 | char acpi_method_name[255] = { 0 }; | 222 | char acpi_method_name[255] = { 0 }; |
223 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | 223 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; |
224 | struct pci_dev *pdev = NULL; | 224 | struct pci_dev *pdev = NULL; |
225 | bool has_dsm = false; | 225 | bool has_dsm = false; |
226 | int vga_count = 0; | 226 | int vga_count = 0; |
227 | 227 | ||
228 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 228 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
229 | vga_count++; | 229 | vga_count++; |
230 | has_dsm |= intel_dsm_pci_probe(pdev); | 230 | has_dsm |= intel_dsm_pci_probe(pdev); |
231 | } | 231 | } |
232 | 232 | ||
233 | if (vga_count == 2 && has_dsm) { | 233 | if (vga_count == 2 && has_dsm) { |
234 | acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); | 234 | acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); |
235 | DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", | 235 | DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", |
236 | acpi_method_name); | 236 | acpi_method_name); |
237 | return true; | 237 | return true; |
238 | } | 238 | } |
239 | 239 | ||
240 | return false; | 240 | return false; |
241 | } | 241 | } |
242 | 242 | ||
243 | void intel_register_dsm_handler(void) | 243 | void intel_register_dsm_handler(void) |
244 | { | 244 | { |
245 | if (!intel_dsm_detect()) | 245 | if (!intel_dsm_detect()) |
246 | return; | 246 | return; |
247 | } | 247 | } |
248 | 248 | ||
249 | void intel_unregister_dsm_handler(void) | 249 | void intel_unregister_dsm_handler(void) |
250 | { | 250 | { |
251 | } | 251 | } |
252 | 252 |
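Note (editor's annotation, not part of the commit): the one-line change in intel_dsm_pci_probe() above is representative of the whole commit. DEVICE_ACPI_HANDLE(dev) and ACPI_HANDLE(dev) already expanded to the same thing, so every call site is a one-for-one textual substitution. The pattern those call sites follow is simply "look up the ACPI companion handle of the struct device and bail out early if it, or the expected method, is missing"; a stand-alone sketch of that pattern (the foo_has_dsm() helper is hypothetical):

#include <linux/acpi.h>
#include <linux/pci.h>

static bool foo_has_dsm(struct pci_dev *pdev)
{
	/* ACPI_HANDLE() returns NULL when the device has no ACPI companion. */
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);

	if (!handle)
		return false;

	/* Same guard as intel_dsm_pci_probe(): require a _DSM method. */
	return acpi_has_method(handle, "_DSM");
}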
drivers/gpu/drm/i915/intel_opregion.c
1 | /* | 1 | /* |
2 | * Copyright 2008 Intel Corporation <hong.liu@intel.com> | 2 | * Copyright 2008 Intel Corporation <hong.liu@intel.com> |
3 | * Copyright 2008 Red Hat <mjg@redhat.com> | 3 | * Copyright 2008 Red Hat <mjg@redhat.com> |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining | 5 | * Permission is hereby granted, free of charge, to any person obtaining |
6 | * a copy of this software and associated documentation files (the | 6 | * a copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including | 7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, | 8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to | 9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to | 10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: | 11 | * the following conditions: |
12 | * | 12 | * |
13 | * The above copyright notice and this permission notice (including the | 13 | * The above copyright notice and this permission notice (including the |
14 | * next paragraph) shall be included in all copies or substantial | 14 | * next paragraph) shall be included in all copies or substantial |
15 | * portions of the Software. | 15 | * portions of the Software. |
16 | * | 16 | * |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | 19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE | 20 | * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE |
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | 21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
22 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | 22 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
23 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 23 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
24 | * SOFTWARE. | 24 | * SOFTWARE. |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
29 | 29 | ||
30 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
31 | #include <linux/acpi_io.h> | 31 | #include <linux/acpi_io.h> |
32 | #include <acpi/video.h> | 32 | #include <acpi/video.h> |
33 | 33 | ||
34 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
35 | #include <drm/i915_drm.h> | 35 | #include <drm/i915_drm.h> |
36 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
37 | #include "intel_drv.h" | 37 | #include "intel_drv.h" |
38 | 38 | ||
39 | #define PCI_ASLE 0xe4 | 39 | #define PCI_ASLE 0xe4 |
40 | #define PCI_ASLS 0xfc | 40 | #define PCI_ASLS 0xfc |
41 | 41 | ||
42 | #define OPREGION_HEADER_OFFSET 0 | 42 | #define OPREGION_HEADER_OFFSET 0 |
43 | #define OPREGION_ACPI_OFFSET 0x100 | 43 | #define OPREGION_ACPI_OFFSET 0x100 |
44 | #define ACPI_CLID 0x01ac /* current lid state indicator */ | 44 | #define ACPI_CLID 0x01ac /* current lid state indicator */ |
45 | #define ACPI_CDCK 0x01b0 /* current docking state indicator */ | 45 | #define ACPI_CDCK 0x01b0 /* current docking state indicator */ |
46 | #define OPREGION_SWSCI_OFFSET 0x200 | 46 | #define OPREGION_SWSCI_OFFSET 0x200 |
47 | #define OPREGION_ASLE_OFFSET 0x300 | 47 | #define OPREGION_ASLE_OFFSET 0x300 |
48 | #define OPREGION_VBT_OFFSET 0x400 | 48 | #define OPREGION_VBT_OFFSET 0x400 |
49 | 49 | ||
50 | #define OPREGION_SIGNATURE "IntelGraphicsMem" | 50 | #define OPREGION_SIGNATURE "IntelGraphicsMem" |
51 | #define MBOX_ACPI (1<<0) | 51 | #define MBOX_ACPI (1<<0) |
52 | #define MBOX_SWSCI (1<<1) | 52 | #define MBOX_SWSCI (1<<1) |
53 | #define MBOX_ASLE (1<<2) | 53 | #define MBOX_ASLE (1<<2) |
54 | 54 | ||
55 | struct opregion_header { | 55 | struct opregion_header { |
56 | u8 signature[16]; | 56 | u8 signature[16]; |
57 | u32 size; | 57 | u32 size; |
58 | u32 opregion_ver; | 58 | u32 opregion_ver; |
59 | u8 bios_ver[32]; | 59 | u8 bios_ver[32]; |
60 | u8 vbios_ver[16]; | 60 | u8 vbios_ver[16]; |
61 | u8 driver_ver[16]; | 61 | u8 driver_ver[16]; |
62 | u32 mboxes; | 62 | u32 mboxes; |
63 | u8 reserved[164]; | 63 | u8 reserved[164]; |
64 | } __attribute__((packed)); | 64 | } __attribute__((packed)); |
65 | 65 | ||
66 | /* OpRegion mailbox #1: public ACPI methods */ | 66 | /* OpRegion mailbox #1: public ACPI methods */ |
67 | struct opregion_acpi { | 67 | struct opregion_acpi { |
68 | u32 drdy; /* driver readiness */ | 68 | u32 drdy; /* driver readiness */ |
69 | u32 csts; /* notification status */ | 69 | u32 csts; /* notification status */ |
70 | u32 cevt; /* current event */ | 70 | u32 cevt; /* current event */ |
71 | u8 rsvd1[20]; | 71 | u8 rsvd1[20]; |
72 | u32 didl[8]; /* supported display devices ID list */ | 72 | u32 didl[8]; /* supported display devices ID list */ |
73 | u32 cpdl[8]; /* currently presented display list */ | 73 | u32 cpdl[8]; /* currently presented display list */ |
74 | u32 cadl[8]; /* currently active display list */ | 74 | u32 cadl[8]; /* currently active display list */ |
75 | u32 nadl[8]; /* next active devices list */ | 75 | u32 nadl[8]; /* next active devices list */ |
76 | u32 aslp; /* ASL sleep time-out */ | 76 | u32 aslp; /* ASL sleep time-out */ |
77 | u32 tidx; /* toggle table index */ | 77 | u32 tidx; /* toggle table index */ |
78 | u32 chpd; /* current hotplug enable indicator */ | 78 | u32 chpd; /* current hotplug enable indicator */ |
79 | u32 clid; /* current lid state*/ | 79 | u32 clid; /* current lid state*/ |
80 | u32 cdck; /* current docking state */ | 80 | u32 cdck; /* current docking state */ |
81 | u32 sxsw; /* Sx state resume */ | 81 | u32 sxsw; /* Sx state resume */ |
82 | u32 evts; /* ASL supported events */ | 82 | u32 evts; /* ASL supported events */ |
83 | u32 cnot; /* current OS notification */ | 83 | u32 cnot; /* current OS notification */ |
84 | u32 nrdy; /* driver status */ | 84 | u32 nrdy; /* driver status */ |
85 | u8 rsvd2[60]; | 85 | u8 rsvd2[60]; |
86 | } __attribute__((packed)); | 86 | } __attribute__((packed)); |
87 | 87 | ||
88 | /* OpRegion mailbox #2: SWSCI */ | 88 | /* OpRegion mailbox #2: SWSCI */ |
89 | struct opregion_swsci { | 89 | struct opregion_swsci { |
90 | u32 scic; /* SWSCI command|status|data */ | 90 | u32 scic; /* SWSCI command|status|data */ |
91 | u32 parm; /* command parameters */ | 91 | u32 parm; /* command parameters */ |
92 | u32 dslp; /* driver sleep time-out */ | 92 | u32 dslp; /* driver sleep time-out */ |
93 | u8 rsvd[244]; | 93 | u8 rsvd[244]; |
94 | } __attribute__((packed)); | 94 | } __attribute__((packed)); |
95 | 95 | ||
96 | /* OpRegion mailbox #3: ASLE */ | 96 | /* OpRegion mailbox #3: ASLE */ |
97 | struct opregion_asle { | 97 | struct opregion_asle { |
98 | u32 ardy; /* driver readiness */ | 98 | u32 ardy; /* driver readiness */ |
99 | u32 aslc; /* ASLE interrupt command */ | 99 | u32 aslc; /* ASLE interrupt command */ |
100 | u32 tche; /* technology enabled indicator */ | 100 | u32 tche; /* technology enabled indicator */ |
101 | u32 alsi; /* current ALS illuminance reading */ | 101 | u32 alsi; /* current ALS illuminance reading */ |
102 | u32 bclp; /* backlight brightness to set */ | 102 | u32 bclp; /* backlight brightness to set */ |
103 | u32 pfit; /* panel fitting state */ | 103 | u32 pfit; /* panel fitting state */ |
104 | u32 cblv; /* current brightness level */ | 104 | u32 cblv; /* current brightness level */ |
105 | u16 bclm[20]; /* backlight level duty cycle mapping table */ | 105 | u16 bclm[20]; /* backlight level duty cycle mapping table */ |
106 | u32 cpfm; /* current panel fitting mode */ | 106 | u32 cpfm; /* current panel fitting mode */ |
107 | u32 epfm; /* enabled panel fitting modes */ | 107 | u32 epfm; /* enabled panel fitting modes */ |
108 | u8 plut[74]; /* panel LUT and identifier */ | 108 | u8 plut[74]; /* panel LUT and identifier */ |
109 | u32 pfmb; /* PWM freq and min brightness */ | 109 | u32 pfmb; /* PWM freq and min brightness */ |
110 | u8 rsvd[102]; | 110 | u8 rsvd[102]; |
111 | } __attribute__((packed)); | 111 | } __attribute__((packed)); |
112 | 112 | ||
113 | /* Driver readiness indicator */ | 113 | /* Driver readiness indicator */ |
114 | #define ASLE_ARDY_READY (1 << 0) | 114 | #define ASLE_ARDY_READY (1 << 0) |
115 | #define ASLE_ARDY_NOT_READY (0 << 0) | 115 | #define ASLE_ARDY_NOT_READY (0 << 0) |
116 | 116 | ||
117 | /* ASLE irq request bits */ | 117 | /* ASLE irq request bits */ |
118 | #define ASLE_SET_ALS_ILLUM (1 << 0) | 118 | #define ASLE_SET_ALS_ILLUM (1 << 0) |
119 | #define ASLE_SET_BACKLIGHT (1 << 1) | 119 | #define ASLE_SET_BACKLIGHT (1 << 1) |
120 | #define ASLE_SET_PFIT (1 << 2) | 120 | #define ASLE_SET_PFIT (1 << 2) |
121 | #define ASLE_SET_PWM_FREQ (1 << 3) | 121 | #define ASLE_SET_PWM_FREQ (1 << 3) |
122 | #define ASLE_REQ_MSK 0xf | 122 | #define ASLE_REQ_MSK 0xf |
123 | 123 | ||
124 | /* response bits of ASLE irq request */ | 124 | /* response bits of ASLE irq request */ |
125 | #define ASLE_ALS_ILLUM_FAILED (1<<10) | 125 | #define ASLE_ALS_ILLUM_FAILED (1<<10) |
126 | #define ASLE_BACKLIGHT_FAILED (1<<12) | 126 | #define ASLE_BACKLIGHT_FAILED (1<<12) |
127 | #define ASLE_PFIT_FAILED (1<<14) | 127 | #define ASLE_PFIT_FAILED (1<<14) |
128 | #define ASLE_PWM_FREQ_FAILED (1<<16) | 128 | #define ASLE_PWM_FREQ_FAILED (1<<16) |
129 | 129 | ||
130 | /* Technology enabled indicator */ | 130 | /* Technology enabled indicator */ |
131 | #define ASLE_TCHE_ALS_EN (1 << 0) | 131 | #define ASLE_TCHE_ALS_EN (1 << 0) |
132 | #define ASLE_TCHE_BLC_EN (1 << 1) | 132 | #define ASLE_TCHE_BLC_EN (1 << 1) |
133 | #define ASLE_TCHE_PFIT_EN (1 << 2) | 133 | #define ASLE_TCHE_PFIT_EN (1 << 2) |
134 | #define ASLE_TCHE_PFMB_EN (1 << 3) | 134 | #define ASLE_TCHE_PFMB_EN (1 << 3) |
135 | 135 | ||
136 | /* ASLE backlight brightness to set */ | 136 | /* ASLE backlight brightness to set */ |
137 | #define ASLE_BCLP_VALID (1<<31) | 137 | #define ASLE_BCLP_VALID (1<<31) |
138 | #define ASLE_BCLP_MSK (~(1<<31)) | 138 | #define ASLE_BCLP_MSK (~(1<<31)) |
139 | 139 | ||
140 | /* ASLE panel fitting request */ | 140 | /* ASLE panel fitting request */ |
141 | #define ASLE_PFIT_VALID (1<<31) | 141 | #define ASLE_PFIT_VALID (1<<31) |
142 | #define ASLE_PFIT_CENTER (1<<0) | 142 | #define ASLE_PFIT_CENTER (1<<0) |
143 | #define ASLE_PFIT_STRETCH_TEXT (1<<1) | 143 | #define ASLE_PFIT_STRETCH_TEXT (1<<1) |
144 | #define ASLE_PFIT_STRETCH_GFX (1<<2) | 144 | #define ASLE_PFIT_STRETCH_GFX (1<<2) |
145 | 145 | ||
146 | /* PWM frequency and minimum brightness */ | 146 | /* PWM frequency and minimum brightness */ |
147 | #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) | 147 | #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) |
148 | #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) | 148 | #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) |
149 | #define ASLE_PFMB_PWM_MASK (0x7ffffe00) | 149 | #define ASLE_PFMB_PWM_MASK (0x7ffffe00) |
150 | #define ASLE_PFMB_PWM_VALID (1<<31) | 150 | #define ASLE_PFMB_PWM_VALID (1<<31) |
151 | 151 | ||
152 | #define ASLE_CBLV_VALID (1<<31) | 152 | #define ASLE_CBLV_VALID (1<<31) |
153 | 153 | ||
154 | #define ACPI_OTHER_OUTPUT (0<<8) | 154 | #define ACPI_OTHER_OUTPUT (0<<8) |
155 | #define ACPI_VGA_OUTPUT (1<<8) | 155 | #define ACPI_VGA_OUTPUT (1<<8) |
156 | #define ACPI_TV_OUTPUT (2<<8) | 156 | #define ACPI_TV_OUTPUT (2<<8) |
157 | #define ACPI_DIGITAL_OUTPUT (3<<8) | 157 | #define ACPI_DIGITAL_OUTPUT (3<<8) |
158 | #define ACPI_LVDS_OUTPUT (4<<8) | 158 | #define ACPI_LVDS_OUTPUT (4<<8) |
159 | 159 | ||
160 | #ifdef CONFIG_ACPI | 160 | #ifdef CONFIG_ACPI |
161 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | 161 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) |
162 | { | 162 | { |
163 | struct drm_i915_private *dev_priv = dev->dev_private; | 163 | struct drm_i915_private *dev_priv = dev->dev_private; |
164 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; | 164 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; |
165 | 165 | ||
166 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); | 166 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); |
167 | 167 | ||
168 | if (!(bclp & ASLE_BCLP_VALID)) | 168 | if (!(bclp & ASLE_BCLP_VALID)) |
169 | return ASLE_BACKLIGHT_FAILED; | 169 | return ASLE_BACKLIGHT_FAILED; |
170 | 170 | ||
171 | bclp &= ASLE_BCLP_MSK; | 171 | bclp &= ASLE_BCLP_MSK; |
172 | if (bclp > 255) | 172 | if (bclp > 255) |
173 | return ASLE_BACKLIGHT_FAILED; | 173 | return ASLE_BACKLIGHT_FAILED; |
174 | 174 | ||
175 | intel_panel_set_backlight(dev, bclp, 255); | 175 | intel_panel_set_backlight(dev, bclp, 255); |
176 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); | 176 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); |
177 | 177 | ||
178 | return 0; | 178 | return 0; |
179 | } | 179 | } |
180 | 180 | ||
181 | static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) | 181 | static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) |
182 | { | 182 | { |
183 | /* alsi is the current ALS reading in lux. 0 indicates below sensor | 183 | /* alsi is the current ALS reading in lux. 0 indicates below sensor |
184 | range, 0xffff indicates above sensor range. 1-0xfffe are valid */ | 184 | range, 0xffff indicates above sensor range. 1-0xfffe are valid */ |
185 | DRM_DEBUG_DRIVER("Illum is not supported\n"); | 185 | DRM_DEBUG_DRIVER("Illum is not supported\n"); |
186 | return ASLE_ALS_ILLUM_FAILED; | 186 | return ASLE_ALS_ILLUM_FAILED; |
187 | } | 187 | } |
188 | 188 | ||
189 | static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) | 189 | static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) |
190 | { | 190 | { |
191 | DRM_DEBUG_DRIVER("PWM freq is not supported\n"); | 191 | DRM_DEBUG_DRIVER("PWM freq is not supported\n"); |
192 | return ASLE_PWM_FREQ_FAILED; | 192 | return ASLE_PWM_FREQ_FAILED; |
193 | } | 193 | } |
194 | 194 | ||
195 | static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) | 195 | static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) |
196 | { | 196 | { |
197 | /* Panel fitting is currently controlled by the X code, so this is a | 197 | /* Panel fitting is currently controlled by the X code, so this is a |
198 | noop until modesetting support works fully */ | 198 | noop until modesetting support works fully */ |
199 | DRM_DEBUG_DRIVER("Pfit is not supported\n"); | 199 | DRM_DEBUG_DRIVER("Pfit is not supported\n"); |
200 | return ASLE_PFIT_FAILED; | 200 | return ASLE_PFIT_FAILED; |
201 | } | 201 | } |
202 | 202 | ||
203 | void intel_opregion_asle_intr(struct drm_device *dev) | 203 | void intel_opregion_asle_intr(struct drm_device *dev) |
204 | { | 204 | { |
205 | struct drm_i915_private *dev_priv = dev->dev_private; | 205 | struct drm_i915_private *dev_priv = dev->dev_private; |
206 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; | 206 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; |
207 | u32 asle_stat = 0; | 207 | u32 asle_stat = 0; |
208 | u32 asle_req; | 208 | u32 asle_req; |
209 | 209 | ||
210 | if (!asle) | 210 | if (!asle) |
211 | return; | 211 | return; |
212 | 212 | ||
213 | asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; | 213 | asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; |
214 | 214 | ||
215 | if (!asle_req) { | 215 | if (!asle_req) { |
216 | DRM_DEBUG_DRIVER("non asle set request??\n"); | 216 | DRM_DEBUG_DRIVER("non asle set request??\n"); |
217 | return; | 217 | return; |
218 | } | 218 | } |
219 | 219 | ||
220 | if (asle_req & ASLE_SET_ALS_ILLUM) | 220 | if (asle_req & ASLE_SET_ALS_ILLUM) |
221 | asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); | 221 | asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); |
222 | 222 | ||
223 | if (asle_req & ASLE_SET_BACKLIGHT) | 223 | if (asle_req & ASLE_SET_BACKLIGHT) |
224 | asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); | 224 | asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); |
225 | 225 | ||
226 | if (asle_req & ASLE_SET_PFIT) | 226 | if (asle_req & ASLE_SET_PFIT) |
227 | asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); | 227 | asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); |
228 | 228 | ||
229 | if (asle_req & ASLE_SET_PWM_FREQ) | 229 | if (asle_req & ASLE_SET_PWM_FREQ) |
230 | asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); | 230 | asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); |
231 | 231 | ||
232 | iowrite32(asle_stat, &asle->aslc); | 232 | iowrite32(asle_stat, &asle->aslc); |
233 | } | 233 | } |
234 | 234 | ||
235 | #define ACPI_EV_DISPLAY_SWITCH (1<<0) | 235 | #define ACPI_EV_DISPLAY_SWITCH (1<<0) |
236 | #define ACPI_EV_LID (1<<1) | 236 | #define ACPI_EV_LID (1<<1) |
237 | #define ACPI_EV_DOCK (1<<2) | 237 | #define ACPI_EV_DOCK (1<<2) |
238 | 238 | ||
239 | static struct intel_opregion *system_opregion; | 239 | static struct intel_opregion *system_opregion; |
240 | 240 | ||
241 | static int intel_opregion_video_event(struct notifier_block *nb, | 241 | static int intel_opregion_video_event(struct notifier_block *nb, |
242 | unsigned long val, void *data) | 242 | unsigned long val, void *data) |
243 | { | 243 | { |
244 | /* The only video events relevant to opregion are 0x80. These indicate | 244 | /* The only video events relevant to opregion are 0x80. These indicate |
245 | either a docking event, lid switch or display switch request. In | 245 | either a docking event, lid switch or display switch request. In |
246 | Linux, these are handled by the dock, button and video drivers. | 246 | Linux, these are handled by the dock, button and video drivers. |
247 | */ | 247 | */ |
248 | 248 | ||
249 | struct opregion_acpi __iomem *acpi; | 249 | struct opregion_acpi __iomem *acpi; |
250 | struct acpi_bus_event *event = data; | 250 | struct acpi_bus_event *event = data; |
251 | int ret = NOTIFY_OK; | 251 | int ret = NOTIFY_OK; |
252 | 252 | ||
253 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) | 253 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) |
254 | return NOTIFY_DONE; | 254 | return NOTIFY_DONE; |
255 | 255 | ||
256 | if (!system_opregion) | 256 | if (!system_opregion) |
257 | return NOTIFY_DONE; | 257 | return NOTIFY_DONE; |
258 | 258 | ||
259 | acpi = system_opregion->acpi; | 259 | acpi = system_opregion->acpi; |
260 | 260 | ||
261 | if (event->type == 0x80 && | 261 | if (event->type == 0x80 && |
262 | (ioread32(&acpi->cevt) & 1) == 0) | 262 | (ioread32(&acpi->cevt) & 1) == 0) |
263 | ret = NOTIFY_BAD; | 263 | ret = NOTIFY_BAD; |
264 | 264 | ||
265 | iowrite32(0, &acpi->csts); | 265 | iowrite32(0, &acpi->csts); |
266 | 266 | ||
267 | return ret; | 267 | return ret; |
268 | } | 268 | } |
269 | 269 | ||
270 | static struct notifier_block intel_opregion_notifier = { | 270 | static struct notifier_block intel_opregion_notifier = { |
271 | .notifier_call = intel_opregion_video_event, | 271 | .notifier_call = intel_opregion_video_event, |
272 | }; | 272 | }; |
273 | 273 | ||
274 | /* | 274 | /* |
275 | * Initialise the DIDL field in opregion. This passes a list of devices to | 275 | * Initialise the DIDL field in opregion. This passes a list of devices to |
276 | * the firmware. Values are defined by section B.4.2 of the ACPI specification | 276 | * the firmware. Values are defined by section B.4.2 of the ACPI specification |
277 | * (version 3) | 277 | * (version 3) |
278 | */ | 278 | */ |
279 | 279 | ||
280 | static void intel_didl_outputs(struct drm_device *dev) | 280 | static void intel_didl_outputs(struct drm_device *dev) |
281 | { | 281 | { |
282 | struct drm_i915_private *dev_priv = dev->dev_private; | 282 | struct drm_i915_private *dev_priv = dev->dev_private; |
283 | struct intel_opregion *opregion = &dev_priv->opregion; | 283 | struct intel_opregion *opregion = &dev_priv->opregion; |
284 | struct drm_connector *connector; | 284 | struct drm_connector *connector; |
285 | acpi_handle handle; | 285 | acpi_handle handle; |
286 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; | 286 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; |
287 | unsigned long long device_id; | 287 | unsigned long long device_id; |
288 | acpi_status status; | 288 | acpi_status status; |
289 | u32 temp; | 289 | u32 temp; |
290 | int i = 0; | 290 | int i = 0; |
291 | 291 | ||
292 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | 292 | handle = ACPI_HANDLE(&dev->pdev->dev); |
293 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) | 293 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) |
294 | return; | 294 | return; |
295 | 295 | ||
296 | if (acpi_is_video_device(handle)) | 296 | if (acpi_is_video_device(handle)) |
297 | acpi_video_bus = acpi_dev; | 297 | acpi_video_bus = acpi_dev; |
298 | else { | 298 | else { |
299 | list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { | 299 | list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { |
300 | if (acpi_is_video_device(acpi_cdev->handle)) { | 300 | if (acpi_is_video_device(acpi_cdev->handle)) { |
301 | acpi_video_bus = acpi_cdev; | 301 | acpi_video_bus = acpi_cdev; |
302 | break; | 302 | break; |
303 | } | 303 | } |
304 | } | 304 | } |
305 | } | 305 | } |
306 | 306 | ||
307 | if (!acpi_video_bus) { | 307 | if (!acpi_video_bus) { |
308 | pr_warn("No ACPI video bus found\n"); | 308 | pr_warn("No ACPI video bus found\n"); |
309 | return; | 309 | return; |
310 | } | 310 | } |
311 | 311 | ||
312 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { | 312 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { |
313 | if (i >= 8) { | 313 | if (i >= 8) { |
314 | dev_dbg(&dev->pdev->dev, | 314 | dev_dbg(&dev->pdev->dev, |
315 | "More than 8 outputs detected via ACPI\n"); | 315 | "More than 8 outputs detected via ACPI\n"); |
316 | return; | 316 | return; |
317 | } | 317 | } |
318 | status = | 318 | status = |
319 | acpi_evaluate_integer(acpi_cdev->handle, "_ADR", | 319 | acpi_evaluate_integer(acpi_cdev->handle, "_ADR", |
320 | NULL, &device_id); | 320 | NULL, &device_id); |
321 | if (ACPI_SUCCESS(status)) { | 321 | if (ACPI_SUCCESS(status)) { |
322 | if (!device_id) | 322 | if (!device_id) |
323 | goto blind_set; | 323 | goto blind_set; |
324 | iowrite32((u32)(device_id & 0x0f0f), | 324 | iowrite32((u32)(device_id & 0x0f0f), |
325 | &opregion->acpi->didl[i]); | 325 | &opregion->acpi->didl[i]); |
326 | i++; | 326 | i++; |
327 | } | 327 | } |
328 | } | 328 | } |
329 | 329 | ||
330 | end: | 330 | end: |
331 | /* If fewer than 8 outputs, the list must be null terminated */ | 331 | /* If fewer than 8 outputs, the list must be null terminated */ |
332 | if (i < 8) | 332 | if (i < 8) |
333 | iowrite32(0, &opregion->acpi->didl[i]); | 333 | iowrite32(0, &opregion->acpi->didl[i]); |
334 | return; | 334 | return; |
335 | 335 | ||
336 | blind_set: | 336 | blind_set: |
337 | i = 0; | 337 | i = 0; |
338 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 338 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
339 | int output_type = ACPI_OTHER_OUTPUT; | 339 | int output_type = ACPI_OTHER_OUTPUT; |
340 | if (i >= 8) { | 340 | if (i >= 8) { |
341 | dev_dbg(&dev->pdev->dev, | 341 | dev_dbg(&dev->pdev->dev, |
342 | "More than 8 outputs in connector list\n"); | 342 | "More than 8 outputs in connector list\n"); |
343 | return; | 343 | return; |
344 | } | 344 | } |
345 | switch (connector->connector_type) { | 345 | switch (connector->connector_type) { |
346 | case DRM_MODE_CONNECTOR_VGA: | 346 | case DRM_MODE_CONNECTOR_VGA: |
347 | case DRM_MODE_CONNECTOR_DVIA: | 347 | case DRM_MODE_CONNECTOR_DVIA: |
348 | output_type = ACPI_VGA_OUTPUT; | 348 | output_type = ACPI_VGA_OUTPUT; |
349 | break; | 349 | break; |
350 | case DRM_MODE_CONNECTOR_Composite: | 350 | case DRM_MODE_CONNECTOR_Composite: |
351 | case DRM_MODE_CONNECTOR_SVIDEO: | 351 | case DRM_MODE_CONNECTOR_SVIDEO: |
352 | case DRM_MODE_CONNECTOR_Component: | 352 | case DRM_MODE_CONNECTOR_Component: |
353 | case DRM_MODE_CONNECTOR_9PinDIN: | 353 | case DRM_MODE_CONNECTOR_9PinDIN: |
354 | output_type = ACPI_TV_OUTPUT; | 354 | output_type = ACPI_TV_OUTPUT; |
355 | break; | 355 | break; |
356 | case DRM_MODE_CONNECTOR_DVII: | 356 | case DRM_MODE_CONNECTOR_DVII: |
357 | case DRM_MODE_CONNECTOR_DVID: | 357 | case DRM_MODE_CONNECTOR_DVID: |
358 | case DRM_MODE_CONNECTOR_DisplayPort: | 358 | case DRM_MODE_CONNECTOR_DisplayPort: |
359 | case DRM_MODE_CONNECTOR_HDMIA: | 359 | case DRM_MODE_CONNECTOR_HDMIA: |
360 | case DRM_MODE_CONNECTOR_HDMIB: | 360 | case DRM_MODE_CONNECTOR_HDMIB: |
361 | output_type = ACPI_DIGITAL_OUTPUT; | 361 | output_type = ACPI_DIGITAL_OUTPUT; |
362 | break; | 362 | break; |
363 | case DRM_MODE_CONNECTOR_LVDS: | 363 | case DRM_MODE_CONNECTOR_LVDS: |
364 | output_type = ACPI_LVDS_OUTPUT; | 364 | output_type = ACPI_LVDS_OUTPUT; |
365 | break; | 365 | break; |
366 | } | 366 | } |
367 | temp = ioread32(&opregion->acpi->didl[i]); | 367 | temp = ioread32(&opregion->acpi->didl[i]); |
368 | iowrite32(temp | (1<<31) | output_type | i, | 368 | iowrite32(temp | (1<<31) | output_type | i, |
369 | &opregion->acpi->didl[i]); | 369 | &opregion->acpi->didl[i]); |
370 | i++; | 370 | i++; |
371 | } | 371 | } |
372 | goto end; | 372 | goto end; |
373 | } | 373 | } |
374 | 374 | ||
375 | static void intel_setup_cadls(struct drm_device *dev) | 375 | static void intel_setup_cadls(struct drm_device *dev) |
376 | { | 376 | { |
377 | struct drm_i915_private *dev_priv = dev->dev_private; | 377 | struct drm_i915_private *dev_priv = dev->dev_private; |
378 | struct intel_opregion *opregion = &dev_priv->opregion; | 378 | struct intel_opregion *opregion = &dev_priv->opregion; |
379 | int i = 0; | 379 | int i = 0; |
380 | u32 disp_id; | 380 | u32 disp_id; |
381 | 381 | ||
382 | /* Initialize the CADL field by duplicating the DIDL values. | 382 | /* Initialize the CADL field by duplicating the DIDL values. |
383 | * Technically, this is not always correct, as display outputs may exist | 383 | * Technically, this is not always correct, as display outputs may exist |
384 | * but not be active. This initialization is necessary for some Clevo | 384 | * but not be active. This initialization is necessary for some Clevo |
385 | * laptops that check this field before processing the brightness and | 385 | * laptops that check this field before processing the brightness and |
386 | * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if | 386 | * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if |
387 | * there are fewer than eight devices. */ | 387 | * there are fewer than eight devices. */ |
388 | do { | 388 | do { |
389 | disp_id = ioread32(&opregion->acpi->didl[i]); | 389 | disp_id = ioread32(&opregion->acpi->didl[i]); |
390 | iowrite32(disp_id, &opregion->acpi->cadl[i]); | 390 | iowrite32(disp_id, &opregion->acpi->cadl[i]); |
391 | } while (++i < 8 && disp_id != 0); | 391 | } while (++i < 8 && disp_id != 0); |
392 | } | 392 | } |
393 | 393 | ||
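The do/while loop in intel_setup_cadls() above copies DIDL entries into CADL until either eight slots are filled or a zero terminator is reached. A minimal, standalone sketch of that convention (the helper name is invented, and plain arrays stand in for the __iomem DIDL/CADL fields):

#include <linux/types.h>

/* Mirror the copy above: stop after eight entries or at the first zero,
 * which also copies the terminating zero itself. */
static void copy_didl_to_cadl(const u32 didl[8], u32 cadl[8])
{
	int i = 0;
	u32 id;

	do {
		id = didl[i];
		cadl[i] = id;
	} while (++i < 8 && id != 0);
}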
394 | void intel_opregion_init(struct drm_device *dev) | 394 | void intel_opregion_init(struct drm_device *dev) |
395 | { | 395 | { |
396 | struct drm_i915_private *dev_priv = dev->dev_private; | 396 | struct drm_i915_private *dev_priv = dev->dev_private; |
397 | struct intel_opregion *opregion = &dev_priv->opregion; | 397 | struct intel_opregion *opregion = &dev_priv->opregion; |
398 | 398 | ||
399 | if (!opregion->header) | 399 | if (!opregion->header) |
400 | return; | 400 | return; |
401 | 401 | ||
402 | if (opregion->acpi) { | 402 | if (opregion->acpi) { |
403 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 403 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
404 | intel_didl_outputs(dev); | 404 | intel_didl_outputs(dev); |
405 | intel_setup_cadls(dev); | 405 | intel_setup_cadls(dev); |
406 | } | 406 | } |
407 | 407 | ||
408 | /* Notify BIOS we are ready to handle ACPI video ext notifs. | 408 | /* Notify BIOS we are ready to handle ACPI video ext notifs. |
409 | * Right now, all the events are handled by the ACPI video module. | 409 | * Right now, all the events are handled by the ACPI video module. |
410 | * We don't actually need to do anything with them. */ | 410 | * We don't actually need to do anything with them. */ |
411 | iowrite32(0, &opregion->acpi->csts); | 411 | iowrite32(0, &opregion->acpi->csts); |
412 | iowrite32(1, &opregion->acpi->drdy); | 412 | iowrite32(1, &opregion->acpi->drdy); |
413 | 413 | ||
414 | system_opregion = opregion; | 414 | system_opregion = opregion; |
415 | register_acpi_notifier(&intel_opregion_notifier); | 415 | register_acpi_notifier(&intel_opregion_notifier); |
416 | } | 416 | } |
417 | 417 | ||
418 | if (opregion->asle) { | 418 | if (opregion->asle) { |
419 | iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche); | 419 | iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche); |
420 | iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy); | 420 | iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy); |
421 | } | 421 | } |
422 | } | 422 | } |
423 | 423 | ||
424 | void intel_opregion_fini(struct drm_device *dev) | 424 | void intel_opregion_fini(struct drm_device *dev) |
425 | { | 425 | { |
426 | struct drm_i915_private *dev_priv = dev->dev_private; | 426 | struct drm_i915_private *dev_priv = dev->dev_private; |
427 | struct intel_opregion *opregion = &dev_priv->opregion; | 427 | struct intel_opregion *opregion = &dev_priv->opregion; |
428 | 428 | ||
429 | if (!opregion->header) | 429 | if (!opregion->header) |
430 | return; | 430 | return; |
431 | 431 | ||
432 | if (opregion->asle) | 432 | if (opregion->asle) |
433 | iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); | 433 | iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); |
434 | 434 | ||
435 | if (opregion->acpi) { | 435 | if (opregion->acpi) { |
436 | iowrite32(0, &opregion->acpi->drdy); | 436 | iowrite32(0, &opregion->acpi->drdy); |
437 | 437 | ||
438 | system_opregion = NULL; | 438 | system_opregion = NULL; |
439 | unregister_acpi_notifier(&intel_opregion_notifier); | 439 | unregister_acpi_notifier(&intel_opregion_notifier); |
440 | } | 440 | } |
441 | 441 | ||
442 | /* just clear all opregion memory pointers now */ | 442 | /* just clear all opregion memory pointers now */ |
443 | iounmap(opregion->header); | 443 | iounmap(opregion->header); |
444 | opregion->header = NULL; | 444 | opregion->header = NULL; |
445 | opregion->acpi = NULL; | 445 | opregion->acpi = NULL; |
446 | opregion->swsci = NULL; | 446 | opregion->swsci = NULL; |
447 | opregion->asle = NULL; | 447 | opregion->asle = NULL; |
448 | opregion->vbt = NULL; | 448 | opregion->vbt = NULL; |
449 | } | 449 | } |
450 | #endif | 450 | #endif |
451 | 451 | ||
452 | int intel_opregion_setup(struct drm_device *dev) | 452 | int intel_opregion_setup(struct drm_device *dev) |
453 | { | 453 | { |
454 | struct drm_i915_private *dev_priv = dev->dev_private; | 454 | struct drm_i915_private *dev_priv = dev->dev_private; |
455 | struct intel_opregion *opregion = &dev_priv->opregion; | 455 | struct intel_opregion *opregion = &dev_priv->opregion; |
456 | void __iomem *base; | 456 | void __iomem *base; |
457 | u32 asls, mboxes; | 457 | u32 asls, mboxes; |
458 | char buf[sizeof(OPREGION_SIGNATURE)]; | 458 | char buf[sizeof(OPREGION_SIGNATURE)]; |
459 | int err = 0; | 459 | int err = 0; |
460 | 460 | ||
461 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); | 461 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); |
462 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); | 462 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); |
463 | if (asls == 0) { | 463 | if (asls == 0) { |
464 | DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); | 464 | DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); |
465 | return -ENOTSUPP; | 465 | return -ENOTSUPP; |
466 | } | 466 | } |
467 | 467 | ||
468 | base = acpi_os_ioremap(asls, OPREGION_SIZE); | 468 | base = acpi_os_ioremap(asls, OPREGION_SIZE); |
469 | if (!base) | 469 | if (!base) |
470 | return -ENOMEM; | 470 | return -ENOMEM; |
471 | 471 | ||
472 | memcpy_fromio(buf, base, sizeof(buf)); | 472 | memcpy_fromio(buf, base, sizeof(buf)); |
473 | 473 | ||
474 | if (memcmp(buf, OPREGION_SIGNATURE, 16)) { | 474 | if (memcmp(buf, OPREGION_SIGNATURE, 16)) { |
475 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); | 475 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); |
476 | err = -EINVAL; | 476 | err = -EINVAL; |
477 | goto err_out; | 477 | goto err_out; |
478 | } | 478 | } |
479 | opregion->header = base; | 479 | opregion->header = base; |
480 | opregion->vbt = base + OPREGION_VBT_OFFSET; | 480 | opregion->vbt = base + OPREGION_VBT_OFFSET; |
481 | 481 | ||
482 | opregion->lid_state = base + ACPI_CLID; | 482 | opregion->lid_state = base + ACPI_CLID; |
483 | 483 | ||
484 | mboxes = ioread32(&opregion->header->mboxes); | 484 | mboxes = ioread32(&opregion->header->mboxes); |
485 | if (mboxes & MBOX_ACPI) { | 485 | if (mboxes & MBOX_ACPI) { |
486 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 486 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
487 | opregion->acpi = base + OPREGION_ACPI_OFFSET; | 487 | opregion->acpi = base + OPREGION_ACPI_OFFSET; |
488 | } | 488 | } |
489 | 489 | ||
490 | if (mboxes & MBOX_SWSCI) { | 490 | if (mboxes & MBOX_SWSCI) { |
491 | DRM_DEBUG_DRIVER("SWSCI supported\n"); | 491 | DRM_DEBUG_DRIVER("SWSCI supported\n"); |
492 | opregion->swsci = base + OPREGION_SWSCI_OFFSET; | 492 | opregion->swsci = base + OPREGION_SWSCI_OFFSET; |
493 | } | 493 | } |
494 | if (mboxes & MBOX_ASLE) { | 494 | if (mboxes & MBOX_ASLE) { |
495 | DRM_DEBUG_DRIVER("ASLE supported\n"); | 495 | DRM_DEBUG_DRIVER("ASLE supported\n"); |
496 | opregion->asle = base + OPREGION_ASLE_OFFSET; | 496 | opregion->asle = base + OPREGION_ASLE_OFFSET; |
497 | 497 | ||
498 | iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); | 498 | iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); |
499 | } | 499 | } |
500 | 500 | ||
501 | return 0; | 501 | return 0; |
502 | 502 | ||
503 | err_out: | 503 | err_out: |
504 | iounmap(base); | 504 | iounmap(base); |
505 | return err; | 505 | return err; |
506 | } | 506 | } |
507 | 507 |
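intel_opregion_setup() above records a mailbox pointer only when the corresponding presence bit is set in the header's mboxes field. A hedged restatement of that check in isolation, reusing the MBOX_* flags and OPREGION_*_OFFSET constants from the code above with an invented helper name:

#include <linux/types.h>

/* Return a mailbox pointer only when the OpRegion header advertises it. */
static void __iomem *opregion_mbox(void __iomem *base, u32 mboxes,
				   u32 flag, unsigned long offset)
{
	return (mboxes & flag) ? base + offset : NULL;
}

Used in the setup path this would read opregion->acpi = opregion_mbox(base, mboxes, MBOX_ACPI, OPREGION_ACPI_OFFSET), with the SWSCI and ASLE mailboxes handled the same way.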
drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
1 | /* | 1 | /* |
2 | * Copyright 2011 Red Hat Inc. | 2 | * Copyright 2011 Red Hat Inc. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice shall be included in | 11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. | 12 | * all copies or substantial portions of the Software. |
13 | * | 13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/option.h> | 25 | #include <core/option.h> |
26 | 26 | ||
27 | #include <subdev/i2c.h> | 27 | #include <subdev/i2c.h> |
28 | #include <subdev/mxm.h> | 28 | #include <subdev/mxm.h> |
29 | #include <subdev/bios.h> | 29 | #include <subdev/bios.h> |
30 | #include <subdev/bios/mxm.h> | 30 | #include <subdev/bios/mxm.h> |
31 | 31 | ||
32 | #include "mxms.h" | 32 | #include "mxms.h" |
33 | 33 | ||
34 | static bool | 34 | static bool |
35 | mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr, | 35 | mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr, |
36 | u8 offset, u8 size, u8 *data) | 36 | u8 offset, u8 size, u8 *data) |
37 | { | 37 | { |
38 | struct i2c_msg msgs[] = { | 38 | struct i2c_msg msgs[] = { |
39 | { .addr = addr, .flags = 0, .len = 1, .buf = &offset }, | 39 | { .addr = addr, .flags = 0, .len = 1, .buf = &offset }, |
40 | { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, }, | 40 | { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, }, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | return i2c_transfer(&i2c->adapter, msgs, 2) == 2; | 43 | return i2c_transfer(&i2c->adapter, msgs, 2) == 2; |
44 | } | 44 | } |
45 | 45 | ||
46 | static bool | 46 | static bool |
47 | mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version) | 47 | mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version) |
48 | { | 48 | { |
49 | struct nouveau_bios *bios = nouveau_bios(mxm); | 49 | struct nouveau_bios *bios = nouveau_bios(mxm); |
50 | struct nouveau_i2c *i2c = nouveau_i2c(mxm); | 50 | struct nouveau_i2c *i2c = nouveau_i2c(mxm); |
51 | struct nouveau_i2c_port *port = NULL; | 51 | struct nouveau_i2c_port *port = NULL; |
52 | u8 i2cidx, mxms[6], addr, size; | 52 | u8 i2cidx, mxms[6], addr, size; |
53 | 53 | ||
54 | i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f; | 54 | i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f; |
55 | if (i2cidx < 0x0f) | 55 | if (i2cidx < 0x0f) |
56 | port = i2c->find(i2c, i2cidx); | 56 | port = i2c->find(i2c, i2cidx); |
57 | if (!port) | 57 | if (!port) |
58 | return false; | 58 | return false; |
59 | 59 | ||
60 | addr = 0x54; | 60 | addr = 0x54; |
61 | if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) { | 61 | if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) { |
62 | addr = 0x56; | 62 | addr = 0x56; |
63 | if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) | 63 | if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) |
64 | return false; | 64 | return false; |
65 | } | 65 | } |
66 | 66 | ||
67 | mxm->mxms = mxms; | 67 | mxm->mxms = mxms; |
68 | size = mxms_headerlen(mxm) + mxms_structlen(mxm); | 68 | size = mxms_headerlen(mxm) + mxms_structlen(mxm); |
69 | mxm->mxms = kmalloc(size, GFP_KERNEL); | 69 | mxm->mxms = kmalloc(size, GFP_KERNEL); |
70 | 70 | ||
71 | if (mxm->mxms && | 71 | if (mxm->mxms && |
72 | mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms)) | 72 | mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms)) |
73 | return true; | 73 | return true; |
74 | 74 | ||
75 | kfree(mxm->mxms); | 75 | kfree(mxm->mxms); |
76 | mxm->mxms = NULL; | 76 | mxm->mxms = NULL; |
77 | return false; | 77 | return false; |
78 | } | 78 | } |
79 | 79 | ||
80 | #if defined(CONFIG_ACPI) | 80 | #if defined(CONFIG_ACPI) |
81 | static bool | 81 | static bool |
82 | mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) | 82 | mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) |
83 | { | 83 | { |
84 | struct nouveau_device *device = nv_device(mxm); | 84 | struct nouveau_device *device = nv_device(mxm); |
85 | static char muid[] = { | 85 | static char muid[] = { |
86 | 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, | 86 | 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, |
87 | 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65 | 87 | 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65 |
88 | }; | 88 | }; |
89 | u32 mxms_args[] = { 0x00000000 }; | 89 | u32 mxms_args[] = { 0x00000000 }; |
90 | union acpi_object args[4] = { | 90 | union acpi_object args[4] = { |
91 | /* _DSM MUID */ | 91 | /* _DSM MUID */ |
92 | { .buffer.type = 3, | 92 | { .buffer.type = 3, |
93 | .buffer.length = sizeof(muid), | 93 | .buffer.length = sizeof(muid), |
94 | .buffer.pointer = muid, | 94 | .buffer.pointer = muid, |
95 | }, | 95 | }, |
96 | /* spec says this can be zero to mean "highest revision", but | 96 | /* spec says this can be zero to mean "highest revision", but |
97 | * of course there's at least one bios out there which fails | 97 | * of course there's at least one bios out there which fails |
98 | * unless you pass in exactly the version it supports.. | 98 | * unless you pass in exactly the version it supports.. |
99 | */ | 99 | */ |
100 | { .integer.type = ACPI_TYPE_INTEGER, | 100 | { .integer.type = ACPI_TYPE_INTEGER, |
101 | .integer.value = (version & 0xf0) << 4 | (version & 0x0f), | 101 | .integer.value = (version & 0xf0) << 4 | (version & 0x0f), |
102 | }, | 102 | }, |
103 | /* MXMS function */ | 103 | /* MXMS function */ |
104 | { .integer.type = ACPI_TYPE_INTEGER, | 104 | { .integer.type = ACPI_TYPE_INTEGER, |
105 | .integer.value = 0x00000010, | 105 | .integer.value = 0x00000010, |
106 | }, | 106 | }, |
107 | /* Pointer to MXMS arguments */ | 107 | /* Pointer to MXMS arguments */ |
108 | { .buffer.type = ACPI_TYPE_BUFFER, | 108 | { .buffer.type = ACPI_TYPE_BUFFER, |
109 | .buffer.length = sizeof(mxms_args), | 109 | .buffer.length = sizeof(mxms_args), |
110 | .buffer.pointer = (char *)mxms_args, | 110 | .buffer.pointer = (char *)mxms_args, |
111 | }, | 111 | }, |
112 | }; | 112 | }; |
113 | struct acpi_object_list list = { ARRAY_SIZE(args), args }; | 113 | struct acpi_object_list list = { ARRAY_SIZE(args), args }; |
114 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; | 114 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; |
115 | union acpi_object *obj; | 115 | union acpi_object *obj; |
116 | acpi_handle handle; | 116 | acpi_handle handle; |
117 | int ret; | 117 | int ret; |
118 | 118 | ||
119 | handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); | 119 | handle = ACPI_HANDLE(&device->pdev->dev); |
120 | if (!handle) | 120 | if (!handle) |
121 | return false; | 121 | return false; |
122 | 122 | ||
123 | ret = acpi_evaluate_object(handle, "_DSM", &list, &retn); | 123 | ret = acpi_evaluate_object(handle, "_DSM", &list, &retn); |
124 | if (ret) { | 124 | if (ret) { |
125 | nv_debug(mxm, "DSM MXMS failed: %d\n", ret); | 125 | nv_debug(mxm, "DSM MXMS failed: %d\n", ret); |
126 | return false; | 126 | return false; |
127 | } | 127 | } |
128 | 128 | ||
129 | obj = retn.pointer; | 129 | obj = retn.pointer; |
130 | if (obj->type == ACPI_TYPE_BUFFER) { | 130 | if (obj->type == ACPI_TYPE_BUFFER) { |
131 | mxm->mxms = kmemdup(obj->buffer.pointer, | 131 | mxm->mxms = kmemdup(obj->buffer.pointer, |
132 | obj->buffer.length, GFP_KERNEL); | 132 | obj->buffer.length, GFP_KERNEL); |
133 | } else | 133 | } else |
134 | if (obj->type == ACPI_TYPE_INTEGER) { | 134 | if (obj->type == ACPI_TYPE_INTEGER) { |
135 | nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value); | 135 | nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value); |
136 | } | 136 | } |
137 | 137 | ||
138 | kfree(obj); | 138 | kfree(obj); |
139 | return mxm->mxms != NULL; | 139 | return mxm->mxms != NULL; |
140 | } | 140 | } |
141 | #endif | 141 | #endif |
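The only change in this file is the handle lookup in mxm_shadow_dsm() above, where DEVICE_ACPI_HANDLE() becomes ACPI_HANDLE(). A minimal sketch of the pattern the commit standardizes on (the helper name is invented, not part of the driver): look up the ACPI handle bound to the struct device and bail out when firmware provides none, before evaluating any methods against it.

#include <linux/acpi.h>
#include <linux/pci.h>

/* True when firmware bound an ACPI companion to this PCI device. */
static bool pdev_has_acpi_handle(struct pci_dev *pdev)
{
	return ACPI_HANDLE(&pdev->dev) != NULL;
}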
142 | 142 | ||
143 | #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) | 143 | #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) |
144 | 144 | ||
145 | #define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0" | 145 | #define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0" |
146 | 146 | ||
147 | static u8 | 147 | static u8 |
148 | wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version) | 148 | wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version) |
149 | { | 149 | { |
150 | u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 }; | 150 | u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 }; |
151 | struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args }; | 151 | struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args }; |
152 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; | 152 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; |
153 | union acpi_object *obj; | 153 | union acpi_object *obj; |
154 | acpi_status status; | 154 | acpi_status status; |
155 | 155 | ||
156 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); | 156 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); |
157 | if (ACPI_FAILURE(status)) { | 157 | if (ACPI_FAILURE(status)) { |
158 | nv_debug(mxm, "WMMX MXMI returned %d\n", status); | 158 | nv_debug(mxm, "WMMX MXMI returned %d\n", status); |
159 | return 0x00; | 159 | return 0x00; |
160 | } | 160 | } |
161 | 161 | ||
162 | obj = retn.pointer; | 162 | obj = retn.pointer; |
163 | if (obj->type == ACPI_TYPE_INTEGER) { | 163 | if (obj->type == ACPI_TYPE_INTEGER) { |
164 | version = obj->integer.value; | 164 | version = obj->integer.value; |
165 | nv_debug(mxm, "WMMX MXMI version %d.%d\n", | 165 | nv_debug(mxm, "WMMX MXMI version %d.%d\n", |
166 | (version >> 4), version & 0x0f); | 166 | (version >> 4), version & 0x0f); |
167 | } else { | 167 | } else { |
168 | version = 0; | 168 | version = 0; |
169 | nv_debug(mxm, "WMMX MXMI returned non-integer\n"); | 169 | nv_debug(mxm, "WMMX MXMI returned non-integer\n"); |
170 | } | 170 | } |
171 | 171 | ||
172 | kfree(obj); | 172 | kfree(obj); |
173 | return version; | 173 | return version; |
174 | } | 174 | } |
175 | 175 | ||
176 | static bool | 176 | static bool |
177 | mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version) | 177 | mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version) |
178 | { | 178 | { |
179 | u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 }; | 179 | u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 }; |
180 | struct acpi_buffer args = { sizeof(mxms_args), mxms_args }; | 180 | struct acpi_buffer args = { sizeof(mxms_args), mxms_args }; |
181 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; | 181 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; |
182 | union acpi_object *obj; | 182 | union acpi_object *obj; |
183 | acpi_status status; | 183 | acpi_status status; |
184 | 184 | ||
185 | if (!wmi_has_guid(WMI_WMMX_GUID)) { | 185 | if (!wmi_has_guid(WMI_WMMX_GUID)) { |
186 | nv_debug(mxm, "WMMX GUID not found\n"); | 186 | nv_debug(mxm, "WMMX GUID not found\n"); |
187 | return false; | 187 | return false; |
188 | } | 188 | } |
189 | 189 | ||
190 | mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00); | 190 | mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00); |
191 | if (!mxms_args[1]) | 191 | if (!mxms_args[1]) |
192 | mxms_args[1] = wmi_wmmx_mxmi(mxm, version); | 192 | mxms_args[1] = wmi_wmmx_mxmi(mxm, version); |
193 | if (!mxms_args[1]) | 193 | if (!mxms_args[1]) |
194 | return false; | 194 | return false; |
195 | 195 | ||
196 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); | 196 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); |
197 | if (ACPI_FAILURE(status)) { | 197 | if (ACPI_FAILURE(status)) { |
198 | nv_debug(mxm, "WMMX MXMS returned %d\n", status); | 198 | nv_debug(mxm, "WMMX MXMS returned %d\n", status); |
199 | return false; | 199 | return false; |
200 | } | 200 | } |
201 | 201 | ||
202 | obj = retn.pointer; | 202 | obj = retn.pointer; |
203 | if (obj->type == ACPI_TYPE_BUFFER) { | 203 | if (obj->type == ACPI_TYPE_BUFFER) { |
204 | mxm->mxms = kmemdup(obj->buffer.pointer, | 204 | mxm->mxms = kmemdup(obj->buffer.pointer, |
205 | obj->buffer.length, GFP_KERNEL); | 205 | obj->buffer.length, GFP_KERNEL); |
206 | } | 206 | } |
207 | 207 | ||
208 | kfree(obj); | 208 | kfree(obj); |
209 | return mxm->mxms != NULL; | 209 | return mxm->mxms != NULL; |
210 | } | 210 | } |
211 | #endif | 211 | #endif |
212 | 212 | ||
213 | static struct mxm_shadow_h { | 213 | static struct mxm_shadow_h { |
214 | const char *name; | 214 | const char *name; |
215 | bool (*exec)(struct nouveau_mxm *, u8 version); | 215 | bool (*exec)(struct nouveau_mxm *, u8 version); |
216 | } _mxm_shadow[] = { | 216 | } _mxm_shadow[] = { |
217 | { "ROM", mxm_shadow_rom }, | 217 | { "ROM", mxm_shadow_rom }, |
218 | #if defined(CONFIG_ACPI) | 218 | #if defined(CONFIG_ACPI) |
219 | { "DSM", mxm_shadow_dsm }, | 219 | { "DSM", mxm_shadow_dsm }, |
220 | #endif | 220 | #endif |
221 | #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) | 221 | #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) |
222 | { "WMI", mxm_shadow_wmi }, | 222 | { "WMI", mxm_shadow_wmi }, |
223 | #endif | 223 | #endif |
224 | {} | 224 | {} |
225 | }; | 225 | }; |
226 | 226 | ||
227 | static int | 227 | static int |
228 | mxm_shadow(struct nouveau_mxm *mxm, u8 version) | 228 | mxm_shadow(struct nouveau_mxm *mxm, u8 version) |
229 | { | 229 | { |
230 | struct mxm_shadow_h *shadow = _mxm_shadow; | 230 | struct mxm_shadow_h *shadow = _mxm_shadow; |
231 | do { | 231 | do { |
232 | nv_debug(mxm, "checking %s\n", shadow->name); | 232 | nv_debug(mxm, "checking %s\n", shadow->name); |
233 | if (shadow->exec(mxm, version)) { | 233 | if (shadow->exec(mxm, version)) { |
234 | if (mxms_valid(mxm)) | 234 | if (mxms_valid(mxm)) |
235 | return 0; | 235 | return 0; |
236 | kfree(mxm->mxms); | 236 | kfree(mxm->mxms); |
237 | mxm->mxms = NULL; | 237 | mxm->mxms = NULL; |
238 | } | 238 | } |
239 | } while ((++shadow)->name); | 239 | } while ((++shadow)->name); |
240 | return -ENOENT; | 240 | return -ENOENT; |
241 | } | 241 | } |
242 | 242 | ||
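_mxm_shadow above is a NULL-terminated table of named methods that mxm_shadow() tries in order. A reduced, self-contained sketch of that walk, with all names invented and the MXMS validation and cleanup done by the real loop omitted:

#include <linux/errno.h>
#include <linux/types.h>

struct shadow_try {
	const char *name;
	bool (*exec)(void *ctx);
};

/* Try each entry until one succeeds; the table ends with an empty {} entry. */
static int shadow_walk(const struct shadow_try *t, void *ctx)
{
	for (; t->name; t++)
		if (t->exec(ctx))
			return 0;
	return -ENOENT;
}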
243 | int | 243 | int |
244 | nouveau_mxm_create_(struct nouveau_object *parent, | 244 | nouveau_mxm_create_(struct nouveau_object *parent, |
245 | struct nouveau_object *engine, | 245 | struct nouveau_object *engine, |
246 | struct nouveau_oclass *oclass, int length, void **pobject) | 246 | struct nouveau_oclass *oclass, int length, void **pobject) |
247 | { | 247 | { |
248 | struct nouveau_device *device = nv_device(parent); | 248 | struct nouveau_device *device = nv_device(parent); |
249 | struct nouveau_bios *bios = nouveau_bios(device); | 249 | struct nouveau_bios *bios = nouveau_bios(device); |
250 | struct nouveau_mxm *mxm; | 250 | struct nouveau_mxm *mxm; |
251 | u8 ver, len; | 251 | u8 ver, len; |
252 | u16 data; | 252 | u16 data; |
253 | int ret; | 253 | int ret; |
254 | 254 | ||
255 | ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm", | 255 | ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm", |
256 | length, pobject); | 256 | length, pobject); |
257 | mxm = *pobject; | 257 | mxm = *pobject; |
258 | if (ret) | 258 | if (ret) |
259 | return ret; | 259 | return ret; |
260 | 260 | ||
261 | data = mxm_table(bios, &ver, &len); | 261 | data = mxm_table(bios, &ver, &len); |
262 | if (!data || !(ver = nv_ro08(bios, data))) { | 262 | if (!data || !(ver = nv_ro08(bios, data))) { |
263 | nv_debug(mxm, "no VBIOS data, nothing to do\n"); | 263 | nv_debug(mxm, "no VBIOS data, nothing to do\n"); |
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f); | 267 | nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f); |
268 | 268 | ||
269 | if (mxm_shadow(mxm, ver)) { | 269 | if (mxm_shadow(mxm, ver)) { |
270 | nv_info(mxm, "failed to locate valid SIS\n"); | 270 | nv_info(mxm, "failed to locate valid SIS\n"); |
271 | #if 0 | 271 | #if 0 |
272 | /* we should, perhaps, fall back to some kind of limited | 272 | /* we should, perhaps, fall back to some kind of limited |
273 | * mode here if the x86 vbios hasn't already done the | 273 | * mode here if the x86 vbios hasn't already done the |
274 | * work for us (so we prevent loading with completely | 274 | * work for us (so we prevent loading with completely |
275 | * whacked vbios tables). | 275 | * whacked vbios tables). |
276 | */ | 276 | */ |
277 | return -EINVAL; | 277 | return -EINVAL; |
278 | #else | 278 | #else |
279 | return 0; | 279 | return 0; |
280 | #endif | 280 | #endif |
281 | } | 281 | } |
282 | 282 | ||
283 | nv_info(mxm, "MXMS Version %d.%d\n", | 283 | nv_info(mxm, "MXMS Version %d.%d\n", |
284 | mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff); | 284 | mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff); |
285 | mxms_foreach(mxm, 0, NULL, NULL); | 285 | mxms_foreach(mxm, 0, NULL, NULL); |
286 | 286 | ||
287 | if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true)) | 287 | if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true)) |
288 | mxm->action |= MXM_SANITISE_DCB; | 288 | mxm->action |= MXM_SANITISE_DCB; |
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | 291 |
drivers/gpu/drm/nouveau/nouveau_acpi.c
1 | #include <linux/pci.h> | 1 | #include <linux/pci.h> |
2 | #include <linux/acpi.h> | 2 | #include <linux/acpi.h> |
3 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
4 | #include <acpi/acpi_drivers.h> | 4 | #include <acpi/acpi_drivers.h> |
5 | #include <acpi/acpi_bus.h> | 5 | #include <acpi/acpi_bus.h> |
6 | #include <acpi/video.h> | 6 | #include <acpi/video.h> |
7 | #include <acpi/acpi.h> | 7 | #include <acpi/acpi.h> |
8 | #include <linux/mxm-wmi.h> | 8 | #include <linux/mxm-wmi.h> |
9 | 9 | ||
10 | #include <linux/vga_switcheroo.h> | 10 | #include <linux/vga_switcheroo.h> |
11 | 11 | ||
12 | #include <drm/drm_edid.h> | 12 | #include <drm/drm_edid.h> |
13 | 13 | ||
14 | #include "nouveau_drm.h" | 14 | #include "nouveau_drm.h" |
15 | #include "nouveau_acpi.h" | 15 | #include "nouveau_acpi.h" |
16 | 16 | ||
17 | #define NOUVEAU_DSM_LED 0x02 | 17 | #define NOUVEAU_DSM_LED 0x02 |
18 | #define NOUVEAU_DSM_LED_STATE 0x00 | 18 | #define NOUVEAU_DSM_LED_STATE 0x00 |
19 | #define NOUVEAU_DSM_LED_OFF 0x10 | 19 | #define NOUVEAU_DSM_LED_OFF 0x10 |
20 | #define NOUVEAU_DSM_LED_STAMINA 0x11 | 20 | #define NOUVEAU_DSM_LED_STAMINA 0x11 |
21 | #define NOUVEAU_DSM_LED_SPEED 0x12 | 21 | #define NOUVEAU_DSM_LED_SPEED 0x12 |
22 | 22 | ||
23 | #define NOUVEAU_DSM_POWER 0x03 | 23 | #define NOUVEAU_DSM_POWER 0x03 |
24 | #define NOUVEAU_DSM_POWER_STATE 0x00 | 24 | #define NOUVEAU_DSM_POWER_STATE 0x00 |
25 | #define NOUVEAU_DSM_POWER_SPEED 0x01 | 25 | #define NOUVEAU_DSM_POWER_SPEED 0x01 |
26 | #define NOUVEAU_DSM_POWER_STAMINA 0x02 | 26 | #define NOUVEAU_DSM_POWER_STAMINA 0x02 |
27 | 27 | ||
28 | #define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A | 28 | #define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A |
29 | #define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B | 29 | #define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B |
30 | 30 | ||
31 | #define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24) | 31 | #define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24) |
32 | #define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24) | 32 | #define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24) |
33 | #define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1) | 33 | #define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1) |
34 | 34 | ||
35 | #define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED) | 35 | #define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED) |
36 | 36 | ||
37 | /* result of the optimus caps function */ | 37 | /* result of the optimus caps function */ |
38 | #define OPTIMUS_ENABLED (1 << 0) | 38 | #define OPTIMUS_ENABLED (1 << 0) |
39 | #define OPTIMUS_STATUS_MASK (3 << 3) | 39 | #define OPTIMUS_STATUS_MASK (3 << 3) |
40 | #define OPTIMUS_STATUS_OFF (0 << 3) | 40 | #define OPTIMUS_STATUS_OFF (0 << 3) |
41 | #define OPTIMUS_STATUS_ON_ENABLED (1 << 3) | 41 | #define OPTIMUS_STATUS_ON_ENABLED (1 << 3) |
42 | #define OPTIMUS_STATUS_PWR_STABLE (3 << 3) | 42 | #define OPTIMUS_STATUS_PWR_STABLE (3 << 3) |
43 | #define OPTIMUS_DISPLAY_HOTPLUG (1 << 6) | 43 | #define OPTIMUS_DISPLAY_HOTPLUG (1 << 6) |
44 | #define OPTIMUS_CAPS_MASK (7 << 24) | 44 | #define OPTIMUS_CAPS_MASK (7 << 24) |
45 | #define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24) | 45 | #define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24) |
46 | 46 | ||
47 | #define OPTIMUS_AUDIO_CAPS_MASK (3 << 27) | 47 | #define OPTIMUS_AUDIO_CAPS_MASK (3 << 27) |
48 | #define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */ | 48 | #define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */ |
49 | 49 | ||
50 | static struct nouveau_dsm_priv { | 50 | static struct nouveau_dsm_priv { |
51 | bool dsm_detected; | 51 | bool dsm_detected; |
52 | bool optimus_detected; | 52 | bool optimus_detected; |
53 | acpi_handle dhandle; | 53 | acpi_handle dhandle; |
54 | acpi_handle rom_handle; | 54 | acpi_handle rom_handle; |
55 | } nouveau_dsm_priv; | 55 | } nouveau_dsm_priv; |
56 | 56 | ||
57 | bool nouveau_is_optimus(void) { | 57 | bool nouveau_is_optimus(void) { |
58 | return nouveau_dsm_priv.optimus_detected; | 58 | return nouveau_dsm_priv.optimus_detected; |
59 | } | 59 | } |
60 | 60 | ||
61 | bool nouveau_is_v1_dsm(void) { | 61 | bool nouveau_is_v1_dsm(void) { |
62 | return nouveau_dsm_priv.dsm_detected; | 62 | return nouveau_dsm_priv.dsm_detected; |
63 | } | 63 | } |
64 | 64 | ||
65 | #define NOUVEAU_DSM_HAS_MUX 0x1 | 65 | #define NOUVEAU_DSM_HAS_MUX 0x1 |
66 | #define NOUVEAU_DSM_HAS_OPT 0x2 | 66 | #define NOUVEAU_DSM_HAS_OPT 0x2 |
67 | 67 | ||
68 | static const char nouveau_dsm_muid[] = { | 68 | static const char nouveau_dsm_muid[] = { |
69 | 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, | 69 | 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, |
70 | 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, | 70 | 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static const char nouveau_op_dsm_muid[] = { | 73 | static const char nouveau_op_dsm_muid[] = { |
74 | 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, | 74 | 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, |
75 | 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, | 75 | 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) | 78 | static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) |
79 | { | 79 | { |
80 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 80 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
81 | struct acpi_object_list input; | 81 | struct acpi_object_list input; |
82 | union acpi_object params[4]; | 82 | union acpi_object params[4]; |
83 | union acpi_object *obj; | 83 | union acpi_object *obj; |
84 | int i, err; | 84 | int i, err; |
85 | char args_buff[4]; | 85 | char args_buff[4]; |
86 | 86 | ||
87 | input.count = 4; | 87 | input.count = 4; |
88 | input.pointer = params; | 88 | input.pointer = params; |
89 | params[0].type = ACPI_TYPE_BUFFER; | 89 | params[0].type = ACPI_TYPE_BUFFER; |
90 | params[0].buffer.length = sizeof(nouveau_op_dsm_muid); | 90 | params[0].buffer.length = sizeof(nouveau_op_dsm_muid); |
91 | params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; | 91 | params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; |
92 | params[1].type = ACPI_TYPE_INTEGER; | 92 | params[1].type = ACPI_TYPE_INTEGER; |
93 | params[1].integer.value = 0x00000100; | 93 | params[1].integer.value = 0x00000100; |
94 | params[2].type = ACPI_TYPE_INTEGER; | 94 | params[2].type = ACPI_TYPE_INTEGER; |
95 | params[2].integer.value = func; | 95 | params[2].integer.value = func; |
96 | params[3].type = ACPI_TYPE_BUFFER; | 96 | params[3].type = ACPI_TYPE_BUFFER; |
97 | params[3].buffer.length = 4; | 97 | params[3].buffer.length = 4; |
98 | /* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */ | 98 | /* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */ |
99 | for (i = 0; i < 4; i++) | 99 | for (i = 0; i < 4; i++) |
100 | args_buff[i] = (arg >> i * 8) & 0xFF; | 100 | args_buff[i] = (arg >> i * 8) & 0xFF; |
101 | params[3].buffer.pointer = args_buff; | 101 | params[3].buffer.pointer = args_buff; |
102 | 102 | ||
103 | err = acpi_evaluate_object(handle, "_DSM", &input, &output); | 103 | err = acpi_evaluate_object(handle, "_DSM", &input, &output); |
104 | if (err) { | 104 | if (err) { |
105 | printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); | 105 | printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); |
106 | return err; | 106 | return err; |
107 | } | 107 | } |
108 | 108 | ||
109 | obj = (union acpi_object *)output.pointer; | 109 | obj = (union acpi_object *)output.pointer; |
110 | 110 | ||
111 | if (obj->type == ACPI_TYPE_INTEGER) | 111 | if (obj->type == ACPI_TYPE_INTEGER) |
112 | if (obj->integer.value == 0x80000002) { | 112 | if (obj->integer.value == 0x80000002) { |
113 | return -ENODEV; | 113 | return -ENODEV; |
114 | } | 114 | } |
115 | 115 | ||
116 | if (obj->type == ACPI_TYPE_BUFFER) { | 116 | if (obj->type == ACPI_TYPE_BUFFER) { |
117 | if (obj->buffer.length == 4 && result) { | 117 | if (obj->buffer.length == 4 && result) { |
118 | *result = 0; | 118 | *result = 0; |
119 | *result |= obj->buffer.pointer[0]; | 119 | *result |= obj->buffer.pointer[0]; |
120 | *result |= (obj->buffer.pointer[1] << 8); | 120 | *result |= (obj->buffer.pointer[1] << 8); |
121 | *result |= (obj->buffer.pointer[2] << 16); | 121 | *result |= (obj->buffer.pointer[2] << 16); |
122 | *result |= (obj->buffer.pointer[3] << 24); | 122 | *result |= (obj->buffer.pointer[3] << 24); |
123 | } | 123 | } |
124 | } | 124 | } |
125 | 125 | ||
126 | kfree(output.pointer); | 126 | kfree(output.pointer); |
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) | 130 | static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) |
131 | { | 131 | { |
132 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 132 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
133 | struct acpi_object_list input; | 133 | struct acpi_object_list input; |
134 | union acpi_object params[4]; | 134 | union acpi_object params[4]; |
135 | union acpi_object *obj; | 135 | union acpi_object *obj; |
136 | int err; | 136 | int err; |
137 | 137 | ||
138 | input.count = 4; | 138 | input.count = 4; |
139 | input.pointer = params; | 139 | input.pointer = params; |
140 | params[0].type = ACPI_TYPE_BUFFER; | 140 | params[0].type = ACPI_TYPE_BUFFER; |
141 | params[0].buffer.length = sizeof(nouveau_dsm_muid); | 141 | params[0].buffer.length = sizeof(nouveau_dsm_muid); |
142 | params[0].buffer.pointer = (char *)nouveau_dsm_muid; | 142 | params[0].buffer.pointer = (char *)nouveau_dsm_muid; |
143 | params[1].type = ACPI_TYPE_INTEGER; | 143 | params[1].type = ACPI_TYPE_INTEGER; |
144 | params[1].integer.value = 0x00000102; | 144 | params[1].integer.value = 0x00000102; |
145 | params[2].type = ACPI_TYPE_INTEGER; | 145 | params[2].type = ACPI_TYPE_INTEGER; |
146 | params[2].integer.value = func; | 146 | params[2].integer.value = func; |
147 | params[3].type = ACPI_TYPE_INTEGER; | 147 | params[3].type = ACPI_TYPE_INTEGER; |
148 | params[3].integer.value = arg; | 148 | params[3].integer.value = arg; |
149 | 149 | ||
150 | err = acpi_evaluate_object(handle, "_DSM", &input, &output); | 150 | err = acpi_evaluate_object(handle, "_DSM", &input, &output); |
151 | if (err) { | 151 | if (err) { |
152 | printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); | 152 | printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); |
153 | return err; | 153 | return err; |
154 | } | 154 | } |
155 | 155 | ||
156 | obj = (union acpi_object *)output.pointer; | 156 | obj = (union acpi_object *)output.pointer; |
157 | 157 | ||
158 | if (obj->type == ACPI_TYPE_INTEGER) | 158 | if (obj->type == ACPI_TYPE_INTEGER) |
159 | if (obj->integer.value == 0x80000002) | 159 | if (obj->integer.value == 0x80000002) |
160 | return -ENODEV; | 160 | return -ENODEV; |
161 | 161 | ||
162 | if (obj->type == ACPI_TYPE_BUFFER) { | 162 | if (obj->type == ACPI_TYPE_BUFFER) { |
163 | if (obj->buffer.length == 4 && result) { | 163 | if (obj->buffer.length == 4 && result) { |
164 | *result = 0; | 164 | *result = 0; |
165 | *result |= obj->buffer.pointer[0]; | 165 | *result |= obj->buffer.pointer[0]; |
166 | *result |= (obj->buffer.pointer[1] << 8); | 166 | *result |= (obj->buffer.pointer[1] << 8); |
167 | *result |= (obj->buffer.pointer[2] << 16); | 167 | *result |= (obj->buffer.pointer[2] << 16); |
168 | *result |= (obj->buffer.pointer[3] << 24); | 168 | *result |= (obj->buffer.pointer[3] << 24); |
169 | } | 169 | } |
170 | } | 170 | } |
171 | 171 | ||
172 | kfree(output.pointer); | 172 | kfree(output.pointer); |
173 | return 0; | 173 | return 0; |
174 | } | 174 | } |
175 | 175 | ||
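nouveau_optimus_dsm() above packs its 32-bit argument into a little-endian byte buffer, and both _DSM wrappers reassemble a 4-byte result buffer the same way. A standalone sketch of just that conversion, with helper names invented for illustration:

#include <linux/types.h>

/* ACPI buffers are little endian: 0xAABBCCDD travels as {DD, CC, BB, AA}. */
static void dsm_pack_arg(u32 arg, u8 buf[4])
{
	int i;

	for (i = 0; i < 4; i++)
		buf[i] = (arg >> (i * 8)) & 0xff;
}

static u32 dsm_unpack_result(const u8 buf[4])
{
	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((u32)buf[3] << 24);
}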
176 | /* Returns 1 if a DSM function is usable and 0 otherwise */ | 176 | /* Returns 1 if a DSM function is usable and 0 otherwise */ |
177 | static int nouveau_test_dsm(acpi_handle test_handle, | 177 | static int nouveau_test_dsm(acpi_handle test_handle, |
178 | int (*dsm_func)(acpi_handle, int, int, uint32_t *), | 178 | int (*dsm_func)(acpi_handle, int, int, uint32_t *), |
179 | int sfnc) | 179 | int sfnc) |
180 | { | 180 | { |
181 | u32 result = 0; | 181 | u32 result = 0; |
182 | 182 | ||
183 | /* Function 0 returns a Buffer containing available functions. The args | 183 | /* Function 0 returns a Buffer containing available functions. The args |
184 | * parameter is ignored for function 0, so just put 0 in it */ | 184 | * parameter is ignored for function 0, so just put 0 in it */ |
185 | if (dsm_func(test_handle, 0, 0, &result)) | 185 | if (dsm_func(test_handle, 0, 0, &result)) |
186 | return 0; | 186 | return 0; |
187 | 187 | ||
188 | /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If | 188 | /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If |
189 | * the n-th bit is enabled, function n is supported */ | 189 | * the n-th bit is enabled, function n is supported */ |
190 | return result & 1 && result & (1 << sfnc); | 190 | return result & 1 && result & (1 << sfnc); |
191 | } | 191 | } |
192 | 192 | ||
193 | static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) | 193 | static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) |
194 | { | 194 | { |
195 | mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); | 195 | mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); |
196 | mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); | 196 | mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); |
197 | return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); | 197 | return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); |
198 | } | 198 | } |
199 | 199 | ||
200 | static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) | 200 | static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) |
201 | { | 201 | { |
202 | int arg; | 202 | int arg; |
203 | if (state == VGA_SWITCHEROO_ON) | 203 | if (state == VGA_SWITCHEROO_ON) |
204 | arg = NOUVEAU_DSM_POWER_SPEED; | 204 | arg = NOUVEAU_DSM_POWER_SPEED; |
205 | else | 205 | else |
206 | arg = NOUVEAU_DSM_POWER_STAMINA; | 206 | arg = NOUVEAU_DSM_POWER_STAMINA; |
207 | nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); | 207 | nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); |
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | 210 | ||
211 | static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) | 211 | static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) |
212 | { | 212 | { |
213 | if (!nouveau_dsm_priv.dsm_detected) | 213 | if (!nouveau_dsm_priv.dsm_detected) |
214 | return 0; | 214 | return 0; |
215 | if (id == VGA_SWITCHEROO_IGD) | 215 | if (id == VGA_SWITCHEROO_IGD) |
216 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); | 216 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); |
217 | else | 217 | else |
218 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); | 218 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); |
219 | } | 219 | } |
220 | 220 | ||
221 | static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, | 221 | static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, |
222 | enum vga_switcheroo_state state) | 222 | enum vga_switcheroo_state state) |
223 | { | 223 | { |
224 | if (id == VGA_SWITCHEROO_IGD) | 224 | if (id == VGA_SWITCHEROO_IGD) |
225 | return 0; | 225 | return 0; |
226 | 226 | ||
227 | /* Optimus laptops have the card already disabled in | 227 | /* Optimus laptops have the card already disabled in |
228 | * nouveau_switcheroo_set_state */ | 228 | * nouveau_switcheroo_set_state */ |
229 | if (!nouveau_dsm_priv.dsm_detected) | 229 | if (!nouveau_dsm_priv.dsm_detected) |
230 | return 0; | 230 | return 0; |
231 | 231 | ||
232 | return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); | 232 | return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); |
233 | } | 233 | } |
234 | 234 | ||
235 | static int nouveau_dsm_get_client_id(struct pci_dev *pdev) | 235 | static int nouveau_dsm_get_client_id(struct pci_dev *pdev) |
236 | { | 236 | { |
237 | /* easy option one - intel vendor ID means Integrated */ | 237 | /* easy option one - intel vendor ID means Integrated */ |
238 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | 238 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) |
239 | return VGA_SWITCHEROO_IGD; | 239 | return VGA_SWITCHEROO_IGD; |
240 | 240 | ||
241 | /* is this device on Bus 0? - this may need improving */ | 241 | /* is this device on Bus 0? - this may need improving */ |
242 | if (pdev->bus->number == 0) | 242 | if (pdev->bus->number == 0) |
243 | return VGA_SWITCHEROO_IGD; | 243 | return VGA_SWITCHEROO_IGD; |
244 | 244 | ||
245 | return VGA_SWITCHEROO_DIS; | 245 | return VGA_SWITCHEROO_DIS; |
246 | } | 246 | } |
247 | 247 | ||
248 | static struct vga_switcheroo_handler nouveau_dsm_handler = { | 248 | static struct vga_switcheroo_handler nouveau_dsm_handler = { |
249 | .switchto = nouveau_dsm_switchto, | 249 | .switchto = nouveau_dsm_switchto, |
250 | .power_state = nouveau_dsm_power_state, | 250 | .power_state = nouveau_dsm_power_state, |
251 | .get_client_id = nouveau_dsm_get_client_id, | 251 | .get_client_id = nouveau_dsm_get_client_id, |
252 | }; | 252 | }; |
253 | 253 | ||
254 | static int nouveau_dsm_pci_probe(struct pci_dev *pdev) | 254 | static int nouveau_dsm_pci_probe(struct pci_dev *pdev) |
255 | { | 255 | { |
256 | acpi_handle dhandle; | 256 | acpi_handle dhandle; |
257 | int retval = 0; | 257 | int retval = 0; |
258 | 258 | ||
259 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 259 | dhandle = ACPI_HANDLE(&pdev->dev); |
260 | if (!dhandle) | 260 | if (!dhandle) |
261 | return false; | 261 | return false; |
262 | 262 | ||
263 | if (!acpi_has_method(dhandle, "_DSM")) | 263 | if (!acpi_has_method(dhandle, "_DSM")) |
264 | return false; | 264 | return false; |
265 | 265 | ||
266 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) | 266 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) |
267 | retval |= NOUVEAU_DSM_HAS_MUX; | 267 | retval |= NOUVEAU_DSM_HAS_MUX; |
268 | 268 | ||
269 | if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, | 269 | if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, |
270 | NOUVEAU_DSM_OPTIMUS_CAPS)) | 270 | NOUVEAU_DSM_OPTIMUS_CAPS)) |
271 | retval |= NOUVEAU_DSM_HAS_OPT; | 271 | retval |= NOUVEAU_DSM_HAS_OPT; |
272 | 272 | ||
273 | if (retval & NOUVEAU_DSM_HAS_OPT) { | 273 | if (retval & NOUVEAU_DSM_HAS_OPT) { |
274 | uint32_t result; | 274 | uint32_t result; |
275 | nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, | 275 | nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, |
276 | &result); | 276 | &result); |
277 | dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n", | 277 | dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n", |
278 | (result & OPTIMUS_ENABLED) ? "enabled" : "disabled", | 278 | (result & OPTIMUS_ENABLED) ? "enabled" : "disabled", |
279 | (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", | 279 | (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", |
280 | (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : ""); | 280 | (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : ""); |
281 | } | 281 | } |
282 | if (retval) | 282 | if (retval) |
283 | nouveau_dsm_priv.dhandle = dhandle; | 283 | nouveau_dsm_priv.dhandle = dhandle; |
284 | 284 | ||
285 | return retval; | 285 | return retval; |
286 | } | 286 | } |
287 | 287 | ||
288 | static bool nouveau_dsm_detect(void) | 288 | static bool nouveau_dsm_detect(void) |
289 | { | 289 | { |
290 | char acpi_method_name[255] = { 0 }; | 290 | char acpi_method_name[255] = { 0 }; |
291 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | 291 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; |
292 | struct pci_dev *pdev = NULL; | 292 | struct pci_dev *pdev = NULL; |
293 | int has_dsm = 0; | 293 | int has_dsm = 0; |
294 | int has_optimus = 0; | 294 | int has_optimus = 0; |
295 | int vga_count = 0; | 295 | int vga_count = 0; |
296 | bool guid_valid; | 296 | bool guid_valid; |
297 | int retval; | 297 | int retval; |
298 | bool ret = false; | 298 | bool ret = false; |
299 | 299 | ||
300 | /* lookup the MXM GUID */ | 300 | /* lookup the MXM GUID */ |
301 | guid_valid = mxm_wmi_supported(); | 301 | guid_valid = mxm_wmi_supported(); |
302 | 302 | ||
303 | if (guid_valid) | 303 | if (guid_valid) |
304 | printk("MXM: GUID detected in BIOS\n"); | 304 | printk("MXM: GUID detected in BIOS\n"); |
305 | 305 | ||
306 | /* now do DSM detection */ | 306 | /* now do DSM detection */ |
307 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 307 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
308 | vga_count++; | 308 | vga_count++; |
309 | 309 | ||
310 | retval = nouveau_dsm_pci_probe(pdev); | 310 | retval = nouveau_dsm_pci_probe(pdev); |
311 | if (retval & NOUVEAU_DSM_HAS_MUX) | 311 | if (retval & NOUVEAU_DSM_HAS_MUX) |
312 | has_dsm |= 1; | 312 | has_dsm |= 1; |
313 | if (retval & NOUVEAU_DSM_HAS_OPT) | 313 | if (retval & NOUVEAU_DSM_HAS_OPT) |
314 | has_optimus = 1; | 314 | has_optimus = 1; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* find the optimus DSM or the old v1 DSM */ | 317 | /* find the optimus DSM or the old v1 DSM */ |
318 | if (has_optimus == 1) { | 318 | if (has_optimus == 1) { |
319 | acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, | 319 | acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, |
320 | &buffer); | 320 | &buffer); |
321 | printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", | 321 | printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", |
322 | acpi_method_name); | 322 | acpi_method_name); |
323 | nouveau_dsm_priv.optimus_detected = true; | 323 | nouveau_dsm_priv.optimus_detected = true; |
324 | ret = true; | 324 | ret = true; |
325 | } else if (vga_count == 2 && has_dsm && guid_valid) { | 325 | } else if (vga_count == 2 && has_dsm && guid_valid) { |
326 | acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, | 326 | acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, |
327 | &buffer); | 327 | &buffer); |
328 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", | 328 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", |
329 | acpi_method_name); | 329 | acpi_method_name); |
330 | nouveau_dsm_priv.dsm_detected = true; | 330 | nouveau_dsm_priv.dsm_detected = true; |
331 | ret = true; | 331 | ret = true; |
332 | } | 332 | } |
333 | 333 | ||
334 | 334 | ||
335 | return ret; | 335 | return ret; |
336 | } | 336 | } |
337 | 337 | ||
338 | void nouveau_register_dsm_handler(void) | 338 | void nouveau_register_dsm_handler(void) |
339 | { | 339 | { |
340 | bool r; | 340 | bool r; |
341 | 341 | ||
342 | r = nouveau_dsm_detect(); | 342 | r = nouveau_dsm_detect(); |
343 | if (!r) | 343 | if (!r) |
344 | return; | 344 | return; |
345 | 345 | ||
346 | vga_switcheroo_register_handler(&nouveau_dsm_handler); | 346 | vga_switcheroo_register_handler(&nouveau_dsm_handler); |
347 | } | 347 | } |
348 | 348 | ||
349 | /* Must be called for Optimus models before the card can be turned off */ | 349 | /* Must be called for Optimus models before the card can be turned off */ |
350 | void nouveau_switcheroo_optimus_dsm(void) | 350 | void nouveau_switcheroo_optimus_dsm(void) |
351 | { | 351 | { |
352 | u32 result = 0; | 352 | u32 result = 0; |
353 | if (!nouveau_dsm_priv.optimus_detected) | 353 | if (!nouveau_dsm_priv.optimus_detected) |
354 | return; | 354 | return; |
355 | 355 | ||
356 | nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, | 356 | nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, |
357 | 0x3, &result); | 357 | 0x3, &result); |
358 | 358 | ||
359 | nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, | 359 | nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, |
360 | NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); | 360 | NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); |
361 | 361 | ||
362 | } | 362 | } |
363 | 363 | ||
364 | void nouveau_unregister_dsm_handler(void) | 364 | void nouveau_unregister_dsm_handler(void) |
365 | { | 365 | { |
366 | if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected) | 366 | if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected) |
367 | vga_switcheroo_unregister_handler(); | 367 | vga_switcheroo_unregister_handler(); |
368 | } | 368 | } |
369 | 369 | ||
370 | /* retrieve the ROM in 4k blocks */ | 370 | /* retrieve the ROM in 4k blocks */ |
371 | static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, | 371 | static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, |
372 | int offset, int len) | 372 | int offset, int len) |
373 | { | 373 | { |
374 | acpi_status status; | 374 | acpi_status status; |
375 | union acpi_object rom_arg_elements[2], *obj; | 375 | union acpi_object rom_arg_elements[2], *obj; |
376 | struct acpi_object_list rom_arg; | 376 | struct acpi_object_list rom_arg; |
377 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; | 377 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; |
378 | 378 | ||
379 | rom_arg.count = 2; | 379 | rom_arg.count = 2; |
380 | rom_arg.pointer = &rom_arg_elements[0]; | 380 | rom_arg.pointer = &rom_arg_elements[0]; |
381 | 381 | ||
382 | rom_arg_elements[0].type = ACPI_TYPE_INTEGER; | 382 | rom_arg_elements[0].type = ACPI_TYPE_INTEGER; |
383 | rom_arg_elements[0].integer.value = offset; | 383 | rom_arg_elements[0].integer.value = offset; |
384 | 384 | ||
385 | rom_arg_elements[1].type = ACPI_TYPE_INTEGER; | 385 | rom_arg_elements[1].type = ACPI_TYPE_INTEGER; |
386 | rom_arg_elements[1].integer.value = len; | 386 | rom_arg_elements[1].integer.value = len; |
387 | 387 | ||
388 | status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); | 388 | status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); |
389 | if (ACPI_FAILURE(status)) { | 389 | if (ACPI_FAILURE(status)) { |
390 | printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); | 390 | printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); |
391 | return -ENODEV; | 391 | return -ENODEV; |
392 | } | 392 | } |
393 | obj = (union acpi_object *)buffer.pointer; | 393 | obj = (union acpi_object *)buffer.pointer; |
394 | memcpy(bios+offset, obj->buffer.pointer, len); | 394 | memcpy(bios+offset, obj->buffer.pointer, len); |
395 | kfree(buffer.pointer); | 395 | kfree(buffer.pointer); |
396 | return len; | 396 | return len; |
397 | } | 397 | } |
398 | 398 | ||
399 | bool nouveau_acpi_rom_supported(struct pci_dev *pdev) | 399 | bool nouveau_acpi_rom_supported(struct pci_dev *pdev) |
400 | { | 400 | { |
401 | acpi_status status; | 401 | acpi_status status; |
402 | acpi_handle dhandle, rom_handle; | 402 | acpi_handle dhandle, rom_handle; |
403 | 403 | ||
404 | if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) | 404 | if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) |
405 | return false; | 405 | return false; |
406 | 406 | ||
407 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 407 | dhandle = ACPI_HANDLE(&pdev->dev); |
408 | if (!dhandle) | 408 | if (!dhandle) |
409 | return false; | 409 | return false; |
410 | 410 | ||
411 | status = acpi_get_handle(dhandle, "_ROM", &rom_handle); | 411 | status = acpi_get_handle(dhandle, "_ROM", &rom_handle); |
412 | if (ACPI_FAILURE(status)) | 412 | if (ACPI_FAILURE(status)) |
413 | return false; | 413 | return false; |
414 | 414 | ||
415 | nouveau_dsm_priv.rom_handle = rom_handle; | 415 | nouveau_dsm_priv.rom_handle = rom_handle; |
416 | return true; | 416 | return true; |
417 | } | 417 | } |
418 | 418 | ||
419 | int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) | 419 | int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) |
420 | { | 420 | { |
421 | return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); | 421 | return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); |
422 | } | 422 | } |
423 | 423 | ||
424 | void * | 424 | void * |
425 | nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) | 425 | nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) |
426 | { | 426 | { |
427 | struct acpi_device *acpidev; | 427 | struct acpi_device *acpidev; |
428 | acpi_handle handle; | 428 | acpi_handle handle; |
429 | int type, ret; | 429 | int type, ret; |
430 | void *edid; | 430 | void *edid; |
431 | 431 | ||
432 | switch (connector->connector_type) { | 432 | switch (connector->connector_type) { |
433 | case DRM_MODE_CONNECTOR_LVDS: | 433 | case DRM_MODE_CONNECTOR_LVDS: |
434 | case DRM_MODE_CONNECTOR_eDP: | 434 | case DRM_MODE_CONNECTOR_eDP: |
435 | type = ACPI_VIDEO_DISPLAY_LCD; | 435 | type = ACPI_VIDEO_DISPLAY_LCD; |
436 | break; | 436 | break; |
437 | default: | 437 | default: |
438 | return NULL; | 438 | return NULL; |
439 | } | 439 | } |
440 | 440 | ||
441 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | 441 | handle = ACPI_HANDLE(&dev->pdev->dev); |
442 | if (!handle) | 442 | if (!handle) |
443 | return NULL; | 443 | return NULL; |
444 | 444 | ||
445 | ret = acpi_bus_get_device(handle, &acpidev); | 445 | ret = acpi_bus_get_device(handle, &acpidev); |
446 | if (ret) | 446 | if (ret) |
447 | return NULL; | 447 | return NULL; |
448 | 448 | ||
449 | ret = acpi_video_get_edid(acpidev, type, -1, &edid); | 449 | ret = acpi_video_get_edid(acpidev, type, -1, &edid); |
450 | if (ret < 0) | 450 | if (ret < 0) |
451 | return NULL; | 451 | return NULL; |
452 | 452 | ||
453 | return kmemdup(edid, EDID_LENGTH, GFP_KERNEL); | 453 | return kmemdup(edid, EDID_LENGTH, GFP_KERNEL); |
454 | } | 454 | } |
455 | 455 |
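
The nouveau hunks above keep the existing _ROM retrieval flow and only swap the handle-lookup macro: ACPI_HANDLE(&pdev->dev) yields the device's ACPI companion, _ROM is resolved beneath it, and the method is evaluated with (offset, length) integer arguments. A minimal sketch of that pattern, assuming a PCI driver context; the helper name and the simplified error handling are illustrative only and are not part of this commit:

	#include <linux/acpi.h>
	#include <linux/pci.h>
	#include <linux/string.h>

	/* Sketch only: fetch one chunk of a device's option ROM via _ROM. */
	static int example_read_rom_chunk(struct pci_dev *pdev, u8 *buf,
					  int offset, int len)
	{
		acpi_handle dhandle, rom_handle;
		union acpi_object args[2], *obj;
		struct acpi_object_list arg_list = { .count = 2, .pointer = args };
		struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
		acpi_status status;

		dhandle = ACPI_HANDLE(&pdev->dev);	/* ACPI companion, may be NULL */
		if (!dhandle)
			return -ENODEV;

		if (ACPI_FAILURE(acpi_get_handle(dhandle, "_ROM", &rom_handle)))
			return -ENODEV;

		args[0].type = ACPI_TYPE_INTEGER;	/* byte offset into the ROM */
		args[0].integer.value = offset;
		args[1].type = ACPI_TYPE_INTEGER;	/* chunk length in bytes */
		args[1].integer.value = len;

		status = acpi_evaluate_object(rom_handle, NULL, &arg_list, &out);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		obj = out.pointer;			/* expected ACPI buffer object */
		memcpy(buf + offset, obj->buffer.pointer, len);
		kfree(out.pointer);
		return len;
	}
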
drivers/gpu/drm/radeon/radeon_acpi.c
1 | /* | 1 | /* |
2 | * Copyright 2012 Advanced Micro Devices, Inc. | 2 | * Copyright 2012 Advanced Micro Devices, Inc. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice shall be included in | 11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. | 12 | * all copies or substantial portions of the Software. |
13 | * | 13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
25 | #include <linux/acpi.h> | 25 | #include <linux/acpi.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/power_supply.h> | 27 | #include <linux/power_supply.h> |
28 | #include <acpi/acpi_drivers.h> | 28 | #include <acpi/acpi_drivers.h> |
29 | #include <acpi/acpi_bus.h> | 29 | #include <acpi/acpi_bus.h> |
30 | #include <acpi/video.h> | 30 | #include <acpi/video.h> |
31 | 31 | ||
32 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
33 | #include <drm/drm_crtc_helper.h> | 33 | #include <drm/drm_crtc_helper.h> |
34 | #include "radeon.h" | 34 | #include "radeon.h" |
35 | #include "radeon_acpi.h" | 35 | #include "radeon_acpi.h" |
36 | #include "atom.h" | 36 | #include "atom.h" |
37 | 37 | ||
38 | #include <linux/vga_switcheroo.h> | 38 | #include <linux/vga_switcheroo.h> |
39 | 39 | ||
40 | #define ACPI_AC_CLASS "ac_adapter" | 40 | #define ACPI_AC_CLASS "ac_adapter" |
41 | 41 | ||
42 | extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev); | 42 | extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev); |
43 | 43 | ||
44 | struct atif_verify_interface { | 44 | struct atif_verify_interface { |
45 | u16 size; /* structure size in bytes (includes size field) */ | 45 | u16 size; /* structure size in bytes (includes size field) */ |
46 | u16 version; /* version */ | 46 | u16 version; /* version */ |
47 | u32 notification_mask; /* supported notifications mask */ | 47 | u32 notification_mask; /* supported notifications mask */ |
48 | u32 function_bits; /* supported functions bit vector */ | 48 | u32 function_bits; /* supported functions bit vector */ |
49 | } __packed; | 49 | } __packed; |
50 | 50 | ||
51 | struct atif_system_params { | 51 | struct atif_system_params { |
52 | u16 size; /* structure size in bytes (includes size field) */ | 52 | u16 size; /* structure size in bytes (includes size field) */ |
53 | u32 valid_mask; /* valid flags mask */ | 53 | u32 valid_mask; /* valid flags mask */ |
54 | u32 flags; /* flags */ | 54 | u32 flags; /* flags */ |
55 | u8 command_code; /* notify command code */ | 55 | u8 command_code; /* notify command code */ |
56 | } __packed; | 56 | } __packed; |
57 | 57 | ||
58 | struct atif_sbios_requests { | 58 | struct atif_sbios_requests { |
59 | u16 size; /* structure size in bytes (includes size field) */ | 59 | u16 size; /* structure size in bytes (includes size field) */ |
60 | u32 pending; /* pending sbios requests */ | 60 | u32 pending; /* pending sbios requests */ |
61 | u8 panel_exp_mode; /* panel expansion mode */ | 61 | u8 panel_exp_mode; /* panel expansion mode */ |
62 | u8 thermal_gfx; /* thermal state: target gfx controller */ | 62 | u8 thermal_gfx; /* thermal state: target gfx controller */ |
63 | u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */ | 63 | u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */ |
64 | u8 forced_power_gfx; /* forced power state: target gfx controller */ | 64 | u8 forced_power_gfx; /* forced power state: target gfx controller */ |
65 | u8 forced_power_state; /* forced power state: state id */ | 65 | u8 forced_power_state; /* forced power state: state id */ |
66 | u8 system_power_src; /* system power source */ | 66 | u8 system_power_src; /* system power source */ |
67 | u8 backlight_level; /* panel backlight level (0-255) */ | 67 | u8 backlight_level; /* panel backlight level (0-255) */ |
68 | } __packed; | 68 | } __packed; |
69 | 69 | ||
70 | #define ATIF_NOTIFY_MASK 0x3 | 70 | #define ATIF_NOTIFY_MASK 0x3 |
71 | #define ATIF_NOTIFY_NONE 0 | 71 | #define ATIF_NOTIFY_NONE 0 |
72 | #define ATIF_NOTIFY_81 1 | 72 | #define ATIF_NOTIFY_81 1 |
73 | #define ATIF_NOTIFY_N 2 | 73 | #define ATIF_NOTIFY_N 2 |
74 | 74 | ||
75 | struct atcs_verify_interface { | 75 | struct atcs_verify_interface { |
76 | u16 size; /* structure size in bytes (includes size field) */ | 76 | u16 size; /* structure size in bytes (includes size field) */ |
77 | u16 version; /* version */ | 77 | u16 version; /* version */ |
78 | u32 function_bits; /* supported functions bit vector */ | 78 | u32 function_bits; /* supported functions bit vector */ |
79 | } __packed; | 79 | } __packed; |
80 | 80 | ||
81 | #define ATCS_VALID_FLAGS_MASK 0x3 | 81 | #define ATCS_VALID_FLAGS_MASK 0x3 |
82 | 82 | ||
83 | struct atcs_pref_req_input { | 83 | struct atcs_pref_req_input { |
84 | u16 size; /* structure size in bytes (includes size field) */ | 84 | u16 size; /* structure size in bytes (includes size field) */ |
85 | u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ | 85 | u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ |
86 | u16 valid_flags_mask; /* valid flags mask */ | 86 | u16 valid_flags_mask; /* valid flags mask */ |
87 | u16 flags; /* flags */ | 87 | u16 flags; /* flags */ |
88 | u8 req_type; /* request type */ | 88 | u8 req_type; /* request type */ |
89 | u8 perf_req; /* performance request */ | 89 | u8 perf_req; /* performance request */ |
90 | } __packed; | 90 | } __packed; |
91 | 91 | ||
92 | struct atcs_pref_req_output { | 92 | struct atcs_pref_req_output { |
93 | u16 size; /* structure size in bytes (includes size field) */ | 93 | u16 size; /* structure size in bytes (includes size field) */ |
94 | u8 ret_val; /* return value */ | 94 | u8 ret_val; /* return value */ |
95 | } __packed; | 95 | } __packed; |
96 | 96 | ||
97 | /* Call the ATIF method | 97 | /* Call the ATIF method |
98 | */ | 98 | */ |
99 | /** | 99 | /** |
100 | * radeon_atif_call - call an ATIF method | 100 | * radeon_atif_call - call an ATIF method |
101 | * | 101 | * |
102 | * @handle: acpi handle | 102 | * @handle: acpi handle |
103 | * @function: the ATIF function to execute | 103 | * @function: the ATIF function to execute |
104 | * @params: ATIF function params | 104 | * @params: ATIF function params |
105 | * | 105 | * |
106 | * Executes the requested ATIF function (all asics). | 106 | * Executes the requested ATIF function (all asics). |
107 | * Returns a pointer to the acpi output buffer. | 107 | * Returns a pointer to the acpi output buffer. |
108 | */ | 108 | */ |
109 | static union acpi_object *radeon_atif_call(acpi_handle handle, int function, | 109 | static union acpi_object *radeon_atif_call(acpi_handle handle, int function, |
110 | struct acpi_buffer *params) | 110 | struct acpi_buffer *params) |
111 | { | 111 | { |
112 | acpi_status status; | 112 | acpi_status status; |
113 | union acpi_object atif_arg_elements[2]; | 113 | union acpi_object atif_arg_elements[2]; |
114 | struct acpi_object_list atif_arg; | 114 | struct acpi_object_list atif_arg; |
115 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 115 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
116 | 116 | ||
117 | atif_arg.count = 2; | 117 | atif_arg.count = 2; |
118 | atif_arg.pointer = &atif_arg_elements[0]; | 118 | atif_arg.pointer = &atif_arg_elements[0]; |
119 | 119 | ||
120 | atif_arg_elements[0].type = ACPI_TYPE_INTEGER; | 120 | atif_arg_elements[0].type = ACPI_TYPE_INTEGER; |
121 | atif_arg_elements[0].integer.value = function; | 121 | atif_arg_elements[0].integer.value = function; |
122 | 122 | ||
123 | if (params) { | 123 | if (params) { |
124 | atif_arg_elements[1].type = ACPI_TYPE_BUFFER; | 124 | atif_arg_elements[1].type = ACPI_TYPE_BUFFER; |
125 | atif_arg_elements[1].buffer.length = params->length; | 125 | atif_arg_elements[1].buffer.length = params->length; |
126 | atif_arg_elements[1].buffer.pointer = params->pointer; | 126 | atif_arg_elements[1].buffer.pointer = params->pointer; |
127 | } else { | 127 | } else { |
128 | /* We need a second fake parameter */ | 128 | /* We need a second fake parameter */ |
129 | atif_arg_elements[1].type = ACPI_TYPE_INTEGER; | 129 | atif_arg_elements[1].type = ACPI_TYPE_INTEGER; |
130 | atif_arg_elements[1].integer.value = 0; | 130 | atif_arg_elements[1].integer.value = 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); | 133 | status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); |
134 | 134 | ||
135 | /* Fail only if calling the method fails and ATIF is supported */ | 135 | /* Fail only if calling the method fails and ATIF is supported */ |
136 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 136 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
137 | DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", | 137 | DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", |
138 | acpi_format_exception(status)); | 138 | acpi_format_exception(status)); |
139 | kfree(buffer.pointer); | 139 | kfree(buffer.pointer); |
140 | return NULL; | 140 | return NULL; |
141 | } | 141 | } |
142 | 142 | ||
143 | return buffer.pointer; | 143 | return buffer.pointer; |
144 | } | 144 | } |
145 | 145 | ||
146 | /** | 146 | /** |
147 | * radeon_atif_parse_notification - parse supported notifications | 147 | * radeon_atif_parse_notification - parse supported notifications |
148 | * | 148 | * |
149 | * @n: supported notifications struct | 149 | * @n: supported notifications struct |
150 | * @mask: supported notifications mask from ATIF | 150 | * @mask: supported notifications mask from ATIF |
151 | * | 151 | * |
152 | * Use the supported notifications mask from ATIF function | 152 | * Use the supported notifications mask from ATIF function |
153 | * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications | 153 | * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications |
154 | * are supported (all asics). | 154 | * are supported (all asics). |
155 | */ | 155 | */ |
156 | static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask) | 156 | static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask) |
157 | { | 157 | { |
158 | n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; | 158 | n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; |
159 | n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; | 159 | n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; |
160 | n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; | 160 | n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; |
161 | n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; | 161 | n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; |
162 | n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; | 162 | n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; |
163 | n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; | 163 | n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; |
164 | n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; | 164 | n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; |
165 | n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; | 165 | n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; |
166 | n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; | 166 | n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; |
167 | } | 167 | } |
168 | 168 | ||
169 | /** | 169 | /** |
170 | * radeon_atif_parse_functions - parse supported functions | 170 | * radeon_atif_parse_functions - parse supported functions |
171 | * | 171 | * |
172 | * @f: supported functions struct | 172 | * @f: supported functions struct |
173 | * @mask: supported functions mask from ATIF | 173 | * @mask: supported functions mask from ATIF |
174 | * | 174 | * |
175 | * Use the supported functions mask from ATIF function | 175 | * Use the supported functions mask from ATIF function |
176 | * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions | 176 | * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions |
177 | * are supported (all asics). | 177 | * are supported (all asics). |
178 | */ | 178 | */ |
179 | static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask) | 179 | static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask) |
180 | { | 180 | { |
181 | f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; | 181 | f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; |
182 | f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; | 182 | f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; |
183 | f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED; | 183 | f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED; |
184 | f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED; | 184 | f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED; |
185 | f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED; | 185 | f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED; |
186 | f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED; | 186 | f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED; |
187 | f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED; | 187 | f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED; |
188 | f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED; | 188 | f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED; |
189 | f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; | 189 | f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; |
190 | f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED; | 190 | f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED; |
191 | } | 191 | } |
192 | 192 | ||
193 | /** | 193 | /** |
194 | * radeon_atif_verify_interface - verify ATIF | 194 | * radeon_atif_verify_interface - verify ATIF |
195 | * | 195 | * |
196 | * @handle: acpi handle | 196 | * @handle: acpi handle |
197 | * @atif: radeon atif struct | 197 | * @atif: radeon atif struct |
198 | * | 198 | * |
199 | * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function | 199 | * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function |
200 | * to initialize ATIF and determine what features are supported | 200 | * to initialize ATIF and determine what features are supported |
201 | * (all asics). | 201 | * (all asics). |
202 | * returns 0 on success, error on failure. | 202 | * returns 0 on success, error on failure. |
203 | */ | 203 | */ |
204 | static int radeon_atif_verify_interface(acpi_handle handle, | 204 | static int radeon_atif_verify_interface(acpi_handle handle, |
205 | struct radeon_atif *atif) | 205 | struct radeon_atif *atif) |
206 | { | 206 | { |
207 | union acpi_object *info; | 207 | union acpi_object *info; |
208 | struct atif_verify_interface output; | 208 | struct atif_verify_interface output; |
209 | size_t size; | 209 | size_t size; |
210 | int err = 0; | 210 | int err = 0; |
211 | 211 | ||
212 | info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); | 212 | info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); |
213 | if (!info) | 213 | if (!info) |
214 | return -EIO; | 214 | return -EIO; |
215 | 215 | ||
216 | memset(&output, 0, sizeof(output)); | 216 | memset(&output, 0, sizeof(output)); |
217 | 217 | ||
218 | size = *(u16 *) info->buffer.pointer; | 218 | size = *(u16 *) info->buffer.pointer; |
219 | if (size < 12) { | 219 | if (size < 12) { |
220 | DRM_INFO("ATIF buffer is too small: %zu\n", size); | 220 | DRM_INFO("ATIF buffer is too small: %zu\n", size); |
221 | err = -EINVAL; | 221 | err = -EINVAL; |
222 | goto out; | 222 | goto out; |
223 | } | 223 | } |
224 | size = min(sizeof(output), size); | 224 | size = min(sizeof(output), size); |
225 | 225 | ||
226 | memcpy(&output, info->buffer.pointer, size); | 226 | memcpy(&output, info->buffer.pointer, size); |
227 | 227 | ||
228 | /* TODO: check version? */ | 228 | /* TODO: check version? */ |
229 | DRM_DEBUG_DRIVER("ATIF version %u\n", output.version); | 229 | DRM_DEBUG_DRIVER("ATIF version %u\n", output.version); |
230 | 230 | ||
231 | radeon_atif_parse_notification(&atif->notifications, output.notification_mask); | 231 | radeon_atif_parse_notification(&atif->notifications, output.notification_mask); |
232 | radeon_atif_parse_functions(&atif->functions, output.function_bits); | 232 | radeon_atif_parse_functions(&atif->functions, output.function_bits); |
233 | 233 | ||
234 | out: | 234 | out: |
235 | kfree(info); | 235 | kfree(info); |
236 | return err; | 236 | return err; |
237 | } | 237 | } |
238 | 238 | ||
239 | /** | 239 | /** |
240 | * radeon_atif_get_notification_params - determine notify configuration | 240 | * radeon_atif_get_notification_params - determine notify configuration |
241 | * | 241 | * |
242 | * @handle: acpi handle | 242 | * @handle: acpi handle |
243 | * @n: atif notification configuration struct | 243 | * @n: atif notification configuration struct |
244 | * | 244 | * |
245 | * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function | 245 | * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function |
246 | * to determine if a notifier is used and if so which one | 246 | * to determine if a notifier is used and if so which one |
247 | * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n) | 247 | * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n) |
248 | * where n is specified in the result if a notifier is used. | 248 | * where n is specified in the result if a notifier is used. |
249 | * Returns 0 on success, error on failure. | 249 | * Returns 0 on success, error on failure. |
250 | */ | 250 | */ |
251 | static int radeon_atif_get_notification_params(acpi_handle handle, | 251 | static int radeon_atif_get_notification_params(acpi_handle handle, |
252 | struct radeon_atif_notification_cfg *n) | 252 | struct radeon_atif_notification_cfg *n) |
253 | { | 253 | { |
254 | union acpi_object *info; | 254 | union acpi_object *info; |
255 | struct atif_system_params params; | 255 | struct atif_system_params params; |
256 | size_t size; | 256 | size_t size; |
257 | int err = 0; | 257 | int err = 0; |
258 | 258 | ||
259 | info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); | 259 | info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); |
260 | if (!info) { | 260 | if (!info) { |
261 | err = -EIO; | 261 | err = -EIO; |
262 | goto out; | 262 | goto out; |
263 | } | 263 | } |
264 | 264 | ||
265 | size = *(u16 *) info->buffer.pointer; | 265 | size = *(u16 *) info->buffer.pointer; |
266 | if (size < 10) { | 266 | if (size < 10) { |
267 | err = -EINVAL; | 267 | err = -EINVAL; |
268 | goto out; | 268 | goto out; |
269 | } | 269 | } |
270 | 270 | ||
271 | memset(¶ms, 0, sizeof(params)); | 271 | memset(¶ms, 0, sizeof(params)); |
272 | size = min(sizeof(params), size); | 272 | size = min(sizeof(params), size); |
273 | memcpy(¶ms, info->buffer.pointer, size); | 273 | memcpy(¶ms, info->buffer.pointer, size); |
274 | 274 | ||
275 | DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n", | 275 | DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n", |
276 | params.valid_mask, params.flags); | 276 | params.valid_mask, params.flags); |
277 | params.flags = params.flags & params.valid_mask; | 277 | params.flags = params.flags & params.valid_mask; |
278 | 278 | ||
279 | if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) { | 279 | if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) { |
280 | n->enabled = false; | 280 | n->enabled = false; |
281 | n->command_code = 0; | 281 | n->command_code = 0; |
282 | } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) { | 282 | } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) { |
283 | n->enabled = true; | 283 | n->enabled = true; |
284 | n->command_code = 0x81; | 284 | n->command_code = 0x81; |
285 | } else { | 285 | } else { |
286 | if (size < 11) { | 286 | if (size < 11) { |
287 | err = -EINVAL; | 287 | err = -EINVAL; |
288 | goto out; | 288 | goto out; |
289 | } | 289 | } |
290 | n->enabled = true; | 290 | n->enabled = true; |
291 | n->command_code = params.command_code; | 291 | n->command_code = params.command_code; |
292 | } | 292 | } |
293 | 293 | ||
294 | out: | 294 | out: |
295 | DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n", | 295 | DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n", |
296 | (n->enabled ? "enabled" : "disabled"), | 296 | (n->enabled ? "enabled" : "disabled"), |
297 | n->command_code); | 297 | n->command_code); |
298 | kfree(info); | 298 | kfree(info); |
299 | return err; | 299 | return err; |
300 | } | 300 | } |
301 | 301 | ||
302 | /** | 302 | /** |
303 | * radeon_atif_get_sbios_requests - get requested sbios event | 303 | * radeon_atif_get_sbios_requests - get requested sbios event |
304 | * | 304 | * |
305 | * @handle: acpi handle | 305 | * @handle: acpi handle |
306 | * @req: atif sbios request struct | 306 | * @req: atif sbios request struct |
307 | * | 307 | * |
308 | * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function | 308 | * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function |
309 | * to determine what requests the sbios is making to the driver | 309 | * to determine what requests the sbios is making to the driver |
310 | * (all asics). | 310 | * (all asics). |
311 | * Returns 0 on success, error on failure. | 311 | * Returns 0 on success, error on failure. |
312 | */ | 312 | */ |
313 | static int radeon_atif_get_sbios_requests(acpi_handle handle, | 313 | static int radeon_atif_get_sbios_requests(acpi_handle handle, |
314 | struct atif_sbios_requests *req) | 314 | struct atif_sbios_requests *req) |
315 | { | 315 | { |
316 | union acpi_object *info; | 316 | union acpi_object *info; |
317 | size_t size; | 317 | size_t size; |
318 | int count = 0; | 318 | int count = 0; |
319 | 319 | ||
320 | info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); | 320 | info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); |
321 | if (!info) | 321 | if (!info) |
322 | return -EIO; | 322 | return -EIO; |
323 | 323 | ||
324 | size = *(u16 *)info->buffer.pointer; | 324 | size = *(u16 *)info->buffer.pointer; |
325 | if (size < 0xd) { | 325 | if (size < 0xd) { |
326 | count = -EINVAL; | 326 | count = -EINVAL; |
327 | goto out; | 327 | goto out; |
328 | } | 328 | } |
329 | memset(req, 0, sizeof(*req)); | 329 | memset(req, 0, sizeof(*req)); |
330 | 330 | ||
331 | size = min(sizeof(*req), size); | 331 | size = min(sizeof(*req), size); |
332 | memcpy(req, info->buffer.pointer, size); | 332 | memcpy(req, info->buffer.pointer, size); |
333 | DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending); | 333 | DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending); |
334 | 334 | ||
335 | count = hweight32(req->pending); | 335 | count = hweight32(req->pending); |
336 | 336 | ||
337 | out: | 337 | out: |
338 | kfree(info); | 338 | kfree(info); |
339 | return count; | 339 | return count; |
340 | } | 340 | } |
341 | 341 | ||
342 | /** | 342 | /** |
343 | * radeon_atif_handler - handle ATIF notify requests | 343 | * radeon_atif_handler - handle ATIF notify requests |
344 | * | 344 | * |
345 | * @rdev: radeon_device pointer | 345 | * @rdev: radeon_device pointer |
346 | * @event: atif sbios request struct | 346 | * @event: atif sbios request struct |
347 | * | 347 | * |
348 | * Checks the acpi event and if it matches an atif event, | 348 | * Checks the acpi event and if it matches an atif event, |
349 | * handles it. | 349 | * handles it. |
350 | * Returns NOTIFY code | 350 | * Returns NOTIFY code |
351 | */ | 351 | */ |
352 | int radeon_atif_handler(struct radeon_device *rdev, | 352 | int radeon_atif_handler(struct radeon_device *rdev, |
353 | struct acpi_bus_event *event) | 353 | struct acpi_bus_event *event) |
354 | { | 354 | { |
355 | struct radeon_atif *atif = &rdev->atif; | 355 | struct radeon_atif *atif = &rdev->atif; |
356 | struct atif_sbios_requests req; | 356 | struct atif_sbios_requests req; |
357 | acpi_handle handle; | 357 | acpi_handle handle; |
358 | int count; | 358 | int count; |
359 | 359 | ||
360 | DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", | 360 | DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", |
361 | event->device_class, event->type); | 361 | event->device_class, event->type); |
362 | 362 | ||
363 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) | 363 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) |
364 | return NOTIFY_DONE; | 364 | return NOTIFY_DONE; |
365 | 365 | ||
366 | if (!atif->notification_cfg.enabled || | 366 | if (!atif->notification_cfg.enabled || |
367 | event->type != atif->notification_cfg.command_code) | 367 | event->type != atif->notification_cfg.command_code) |
368 | /* Not our event */ | 368 | /* Not our event */ |
369 | return NOTIFY_DONE; | 369 | return NOTIFY_DONE; |
370 | 370 | ||
371 | /* Check pending SBIOS requests */ | 371 | /* Check pending SBIOS requests */ |
372 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 372 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
373 | count = radeon_atif_get_sbios_requests(handle, &req); | 373 | count = radeon_atif_get_sbios_requests(handle, &req); |
374 | 374 | ||
375 | if (count <= 0) | 375 | if (count <= 0) |
376 | return NOTIFY_DONE; | 376 | return NOTIFY_DONE; |
377 | 377 | ||
378 | DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); | 378 | DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); |
379 | 379 | ||
380 | if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) { | 380 | if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) { |
381 | struct radeon_encoder *enc = atif->encoder_for_bl; | 381 | struct radeon_encoder *enc = atif->encoder_for_bl; |
382 | 382 | ||
383 | if (enc) { | 383 | if (enc) { |
384 | DRM_DEBUG_DRIVER("Changing brightness to %d\n", | 384 | DRM_DEBUG_DRIVER("Changing brightness to %d\n", |
385 | req.backlight_level); | 385 | req.backlight_level); |
386 | 386 | ||
387 | radeon_set_backlight_level(rdev, enc, req.backlight_level); | 387 | radeon_set_backlight_level(rdev, enc, req.backlight_level); |
388 | 388 | ||
389 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | 389 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
390 | if (rdev->is_atom_bios) { | 390 | if (rdev->is_atom_bios) { |
391 | struct radeon_encoder_atom_dig *dig = enc->enc_priv; | 391 | struct radeon_encoder_atom_dig *dig = enc->enc_priv; |
392 | backlight_force_update(dig->bl_dev, | 392 | backlight_force_update(dig->bl_dev, |
393 | BACKLIGHT_UPDATE_HOTKEY); | 393 | BACKLIGHT_UPDATE_HOTKEY); |
394 | } else { | 394 | } else { |
395 | struct radeon_encoder_lvds *dig = enc->enc_priv; | 395 | struct radeon_encoder_lvds *dig = enc->enc_priv; |
396 | backlight_force_update(dig->bl_dev, | 396 | backlight_force_update(dig->bl_dev, |
397 | BACKLIGHT_UPDATE_HOTKEY); | 397 | BACKLIGHT_UPDATE_HOTKEY); |
398 | } | 398 | } |
399 | #endif | 399 | #endif |
400 | } | 400 | } |
401 | } | 401 | } |
402 | /* TODO: check other events */ | 402 | /* TODO: check other events */ |
403 | 403 | ||
404 | /* We've handled the event, stop the notifier chain. The ACPI interface | 404 | /* We've handled the event, stop the notifier chain. The ACPI interface |
405 | * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to | 405 | * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to |
406 | * userspace if the event was generated only to signal a SBIOS | 406 | * userspace if the event was generated only to signal a SBIOS |
407 | * request. | 407 | * request. |
408 | */ | 408 | */ |
409 | return NOTIFY_BAD; | 409 | return NOTIFY_BAD; |
410 | } | 410 | } |
411 | 411 | ||
412 | /* Call the ATCS method | 412 | /* Call the ATCS method |
413 | */ | 413 | */ |
414 | /** | 414 | /** |
415 | * radeon_atcs_call - call an ATCS method | 415 | * radeon_atcs_call - call an ATCS method |
416 | * | 416 | * |
417 | * @handle: acpi handle | 417 | * @handle: acpi handle |
418 | * @function: the ATCS function to execute | 418 | * @function: the ATCS function to execute |
419 | * @params: ATCS function params | 419 | * @params: ATCS function params |
420 | * | 420 | * |
421 | * Executes the requested ATCS function (all asics). | 421 | * Executes the requested ATCS function (all asics). |
422 | * Returns a pointer to the acpi output buffer. | 422 | * Returns a pointer to the acpi output buffer. |
423 | */ | 423 | */ |
424 | static union acpi_object *radeon_atcs_call(acpi_handle handle, int function, | 424 | static union acpi_object *radeon_atcs_call(acpi_handle handle, int function, |
425 | struct acpi_buffer *params) | 425 | struct acpi_buffer *params) |
426 | { | 426 | { |
427 | acpi_status status; | 427 | acpi_status status; |
428 | union acpi_object atcs_arg_elements[2]; | 428 | union acpi_object atcs_arg_elements[2]; |
429 | struct acpi_object_list atcs_arg; | 429 | struct acpi_object_list atcs_arg; |
430 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 430 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
431 | 431 | ||
432 | atcs_arg.count = 2; | 432 | atcs_arg.count = 2; |
433 | atcs_arg.pointer = &atcs_arg_elements[0]; | 433 | atcs_arg.pointer = &atcs_arg_elements[0]; |
434 | 434 | ||
435 | atcs_arg_elements[0].type = ACPI_TYPE_INTEGER; | 435 | atcs_arg_elements[0].type = ACPI_TYPE_INTEGER; |
436 | atcs_arg_elements[0].integer.value = function; | 436 | atcs_arg_elements[0].integer.value = function; |
437 | 437 | ||
438 | if (params) { | 438 | if (params) { |
439 | atcs_arg_elements[1].type = ACPI_TYPE_BUFFER; | 439 | atcs_arg_elements[1].type = ACPI_TYPE_BUFFER; |
440 | atcs_arg_elements[1].buffer.length = params->length; | 440 | atcs_arg_elements[1].buffer.length = params->length; |
441 | atcs_arg_elements[1].buffer.pointer = params->pointer; | 441 | atcs_arg_elements[1].buffer.pointer = params->pointer; |
442 | } else { | 442 | } else { |
443 | /* We need a second fake parameter */ | 443 | /* We need a second fake parameter */ |
444 | atcs_arg_elements[1].type = ACPI_TYPE_INTEGER; | 444 | atcs_arg_elements[1].type = ACPI_TYPE_INTEGER; |
445 | atcs_arg_elements[1].integer.value = 0; | 445 | atcs_arg_elements[1].integer.value = 0; |
446 | } | 446 | } |
447 | 447 | ||
448 | status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer); | 448 | status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer); |
449 | 449 | ||
450 | /* Fail only if calling the method fails and ATCS is supported */ | 450 | /* Fail only if calling the method fails and ATCS is supported */ |
451 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 451 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
452 | DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n", | 452 | DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n", |
453 | acpi_format_exception(status)); | 453 | acpi_format_exception(status)); |
454 | kfree(buffer.pointer); | 454 | kfree(buffer.pointer); |
455 | return NULL; | 455 | return NULL; |
456 | } | 456 | } |
457 | 457 | ||
458 | return buffer.pointer; | 458 | return buffer.pointer; |
459 | } | 459 | } |
460 | 460 | ||
461 | /** | 461 | /** |
462 | * radeon_atcs_parse_functions - parse supported functions | 462 | * radeon_atcs_parse_functions - parse supported functions |
463 | * | 463 | * |
464 | * @f: supported functions struct | 464 | * @f: supported functions struct |
465 | * @mask: supported functions mask from ATCS | 465 | * @mask: supported functions mask from ATCS |
466 | * | 466 | * |
467 | * Use the supported functions mask from ATCS function | 467 | * Use the supported functions mask from ATCS function |
468 | * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions | 468 | * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions |
469 | * are supported (all asics). | 469 | * are supported (all asics). |
470 | */ | 470 | */ |
471 | static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask) | 471 | static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask) |
472 | { | 472 | { |
473 | f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED; | 473 | f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED; |
474 | f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED; | 474 | f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED; |
475 | f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED; | 475 | f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED; |
476 | f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED; | 476 | f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED; |
477 | } | 477 | } |
478 | 478 | ||
479 | /** | 479 | /** |
480 | * radeon_atcs_verify_interface - verify ATCS | 480 | * radeon_atcs_verify_interface - verify ATCS |
481 | * | 481 | * |
482 | * @handle: acpi handle | 482 | * @handle: acpi handle |
483 | * @atcs: radeon atcs struct | 483 | * @atcs: radeon atcs struct |
484 | * | 484 | * |
485 | * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function | 485 | * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function |
486 | * to initialize ATCS and determine what features are supported | 486 | * to initialize ATCS and determine what features are supported |
487 | * (all asics). | 487 | * (all asics). |
488 | * returns 0 on success, error on failure. | 488 | * returns 0 on success, error on failure. |
489 | */ | 489 | */ |
490 | static int radeon_atcs_verify_interface(acpi_handle handle, | 490 | static int radeon_atcs_verify_interface(acpi_handle handle, |
491 | struct radeon_atcs *atcs) | 491 | struct radeon_atcs *atcs) |
492 | { | 492 | { |
493 | union acpi_object *info; | 493 | union acpi_object *info; |
494 | struct atcs_verify_interface output; | 494 | struct atcs_verify_interface output; |
495 | size_t size; | 495 | size_t size; |
496 | int err = 0; | 496 | int err = 0; |
497 | 497 | ||
498 | info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL); | 498 | info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL); |
499 | if (!info) | 499 | if (!info) |
500 | return -EIO; | 500 | return -EIO; |
501 | 501 | ||
502 | memset(&output, 0, sizeof(output)); | 502 | memset(&output, 0, sizeof(output)); |
503 | 503 | ||
504 | size = *(u16 *) info->buffer.pointer; | 504 | size = *(u16 *) info->buffer.pointer; |
505 | if (size < 8) { | 505 | if (size < 8) { |
506 | DRM_INFO("ATCS buffer is too small: %zu\n", size); | 506 | DRM_INFO("ATCS buffer is too small: %zu\n", size); |
507 | err = -EINVAL; | 507 | err = -EINVAL; |
508 | goto out; | 508 | goto out; |
509 | } | 509 | } |
510 | size = min(sizeof(output), size); | 510 | size = min(sizeof(output), size); |
511 | 511 | ||
512 | memcpy(&output, info->buffer.pointer, size); | 512 | memcpy(&output, info->buffer.pointer, size); |
513 | 513 | ||
514 | /* TODO: check version? */ | 514 | /* TODO: check version? */ |
515 | DRM_DEBUG_DRIVER("ATCS version %u\n", output.version); | 515 | DRM_DEBUG_DRIVER("ATCS version %u\n", output.version); |
516 | 516 | ||
517 | radeon_atcs_parse_functions(&atcs->functions, output.function_bits); | 517 | radeon_atcs_parse_functions(&atcs->functions, output.function_bits); |
518 | 518 | ||
519 | out: | 519 | out: |
520 | kfree(info); | 520 | kfree(info); |
521 | return err; | 521 | return err; |
522 | } | 522 | } |
523 | 523 | ||
524 | /** | 524 | /** |
525 | * radeon_acpi_is_pcie_performance_request_supported | 525 | * radeon_acpi_is_pcie_performance_request_supported |
526 | * | 526 | * |
527 | * @rdev: radeon_device pointer | 527 | * @rdev: radeon_device pointer |
528 | * | 528 | * |
529 | * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods | 529 | * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods |
530 | * are supported (all asics). | 530 | * are supported (all asics). |
531 | * returns true if supported, false if not. | 531 | * returns true if supported, false if not. |
532 | */ | 532 | */ |
533 | bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev) | 533 | bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev) |
534 | { | 534 | { |
535 | struct radeon_atcs *atcs = &rdev->atcs; | 535 | struct radeon_atcs *atcs = &rdev->atcs; |
536 | 536 | ||
537 | if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy) | 537 | if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy) |
538 | return true; | 538 | return true; |
539 | 539 | ||
540 | return false; | 540 | return false; |
541 | } | 541 | } |
542 | 542 | ||
543 | /** | 543 | /** |
544 | * radeon_acpi_pcie_notify_device_ready | 544 | * radeon_acpi_pcie_notify_device_ready |
545 | * | 545 | * |
546 | * @rdev: radeon_device pointer | 546 | * @rdev: radeon_device pointer |
547 | * | 547 | * |
548 | * Executes the PCIE_DEVICE_READY_NOTIFICATION method | 548 | * Executes the PCIE_DEVICE_READY_NOTIFICATION method |
549 | * (all asics). | 549 | * (all asics). |
550 | * returns 0 on success, error on failure. | 550 | * returns 0 on success, error on failure. |
551 | */ | 551 | */ |
552 | int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev) | 552 | int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev) |
553 | { | 553 | { |
554 | acpi_handle handle; | 554 | acpi_handle handle; |
555 | union acpi_object *info; | 555 | union acpi_object *info; |
556 | struct radeon_atcs *atcs = &rdev->atcs; | 556 | struct radeon_atcs *atcs = &rdev->atcs; |
557 | 557 | ||
558 | /* Get the device handle */ | 558 | /* Get the device handle */ |
559 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 559 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
560 | if (!handle) | 560 | if (!handle) |
561 | return -EINVAL; | 561 | return -EINVAL; |
562 | 562 | ||
563 | if (!atcs->functions.pcie_dev_rdy) | 563 | if (!atcs->functions.pcie_dev_rdy) |
564 | return -EINVAL; | 564 | return -EINVAL; |
565 | 565 | ||
566 | info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL); | 566 | info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL); |
567 | if (!info) | 567 | if (!info) |
568 | return -EIO; | 568 | return -EIO; |
569 | 569 | ||
570 | kfree(info); | 570 | kfree(info); |
571 | 571 | ||
572 | return 0; | 572 | return 0; |
573 | } | 573 | } |
574 | 574 | ||
575 | /** | 575 | /** |
576 | * radeon_acpi_pcie_performance_request | 576 | * radeon_acpi_pcie_performance_request |
577 | * | 577 | * |
578 | * @rdev: radeon_device pointer | 578 | * @rdev: radeon_device pointer |
579 | * @perf_req: requested perf level (pcie gen speed) | 579 | * @perf_req: requested perf level (pcie gen speed) |
580 | * @advertise: set advertise caps flag if set | 580 | * @advertise: set advertise caps flag if set |
581 | * | 581 | * |
582 | * Executes the PCIE_PERFORMANCE_REQUEST method to | 582 | * Executes the PCIE_PERFORMANCE_REQUEST method to |
583 | * change the pcie gen speed (all asics). | 583 | * change the pcie gen speed (all asics). |
584 | * returns 0 on success, error on failure. | 584 | * returns 0 on success, error on failure. |
585 | */ | 585 | */ |
586 | int radeon_acpi_pcie_performance_request(struct radeon_device *rdev, | 586 | int radeon_acpi_pcie_performance_request(struct radeon_device *rdev, |
587 | u8 perf_req, bool advertise) | 587 | u8 perf_req, bool advertise) |
588 | { | 588 | { |
589 | acpi_handle handle; | 589 | acpi_handle handle; |
590 | union acpi_object *info; | 590 | union acpi_object *info; |
591 | struct radeon_atcs *atcs = &rdev->atcs; | 591 | struct radeon_atcs *atcs = &rdev->atcs; |
592 | struct atcs_pref_req_input atcs_input; | 592 | struct atcs_pref_req_input atcs_input; |
593 | struct atcs_pref_req_output atcs_output; | 593 | struct atcs_pref_req_output atcs_output; |
594 | struct acpi_buffer params; | 594 | struct acpi_buffer params; |
595 | size_t size; | 595 | size_t size; |
596 | u32 retry = 3; | 596 | u32 retry = 3; |
597 | 597 | ||
598 | /* Get the device handle */ | 598 | /* Get the device handle */ |
599 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 599 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
600 | if (!handle) | 600 | if (!handle) |
601 | return -EINVAL; | 601 | return -EINVAL; |
602 | 602 | ||
603 | if (!atcs->functions.pcie_perf_req) | 603 | if (!atcs->functions.pcie_perf_req) |
604 | return -EINVAL; | 604 | return -EINVAL; |
605 | 605 | ||
606 | atcs_input.size = sizeof(struct atcs_pref_req_input); | 606 | atcs_input.size = sizeof(struct atcs_pref_req_input); |
607 | /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ | 607 | /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ |
608 | atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8); | 608 | atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8); |
609 | atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK; | 609 | atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK; |
610 | atcs_input.flags = ATCS_WAIT_FOR_COMPLETION; | 610 | atcs_input.flags = ATCS_WAIT_FOR_COMPLETION; |
611 | if (advertise) | 611 | if (advertise) |
612 | atcs_input.flags |= ATCS_ADVERTISE_CAPS; | 612 | atcs_input.flags |= ATCS_ADVERTISE_CAPS; |
613 | atcs_input.req_type = ATCS_PCIE_LINK_SPEED; | 613 | atcs_input.req_type = ATCS_PCIE_LINK_SPEED; |
614 | atcs_input.perf_req = perf_req; | 614 | atcs_input.perf_req = perf_req; |
615 | 615 | ||
616 | params.length = sizeof(struct atcs_pref_req_input); | 616 | params.length = sizeof(struct atcs_pref_req_input); |
617 | params.pointer = &atcs_input; | 617 | params.pointer = &atcs_input; |
618 | 618 | ||
619 | while (retry--) { | 619 | while (retry--) { |
620 | info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, ¶ms); | 620 | info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, ¶ms); |
621 | if (!info) | 621 | if (!info) |
622 | return -EIO; | 622 | return -EIO; |
623 | 623 | ||
624 | memset(&atcs_output, 0, sizeof(atcs_output)); | 624 | memset(&atcs_output, 0, sizeof(atcs_output)); |
625 | 625 | ||
626 | size = *(u16 *) info->buffer.pointer; | 626 | size = *(u16 *) info->buffer.pointer; |
627 | if (size < 3) { | 627 | if (size < 3) { |
628 | DRM_INFO("ATCS buffer is too small: %zu\n", size); | 628 | DRM_INFO("ATCS buffer is too small: %zu\n", size); |
629 | kfree(info); | 629 | kfree(info); |
630 | return -EINVAL; | 630 | return -EINVAL; |
631 | } | 631 | } |
632 | size = min(sizeof(atcs_output), size); | 632 | size = min(sizeof(atcs_output), size); |
633 | 633 | ||
634 | memcpy(&atcs_output, info->buffer.pointer, size); | 634 | memcpy(&atcs_output, info->buffer.pointer, size); |
635 | 635 | ||
636 | kfree(info); | 636 | kfree(info); |
637 | 637 | ||
638 | switch (atcs_output.ret_val) { | 638 | switch (atcs_output.ret_val) { |
639 | case ATCS_REQUEST_REFUSED: | 639 | case ATCS_REQUEST_REFUSED: |
640 | default: | 640 | default: |
641 | return -EINVAL; | 641 | return -EINVAL; |
642 | case ATCS_REQUEST_COMPLETE: | 642 | case ATCS_REQUEST_COMPLETE: |
643 | return 0; | 643 | return 0; |
644 | case ATCS_REQUEST_IN_PROGRESS: | 644 | case ATCS_REQUEST_IN_PROGRESS: |
645 | udelay(10); | 645 | udelay(10); |
646 | break; | 646 | break; |
647 | } | 647 | } |
648 | } | 648 | } |
649 | 649 | ||
650 | return 0; | 650 | return 0; |
651 | } | 651 | } |
652 | 652 | ||
653 | /** | 653 | /** |
654 | * radeon_acpi_event - handle notify events | 654 | * radeon_acpi_event - handle notify events |
655 | * | 655 | * |
656 | * @nb: notifier block | 656 | * @nb: notifier block |
657 | * @val: val | 657 | * @val: val |
658 | * @data: acpi event | 658 | * @data: acpi event |
659 | * | 659 | * |
660 | * Calls relevant radeon functions in response to various | 660 | * Calls relevant radeon functions in response to various |
661 | * acpi events. | 661 | * acpi events. |
662 | * Returns NOTIFY code | 662 | * Returns NOTIFY code |
663 | */ | 663 | */ |
664 | static int radeon_acpi_event(struct notifier_block *nb, | 664 | static int radeon_acpi_event(struct notifier_block *nb, |
665 | unsigned long val, | 665 | unsigned long val, |
666 | void *data) | 666 | void *data) |
667 | { | 667 | { |
668 | struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); | 668 | struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); |
669 | struct acpi_bus_event *entry = (struct acpi_bus_event *)data; | 669 | struct acpi_bus_event *entry = (struct acpi_bus_event *)data; |
670 | 670 | ||
671 | if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { | 671 | if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { |
672 | if (power_supply_is_system_supplied() > 0) | 672 | if (power_supply_is_system_supplied() > 0) |
673 | DRM_DEBUG_DRIVER("pm: AC\n"); | 673 | DRM_DEBUG_DRIVER("pm: AC\n"); |
674 | else | 674 | else |
675 | DRM_DEBUG_DRIVER("pm: DC\n"); | 675 | DRM_DEBUG_DRIVER("pm: DC\n"); |
676 | 676 | ||
677 | radeon_pm_acpi_event_handler(rdev); | 677 | radeon_pm_acpi_event_handler(rdev); |
678 | } | 678 | } |
679 | 679 | ||
680 | /* Check for pending SBIOS requests */ | 680 | /* Check for pending SBIOS requests */ |
681 | return radeon_atif_handler(rdev, entry); | 681 | return radeon_atif_handler(rdev, entry); |
682 | } | 682 | } |
683 | 683 | ||
684 | /* Call all ACPI methods here */ | 684 | /* Call all ACPI methods here */ |
685 | /** | 685 | /** |
686 | * radeon_acpi_init - init driver acpi support | 686 | * radeon_acpi_init - init driver acpi support |
687 | * | 687 | * |
688 | * @rdev: radeon_device pointer | 688 | * @rdev: radeon_device pointer |
689 | * | 689 | * |
690 | * Verifies the AMD ACPI interfaces and registers with the acpi | 690 | * Verifies the AMD ACPI interfaces and registers with the acpi |
691 | * notifier chain (all asics). | 691 | * notifier chain (all asics). |
692 | * Returns 0 on success, error on failure. | 692 | * Returns 0 on success, error on failure. |
693 | */ | 693 | */ |
694 | int radeon_acpi_init(struct radeon_device *rdev) | 694 | int radeon_acpi_init(struct radeon_device *rdev) |
695 | { | 695 | { |
696 | acpi_handle handle; | 696 | acpi_handle handle; |
697 | struct radeon_atif *atif = &rdev->atif; | 697 | struct radeon_atif *atif = &rdev->atif; |
698 | struct radeon_atcs *atcs = &rdev->atcs; | 698 | struct radeon_atcs *atcs = &rdev->atcs; |
699 | int ret; | 699 | int ret; |
700 | 700 | ||
701 | /* Get the device handle */ | 701 | /* Get the device handle */ |
702 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 702 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
703 | 703 | ||
704 | /* No need to proceed if we're sure that ATIF is not supported */ | 704 | /* No need to proceed if we're sure that ATIF is not supported */ |
705 | if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) | 705 | if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) |
706 | return 0; | 706 | return 0; |
707 | 707 | ||
708 | /* Call the ATCS method */ | 708 | /* Call the ATCS method */ |
709 | ret = radeon_atcs_verify_interface(handle, atcs); | 709 | ret = radeon_atcs_verify_interface(handle, atcs); |
710 | if (ret) { | 710 | if (ret) { |
711 | DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); | 711 | DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); |
712 | } | 712 | } |
713 | 713 | ||
714 | /* Call the ATIF method */ | 714 | /* Call the ATIF method */ |
715 | ret = radeon_atif_verify_interface(handle, atif); | 715 | ret = radeon_atif_verify_interface(handle, atif); |
716 | if (ret) { | 716 | if (ret) { |
717 | DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); | 717 | DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); |
718 | goto out; | 718 | goto out; |
719 | } | 719 | } |
720 | 720 | ||
721 | if (atif->notifications.brightness_change) { | 721 | if (atif->notifications.brightness_change) { |
722 | struct drm_encoder *tmp; | 722 | struct drm_encoder *tmp; |
723 | struct radeon_encoder *target = NULL; | 723 | struct radeon_encoder *target = NULL; |
724 | 724 | ||
725 | /* Find the encoder controlling the brightness */ | 725 | /* Find the encoder controlling the brightness */ |
726 | list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list, | 726 | list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list, |
727 | head) { | 727 | head) { |
728 | struct radeon_encoder *enc = to_radeon_encoder(tmp); | 728 | struct radeon_encoder *enc = to_radeon_encoder(tmp); |
729 | 729 | ||
730 | if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) && | 730 | if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) && |
731 | enc->enc_priv) { | 731 | enc->enc_priv) { |
732 | if (rdev->is_atom_bios) { | 732 | if (rdev->is_atom_bios) { |
733 | struct radeon_encoder_atom_dig *dig = enc->enc_priv; | 733 | struct radeon_encoder_atom_dig *dig = enc->enc_priv; |
734 | if (dig->bl_dev) { | 734 | if (dig->bl_dev) { |
735 | target = enc; | 735 | target = enc; |
736 | break; | 736 | break; |
737 | } | 737 | } |
738 | } else { | 738 | } else { |
739 | struct radeon_encoder_lvds *dig = enc->enc_priv; | 739 | struct radeon_encoder_lvds *dig = enc->enc_priv; |
740 | if (dig->bl_dev) { | 740 | if (dig->bl_dev) { |
741 | target = enc; | 741 | target = enc; |
742 | break; | 742 | break; |
743 | } | 743 | } |
744 | } | 744 | } |
745 | } | 745 | } |
746 | } | 746 | } |
747 | 747 | ||
748 | atif->encoder_for_bl = target; | 748 | atif->encoder_for_bl = target; |
749 | if (!target) { | 749 | if (!target) { |
750 | /* Brightness change notification is enabled, but we | 750 | /* Brightness change notification is enabled, but we |
751 | * didn't find a backlight controller, this should | 751 | * didn't find a backlight controller, this should |
752 | * never happen. | 752 | * never happen. |
753 | */ | 753 | */ |
754 | DRM_ERROR("Cannot find a backlight controller\n"); | 754 | DRM_ERROR("Cannot find a backlight controller\n"); |
755 | } | 755 | } |
756 | } | 756 | } |
757 | 757 | ||
758 | if (atif->functions.sbios_requests && !atif->functions.system_params) { | 758 | if (atif->functions.sbios_requests && !atif->functions.system_params) { |
759 | /* XXX check this workaround, if sbios request function is | 759 | /* XXX check this workaround, if sbios request function is |
760 | * present we have to see how it's configured in the system | 760 | * present we have to see how it's configured in the system |
761 | * params | 761 | * params |
762 | */ | 762 | */ |
763 | atif->functions.system_params = true; | 763 | atif->functions.system_params = true; |
764 | } | 764 | } |
765 | 765 | ||
766 | if (atif->functions.system_params) { | 766 | if (atif->functions.system_params) { |
767 | ret = radeon_atif_get_notification_params(handle, | 767 | ret = radeon_atif_get_notification_params(handle, |
768 | &atif->notification_cfg); | 768 | &atif->notification_cfg); |
769 | if (ret) { | 769 | if (ret) { |
770 | DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", | 770 | DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", |
771 | ret); | 771 | ret); |
772 | /* Disable notification */ | 772 | /* Disable notification */ |
773 | atif->notification_cfg.enabled = false; | 773 | atif->notification_cfg.enabled = false; |
774 | } | 774 | } |
775 | } | 775 | } |
776 | 776 | ||
777 | out: | 777 | out: |
778 | rdev->acpi_nb.notifier_call = radeon_acpi_event; | 778 | rdev->acpi_nb.notifier_call = radeon_acpi_event; |
779 | register_acpi_notifier(&rdev->acpi_nb); | 779 | register_acpi_notifier(&rdev->acpi_nb); |
780 | 780 | ||
781 | return ret; | 781 | return ret; |
782 | } | 782 | } |
783 | 783 | ||
784 | /** | 784 | /** |
785 | * radeon_acpi_fini - tear down driver acpi support | 785 | * radeon_acpi_fini - tear down driver acpi support |
786 | * | 786 | * |
787 | * @rdev: radeon_device pointer | 787 | * @rdev: radeon_device pointer |
788 | * | 788 | * |
789 | * Unregisters with the acpi notifier chain (all asics). | 789 | * Unregisters with the acpi notifier chain (all asics). |
790 | */ | 790 | */ |
791 | void radeon_acpi_fini(struct radeon_device *rdev) | 791 | void radeon_acpi_fini(struct radeon_device *rdev) |
792 | { | 792 | { |
793 | unregister_acpi_notifier(&rdev->acpi_nb); | 793 | unregister_acpi_notifier(&rdev->acpi_nb); |
794 | } | 794 | } |
795 | 795 |
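The notifier registered at the end of radeon_acpi_init() above follows the standard ACPI notifier-chain shape. A minimal sketch of such a callback, assuming the usual acpi_bus_event payload (the name and logging here are illustrative; the real radeon_acpi_event handler lives earlier in this file and is not part of this hunk):

    #include <linux/acpi.h>
    #include <linux/notifier.h>

    /* Illustrative callback for register_acpi_notifier(): the chain hands
     * over a struct acpi_bus_event through the data pointer. */
    static int example_acpi_event(struct notifier_block *nb,
                                  unsigned long val, void *data)
    {
            struct acpi_bus_event *event = data;

            /* A real handler would recover its driver data with
             * container_of() on nb and filter on event->device_class
             * before reacting. */
            pr_debug("ACPI event: class=%s bus_id=%s type=%u\n",
                     event->device_class, event->bus_id, event->type);

            return NOTIFY_OK;
    }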
drivers/gpu/drm/radeon/radeon_atpx_handler.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2010 Red Hat Inc. | 2 | * Copyright (c) 2010 Red Hat Inc. |
3 | * Author : Dave Airlie <airlied@redhat.com> | 3 | * Author : Dave Airlie <airlied@redhat.com> |
4 | * | 4 | * |
5 | * Licensed under GPLv2 | 5 | * Licensed under GPLv2 |
6 | * | 6 | * |
7 | * ATPX support for both Intel/ATI | 7 | * ATPX support for both Intel/ATI |
8 | */ | 8 | */ |
9 | #include <linux/vga_switcheroo.h> | 9 | #include <linux/vga_switcheroo.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/acpi.h> | 11 | #include <linux/acpi.h> |
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | 13 | ||
14 | #include "radeon_acpi.h" | 14 | #include "radeon_acpi.h" |
15 | 15 | ||
16 | struct radeon_atpx_functions { | 16 | struct radeon_atpx_functions { |
17 | bool px_params; | 17 | bool px_params; |
18 | bool power_cntl; | 18 | bool power_cntl; |
19 | bool disp_mux_cntl; | 19 | bool disp_mux_cntl; |
20 | bool i2c_mux_cntl; | 20 | bool i2c_mux_cntl; |
21 | bool switch_start; | 21 | bool switch_start; |
22 | bool switch_end; | 22 | bool switch_end; |
23 | bool disp_connectors_mapping; | 23 | bool disp_connectors_mapping; |
24 | bool disp_detetion_ports; | 24 | bool disp_detetion_ports; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct radeon_atpx { | 27 | struct radeon_atpx { |
28 | acpi_handle handle; | 28 | acpi_handle handle; |
29 | struct radeon_atpx_functions functions; | 29 | struct radeon_atpx_functions functions; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | static struct radeon_atpx_priv { | 32 | static struct radeon_atpx_priv { |
33 | bool atpx_detected; | 33 | bool atpx_detected; |
34 | /* handle for device - and atpx */ | 34 | /* handle for device - and atpx */ |
35 | acpi_handle dhandle; | 35 | acpi_handle dhandle; |
36 | struct radeon_atpx atpx; | 36 | struct radeon_atpx atpx; |
37 | } radeon_atpx_priv; | 37 | } radeon_atpx_priv; |
38 | 38 | ||
39 | struct atpx_verify_interface { | 39 | struct atpx_verify_interface { |
40 | u16 size; /* structure size in bytes (includes size field) */ | 40 | u16 size; /* structure size in bytes (includes size field) */ |
41 | u16 version; /* version */ | 41 | u16 version; /* version */ |
42 | u32 function_bits; /* supported functions bit vector */ | 42 | u32 function_bits; /* supported functions bit vector */ |
43 | } __packed; | 43 | } __packed; |
44 | 44 | ||
45 | struct atpx_px_params { | 45 | struct atpx_px_params { |
46 | u16 size; /* structure size in bytes (includes size field) */ | 46 | u16 size; /* structure size in bytes (includes size field) */ |
47 | u32 valid_flags; /* which flags are valid */ | 47 | u32 valid_flags; /* which flags are valid */ |
48 | u32 flags; /* flags */ | 48 | u32 flags; /* flags */ |
49 | } __packed; | 49 | } __packed; |
50 | 50 | ||
51 | struct atpx_power_control { | 51 | struct atpx_power_control { |
52 | u16 size; | 52 | u16 size; |
53 | u8 dgpu_state; | 53 | u8 dgpu_state; |
54 | } __packed; | 54 | } __packed; |
55 | 55 | ||
56 | struct atpx_mux { | 56 | struct atpx_mux { |
57 | u16 size; | 57 | u16 size; |
58 | u16 mux; | 58 | u16 mux; |
59 | } __packed; | 59 | } __packed; |
60 | 60 | ||
61 | /** | 61 | /** |
62 | * radeon_atpx_call - call an ATPX method | 62 | * radeon_atpx_call - call an ATPX method |
63 | * | 63 | * |
64 | * @handle: acpi handle | 64 | * @handle: acpi handle |
65 | * @function: the ATPX function to execute | 65 | * @function: the ATPX function to execute |
66 | * @params: ATPX function params | 66 | * @params: ATPX function params |
67 | * | 67 | * |
68 | * Executes the requested ATPX function (all asics). | 68 | * Executes the requested ATPX function (all asics). |
69 | * Returns a pointer to the acpi output buffer. | 69 | * Returns a pointer to the acpi output buffer. |
70 | */ | 70 | */ |
71 | static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, | 71 | static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, |
72 | struct acpi_buffer *params) | 72 | struct acpi_buffer *params) |
73 | { | 73 | { |
74 | acpi_status status; | 74 | acpi_status status; |
75 | union acpi_object atpx_arg_elements[2]; | 75 | union acpi_object atpx_arg_elements[2]; |
76 | struct acpi_object_list atpx_arg; | 76 | struct acpi_object_list atpx_arg; |
77 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 77 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
78 | 78 | ||
79 | atpx_arg.count = 2; | 79 | atpx_arg.count = 2; |
80 | atpx_arg.pointer = &atpx_arg_elements[0]; | 80 | atpx_arg.pointer = &atpx_arg_elements[0]; |
81 | 81 | ||
82 | atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; | 82 | atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; |
83 | atpx_arg_elements[0].integer.value = function; | 83 | atpx_arg_elements[0].integer.value = function; |
84 | 84 | ||
85 | if (params) { | 85 | if (params) { |
86 | atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; | 86 | atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; |
87 | atpx_arg_elements[1].buffer.length = params->length; | 87 | atpx_arg_elements[1].buffer.length = params->length; |
88 | atpx_arg_elements[1].buffer.pointer = params->pointer; | 88 | atpx_arg_elements[1].buffer.pointer = params->pointer; |
89 | } else { | 89 | } else { |
90 | /* We need a second fake parameter */ | 90 | /* We need a second fake parameter */ |
91 | atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; | 91 | atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; |
92 | atpx_arg_elements[1].integer.value = 0; | 92 | atpx_arg_elements[1].integer.value = 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); | 95 | status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); |
96 | 96 | ||
97 | /* Fail only if calling the method fails and ATPX is supported */ | 97 | /* Fail only if calling the method fails and ATPX is supported */ |
98 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 98 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
99 | printk("failed to evaluate ATPX got %s\n", | 99 | printk("failed to evaluate ATPX got %s\n", |
100 | acpi_format_exception(status)); | 100 | acpi_format_exception(status)); |
101 | kfree(buffer.pointer); | 101 | kfree(buffer.pointer); |
102 | return NULL; | 102 | return NULL; |
103 | } | 103 | } |
104 | 104 | ||
105 | return buffer.pointer; | 105 | return buffer.pointer; |
106 | } | 106 | } |
107 | 107 | ||
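Every helper below drives the interface through radeon_atpx_call(); the key part of the contract is that a non-NULL return is an ACPI_ALLOCATE_BUFFER object the caller must kfree(). A minimal usage sketch, assuming an atpx struct is in scope as in the functions that follow:

    /* Sketch of the calling convention: decode the returned buffer,
     * then free the object allocated by acpi_evaluate_object(). */
    union acpi_object *info;

    info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
    if (!info)
            return -EIO;
    /* ... read info->buffer.pointer here ... */
    kfree(info);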
108 | /** | 108 | /** |
109 | * radeon_atpx_parse_functions - parse supported functions | 109 | * radeon_atpx_parse_functions - parse supported functions |
110 | * | 110 | * |
111 | * @f: supported functions struct | 111 | * @f: supported functions struct |
112 | * @mask: supported functions mask from ATPX | 112 | * @mask: supported functions mask from ATPX |
113 | * | 113 | * |
114 | * Use the supported functions mask from ATPX function | 114 | * Use the supported functions mask from ATPX function |
115 | * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions | 115 | * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions |
116 | * are supported (all asics). | 116 | * are supported (all asics). |
117 | */ | 117 | */ |
118 | static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask) | 118 | static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask) |
119 | { | 119 | { |
120 | f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED; | 120 | f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED; |
121 | f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED; | 121 | f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED; |
122 | f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED; | 122 | f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED; |
123 | f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED; | 123 | f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED; |
124 | f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED; | 124 | f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED; |
125 | f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED; | 125 | f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED; |
126 | f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED; | 126 | f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED; |
127 | f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED; | 127 | f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED; |
128 | } | 128 | } |
129 | 129 | ||
130 | /** | 130 | /** |
131 | * radeon_atpx_validate_functions - validate ATPX functions | 131 | * radeon_atpx_validate_functions - validate ATPX functions |
132 | * | 132 | * |
133 | * @atpx: radeon atpx struct | 133 | * @atpx: radeon atpx struct |
134 | * | 134 | * |
135 | * Validate that required functions are enabled (all asics). | 135 | * Validate that required functions are enabled (all asics). |
136 | * returns 0 on success, error on failure. | 136 | * returns 0 on success, error on failure. |
137 | */ | 137 | */ |
138 | static int radeon_atpx_validate(struct radeon_atpx *atpx) | 138 | static int radeon_atpx_validate(struct radeon_atpx *atpx) |
139 | { | 139 | { |
140 | /* make sure required functions are enabled */ | 140 | /* make sure required functions are enabled */ |
141 | /* dGPU power control is required */ | 141 | /* dGPU power control is required */ |
142 | atpx->functions.power_cntl = true; | 142 | atpx->functions.power_cntl = true; |
143 | 143 | ||
144 | if (atpx->functions.px_params) { | 144 | if (atpx->functions.px_params) { |
145 | union acpi_object *info; | 145 | union acpi_object *info; |
146 | struct atpx_px_params output; | 146 | struct atpx_px_params output; |
147 | size_t size; | 147 | size_t size; |
148 | u32 valid_bits; | 148 | u32 valid_bits; |
149 | 149 | ||
150 | info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); | 150 | info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); |
151 | if (!info) | 151 | if (!info) |
152 | return -EIO; | 152 | return -EIO; |
153 | 153 | ||
154 | memset(&output, 0, sizeof(output)); | 154 | memset(&output, 0, sizeof(output)); |
155 | 155 | ||
156 | size = *(u16 *) info->buffer.pointer; | 156 | size = *(u16 *) info->buffer.pointer; |
157 | if (size < 10) { | 157 | if (size < 10) { |
158 | printk("ATPX buffer is too small: %zu\n", size); | 158 | printk("ATPX buffer is too small: %zu\n", size); |
159 | kfree(info); | 159 | kfree(info); |
160 | return -EINVAL; | 160 | return -EINVAL; |
161 | } | 161 | } |
162 | size = min(sizeof(output), size); | 162 | size = min(sizeof(output), size); |
163 | 163 | ||
164 | memcpy(&output, info->buffer.pointer, size); | 164 | memcpy(&output, info->buffer.pointer, size); |
165 | 165 | ||
166 | valid_bits = output.flags & output.valid_flags; | 166 | valid_bits = output.flags & output.valid_flags; |
167 | /* if separate mux flag is set, mux controls are required */ | 167 | /* if separate mux flag is set, mux controls are required */ |
168 | if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { | 168 | if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { |
169 | atpx->functions.i2c_mux_cntl = true; | 169 | atpx->functions.i2c_mux_cntl = true; |
170 | atpx->functions.disp_mux_cntl = true; | 170 | atpx->functions.disp_mux_cntl = true; |
171 | } | 171 | } |
172 | /* if any outputs are muxed, mux controls are required */ | 172 | /* if any outputs are muxed, mux controls are required */ |
173 | if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | | 173 | if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | |
174 | ATPX_TV_SIGNAL_MUXED | | 174 | ATPX_TV_SIGNAL_MUXED | |
175 | ATPX_DFP_SIGNAL_MUXED)) | 175 | ATPX_DFP_SIGNAL_MUXED)) |
176 | atpx->functions.disp_mux_cntl = true; | 176 | atpx->functions.disp_mux_cntl = true; |
177 | 177 | ||
178 | kfree(info); | 178 | kfree(info); |
179 | } | 179 | } |
180 | return 0; | 180 | return 0; |
181 | } | 181 | } |
182 | 182 | ||
183 | /** | 183 | /** |
184 | * radeon_atpx_verify_interface - verify ATPX | 184 | * radeon_atpx_verify_interface - verify ATPX |
185 | * | 185 | * |
186 | * @atpx: radeon atpx struct | 186 | * @atpx: radeon atpx struct |
187 | * | 187 | * |
188 | * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function | 188 | * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function |
189 | * to initialize ATPX and determine what features are supported | 189 | * to initialize ATPX and determine what features are supported |
190 | * (all asics). | 190 | * (all asics). |
191 | * returns 0 on success, error on failure. | 191 | * returns 0 on success, error on failure. |
192 | */ | 192 | */ |
193 | static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) | 193 | static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) |
194 | { | 194 | { |
195 | union acpi_object *info; | 195 | union acpi_object *info; |
196 | struct atpx_verify_interface output; | 196 | struct atpx_verify_interface output; |
197 | size_t size; | 197 | size_t size; |
198 | int err = 0; | 198 | int err = 0; |
199 | 199 | ||
200 | info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL); | 200 | info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL); |
201 | if (!info) | 201 | if (!info) |
202 | return -EIO; | 202 | return -EIO; |
203 | 203 | ||
204 | memset(&output, 0, sizeof(output)); | 204 | memset(&output, 0, sizeof(output)); |
205 | 205 | ||
206 | size = *(u16 *) info->buffer.pointer; | 206 | size = *(u16 *) info->buffer.pointer; |
207 | if (size < 8) { | 207 | if (size < 8) { |
208 | printk("ATPX buffer is too small: %zu\n", size); | 208 | printk("ATPX buffer is too small: %zu\n", size); |
209 | err = -EINVAL; | 209 | err = -EINVAL; |
210 | goto out; | 210 | goto out; |
211 | } | 211 | } |
212 | size = min(sizeof(output), size); | 212 | size = min(sizeof(output), size); |
213 | 213 | ||
214 | memcpy(&output, info->buffer.pointer, size); | 214 | memcpy(&output, info->buffer.pointer, size); |
215 | 215 | ||
216 | /* TODO: check version? */ | 216 | /* TODO: check version? */ |
217 | printk("ATPX version %u\n", output.version); | 217 | printk("ATPX version %u\n", output.version); |
218 | 218 | ||
219 | radeon_atpx_parse_functions(&atpx->functions, output.function_bits); | 219 | radeon_atpx_parse_functions(&atpx->functions, output.function_bits); |
220 | 220 | ||
221 | out: | 221 | out: |
222 | kfree(info); | 222 | kfree(info); |
223 | return err; | 223 | return err; |
224 | } | 224 | } |
225 | 225 | ||
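radeon_atpx_verify_interface() above and radeon_atpx_validate() earlier decode the same size-prefixed ATPX output layout: the first u16 of the buffer is the structure size including itself, and only min(sizeof(output), size) bytes are copied. A hypothetical helper capturing that convention (not part of this file, shown only to make the pattern explicit):

    /* Hypothetical: copy a size-prefixed ATPX result into a caller
     * struct without reading past either buffer. */
    static int atpx_copy_result(union acpi_object *info, void *out,
                                size_t out_size, size_t min_size)
    {
            size_t size = *(u16 *)info->buffer.pointer;

            if (size < min_size)
                    return -EINVAL;
            memset(out, 0, out_size);
            memcpy(out, info->buffer.pointer, min(out_size, size));
            return 0;
    }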
226 | /** | 226 | /** |
227 | * radeon_atpx_set_discrete_state - power up/down discrete GPU | 227 | * radeon_atpx_set_discrete_state - power up/down discrete GPU |
228 | * | 228 | * |
229 | * @atpx: atpx info struct | 229 | * @atpx: atpx info struct |
230 | * @state: discrete GPU state (0 = power down, 1 = power up) | 230 | * @state: discrete GPU state (0 = power down, 1 = power up) |
231 | * | 231 | * |
232 | * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to | 232 | * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to |
233 | * power down/up the discrete GPU (all asics). | 233 | * power down/up the discrete GPU (all asics). |
234 | * Returns 0 on success, error on failure. | 234 | * Returns 0 on success, error on failure. |
235 | */ | 235 | */ |
236 | static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) | 236 | static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) |
237 | { | 237 | { |
238 | struct acpi_buffer params; | 238 | struct acpi_buffer params; |
239 | union acpi_object *info; | 239 | union acpi_object *info; |
240 | struct atpx_power_control input; | 240 | struct atpx_power_control input; |
241 | 241 | ||
242 | if (atpx->functions.power_cntl) { | 242 | if (atpx->functions.power_cntl) { |
243 | input.size = 3; | 243 | input.size = 3; |
244 | input.dgpu_state = state; | 244 | input.dgpu_state = state; |
245 | params.length = input.size; | 245 | params.length = input.size; |
246 | params.pointer = &input; | 246 | params.pointer = &input; |
247 | info = radeon_atpx_call(atpx->handle, | 247 | info = radeon_atpx_call(atpx->handle, |
248 | ATPX_FUNCTION_POWER_CONTROL, | 248 | ATPX_FUNCTION_POWER_CONTROL, |
249 | &params); | 249 | &params); |
250 | if (!info) | 250 | if (!info) |
251 | return -EIO; | 251 | return -EIO; |
252 | kfree(info); | 252 | kfree(info); |
253 | } | 253 | } |
254 | return 0; | 254 | return 0; |
255 | } | 255 | } |
256 | 256 | ||
257 | /** | 257 | /** |
258 | * radeon_atpx_switch_disp_mux - switch display mux | 258 | * radeon_atpx_switch_disp_mux - switch display mux |
259 | * | 259 | * |
260 | * @atpx: atpx info struct | 260 | * @atpx: atpx info struct |
261 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | 261 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) |
262 | * | 262 | * |
263 | * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to | 263 | * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to |
264 | * switch the display mux between the discrete GPU and integrated GPU | 264 | * switch the display mux between the discrete GPU and integrated GPU |
265 | * (all asics). | 265 | * (all asics). |
266 | * Returns 0 on success, error on failure. | 266 | * Returns 0 on success, error on failure. |
267 | */ | 267 | */ |
268 | static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id) | 268 | static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id) |
269 | { | 269 | { |
270 | struct acpi_buffer params; | 270 | struct acpi_buffer params; |
271 | union acpi_object *info; | 271 | union acpi_object *info; |
272 | struct atpx_mux input; | 272 | struct atpx_mux input; |
273 | 273 | ||
274 | if (atpx->functions.disp_mux_cntl) { | 274 | if (atpx->functions.disp_mux_cntl) { |
275 | input.size = 4; | 275 | input.size = 4; |
276 | input.mux = mux_id; | 276 | input.mux = mux_id; |
277 | params.length = input.size; | 277 | params.length = input.size; |
278 | params.pointer = &input; | 278 | params.pointer = &input; |
279 | info = radeon_atpx_call(atpx->handle, | 279 | info = radeon_atpx_call(atpx->handle, |
280 | ATPX_FUNCTION_DISPLAY_MUX_CONTROL, | 280 | ATPX_FUNCTION_DISPLAY_MUX_CONTROL, |
281 | &params); | 281 | &params); |
282 | if (!info) | 282 | if (!info) |
283 | return -EIO; | 283 | return -EIO; |
284 | kfree(info); | 284 | kfree(info); |
285 | } | 285 | } |
286 | return 0; | 286 | return 0; |
287 | } | 287 | } |
288 | 288 | ||
289 | /** | 289 | /** |
290 | * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux | 290 | * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux |
291 | * | 291 | * |
292 | * @atpx: atpx info struct | 292 | * @atpx: atpx info struct |
293 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | 293 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) |
294 | * | 294 | * |
295 | * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to | 295 | * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to |
296 | * switch the i2c/hpd mux between the discrete GPU and integrated GPU | 296 | * switch the i2c/hpd mux between the discrete GPU and integrated GPU |
297 | * (all asics). | 297 | * (all asics). |
298 | * Returns 0 on success, error on failure. | 298 | * Returns 0 on success, error on failure. |
299 | */ | 299 | */ |
300 | static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id) | 300 | static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id) |
301 | { | 301 | { |
302 | struct acpi_buffer params; | 302 | struct acpi_buffer params; |
303 | union acpi_object *info; | 303 | union acpi_object *info; |
304 | struct atpx_mux input; | 304 | struct atpx_mux input; |
305 | 305 | ||
306 | if (atpx->functions.i2c_mux_cntl) { | 306 | if (atpx->functions.i2c_mux_cntl) { |
307 | input.size = 4; | 307 | input.size = 4; |
308 | input.mux = mux_id; | 308 | input.mux = mux_id; |
309 | params.length = input.size; | 309 | params.length = input.size; |
310 | params.pointer = &input; | 310 | params.pointer = &input; |
311 | info = radeon_atpx_call(atpx->handle, | 311 | info = radeon_atpx_call(atpx->handle, |
312 | ATPX_FUNCTION_I2C_MUX_CONTROL, | 312 | ATPX_FUNCTION_I2C_MUX_CONTROL, |
313 | &params); | 313 | &params); |
314 | if (!info) | 314 | if (!info) |
315 | return -EIO; | 315 | return -EIO; |
316 | kfree(info); | 316 | kfree(info); |
317 | } | 317 | } |
318 | return 0; | 318 | return 0; |
319 | } | 319 | } |
320 | 320 | ||
321 | /** | 321 | /** |
322 | * radeon_atpx_switch_start - notify the sbios of a GPU switch | 322 | * radeon_atpx_switch_start - notify the sbios of a GPU switch |
323 | * | 323 | * |
324 | * @atpx: atpx info struct | 324 | * @atpx: atpx info struct |
325 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | 325 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) |
326 | * | 326 | * |
327 | * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX | 327 | * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX |
328 | * function to notify the sbios that a switch between the discrete GPU and | 328 | * function to notify the sbios that a switch between the discrete GPU and |
329 | * integrated GPU has begun (all asics). | 329 | * integrated GPU has begun (all asics). |
330 | * Returns 0 on success, error on failure. | 330 | * Returns 0 on success, error on failure. |
331 | */ | 331 | */ |
332 | static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id) | 332 | static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id) |
333 | { | 333 | { |
334 | struct acpi_buffer params; | 334 | struct acpi_buffer params; |
335 | union acpi_object *info; | 335 | union acpi_object *info; |
336 | struct atpx_mux input; | 336 | struct atpx_mux input; |
337 | 337 | ||
338 | if (atpx->functions.switch_start) { | 338 | if (atpx->functions.switch_start) { |
339 | input.size = 4; | 339 | input.size = 4; |
340 | input.mux = mux_id; | 340 | input.mux = mux_id; |
341 | params.length = input.size; | 341 | params.length = input.size; |
342 | params.pointer = &input; | 342 | params.pointer = &input; |
343 | info = radeon_atpx_call(atpx->handle, | 343 | info = radeon_atpx_call(atpx->handle, |
344 | ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION, | 344 | ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION, |
345 | &params); | 345 | &params); |
346 | if (!info) | 346 | if (!info) |
347 | return -EIO; | 347 | return -EIO; |
348 | kfree(info); | 348 | kfree(info); |
349 | } | 349 | } |
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
352 | 352 | ||
353 | /** | 353 | /** |
354 | * radeon_atpx_switch_end - notify the sbios of a GPU switch | 354 | * radeon_atpx_switch_end - notify the sbios of a GPU switch |
355 | * | 355 | * |
356 | * @atpx: atpx info struct | 356 | * @atpx: atpx info struct |
357 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | 357 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) |
358 | * | 358 | * |
359 | * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX | 359 | * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX |
360 | * function to notify the sbios that a switch between the discrete GPU and | 360 | * function to notify the sbios that a switch between the discrete GPU and |
361 | * integrated GPU has ended (all asics). | 361 | * integrated GPU has ended (all asics). |
362 | * Returns 0 on success, error on failure. | 362 | * Returns 0 on success, error on failure. |
363 | */ | 363 | */ |
364 | static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id) | 364 | static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id) |
365 | { | 365 | { |
366 | struct acpi_buffer params; | 366 | struct acpi_buffer params; |
367 | union acpi_object *info; | 367 | union acpi_object *info; |
368 | struct atpx_mux input; | 368 | struct atpx_mux input; |
369 | 369 | ||
370 | if (atpx->functions.switch_end) { | 370 | if (atpx->functions.switch_end) { |
371 | input.size = 4; | 371 | input.size = 4; |
372 | input.mux = mux_id; | 372 | input.mux = mux_id; |
373 | params.length = input.size; | 373 | params.length = input.size; |
374 | params.pointer = &input; | 374 | params.pointer = &input; |
375 | info = radeon_atpx_call(atpx->handle, | 375 | info = radeon_atpx_call(atpx->handle, |
376 | ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION, | 376 | ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION, |
377 | &params); | 377 | &params); |
378 | if (!info) | 378 | if (!info) |
379 | return -EIO; | 379 | return -EIO; |
380 | kfree(info); | 380 | kfree(info); |
381 | } | 381 | } |
382 | return 0; | 382 | return 0; |
383 | } | 383 | } |
384 | 384 | ||
385 | /** | 385 | /** |
386 | * radeon_atpx_switchto - switch to the requested GPU | 386 | * radeon_atpx_switchto - switch to the requested GPU |
387 | * | 387 | * |
388 | * @id: GPU to switch to | 388 | * @id: GPU to switch to |
389 | * | 389 | * |
390 | * Execute the necessary ATPX functions to switch between the discrete GPU and | 390 | * Execute the necessary ATPX functions to switch between the discrete GPU and |
391 | * integrated GPU (all asics). | 391 | * integrated GPU (all asics). |
392 | * Returns 0 on success, error on failure. | 392 | * Returns 0 on success, error on failure. |
393 | */ | 393 | */ |
394 | static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) | 394 | static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) |
395 | { | 395 | { |
396 | u16 gpu_id; | 396 | u16 gpu_id; |
397 | 397 | ||
398 | if (id == VGA_SWITCHEROO_IGD) | 398 | if (id == VGA_SWITCHEROO_IGD) |
399 | gpu_id = ATPX_INTEGRATED_GPU; | 399 | gpu_id = ATPX_INTEGRATED_GPU; |
400 | else | 400 | else |
401 | gpu_id = ATPX_DISCRETE_GPU; | 401 | gpu_id = ATPX_DISCRETE_GPU; |
402 | 402 | ||
403 | radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id); | 403 | radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id); |
404 | radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id); | 404 | radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id); |
405 | radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id); | 405 | radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id); |
406 | radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id); | 406 | radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id); |
407 | 407 | ||
408 | return 0; | 408 | return 0; |
409 | } | 409 | } |
410 | 410 | ||
411 | /** | 411 | /** |
412 | * radeon_atpx_power_state - power down/up the requested GPU | 412 | * radeon_atpx_power_state - power down/up the requested GPU |
413 | * | 413 | * |
414 | * @id: GPU to power down/up | 414 | * @id: GPU to power down/up |
415 | * @state: requested power state (0 = off, 1 = on) | 415 | * @state: requested power state (0 = off, 1 = on) |
416 | * | 416 | * |
417 | * Execute the necessary ATPX function to power down/up the discrete GPU | 417 | * Execute the necessary ATPX function to power down/up the discrete GPU |
418 | * (all asics). | 418 | * (all asics). |
419 | * Returns 0 on success, error on failure. | 419 | * Returns 0 on success, error on failure. |
420 | */ | 420 | */ |
421 | static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, | 421 | static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, |
422 | enum vga_switcheroo_state state) | 422 | enum vga_switcheroo_state state) |
423 | { | 423 | { |
424 | /* on w500 ACPI can't change intel gpu state */ | 424 | /* on w500 ACPI can't change intel gpu state */ |
425 | if (id == VGA_SWITCHEROO_IGD) | 425 | if (id == VGA_SWITCHEROO_IGD) |
426 | return 0; | 426 | return 0; |
427 | 427 | ||
428 | radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state); | 428 | radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state); |
429 | return 0; | 429 | return 0; |
430 | } | 430 | } |
431 | 431 | ||
432 | /** | 432 | /** |
433 | * radeon_atpx_pci_probe_handle - look up the ATPX handle | 433 | * radeon_atpx_pci_probe_handle - look up the ATPX handle |
434 | * | 434 | * |
435 | * @pdev: pci device | 435 | * @pdev: pci device |
436 | * | 436 | * |
437 | * Look up the ATPX handles (all asics). | 437 | * Look up the ATPX handles (all asics). |
438 | * Returns true if the handles are found, false if not. | 438 | * Returns true if the handles are found, false if not. |
439 | */ | 439 | */ |
440 | static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | 440 | static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) |
441 | { | 441 | { |
442 | acpi_handle dhandle, atpx_handle; | 442 | acpi_handle dhandle, atpx_handle; |
443 | acpi_status status; | 443 | acpi_status status; |
444 | 444 | ||
445 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 445 | dhandle = ACPI_HANDLE(&pdev->dev); |
446 | if (!dhandle) | 446 | if (!dhandle) |
447 | return false; | 447 | return false; |
448 | 448 | ||
449 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); | 449 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); |
450 | if (ACPI_FAILURE(status)) | 450 | if (ACPI_FAILURE(status)) |
451 | return false; | 451 | return false; |
452 | 452 | ||
453 | radeon_atpx_priv.dhandle = dhandle; | 453 | radeon_atpx_priv.dhandle = dhandle; |
454 | radeon_atpx_priv.atpx.handle = atpx_handle; | 454 | radeon_atpx_priv.atpx.handle = atpx_handle; |
455 | return true; | 455 | return true; |
456 | } | 456 | } |
457 | 457 | ||
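The hunk above is one of the two real changes in this file: with DEVICE_ACPI_HANDLE() gone, ACPI_HANDLE() is used directly to resolve a device's ACPI companion. For reference, the resulting lookup pattern for any PCI device is simply (a sketch of the same steps radeon_atpx_pci_probe_handle() performs; "ATPX" is the method name used above):

    /* Resolve the ACPI handle of the PCI device, then look up the
     * vendor method on it; bail out if either step fails. */
    acpi_handle dev_handle = ACPI_HANDLE(&pdev->dev);
    acpi_handle method_handle;

    if (!dev_handle)
            return false;   /* device has no ACPI companion */
    if (ACPI_FAILURE(acpi_get_handle(dev_handle, "ATPX", &method_handle)))
            return false;   /* method not present in the namespace */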
458 | /** | 458 | /** |
459 | * radeon_atpx_init - verify the ATPX interface | 459 | * radeon_atpx_init - verify the ATPX interface |
460 | * | 460 | * |
461 | * Verify the ATPX interface (all asics). | 461 | * Verify the ATPX interface (all asics). |
462 | * Returns 0 on success, error on failure. | 462 | * Returns 0 on success, error on failure. |
463 | */ | 463 | */ |
464 | static int radeon_atpx_init(void) | 464 | static int radeon_atpx_init(void) |
465 | { | 465 | { |
466 | int r; | 466 | int r; |
467 | 467 | ||
468 | /* set up the ATPX handle */ | 468 | /* set up the ATPX handle */ |
469 | r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx); | 469 | r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx); |
470 | if (r) | 470 | if (r) |
471 | return r; | 471 | return r; |
472 | 472 | ||
473 | /* validate the atpx setup */ | 473 | /* validate the atpx setup */ |
474 | r = radeon_atpx_validate(&radeon_atpx_priv.atpx); | 474 | r = radeon_atpx_validate(&radeon_atpx_priv.atpx); |
475 | if (r) | 475 | if (r) |
476 | return r; | 476 | return r; |
477 | 477 | ||
478 | return 0; | 478 | return 0; |
479 | } | 479 | } |
480 | 480 | ||
481 | /** | 481 | /** |
482 | * radeon_atpx_get_client_id - get the client id | 482 | * radeon_atpx_get_client_id - get the client id |
483 | * | 483 | * |
484 | * @pdev: pci device | 484 | * @pdev: pci device |
485 | * | 485 | * |
486 | * look up whether we are the integrated or discrete GPU (all asics). | 486 | * look up whether we are the integrated or discrete GPU (all asics). |
487 | * Returns the client id. | 487 | * Returns the client id. |
488 | */ | 488 | */ |
489 | static int radeon_atpx_get_client_id(struct pci_dev *pdev) | 489 | static int radeon_atpx_get_client_id(struct pci_dev *pdev) |
490 | { | 490 | { |
491 | if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) | 491 | if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) |
492 | return VGA_SWITCHEROO_IGD; | 492 | return VGA_SWITCHEROO_IGD; |
493 | else | 493 | else |
494 | return VGA_SWITCHEROO_DIS; | 494 | return VGA_SWITCHEROO_DIS; |
495 | } | 495 | } |
496 | 496 | ||
497 | static struct vga_switcheroo_handler radeon_atpx_handler = { | 497 | static struct vga_switcheroo_handler radeon_atpx_handler = { |
498 | .switchto = radeon_atpx_switchto, | 498 | .switchto = radeon_atpx_switchto, |
499 | .power_state = radeon_atpx_power_state, | 499 | .power_state = radeon_atpx_power_state, |
500 | .init = radeon_atpx_init, | 500 | .init = radeon_atpx_init, |
501 | .get_client_id = radeon_atpx_get_client_id, | 501 | .get_client_id = radeon_atpx_get_client_id, |
502 | }; | 502 | }; |
503 | 503 | ||
504 | /** | 504 | /** |
505 | * radeon_atpx_detect - detect whether we have PX | 505 | * radeon_atpx_detect - detect whether we have PX |
506 | * | 506 | * |
507 | * Check if we have a PX system (all asics). | 507 | * Check if we have a PX system (all asics). |
508 | * Returns true if we have a PX system, false if not. | 508 | * Returns true if we have a PX system, false if not. |
509 | */ | 509 | */ |
510 | static bool radeon_atpx_detect(void) | 510 | static bool radeon_atpx_detect(void) |
511 | { | 511 | { |
512 | char acpi_method_name[255] = { 0 }; | 512 | char acpi_method_name[255] = { 0 }; |
513 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | 513 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; |
514 | struct pci_dev *pdev = NULL; | 514 | struct pci_dev *pdev = NULL; |
515 | bool has_atpx = false; | 515 | bool has_atpx = false; |
516 | int vga_count = 0; | 516 | int vga_count = 0; |
517 | 517 | ||
518 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 518 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
519 | vga_count++; | 519 | vga_count++; |
520 | 520 | ||
521 | has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); | 521 | has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); |
522 | } | 522 | } |
523 | 523 | ||
524 | if (has_atpx && vga_count == 2) { | 524 | if (has_atpx && vga_count == 2) { |
525 | acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); | 525 | acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); |
526 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", | 526 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", |
527 | acpi_method_name); | 527 | acpi_method_name); |
528 | radeon_atpx_priv.atpx_detected = true; | 528 | radeon_atpx_priv.atpx_detected = true; |
529 | return true; | 529 | return true; |
530 | } | 530 | } |
531 | return false; | 531 | return false; |
532 | } | 532 | } |
533 | 533 | ||
534 | /** | 534 | /** |
535 | * radeon_register_atpx_handler - register with vga_switcheroo | 535 | * radeon_register_atpx_handler - register with vga_switcheroo |
536 | * | 536 | * |
537 | * Register the PX callbacks with vga_switcheroo (all asics). | 537 | * Register the PX callbacks with vga_switcheroo (all asics). |
538 | */ | 538 | */ |
539 | void radeon_register_atpx_handler(void) | 539 | void radeon_register_atpx_handler(void) |
540 | { | 540 | { |
541 | bool r; | 541 | bool r; |
542 | 542 | ||
543 | /* detect if we have any ATPX + 2 VGA in the system */ | 543 | /* detect if we have any ATPX + 2 VGA in the system */ |
544 | r = radeon_atpx_detect(); | 544 | r = radeon_atpx_detect(); |
545 | if (!r) | 545 | if (!r) |
546 | return; | 546 | return; |
547 | 547 | ||
548 | vga_switcheroo_register_handler(&radeon_atpx_handler); | 548 | vga_switcheroo_register_handler(&radeon_atpx_handler); |
549 | } | 549 | } |
550 | 550 | ||
551 | /** | 551 | /** |
552 | * radeon_unregister_atpx_handler - unregister with vga_switcheroo | 552 | * radeon_unregister_atpx_handler - unregister with vga_switcheroo |
553 | * | 553 | * |
554 | * Unregister the PX callbacks with vga_switcheroo (all asics). | 554 | * Unregister the PX callbacks with vga_switcheroo (all asics). |
555 | */ | 555 | */ |
556 | void radeon_unregister_atpx_handler(void) | 556 | void radeon_unregister_atpx_handler(void) |
557 | { | 557 | { |
558 | vga_switcheroo_unregister_handler(); | 558 | vga_switcheroo_unregister_handler(); |
559 | } | 559 | } |
560 | 560 |
drivers/gpu/drm/radeon/radeon_bios.c
1 | /* | 1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | 2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. | 3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. | 4 | * Copyright 2009 Jerome Glisse. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation | 8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the | 10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: | 11 | * Software is furnished to do so, subject to the following conditions: |
12 | * | 12 | * |
13 | * The above copyright notice and this permission notice shall be included in | 13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. | 14 | * all copies or substantial portions of the Software. |
15 | * | 15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. | 22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * | 23 | * |
24 | * Authors: Dave Airlie | 24 | * Authors: Dave Airlie |
25 | * Alex Deucher | 25 | * Alex Deucher |
26 | * Jerome Glisse | 26 | * Jerome Glisse |
27 | */ | 27 | */ |
28 | #include <drm/drmP.h> | 28 | #include <drm/drmP.h> |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | 32 | ||
33 | #include <linux/vga_switcheroo.h> | 33 | #include <linux/vga_switcheroo.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/acpi.h> | 35 | #include <linux/acpi.h> |
36 | /* | 36 | /* |
37 | * BIOS. | 37 | * BIOS. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | /* If you boot an IGP board with a discrete card as the primary, | 40 | /* If you boot an IGP board with a discrete card as the primary, |
41 | * the IGP rom is not accessible via the rom bar as the IGP rom is | 41 | * the IGP rom is not accessible via the rom bar as the IGP rom is |
42 | * part of the system bios. On boot, the system bios puts a | 42 | * part of the system bios. On boot, the system bios puts a |
43 | * copy of the igp rom at the start of vram if a discrete card is | 43 | * copy of the igp rom at the start of vram if a discrete card is |
44 | * present. | 44 | * present. |
45 | */ | 45 | */ |
46 | static bool igp_read_bios_from_vram(struct radeon_device *rdev) | 46 | static bool igp_read_bios_from_vram(struct radeon_device *rdev) |
47 | { | 47 | { |
48 | uint8_t __iomem *bios; | 48 | uint8_t __iomem *bios; |
49 | resource_size_t vram_base; | 49 | resource_size_t vram_base; |
50 | resource_size_t size = 256 * 1024; /* ??? */ | 50 | resource_size_t size = 256 * 1024; /* ??? */ |
51 | 51 | ||
52 | if (!(rdev->flags & RADEON_IS_IGP)) | 52 | if (!(rdev->flags & RADEON_IS_IGP)) |
53 | if (!radeon_card_posted(rdev)) | 53 | if (!radeon_card_posted(rdev)) |
54 | return false; | 54 | return false; |
55 | 55 | ||
56 | rdev->bios = NULL; | 56 | rdev->bios = NULL; |
57 | vram_base = pci_resource_start(rdev->pdev, 0); | 57 | vram_base = pci_resource_start(rdev->pdev, 0); |
58 | bios = ioremap(vram_base, size); | 58 | bios = ioremap(vram_base, size); |
59 | if (!bios) { | 59 | if (!bios) { |
60 | return false; | 60 | return false; |
61 | } | 61 | } |
62 | 62 | ||
63 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | 63 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { |
64 | iounmap(bios); | 64 | iounmap(bios); |
65 | return false; | 65 | return false; |
66 | } | 66 | } |
67 | rdev->bios = kmalloc(size, GFP_KERNEL); | 67 | rdev->bios = kmalloc(size, GFP_KERNEL); |
68 | if (rdev->bios == NULL) { | 68 | if (rdev->bios == NULL) { |
69 | iounmap(bios); | 69 | iounmap(bios); |
70 | return false; | 70 | return false; |
71 | } | 71 | } |
72 | memcpy_fromio(rdev->bios, bios, size); | 72 | memcpy_fromio(rdev->bios, bios, size); |
73 | iounmap(bios); | 73 | iounmap(bios); |
74 | return true; | 74 | return true; |
75 | } | 75 | } |
76 | 76 | ||
77 | static bool radeon_read_bios(struct radeon_device *rdev) | 77 | static bool radeon_read_bios(struct radeon_device *rdev) |
78 | { | 78 | { |
79 | uint8_t __iomem *bios; | 79 | uint8_t __iomem *bios; |
80 | size_t size; | 80 | size_t size; |
81 | 81 | ||
82 | rdev->bios = NULL; | 82 | rdev->bios = NULL; |
83 | /* XXX: some cards may return 0 for rom size? ddx has a workaround */ | 83 | /* XXX: some cards may return 0 for rom size? ddx has a workaround */ |
84 | bios = pci_map_rom(rdev->pdev, &size); | 84 | bios = pci_map_rom(rdev->pdev, &size); |
85 | if (!bios) { | 85 | if (!bios) { |
86 | return false; | 86 | return false; |
87 | } | 87 | } |
88 | 88 | ||
89 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | 89 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { |
90 | pci_unmap_rom(rdev->pdev, bios); | 90 | pci_unmap_rom(rdev->pdev, bios); |
91 | return false; | 91 | return false; |
92 | } | 92 | } |
93 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | 93 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); |
94 | if (rdev->bios == NULL) { | 94 | if (rdev->bios == NULL) { |
95 | pci_unmap_rom(rdev->pdev, bios); | 95 | pci_unmap_rom(rdev->pdev, bios); |
96 | return false; | 96 | return false; |
97 | } | 97 | } |
98 | pci_unmap_rom(rdev->pdev, bios); | 98 | pci_unmap_rom(rdev->pdev, bios); |
99 | return true; | 99 | return true; |
100 | } | 100 | } |
101 | 101 | ||
102 | static bool radeon_read_platform_bios(struct radeon_device *rdev) | 102 | static bool radeon_read_platform_bios(struct radeon_device *rdev) |
103 | { | 103 | { |
104 | uint8_t __iomem *bios; | 104 | uint8_t __iomem *bios; |
105 | size_t size; | 105 | size_t size; |
106 | 106 | ||
107 | rdev->bios = NULL; | 107 | rdev->bios = NULL; |
108 | 108 | ||
109 | bios = pci_platform_rom(rdev->pdev, &size); | 109 | bios = pci_platform_rom(rdev->pdev, &size); |
110 | if (!bios) { | 110 | if (!bios) { |
111 | return false; | 111 | return false; |
112 | } | 112 | } |
113 | 113 | ||
114 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | 114 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { |
115 | return false; | 115 | return false; |
116 | } | 116 | } |
117 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | 117 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); |
118 | if (rdev->bios == NULL) { | 118 | if (rdev->bios == NULL) { |
119 | return false; | 119 | return false; |
120 | } | 120 | } |
121 | 121 | ||
122 | return true; | 122 | return true; |
123 | } | 123 | } |
124 | 124 | ||
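All three readers above end with the same PCI option-ROM signature test on the first two bytes (0x55, 0xAA). Purely as an illustration of that shared check (the file itself open-codes it in each reader rather than using a helper):

    /* Illustrative only: the signature test that igp_read_bios_from_vram(),
     * radeon_read_bios() and radeon_read_platform_bios() each open-code. */
    static bool radeon_bios_signature_ok(const uint8_t __iomem *bios, size_t size)
    {
            return size != 0 && bios[0] == 0x55 && bios[1] == 0xaa;
    }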
125 | #ifdef CONFIG_ACPI | 125 | #ifdef CONFIG_ACPI |
126 | /* ATRM is used to get the BIOS on the discrete cards in | 126 | /* ATRM is used to get the BIOS on the discrete cards in |
127 | * dual-gpu systems. | 127 | * dual-gpu systems. |
128 | */ | 128 | */ |
129 | /* retrieve the ROM in 4k blocks */ | 129 | /* retrieve the ROM in 4k blocks */ |
130 | #define ATRM_BIOS_PAGE 4096 | 130 | #define ATRM_BIOS_PAGE 4096 |
131 | /** | 131 | /** |
132 | * radeon_atrm_call - fetch a chunk of the vbios | 132 | * radeon_atrm_call - fetch a chunk of the vbios |
133 | * | 133 | * |
134 | * @atrm_handle: acpi ATRM handle | 134 | * @atrm_handle: acpi ATRM handle |
135 | * @bios: vbios image pointer | 135 | * @bios: vbios image pointer |
136 | * @offset: offset of vbios image data to fetch | 136 | * @offset: offset of vbios image data to fetch |
137 | * @len: length of vbios image data to fetch | 137 | * @len: length of vbios image data to fetch |
138 | * | 138 | * |
139 | * Executes ATRM to fetch a chunk of the discrete | 139 | * Executes ATRM to fetch a chunk of the discrete |
140 | * vbios image on PX systems (all asics). | 140 | * vbios image on PX systems (all asics). |
141 | * Returns the length of the buffer fetched. | 141 | * Returns the length of the buffer fetched. |
142 | */ | 142 | */ |
143 | static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, | 143 | static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, |
144 | int offset, int len) | 144 | int offset, int len) |
145 | { | 145 | { |
146 | acpi_status status; | 146 | acpi_status status; |
147 | union acpi_object atrm_arg_elements[2], *obj; | 147 | union acpi_object atrm_arg_elements[2], *obj; |
148 | struct acpi_object_list atrm_arg; | 148 | struct acpi_object_list atrm_arg; |
149 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; | 149 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; |
150 | 150 | ||
151 | atrm_arg.count = 2; | 151 | atrm_arg.count = 2; |
152 | atrm_arg.pointer = &atrm_arg_elements[0]; | 152 | atrm_arg.pointer = &atrm_arg_elements[0]; |
153 | 153 | ||
154 | atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; | 154 | atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; |
155 | atrm_arg_elements[0].integer.value = offset; | 155 | atrm_arg_elements[0].integer.value = offset; |
156 | 156 | ||
157 | atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; | 157 | atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; |
158 | atrm_arg_elements[1].integer.value = len; | 158 | atrm_arg_elements[1].integer.value = len; |
159 | 159 | ||
160 | status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); | 160 | status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); |
161 | if (ACPI_FAILURE(status)) { | 161 | if (ACPI_FAILURE(status)) { |
162 | printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); | 162 | printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); |
163 | return -ENODEV; | 163 | return -ENODEV; |
164 | } | 164 | } |
165 | 165 | ||
166 | obj = (union acpi_object *)buffer.pointer; | 166 | obj = (union acpi_object *)buffer.pointer; |
167 | memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); | 167 | memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); |
168 | len = obj->buffer.length; | 168 | len = obj->buffer.length; |
169 | kfree(buffer.pointer); | 169 | kfree(buffer.pointer); |
170 | return len; | 170 | return len; |
171 | } | 171 | } |
172 | 172 | ||
173 | static bool radeon_atrm_get_bios(struct radeon_device *rdev) | 173 | static bool radeon_atrm_get_bios(struct radeon_device *rdev) |
174 | { | 174 | { |
175 | int ret; | 175 | int ret; |
176 | int size = 256 * 1024; | 176 | int size = 256 * 1024; |
177 | int i; | 177 | int i; |
178 | struct pci_dev *pdev = NULL; | 178 | struct pci_dev *pdev = NULL; |
179 | acpi_handle dhandle, atrm_handle; | 179 | acpi_handle dhandle, atrm_handle; |
180 | acpi_status status; | 180 | acpi_status status; |
181 | bool found = false; | 181 | bool found = false; |
182 | 182 | ||
183 | /* ATRM is for the discrete card only */ | 183 | /* ATRM is for the discrete card only */ |
184 | if (rdev->flags & RADEON_IS_IGP) | 184 | if (rdev->flags & RADEON_IS_IGP) |
185 | return false; | 185 | return false; |
186 | 186 | ||
187 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 187 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
188 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 188 | dhandle = ACPI_HANDLE(&pdev->dev); |
189 | if (!dhandle) | 189 | if (!dhandle) |
190 | continue; | 190 | continue; |
191 | 191 | ||
192 | status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); | 192 | status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); |
193 | if (!ACPI_FAILURE(status)) { | 193 | if (!ACPI_FAILURE(status)) { |
194 | found = true; | 194 | found = true; |
195 | break; | 195 | break; |
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | if (!found) | 199 | if (!found) |
200 | return false; | 200 | return false; |
201 | 201 | ||
202 | rdev->bios = kmalloc(size, GFP_KERNEL); | 202 | rdev->bios = kmalloc(size, GFP_KERNEL); |
203 | if (!rdev->bios) { | 203 | if (!rdev->bios) { |
204 | DRM_ERROR("Unable to allocate bios\n"); | 204 | DRM_ERROR("Unable to allocate bios\n"); |
205 | return false; | 205 | return false; |
206 | } | 206 | } |
207 | 207 | ||
208 | for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { | 208 | for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { |
209 | ret = radeon_atrm_call(atrm_handle, | 209 | ret = radeon_atrm_call(atrm_handle, |
210 | rdev->bios, | 210 | rdev->bios, |
211 | (i * ATRM_BIOS_PAGE), | 211 | (i * ATRM_BIOS_PAGE), |
212 | ATRM_BIOS_PAGE); | 212 | ATRM_BIOS_PAGE); |
213 | if (ret < ATRM_BIOS_PAGE) | 213 | if (ret < ATRM_BIOS_PAGE) |
214 | break; | 214 | break; |
215 | } | 215 | } |
216 | 216 | ||
217 | if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { | 217 | if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { |
218 | kfree(rdev->bios); | 218 | kfree(rdev->bios); |
219 | return false; | 219 | return false; |
220 | } | 220 | } |
221 | return true; | 221 | return true; |
222 | } | 222 | } |
223 | #else | 223 | #else |
224 | static inline bool radeon_atrm_get_bios(struct radeon_device *rdev) | 224 | static inline bool radeon_atrm_get_bios(struct radeon_device *rdev) |
225 | { | 225 | { |
226 | return false; | 226 | return false; |
227 | } | 227 | } |
228 | #endif | 228 | #endif |
229 | 229 | ||
230 | static bool ni_read_disabled_bios(struct radeon_device *rdev) | 230 | static bool ni_read_disabled_bios(struct radeon_device *rdev) |
231 | { | 231 | { |
232 | u32 bus_cntl; | 232 | u32 bus_cntl; |
233 | u32 d1vga_control; | 233 | u32 d1vga_control; |
234 | u32 d2vga_control; | 234 | u32 d2vga_control; |
235 | u32 vga_render_control; | 235 | u32 vga_render_control; |
236 | u32 rom_cntl; | 236 | u32 rom_cntl; |
237 | bool r; | 237 | bool r; |
238 | 238 | ||
239 | bus_cntl = RREG32(R600_BUS_CNTL); | 239 | bus_cntl = RREG32(R600_BUS_CNTL); |
240 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 240 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
241 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 241 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
242 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 242 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
243 | rom_cntl = RREG32(R600_ROM_CNTL); | 243 | rom_cntl = RREG32(R600_ROM_CNTL); |
244 | 244 | ||
245 | /* enable the rom */ | 245 | /* enable the rom */ |
246 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); | 246 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
247 | if (!ASIC_IS_NODCE(rdev)) { | 247 | if (!ASIC_IS_NODCE(rdev)) { |
248 | /* Disable VGA mode */ | 248 | /* Disable VGA mode */ |
249 | WREG32(AVIVO_D1VGA_CONTROL, | 249 | WREG32(AVIVO_D1VGA_CONTROL, |
250 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 250 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
251 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 251 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
252 | WREG32(AVIVO_D2VGA_CONTROL, | 252 | WREG32(AVIVO_D2VGA_CONTROL, |
253 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 253 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
254 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 254 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
255 | WREG32(AVIVO_VGA_RENDER_CONTROL, | 255 | WREG32(AVIVO_VGA_RENDER_CONTROL, |
256 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); | 256 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); |
257 | } | 257 | } |
258 | WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); | 258 | WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); |
259 | 259 | ||
260 | r = radeon_read_bios(rdev); | 260 | r = radeon_read_bios(rdev); |
261 | 261 | ||
262 | /* restore regs */ | 262 | /* restore regs */ |
263 | WREG32(R600_BUS_CNTL, bus_cntl); | 263 | WREG32(R600_BUS_CNTL, bus_cntl); |
264 | if (!ASIC_IS_NODCE(rdev)) { | 264 | if (!ASIC_IS_NODCE(rdev)) { |
265 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 265 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
266 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 266 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
267 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 267 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
268 | } | 268 | } |
269 | WREG32(R600_ROM_CNTL, rom_cntl); | 269 | WREG32(R600_ROM_CNTL, rom_cntl); |
270 | return r; | 270 | return r; |
271 | } | 271 | } |
272 | 272 | ||
273 | static bool r700_read_disabled_bios(struct radeon_device *rdev) | 273 | static bool r700_read_disabled_bios(struct radeon_device *rdev) |
274 | { | 274 | { |
275 | uint32_t viph_control; | 275 | uint32_t viph_control; |
276 | uint32_t bus_cntl; | 276 | uint32_t bus_cntl; |
277 | uint32_t d1vga_control; | 277 | uint32_t d1vga_control; |
278 | uint32_t d2vga_control; | 278 | uint32_t d2vga_control; |
279 | uint32_t vga_render_control; | 279 | uint32_t vga_render_control; |
280 | uint32_t rom_cntl; | 280 | uint32_t rom_cntl; |
281 | uint32_t cg_spll_func_cntl = 0; | 281 | uint32_t cg_spll_func_cntl = 0; |
282 | uint32_t cg_spll_status; | 282 | uint32_t cg_spll_status; |
283 | bool r; | 283 | bool r; |
284 | 284 | ||
285 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 285 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
286 | bus_cntl = RREG32(R600_BUS_CNTL); | 286 | bus_cntl = RREG32(R600_BUS_CNTL); |
287 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 287 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
288 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 288 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
289 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 289 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
290 | rom_cntl = RREG32(R600_ROM_CNTL); | 290 | rom_cntl = RREG32(R600_ROM_CNTL); |
291 | 291 | ||
292 | /* disable VIP */ | 292 | /* disable VIP */ |
293 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 293 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
294 | /* enable the rom */ | 294 | /* enable the rom */ |
295 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); | 295 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
296 | /* Disable VGA mode */ | 296 | /* Disable VGA mode */ |
297 | WREG32(AVIVO_D1VGA_CONTROL, | 297 | WREG32(AVIVO_D1VGA_CONTROL, |
298 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 298 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
299 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 299 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
300 | WREG32(AVIVO_D2VGA_CONTROL, | 300 | WREG32(AVIVO_D2VGA_CONTROL, |
301 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 301 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
302 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 302 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
303 | WREG32(AVIVO_VGA_RENDER_CONTROL, | 303 | WREG32(AVIVO_VGA_RENDER_CONTROL, |
304 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); | 304 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); |
305 | 305 | ||
306 | if (rdev->family == CHIP_RV730) { | 306 | if (rdev->family == CHIP_RV730) { |
307 | cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL); | 307 | cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL); |
308 | 308 | ||
309 | /* enable bypass mode */ | 309 | /* enable bypass mode */ |
310 | WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl | | 310 | WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl | |
311 | R600_SPLL_BYPASS_EN)); | 311 | R600_SPLL_BYPASS_EN)); |
312 | 312 | ||
313 | /* wait for SPLL_CHG_STATUS to change to 1 */ | 313 | /* wait for SPLL_CHG_STATUS to change to 1 */ |
314 | cg_spll_status = 0; | 314 | cg_spll_status = 0; |
315 | while (!(cg_spll_status & R600_SPLL_CHG_STATUS)) | 315 | while (!(cg_spll_status & R600_SPLL_CHG_STATUS)) |
316 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); | 316 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); |
317 | 317 | ||
318 | WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE)); | 318 | WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE)); |
319 | } else | 319 | } else |
320 | WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE)); | 320 | WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE)); |
321 | 321 | ||
322 | r = radeon_read_bios(rdev); | 322 | r = radeon_read_bios(rdev); |
323 | 323 | ||
324 | /* restore regs */ | 324 | /* restore regs */ |
325 | if (rdev->family == CHIP_RV730) { | 325 | if (rdev->family == CHIP_RV730) { |
326 | WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl); | 326 | WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl); |
327 | 327 | ||
328 | /* wait for SPLL_CHG_STATUS to change to 1 */ | 328 | /* wait for SPLL_CHG_STATUS to change to 1 */ |
329 | cg_spll_status = 0; | 329 | cg_spll_status = 0; |
330 | while (!(cg_spll_status & R600_SPLL_CHG_STATUS)) | 330 | while (!(cg_spll_status & R600_SPLL_CHG_STATUS)) |
331 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); | 331 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); |
332 | } | 332 | } |
333 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 333 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
334 | WREG32(R600_BUS_CNTL, bus_cntl); | 334 | WREG32(R600_BUS_CNTL, bus_cntl); |
335 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 335 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
336 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 336 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
337 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 337 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
338 | WREG32(R600_ROM_CNTL, rom_cntl); | 338 | WREG32(R600_ROM_CNTL, rom_cntl); |
339 | return r; | 339 | return r; |
340 | } | 340 | } |
341 | 341 | ||
342 | static bool r600_read_disabled_bios(struct radeon_device *rdev) | 342 | static bool r600_read_disabled_bios(struct radeon_device *rdev) |
343 | { | 343 | { |
344 | uint32_t viph_control; | 344 | uint32_t viph_control; |
345 | uint32_t bus_cntl; | 345 | uint32_t bus_cntl; |
346 | uint32_t d1vga_control; | 346 | uint32_t d1vga_control; |
347 | uint32_t d2vga_control; | 347 | uint32_t d2vga_control; |
348 | uint32_t vga_render_control; | 348 | uint32_t vga_render_control; |
349 | uint32_t rom_cntl; | 349 | uint32_t rom_cntl; |
350 | uint32_t general_pwrmgt; | 350 | uint32_t general_pwrmgt; |
351 | uint32_t low_vid_lower_gpio_cntl; | 351 | uint32_t low_vid_lower_gpio_cntl; |
352 | uint32_t medium_vid_lower_gpio_cntl; | 352 | uint32_t medium_vid_lower_gpio_cntl; |
353 | uint32_t high_vid_lower_gpio_cntl; | 353 | uint32_t high_vid_lower_gpio_cntl; |
354 | uint32_t ctxsw_vid_lower_gpio_cntl; | 354 | uint32_t ctxsw_vid_lower_gpio_cntl; |
355 | uint32_t lower_gpio_enable; | 355 | uint32_t lower_gpio_enable; |
356 | bool r; | 356 | bool r; |
357 | 357 | ||
358 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 358 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
359 | bus_cntl = RREG32(R600_BUS_CNTL); | 359 | bus_cntl = RREG32(R600_BUS_CNTL); |
360 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 360 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
361 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 361 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
362 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 362 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
363 | rom_cntl = RREG32(R600_ROM_CNTL); | 363 | rom_cntl = RREG32(R600_ROM_CNTL); |
364 | general_pwrmgt = RREG32(R600_GENERAL_PWRMGT); | 364 | general_pwrmgt = RREG32(R600_GENERAL_PWRMGT); |
365 | low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL); | 365 | low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL); |
366 | medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL); | 366 | medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL); |
367 | high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL); | 367 | high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL); |
368 | ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL); | 368 | ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL); |
369 | lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE); | 369 | lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE); |
370 | 370 | ||
371 | /* disable VIP */ | 371 | /* disable VIP */ |
372 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 372 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
373 | /* enable the rom */ | 373 | /* enable the rom */ |
374 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); | 374 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
375 | /* Disable VGA mode */ | 375 | /* Disable VGA mode */ |
376 | WREG32(AVIVO_D1VGA_CONTROL, | 376 | WREG32(AVIVO_D1VGA_CONTROL, |
377 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 377 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
378 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 378 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
379 | WREG32(AVIVO_D2VGA_CONTROL, | 379 | WREG32(AVIVO_D2VGA_CONTROL, |
380 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 380 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
381 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 381 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
382 | WREG32(AVIVO_VGA_RENDER_CONTROL, | 382 | WREG32(AVIVO_VGA_RENDER_CONTROL, |
383 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); | 383 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); |
384 | 384 | ||
385 | WREG32(R600_ROM_CNTL, | 385 | WREG32(R600_ROM_CNTL, |
386 | ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) | | 386 | ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) | |
387 | (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) | | 387 | (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) | |
388 | R600_SCK_OVERWRITE)); | 388 | R600_SCK_OVERWRITE)); |
389 | 389 | ||
390 | WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS)); | 390 | WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS)); |
391 | WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, | 391 | WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, |
392 | (low_vid_lower_gpio_cntl & ~0x400)); | 392 | (low_vid_lower_gpio_cntl & ~0x400)); |
393 | WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, | 393 | WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, |
394 | (medium_vid_lower_gpio_cntl & ~0x400)); | 394 | (medium_vid_lower_gpio_cntl & ~0x400)); |
395 | WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, | 395 | WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, |
396 | (high_vid_lower_gpio_cntl & ~0x400)); | 396 | (high_vid_lower_gpio_cntl & ~0x400)); |
397 | WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, | 397 | WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, |
398 | (ctxsw_vid_lower_gpio_cntl & ~0x400)); | 398 | (ctxsw_vid_lower_gpio_cntl & ~0x400)); |
399 | WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400)); | 399 | WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400)); |
400 | 400 | ||
401 | r = radeon_read_bios(rdev); | 401 | r = radeon_read_bios(rdev); |
402 | 402 | ||
403 | /* restore regs */ | 403 | /* restore regs */ |
404 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 404 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
405 | WREG32(R600_BUS_CNTL, bus_cntl); | 405 | WREG32(R600_BUS_CNTL, bus_cntl); |
406 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 406 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
407 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 407 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
408 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 408 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
409 | WREG32(R600_ROM_CNTL, rom_cntl); | 409 | WREG32(R600_ROM_CNTL, rom_cntl); |
410 | WREG32(R600_GENERAL_PWRMGT, general_pwrmgt); | 410 | WREG32(R600_GENERAL_PWRMGT, general_pwrmgt); |
411 | WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl); | 411 | WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl); |
412 | WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl); | 412 | WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl); |
413 | WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl); | 413 | WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl); |
414 | WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl); | 414 | WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl); |
415 | WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable); | 415 | WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable); |
416 | return r; | 416 | return r; |
417 | } | 417 | } |
418 | 418 | ||
419 | static bool avivo_read_disabled_bios(struct radeon_device *rdev) | 419 | static bool avivo_read_disabled_bios(struct radeon_device *rdev) |
420 | { | 420 | { |
421 | uint32_t seprom_cntl1; | 421 | uint32_t seprom_cntl1; |
422 | uint32_t viph_control; | 422 | uint32_t viph_control; |
423 | uint32_t bus_cntl; | 423 | uint32_t bus_cntl; |
424 | uint32_t d1vga_control; | 424 | uint32_t d1vga_control; |
425 | uint32_t d2vga_control; | 425 | uint32_t d2vga_control; |
426 | uint32_t vga_render_control; | 426 | uint32_t vga_render_control; |
427 | uint32_t gpiopad_a; | 427 | uint32_t gpiopad_a; |
428 | uint32_t gpiopad_en; | 428 | uint32_t gpiopad_en; |
429 | uint32_t gpiopad_mask; | 429 | uint32_t gpiopad_mask; |
430 | bool r; | 430 | bool r; |
431 | 431 | ||
432 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); | 432 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); |
433 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 433 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
434 | bus_cntl = RREG32(RV370_BUS_CNTL); | 434 | bus_cntl = RREG32(RV370_BUS_CNTL); |
435 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 435 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
436 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 436 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
437 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 437 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
438 | gpiopad_a = RREG32(RADEON_GPIOPAD_A); | 438 | gpiopad_a = RREG32(RADEON_GPIOPAD_A); |
439 | gpiopad_en = RREG32(RADEON_GPIOPAD_EN); | 439 | gpiopad_en = RREG32(RADEON_GPIOPAD_EN); |
440 | gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK); | 440 | gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK); |
441 | 441 | ||
442 | WREG32(RADEON_SEPROM_CNTL1, | 442 | WREG32(RADEON_SEPROM_CNTL1, |
443 | ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) | | 443 | ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) | |
444 | (0xc << RADEON_SCK_PRESCALE_SHIFT))); | 444 | (0xc << RADEON_SCK_PRESCALE_SHIFT))); |
445 | WREG32(RADEON_GPIOPAD_A, 0); | 445 | WREG32(RADEON_GPIOPAD_A, 0); |
446 | WREG32(RADEON_GPIOPAD_EN, 0); | 446 | WREG32(RADEON_GPIOPAD_EN, 0); |
447 | WREG32(RADEON_GPIOPAD_MASK, 0); | 447 | WREG32(RADEON_GPIOPAD_MASK, 0); |
448 | 448 | ||
449 | /* disable VIP */ | 449 | /* disable VIP */ |
450 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 450 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
451 | 451 | ||
452 | /* enable the rom */ | 452 | /* enable the rom */ |
453 | WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM)); | 453 | WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM)); |
454 | 454 | ||
455 | /* Disable VGA mode */ | 455 | /* Disable VGA mode */ |
456 | WREG32(AVIVO_D1VGA_CONTROL, | 456 | WREG32(AVIVO_D1VGA_CONTROL, |
457 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 457 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
458 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 458 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
459 | WREG32(AVIVO_D2VGA_CONTROL, | 459 | WREG32(AVIVO_D2VGA_CONTROL, |
460 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 460 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
461 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | 461 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); |
462 | WREG32(AVIVO_VGA_RENDER_CONTROL, | 462 | WREG32(AVIVO_VGA_RENDER_CONTROL, |
463 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); | 463 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); |
464 | 464 | ||
465 | r = radeon_read_bios(rdev); | 465 | r = radeon_read_bios(rdev); |
466 | 466 | ||
467 | /* restore regs */ | 467 | /* restore regs */ |
468 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); | 468 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); |
469 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 469 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
470 | WREG32(RV370_BUS_CNTL, bus_cntl); | 470 | WREG32(RV370_BUS_CNTL, bus_cntl); |
471 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 471 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
472 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 472 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
473 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 473 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
474 | WREG32(RADEON_GPIOPAD_A, gpiopad_a); | 474 | WREG32(RADEON_GPIOPAD_A, gpiopad_a); |
475 | WREG32(RADEON_GPIOPAD_EN, gpiopad_en); | 475 | WREG32(RADEON_GPIOPAD_EN, gpiopad_en); |
476 | WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask); | 476 | WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask); |
477 | return r; | 477 | return r; |
478 | } | 478 | } |
479 | 479 | ||
480 | static bool legacy_read_disabled_bios(struct radeon_device *rdev) | 480 | static bool legacy_read_disabled_bios(struct radeon_device *rdev) |
481 | { | 481 | { |
482 | uint32_t seprom_cntl1; | 482 | uint32_t seprom_cntl1; |
483 | uint32_t viph_control; | 483 | uint32_t viph_control; |
484 | uint32_t bus_cntl; | 484 | uint32_t bus_cntl; |
485 | uint32_t crtc_gen_cntl; | 485 | uint32_t crtc_gen_cntl; |
486 | uint32_t crtc2_gen_cntl; | 486 | uint32_t crtc2_gen_cntl; |
487 | uint32_t crtc_ext_cntl; | 487 | uint32_t crtc_ext_cntl; |
488 | uint32_t fp2_gen_cntl; | 488 | uint32_t fp2_gen_cntl; |
489 | bool r; | 489 | bool r; |
490 | 490 | ||
491 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); | 491 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); |
492 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 492 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
493 | if (rdev->flags & RADEON_IS_PCIE) | 493 | if (rdev->flags & RADEON_IS_PCIE) |
494 | bus_cntl = RREG32(RV370_BUS_CNTL); | 494 | bus_cntl = RREG32(RV370_BUS_CNTL); |
495 | else | 495 | else |
496 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 496 | bus_cntl = RREG32(RADEON_BUS_CNTL); |
497 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); | 497 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); |
498 | crtc2_gen_cntl = 0; | 498 | crtc2_gen_cntl = 0; |
499 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | 499 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); |
500 | fp2_gen_cntl = 0; | 500 | fp2_gen_cntl = 0; |
501 | 501 | ||
502 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { | 502 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { |
503 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 503 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
504 | } | 504 | } |
505 | 505 | ||
506 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 506 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
507 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); | 507 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); |
508 | } | 508 | } |
509 | 509 | ||
510 | WREG32(RADEON_SEPROM_CNTL1, | 510 | WREG32(RADEON_SEPROM_CNTL1, |
511 | ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) | | 511 | ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) | |
512 | (0xc << RADEON_SCK_PRESCALE_SHIFT))); | 512 | (0xc << RADEON_SCK_PRESCALE_SHIFT))); |
513 | 513 | ||
514 | /* disable VIP */ | 514 | /* disable VIP */ |
515 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 515 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
516 | 516 | ||
517 | /* enable the rom */ | 517 | /* enable the rom */ |
518 | if (rdev->flags & RADEON_IS_PCIE) | 518 | if (rdev->flags & RADEON_IS_PCIE) |
519 | WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM)); | 519 | WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM)); |
520 | else | 520 | else |
521 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 521 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); |
522 | 522 | ||
523 | /* Turn off mem requests and CRTC for both controllers */ | 523 | /* Turn off mem requests and CRTC for both controllers */ |
524 | WREG32(RADEON_CRTC_GEN_CNTL, | 524 | WREG32(RADEON_CRTC_GEN_CNTL, |
525 | ((crtc_gen_cntl & ~RADEON_CRTC_EN) | | 525 | ((crtc_gen_cntl & ~RADEON_CRTC_EN) | |
526 | (RADEON_CRTC_DISP_REQ_EN_B | | 526 | (RADEON_CRTC_DISP_REQ_EN_B | |
527 | RADEON_CRTC_EXT_DISP_EN))); | 527 | RADEON_CRTC_EXT_DISP_EN))); |
528 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 528 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
529 | WREG32(RADEON_CRTC2_GEN_CNTL, | 529 | WREG32(RADEON_CRTC2_GEN_CNTL, |
530 | ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) | | 530 | ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) | |
531 | RADEON_CRTC2_DISP_REQ_EN_B)); | 531 | RADEON_CRTC2_DISP_REQ_EN_B)); |
532 | } | 532 | } |
533 | /* Turn off CRTC */ | 533 | /* Turn off CRTC */ |
534 | WREG32(RADEON_CRTC_EXT_CNTL, | 534 | WREG32(RADEON_CRTC_EXT_CNTL, |
535 | ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) | | 535 | ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) | |
536 | (RADEON_CRTC_SYNC_TRISTAT | | 536 | (RADEON_CRTC_SYNC_TRISTAT | |
537 | RADEON_CRTC_DISPLAY_DIS))); | 537 | RADEON_CRTC_DISPLAY_DIS))); |
538 | 538 | ||
539 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { | 539 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { |
540 | WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); | 540 | WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); |
541 | } | 541 | } |
542 | 542 | ||
543 | r = radeon_read_bios(rdev); | 543 | r = radeon_read_bios(rdev); |
544 | 544 | ||
545 | /* restore regs */ | 545 | /* restore regs */ |
546 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); | 546 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); |
547 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 547 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
548 | if (rdev->flags & RADEON_IS_PCIE) | 548 | if (rdev->flags & RADEON_IS_PCIE) |
549 | WREG32(RV370_BUS_CNTL, bus_cntl); | 549 | WREG32(RV370_BUS_CNTL, bus_cntl); |
550 | else | 550 | else |
551 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 551 | WREG32(RADEON_BUS_CNTL, bus_cntl); |
552 | WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); | 552 | WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); |
553 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 553 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
554 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); | 554 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); |
555 | } | 555 | } |
556 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); | 556 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); |
557 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { | 557 | if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { |
558 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | 558 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); |
559 | } | 559 | } |
560 | return r; | 560 | return r; |
561 | } | 561 | } |
562 | 562 | ||
563 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) | 563 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) |
564 | { | 564 | { |
565 | if (rdev->flags & RADEON_IS_IGP) | 565 | if (rdev->flags & RADEON_IS_IGP) |
566 | return igp_read_bios_from_vram(rdev); | 566 | return igp_read_bios_from_vram(rdev); |
567 | else if (rdev->family >= CHIP_BARTS) | 567 | else if (rdev->family >= CHIP_BARTS) |
568 | return ni_read_disabled_bios(rdev); | 568 | return ni_read_disabled_bios(rdev); |
569 | else if (rdev->family >= CHIP_RV770) | 569 | else if (rdev->family >= CHIP_RV770) |
570 | return r700_read_disabled_bios(rdev); | 570 | return r700_read_disabled_bios(rdev); |
571 | else if (rdev->family >= CHIP_R600) | 571 | else if (rdev->family >= CHIP_R600) |
572 | return r600_read_disabled_bios(rdev); | 572 | return r600_read_disabled_bios(rdev); |
573 | else if (rdev->family >= CHIP_RS600) | 573 | else if (rdev->family >= CHIP_RS600) |
574 | return avivo_read_disabled_bios(rdev); | 574 | return avivo_read_disabled_bios(rdev); |
575 | else | 575 | else |
576 | return legacy_read_disabled_bios(rdev); | 576 | return legacy_read_disabled_bios(rdev); |
577 | } | 577 | } |
578 | 578 | ||
579 | #ifdef CONFIG_ACPI | 579 | #ifdef CONFIG_ACPI |
580 | static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) | 580 | static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) |
581 | { | 581 | { |
582 | bool ret = false; | 582 | bool ret = false; |
583 | struct acpi_table_header *hdr; | 583 | struct acpi_table_header *hdr; |
584 | acpi_size tbl_size; | 584 | acpi_size tbl_size; |
585 | UEFI_ACPI_VFCT *vfct; | 585 | UEFI_ACPI_VFCT *vfct; |
586 | GOP_VBIOS_CONTENT *vbios; | 586 | GOP_VBIOS_CONTENT *vbios; |
587 | VFCT_IMAGE_HEADER *vhdr; | 587 | VFCT_IMAGE_HEADER *vhdr; |
588 | 588 | ||
589 | if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) | 589 | if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) |
590 | return false; | 590 | return false; |
591 | if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { | 591 | if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { |
592 | DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); | 592 | DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); |
593 | goto out_unmap; | 593 | goto out_unmap; |
594 | } | 594 | } |
595 | 595 | ||
596 | vfct = (UEFI_ACPI_VFCT *)hdr; | 596 | vfct = (UEFI_ACPI_VFCT *)hdr; |
597 | if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { | 597 | if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { |
598 | DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); | 598 | DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); |
599 | goto out_unmap; | 599 | goto out_unmap; |
600 | } | 600 | } |
601 | 601 | ||
602 | vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); | 602 | vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); |
603 | vhdr = &vbios->VbiosHeader; | 603 | vhdr = &vbios->VbiosHeader; |
604 | DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", | 604 | DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", |
605 | vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, | 605 | vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, |
606 | vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); | 606 | vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); |
607 | 607 | ||
608 | if (vhdr->PCIBus != rdev->pdev->bus->number || | 608 | if (vhdr->PCIBus != rdev->pdev->bus->number || |
609 | vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || | 609 | vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || |
610 | vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || | 610 | vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || |
611 | vhdr->VendorID != rdev->pdev->vendor || | 611 | vhdr->VendorID != rdev->pdev->vendor || |
612 | vhdr->DeviceID != rdev->pdev->device) { | 612 | vhdr->DeviceID != rdev->pdev->device) { |
613 | DRM_INFO("ACPI VFCT table is not for this card\n"); | 613 | DRM_INFO("ACPI VFCT table is not for this card\n"); |
614 | goto out_unmap; | 614 | goto out_unmap; |
615 | }; | 615 | }; |
616 | 616 | ||
617 | if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { | 617 | if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { |
618 | DRM_ERROR("ACPI VFCT image truncated\n"); | 618 | DRM_ERROR("ACPI VFCT image truncated\n"); |
619 | goto out_unmap; | 619 | goto out_unmap; |
620 | } | 620 | } |
621 | 621 | ||
622 | rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); | 622 | rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); |
623 | ret = !!rdev->bios; | 623 | ret = !!rdev->bios; |
624 | 624 | ||
625 | out_unmap: | 625 | out_unmap: |
626 | return ret; | 626 | return ret; |
627 | } | 627 | } |
628 | #else | 628 | #else |
629 | static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) | 629 | static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) |
630 | { | 630 | { |
631 | return false; | 631 | return false; |
632 | } | 632 | } |
633 | #endif | 633 | #endif |
634 | 634 | ||
635 | bool radeon_get_bios(struct radeon_device *rdev) | 635 | bool radeon_get_bios(struct radeon_device *rdev) |
636 | { | 636 | { |
637 | bool r; | 637 | bool r; |
638 | uint16_t tmp; | 638 | uint16_t tmp; |
639 | 639 | ||
640 | r = radeon_atrm_get_bios(rdev); | 640 | r = radeon_atrm_get_bios(rdev); |
641 | if (r == false) | 641 | if (r == false) |
642 | r = radeon_acpi_vfct_bios(rdev); | 642 | r = radeon_acpi_vfct_bios(rdev); |
643 | if (r == false) | 643 | if (r == false) |
644 | r = igp_read_bios_from_vram(rdev); | 644 | r = igp_read_bios_from_vram(rdev); |
645 | if (r == false) | 645 | if (r == false) |
646 | r = radeon_read_bios(rdev); | 646 | r = radeon_read_bios(rdev); |
647 | if (r == false) { | 647 | if (r == false) { |
648 | r = radeon_read_disabled_bios(rdev); | 648 | r = radeon_read_disabled_bios(rdev); |
649 | } | 649 | } |
650 | if (r == false) { | 650 | if (r == false) { |
651 | r = radeon_read_platform_bios(rdev); | 651 | r = radeon_read_platform_bios(rdev); |
652 | } | 652 | } |
653 | if (r == false || rdev->bios == NULL) { | 653 | if (r == false || rdev->bios == NULL) { |
654 | DRM_ERROR("Unable to locate a BIOS ROM\n"); | 654 | DRM_ERROR("Unable to locate a BIOS ROM\n"); |
655 | rdev->bios = NULL; | 655 | rdev->bios = NULL; |
656 | return false; | 656 | return false; |
657 | } | 657 | } |
658 | if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { | 658 | if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { |
659 | printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]); | 659 | printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]); |
660 | goto free_bios; | 660 | goto free_bios; |
661 | } | 661 | } |
662 | 662 | ||
663 | tmp = RBIOS16(0x18); | 663 | tmp = RBIOS16(0x18); |
664 | if (RBIOS8(tmp + 0x14) != 0x0) { | 664 | if (RBIOS8(tmp + 0x14) != 0x0) { |
665 | DRM_INFO("Not an x86 BIOS ROM, not using.\n"); | 665 | DRM_INFO("Not an x86 BIOS ROM, not using.\n"); |
666 | goto free_bios; | 666 | goto free_bios; |
667 | } | 667 | } |
668 | 668 | ||
669 | rdev->bios_header_start = RBIOS16(0x48); | 669 | rdev->bios_header_start = RBIOS16(0x48); |
670 | if (!rdev->bios_header_start) { | 670 | if (!rdev->bios_header_start) { |
671 | goto free_bios; | 671 | goto free_bios; |
672 | } | 672 | } |
673 | tmp = rdev->bios_header_start + 4; | 673 | tmp = rdev->bios_header_start + 4; |
674 | if (!memcmp(rdev->bios + tmp, "ATOM", 4) || | 674 | if (!memcmp(rdev->bios + tmp, "ATOM", 4) || |
675 | !memcmp(rdev->bios + tmp, "MOTA", 4)) { | 675 | !memcmp(rdev->bios + tmp, "MOTA", 4)) { |
676 | rdev->is_atom_bios = true; | 676 | rdev->is_atom_bios = true; |
677 | } else { | 677 | } else { |
678 | rdev->is_atom_bios = false; | 678 | rdev->is_atom_bios = false; |
679 | } | 679 | } |
680 | 680 | ||
681 | DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM"); | 681 | DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM"); |
682 | return true; | 682 | return true; |
683 | free_bios: | 683 | free_bios: |
684 | kfree(rdev->bios); | 684 | kfree(rdev->bios); |
685 | rdev->bios = NULL; | 685 | rdev->bios = NULL; |
686 | return false; | 686 | return false; |
687 | } | 687 | } |
688 | 688 |
drivers/ide/ide-acpi.c
1 | /* | 1 | /* |
2 | * Provides ACPI support for IDE drives. | 2 | * Provides ACPI support for IDE drives. |
3 | * | 3 | * |
4 | * Copyright (C) 2005 Intel Corp. | 4 | * Copyright (C) 2005 Intel Corp. |
5 | * Copyright (C) 2005 Randy Dunlap | 5 | * Copyright (C) 2005 Randy Dunlap |
6 | * Copyright (C) 2006 SUSE Linux Products GmbH | 6 | * Copyright (C) 2006 SUSE Linux Products GmbH |
7 | * Copyright (C) 2006 Hannes Reinecke | 7 | * Copyright (C) 2006 Hannes Reinecke |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/acpi.h> | 10 | #include <linux/acpi.h> |
11 | #include <linux/ata.h> | 11 | #include <linux/ata.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <acpi/acpi.h> | 17 | #include <acpi/acpi.h> |
18 | #include <linux/ide.h> | 18 | #include <linux/ide.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/dmi.h> | 20 | #include <linux/dmi.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | 22 | ||
23 | #define REGS_PER_GTF 7 | 23 | #define REGS_PER_GTF 7 |
24 | 24 | ||
25 | struct GTM_buffer { | 25 | struct GTM_buffer { |
26 | u32 PIO_speed0; | 26 | u32 PIO_speed0; |
27 | u32 DMA_speed0; | 27 | u32 DMA_speed0; |
28 | u32 PIO_speed1; | 28 | u32 PIO_speed1; |
29 | u32 DMA_speed1; | 29 | u32 DMA_speed1; |
30 | u32 GTM_flags; | 30 | u32 GTM_flags; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct ide_acpi_drive_link { | 33 | struct ide_acpi_drive_link { |
34 | acpi_handle obj_handle; | 34 | acpi_handle obj_handle; |
35 | u8 idbuff[512]; | 35 | u8 idbuff[512]; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | struct ide_acpi_hwif_link { | 38 | struct ide_acpi_hwif_link { |
39 | ide_hwif_t *hwif; | 39 | ide_hwif_t *hwif; |
40 | acpi_handle obj_handle; | 40 | acpi_handle obj_handle; |
41 | struct GTM_buffer gtm; | 41 | struct GTM_buffer gtm; |
42 | struct ide_acpi_drive_link master; | 42 | struct ide_acpi_drive_link master; |
43 | struct ide_acpi_drive_link slave; | 43 | struct ide_acpi_drive_link slave; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | #undef DEBUGGING | 46 | #undef DEBUGGING |
47 | /* note: adds function name and KERN_DEBUG */ | 47 | /* note: adds function name and KERN_DEBUG */ |
48 | #ifdef DEBUGGING | 48 | #ifdef DEBUGGING |
49 | #define DEBPRINT(fmt, args...) \ | 49 | #define DEBPRINT(fmt, args...) \ |
50 | printk(KERN_DEBUG "%s: " fmt, __func__, ## args) | 50 | printk(KERN_DEBUG "%s: " fmt, __func__, ## args) |
51 | #else | 51 | #else |
52 | #define DEBPRINT(fmt, args...) do {} while (0) | 52 | #define DEBPRINT(fmt, args...) do {} while (0) |
53 | #endif /* DEBUGGING */ | 53 | #endif /* DEBUGGING */ |
54 | 54 | ||
55 | static bool ide_noacpi; | 55 | static bool ide_noacpi; |
56 | module_param_named(noacpi, ide_noacpi, bool, 0); | 56 | module_param_named(noacpi, ide_noacpi, bool, 0); |
57 | MODULE_PARM_DESC(noacpi, "disable IDE ACPI support"); | 57 | MODULE_PARM_DESC(noacpi, "disable IDE ACPI support"); |
58 | 58 | ||
59 | static bool ide_acpigtf; | 59 | static bool ide_acpigtf; |
60 | module_param_named(acpigtf, ide_acpigtf, bool, 0); | 60 | module_param_named(acpigtf, ide_acpigtf, bool, 0); |
61 | MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support"); | 61 | MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support"); |
62 | 62 | ||
63 | static bool ide_acpionboot; | 63 | static bool ide_acpionboot; |
64 | module_param_named(acpionboot, ide_acpionboot, bool, 0); | 64 | module_param_named(acpionboot, ide_acpionboot, bool, 0); |
65 | MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot"); | 65 | MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot"); |
66 | 66 | ||
67 | static bool ide_noacpi_psx; | 67 | static bool ide_noacpi_psx; |
68 | static int no_acpi_psx(const struct dmi_system_id *id) | 68 | static int no_acpi_psx(const struct dmi_system_id *id) |
69 | { | 69 | { |
70 | ide_noacpi_psx = true; | 70 | ide_noacpi_psx = true; |
71 | printk(KERN_NOTICE"%s detected - disable ACPI _PSx.\n", id->ident); | 71 | printk(KERN_NOTICE"%s detected - disable ACPI _PSx.\n", id->ident); |
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | static const struct dmi_system_id ide_acpi_dmi_table[] = { | 75 | static const struct dmi_system_id ide_acpi_dmi_table[] = { |
76 | /* Bug 9673. */ | 76 | /* Bug 9673. */ |
77 | /* We should check if this is because ACPI NVS isn't saved/restored. */ | 77 | /* We should check if this is because ACPI NVS isn't saved/restored. */ |
78 | { | 78 | { |
79 | .callback = no_acpi_psx, | 79 | .callback = no_acpi_psx, |
80 | .ident = "HP nx9005", | 80 | .ident = "HP nx9005", |
81 | .matches = { | 81 | .matches = { |
82 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies Ltd."), | 82 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies Ltd."), |
83 | DMI_MATCH(DMI_BIOS_VERSION, "KAM1.60") | 83 | DMI_MATCH(DMI_BIOS_VERSION, "KAM1.60") |
84 | }, | 84 | }, |
85 | }, | 85 | }, |
86 | 86 | ||
87 | { } /* terminate list */ | 87 | { } /* terminate list */ |
88 | }; | 88 | }; |
89 | 89 | ||
90 | int ide_acpi_init(void) | 90 | int ide_acpi_init(void) |
91 | { | 91 | { |
92 | dmi_check_system(ide_acpi_dmi_table); | 92 | dmi_check_system(ide_acpi_dmi_table); |
93 | return 0; | 93 | return 0; |
94 | } | 94 | } |
95 | 95 | ||
96 | bool ide_port_acpi(ide_hwif_t *hwif) | 96 | bool ide_port_acpi(ide_hwif_t *hwif) |
97 | { | 97 | { |
98 | return ide_noacpi == 0 && hwif->acpidata; | 98 | return ide_noacpi == 0 && hwif->acpidata; |
99 | } | 99 | } |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * ide_get_dev_handle - finds acpi_handle and PCI device.function | 102 | * ide_get_dev_handle - finds acpi_handle and PCI device.function |
103 | * @dev: device to locate | 103 | * @dev: device to locate |
104 | * @handle: returned acpi_handle for @dev | 104 | * @handle: returned acpi_handle for @dev |
105 | * @pcidevfn: return PCI device.func for @dev | 105 | * @pcidevfn: return PCI device.func for @dev |
106 | * | 106 | * |
107 | * Returns the ACPI object handle to the corresponding PCI device. | 107 | * Returns the ACPI object handle to the corresponding PCI device. |
108 | * | 108 | * |
109 | * Returns 0 on success, <0 on error. | 109 | * Returns 0 on success, <0 on error. |
110 | */ | 110 | */ |
111 | static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, | 111 | static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, |
112 | u64 *pcidevfn) | 112 | u64 *pcidevfn) |
113 | { | 113 | { |
114 | struct pci_dev *pdev = to_pci_dev(dev); | 114 | struct pci_dev *pdev = to_pci_dev(dev); |
115 | unsigned int bus, devnum, func; | 115 | unsigned int bus, devnum, func; |
116 | u64 addr; | 116 | u64 addr; |
117 | acpi_handle dev_handle; | 117 | acpi_handle dev_handle; |
118 | acpi_status status; | 118 | acpi_status status; |
119 | struct acpi_device_info *dinfo = NULL; | 119 | struct acpi_device_info *dinfo = NULL; |
120 | int ret = -ENODEV; | 120 | int ret = -ENODEV; |
121 | 121 | ||
122 | bus = pdev->bus->number; | 122 | bus = pdev->bus->number; |
123 | devnum = PCI_SLOT(pdev->devfn); | 123 | devnum = PCI_SLOT(pdev->devfn); |
124 | func = PCI_FUNC(pdev->devfn); | 124 | func = PCI_FUNC(pdev->devfn); |
125 | /* ACPI _ADR encoding for PCI bus: */ | 125 | /* ACPI _ADR encoding for PCI bus: */ |
126 | addr = (u64)(devnum << 16 | func); | 126 | addr = (u64)(devnum << 16 | func); |
127 | 127 | ||
128 | DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); | 128 | DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); |
129 | 129 | ||
130 | dev_handle = DEVICE_ACPI_HANDLE(dev); | 130 | dev_handle = ACPI_HANDLE(dev); |
131 | if (!dev_handle) { | 131 | if (!dev_handle) { |
132 | DEBPRINT("no acpi handle for device\n"); | 132 | DEBPRINT("no acpi handle for device\n"); |
133 | goto err; | 133 | goto err; |
134 | } | 134 | } |
135 | 135 | ||
136 | status = acpi_get_object_info(dev_handle, &dinfo); | 136 | status = acpi_get_object_info(dev_handle, &dinfo); |
137 | if (ACPI_FAILURE(status)) { | 137 | if (ACPI_FAILURE(status)) { |
138 | DEBPRINT("get_object_info for device failed\n"); | 138 | DEBPRINT("get_object_info for device failed\n"); |
139 | goto err; | 139 | goto err; |
140 | } | 140 | } |
141 | if (dinfo && (dinfo->valid & ACPI_VALID_ADR) && | 141 | if (dinfo && (dinfo->valid & ACPI_VALID_ADR) && |
142 | dinfo->address == addr) { | 142 | dinfo->address == addr) { |
143 | *pcidevfn = addr; | 143 | *pcidevfn = addr; |
144 | *handle = dev_handle; | 144 | *handle = dev_handle; |
145 | } else { | 145 | } else { |
146 | DEBPRINT("get_object_info for device has wrong " | 146 | DEBPRINT("get_object_info for device has wrong " |
147 | " address: %llu, should be %u\n", | 147 | " address: %llu, should be %u\n", |
148 | dinfo ? (unsigned long long)dinfo->address : -1ULL, | 148 | dinfo ? (unsigned long long)dinfo->address : -1ULL, |
149 | (unsigned int)addr); | 149 | (unsigned int)addr); |
150 | goto err; | 150 | goto err; |
151 | } | 151 | } |
152 | 152 | ||
153 | DEBPRINT("for dev=0x%x.%x, addr=0x%llx, *handle=0x%p\n", | 153 | DEBPRINT("for dev=0x%x.%x, addr=0x%llx, *handle=0x%p\n", |
154 | devnum, func, (unsigned long long)addr, *handle); | 154 | devnum, func, (unsigned long long)addr, *handle); |
155 | ret = 0; | 155 | ret = 0; |
156 | err: | 156 | err: |
157 | kfree(dinfo); | 157 | kfree(dinfo); |
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | /** | 161 | /** |
162 | * ide_acpi_hwif_get_handle - Get ACPI object handle for a given hwif | 162 | * ide_acpi_hwif_get_handle - Get ACPI object handle for a given hwif |
163 | * @hwif: device to locate | 163 | * @hwif: device to locate |
164 | * | 164 | * |
165 | * Retrieves the object handle for a given hwif. | 165 | * Retrieves the object handle for a given hwif. |
166 | * | 166 | * |
167 | * Returns handle on success, 0 on error. | 167 | * Returns handle on success, 0 on error. |
168 | */ | 168 | */ |
169 | static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif) | 169 | static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif) |
170 | { | 170 | { |
171 | struct device *dev = hwif->gendev.parent; | 171 | struct device *dev = hwif->gendev.parent; |
172 | acpi_handle uninitialized_var(dev_handle); | 172 | acpi_handle uninitialized_var(dev_handle); |
173 | u64 pcidevfn; | 173 | u64 pcidevfn; |
174 | acpi_handle chan_handle; | 174 | acpi_handle chan_handle; |
175 | int err; | 175 | int err; |
176 | 176 | ||
177 | DEBPRINT("ENTER: device %s\n", hwif->name); | 177 | DEBPRINT("ENTER: device %s\n", hwif->name); |
178 | 178 | ||
179 | if (!dev) { | 179 | if (!dev) { |
180 | DEBPRINT("no PCI device for %s\n", hwif->name); | 180 | DEBPRINT("no PCI device for %s\n", hwif->name); |
181 | return NULL; | 181 | return NULL; |
182 | } | 182 | } |
183 | 183 | ||
184 | err = ide_get_dev_handle(dev, &dev_handle, &pcidevfn); | 184 | err = ide_get_dev_handle(dev, &dev_handle, &pcidevfn); |
185 | if (err < 0) { | 185 | if (err < 0) { |
186 | DEBPRINT("ide_get_dev_handle failed (%d)\n", err); | 186 | DEBPRINT("ide_get_dev_handle failed (%d)\n", err); |
187 | return NULL; | 187 | return NULL; |
188 | } | 188 | } |
189 | 189 | ||
190 | /* get child objects of dev_handle == channel objects, | 190 | /* get child objects of dev_handle == channel objects, |
191 | * + _their_ children == drive objects */ | 191 | * + _their_ children == drive objects */ |
192 | /* channel is hwif->channel */ | 192 | /* channel is hwif->channel */ |
193 | chan_handle = acpi_get_child(dev_handle, hwif->channel); | 193 | chan_handle = acpi_get_child(dev_handle, hwif->channel); |
194 | DEBPRINT("chan adr=%d: handle=0x%p\n", | 194 | DEBPRINT("chan adr=%d: handle=0x%p\n", |
195 | hwif->channel, chan_handle); | 195 | hwif->channel, chan_handle); |
196 | 196 | ||
197 | return chan_handle; | 197 | return chan_handle; |
198 | } | 198 | } |
199 | 199 | ||
200 | /** | 200 | /** |
201 | * do_drive_get_GTF - get the drive bootup default taskfile settings | 201 | * do_drive_get_GTF - get the drive bootup default taskfile settings |
202 | * @drive: the drive for which the taskfile settings should be retrieved | 202 | * @drive: the drive for which the taskfile settings should be retrieved |
203 | * @gtf_length: number of bytes of _GTF data returned at @gtf_address | 203 | * @gtf_length: number of bytes of _GTF data returned at @gtf_address |
204 | * @gtf_address: buffer containing _GTF taskfile arrays | 204 | * @gtf_address: buffer containing _GTF taskfile arrays |
205 | * | 205 | * |
206 | * The _GTF method has no input parameters. | 206 | * The _GTF method has no input parameters. |
207 | * It returns a variable number of register set values (registers | 207 | * It returns a variable number of register set values (registers |
208 | * hex 1F1..1F7, taskfiles). | 208 | * hex 1F1..1F7, taskfiles). |
209 | * The <variable number> is not known in advance, so have ACPI-CA | 209 | * The <variable number> is not known in advance, so have ACPI-CA |
210 | * allocate the buffer as needed and return it, then free it later. | 210 | * allocate the buffer as needed and return it, then free it later. |
211 | * | 211 | * |
212 | * The returned @gtf_length and @gtf_address are only valid if the | 212 | * The returned @gtf_length and @gtf_address are only valid if the |
213 | * function return value is 0. | 213 | * function return value is 0. |
214 | */ | 214 | */ |
215 | static int do_drive_get_GTF(ide_drive_t *drive, | 215 | static int do_drive_get_GTF(ide_drive_t *drive, |
216 | unsigned int *gtf_length, unsigned long *gtf_address, | 216 | unsigned int *gtf_length, unsigned long *gtf_address, |
217 | unsigned long *obj_loc) | 217 | unsigned long *obj_loc) |
218 | { | 218 | { |
219 | acpi_status status; | 219 | acpi_status status; |
220 | struct acpi_buffer output; | 220 | struct acpi_buffer output; |
221 | union acpi_object *out_obj; | 221 | union acpi_object *out_obj; |
222 | int err = -ENODEV; | 222 | int err = -ENODEV; |
223 | 223 | ||
224 | *gtf_length = 0; | 224 | *gtf_length = 0; |
225 | *gtf_address = 0UL; | 225 | *gtf_address = 0UL; |
226 | *obj_loc = 0UL; | 226 | *obj_loc = 0UL; |
227 | 227 | ||
228 | if (!drive->acpidata->obj_handle) { | 228 | if (!drive->acpidata->obj_handle) { |
229 | DEBPRINT("No ACPI object found for %s\n", drive->name); | 229 | DEBPRINT("No ACPI object found for %s\n", drive->name); |
230 | goto out; | 230 | goto out; |
231 | } | 231 | } |
232 | 232 | ||
233 | /* Setting up output buffer */ | 233 | /* Setting up output buffer */ |
234 | output.length = ACPI_ALLOCATE_BUFFER; | 234 | output.length = ACPI_ALLOCATE_BUFFER; |
235 | output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ | 235 | output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ |
236 | 236 | ||
237 | /* _GTF has no input parameters */ | 237 | /* _GTF has no input parameters */ |
238 | err = -EIO; | 238 | err = -EIO; |
239 | status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF", | 239 | status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF", |
240 | NULL, &output); | 240 | NULL, &output); |
241 | if (ACPI_FAILURE(status)) { | 241 | if (ACPI_FAILURE(status)) { |
242 | printk(KERN_DEBUG | 242 | printk(KERN_DEBUG |
243 | "%s: Run _GTF error: status = 0x%x\n", | 243 | "%s: Run _GTF error: status = 0x%x\n", |
244 | __func__, status); | 244 | __func__, status); |
245 | goto out; | 245 | goto out; |
246 | } | 246 | } |
247 | 247 | ||
248 | if (!output.length || !output.pointer) { | 248 | if (!output.length || !output.pointer) { |
249 | DEBPRINT("Run _GTF: " | 249 | DEBPRINT("Run _GTF: " |
250 | "length or ptr is NULL (0x%llx, 0x%p)\n", | 250 | "length or ptr is NULL (0x%llx, 0x%p)\n", |
251 | (unsigned long long)output.length, | 251 | (unsigned long long)output.length, |
252 | output.pointer); | 252 | output.pointer); |
253 | goto out; | 253 | goto out; |
254 | } | 254 | } |
255 | 255 | ||
256 | out_obj = output.pointer; | 256 | out_obj = output.pointer; |
257 | if (out_obj->type != ACPI_TYPE_BUFFER) { | 257 | if (out_obj->type != ACPI_TYPE_BUFFER) { |
258 | DEBPRINT("Run _GTF: error: " | 258 | DEBPRINT("Run _GTF: error: " |
259 | "expected object type of ACPI_TYPE_BUFFER, " | 259 | "expected object type of ACPI_TYPE_BUFFER, " |
260 | "got 0x%x\n", out_obj->type); | 260 | "got 0x%x\n", out_obj->type); |
261 | err = -ENOENT; | 261 | err = -ENOENT; |
262 | kfree(output.pointer); | 262 | kfree(output.pointer); |
263 | goto out; | 263 | goto out; |
264 | } | 264 | } |
265 | 265 | ||
266 | if (!out_obj->buffer.length || !out_obj->buffer.pointer || | 266 | if (!out_obj->buffer.length || !out_obj->buffer.pointer || |
267 | out_obj->buffer.length % REGS_PER_GTF) { | 267 | out_obj->buffer.length % REGS_PER_GTF) { |
268 | printk(KERN_ERR | 268 | printk(KERN_ERR |
269 | "%s: unexpected GTF length (%d) or addr (0x%p)\n", | 269 | "%s: unexpected GTF length (%d) or addr (0x%p)\n", |
270 | __func__, out_obj->buffer.length, | 270 | __func__, out_obj->buffer.length, |
271 | out_obj->buffer.pointer); | 271 | out_obj->buffer.pointer); |
272 | err = -ENOENT; | 272 | err = -ENOENT; |
273 | kfree(output.pointer); | 273 | kfree(output.pointer); |
274 | goto out; | 274 | goto out; |
275 | } | 275 | } |
276 | 276 | ||
277 | *gtf_length = out_obj->buffer.length; | 277 | *gtf_length = out_obj->buffer.length; |
278 | *gtf_address = (unsigned long)out_obj->buffer.pointer; | 278 | *gtf_address = (unsigned long)out_obj->buffer.pointer; |
279 | *obj_loc = (unsigned long)out_obj; | 279 | *obj_loc = (unsigned long)out_obj; |
280 | DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n", | 280 | DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n", |
281 | *gtf_length, *gtf_address, *obj_loc); | 281 | *gtf_length, *gtf_address, *obj_loc); |
282 | err = 0; | 282 | err = 0; |
283 | out: | 283 | out: |
284 | return err; | 284 | return err; |
285 | } | 285 | } |
286 | 286 | ||
287 | /** | 287 | /** |
288 | * do_drive_set_taskfiles - write the drive taskfile settings from _GTF | 288 | * do_drive_set_taskfiles - write the drive taskfile settings from _GTF |
289 | * @drive: the drive to which the taskfile command should be sent | 289 | * @drive: the drive to which the taskfile command should be sent |
290 | * @gtf_length: total number of bytes of _GTF taskfiles | 290 | * @gtf_length: total number of bytes of _GTF taskfiles |
291 | * @gtf_address: location of _GTF taskfile arrays | 291 | * @gtf_address: location of _GTF taskfile arrays |
292 | * | 292 | * |
293 | * Write {gtf_address, length gtf_length} in groups of | 293 | * Write {gtf_address, length gtf_length} in groups of |
294 | * REGS_PER_GTF bytes. | 294 | * REGS_PER_GTF bytes. |
295 | */ | 295 | */ |
296 | static int do_drive_set_taskfiles(ide_drive_t *drive, | 296 | static int do_drive_set_taskfiles(ide_drive_t *drive, |
297 | unsigned int gtf_length, | 297 | unsigned int gtf_length, |
298 | unsigned long gtf_address) | 298 | unsigned long gtf_address) |
299 | { | 299 | { |
300 | int rc = 0, err; | 300 | int rc = 0, err; |
301 | int gtf_count = gtf_length / REGS_PER_GTF; | 301 | int gtf_count = gtf_length / REGS_PER_GTF; |
302 | int ix; | 302 | int ix; |
303 | 303 | ||
304 | DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n", | 304 | DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n", |
305 | gtf_length, gtf_length, gtf_count, gtf_address); | 305 | gtf_length, gtf_length, gtf_count, gtf_address); |
306 | 306 | ||
307 | /* send all taskfile registers (0x1f1-0x1f7) *in*that*order* */ | 307 | /* send all taskfile registers (0x1f1-0x1f7) *in*that*order* */ |
308 | for (ix = 0; ix < gtf_count; ix++) { | 308 | for (ix = 0; ix < gtf_count; ix++) { |
309 | u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF); | 309 | u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF); |
310 | struct ide_cmd cmd; | 310 | struct ide_cmd cmd; |
311 | 311 | ||
312 | DEBPRINT("(0x1f1-1f7): " | 312 | DEBPRINT("(0x1f1-1f7): " |
313 | "hex: %02x %02x %02x %02x %02x %02x %02x\n", | 313 | "hex: %02x %02x %02x %02x %02x %02x %02x\n", |
314 | gtf[0], gtf[1], gtf[2], | 314 | gtf[0], gtf[1], gtf[2], |
315 | gtf[3], gtf[4], gtf[5], gtf[6]); | 315 | gtf[3], gtf[4], gtf[5], gtf[6]); |
316 | 316 | ||
317 | if (!ide_acpigtf) { | 317 | if (!ide_acpigtf) { |
318 | DEBPRINT("_GTF execution disabled\n"); | 318 | DEBPRINT("_GTF execution disabled\n"); |
319 | continue; | 319 | continue; |
320 | } | 320 | } |
321 | 321 | ||
322 | /* convert GTF to taskfile */ | 322 | /* convert GTF to taskfile */ |
323 | memset(&cmd, 0, sizeof(cmd)); | 323 | memset(&cmd, 0, sizeof(cmd)); |
324 | memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF); | 324 | memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF); |
325 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; | 325 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
326 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | 326 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; |
327 | 327 | ||
328 | err = ide_no_data_taskfile(drive, &cmd); | 328 | err = ide_no_data_taskfile(drive, &cmd); |
329 | if (err) { | 329 | if (err) { |
330 | printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", | 330 | printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", |
331 | __func__, err); | 331 | __func__, err); |
332 | rc = err; | 332 | rc = err; |
333 | } | 333 | } |
334 | } | 334 | } |
335 | 335 | ||
336 | return rc; | 336 | return rc; |
337 | } | 337 | } |
338 | 338 | ||
339 | /** | 339 | /** |
340 | * ide_acpi_exec_tfs - get then write drive taskfile settings | 340 | * ide_acpi_exec_tfs - get then write drive taskfile settings |
341 | * @drive: the drive for which the taskfile settings should be | 341 | * @drive: the drive for which the taskfile settings should be |
342 | * written. | 342 | * written. |
343 | * | 343 | * |
344 | * According to the ACPI spec this should be called after _STM | 344 | * According to the ACPI spec this should be called after _STM |
345 | * has been evaluated for the interface. Some ACPI vendors interpret | 345 | * has been evaluated for the interface. Some ACPI vendors interpret |
346 | * that as a hard requirement and modify the taskfile according | 346 | * that as a hard requirement and modify the taskfile according |
347 | * to the Identify Drive information passed down with _STM. | 347 | * to the Identify Drive information passed down with _STM. |
348 | * So one should really make sure to call this only after _STM has | 348 | * So one should really make sure to call this only after _STM has |
349 | * been executed. | 349 | * been executed. |
350 | */ | 350 | */ |
351 | int ide_acpi_exec_tfs(ide_drive_t *drive) | 351 | int ide_acpi_exec_tfs(ide_drive_t *drive) |
352 | { | 352 | { |
353 | int ret; | 353 | int ret; |
354 | unsigned int gtf_length; | 354 | unsigned int gtf_length; |
355 | unsigned long gtf_address; | 355 | unsigned long gtf_address; |
356 | unsigned long obj_loc; | 356 | unsigned long obj_loc; |
357 | 357 | ||
358 | DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn); | 358 | DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn); |
359 | 359 | ||
360 | ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc); | 360 | ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc); |
361 | if (ret < 0) { | 361 | if (ret < 0) { |
362 | DEBPRINT("get_GTF error (%d)\n", ret); | 362 | DEBPRINT("get_GTF error (%d)\n", ret); |
363 | return ret; | 363 | return ret; |
364 | } | 364 | } |
365 | 365 | ||
366 | DEBPRINT("call set_taskfiles, drive=%s\n", drive->name); | 366 | DEBPRINT("call set_taskfiles, drive=%s\n", drive->name); |
367 | 367 | ||
368 | ret = do_drive_set_taskfiles(drive, gtf_length, gtf_address); | 368 | ret = do_drive_set_taskfiles(drive, gtf_length, gtf_address); |
369 | kfree((void *)obj_loc); | 369 | kfree((void *)obj_loc); |
370 | if (ret < 0) { | 370 | if (ret < 0) { |
371 | DEBPRINT("set_taskfiles error (%d)\n", ret); | 371 | DEBPRINT("set_taskfiles error (%d)\n", ret); |
372 | } | 372 | } |
373 | 373 | ||
374 | DEBPRINT("ret=%d\n", ret); | 374 | DEBPRINT("ret=%d\n", ret); |
375 | 375 | ||
376 | return ret; | 376 | return ret; |
377 | } | 377 | } |
378 | 378 | ||
379 | /** | 379 | /** |
380 | * ide_acpi_get_timing - get the channel (controller) timings | 380 | * ide_acpi_get_timing - get the channel (controller) timings |
381 | * @hwif: target IDE interface (channel) | 381 | * @hwif: target IDE interface (channel) |
382 | * | 382 | * |
383 | * This function executes the _GTM ACPI method for the target channel. | 383 | * This function executes the _GTM ACPI method for the target channel. |
384 | * | 384 | * |
385 | */ | 385 | */ |
386 | void ide_acpi_get_timing(ide_hwif_t *hwif) | 386 | void ide_acpi_get_timing(ide_hwif_t *hwif) |
387 | { | 387 | { |
388 | acpi_status status; | 388 | acpi_status status; |
389 | struct acpi_buffer output; | 389 | struct acpi_buffer output; |
390 | union acpi_object *out_obj; | 390 | union acpi_object *out_obj; |
391 | 391 | ||
392 | /* Setting up output buffer for _GTM */ | 392 | /* Setting up output buffer for _GTM */ |
393 | output.length = ACPI_ALLOCATE_BUFFER; | 393 | output.length = ACPI_ALLOCATE_BUFFER; |
394 | output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ | 394 | output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ |
395 | 395 | ||
396 | /* _GTM has no input parameters */ | 396 | /* _GTM has no input parameters */ |
397 | status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_GTM", | 397 | status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_GTM", |
398 | NULL, &output); | 398 | NULL, &output); |
399 | 399 | ||
400 | DEBPRINT("_GTM status: %d, outptr: 0x%p, outlen: 0x%llx\n", | 400 | DEBPRINT("_GTM status: %d, outptr: 0x%p, outlen: 0x%llx\n", |
401 | status, output.pointer, | 401 | status, output.pointer, |
402 | (unsigned long long)output.length); | 402 | (unsigned long long)output.length); |
403 | 403 | ||
404 | if (ACPI_FAILURE(status)) { | 404 | if (ACPI_FAILURE(status)) { |
405 | DEBPRINT("Run _GTM error: status = 0x%x\n", status); | 405 | DEBPRINT("Run _GTM error: status = 0x%x\n", status); |
406 | return; | 406 | return; |
407 | } | 407 | } |
408 | 408 | ||
409 | if (!output.length || !output.pointer) { | 409 | if (!output.length || !output.pointer) { |
410 | DEBPRINT("Run _GTM: length or ptr is NULL (0x%llx, 0x%p)\n", | 410 | DEBPRINT("Run _GTM: length or ptr is NULL (0x%llx, 0x%p)\n", |
411 | (unsigned long long)output.length, | 411 | (unsigned long long)output.length, |
412 | output.pointer); | 412 | output.pointer); |
413 | kfree(output.pointer); | 413 | kfree(output.pointer); |
414 | return; | 414 | return; |
415 | } | 415 | } |
416 | 416 | ||
417 | out_obj = output.pointer; | 417 | out_obj = output.pointer; |
418 | if (out_obj->type != ACPI_TYPE_BUFFER) { | 418 | if (out_obj->type != ACPI_TYPE_BUFFER) { |
419 | DEBPRINT("Run _GTM: error: " | 419 | DEBPRINT("Run _GTM: error: " |
420 | "expected object type of ACPI_TYPE_BUFFER, " | 420 | "expected object type of ACPI_TYPE_BUFFER, " |
421 | "got 0x%x\n", out_obj->type); | 421 | "got 0x%x\n", out_obj->type); |
422 | kfree(output.pointer); | 422 | kfree(output.pointer); |
423 | return; | 423 | return; |
424 | } | 424 | } |
425 | 425 | ||
426 | if (!out_obj->buffer.length || !out_obj->buffer.pointer || | 426 | if (!out_obj->buffer.length || !out_obj->buffer.pointer || |
427 | out_obj->buffer.length != sizeof(struct GTM_buffer)) { | 427 | out_obj->buffer.length != sizeof(struct GTM_buffer)) { |
428 | printk(KERN_ERR | 428 | printk(KERN_ERR |
429 | "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or " | 429 | "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or " |
430 | "addr (0x%p)\n", | 430 | "addr (0x%p)\n", |
431 | __func__, out_obj->buffer.length, | 431 | __func__, out_obj->buffer.length, |
432 | sizeof(struct GTM_buffer), out_obj->buffer.pointer); | 432 | sizeof(struct GTM_buffer), out_obj->buffer.pointer); |
433 | kfree(output.pointer); | 433 | kfree(output.pointer); |
434 | return; | 434 | return; |
435 | } | 435 | } |
436 | 436 | ||
437 | memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer, | 437 | memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer, |
438 | sizeof(struct GTM_buffer)); | 438 | sizeof(struct GTM_buffer)); |
439 | 439 | ||
440 | DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%Zx\n", | 440 | DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%Zx\n", |
441 | out_obj->buffer.pointer, out_obj->buffer.length, | 441 | out_obj->buffer.pointer, out_obj->buffer.length, |
442 | sizeof(struct GTM_buffer)); | 442 | sizeof(struct GTM_buffer)); |
443 | 443 | ||
444 | DEBPRINT("_GTM fields: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", | 444 | DEBPRINT("_GTM fields: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", |
445 | hwif->acpidata->gtm.PIO_speed0, | 445 | hwif->acpidata->gtm.PIO_speed0, |
446 | hwif->acpidata->gtm.DMA_speed0, | 446 | hwif->acpidata->gtm.DMA_speed0, |
447 | hwif->acpidata->gtm.PIO_speed1, | 447 | hwif->acpidata->gtm.PIO_speed1, |
448 | hwif->acpidata->gtm.DMA_speed1, | 448 | hwif->acpidata->gtm.DMA_speed1, |
449 | hwif->acpidata->gtm.GTM_flags); | 449 | hwif->acpidata->gtm.GTM_flags); |
450 | 450 | ||
451 | kfree(output.pointer); | 451 | kfree(output.pointer); |
452 | } | 452 | } |
453 | 453 | ||
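The _GTM handling above follows the usual ACPICA convention for methods that return a buffer: the caller passes ACPI_ALLOCATE_BUFFER, ACPICA allocates output.pointer, and the caller must kfree() it on every exit path. A condensed sketch of that pattern (the helper name, method argument and expected length are illustrative assumptions, not part of this commit; it relies on the includes already present in this file):

static int eval_buffer_method(acpi_handle handle, char *method,
			      void *dst, size_t len)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret = -EIO;

	/* ACPICA allocates output.pointer when length is ACPI_ALLOCATE_BUFFER */
	status = acpi_evaluate_object(handle, method, NULL, &output);
	if (ACPI_FAILURE(status))
		return -EIO;

	obj = output.pointer;
	if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == len) {
		memcpy(dst, obj->buffer.pointer, len);
		ret = 0;
	}

	kfree(output.pointer);	/* caller owns the buffer on every path */
	return ret;
}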
454 | /** | 454 | /** |
455 | * ide_acpi_push_timing - set the channel (controller) timings | 455 | * ide_acpi_push_timing - set the channel (controller) timings |
456 | * @hwif: target IDE interface (channel) | 456 | * @hwif: target IDE interface (channel) |
457 | * | 457 | * |
458 | * This function executes the _STM ACPI method for the target channel. | 458 | * This function executes the _STM ACPI method for the target channel. |
459 | * | 459 | * |
460 | * _STM requires Identify Drive data, which has to be passed as an argument. | 460 | * _STM requires Identify Drive data, which has to be passed as an argument. |
461 | * Unfortunately drive->id is a mangled version which we can't readily | 461 | * Unfortunately drive->id is a mangled version which we can't readily |
462 | * use; hence we'll get the information afresh. | 462 | * use; hence we'll get the information afresh. |
463 | */ | 463 | */ |
464 | void ide_acpi_push_timing(ide_hwif_t *hwif) | 464 | void ide_acpi_push_timing(ide_hwif_t *hwif) |
465 | { | 465 | { |
466 | acpi_status status; | 466 | acpi_status status; |
467 | struct acpi_object_list input; | 467 | struct acpi_object_list input; |
468 | union acpi_object in_params[3]; | 468 | union acpi_object in_params[3]; |
469 | struct ide_acpi_drive_link *master = &hwif->acpidata->master; | 469 | struct ide_acpi_drive_link *master = &hwif->acpidata->master; |
470 | struct ide_acpi_drive_link *slave = &hwif->acpidata->slave; | 470 | struct ide_acpi_drive_link *slave = &hwif->acpidata->slave; |
471 | 471 | ||
472 | /* Give the GTM buffer + drive Identify data to the channel via the | 472 | /* Give the GTM buffer + drive Identify data to the channel via the |
473 | * _STM method: */ | 473 | * _STM method: */ |
474 | /* setup input parameters buffer for _STM */ | 474 | /* setup input parameters buffer for _STM */ |
475 | input.count = 3; | 475 | input.count = 3; |
476 | input.pointer = in_params; | 476 | input.pointer = in_params; |
477 | in_params[0].type = ACPI_TYPE_BUFFER; | 477 | in_params[0].type = ACPI_TYPE_BUFFER; |
478 | in_params[0].buffer.length = sizeof(struct GTM_buffer); | 478 | in_params[0].buffer.length = sizeof(struct GTM_buffer); |
479 | in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm; | 479 | in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm; |
480 | in_params[1].type = ACPI_TYPE_BUFFER; | 480 | in_params[1].type = ACPI_TYPE_BUFFER; |
481 | in_params[1].buffer.length = ATA_ID_WORDS * 2; | 481 | in_params[1].buffer.length = ATA_ID_WORDS * 2; |
482 | in_params[1].buffer.pointer = (u8 *)&master->idbuff; | 482 | in_params[1].buffer.pointer = (u8 *)&master->idbuff; |
483 | in_params[2].type = ACPI_TYPE_BUFFER; | 483 | in_params[2].type = ACPI_TYPE_BUFFER; |
484 | in_params[2].buffer.length = ATA_ID_WORDS * 2; | 484 | in_params[2].buffer.length = ATA_ID_WORDS * 2; |
485 | in_params[2].buffer.pointer = (u8 *)&slave->idbuff; | 485 | in_params[2].buffer.pointer = (u8 *)&slave->idbuff; |
486 | /* Output buffer: _STM has no output */ | 486 | /* Output buffer: _STM has no output */ |
487 | 487 | ||
488 | status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_STM", | 488 | status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_STM", |
489 | &input, NULL); | 489 | &input, NULL); |
490 | 490 | ||
491 | if (ACPI_FAILURE(status)) { | 491 | if (ACPI_FAILURE(status)) { |
492 | DEBPRINT("Run _STM error: status = 0x%x\n", status); | 492 | DEBPRINT("Run _STM error: status = 0x%x\n", status); |
493 | } | 493 | } |
494 | DEBPRINT("_STM status: %d\n", status); | 494 | DEBPRINT("_STM status: %d\n", status); |
495 | } | 495 | } |
496 | 496 | ||
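As a usage sketch (an assumption for illustration, not code from this commit), the two helpers are meant to be used as a pair, so that the channel timings captured with _GTM can later be programmed back with _STM, for example around a power transition:

static void example_save_timing(ide_hwif_t *hwif)
{
	if (hwif->acpidata)
		ide_acpi_get_timing(hwif);	/* snapshot timings via _GTM */
}

static void example_restore_timing(ide_hwif_t *hwif)
{
	if (hwif->acpidata)
		ide_acpi_push_timing(hwif);	/* reprogram them via _STM */
}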
497 | /** | 497 | /** |
498 | * ide_acpi_set_state - set the channel power state | 498 | * ide_acpi_set_state - set the channel power state |
499 | * @hwif: target IDE interface | 499 | * @hwif: target IDE interface |
500 | * @on: state, on/off | 500 | * @on: state, on/off |
501 | * | 501 | * |
502 | * This function executes the _PS0/_PS3 ACPI method to set the power state. | 502 | * This function executes the _PS0/_PS3 ACPI method to set the power state. |
503 | * The ACPI spec requires _PS0 when the IDE channel is powered on and _PS3 when it is powered off. | 503 | * The ACPI spec requires _PS0 when the IDE channel is powered on and _PS3 when it is powered off. |
504 | */ | 504 | */ |
505 | void ide_acpi_set_state(ide_hwif_t *hwif, int on) | 505 | void ide_acpi_set_state(ide_hwif_t *hwif, int on) |
506 | { | 506 | { |
507 | ide_drive_t *drive; | 507 | ide_drive_t *drive; |
508 | int i; | 508 | int i; |
509 | 509 | ||
510 | if (ide_noacpi_psx) | 510 | if (ide_noacpi_psx) |
511 | return; | 511 | return; |
512 | 512 | ||
513 | DEBPRINT("ENTER:\n"); | 513 | DEBPRINT("ENTER:\n"); |
514 | 514 | ||
515 | /* channel first and then drives for power on and vice versa for power off */ | 515 | /* channel first and then drives for power on and vice versa for power off */ |
516 | if (on) | 516 | if (on) |
517 | acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); | 517 | acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); |
518 | 518 | ||
519 | ide_port_for_each_present_dev(i, drive, hwif) { | 519 | ide_port_for_each_present_dev(i, drive, hwif) { |
520 | if (drive->acpidata->obj_handle) | 520 | if (drive->acpidata->obj_handle) |
521 | acpi_bus_set_power(drive->acpidata->obj_handle, | 521 | acpi_bus_set_power(drive->acpidata->obj_handle, |
522 | on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD); | 522 | on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD); |
523 | } | 523 | } |
524 | 524 | ||
525 | if (!on) | 525 | if (!on) |
526 | acpi_bus_set_power(hwif->acpidata->obj_handle, | 526 | acpi_bus_set_power(hwif->acpidata->obj_handle, |
527 | ACPI_STATE_D3_COLD); | 527 | ACPI_STATE_D3_COLD); |
528 | } | 528 | } |
529 | 529 | ||
530 | /** | 530 | /** |
531 | * ide_acpi_init_port - initialize the ACPI link for an IDE interface | 531 | * ide_acpi_init_port - initialize the ACPI link for an IDE interface |
532 | * @hwif: target IDE interface (channel) | 532 | * @hwif: target IDE interface (channel) |
533 | * | 533 | * |
534 | * The ACPI spec is not quite clear when the drive identify buffer | 534 | * The ACPI spec is not quite clear when the drive identify buffer |
535 | * should be obtained. Calling IDENTIFY DEVICE during shutdown | 535 | * should be obtained. Calling IDENTIFY DEVICE during shutdown |
536 | * is not the best of ideas as the drive might already have been put to | 536 | * is not the best of ideas as the drive might already have been put to |
537 | * sleep. And obviously we can't call it during resume. | 537 | * sleep. And obviously we can't call it during resume. |
538 | * So we get the information during startup; but this means that | 538 | * So we get the information during startup; but this means that |
539 | * any changes during run-time will be lost after resume. | 539 | * any changes during run-time will be lost after resume. |
540 | */ | 540 | */ |
541 | void ide_acpi_init_port(ide_hwif_t *hwif) | 541 | void ide_acpi_init_port(ide_hwif_t *hwif) |
542 | { | 542 | { |
543 | hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL); | 543 | hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL); |
544 | if (!hwif->acpidata) | 544 | if (!hwif->acpidata) |
545 | return; | 545 | return; |
546 | 546 | ||
547 | hwif->acpidata->obj_handle = ide_acpi_hwif_get_handle(hwif); | 547 | hwif->acpidata->obj_handle = ide_acpi_hwif_get_handle(hwif); |
548 | if (!hwif->acpidata->obj_handle) { | 548 | if (!hwif->acpidata->obj_handle) { |
549 | DEBPRINT("no ACPI object for %s found\n", hwif->name); | 549 | DEBPRINT("no ACPI object for %s found\n", hwif->name); |
550 | kfree(hwif->acpidata); | 550 | kfree(hwif->acpidata); |
551 | hwif->acpidata = NULL; | 551 | hwif->acpidata = NULL; |
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
555 | void ide_acpi_port_init_devices(ide_hwif_t *hwif) | 555 | void ide_acpi_port_init_devices(ide_hwif_t *hwif) |
556 | { | 556 | { |
557 | ide_drive_t *drive; | 557 | ide_drive_t *drive; |
558 | int i, err; | 558 | int i, err; |
559 | 559 | ||
560 | if (hwif->acpidata == NULL) | 560 | if (hwif->acpidata == NULL) |
561 | return; | 561 | return; |
562 | 562 | ||
563 | /* | 563 | /* |
564 | * The ACPI spec mandates that we send information | 564 | * The ACPI spec mandates that we send information |
565 | * for both drives, regardless of whether they are connected | 565 | * for both drives, regardless of whether they are connected |
566 | * or not. | 566 | * or not. |
567 | */ | 567 | */ |
568 | hwif->devices[0]->acpidata = &hwif->acpidata->master; | 568 | hwif->devices[0]->acpidata = &hwif->acpidata->master; |
569 | hwif->devices[1]->acpidata = &hwif->acpidata->slave; | 569 | hwif->devices[1]->acpidata = &hwif->acpidata->slave; |
570 | 570 | ||
571 | /* get _ADR info for each device */ | 571 | /* get _ADR info for each device */ |
572 | ide_port_for_each_present_dev(i, drive, hwif) { | 572 | ide_port_for_each_present_dev(i, drive, hwif) { |
573 | acpi_handle dev_handle; | 573 | acpi_handle dev_handle; |
574 | 574 | ||
575 | DEBPRINT("ENTER: %s at channel#: %d port#: %d\n", | 575 | DEBPRINT("ENTER: %s at channel#: %d port#: %d\n", |
576 | drive->name, hwif->channel, drive->dn & 1); | 576 | drive->name, hwif->channel, drive->dn & 1); |
577 | 577 | ||
578 | /* TBD: could also check ACPI object VALID bits */ | 578 | /* TBD: could also check ACPI object VALID bits */ |
579 | dev_handle = acpi_get_child(hwif->acpidata->obj_handle, | 579 | dev_handle = acpi_get_child(hwif->acpidata->obj_handle, |
580 | drive->dn & 1); | 580 | drive->dn & 1); |
581 | 581 | ||
582 | DEBPRINT("drive %s handle 0x%p\n", drive->name, dev_handle); | 582 | DEBPRINT("drive %s handle 0x%p\n", drive->name, dev_handle); |
583 | 583 | ||
584 | drive->acpidata->obj_handle = dev_handle; | 584 | drive->acpidata->obj_handle = dev_handle; |
585 | } | 585 | } |
586 | 586 | ||
587 | /* send IDENTIFY for each device */ | 587 | /* send IDENTIFY for each device */ |
588 | ide_port_for_each_present_dev(i, drive, hwif) { | 588 | ide_port_for_each_present_dev(i, drive, hwif) { |
589 | err = taskfile_lib_get_identify(drive, drive->acpidata->idbuff); | 589 | err = taskfile_lib_get_identify(drive, drive->acpidata->idbuff); |
590 | if (err) | 590 | if (err) |
591 | DEBPRINT("identify device %s failed (%d)\n", | 591 | DEBPRINT("identify device %s failed (%d)\n", |
592 | drive->name, err); | 592 | drive->name, err); |
593 | } | 593 | } |
594 | 594 | ||
595 | if (ide_noacpi || ide_acpionboot == 0) { | 595 | if (ide_noacpi || ide_acpionboot == 0) { |
596 | DEBPRINT("ACPI methods disabled on boot\n"); | 596 | DEBPRINT("ACPI methods disabled on boot\n"); |
597 | return; | 597 | return; |
598 | } | 598 | } |
599 | 599 | ||
600 | /* ACPI _PS0 before _STM */ | 600 | /* ACPI _PS0 before _STM */ |
601 | ide_acpi_set_state(hwif, 1); | 601 | ide_acpi_set_state(hwif, 1); |
602 | /* | 602 | /* |
603 | * ACPI requires us to call _STM on startup | 603 | * ACPI requires us to call _STM on startup |
604 | */ | 604 | */ |
605 | ide_acpi_get_timing(hwif); | 605 | ide_acpi_get_timing(hwif); |
606 | ide_acpi_push_timing(hwif); | 606 | ide_acpi_push_timing(hwif); |
607 | 607 | ||
608 | ide_port_for_each_present_dev(i, drive, hwif) { | 608 | ide_port_for_each_present_dev(i, drive, hwif) { |
609 | ide_acpi_exec_tfs(drive); | 609 | ide_acpi_exec_tfs(drive); |
610 | } | 610 | } |
611 | } | 611 | } |
612 | 612 |
drivers/pci/hotplug/acpi_pcihp.c
1 | /* | 1 | /* |
2 | * Common ACPI functions for hot plug platforms | 2 | * Common ACPI functions for hot plug platforms |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Intel Corporation | 4 | * Copyright (C) 2006 Intel Corporation |
5 | * | 5 | * |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or (at | 10 | * the Free Software Foundation; either version 2 of the License, or (at |
11 | * your option) any later version. | 11 | * your option) any later version. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, but | 13 | * This program is distributed in the hope that it will be useful, but |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
16 | * NON INFRINGEMENT. See the GNU General Public License for more | 16 | * NON INFRINGEMENT. See the GNU General Public License for more |
17 | * details. | 17 | * details. |
18 | * | 18 | * |
19 | * You should have received a copy of the GNU General Public License | 19 | * You should have received a copy of the GNU General Public License |
20 | * along with this program; if not, write to the Free Software | 20 | * along with this program; if not, write to the Free Software |
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
22 | * | 22 | * |
23 | * Send feedback to <kristen.c.accardi@intel.com> | 23 | * Send feedback to <kristen.c.accardi@intel.com> |
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/moduleparam.h> | 28 | #include <linux/moduleparam.h> |
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
32 | #include <linux/pci_hotplug.h> | 32 | #include <linux/pci_hotplug.h> |
33 | #include <linux/acpi.h> | 33 | #include <linux/acpi.h> |
34 | #include <linux/pci-acpi.h> | 34 | #include <linux/pci-acpi.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | 36 | ||
37 | #define MY_NAME "acpi_pcihp" | 37 | #define MY_NAME "acpi_pcihp" |
38 | 38 | ||
39 | #define dbg(fmt, arg...) do { if (debug_acpi) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __func__ , ## arg); } while (0) | 39 | #define dbg(fmt, arg...) do { if (debug_acpi) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __func__ , ## arg); } while (0) |
40 | #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) | 40 | #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) |
41 | #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) | 41 | #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) |
42 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) | 42 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) |
43 | 43 | ||
44 | #define METHOD_NAME__SUN "_SUN" | 44 | #define METHOD_NAME__SUN "_SUN" |
45 | #define METHOD_NAME_OSHP "OSHP" | 45 | #define METHOD_NAME_OSHP "OSHP" |
46 | 46 | ||
47 | static bool debug_acpi; | 47 | static bool debug_acpi; |
48 | 48 | ||
49 | static acpi_status | 49 | static acpi_status |
50 | decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx) | 50 | decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx) |
51 | { | 51 | { |
52 | int i; | 52 | int i; |
53 | union acpi_object *fields = record->package.elements; | 53 | union acpi_object *fields = record->package.elements; |
54 | u32 revision = fields[1].integer.value; | 54 | u32 revision = fields[1].integer.value; |
55 | 55 | ||
56 | switch (revision) { | 56 | switch (revision) { |
57 | case 1: | 57 | case 1: |
58 | if (record->package.count != 6) | 58 | if (record->package.count != 6) |
59 | return AE_ERROR; | 59 | return AE_ERROR; |
60 | for (i = 2; i < 6; i++) | 60 | for (i = 2; i < 6; i++) |
61 | if (fields[i].type != ACPI_TYPE_INTEGER) | 61 | if (fields[i].type != ACPI_TYPE_INTEGER) |
62 | return AE_ERROR; | 62 | return AE_ERROR; |
63 | hpx->t0 = &hpx->type0_data; | 63 | hpx->t0 = &hpx->type0_data; |
64 | hpx->t0->revision = revision; | 64 | hpx->t0->revision = revision; |
65 | hpx->t0->cache_line_size = fields[2].integer.value; | 65 | hpx->t0->cache_line_size = fields[2].integer.value; |
66 | hpx->t0->latency_timer = fields[3].integer.value; | 66 | hpx->t0->latency_timer = fields[3].integer.value; |
67 | hpx->t0->enable_serr = fields[4].integer.value; | 67 | hpx->t0->enable_serr = fields[4].integer.value; |
68 | hpx->t0->enable_perr = fields[5].integer.value; | 68 | hpx->t0->enable_perr = fields[5].integer.value; |
69 | break; | 69 | break; |
70 | default: | 70 | default: |
71 | printk(KERN_WARNING | 71 | printk(KERN_WARNING |
72 | "%s: Type 0 Revision %d record not supported\n", | 72 | "%s: Type 0 Revision %d record not supported\n", |
73 | __func__, revision); | 73 | __func__, revision); |
74 | return AE_ERROR; | 74 | return AE_ERROR; |
75 | } | 75 | } |
76 | return AE_OK; | 76 | return AE_OK; |
77 | } | 77 | } |
78 | 78 | ||
79 | static acpi_status | 79 | static acpi_status |
80 | decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx) | 80 | decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx) |
81 | { | 81 | { |
82 | int i; | 82 | int i; |
83 | union acpi_object *fields = record->package.elements; | 83 | union acpi_object *fields = record->package.elements; |
84 | u32 revision = fields[1].integer.value; | 84 | u32 revision = fields[1].integer.value; |
85 | 85 | ||
86 | switch (revision) { | 86 | switch (revision) { |
87 | case 1: | 87 | case 1: |
88 | if (record->package.count != 5) | 88 | if (record->package.count != 5) |
89 | return AE_ERROR; | 89 | return AE_ERROR; |
90 | for (i = 2; i < 5; i++) | 90 | for (i = 2; i < 5; i++) |
91 | if (fields[i].type != ACPI_TYPE_INTEGER) | 91 | if (fields[i].type != ACPI_TYPE_INTEGER) |
92 | return AE_ERROR; | 92 | return AE_ERROR; |
93 | hpx->t1 = &hpx->type1_data; | 93 | hpx->t1 = &hpx->type1_data; |
94 | hpx->t1->revision = revision; | 94 | hpx->t1->revision = revision; |
95 | hpx->t1->max_mem_read = fields[2].integer.value; | 95 | hpx->t1->max_mem_read = fields[2].integer.value; |
96 | hpx->t1->avg_max_split = fields[3].integer.value; | 96 | hpx->t1->avg_max_split = fields[3].integer.value; |
97 | hpx->t1->tot_max_split = fields[4].integer.value; | 97 | hpx->t1->tot_max_split = fields[4].integer.value; |
98 | break; | 98 | break; |
99 | default: | 99 | default: |
100 | printk(KERN_WARNING | 100 | printk(KERN_WARNING |
101 | "%s: Type 1 Revision %d record not supported\n", | 101 | "%s: Type 1 Revision %d record not supported\n", |
102 | __func__, revision); | 102 | __func__, revision); |
103 | return AE_ERROR; | 103 | return AE_ERROR; |
104 | } | 104 | } |
105 | return AE_OK; | 105 | return AE_OK; |
106 | } | 106 | } |
107 | 107 | ||
108 | static acpi_status | 108 | static acpi_status |
109 | decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx) | 109 | decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx) |
110 | { | 110 | { |
111 | int i; | 111 | int i; |
112 | union acpi_object *fields = record->package.elements; | 112 | union acpi_object *fields = record->package.elements; |
113 | u32 revision = fields[1].integer.value; | 113 | u32 revision = fields[1].integer.value; |
114 | 114 | ||
115 | switch (revision) { | 115 | switch (revision) { |
116 | case 1: | 116 | case 1: |
117 | if (record->package.count != 18) | 117 | if (record->package.count != 18) |
118 | return AE_ERROR; | 118 | return AE_ERROR; |
119 | for (i = 2; i < 18; i++) | 119 | for (i = 2; i < 18; i++) |
120 | if (fields[i].type != ACPI_TYPE_INTEGER) | 120 | if (fields[i].type != ACPI_TYPE_INTEGER) |
121 | return AE_ERROR; | 121 | return AE_ERROR; |
122 | hpx->t2 = &hpx->type2_data; | 122 | hpx->t2 = &hpx->type2_data; |
123 | hpx->t2->revision = revision; | 123 | hpx->t2->revision = revision; |
124 | hpx->t2->unc_err_mask_and = fields[2].integer.value; | 124 | hpx->t2->unc_err_mask_and = fields[2].integer.value; |
125 | hpx->t2->unc_err_mask_or = fields[3].integer.value; | 125 | hpx->t2->unc_err_mask_or = fields[3].integer.value; |
126 | hpx->t2->unc_err_sever_and = fields[4].integer.value; | 126 | hpx->t2->unc_err_sever_and = fields[4].integer.value; |
127 | hpx->t2->unc_err_sever_or = fields[5].integer.value; | 127 | hpx->t2->unc_err_sever_or = fields[5].integer.value; |
128 | hpx->t2->cor_err_mask_and = fields[6].integer.value; | 128 | hpx->t2->cor_err_mask_and = fields[6].integer.value; |
129 | hpx->t2->cor_err_mask_or = fields[7].integer.value; | 129 | hpx->t2->cor_err_mask_or = fields[7].integer.value; |
130 | hpx->t2->adv_err_cap_and = fields[8].integer.value; | 130 | hpx->t2->adv_err_cap_and = fields[8].integer.value; |
131 | hpx->t2->adv_err_cap_or = fields[9].integer.value; | 131 | hpx->t2->adv_err_cap_or = fields[9].integer.value; |
132 | hpx->t2->pci_exp_devctl_and = fields[10].integer.value; | 132 | hpx->t2->pci_exp_devctl_and = fields[10].integer.value; |
133 | hpx->t2->pci_exp_devctl_or = fields[11].integer.value; | 133 | hpx->t2->pci_exp_devctl_or = fields[11].integer.value; |
134 | hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; | 134 | hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; |
135 | hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; | 135 | hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; |
136 | hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; | 136 | hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; |
137 | hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; | 137 | hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; |
138 | hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; | 138 | hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; |
139 | hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; | 139 | hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; |
140 | break; | 140 | break; |
141 | default: | 141 | default: |
142 | printk(KERN_WARNING | 142 | printk(KERN_WARNING |
143 | "%s: Type 2 Revision %d record not supported\n", | 143 | "%s: Type 2 Revision %d record not supported\n", |
144 | __func__, revision); | 144 | __func__, revision); |
145 | return AE_ERROR; | 145 | return AE_ERROR; |
146 | } | 146 | } |
147 | return AE_OK; | 147 | return AE_OK; |
148 | } | 148 | } |
149 | 149 | ||
150 | static acpi_status | 150 | static acpi_status |
151 | acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) | 151 | acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) |
152 | { | 152 | { |
153 | acpi_status status; | 153 | acpi_status status; |
154 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 154 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
155 | union acpi_object *package, *record, *fields; | 155 | union acpi_object *package, *record, *fields; |
156 | u32 type; | 156 | u32 type; |
157 | int i; | 157 | int i; |
158 | 158 | ||
159 | /* Clear the return buffer with zeros */ | 159 | /* Clear the return buffer with zeros */ |
160 | memset(hpx, 0, sizeof(struct hotplug_params)); | 160 | memset(hpx, 0, sizeof(struct hotplug_params)); |
161 | 161 | ||
162 | status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); | 162 | status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); |
163 | if (ACPI_FAILURE(status)) | 163 | if (ACPI_FAILURE(status)) |
164 | return status; | 164 | return status; |
165 | 165 | ||
166 | package = (union acpi_object *)buffer.pointer; | 166 | package = (union acpi_object *)buffer.pointer; |
167 | if (package->type != ACPI_TYPE_PACKAGE) { | 167 | if (package->type != ACPI_TYPE_PACKAGE) { |
168 | status = AE_ERROR; | 168 | status = AE_ERROR; |
169 | goto exit; | 169 | goto exit; |
170 | } | 170 | } |
171 | 171 | ||
172 | for (i = 0; i < package->package.count; i++) { | 172 | for (i = 0; i < package->package.count; i++) { |
173 | record = &package->package.elements[i]; | 173 | record = &package->package.elements[i]; |
174 | if (record->type != ACPI_TYPE_PACKAGE) { | 174 | if (record->type != ACPI_TYPE_PACKAGE) { |
175 | status = AE_ERROR; | 175 | status = AE_ERROR; |
176 | goto exit; | 176 | goto exit; |
177 | } | 177 | } |
178 | 178 | ||
179 | fields = record->package.elements; | 179 | fields = record->package.elements; |
180 | if (fields[0].type != ACPI_TYPE_INTEGER || | 180 | if (fields[0].type != ACPI_TYPE_INTEGER || |
181 | fields[1].type != ACPI_TYPE_INTEGER) { | 181 | fields[1].type != ACPI_TYPE_INTEGER) { |
182 | status = AE_ERROR; | 182 | status = AE_ERROR; |
183 | goto exit; | 183 | goto exit; |
184 | } | 184 | } |
185 | 185 | ||
186 | type = fields[0].integer.value; | 186 | type = fields[0].integer.value; |
187 | switch (type) { | 187 | switch (type) { |
188 | case 0: | 188 | case 0: |
189 | status = decode_type0_hpx_record(record, hpx); | 189 | status = decode_type0_hpx_record(record, hpx); |
190 | if (ACPI_FAILURE(status)) | 190 | if (ACPI_FAILURE(status)) |
191 | goto exit; | 191 | goto exit; |
192 | break; | 192 | break; |
193 | case 1: | 193 | case 1: |
194 | status = decode_type1_hpx_record(record, hpx); | 194 | status = decode_type1_hpx_record(record, hpx); |
195 | if (ACPI_FAILURE(status)) | 195 | if (ACPI_FAILURE(status)) |
196 | goto exit; | 196 | goto exit; |
197 | break; | 197 | break; |
198 | case 2: | 198 | case 2: |
199 | status = decode_type2_hpx_record(record, hpx); | 199 | status = decode_type2_hpx_record(record, hpx); |
200 | if (ACPI_FAILURE(status)) | 200 | if (ACPI_FAILURE(status)) |
201 | goto exit; | 201 | goto exit; |
202 | break; | 202 | break; |
203 | default: | 203 | default: |
204 | printk(KERN_ERR "%s: Type %d record not supported\n", | 204 | printk(KERN_ERR "%s: Type %d record not supported\n", |
205 | __func__, type); | 205 | __func__, type); |
206 | status = AE_ERROR; | 206 | status = AE_ERROR; |
207 | goto exit; | 207 | goto exit; |
208 | } | 208 | } |
209 | } | 209 | } |
210 | exit: | 210 | exit: |
211 | kfree(buffer.pointer); | 211 | kfree(buffer.pointer); |
212 | return status; | 212 | return status; |
213 | } | 213 | } |
214 | 214 | ||
215 | static acpi_status | 215 | static acpi_status |
216 | acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) | 216 | acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) |
217 | { | 217 | { |
218 | acpi_status status; | 218 | acpi_status status; |
219 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 219 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
220 | union acpi_object *package, *fields; | 220 | union acpi_object *package, *fields; |
221 | int i; | 221 | int i; |
222 | 222 | ||
223 | memset(hpp, 0, sizeof(struct hotplug_params)); | 223 | memset(hpp, 0, sizeof(struct hotplug_params)); |
224 | 224 | ||
225 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); | 225 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); |
226 | if (ACPI_FAILURE(status)) | 226 | if (ACPI_FAILURE(status)) |
227 | return status; | 227 | return status; |
228 | 228 | ||
229 | package = (union acpi_object *) buffer.pointer; | 229 | package = (union acpi_object *) buffer.pointer; |
230 | if (package->type != ACPI_TYPE_PACKAGE || | 230 | if (package->type != ACPI_TYPE_PACKAGE || |
231 | package->package.count != 4) { | 231 | package->package.count != 4) { |
232 | status = AE_ERROR; | 232 | status = AE_ERROR; |
233 | goto exit; | 233 | goto exit; |
234 | } | 234 | } |
235 | 235 | ||
236 | fields = package->package.elements; | 236 | fields = package->package.elements; |
237 | for (i = 0; i < 4; i++) { | 237 | for (i = 0; i < 4; i++) { |
238 | if (fields[i].type != ACPI_TYPE_INTEGER) { | 238 | if (fields[i].type != ACPI_TYPE_INTEGER) { |
239 | status = AE_ERROR; | 239 | status = AE_ERROR; |
240 | goto exit; | 240 | goto exit; |
241 | } | 241 | } |
242 | } | 242 | } |
243 | 243 | ||
244 | hpp->t0 = &hpp->type0_data; | 244 | hpp->t0 = &hpp->type0_data; |
245 | hpp->t0->revision = 1; | 245 | hpp->t0->revision = 1; |
246 | hpp->t0->cache_line_size = fields[0].integer.value; | 246 | hpp->t0->cache_line_size = fields[0].integer.value; |
247 | hpp->t0->latency_timer = fields[1].integer.value; | 247 | hpp->t0->latency_timer = fields[1].integer.value; |
248 | hpp->t0->enable_serr = fields[2].integer.value; | 248 | hpp->t0->enable_serr = fields[2].integer.value; |
249 | hpp->t0->enable_perr = fields[3].integer.value; | 249 | hpp->t0->enable_perr = fields[3].integer.value; |
250 | 250 | ||
251 | exit: | 251 | exit: |
252 | kfree(buffer.pointer); | 252 | kfree(buffer.pointer); |
253 | return status; | 253 | return status; |
254 | } | 254 | } |
255 | 255 | ||
256 | 256 | ||
257 | 257 | ||
258 | /* acpi_run_oshp - get control of hotplug from the firmware | 258 | /* acpi_run_oshp - get control of hotplug from the firmware |
259 | * | 259 | * |
260 | * @handle - the handle of the hotplug controller. | 260 | * @handle - the handle of the hotplug controller. |
261 | */ | 261 | */ |
262 | static acpi_status acpi_run_oshp(acpi_handle handle) | 262 | static acpi_status acpi_run_oshp(acpi_handle handle) |
263 | { | 263 | { |
264 | acpi_status status; | 264 | acpi_status status; |
265 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 265 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
266 | 266 | ||
267 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 267 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
268 | 268 | ||
269 | /* run OSHP */ | 269 | /* run OSHP */ |
270 | status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); | 270 | status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); |
271 | if (ACPI_FAILURE(status)) | 271 | if (ACPI_FAILURE(status)) |
272 | if (status != AE_NOT_FOUND) | 272 | if (status != AE_NOT_FOUND) |
273 | printk(KERN_ERR "%s:%s OSHP fails=0x%x\n", | 273 | printk(KERN_ERR "%s:%s OSHP fails=0x%x\n", |
274 | __func__, (char *)string.pointer, status); | 274 | __func__, (char *)string.pointer, status); |
275 | else | 275 | else |
276 | dbg("%s:%s OSHP not found\n", | 276 | dbg("%s:%s OSHP not found\n", |
277 | __func__, (char *)string.pointer); | 277 | __func__, (char *)string.pointer); |
278 | else | 278 | else |
279 | pr_debug("%s:%s OSHP passes\n", __func__, | 279 | pr_debug("%s:%s OSHP passes\n", __func__, |
280 | (char *)string.pointer); | 280 | (char *)string.pointer); |
281 | 281 | ||
282 | kfree(string.pointer); | 282 | kfree(string.pointer); |
283 | return status; | 283 | return status; |
284 | } | 284 | } |
285 | 285 | ||
286 | /* pci_get_hp_params | 286 | /* pci_get_hp_params |
287 | * | 287 | * |
288 | * @dev - the pci_dev for which we want parameters | 288 | * @dev - the pci_dev for which we want parameters |
289 | * @hpp - allocated by the caller | 289 | * @hpp - allocated by the caller |
290 | */ | 290 | */ |
291 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) | 291 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) |
292 | { | 292 | { |
293 | acpi_status status; | 293 | acpi_status status; |
294 | acpi_handle handle, phandle; | 294 | acpi_handle handle, phandle; |
295 | struct pci_bus *pbus; | 295 | struct pci_bus *pbus; |
296 | 296 | ||
297 | handle = NULL; | 297 | handle = NULL; |
298 | for (pbus = dev->bus; pbus; pbus = pbus->parent) { | 298 | for (pbus = dev->bus; pbus; pbus = pbus->parent) { |
299 | handle = acpi_pci_get_bridge_handle(pbus); | 299 | handle = acpi_pci_get_bridge_handle(pbus); |
300 | if (handle) | 300 | if (handle) |
301 | break; | 301 | break; |
302 | } | 302 | } |
303 | 303 | ||
304 | /* | 304 | /* |
305 | * _HPP settings apply to all child buses, until another _HPP is | 305 | * _HPP settings apply to all child buses, until another _HPP is |
306 | * encountered. If we don't find an _HPP for the input pci dev, | 306 | * encountered. If we don't find an _HPP for the input pci dev, |
307 | * look for it in the parent device scope since that would apply to | 307 | * look for it in the parent device scope since that would apply to |
308 | * this pci dev. | 308 | * this pci dev. |
309 | */ | 309 | */ |
310 | while (handle) { | 310 | while (handle) { |
311 | status = acpi_run_hpx(handle, hpp); | 311 | status = acpi_run_hpx(handle, hpp); |
312 | if (ACPI_SUCCESS(status)) | 312 | if (ACPI_SUCCESS(status)) |
313 | return 0; | 313 | return 0; |
314 | status = acpi_run_hpp(handle, hpp); | 314 | status = acpi_run_hpp(handle, hpp); |
315 | if (ACPI_SUCCESS(status)) | 315 | if (ACPI_SUCCESS(status)) |
316 | return 0; | 316 | return 0; |
317 | if (acpi_is_root_bridge(handle)) | 317 | if (acpi_is_root_bridge(handle)) |
318 | break; | 318 | break; |
319 | status = acpi_get_parent(handle, &phandle); | 319 | status = acpi_get_parent(handle, &phandle); |
320 | if (ACPI_FAILURE(status)) | 320 | if (ACPI_FAILURE(status)) |
321 | break; | 321 | break; |
322 | handle = phandle; | 322 | handle = phandle; |
323 | } | 323 | } |
324 | return -ENODEV; | 324 | return -ENODEV; |
325 | } | 325 | } |
326 | EXPORT_SYMBOL_GPL(pci_get_hp_params); | 326 | EXPORT_SYMBOL_GPL(pci_get_hp_params); |
327 | 327 | ||
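A hypothetical caller sketch (the function and device names are assumptions, not from this commit) showing how a hotplug driver could apply the Type 0 settings returned above to a freshly added device:

static void example_apply_hp_params(struct pci_dev *new_dev)
{
	struct hotplug_params hpp;

	if (pci_get_hp_params(new_dev, &hpp))
		return;		/* no _HPX/_HPP found up the hierarchy */

	if (hpp.t0)		/* Type 0: conventional PCI settings */
		pci_write_config_byte(new_dev, PCI_CACHE_LINE_SIZE,
				      hpp.t0->cache_line_size);
}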
328 | /** | 328 | /** |
329 | * acpi_get_hp_hw_control_from_firmware | 329 | * acpi_get_hp_hw_control_from_firmware |
330 | * @pdev: the pci_dev of the bridge that has a hotplug controller | 330 | * @pdev: the pci_dev of the bridge that has a hotplug controller |
331 | * @flags: requested control bits for _OSC | 331 | * @flags: requested control bits for _OSC |
332 | * | 332 | * |
333 | * Attempt to take hotplug control from firmware. | 333 | * Attempt to take hotplug control from firmware. |
334 | */ | 334 | */ |
335 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | 335 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) |
336 | { | 336 | { |
337 | acpi_status status; | 337 | acpi_status status; |
338 | acpi_handle chandle, handle; | 338 | acpi_handle chandle, handle; |
339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
340 | 340 | ||
341 | flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL; | 341 | flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL; |
342 | if (!flags) { | 342 | if (!flags) { |
343 | err("Invalid flags %u specified!\n", flags); | 343 | err("Invalid flags %u specified!\n", flags); |
344 | return -EINVAL; | 344 | return -EINVAL; |
345 | } | 345 | } |
346 | 346 | ||
347 | /* | 347 | /* |
348 | * Per PCI firmware specification, we should run the ACPI _OSC | 348 | * Per PCI firmware specification, we should run the ACPI _OSC |
349 | * method to get control of hotplug hardware before using it. If | 349 | * method to get control of hotplug hardware before using it. If |
350 | * an _OSC is missing, we look for an OSHP to do the same thing. | 350 | * an _OSC is missing, we look for an OSHP to do the same thing. |
351 | * To handle different BIOS behavior, we look for _OSC on a root | 351 | * To handle different BIOS behavior, we look for _OSC on a root |
352 | * bridge preferentially (according to the PCI fw spec). Later we look | 352 | * bridge preferentially (according to the PCI fw spec). Later we look |
353 | * for OSHP within the scope of the hotplug controller and its parents, | 353 | * for OSHP within the scope of the hotplug controller and its parents, |
354 | * up to the host bridge under which this controller exists. | 354 | * up to the host bridge under which this controller exists. |
355 | */ | 355 | */ |
356 | handle = acpi_find_root_bridge_handle(pdev); | 356 | handle = acpi_find_root_bridge_handle(pdev); |
357 | if (handle) { | 357 | if (handle) { |
358 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 358 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
359 | dbg("Trying to get hotplug control for %s\n", | 359 | dbg("Trying to get hotplug control for %s\n", |
360 | (char *)string.pointer); | 360 | (char *)string.pointer); |
361 | status = acpi_pci_osc_control_set(handle, &flags, flags); | 361 | status = acpi_pci_osc_control_set(handle, &flags, flags); |
362 | if (ACPI_SUCCESS(status)) | 362 | if (ACPI_SUCCESS(status)) |
363 | goto got_one; | 363 | goto got_one; |
364 | if (status == AE_SUPPORT) | 364 | if (status == AE_SUPPORT) |
365 | goto no_control; | 365 | goto no_control; |
366 | kfree(string.pointer); | 366 | kfree(string.pointer); |
367 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; | 367 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; |
368 | } | 368 | } |
369 | 369 | ||
370 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); | 370 | handle = ACPI_HANDLE(&pdev->dev); |
371 | if (!handle) { | 371 | if (!handle) { |
372 | /* | 372 | /* |
373 | * This hotplug controller was not listed in the ACPI name | 373 | * This hotplug controller was not listed in the ACPI name |
374 | * space at all. Try to get acpi handle of parent pci bus. | 374 | * space at all. Try to get acpi handle of parent pci bus. |
375 | */ | 375 | */ |
376 | struct pci_bus *pbus; | 376 | struct pci_bus *pbus; |
377 | for (pbus = pdev->bus; pbus; pbus = pbus->parent) { | 377 | for (pbus = pdev->bus; pbus; pbus = pbus->parent) { |
378 | handle = acpi_pci_get_bridge_handle(pbus); | 378 | handle = acpi_pci_get_bridge_handle(pbus); |
379 | if (handle) | 379 | if (handle) |
380 | break; | 380 | break; |
381 | } | 381 | } |
382 | } | 382 | } |
383 | 383 | ||
384 | while (handle) { | 384 | while (handle) { |
385 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 385 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
386 | dbg("Trying to get hotplug control for %s \n", | 386 | dbg("Trying to get hotplug control for %s \n", |
387 | (char *)string.pointer); | 387 | (char *)string.pointer); |
388 | status = acpi_run_oshp(handle); | 388 | status = acpi_run_oshp(handle); |
389 | if (ACPI_SUCCESS(status)) | 389 | if (ACPI_SUCCESS(status)) |
390 | goto got_one; | 390 | goto got_one; |
391 | if (acpi_is_root_bridge(handle)) | 391 | if (acpi_is_root_bridge(handle)) |
392 | break; | 392 | break; |
393 | chandle = handle; | 393 | chandle = handle; |
394 | status = acpi_get_parent(chandle, &handle); | 394 | status = acpi_get_parent(chandle, &handle); |
395 | if (ACPI_FAILURE(status)) | 395 | if (ACPI_FAILURE(status)) |
396 | break; | 396 | break; |
397 | } | 397 | } |
398 | no_control: | 398 | no_control: |
399 | dbg("Cannot get control of hotplug hardware for pci %s\n", | 399 | dbg("Cannot get control of hotplug hardware for pci %s\n", |
400 | pci_name(pdev)); | 400 | pci_name(pdev)); |
401 | kfree(string.pointer); | 401 | kfree(string.pointer); |
402 | return -ENODEV; | 402 | return -ENODEV; |
403 | got_one: | 403 | got_one: |
404 | dbg("Gained control for hotplug HW for pci %s (%s)\n", | 404 | dbg("Gained control for hotplug HW for pci %s (%s)\n", |
405 | pci_name(pdev), (char *)string.pointer); | 405 | pci_name(pdev), (char *)string.pointer); |
406 | kfree(string.pointer); | 406 | kfree(string.pointer); |
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); | 409 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); |
410 | 410 | ||
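The ACPI_HANDLE() lookup used above maps a struct device to the handle of its ACPI companion object and may evaluate to NULL when the firmware does not describe the device. A minimal sketch of the idiom (the helper and its pdev argument are assumptions for illustration only):

static bool example_has_acpi_companion(struct pci_dev *pdev)
{
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);

	return handle != NULL;	/* NULL: not present in the ACPI namespace */
}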
411 | static int pcihp_is_ejectable(acpi_handle handle) | 411 | static int pcihp_is_ejectable(acpi_handle handle) |
412 | { | 412 | { |
413 | acpi_status status; | 413 | acpi_status status; |
414 | unsigned long long removable; | 414 | unsigned long long removable; |
415 | if (!acpi_has_method(handle, "_ADR")) | 415 | if (!acpi_has_method(handle, "_ADR")) |
416 | return 0; | 416 | return 0; |
417 | if (acpi_has_method(handle, "_EJ0")) | 417 | if (acpi_has_method(handle, "_EJ0")) |
418 | return 1; | 418 | return 1; |
419 | status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); | 419 | status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); |
420 | if (ACPI_SUCCESS(status) && removable) | 420 | if (ACPI_SUCCESS(status) && removable) |
421 | return 1; | 421 | return 1; |
422 | return 0; | 422 | return 0; |
423 | } | 423 | } |
424 | 424 | ||
425 | /** | 425 | /** |
426 | * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot | 426 | * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot |
427 | * @pbus: the PCI bus of the PCI slot corresponding to 'handle' | 427 | * @pbus: the PCI bus of the PCI slot corresponding to 'handle' |
428 | * @handle: ACPI handle to check | 428 | * @handle: ACPI handle to check |
429 | * | 429 | * |
430 | * Return 1 if handle is ejectable PCI slot, 0 otherwise. | 430 | * Return 1 if handle is ejectable PCI slot, 0 otherwise. |
431 | */ | 431 | */ |
432 | int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) | 432 | int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) |
433 | { | 433 | { |
434 | acpi_handle bridge_handle, parent_handle; | 434 | acpi_handle bridge_handle, parent_handle; |
435 | 435 | ||
436 | if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) | 436 | if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) |
437 | return 0; | 437 | return 0; |
438 | if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) | 438 | if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) |
439 | return 0; | 439 | return 0; |
440 | if (bridge_handle != parent_handle) | 440 | if (bridge_handle != parent_handle) |
441 | return 0; | 441 | return 0; |
442 | return pcihp_is_ejectable(handle); | 442 | return pcihp_is_ejectable(handle); |
443 | } | 443 | } |
444 | EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); | 444 | EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); |
445 | 445 | ||
446 | static acpi_status | 446 | static acpi_status |
447 | check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) | 447 | check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) |
448 | { | 448 | { |
449 | int *found = (int *)context; | 449 | int *found = (int *)context; |
450 | if (pcihp_is_ejectable(handle)) { | 450 | if (pcihp_is_ejectable(handle)) { |
451 | *found = 1; | 451 | *found = 1; |
452 | return AE_CTRL_TERMINATE; | 452 | return AE_CTRL_TERMINATE; |
453 | } | 453 | } |
454 | return AE_OK; | 454 | return AE_OK; |
455 | } | 455 | } |
456 | 456 | ||
457 | /** | 457 | /** |
458 | * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots | 458 | * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots |
459 | * @handle - handle of the PCI bus to scan | 459 | * @handle - handle of the PCI bus to scan |
460 | * | 460 | * |
461 | * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. | 461 | * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. |
462 | */ | 462 | */ |
463 | int acpi_pci_detect_ejectable(acpi_handle handle) | 463 | int acpi_pci_detect_ejectable(acpi_handle handle) |
464 | { | 464 | { |
465 | int found = 0; | 465 | int found = 0; |
466 | 466 | ||
467 | if (!handle) | 467 | if (!handle) |
468 | return found; | 468 | return found; |
469 | 469 | ||
470 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | 470 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, |
471 | check_hotplug, NULL, (void *)&found, NULL); | 471 | check_hotplug, NULL, (void *)&found, NULL); |
472 | return found; | 472 | return found; |
473 | } | 473 | } |
474 | EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); | 474 | EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); |
475 | 475 | ||
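A usage sketch (assumed caller, not part of this commit): a bridge driver can combine the helper above with ACPI_HANDLE() to decide whether the bus below it exposes any ACPI-managed, ejectable slots before setting up hotplug support:

static bool example_bridge_has_ejectable_slots(struct pci_dev *bridge)
{
	acpi_handle handle = ACPI_HANDLE(&bridge->dev);

	return handle && acpi_pci_detect_ejectable(handle);
}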
476 | module_param(debug_acpi, bool, 0644); | 476 | module_param(debug_acpi, bool, 0644); |
477 | MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not"); | 477 | MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not"); |
478 | 478 |
drivers/pci/hotplug/pciehp_acpi.c
1 | /* | 1 | /* |
2 | * ACPI related functions for PCI Express Hot Plug driver. | 2 | * ACPI related functions for PCI Express Hot Plug driver. |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Kenji Kaneshige | 4 | * Copyright (C) 2008 Kenji Kaneshige |
5 | * Copyright (C) 2008 Fujitsu Limited. | 5 | * Copyright (C) 2008 Fujitsu Limited. |
6 | * | 6 | * |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
11 | * the Free Software Foundation; either version 2 of the License, or (at | 11 | * the Free Software Foundation; either version 2 of the License, or (at |
12 | * your option) any later version. | 12 | * your option) any later version. |
13 | * | 13 | * |
14 | * This program is distributed in the hope that it will be useful, but | 14 | * This program is distributed in the hope that it will be useful, but |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 16 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
17 | * NON INFRINGEMENT. See the GNU General Public License for more | 17 | * NON INFRINGEMENT. See the GNU General Public License for more |
18 | * details. | 18 | * details. |
19 | * | 19 | * |
20 | * You should have received a copy of the GNU General Public License | 20 | * You should have received a copy of the GNU General Public License |
21 | * along with this program; if not, write to the Free Software | 21 | * along with this program; if not, write to the Free Software |
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/acpi.h> | 26 | #include <linux/acpi.h> |
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/pci_hotplug.h> | 28 | #include <linux/pci_hotplug.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include "pciehp.h" | 31 | #include "pciehp.h" |
32 | 32 | ||
33 | #define PCIEHP_DETECT_PCIE (0) | 33 | #define PCIEHP_DETECT_PCIE (0) |
34 | #define PCIEHP_DETECT_ACPI (1) | 34 | #define PCIEHP_DETECT_ACPI (1) |
35 | #define PCIEHP_DETECT_AUTO (2) | 35 | #define PCIEHP_DETECT_AUTO (2) |
36 | #define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO | 36 | #define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO |
37 | 37 | ||
38 | struct dummy_slot { | 38 | struct dummy_slot { |
39 | u32 number; | 39 | u32 number; |
40 | struct list_head list; | 40 | struct list_head list; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static int slot_detection_mode; | 43 | static int slot_detection_mode; |
44 | static char *pciehp_detect_mode; | 44 | static char *pciehp_detect_mode; |
45 | module_param(pciehp_detect_mode, charp, 0444); | 45 | module_param(pciehp_detect_mode, charp, 0444); |
46 | MODULE_PARM_DESC(pciehp_detect_mode, | 46 | MODULE_PARM_DESC(pciehp_detect_mode, |
47 | "Slot detection mode: pcie, acpi, auto\n" | 47 | "Slot detection mode: pcie, acpi, auto\n" |
48 | " pcie - Use PCIe based slot detection\n" | 48 | " pcie - Use PCIe based slot detection\n" |
49 | " acpi - Use ACPI for slot detection\n" | 49 | " acpi - Use ACPI for slot detection\n" |
50 | " auto(default) - Auto select mode. Use acpi option if duplicate\n" | 50 | " auto(default) - Auto select mode. Use acpi option if duplicate\n" |
51 | " slot ids are found. Otherwise, use pcie option\n"); | 51 | " slot ids are found. Otherwise, use pcie option\n"); |
52 | 52 | ||
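For reference, the detection mode can be selected at boot time; assuming this file is linked into the pciehp module as usual, a command-line entry such as pciehp.pciehp_detect_mode=acpi would force ACPI-based slot detection (or the equivalent modprobe option when built as a module).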
53 | int pciehp_acpi_slot_detection_check(struct pci_dev *dev) | 53 | int pciehp_acpi_slot_detection_check(struct pci_dev *dev) |
54 | { | 54 | { |
55 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) | 55 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) |
56 | return 0; | 56 | return 0; |
57 | if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) | 57 | if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev))) |
58 | return 0; | 58 | return 0; |
59 | return -ENODEV; | 59 | return -ENODEV; |
60 | } | 60 | } |
61 | 61 | ||
62 | static int __init parse_detect_mode(void) | 62 | static int __init parse_detect_mode(void) |
63 | { | 63 | { |
64 | if (!pciehp_detect_mode) | 64 | if (!pciehp_detect_mode) |
65 | return PCIEHP_DETECT_DEFAULT; | 65 | return PCIEHP_DETECT_DEFAULT; |
66 | if (!strcmp(pciehp_detect_mode, "pcie")) | 66 | if (!strcmp(pciehp_detect_mode, "pcie")) |
67 | return PCIEHP_DETECT_PCIE; | 67 | return PCIEHP_DETECT_PCIE; |
68 | if (!strcmp(pciehp_detect_mode, "acpi")) | 68 | if (!strcmp(pciehp_detect_mode, "acpi")) |
69 | return PCIEHP_DETECT_ACPI; | 69 | return PCIEHP_DETECT_ACPI; |
70 | if (!strcmp(pciehp_detect_mode, "auto")) | 70 | if (!strcmp(pciehp_detect_mode, "auto")) |
71 | return PCIEHP_DETECT_AUTO; | 71 | return PCIEHP_DETECT_AUTO; |
72 | warn("bad specifier '%s' for pciehp_detect_mode. Use default\n", | 72 | warn("bad specifier '%s' for pciehp_detect_mode. Use default\n", |
73 | pciehp_detect_mode); | 73 | pciehp_detect_mode); |
74 | return PCIEHP_DETECT_DEFAULT; | 74 | return PCIEHP_DETECT_DEFAULT; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int __initdata dup_slot_id; | 77 | static int __initdata dup_slot_id; |
78 | static int __initdata acpi_slot_detected; | 78 | static int __initdata acpi_slot_detected; |
79 | static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); | 79 | static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); |
80 | 80 | ||
81 | /* Dummy driver for duplicate name detection */ | 81 | /* Dummy driver for duplicate name detection */ |
82 | static int __init dummy_probe(struct pcie_device *dev) | 82 | static int __init dummy_probe(struct pcie_device *dev) |
83 | { | 83 | { |
84 | u32 slot_cap; | 84 | u32 slot_cap; |
85 | acpi_handle handle; | 85 | acpi_handle handle; |
86 | struct dummy_slot *slot, *tmp; | 86 | struct dummy_slot *slot, *tmp; |
87 | struct pci_dev *pdev = dev->port; | 87 | struct pci_dev *pdev = dev->port; |
88 | 88 | ||
89 | pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); | 89 | pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); |
90 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 90 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
91 | if (!slot) | 91 | if (!slot) |
92 | return -ENOMEM; | 92 | return -ENOMEM; |
93 | slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19; | 93 | slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19; |
94 | list_for_each_entry(tmp, &dummy_slots, list) { | 94 | list_for_each_entry(tmp, &dummy_slots, list) { |
95 | if (tmp->number == slot->number) | 95 | if (tmp->number == slot->number) |
96 | dup_slot_id++; | 96 | dup_slot_id++; |
97 | } | 97 | } |
98 | list_add_tail(&slot->list, &dummy_slots); | 98 | list_add_tail(&slot->list, &dummy_slots); |
99 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); | 99 | handle = ACPI_HANDLE(&pdev->dev); |
100 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) | 100 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) |
101 | acpi_slot_detected = 1; | 101 | acpi_slot_detected = 1; |
102 | return -ENODEV; /* dummy driver always returns error */ | 102 | return -ENODEV; /* dummy driver always returns error */ |
103 | } | 103 | } |
104 | 104 | ||
105 | static struct pcie_port_service_driver __initdata dummy_driver = { | 105 | static struct pcie_port_service_driver __initdata dummy_driver = { |
106 | .name = "pciehp_dummy", | 106 | .name = "pciehp_dummy", |
107 | .port_type = PCIE_ANY_PORT, | 107 | .port_type = PCIE_ANY_PORT, |
108 | .service = PCIE_PORT_SERVICE_HP, | 108 | .service = PCIE_PORT_SERVICE_HP, |
109 | .probe = dummy_probe, | 109 | .probe = dummy_probe, |
110 | }; | 110 | }; |
111 | 111 | ||
112 | static int __init select_detection_mode(void) | 112 | static int __init select_detection_mode(void) |
113 | { | 113 | { |
114 | struct dummy_slot *slot, *tmp; | 114 | struct dummy_slot *slot, *tmp; |
115 | if (pcie_port_service_register(&dummy_driver)) | 115 | if (pcie_port_service_register(&dummy_driver)) |
116 | return PCIEHP_DETECT_ACPI; | 116 | return PCIEHP_DETECT_ACPI; |
117 | pcie_port_service_unregister(&dummy_driver); | 117 | pcie_port_service_unregister(&dummy_driver); |
118 | list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { | 118 | list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { |
119 | list_del(&slot->list); | 119 | list_del(&slot->list); |
120 | kfree(slot); | 120 | kfree(slot); |
121 | } | 121 | } |
122 | if (acpi_slot_detected && dup_slot_id) | 122 | if (acpi_slot_detected && dup_slot_id) |
123 | return PCIEHP_DETECT_ACPI; | 123 | return PCIEHP_DETECT_ACPI; |
124 | return PCIEHP_DETECT_PCIE; | 124 | return PCIEHP_DETECT_PCIE; |
125 | } | 125 | } |
126 | 126 | ||
127 | void __init pciehp_acpi_slot_detection_init(void) | 127 | void __init pciehp_acpi_slot_detection_init(void) |
128 | { | 128 | { |
129 | slot_detection_mode = parse_detect_mode(); | 129 | slot_detection_mode = parse_detect_mode(); |
130 | if (slot_detection_mode != PCIEHP_DETECT_AUTO) | 130 | if (slot_detection_mode != PCIEHP_DETECT_AUTO) |
131 | goto out; | 131 | goto out; |
132 | slot_detection_mode = select_detection_mode(); | 132 | slot_detection_mode = select_detection_mode(); |
133 | out: | 133 | out: |
134 | if (slot_detection_mode == PCIEHP_DETECT_ACPI) | 134 | if (slot_detection_mode == PCIEHP_DETECT_ACPI) |
135 | info("Using ACPI for slot detection.\n"); | 135 | info("Using ACPI for slot detection.\n"); |
136 | } | 136 | } |
137 | 137 |
drivers/pci/ioapic.c
1 | /* | 1 | /* |
2 | * IOAPIC/IOxAPIC/IOSAPIC driver | 2 | * IOAPIC/IOxAPIC/IOSAPIC driver |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Fujitsu Limited. | 4 | * Copyright (C) 2009 Fujitsu Limited. |
5 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. | 5 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * This driver manages PCI I/O APICs added by hotplug after boot. We try to | 13 | * This driver manages PCI I/O APICs added by hotplug after boot. We try to |
14 | * claim all I/O APIC PCI devices, but those present at boot were registered | 14 | * claim all I/O APIC PCI devices, but those present at boot were registered |
15 | * when we parsed the ACPI MADT, so we'll fail when we try to re-register | 15 | * when we parsed the ACPI MADT, so we'll fail when we try to re-register |
16 | * them. | 16 | * them. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/acpi.h> | 21 | #include <linux/acpi.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <acpi/acpi_bus.h> | 23 | #include <acpi/acpi_bus.h> |
24 | 24 | ||
25 | struct ioapic { | 25 | struct ioapic { |
26 | acpi_handle handle; | 26 | acpi_handle handle; |
27 | u32 gsi_base; | 27 | u32 gsi_base; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | 30 | static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) |
31 | { | 31 | { |
32 | acpi_handle handle; | 32 | acpi_handle handle; |
33 | acpi_status status; | 33 | acpi_status status; |
34 | unsigned long long gsb; | 34 | unsigned long long gsb; |
35 | struct ioapic *ioapic; | 35 | struct ioapic *ioapic; |
36 | int ret; | 36 | int ret; |
37 | char *type; | 37 | char *type; |
38 | struct resource *res; | 38 | struct resource *res; |
39 | 39 | ||
40 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 40 | handle = ACPI_HANDLE(&dev->dev); |
41 | if (!handle) | 41 | if (!handle) |
42 | return -EINVAL; | 42 | return -EINVAL; |
43 | 43 | ||
44 | status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb); | 44 | status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb); |
45 | if (ACPI_FAILURE(status)) | 45 | if (ACPI_FAILURE(status)) |
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * The previous code in acpiphp evaluated _MAT if _GSB failed, but | 49 | * The previous code in acpiphp evaluated _MAT if _GSB failed, but |
50 | * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs. | 50 | * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs. |
51 | */ | 51 | */ |
52 | 52 | ||
53 | ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL); | 53 | ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL); |
54 | if (!ioapic) | 54 | if (!ioapic) |
55 | return -ENOMEM; | 55 | return -ENOMEM; |
56 | 56 | ||
57 | ioapic->handle = handle; | 57 | ioapic->handle = handle; |
58 | ioapic->gsi_base = (u32) gsb; | 58 | ioapic->gsi_base = (u32) gsb; |
59 | 59 | ||
60 | if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC) | 60 | if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC) |
61 | type = "IOAPIC"; | 61 | type = "IOAPIC"; |
62 | else | 62 | else |
63 | type = "IOxAPIC"; | 63 | type = "IOxAPIC"; |
64 | 64 | ||
65 | ret = pci_enable_device(dev); | 65 | ret = pci_enable_device(dev); |
66 | if (ret < 0) | 66 | if (ret < 0) |
67 | goto exit_free; | 67 | goto exit_free; |
68 | 68 | ||
69 | pci_set_master(dev); | 69 | pci_set_master(dev); |
70 | 70 | ||
71 | if (pci_request_region(dev, 0, type)) | 71 | if (pci_request_region(dev, 0, type)) |
72 | goto exit_disable; | 72 | goto exit_disable; |
73 | 73 | ||
74 | res = &dev->resource[0]; | 74 | res = &dev->resource[0]; |
75 | if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base)) | 75 | if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base)) |
76 | goto exit_release; | 76 | goto exit_release; |
77 | 77 | ||
78 | pci_set_drvdata(dev, ioapic); | 78 | pci_set_drvdata(dev, ioapic); |
79 | dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base); | 79 | dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base); |
80 | return 0; | 80 | return 0; |
81 | 81 | ||
82 | exit_release: | 82 | exit_release: |
83 | pci_release_region(dev, 0); | 83 | pci_release_region(dev, 0); |
84 | exit_disable: | 84 | exit_disable: |
85 | pci_disable_device(dev); | 85 | pci_disable_device(dev); |
86 | exit_free: | 86 | exit_free: |
87 | kfree(ioapic); | 87 | kfree(ioapic); |
88 | return -ENODEV; | 88 | return -ENODEV; |
89 | } | 89 | } |
90 | 90 | ||
91 | static void ioapic_remove(struct pci_dev *dev) | 91 | static void ioapic_remove(struct pci_dev *dev) |
92 | { | 92 | { |
93 | struct ioapic *ioapic = pci_get_drvdata(dev); | 93 | struct ioapic *ioapic = pci_get_drvdata(dev); |
94 | 94 | ||
95 | acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base); | 95 | acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base); |
96 | pci_release_region(dev, 0); | 96 | pci_release_region(dev, 0); |
97 | pci_disable_device(dev); | 97 | pci_disable_device(dev); |
98 | kfree(ioapic); | 98 | kfree(ioapic); |
99 | } | 99 | } |
100 | 100 | ||
101 | 101 | ||
102 | static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = { | 102 | static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = { |
103 | { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) }, | 103 | { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) }, |
104 | { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) }, | 104 | { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) }, |
105 | { } | 105 | { } |
106 | }; | 106 | }; |
107 | MODULE_DEVICE_TABLE(pci, ioapic_devices); | 107 | MODULE_DEVICE_TABLE(pci, ioapic_devices); |
108 | 108 | ||
109 | static struct pci_driver ioapic_driver = { | 109 | static struct pci_driver ioapic_driver = { |
110 | .name = "ioapic", | 110 | .name = "ioapic", |
111 | .id_table = ioapic_devices, | 111 | .id_table = ioapic_devices, |
112 | .probe = ioapic_probe, | 112 | .probe = ioapic_probe, |
113 | .remove = ioapic_remove, | 113 | .remove = ioapic_remove, |
114 | }; | 114 | }; |
115 | 115 | ||
116 | module_pci_driver(ioapic_driver); | 116 | module_pci_driver(ioapic_driver); |
117 | 117 | ||
118 | MODULE_LICENSE("GPL"); | 118 | MODULE_LICENSE("GPL"); |
119 | 119 |
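In ioapic_probe() above, the only line this patch changes is the handle lookup, which now reads ACPI_HANDLE(&dev->dev). The surrounding pattern — fetch the device's ACPI handle, bail out if the platform provides none, then evaluate _GSB for the GSI base — is sketched below in isolation; the helper name and calling convention are illustrative only, not part of the patch:

	static int get_gsi_base(struct device *dev, u32 *gsi_base)
	{
		acpi_handle handle = ACPI_HANDLE(dev);	/* NULL if firmware describes no node */
		unsigned long long gsb;
		acpi_status status;

		if (!handle)
			return -EINVAL;

		/* ACPI 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs */
		status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
		if (ACPI_FAILURE(status))
			return -EINVAL;

		*gsi_base = (u32)gsb;
		return 0;
	}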
drivers/pci/pci-acpi.c
1 | /* | 1 | /* |
2 | * File: pci-acpi.c | 2 | * File: pci-acpi.c |
3 | * Purpose: Provide PCI support in ACPI | 3 | * Purpose: Provide PCI support in ACPI |
4 | * | 4 | * |
5 | * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com> | 5 | * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com> |
6 | * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com> | 6 | * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com> |
7 | * Copyright (C) 2004 Intel Corp. | 7 | * Copyright (C) 2004 Intel Corp. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/pci-aspm.h> | 14 | #include <linux/pci-aspm.h> |
15 | #include <acpi/acpi.h> | 15 | #include <acpi/acpi.h> |
16 | #include <acpi/acpi_bus.h> | 16 | #include <acpi/acpi_bus.h> |
17 | 17 | ||
18 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
19 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
20 | #include <linux/pm_qos.h> | 20 | #include <linux/pm_qos.h> |
21 | #include "pci.h" | 21 | #include "pci.h" |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. | 24 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. |
25 | * @handle: ACPI handle of a device the notification is for. | 25 | * @handle: ACPI handle of a device the notification is for. |
26 | * @event: Type of the signaled event. | 26 | * @event: Type of the signaled event. |
27 | * @context: PCI root bus to wake up devices on. | 27 | * @context: PCI root bus to wake up devices on. |
28 | */ | 28 | */ |
29 | static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) | 29 | static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) |
30 | { | 30 | { |
31 | struct pci_bus *pci_bus = context; | 31 | struct pci_bus *pci_bus = context; |
32 | 32 | ||
33 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) | 33 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) |
34 | pci_pme_wakeup_bus(pci_bus); | 34 | pci_pme_wakeup_bus(pci_bus); |
35 | } | 35 | } |
36 | 36 | ||
37 | /** | 37 | /** |
38 | * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. | 38 | * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. |
39 | * @handle: ACPI handle of a device the notification is for. | 39 | * @handle: ACPI handle of a device the notification is for. |
40 | * @event: Type of the signaled event. | 40 | * @event: Type of the signaled event. |
41 | * @context: PCI device object to wake up. | 41 | * @context: PCI device object to wake up. |
42 | */ | 42 | */ |
43 | static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | 43 | static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) |
44 | { | 44 | { |
45 | struct pci_dev *pci_dev = context; | 45 | struct pci_dev *pci_dev = context; |
46 | 46 | ||
47 | if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev) | 47 | if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev) |
48 | return; | 48 | return; |
49 | 49 | ||
50 | if (pci_dev->pme_poll) | 50 | if (pci_dev->pme_poll) |
51 | pci_dev->pme_poll = false; | 51 | pci_dev->pme_poll = false; |
52 | 52 | ||
53 | if (pci_dev->current_state == PCI_D3cold) { | 53 | if (pci_dev->current_state == PCI_D3cold) { |
54 | pci_wakeup_event(pci_dev); | 54 | pci_wakeup_event(pci_dev); |
55 | pm_runtime_resume(&pci_dev->dev); | 55 | pm_runtime_resume(&pci_dev->dev); |
56 | return; | 56 | return; |
57 | } | 57 | } |
58 | 58 | ||
59 | /* Clear PME Status if set. */ | 59 | /* Clear PME Status if set. */ |
60 | if (pci_dev->pme_support) | 60 | if (pci_dev->pme_support) |
61 | pci_check_pme_status(pci_dev); | 61 | pci_check_pme_status(pci_dev); |
62 | 62 | ||
63 | pci_wakeup_event(pci_dev); | 63 | pci_wakeup_event(pci_dev); |
64 | pm_runtime_resume(&pci_dev->dev); | 64 | pm_runtime_resume(&pci_dev->dev); |
65 | 65 | ||
66 | if (pci_dev->subordinate) | 66 | if (pci_dev->subordinate) |
67 | pci_pme_wakeup_bus(pci_dev->subordinate); | 67 | pci_pme_wakeup_bus(pci_dev->subordinate); |
68 | } | 68 | } |
69 | 69 | ||
70 | /** | 70 | /** |
71 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. | 71 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. |
72 | * @dev: ACPI device to add the notifier for. | 72 | * @dev: ACPI device to add the notifier for. |
73 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. | 73 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. |
74 | */ | 74 | */ |
75 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | 75 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, |
76 | struct pci_bus *pci_bus) | 76 | struct pci_bus *pci_bus) |
77 | { | 77 | { |
78 | return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); | 78 | return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); |
79 | } | 79 | } |
80 | 80 | ||
81 | /** | 81 | /** |
82 | * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. | 82 | * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. |
83 | * @dev: ACPI device to remove the notifier from. | 83 | * @dev: ACPI device to remove the notifier from. |
84 | */ | 84 | */ |
85 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | 85 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) |
86 | { | 86 | { |
87 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus); | 87 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus); |
88 | } | 88 | } |
89 | 89 | ||
90 | /** | 90 | /** |
91 | * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. | 91 | * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. |
92 | * @dev: ACPI device to add the notifier for. | 92 | * @dev: ACPI device to add the notifier for. |
93 | * @pci_dev: PCI device to check for the PME status if an event is signaled. | 93 | * @pci_dev: PCI device to check for the PME status if an event is signaled. |
94 | */ | 94 | */ |
95 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | 95 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, |
96 | struct pci_dev *pci_dev) | 96 | struct pci_dev *pci_dev) |
97 | { | 97 | { |
98 | return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); | 98 | return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); |
99 | } | 99 | } |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. | 102 | * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. |
103 | * @dev: ACPI device to remove the notifier from. | 103 | * @dev: ACPI device to remove the notifier from. |
104 | */ | 104 | */ |
105 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | 105 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) |
106 | { | 106 | { |
107 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev); | 107 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev); |
108 | } | 108 | } |
109 | 109 | ||
110 | phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) | 110 | phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) |
111 | { | 111 | { |
112 | acpi_status status = AE_NOT_EXIST; | 112 | acpi_status status = AE_NOT_EXIST; |
113 | unsigned long long mcfg_addr; | 113 | unsigned long long mcfg_addr; |
114 | 114 | ||
115 | if (handle) | 115 | if (handle) |
116 | status = acpi_evaluate_integer(handle, METHOD_NAME__CBA, | 116 | status = acpi_evaluate_integer(handle, METHOD_NAME__CBA, |
117 | NULL, &mcfg_addr); | 117 | NULL, &mcfg_addr); |
118 | if (ACPI_FAILURE(status)) | 118 | if (ACPI_FAILURE(status)) |
119 | return 0; | 119 | return 0; |
120 | 120 | ||
121 | return (phys_addr_t)mcfg_addr; | 121 | return (phys_addr_t)mcfg_addr; |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * _SxD returns the D-state with the highest power | 125 | * _SxD returns the D-state with the highest power |
126 | * (lowest D-state number) supported in the S-state "x". | 126 | * (lowest D-state number) supported in the S-state "x". |
127 | * | 127 | * |
128 | * If the device does not have a _PRW | 128 | * If the device does not have a _PRW |
129 | * (Power Resources for Wake) supporting system wakeup from "x" | 129 | * (Power Resources for Wake) supporting system wakeup from "x" |
130 | * then the OS is free to choose a lower power (higher number | 130 | * then the OS is free to choose a lower power (higher number |
131 | * D-state) than the return value from _SxD. | 131 | * D-state) than the return value from _SxD. |
132 | * | 132 | * |
133 | * But if _PRW is enabled at S-state "x", the OS | 133 | * But if _PRW is enabled at S-state "x", the OS |
134 | * must not choose a power lower than _SxD -- | 134 | * must not choose a power lower than _SxD -- |
135 | * unless the device has an _SxW method specifying | 135 | * unless the device has an _SxW method specifying |
136 | * the lowest power (highest D-state number) the device | 136 | * the lowest power (highest D-state number) the device |
137 | * may enter while still able to wake the system. | 137 | * may enter while still able to wake the system. |
138 | * | 138 | * |
139 | * i.e. depending on global OS policy: | 139 | * i.e. depending on global OS policy: |
140 | * | 140 | * |
141 | * if (_PRW at S-state x) | 141 | * if (_PRW at S-state x) |
142 | * choose from highest power _SxD to lowest power _SxW | 142 | * choose from highest power _SxD to lowest power _SxW |
143 | * else // no _PRW at S-state x | 143 | * else // no _PRW at S-state x |
144 | * choose highest power _SxD or any lower power | 144 | * choose highest power _SxD or any lower power |
145 | */ | 145 | */ |
146 | 146 | ||
147 | static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) | 147 | static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) |
148 | { | 148 | { |
149 | int acpi_state, d_max; | 149 | int acpi_state, d_max; |
150 | 150 | ||
151 | if (pdev->no_d3cold) | 151 | if (pdev->no_d3cold) |
152 | d_max = ACPI_STATE_D3_HOT; | 152 | d_max = ACPI_STATE_D3_HOT; |
153 | else | 153 | else |
154 | d_max = ACPI_STATE_D3_COLD; | 154 | d_max = ACPI_STATE_D3_COLD; |
155 | acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max); | 155 | acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max); |
156 | if (acpi_state < 0) | 156 | if (acpi_state < 0) |
157 | return PCI_POWER_ERROR; | 157 | return PCI_POWER_ERROR; |
158 | 158 | ||
159 | switch (acpi_state) { | 159 | switch (acpi_state) { |
160 | case ACPI_STATE_D0: | 160 | case ACPI_STATE_D0: |
161 | return PCI_D0; | 161 | return PCI_D0; |
162 | case ACPI_STATE_D1: | 162 | case ACPI_STATE_D1: |
163 | return PCI_D1; | 163 | return PCI_D1; |
164 | case ACPI_STATE_D2: | 164 | case ACPI_STATE_D2: |
165 | return PCI_D2; | 165 | return PCI_D2; |
166 | case ACPI_STATE_D3_HOT: | 166 | case ACPI_STATE_D3_HOT: |
167 | return PCI_D3hot; | 167 | return PCI_D3hot; |
168 | case ACPI_STATE_D3_COLD: | 168 | case ACPI_STATE_D3_COLD: |
169 | return PCI_D3cold; | 169 | return PCI_D3cold; |
170 | } | 170 | } |
171 | return PCI_POWER_ERROR; | 171 | return PCI_POWER_ERROR; |
172 | } | 172 | } |
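The block comment before acpi_pci_choose_state() is easiest to read with concrete numbers plugged in. Assuming, purely for illustration, a device whose _S3D returns 2 and whose _PRW is enabled for S3 with _S3W returning 3, acpi_pm_device_sleep_state() may pick anything from D2 down to D3hot, and the switch above maps the chosen ACPI constant onto the matching pci_power_t:

	/* Hypothetical S3 suspend, values for illustration only:
	 *   _S3D = 2                      -> highest-power state allowed is D2
	 *   _PRW enabled for S3, _S3W = 3 -> deepest wake-capable state is D3hot
	 * acpi_pm_device_sleep_state() therefore returns a value in the range
	 * ACPI_STATE_D2..ACPI_STATE_D3_HOT, which the switch turns into
	 * PCI_D2..PCI_D3hot.
	 */
	pci_power_t target = acpi_pci_choose_state(pdev);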
173 | 173 | ||
174 | static bool acpi_pci_power_manageable(struct pci_dev *dev) | 174 | static bool acpi_pci_power_manageable(struct pci_dev *dev) |
175 | { | 175 | { |
176 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 176 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
177 | 177 | ||
178 | return handle ? acpi_bus_power_manageable(handle) : false; | 178 | return handle ? acpi_bus_power_manageable(handle) : false; |
179 | } | 179 | } |
180 | 180 | ||
181 | static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | 181 | static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
182 | { | 182 | { |
183 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 183 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
184 | static const u8 state_conv[] = { | 184 | static const u8 state_conv[] = { |
185 | [PCI_D0] = ACPI_STATE_D0, | 185 | [PCI_D0] = ACPI_STATE_D0, |
186 | [PCI_D1] = ACPI_STATE_D1, | 186 | [PCI_D1] = ACPI_STATE_D1, |
187 | [PCI_D2] = ACPI_STATE_D2, | 187 | [PCI_D2] = ACPI_STATE_D2, |
188 | [PCI_D3hot] = ACPI_STATE_D3_COLD, | 188 | [PCI_D3hot] = ACPI_STATE_D3_COLD, |
189 | [PCI_D3cold] = ACPI_STATE_D3_COLD, | 189 | [PCI_D3cold] = ACPI_STATE_D3_COLD, |
190 | }; | 190 | }; |
191 | int error = -EINVAL; | 191 | int error = -EINVAL; |
192 | 192 | ||
193 | /* If the ACPI device has _EJ0, ignore the device */ | 193 | /* If the ACPI device has _EJ0, ignore the device */ |
194 | if (!handle || acpi_has_method(handle, "_EJ0")) | 194 | if (!handle || acpi_has_method(handle, "_EJ0")) |
195 | return -ENODEV; | 195 | return -ENODEV; |
196 | 196 | ||
197 | switch (state) { | 197 | switch (state) { |
198 | case PCI_D3cold: | 198 | case PCI_D3cold: |
199 | if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == | 199 | if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == |
200 | PM_QOS_FLAGS_ALL) { | 200 | PM_QOS_FLAGS_ALL) { |
201 | error = -EBUSY; | 201 | error = -EBUSY; |
202 | break; | 202 | break; |
203 | } | 203 | } |
204 | case PCI_D0: | 204 | case PCI_D0: |
205 | case PCI_D1: | 205 | case PCI_D1: |
206 | case PCI_D2: | 206 | case PCI_D2: |
207 | case PCI_D3hot: | 207 | case PCI_D3hot: |
208 | error = acpi_bus_set_power(handle, state_conv[state]); | 208 | error = acpi_bus_set_power(handle, state_conv[state]); |
209 | } | 209 | } |
210 | 210 | ||
211 | if (!error) | 211 | if (!error) |
212 | dev_dbg(&dev->dev, "power state changed by ACPI to %s\n", | 212 | dev_dbg(&dev->dev, "power state changed by ACPI to %s\n", |
213 | acpi_power_state_string(state_conv[state])); | 213 | acpi_power_state_string(state_conv[state])); |
214 | 214 | ||
215 | return error; | 215 | return error; |
216 | } | 216 | } |
217 | 217 | ||
218 | static bool acpi_pci_can_wakeup(struct pci_dev *dev) | 218 | static bool acpi_pci_can_wakeup(struct pci_dev *dev) |
219 | { | 219 | { |
220 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 220 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
221 | 221 | ||
222 | return handle ? acpi_bus_can_wakeup(handle) : false; | 222 | return handle ? acpi_bus_can_wakeup(handle) : false; |
223 | } | 223 | } |
224 | 224 | ||
225 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) | 225 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) |
226 | { | 226 | { |
227 | while (bus->parent) { | 227 | while (bus->parent) { |
228 | if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) | 228 | if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) |
229 | return; | 229 | return; |
230 | bus = bus->parent; | 230 | bus = bus->parent; |
231 | } | 231 | } |
232 | 232 | ||
233 | /* We have reached the root bus. */ | 233 | /* We have reached the root bus. */ |
234 | if (bus->bridge) | 234 | if (bus->bridge) |
235 | acpi_pm_device_sleep_wake(bus->bridge, enable); | 235 | acpi_pm_device_sleep_wake(bus->bridge, enable); |
236 | } | 236 | } |
237 | 237 | ||
238 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | 238 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) |
239 | { | 239 | { |
240 | if (acpi_pci_can_wakeup(dev)) | 240 | if (acpi_pci_can_wakeup(dev)) |
241 | return acpi_pm_device_sleep_wake(&dev->dev, enable); | 241 | return acpi_pm_device_sleep_wake(&dev->dev, enable); |
242 | 242 | ||
243 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); | 243 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); |
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | 246 | ||
247 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | 247 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) |
248 | { | 248 | { |
249 | while (bus->parent) { | 249 | while (bus->parent) { |
250 | struct pci_dev *bridge = bus->self; | 250 | struct pci_dev *bridge = bus->self; |
251 | 251 | ||
252 | if (bridge->pme_interrupt) | 252 | if (bridge->pme_interrupt) |
253 | return; | 253 | return; |
254 | if (!acpi_pm_device_run_wake(&bridge->dev, enable)) | 254 | if (!acpi_pm_device_run_wake(&bridge->dev, enable)) |
255 | return; | 255 | return; |
256 | bus = bus->parent; | 256 | bus = bus->parent; |
257 | } | 257 | } |
258 | 258 | ||
259 | /* We have reached the root bus. */ | 259 | /* We have reached the root bus. */ |
260 | if (bus->bridge) | 260 | if (bus->bridge) |
261 | acpi_pm_device_run_wake(bus->bridge, enable); | 261 | acpi_pm_device_run_wake(bus->bridge, enable); |
262 | } | 262 | } |
263 | 263 | ||
264 | static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) | 264 | static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) |
265 | { | 265 | { |
266 | /* | 266 | /* |
267 | * Per PCI Express Base Specification Revision 2.0 section | 267 | * Per PCI Express Base Specification Revision 2.0 section |
268 | * 5.3.3.2 Link Wakeup, platform support is needed for D3cold | 268 | * 5.3.3.2 Link Wakeup, platform support is needed for D3cold |
269 | * waking up to power on the main link even if there is PME | 269 | * waking up to power on the main link even if there is PME |
270 | * support for D3cold | 270 | * support for D3cold |
271 | */ | 271 | */ |
272 | if (dev->pme_interrupt && !dev->runtime_d3cold) | 272 | if (dev->pme_interrupt && !dev->runtime_d3cold) |
273 | return 0; | 273 | return 0; |
274 | 274 | ||
275 | if (!acpi_pm_device_run_wake(&dev->dev, enable)) | 275 | if (!acpi_pm_device_run_wake(&dev->dev, enable)) |
276 | return 0; | 276 | return 0; |
277 | 277 | ||
278 | acpi_pci_propagate_run_wake(dev->bus, enable); | 278 | acpi_pci_propagate_run_wake(dev->bus, enable); |
279 | return 0; | 279 | return 0; |
280 | } | 280 | } |
281 | 281 | ||
282 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 282 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
283 | .is_manageable = acpi_pci_power_manageable, | 283 | .is_manageable = acpi_pci_power_manageable, |
284 | .set_state = acpi_pci_set_power_state, | 284 | .set_state = acpi_pci_set_power_state, |
285 | .choose_state = acpi_pci_choose_state, | 285 | .choose_state = acpi_pci_choose_state, |
286 | .sleep_wake = acpi_pci_sleep_wake, | 286 | .sleep_wake = acpi_pci_sleep_wake, |
287 | .run_wake = acpi_pci_run_wake, | 287 | .run_wake = acpi_pci_run_wake, |
288 | }; | 288 | }; |
289 | 289 | ||
290 | void acpi_pci_add_bus(struct pci_bus *bus) | 290 | void acpi_pci_add_bus(struct pci_bus *bus) |
291 | { | 291 | { |
292 | if (acpi_pci_disabled || !bus->bridge) | 292 | if (acpi_pci_disabled || !bus->bridge) |
293 | return; | 293 | return; |
294 | 294 | ||
295 | acpi_pci_slot_enumerate(bus); | 295 | acpi_pci_slot_enumerate(bus); |
296 | acpiphp_enumerate_slots(bus); | 296 | acpiphp_enumerate_slots(bus); |
297 | } | 297 | } |
298 | 298 | ||
299 | void acpi_pci_remove_bus(struct pci_bus *bus) | 299 | void acpi_pci_remove_bus(struct pci_bus *bus) |
300 | { | 300 | { |
301 | if (acpi_pci_disabled || !bus->bridge) | 301 | if (acpi_pci_disabled || !bus->bridge) |
302 | return; | 302 | return; |
303 | 303 | ||
304 | acpiphp_remove_slots(bus); | 304 | acpiphp_remove_slots(bus); |
305 | acpi_pci_slot_remove(bus); | 305 | acpi_pci_slot_remove(bus); |
306 | } | 306 | } |
307 | 307 | ||
308 | /* ACPI bus type */ | 308 | /* ACPI bus type */ |
309 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) | 309 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) |
310 | { | 310 | { |
311 | struct pci_dev *pci_dev = to_pci_dev(dev); | 311 | struct pci_dev *pci_dev = to_pci_dev(dev); |
312 | bool is_bridge; | 312 | bool is_bridge; |
313 | u64 addr; | 313 | u64 addr; |
314 | 314 | ||
315 | /* | 315 | /* |
316 | * pci_is_bridge() is not suitable here, because pci_dev->subordinate | 316 | * pci_is_bridge() is not suitable here, because pci_dev->subordinate |
317 | * is set only after acpi_pci_find_device() has been called for the | 317 | * is set only after acpi_pci_find_device() has been called for the |
318 | * given device. | 318 | * given device. |
319 | */ | 319 | */ |
320 | is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE | 320 | is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE |
321 | || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; | 321 | || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; |
322 | /* Please refer to the ACPI spec for the syntax of _ADR */ | 322 | /* Please refer to the ACPI spec for the syntax of _ADR */ |
323 | addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); | 323 | addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); |
324 | *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); | 324 | *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); |
325 | if (!*handle) | 325 | if (!*handle) |
326 | return -ENODEV; | 326 | return -ENODEV; |
327 | return 0; | 327 | return 0; |
328 | } | 328 | } |
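The _ADR value computed just above packs the PCI device (slot) number into bits 31:16 and the function number into bits 15:0, as the ACPI _ADR format for PCI prescribes. A quick worked example with a made-up device at 0000:00:1c.2:

	/* Illustration only: device 00:1c.2, so devfn = (0x1c << 3) | 2 = 0xe2 */
	u64 addr = (PCI_SLOT(0xe2) << 16) | PCI_FUNC(0xe2);	/* 0x001C0002 */

acpi_find_child() is then asked for the child of the parent's ACPI node whose _ADR evaluates to that value.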
329 | 329 | ||
330 | static void pci_acpi_setup(struct device *dev) | 330 | static void pci_acpi_setup(struct device *dev) |
331 | { | 331 | { |
332 | struct pci_dev *pci_dev = to_pci_dev(dev); | 332 | struct pci_dev *pci_dev = to_pci_dev(dev); |
333 | acpi_handle handle = ACPI_HANDLE(dev); | 333 | acpi_handle handle = ACPI_HANDLE(dev); |
334 | struct acpi_device *adev; | 334 | struct acpi_device *adev; |
335 | 335 | ||
336 | if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) | 336 | if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) |
337 | return; | 337 | return; |
338 | 338 | ||
339 | device_set_wakeup_capable(dev, true); | 339 | device_set_wakeup_capable(dev, true); |
340 | acpi_pci_sleep_wake(pci_dev, false); | 340 | acpi_pci_sleep_wake(pci_dev, false); |
341 | 341 | ||
342 | pci_acpi_add_pm_notifier(adev, pci_dev); | 342 | pci_acpi_add_pm_notifier(adev, pci_dev); |
343 | if (adev->wakeup.flags.run_wake) | 343 | if (adev->wakeup.flags.run_wake) |
344 | device_set_run_wake(dev, true); | 344 | device_set_run_wake(dev, true); |
345 | } | 345 | } |
346 | 346 | ||
347 | static void pci_acpi_cleanup(struct device *dev) | 347 | static void pci_acpi_cleanup(struct device *dev) |
348 | { | 348 | { |
349 | acpi_handle handle = ACPI_HANDLE(dev); | 349 | acpi_handle handle = ACPI_HANDLE(dev); |
350 | struct acpi_device *adev; | 350 | struct acpi_device *adev; |
351 | 351 | ||
352 | if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { | 352 | if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { |
353 | device_set_wakeup_capable(dev, false); | 353 | device_set_wakeup_capable(dev, false); |
354 | device_set_run_wake(dev, false); | 354 | device_set_run_wake(dev, false); |
355 | pci_acpi_remove_pm_notifier(adev); | 355 | pci_acpi_remove_pm_notifier(adev); |
356 | } | 356 | } |
357 | } | 357 | } |
358 | 358 | ||
359 | static bool pci_acpi_bus_match(struct device *dev) | 359 | static bool pci_acpi_bus_match(struct device *dev) |
360 | { | 360 | { |
361 | return dev->bus == &pci_bus_type; | 361 | return dev->bus == &pci_bus_type; |
362 | } | 362 | } |
363 | 363 | ||
364 | static struct acpi_bus_type acpi_pci_bus = { | 364 | static struct acpi_bus_type acpi_pci_bus = { |
365 | .name = "PCI", | 365 | .name = "PCI", |
366 | .match = pci_acpi_bus_match, | 366 | .match = pci_acpi_bus_match, |
367 | .find_device = acpi_pci_find_device, | 367 | .find_device = acpi_pci_find_device, |
368 | .setup = pci_acpi_setup, | 368 | .setup = pci_acpi_setup, |
369 | .cleanup = pci_acpi_cleanup, | 369 | .cleanup = pci_acpi_cleanup, |
370 | }; | 370 | }; |
371 | 371 | ||
372 | static int __init acpi_pci_init(void) | 372 | static int __init acpi_pci_init(void) |
373 | { | 373 | { |
374 | int ret; | 374 | int ret; |
375 | 375 | ||
376 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) { | 376 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) { |
377 | pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n"); | 377 | pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n"); |
378 | pci_no_msi(); | 378 | pci_no_msi(); |
379 | } | 379 | } |
380 | 380 | ||
381 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | 381 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { |
382 | pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); | 382 | pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); |
383 | pcie_no_aspm(); | 383 | pcie_no_aspm(); |
384 | } | 384 | } |
385 | 385 | ||
386 | ret = register_acpi_bus_type(&acpi_pci_bus); | 386 | ret = register_acpi_bus_type(&acpi_pci_bus); |
387 | if (ret) | 387 | if (ret) |
388 | return 0; | 388 | return 0; |
389 | 389 | ||
390 | pci_set_platform_pm(&acpi_pci_platform_pm); | 390 | pci_set_platform_pm(&acpi_pci_platform_pm); |
391 | acpi_pci_slot_init(); | 391 | acpi_pci_slot_init(); |
392 | acpiphp_init(); | 392 | acpiphp_init(); |
393 | 393 | ||
394 | return 0; | 394 | return 0; |
395 | } | 395 | } |
396 | arch_initcall(acpi_pci_init); | 396 | arch_initcall(acpi_pci_init); |
397 | 397 |
drivers/pci/pci-label.c
1 | /* | 1 | /* |
2 | * Purpose: Export the firmware instance and label associated with | 2 | * Purpose: Export the firmware instance and label associated with |
3 | * a pci device to sysfs | 3 | * a pci device to sysfs |
4 | * Copyright (C) 2010 Dell Inc. | 4 | * Copyright (C) 2010 Dell Inc. |
5 | * by Narendra K <Narendra_K@dell.com>, | 5 | * by Narendra K <Narendra_K@dell.com>, |
6 | * Jordan Hargrave <Jordan_Hargrave@dell.com> | 6 | * Jordan Hargrave <Jordan_Hargrave@dell.com> |
7 | * | 7 | * |
8 | * PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a | 8 | * PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a |
9 | * PCI or PCI Express Device Under Operating Systems) defines an instance | 9 | * PCI or PCI Express Device Under Operating Systems) defines an instance |
10 | * number and string name. This code retrieves them and exports them to sysfs. | 10 | * number and string name. This code retrieves them and exports them to sysfs. |
11 | * If the system firmware does not provide the ACPI _DSM (Device Specific | 11 | * If the system firmware does not provide the ACPI _DSM (Device Specific |
12 | * Method), then the SMBIOS type 41 instance number and string is exported to | 12 | * Method), then the SMBIOS type 41 instance number and string is exported to |
13 | * sysfs. | 13 | * sysfs. |
14 | * | 14 | * |
15 | * SMBIOS defines type 41 for onboard pci devices. This code retrieves | 15 | * SMBIOS defines type 41 for onboard pci devices. This code retrieves |
16 | * the instance number and string from the type 41 record and exports | 16 | * the instance number and string from the type 41 record and exports |
17 | * it to sysfs. | 17 | * it to sysfs. |
18 | * | 18 | * |
19 | * Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more | 19 | * Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more |
20 | * information. | 20 | * information. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/dmi.h> | 23 | #include <linux/dmi.h> |
24 | #include <linux/sysfs.h> | 24 | #include <linux/sysfs.h> |
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <linux/pci_ids.h> | 26 | #include <linux/pci_ids.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/nls.h> | 29 | #include <linux/nls.h> |
30 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
31 | #include <linux/pci-acpi.h> | 31 | #include <linux/pci-acpi.h> |
32 | #include <acpi/acpi_bus.h> | 32 | #include <acpi/acpi_bus.h> |
33 | #include "pci.h" | 33 | #include "pci.h" |
34 | 34 | ||
35 | #define DEVICE_LABEL_DSM 0x07 | 35 | #define DEVICE_LABEL_DSM 0x07 |
36 | 36 | ||
37 | #ifndef CONFIG_DMI | 37 | #ifndef CONFIG_DMI |
38 | 38 | ||
39 | static inline int | 39 | static inline int |
40 | pci_create_smbiosname_file(struct pci_dev *pdev) | 40 | pci_create_smbiosname_file(struct pci_dev *pdev) |
41 | { | 41 | { |
42 | return -1; | 42 | return -1; |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline void | 45 | static inline void |
46 | pci_remove_smbiosname_file(struct pci_dev *pdev) | 46 | pci_remove_smbiosname_file(struct pci_dev *pdev) |
47 | { | 47 | { |
48 | } | 48 | } |
49 | 49 | ||
50 | #else | 50 | #else |
51 | 51 | ||
52 | enum smbios_attr_enum { | 52 | enum smbios_attr_enum { |
53 | SMBIOS_ATTR_NONE = 0, | 53 | SMBIOS_ATTR_NONE = 0, |
54 | SMBIOS_ATTR_LABEL_SHOW, | 54 | SMBIOS_ATTR_LABEL_SHOW, |
55 | SMBIOS_ATTR_INSTANCE_SHOW, | 55 | SMBIOS_ATTR_INSTANCE_SHOW, |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static size_t | 58 | static size_t |
59 | find_smbios_instance_string(struct pci_dev *pdev, char *buf, | 59 | find_smbios_instance_string(struct pci_dev *pdev, char *buf, |
60 | enum smbios_attr_enum attribute) | 60 | enum smbios_attr_enum attribute) |
61 | { | 61 | { |
62 | const struct dmi_device *dmi; | 62 | const struct dmi_device *dmi; |
63 | struct dmi_dev_onboard *donboard; | 63 | struct dmi_dev_onboard *donboard; |
64 | int bus; | 64 | int bus; |
65 | int devfn; | 65 | int devfn; |
66 | 66 | ||
67 | bus = pdev->bus->number; | 67 | bus = pdev->bus->number; |
68 | devfn = pdev->devfn; | 68 | devfn = pdev->devfn; |
69 | 69 | ||
70 | dmi = NULL; | 70 | dmi = NULL; |
71 | while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, | 71 | while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, |
72 | NULL, dmi)) != NULL) { | 72 | NULL, dmi)) != NULL) { |
73 | donboard = dmi->device_data; | 73 | donboard = dmi->device_data; |
74 | if (donboard && donboard->bus == bus && | 74 | if (donboard && donboard->bus == bus && |
75 | donboard->devfn == devfn) { | 75 | donboard->devfn == devfn) { |
76 | if (buf) { | 76 | if (buf) { |
77 | if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) | 77 | if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) |
78 | return scnprintf(buf, PAGE_SIZE, | 78 | return scnprintf(buf, PAGE_SIZE, |
79 | "%d\n", | 79 | "%d\n", |
80 | donboard->instance); | 80 | donboard->instance); |
81 | else if (attribute == SMBIOS_ATTR_LABEL_SHOW) | 81 | else if (attribute == SMBIOS_ATTR_LABEL_SHOW) |
82 | return scnprintf(buf, PAGE_SIZE, | 82 | return scnprintf(buf, PAGE_SIZE, |
83 | "%s\n", | 83 | "%s\n", |
84 | dmi->name); | 84 | dmi->name); |
85 | } | 85 | } |
86 | return strlen(dmi->name); | 86 | return strlen(dmi->name); |
87 | } | 87 | } |
88 | } | 88 | } |
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
91 | 91 | ||
92 | static umode_t | 92 | static umode_t |
93 | smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr, | 93 | smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr, |
94 | int n) | 94 | int n) |
95 | { | 95 | { |
96 | struct device *dev; | 96 | struct device *dev; |
97 | struct pci_dev *pdev; | 97 | struct pci_dev *pdev; |
98 | 98 | ||
99 | dev = container_of(kobj, struct device, kobj); | 99 | dev = container_of(kobj, struct device, kobj); |
100 | pdev = to_pci_dev(dev); | 100 | pdev = to_pci_dev(dev); |
101 | 101 | ||
102 | return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ? | 102 | return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ? |
103 | S_IRUGO : 0; | 103 | S_IRUGO : 0; |
104 | } | 104 | } |
105 | 105 | ||
106 | static ssize_t | 106 | static ssize_t |
107 | smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf) | 107 | smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf) |
108 | { | 108 | { |
109 | struct pci_dev *pdev; | 109 | struct pci_dev *pdev; |
110 | pdev = to_pci_dev(dev); | 110 | pdev = to_pci_dev(dev); |
111 | 111 | ||
112 | return find_smbios_instance_string(pdev, buf, | 112 | return find_smbios_instance_string(pdev, buf, |
113 | SMBIOS_ATTR_LABEL_SHOW); | 113 | SMBIOS_ATTR_LABEL_SHOW); |
114 | } | 114 | } |
115 | 115 | ||
116 | static ssize_t | 116 | static ssize_t |
117 | smbiosinstance_show(struct device *dev, | 117 | smbiosinstance_show(struct device *dev, |
118 | struct device_attribute *attr, char *buf) | 118 | struct device_attribute *attr, char *buf) |
119 | { | 119 | { |
120 | struct pci_dev *pdev; | 120 | struct pci_dev *pdev; |
121 | pdev = to_pci_dev(dev); | 121 | pdev = to_pci_dev(dev); |
122 | 122 | ||
123 | return find_smbios_instance_string(pdev, buf, | 123 | return find_smbios_instance_string(pdev, buf, |
124 | SMBIOS_ATTR_INSTANCE_SHOW); | 124 | SMBIOS_ATTR_INSTANCE_SHOW); |
125 | } | 125 | } |
126 | 126 | ||
127 | static struct device_attribute smbios_attr_label = { | 127 | static struct device_attribute smbios_attr_label = { |
128 | .attr = {.name = "label", .mode = 0444}, | 128 | .attr = {.name = "label", .mode = 0444}, |
129 | .show = smbioslabel_show, | 129 | .show = smbioslabel_show, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | static struct device_attribute smbios_attr_instance = { | 132 | static struct device_attribute smbios_attr_instance = { |
133 | .attr = {.name = "index", .mode = 0444}, | 133 | .attr = {.name = "index", .mode = 0444}, |
134 | .show = smbiosinstance_show, | 134 | .show = smbiosinstance_show, |
135 | }; | 135 | }; |
136 | 136 | ||
137 | static struct attribute *smbios_attributes[] = { | 137 | static struct attribute *smbios_attributes[] = { |
138 | &smbios_attr_label.attr, | 138 | &smbios_attr_label.attr, |
139 | &smbios_attr_instance.attr, | 139 | &smbios_attr_instance.attr, |
140 | NULL, | 140 | NULL, |
141 | }; | 141 | }; |
142 | 142 | ||
143 | static struct attribute_group smbios_attr_group = { | 143 | static struct attribute_group smbios_attr_group = { |
144 | .attrs = smbios_attributes, | 144 | .attrs = smbios_attributes, |
145 | .is_visible = smbios_instance_string_exist, | 145 | .is_visible = smbios_instance_string_exist, |
146 | }; | 146 | }; |
147 | 147 | ||
148 | static int | 148 | static int |
149 | pci_create_smbiosname_file(struct pci_dev *pdev) | 149 | pci_create_smbiosname_file(struct pci_dev *pdev) |
150 | { | 150 | { |
151 | return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group); | 151 | return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group); |
152 | } | 152 | } |
153 | 153 | ||
154 | static void | 154 | static void |
155 | pci_remove_smbiosname_file(struct pci_dev *pdev) | 155 | pci_remove_smbiosname_file(struct pci_dev *pdev) |
156 | { | 156 | { |
157 | sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group); | 157 | sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group); |
158 | } | 158 | } |
159 | 159 | ||
160 | #endif | 160 | #endif |
161 | 161 | ||
162 | #ifndef CONFIG_ACPI | 162 | #ifndef CONFIG_ACPI |
163 | 163 | ||
164 | static inline int | 164 | static inline int |
165 | pci_create_acpi_index_label_files(struct pci_dev *pdev) | 165 | pci_create_acpi_index_label_files(struct pci_dev *pdev) |
166 | { | 166 | { |
167 | return -1; | 167 | return -1; |
168 | } | 168 | } |
169 | 169 | ||
170 | static inline int | 170 | static inline int |
171 | pci_remove_acpi_index_label_files(struct pci_dev *pdev) | 171 | pci_remove_acpi_index_label_files(struct pci_dev *pdev) |
172 | { | 172 | { |
173 | return -1; | 173 | return -1; |
174 | } | 174 | } |
175 | 175 | ||
176 | static inline bool | 176 | static inline bool |
177 | device_has_dsm(struct device *dev) | 177 | device_has_dsm(struct device *dev) |
178 | { | 178 | { |
179 | return false; | 179 | return false; |
180 | } | 180 | } |
181 | 181 | ||
182 | #else | 182 | #else |
183 | 183 | ||
184 | static const char device_label_dsm_uuid[] = { | 184 | static const char device_label_dsm_uuid[] = { |
185 | 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D, | 185 | 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D, |
186 | 0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D | 186 | 0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D |
187 | }; | 187 | }; |
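The byte array above appears to be the usual mixed-endian serialisation of the _DSM GUID defined by the PCI Firmware Specification, E5C937D0-3553-4D7A-9117-EA4D19C3434D: the first three GUID fields are stored little-endian, the final eight bytes in order.

	/* E5C937D0-3553-4D7A-9117-EA4D19C3434D as a _DSM arg0 buffer:
	 *   0xE5C937D0 -> D0 37 C9 E5   (little-endian dword)
	 *   0x3553     -> 53 35         (little-endian word)
	 *   0x4D7A     -> 7A 4D         (little-endian word)
	 *   91 17 EA 4D 19 C3 43 4D     (taken byte for byte)
	 */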
188 | 188 | ||
189 | enum acpi_attr_enum { | 189 | enum acpi_attr_enum { |
190 | ACPI_ATTR_NONE = 0, | 190 | ACPI_ATTR_NONE = 0, |
191 | ACPI_ATTR_LABEL_SHOW, | 191 | ACPI_ATTR_LABEL_SHOW, |
192 | ACPI_ATTR_INDEX_SHOW, | 192 | ACPI_ATTR_INDEX_SHOW, |
193 | }; | 193 | }; |
194 | 194 | ||
195 | static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) | 195 | static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) |
196 | { | 196 | { |
197 | int len; | 197 | int len; |
198 | len = utf16s_to_utf8s((const wchar_t *)obj-> | 198 | len = utf16s_to_utf8s((const wchar_t *)obj-> |
199 | package.elements[1].string.pointer, | 199 | package.elements[1].string.pointer, |
200 | obj->package.elements[1].string.length, | 200 | obj->package.elements[1].string.length, |
201 | UTF16_LITTLE_ENDIAN, | 201 | UTF16_LITTLE_ENDIAN, |
202 | buf, PAGE_SIZE); | 202 | buf, PAGE_SIZE); |
203 | buf[len] = '\n'; | 203 | buf[len] = '\n'; |
204 | } | 204 | } |
205 | 205 | ||
206 | static int | 206 | static int |
207 | dsm_get_label(acpi_handle handle, int func, | 207 | dsm_get_label(acpi_handle handle, int func, |
208 | struct acpi_buffer *output, | 208 | struct acpi_buffer *output, |
209 | char *buf, enum acpi_attr_enum attribute) | 209 | char *buf, enum acpi_attr_enum attribute) |
210 | { | 210 | { |
211 | struct acpi_object_list input; | 211 | struct acpi_object_list input; |
212 | union acpi_object params[4]; | 212 | union acpi_object params[4]; |
213 | union acpi_object *obj; | 213 | union acpi_object *obj; |
214 | int len = 0; | 214 | int len = 0; |
215 | 215 | ||
216 | int err; | 216 | int err; |
217 | 217 | ||
218 | input.count = 4; | 218 | input.count = 4; |
219 | input.pointer = params; | 219 | input.pointer = params; |
220 | params[0].type = ACPI_TYPE_BUFFER; | 220 | params[0].type = ACPI_TYPE_BUFFER; |
221 | params[0].buffer.length = sizeof(device_label_dsm_uuid); | 221 | params[0].buffer.length = sizeof(device_label_dsm_uuid); |
222 | params[0].buffer.pointer = (char *)device_label_dsm_uuid; | 222 | params[0].buffer.pointer = (char *)device_label_dsm_uuid; |
223 | params[1].type = ACPI_TYPE_INTEGER; | 223 | params[1].type = ACPI_TYPE_INTEGER; |
224 | params[1].integer.value = 0x02; | 224 | params[1].integer.value = 0x02; |
225 | params[2].type = ACPI_TYPE_INTEGER; | 225 | params[2].type = ACPI_TYPE_INTEGER; |
226 | params[2].integer.value = func; | 226 | params[2].integer.value = func; |
227 | params[3].type = ACPI_TYPE_PACKAGE; | 227 | params[3].type = ACPI_TYPE_PACKAGE; |
228 | params[3].package.count = 0; | 228 | params[3].package.count = 0; |
229 | params[3].package.elements = NULL; | 229 | params[3].package.elements = NULL; |
230 | 230 | ||
231 | err = acpi_evaluate_object(handle, "_DSM", &input, output); | 231 | err = acpi_evaluate_object(handle, "_DSM", &input, output); |
232 | if (err) | 232 | if (err) |
233 | return -1; | 233 | return -1; |
234 | 234 | ||
235 | obj = (union acpi_object *)output->pointer; | 235 | obj = (union acpi_object *)output->pointer; |
236 | 236 | ||
237 | switch (obj->type) { | 237 | switch (obj->type) { |
238 | case ACPI_TYPE_PACKAGE: | 238 | case ACPI_TYPE_PACKAGE: |
239 | if (obj->package.count != 2) | 239 | if (obj->package.count != 2) |
240 | break; | 240 | break; |
241 | len = obj->package.elements[0].integer.value; | 241 | len = obj->package.elements[0].integer.value; |
242 | if (buf) { | 242 | if (buf) { |
243 | if (attribute == ACPI_ATTR_INDEX_SHOW) | 243 | if (attribute == ACPI_ATTR_INDEX_SHOW) |
244 | scnprintf(buf, PAGE_SIZE, "%llu\n", | 244 | scnprintf(buf, PAGE_SIZE, "%llu\n", |
245 | obj->package.elements[0].integer.value); | 245 | obj->package.elements[0].integer.value); |
246 | else if (attribute == ACPI_ATTR_LABEL_SHOW) | 246 | else if (attribute == ACPI_ATTR_LABEL_SHOW) |
247 | dsm_label_utf16s_to_utf8s(obj, buf); | 247 | dsm_label_utf16s_to_utf8s(obj, buf); |
248 | kfree(output->pointer); | 248 | kfree(output->pointer); |
249 | return strlen(buf); | 249 | return strlen(buf); |
250 | } | 250 | } |
251 | kfree(output->pointer); | 251 | kfree(output->pointer); |
252 | return len; | 252 | return len; |
253 | break; | 253 | break; |
254 | default: | 254 | default: |
255 | kfree(output->pointer); | 255 | kfree(output->pointer); |
256 | } | 256 | } |
257 | return -1; | 257 | return -1; |
258 | } | 258 | } |
259 | 259 | ||
260 | static bool | 260 | static bool |
261 | device_has_dsm(struct device *dev) | 261 | device_has_dsm(struct device *dev) |
262 | { | 262 | { |
263 | acpi_handle handle; | 263 | acpi_handle handle; |
264 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | 264 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
265 | 265 | ||
266 | handle = DEVICE_ACPI_HANDLE(dev); | 266 | handle = ACPI_HANDLE(dev); |
267 | 267 | ||
268 | if (!handle) | 268 | if (!handle) |
269 | return FALSE; | 269 | return FALSE; |
270 | 270 | ||
271 | if (dsm_get_label(handle, DEVICE_LABEL_DSM, &output, NULL, | 271 | if (dsm_get_label(handle, DEVICE_LABEL_DSM, &output, NULL, |
272 | ACPI_ATTR_NONE) > 0) | 272 | ACPI_ATTR_NONE) > 0) |
273 | return TRUE; | 273 | return TRUE; |
274 | 274 | ||
275 | return FALSE; | 275 | return FALSE; |
276 | } | 276 | } |
277 | 277 | ||
278 | static umode_t | 278 | static umode_t |
279 | acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n) | 279 | acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n) |
280 | { | 280 | { |
281 | struct device *dev; | 281 | struct device *dev; |
282 | 282 | ||
283 | dev = container_of(kobj, struct device, kobj); | 283 | dev = container_of(kobj, struct device, kobj); |
284 | 284 | ||
285 | if (device_has_dsm(dev)) | 285 | if (device_has_dsm(dev)) |
286 | return S_IRUGO; | 286 | return S_IRUGO; |
287 | 287 | ||
288 | return 0; | 288 | return 0; |
289 | } | 289 | } |
290 | 290 | ||
291 | static ssize_t | 291 | static ssize_t |
292 | acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf) | 292 | acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf) |
293 | { | 293 | { |
294 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | 294 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
295 | acpi_handle handle; | 295 | acpi_handle handle; |
296 | int length; | 296 | int length; |
297 | 297 | ||
298 | handle = DEVICE_ACPI_HANDLE(dev); | 298 | handle = ACPI_HANDLE(dev); |
299 | 299 | ||
300 | if (!handle) | 300 | if (!handle) |
301 | return -1; | 301 | return -1; |
302 | 302 | ||
303 | length = dsm_get_label(handle, DEVICE_LABEL_DSM, | 303 | length = dsm_get_label(handle, DEVICE_LABEL_DSM, |
304 | &output, buf, ACPI_ATTR_LABEL_SHOW); | 304 | &output, buf, ACPI_ATTR_LABEL_SHOW); |
305 | 305 | ||
306 | if (length < 1) | 306 | if (length < 1) |
307 | return -1; | 307 | return -1; |
308 | 308 | ||
309 | return length; | 309 | return length; |
310 | } | 310 | } |
311 | 311 | ||
312 | static ssize_t | 312 | static ssize_t |
313 | acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf) | 313 | acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf) |
314 | { | 314 | { |
315 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | 315 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
316 | acpi_handle handle; | 316 | acpi_handle handle; |
317 | int length; | 317 | int length; |
318 | 318 | ||
319 | handle = DEVICE_ACPI_HANDLE(dev); | 319 | handle = ACPI_HANDLE(dev); |
320 | 320 | ||
321 | if (!handle) | 321 | if (!handle) |
322 | return -1; | 322 | return -1; |
323 | 323 | ||
324 | length = dsm_get_label(handle, DEVICE_LABEL_DSM, | 324 | length = dsm_get_label(handle, DEVICE_LABEL_DSM, |
325 | &output, buf, ACPI_ATTR_INDEX_SHOW); | 325 | &output, buf, ACPI_ATTR_INDEX_SHOW); |
326 | 326 | ||
327 | if (length < 0) | 327 | if (length < 0) |
328 | return -1; | 328 | return -1; |
329 | 329 | ||
330 | return length; | 330 | return length; |
331 | 331 | ||
332 | } | 332 | } |
333 | 333 | ||
334 | static struct device_attribute acpi_attr_label = { | 334 | static struct device_attribute acpi_attr_label = { |
335 | .attr = {.name = "label", .mode = 0444}, | 335 | .attr = {.name = "label", .mode = 0444}, |
336 | .show = acpilabel_show, | 336 | .show = acpilabel_show, |
337 | }; | 337 | }; |
338 | 338 | ||
339 | static struct device_attribute acpi_attr_index = { | 339 | static struct device_attribute acpi_attr_index = { |
340 | .attr = {.name = "acpi_index", .mode = 0444}, | 340 | .attr = {.name = "acpi_index", .mode = 0444}, |
341 | .show = acpiindex_show, | 341 | .show = acpiindex_show, |
342 | }; | 342 | }; |
343 | 343 | ||
344 | static struct attribute *acpi_attributes[] = { | 344 | static struct attribute *acpi_attributes[] = { |
345 | &acpi_attr_label.attr, | 345 | &acpi_attr_label.attr, |
346 | &acpi_attr_index.attr, | 346 | &acpi_attr_index.attr, |
347 | NULL, | 347 | NULL, |
348 | }; | 348 | }; |
349 | 349 | ||
350 | static struct attribute_group acpi_attr_group = { | 350 | static struct attribute_group acpi_attr_group = { |
351 | .attrs = acpi_attributes, | 351 | .attrs = acpi_attributes, |
352 | .is_visible = acpi_index_string_exist, | 352 | .is_visible = acpi_index_string_exist, |
353 | }; | 353 | }; |
354 | 354 | ||
355 | static int | 355 | static int |
356 | pci_create_acpi_index_label_files(struct pci_dev *pdev) | 356 | pci_create_acpi_index_label_files(struct pci_dev *pdev) |
357 | { | 357 | { |
358 | return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group); | 358 | return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group); |
359 | } | 359 | } |
360 | 360 | ||
361 | static int | 361 | static int |
362 | pci_remove_acpi_index_label_files(struct pci_dev *pdev) | 362 | pci_remove_acpi_index_label_files(struct pci_dev *pdev) |
363 | { | 363 | { |
364 | sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group); | 364 | sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group); |
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | #endif | 367 | #endif |
368 | 368 | ||
369 | void pci_create_firmware_label_files(struct pci_dev *pdev) | 369 | void pci_create_firmware_label_files(struct pci_dev *pdev) |
370 | { | 370 | { |
371 | if (device_has_dsm(&pdev->dev)) | 371 | if (device_has_dsm(&pdev->dev)) |
372 | pci_create_acpi_index_label_files(pdev); | 372 | pci_create_acpi_index_label_files(pdev); |
373 | else | 373 | else |
374 | pci_create_smbiosname_file(pdev); | 374 | pci_create_smbiosname_file(pdev); |
375 | } | 375 | } |
376 | 376 | ||
377 | void pci_remove_firmware_label_files(struct pci_dev *pdev) | 377 | void pci_remove_firmware_label_files(struct pci_dev *pdev) |
378 | { | 378 | { |
379 | if (device_has_dsm(&pdev->dev)) | 379 | if (device_has_dsm(&pdev->dev)) |
380 | pci_remove_acpi_index_label_files(pdev); | 380 | pci_remove_acpi_index_label_files(pdev); |
381 | else | 381 | else |
382 | pci_remove_smbiosname_file(pdev); | 382 | pci_remove_smbiosname_file(pdev); |
383 | } | 383 | } |
384 | 384 |
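In this file the patch touches the three handle lookups in device_has_dsm(), acpilabel_show() and acpiindex_show(); all three share the same guard shape, sketched here for a device without an ACPI companion (illustrative only):

	acpi_handle handle = ACPI_HANDLE(dev);	/* NULL when firmware describes no node */

	if (!handle)
		return -1;	/* or FALSE in device_has_dsm(): no _DSM label to expose */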
drivers/platform/x86/apple-gmux.c
1 | /* | 1 | /* |
2 | * Gmux driver for Apple laptops | 2 | * Gmux driver for Apple laptops |
3 | * | 3 | * |
4 | * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com> | 4 | * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com> |
5 | * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de> | 5 | * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/backlight.h> | 17 | #include <linux/backlight.h> |
18 | #include <linux/acpi.h> | 18 | #include <linux/acpi.h> |
19 | #include <linux/pnp.h> | 19 | #include <linux/pnp.h> |
20 | #include <linux/apple_bl.h> | 20 | #include <linux/apple_bl.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/vga_switcheroo.h> | 24 | #include <linux/vga_switcheroo.h> |
25 | #include <acpi/video.h> | 25 | #include <acpi/video.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | 27 | ||
28 | struct apple_gmux_data { | 28 | struct apple_gmux_data { |
29 | unsigned long iostart; | 29 | unsigned long iostart; |
30 | unsigned long iolen; | 30 | unsigned long iolen; |
31 | bool indexed; | 31 | bool indexed; |
32 | struct mutex index_lock; | 32 | struct mutex index_lock; |
33 | 33 | ||
34 | struct backlight_device *bdev; | 34 | struct backlight_device *bdev; |
35 | 35 | ||
36 | /* switcheroo data */ | 36 | /* switcheroo data */ |
37 | acpi_handle dhandle; | 37 | acpi_handle dhandle; |
38 | int gpe; | 38 | int gpe; |
39 | enum vga_switcheroo_client_id resume_client_id; | 39 | enum vga_switcheroo_client_id resume_client_id; |
40 | enum vga_switcheroo_state power_state; | 40 | enum vga_switcheroo_state power_state; |
41 | struct completion powerchange_done; | 41 | struct completion powerchange_done; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static struct apple_gmux_data *apple_gmux_data; | 44 | static struct apple_gmux_data *apple_gmux_data; |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * gmux port offsets. Many of these are not yet used, but may be in the | 47 | * gmux port offsets. Many of these are not yet used, but may be in the |
48 | * future, and it's useful to have them documented here anyhow. | 48 | * future, and it's useful to have them documented here anyhow. |
49 | */ | 49 | */ |
50 | #define GMUX_PORT_VERSION_MAJOR 0x04 | 50 | #define GMUX_PORT_VERSION_MAJOR 0x04 |
51 | #define GMUX_PORT_VERSION_MINOR 0x05 | 51 | #define GMUX_PORT_VERSION_MINOR 0x05 |
52 | #define GMUX_PORT_VERSION_RELEASE 0x06 | 52 | #define GMUX_PORT_VERSION_RELEASE 0x06 |
53 | #define GMUX_PORT_SWITCH_DISPLAY 0x10 | 53 | #define GMUX_PORT_SWITCH_DISPLAY 0x10 |
54 | #define GMUX_PORT_SWITCH_GET_DISPLAY 0x11 | 54 | #define GMUX_PORT_SWITCH_GET_DISPLAY 0x11 |
55 | #define GMUX_PORT_INTERRUPT_ENABLE 0x14 | 55 | #define GMUX_PORT_INTERRUPT_ENABLE 0x14 |
56 | #define GMUX_PORT_INTERRUPT_STATUS 0x16 | 56 | #define GMUX_PORT_INTERRUPT_STATUS 0x16 |
57 | #define GMUX_PORT_SWITCH_DDC 0x28 | 57 | #define GMUX_PORT_SWITCH_DDC 0x28 |
58 | #define GMUX_PORT_SWITCH_EXTERNAL 0x40 | 58 | #define GMUX_PORT_SWITCH_EXTERNAL 0x40 |
59 | #define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41 | 59 | #define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41 |
60 | #define GMUX_PORT_DISCRETE_POWER 0x50 | 60 | #define GMUX_PORT_DISCRETE_POWER 0x50 |
61 | #define GMUX_PORT_MAX_BRIGHTNESS 0x70 | 61 | #define GMUX_PORT_MAX_BRIGHTNESS 0x70 |
62 | #define GMUX_PORT_BRIGHTNESS 0x74 | 62 | #define GMUX_PORT_BRIGHTNESS 0x74 |
63 | #define GMUX_PORT_VALUE 0xc2 | 63 | #define GMUX_PORT_VALUE 0xc2 |
64 | #define GMUX_PORT_READ 0xd0 | 64 | #define GMUX_PORT_READ 0xd0 |
65 | #define GMUX_PORT_WRITE 0xd4 | 65 | #define GMUX_PORT_WRITE 0xd4 |
66 | 66 | ||
67 | #define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) | 67 | #define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) |
68 | 68 | ||
69 | #define GMUX_INTERRUPT_ENABLE 0xff | 69 | #define GMUX_INTERRUPT_ENABLE 0xff |
70 | #define GMUX_INTERRUPT_DISABLE 0x00 | 70 | #define GMUX_INTERRUPT_DISABLE 0x00 |
71 | 71 | ||
72 | #define GMUX_INTERRUPT_STATUS_ACTIVE 0 | 72 | #define GMUX_INTERRUPT_STATUS_ACTIVE 0 |
73 | #define GMUX_INTERRUPT_STATUS_DISPLAY (1 << 0) | 73 | #define GMUX_INTERRUPT_STATUS_DISPLAY (1 << 0) |
74 | #define GMUX_INTERRUPT_STATUS_POWER (1 << 2) | 74 | #define GMUX_INTERRUPT_STATUS_POWER (1 << 2) |
75 | #define GMUX_INTERRUPT_STATUS_HOTPLUG (1 << 3) | 75 | #define GMUX_INTERRUPT_STATUS_HOTPLUG (1 << 3) |
76 | 76 | ||
77 | #define GMUX_BRIGHTNESS_MASK 0x00ffffff | 77 | #define GMUX_BRIGHTNESS_MASK 0x00ffffff |
78 | #define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK | 78 | #define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK |
79 | 79 | ||
80 | static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port) | 80 | static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port) |
81 | { | 81 | { |
82 | return inb(gmux_data->iostart + port); | 82 | return inb(gmux_data->iostart + port); |
83 | } | 83 | } |
84 | 84 | ||
85 | static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port, | 85 | static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port, |
86 | u8 val) | 86 | u8 val) |
87 | { | 87 | { |
88 | outb(val, gmux_data->iostart + port); | 88 | outb(val, gmux_data->iostart + port); |
89 | } | 89 | } |
90 | 90 | ||
91 | static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port) | 91 | static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port) |
92 | { | 92 | { |
93 | return inl(gmux_data->iostart + port); | 93 | return inl(gmux_data->iostart + port); |
94 | } | 94 | } |
95 | 95 | ||
96 | static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port, | 96 | static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port, |
97 | u32 val) | 97 | u32 val) |
98 | { | 98 | { |
99 | int i; | 99 | int i; |
100 | u8 tmpval; | 100 | u8 tmpval; |
101 | 101 | ||
102 | for (i = 0; i < 4; i++) { | 102 | for (i = 0; i < 4; i++) { |
103 | tmpval = (val >> (i * 8)) & 0xff; | 103 | tmpval = (val >> (i * 8)) & 0xff; |
104 | outb(tmpval, gmux_data->iostart + port + i); | 104 | outb(tmpval, gmux_data->iostart + port + i); |
105 | } | 105 | } |
106 | } | 106 | } |
107 | 107 | ||
108 | static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data) | 108 | static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data) |
109 | { | 109 | { |
110 | int i = 200; | 110 | int i = 200; |
111 | u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); | 111 | u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); |
112 | 112 | ||
113 | while (i && (gwr & 0x01)) { | 113 | while (i && (gwr & 0x01)) { |
114 | inb(gmux_data->iostart + GMUX_PORT_READ); | 114 | inb(gmux_data->iostart + GMUX_PORT_READ); |
115 | gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); | 115 | gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); |
116 | udelay(100); | 116 | udelay(100); |
117 | i--; | 117 | i--; |
118 | } | 118 | } |
119 | 119 | ||
120 | return !!i; | 120 | return !!i; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data) | 123 | static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data) |
124 | { | 124 | { |
125 | int i = 200; | 125 | int i = 200; |
126 | u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); | 126 | u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); |
127 | 127 | ||
128 | while (i && !(gwr & 0x01)) { | 128 | while (i && !(gwr & 0x01)) { |
129 | gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); | 129 | gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); |
130 | udelay(100); | 130 | udelay(100); |
131 | i--; | 131 | i--; |
132 | } | 132 | } |
133 | 133 | ||
134 | if (gwr & 0x01) | 134 | if (gwr & 0x01) |
135 | inb(gmux_data->iostart + GMUX_PORT_READ); | 135 | inb(gmux_data->iostart + GMUX_PORT_READ); |
136 | 136 | ||
137 | return !!i; | 137 | return !!i; |
138 | } | 138 | } |
139 | 139 | ||
140 | static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port) | 140 | static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port) |
141 | { | 141 | { |
142 | u8 val; | 142 | u8 val; |
143 | 143 | ||
144 | mutex_lock(&gmux_data->index_lock); | 144 | mutex_lock(&gmux_data->index_lock); |
145 | gmux_index_wait_ready(gmux_data); | 145 | gmux_index_wait_ready(gmux_data); |
146 | outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); | 146 | outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); |
147 | gmux_index_wait_complete(gmux_data); | 147 | gmux_index_wait_complete(gmux_data); |
148 | val = inb(gmux_data->iostart + GMUX_PORT_VALUE); | 148 | val = inb(gmux_data->iostart + GMUX_PORT_VALUE); |
149 | mutex_unlock(&gmux_data->index_lock); | 149 | mutex_unlock(&gmux_data->index_lock); |
150 | 150 | ||
151 | return val; | 151 | return val; |
152 | } | 152 | } |
153 | 153 | ||
154 | static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port, | 154 | static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port, |
155 | u8 val) | 155 | u8 val) |
156 | { | 156 | { |
157 | mutex_lock(&gmux_data->index_lock); | 157 | mutex_lock(&gmux_data->index_lock); |
158 | outb(val, gmux_data->iostart + GMUX_PORT_VALUE); | 158 | outb(val, gmux_data->iostart + GMUX_PORT_VALUE); |
159 | gmux_index_wait_ready(gmux_data); | 159 | gmux_index_wait_ready(gmux_data); |
160 | outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); | 160 | outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); |
161 | gmux_index_wait_complete(gmux_data); | 161 | gmux_index_wait_complete(gmux_data); |
162 | mutex_unlock(&gmux_data->index_lock); | 162 | mutex_unlock(&gmux_data->index_lock); |
163 | } | 163 | } |
164 | 164 | ||
165 | static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port) | 165 | static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port) |
166 | { | 166 | { |
167 | u32 val; | 167 | u32 val; |
168 | 168 | ||
169 | mutex_lock(&gmux_data->index_lock); | 169 | mutex_lock(&gmux_data->index_lock); |
170 | gmux_index_wait_ready(gmux_data); | 170 | gmux_index_wait_ready(gmux_data); |
171 | outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); | 171 | outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); |
172 | gmux_index_wait_complete(gmux_data); | 172 | gmux_index_wait_complete(gmux_data); |
173 | val = inl(gmux_data->iostart + GMUX_PORT_VALUE); | 173 | val = inl(gmux_data->iostart + GMUX_PORT_VALUE); |
174 | mutex_unlock(&gmux_data->index_lock); | 174 | mutex_unlock(&gmux_data->index_lock); |
175 | 175 | ||
176 | return val; | 176 | return val; |
177 | } | 177 | } |
178 | 178 | ||
179 | static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port, | 179 | static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port, |
180 | u32 val) | 180 | u32 val) |
181 | { | 181 | { |
182 | int i; | 182 | int i; |
183 | u8 tmpval; | 183 | u8 tmpval; |
184 | 184 | ||
185 | mutex_lock(&gmux_data->index_lock); | 185 | mutex_lock(&gmux_data->index_lock); |
186 | 186 | ||
187 | for (i = 0; i < 4; i++) { | 187 | for (i = 0; i < 4; i++) { |
188 | tmpval = (val >> (i * 8)) & 0xff; | 188 | tmpval = (val >> (i * 8)) & 0xff; |
189 | outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i); | 189 | outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i); |
190 | } | 190 | } |
191 | 191 | ||
192 | gmux_index_wait_ready(gmux_data); | 192 | gmux_index_wait_ready(gmux_data); |
193 | outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); | 193 | outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); |
194 | gmux_index_wait_complete(gmux_data); | 194 | gmux_index_wait_complete(gmux_data); |
195 | mutex_unlock(&gmux_data->index_lock); | 195 | mutex_unlock(&gmux_data->index_lock); |
196 | } | 196 | } |
197 | 197 | ||
198 | static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) | 198 | static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) |
199 | { | 199 | { |
200 | if (gmux_data->indexed) | 200 | if (gmux_data->indexed) |
201 | return gmux_index_read8(gmux_data, port); | 201 | return gmux_index_read8(gmux_data, port); |
202 | else | 202 | else |
203 | return gmux_pio_read8(gmux_data, port); | 203 | return gmux_pio_read8(gmux_data, port); |
204 | } | 204 | } |
205 | 205 | ||
206 | static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val) | 206 | static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val) |
207 | { | 207 | { |
208 | if (gmux_data->indexed) | 208 | if (gmux_data->indexed) |
209 | gmux_index_write8(gmux_data, port, val); | 209 | gmux_index_write8(gmux_data, port, val); |
210 | else | 210 | else |
211 | gmux_pio_write8(gmux_data, port, val); | 211 | gmux_pio_write8(gmux_data, port, val); |
212 | } | 212 | } |
213 | 213 | ||
214 | static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) | 214 | static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) |
215 | { | 215 | { |
216 | if (gmux_data->indexed) | 216 | if (gmux_data->indexed) |
217 | return gmux_index_read32(gmux_data, port); | 217 | return gmux_index_read32(gmux_data, port); |
218 | else | 218 | else |
219 | return gmux_pio_read32(gmux_data, port); | 219 | return gmux_pio_read32(gmux_data, port); |
220 | } | 220 | } |
221 | 221 | ||
222 | static void gmux_write32(struct apple_gmux_data *gmux_data, int port, | 222 | static void gmux_write32(struct apple_gmux_data *gmux_data, int port, |
223 | u32 val) | 223 | u32 val) |
224 | { | 224 | { |
225 | if (gmux_data->indexed) | 225 | if (gmux_data->indexed) |
226 | gmux_index_write32(gmux_data, port, val); | 226 | gmux_index_write32(gmux_data, port, val); |
227 | else | 227 | else |
228 | gmux_pio_write32(gmux_data, port, val); | 228 | gmux_pio_write32(gmux_data, port, val); |
229 | } | 229 | } |
230 | 230 | ||
231 | static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) | 231 | static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) |
232 | { | 232 | { |
233 | u16 val; | 233 | u16 val; |
234 | 234 | ||
235 | outb(0xaa, gmux_data->iostart + 0xcc); | 235 | outb(0xaa, gmux_data->iostart + 0xcc); |
236 | outb(0x55, gmux_data->iostart + 0xcd); | 236 | outb(0x55, gmux_data->iostart + 0xcd); |
237 | outb(0x00, gmux_data->iostart + 0xce); | 237 | outb(0x00, gmux_data->iostart + 0xce); |
238 | 238 | ||
239 | val = inb(gmux_data->iostart + 0xcc) | | 239 | val = inb(gmux_data->iostart + 0xcc) | |
240 | (inb(gmux_data->iostart + 0xcd) << 8); | 240 | (inb(gmux_data->iostart + 0xcd) << 8); |
241 | 241 | ||
242 | if (val == 0x55aa) | 242 | if (val == 0x55aa) |
243 | return true; | 243 | return true; |
244 | 244 | ||
245 | return false; | 245 | return false; |
246 | } | 246 | } |
247 | 247 | ||
248 | static int gmux_get_brightness(struct backlight_device *bd) | 248 | static int gmux_get_brightness(struct backlight_device *bd) |
249 | { | 249 | { |
250 | struct apple_gmux_data *gmux_data = bl_get_data(bd); | 250 | struct apple_gmux_data *gmux_data = bl_get_data(bd); |
251 | return gmux_read32(gmux_data, GMUX_PORT_BRIGHTNESS) & | 251 | return gmux_read32(gmux_data, GMUX_PORT_BRIGHTNESS) & |
252 | GMUX_BRIGHTNESS_MASK; | 252 | GMUX_BRIGHTNESS_MASK; |
253 | } | 253 | } |
254 | 254 | ||
255 | static int gmux_update_status(struct backlight_device *bd) | 255 | static int gmux_update_status(struct backlight_device *bd) |
256 | { | 256 | { |
257 | struct apple_gmux_data *gmux_data = bl_get_data(bd); | 257 | struct apple_gmux_data *gmux_data = bl_get_data(bd); |
258 | u32 brightness = bd->props.brightness; | 258 | u32 brightness = bd->props.brightness; |
259 | 259 | ||
260 | if (bd->props.state & BL_CORE_SUSPENDED) | 260 | if (bd->props.state & BL_CORE_SUSPENDED) |
261 | return 0; | 261 | return 0; |
262 | 262 | ||
263 | gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); | 263 | gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); |
264 | 264 | ||
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | static const struct backlight_ops gmux_bl_ops = { | 268 | static const struct backlight_ops gmux_bl_ops = { |
269 | .options = BL_CORE_SUSPENDRESUME, | 269 | .options = BL_CORE_SUSPENDRESUME, |
270 | .get_brightness = gmux_get_brightness, | 270 | .get_brightness = gmux_get_brightness, |
271 | .update_status = gmux_update_status, | 271 | .update_status = gmux_update_status, |
272 | }; | 272 | }; |
273 | 273 | ||
274 | static int gmux_switchto(enum vga_switcheroo_client_id id) | 274 | static int gmux_switchto(enum vga_switcheroo_client_id id) |
275 | { | 275 | { |
276 | if (id == VGA_SWITCHEROO_IGD) { | 276 | if (id == VGA_SWITCHEROO_IGD) { |
277 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); | 277 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); |
278 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); | 278 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); |
279 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); | 279 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); |
280 | } else { | 280 | } else { |
281 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); | 281 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); |
282 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); | 282 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); |
283 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); | 283 | gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); |
284 | } | 284 | } |
285 | 285 | ||
286 | return 0; | 286 | return 0; |
287 | } | 287 | } |
288 | 288 | ||
289 | static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, | 289 | static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, |
290 | enum vga_switcheroo_state state) | 290 | enum vga_switcheroo_state state) |
291 | { | 291 | { |
292 | INIT_COMPLETION(gmux_data->powerchange_done); | 292 | INIT_COMPLETION(gmux_data->powerchange_done); |
293 | 293 | ||
294 | if (state == VGA_SWITCHEROO_ON) { | 294 | if (state == VGA_SWITCHEROO_ON) { |
295 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); | 295 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); |
296 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3); | 296 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3); |
297 | pr_debug("Discrete card powered up\n"); | 297 | pr_debug("Discrete card powered up\n"); |
298 | } else { | 298 | } else { |
299 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); | 299 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); |
300 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0); | 300 | gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0); |
301 | pr_debug("Discrete card powered down\n"); | 301 | pr_debug("Discrete card powered down\n"); |
302 | } | 302 | } |
303 | 303 | ||
304 | gmux_data->power_state = state; | 304 | gmux_data->power_state = state; |
305 | 305 | ||
306 | if (gmux_data->gpe >= 0 && | 306 | if (gmux_data->gpe >= 0 && |
307 | !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done, | 307 | !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done, |
308 | msecs_to_jiffies(200))) | 308 | msecs_to_jiffies(200))) |
309 | pr_warn("Timeout waiting for gmux switch to complete\n"); | 309 | pr_warn("Timeout waiting for gmux switch to complete\n"); |
310 | 310 | ||
311 | return 0; | 311 | return 0; |
312 | } | 312 | } |
313 | 313 | ||
314 | static int gmux_set_power_state(enum vga_switcheroo_client_id id, | 314 | static int gmux_set_power_state(enum vga_switcheroo_client_id id, |
315 | enum vga_switcheroo_state state) | 315 | enum vga_switcheroo_state state) |
316 | { | 316 | { |
317 | if (id == VGA_SWITCHEROO_IGD) | 317 | if (id == VGA_SWITCHEROO_IGD) |
318 | return 0; | 318 | return 0; |
319 | 319 | ||
320 | return gmux_set_discrete_state(apple_gmux_data, state); | 320 | return gmux_set_discrete_state(apple_gmux_data, state); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int gmux_get_client_id(struct pci_dev *pdev) | 323 | static int gmux_get_client_id(struct pci_dev *pdev) |
324 | { | 324 | { |
325 | /* | 325 | /* |
326 | * Early MacBook Pros with switchable graphics use NVIDIA | 326 | * Early MacBook Pros with switchable graphics use NVIDIA |
327 | * integrated graphics. Hardcode that the 9400M is integrated. | 327 | * integrated graphics. Hardcode that the 9400M is integrated. |
328 | */ | 328 | */ |
329 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | 329 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) |
330 | return VGA_SWITCHEROO_IGD; | 330 | return VGA_SWITCHEROO_IGD; |
331 | else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && | 331 | else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && |
332 | pdev->device == 0x0863) | 332 | pdev->device == 0x0863) |
333 | return VGA_SWITCHEROO_IGD; | 333 | return VGA_SWITCHEROO_IGD; |
334 | else | 334 | else |
335 | return VGA_SWITCHEROO_DIS; | 335 | return VGA_SWITCHEROO_DIS; |
336 | } | 336 | } |
337 | 337 | ||
338 | static enum vga_switcheroo_client_id | 338 | static enum vga_switcheroo_client_id |
339 | gmux_active_client(struct apple_gmux_data *gmux_data) | 339 | gmux_active_client(struct apple_gmux_data *gmux_data) |
340 | { | 340 | { |
341 | if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) | 341 | if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) |
342 | return VGA_SWITCHEROO_IGD; | 342 | return VGA_SWITCHEROO_IGD; |
343 | 343 | ||
344 | return VGA_SWITCHEROO_DIS; | 344 | return VGA_SWITCHEROO_DIS; |
345 | } | 345 | } |
346 | 346 | ||
347 | static struct vga_switcheroo_handler gmux_handler = { | 347 | static struct vga_switcheroo_handler gmux_handler = { |
348 | .switchto = gmux_switchto, | 348 | .switchto = gmux_switchto, |
349 | .power_state = gmux_set_power_state, | 349 | .power_state = gmux_set_power_state, |
350 | .get_client_id = gmux_get_client_id, | 350 | .get_client_id = gmux_get_client_id, |
351 | }; | 351 | }; |
352 | 352 | ||
353 | static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) | 353 | static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) |
354 | { | 354 | { |
355 | gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, | 355 | gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, |
356 | GMUX_INTERRUPT_DISABLE); | 356 | GMUX_INTERRUPT_DISABLE); |
357 | } | 357 | } |
358 | 358 | ||
359 | static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data) | 359 | static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data) |
360 | { | 360 | { |
361 | gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, | 361 | gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, |
362 | GMUX_INTERRUPT_ENABLE); | 362 | GMUX_INTERRUPT_ENABLE); |
363 | } | 363 | } |
364 | 364 | ||
365 | static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data) | 365 | static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data) |
366 | { | 366 | { |
367 | return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS); | 367 | return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS); |
368 | } | 368 | } |
369 | 369 | ||
370 | static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data) | 370 | static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data) |
371 | { | 371 | { |
372 | u8 status; | 372 | u8 status; |
373 | 373 | ||
374 | /* To clear interrupts, write back the current status. */ | 374 | /* To clear interrupts, write back the current status. */ |
375 | status = gmux_interrupt_get_status(gmux_data); | 375 | status = gmux_interrupt_get_status(gmux_data); |
376 | gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status); | 376 | gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status); |
377 | } | 377 | } |
378 | 378 | ||
379 | static void gmux_notify_handler(acpi_handle device, u32 value, void *context) | 379 | static void gmux_notify_handler(acpi_handle device, u32 value, void *context) |
380 | { | 380 | { |
381 | u8 status; | 381 | u8 status; |
382 | struct pnp_dev *pnp = (struct pnp_dev *)context; | 382 | struct pnp_dev *pnp = (struct pnp_dev *)context; |
383 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); | 383 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); |
384 | 384 | ||
385 | status = gmux_interrupt_get_status(gmux_data); | 385 | status = gmux_interrupt_get_status(gmux_data); |
386 | gmux_disable_interrupts(gmux_data); | 386 | gmux_disable_interrupts(gmux_data); |
387 | pr_debug("Notify handler called: status %d\n", status); | 387 | pr_debug("Notify handler called: status %d\n", status); |
388 | 388 | ||
389 | gmux_clear_interrupts(gmux_data); | 389 | gmux_clear_interrupts(gmux_data); |
390 | gmux_enable_interrupts(gmux_data); | 390 | gmux_enable_interrupts(gmux_data); |
391 | 391 | ||
392 | if (status & GMUX_INTERRUPT_STATUS_POWER) | 392 | if (status & GMUX_INTERRUPT_STATUS_POWER) |
393 | complete(&gmux_data->powerchange_done); | 393 | complete(&gmux_data->powerchange_done); |
394 | } | 394 | } |
395 | 395 | ||
396 | static int gmux_suspend(struct device *dev) | 396 | static int gmux_suspend(struct device *dev) |
397 | { | 397 | { |
398 | struct pnp_dev *pnp = to_pnp_dev(dev); | 398 | struct pnp_dev *pnp = to_pnp_dev(dev); |
399 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); | 399 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); |
400 | 400 | ||
401 | gmux_data->resume_client_id = gmux_active_client(gmux_data); | 401 | gmux_data->resume_client_id = gmux_active_client(gmux_data); |
402 | gmux_disable_interrupts(gmux_data); | 402 | gmux_disable_interrupts(gmux_data); |
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
405 | 405 | ||
406 | static int gmux_resume(struct device *dev) | 406 | static int gmux_resume(struct device *dev) |
407 | { | 407 | { |
408 | struct pnp_dev *pnp = to_pnp_dev(dev); | 408 | struct pnp_dev *pnp = to_pnp_dev(dev); |
409 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); | 409 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); |
410 | 410 | ||
411 | gmux_enable_interrupts(gmux_data); | 411 | gmux_enable_interrupts(gmux_data); |
412 | gmux_switchto(gmux_data->resume_client_id); | 412 | gmux_switchto(gmux_data->resume_client_id); |
413 | if (gmux_data->power_state == VGA_SWITCHEROO_OFF) | 413 | if (gmux_data->power_state == VGA_SWITCHEROO_OFF) |
414 | gmux_set_discrete_state(gmux_data, gmux_data->power_state); | 414 | gmux_set_discrete_state(gmux_data, gmux_data->power_state); |
415 | return 0; | 415 | return 0; |
416 | } | 416 | } |
417 | 417 | ||
418 | static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) | 418 | static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
419 | { | 419 | { |
420 | struct apple_gmux_data *gmux_data; | 420 | struct apple_gmux_data *gmux_data; |
421 | struct resource *res; | 421 | struct resource *res; |
422 | struct backlight_properties props; | 422 | struct backlight_properties props; |
423 | struct backlight_device *bdev; | 423 | struct backlight_device *bdev; |
424 | u8 ver_major, ver_minor, ver_release; | 424 | u8 ver_major, ver_minor, ver_release; |
425 | int ret = -ENXIO; | 425 | int ret = -ENXIO; |
426 | acpi_status status; | 426 | acpi_status status; |
427 | unsigned long long gpe; | 427 | unsigned long long gpe; |
428 | 428 | ||
429 | if (apple_gmux_data) | 429 | if (apple_gmux_data) |
430 | return -EBUSY; | 430 | return -EBUSY; |
431 | 431 | ||
432 | gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); | 432 | gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); |
433 | if (!gmux_data) | 433 | if (!gmux_data) |
434 | return -ENOMEM; | 434 | return -ENOMEM; |
435 | pnp_set_drvdata(pnp, gmux_data); | 435 | pnp_set_drvdata(pnp, gmux_data); |
436 | 436 | ||
437 | res = pnp_get_resource(pnp, IORESOURCE_IO, 0); | 437 | res = pnp_get_resource(pnp, IORESOURCE_IO, 0); |
438 | if (!res) { | 438 | if (!res) { |
439 | pr_err("Failed to find gmux I/O resource\n"); | 439 | pr_err("Failed to find gmux I/O resource\n"); |
440 | goto err_free; | 440 | goto err_free; |
441 | } | 441 | } |
442 | 442 | ||
443 | gmux_data->iostart = res->start; | 443 | gmux_data->iostart = res->start; |
444 | gmux_data->iolen = res->end - res->start; | 444 | gmux_data->iolen = res->end - res->start; |
445 | 445 | ||
446 | if (gmux_data->iolen < GMUX_MIN_IO_LEN) { | 446 | if (gmux_data->iolen < GMUX_MIN_IO_LEN) { |
447 | pr_err("gmux I/O region too small (%lu < %u)\n", | 447 | pr_err("gmux I/O region too small (%lu < %u)\n", |
448 | gmux_data->iolen, GMUX_MIN_IO_LEN); | 448 | gmux_data->iolen, GMUX_MIN_IO_LEN); |
449 | goto err_free; | 449 | goto err_free; |
450 | } | 450 | } |
451 | 451 | ||
452 | if (!request_region(gmux_data->iostart, gmux_data->iolen, | 452 | if (!request_region(gmux_data->iostart, gmux_data->iolen, |
453 | "Apple gmux")) { | 453 | "Apple gmux")) { |
454 | pr_err("gmux I/O already in use\n"); | 454 | pr_err("gmux I/O already in use\n"); |
455 | goto err_free; | 455 | goto err_free; |
456 | } | 456 | } |
457 | 457 | ||
458 | /* | 458 | /* |
459 | * Invalid version information may indicate either that the gmux | 459 | * Invalid version information may indicate either that the gmux |
460 | * device isn't present or that it's a newer one that uses indexed | 460 | * device isn't present or that it's a newer one that uses indexed |
461 | * I/O. | 461 | * I/O. |
462 | */ | 462 | */ |
463 | 463 | ||
464 | ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); | 464 | ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); |
465 | ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); | 465 | ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); |
466 | ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); | 466 | ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); |
467 | if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { | 467 | if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { |
468 | if (gmux_is_indexed(gmux_data)) { | 468 | if (gmux_is_indexed(gmux_data)) { |
469 | u32 version; | 469 | u32 version; |
470 | mutex_init(&gmux_data->index_lock); | 470 | mutex_init(&gmux_data->index_lock); |
471 | gmux_data->indexed = true; | 471 | gmux_data->indexed = true; |
472 | version = gmux_read32(gmux_data, | 472 | version = gmux_read32(gmux_data, |
473 | GMUX_PORT_VERSION_MAJOR); | 473 | GMUX_PORT_VERSION_MAJOR); |
474 | ver_major = (version >> 24) & 0xff; | 474 | ver_major = (version >> 24) & 0xff; |
475 | ver_minor = (version >> 16) & 0xff; | 475 | ver_minor = (version >> 16) & 0xff; |
476 | ver_release = (version >> 8) & 0xff; | 476 | ver_release = (version >> 8) & 0xff; |
477 | } else { | 477 | } else { |
478 | pr_info("gmux device not present\n"); | 478 | pr_info("gmux device not present\n"); |
479 | ret = -ENODEV; | 479 | ret = -ENODEV; |
480 | goto err_release; | 480 | goto err_release; |
481 | } | 481 | } |
482 | } | 482 | } |
483 | pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, | 483 | pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, |
484 | ver_release, (gmux_data->indexed ? "indexed" : "classic")); | 484 | ver_release, (gmux_data->indexed ? "indexed" : "classic")); |
485 | 485 | ||
486 | memset(&props, 0, sizeof(props)); | 486 | memset(&props, 0, sizeof(props)); |
487 | props.type = BACKLIGHT_PLATFORM; | 487 | props.type = BACKLIGHT_PLATFORM; |
488 | props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); | 488 | props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); |
489 | 489 | ||
490 | /* | 490 | /* |
491 | * Currently it's assumed that the maximum brightness is less than | 491 | * Currently it's assumed that the maximum brightness is less than |
492 | * 2^24 for compatibility with old gmux versions. Cap the max | 492 | * 2^24 for compatibility with old gmux versions. Cap the max |
493 | * brightness at this value, but print a warning if the hardware | 493 | * brightness at this value, but print a warning if the hardware |
494 | * reports something higher so that it can be fixed. | 494 | * reports something higher so that it can be fixed. |
495 | */ | 495 | */ |
496 | if (WARN_ON(props.max_brightness > GMUX_MAX_BRIGHTNESS)) | 496 | if (WARN_ON(props.max_brightness > GMUX_MAX_BRIGHTNESS)) |
497 | props.max_brightness = GMUX_MAX_BRIGHTNESS; | 497 | props.max_brightness = GMUX_MAX_BRIGHTNESS; |
498 | 498 | ||
499 | bdev = backlight_device_register("gmux_backlight", &pnp->dev, | 499 | bdev = backlight_device_register("gmux_backlight", &pnp->dev, |
500 | gmux_data, &gmux_bl_ops, &props); | 500 | gmux_data, &gmux_bl_ops, &props); |
501 | if (IS_ERR(bdev)) { | 501 | if (IS_ERR(bdev)) { |
502 | ret = PTR_ERR(bdev); | 502 | ret = PTR_ERR(bdev); |
503 | goto err_release; | 503 | goto err_release; |
504 | } | 504 | } |
505 | 505 | ||
506 | gmux_data->bdev = bdev; | 506 | gmux_data->bdev = bdev; |
507 | bdev->props.brightness = gmux_get_brightness(bdev); | 507 | bdev->props.brightness = gmux_get_brightness(bdev); |
508 | backlight_update_status(bdev); | 508 | backlight_update_status(bdev); |
509 | 509 | ||
510 | /* | 510 | /* |
511 | * The backlight situation on Macs is complicated. If the gmux is | 511 | * The backlight situation on Macs is complicated. If the gmux is |
512 | * present it's the best choice, because it always works for | 512 | * present it's the best choice, because it always works for |
513 | * backlight control and supports more levels than other options. | 513 | * backlight control and supports more levels than other options. |
514 | * Disable the other backlight choices. | 514 | * Disable the other backlight choices. |
515 | */ | 515 | */ |
516 | acpi_video_dmi_promote_vendor(); | 516 | acpi_video_dmi_promote_vendor(); |
517 | acpi_video_unregister(); | 517 | acpi_video_unregister(); |
518 | apple_bl_unregister(); | 518 | apple_bl_unregister(); |
519 | 519 | ||
520 | gmux_data->power_state = VGA_SWITCHEROO_ON; | 520 | gmux_data->power_state = VGA_SWITCHEROO_ON; |
521 | 521 | ||
522 | gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); | 522 | gmux_data->dhandle = ACPI_HANDLE(&pnp->dev); |
523 | if (!gmux_data->dhandle) { | 523 | if (!gmux_data->dhandle) { |
524 | pr_err("Cannot find acpi handle for pnp device %s\n", | 524 | pr_err("Cannot find acpi handle for pnp device %s\n", |
525 | dev_name(&pnp->dev)); | 525 | dev_name(&pnp->dev)); |
526 | ret = -ENODEV; | 526 | ret = -ENODEV; |
527 | goto err_notify; | 527 | goto err_notify; |
528 | } | 528 | } |
529 | 529 | ||
530 | status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe); | 530 | status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe); |
531 | if (ACPI_SUCCESS(status)) { | 531 | if (ACPI_SUCCESS(status)) { |
532 | gmux_data->gpe = (int)gpe; | 532 | gmux_data->gpe = (int)gpe; |
533 | 533 | ||
534 | status = acpi_install_notify_handler(gmux_data->dhandle, | 534 | status = acpi_install_notify_handler(gmux_data->dhandle, |
535 | ACPI_DEVICE_NOTIFY, | 535 | ACPI_DEVICE_NOTIFY, |
536 | &gmux_notify_handler, pnp); | 536 | &gmux_notify_handler, pnp); |
537 | if (ACPI_FAILURE(status)) { | 537 | if (ACPI_FAILURE(status)) { |
538 | pr_err("Install notify handler failed: %s\n", | 538 | pr_err("Install notify handler failed: %s\n", |
539 | acpi_format_exception(status)); | 539 | acpi_format_exception(status)); |
540 | ret = -ENODEV; | 540 | ret = -ENODEV; |
541 | goto err_notify; | 541 | goto err_notify; |
542 | } | 542 | } |
543 | 543 | ||
544 | status = acpi_enable_gpe(NULL, gmux_data->gpe); | 544 | status = acpi_enable_gpe(NULL, gmux_data->gpe); |
545 | if (ACPI_FAILURE(status)) { | 545 | if (ACPI_FAILURE(status)) { |
546 | pr_err("Cannot enable gpe: %s\n", | 546 | pr_err("Cannot enable gpe: %s\n", |
547 | acpi_format_exception(status)); | 547 | acpi_format_exception(status)); |
548 | goto err_enable_gpe; | 548 | goto err_enable_gpe; |
549 | } | 549 | } |
550 | } else { | 550 | } else { |
551 | pr_warn("No GPE found for gmux\n"); | 551 | pr_warn("No GPE found for gmux\n"); |
552 | gmux_data->gpe = -1; | 552 | gmux_data->gpe = -1; |
553 | } | 553 | } |
554 | 554 | ||
555 | if (vga_switcheroo_register_handler(&gmux_handler)) { | 555 | if (vga_switcheroo_register_handler(&gmux_handler)) { |
556 | ret = -ENODEV; | 556 | ret = -ENODEV; |
557 | goto err_register_handler; | 557 | goto err_register_handler; |
558 | } | 558 | } |
559 | 559 | ||
560 | init_completion(&gmux_data->powerchange_done); | 560 | init_completion(&gmux_data->powerchange_done); |
561 | apple_gmux_data = gmux_data; | 561 | apple_gmux_data = gmux_data; |
562 | gmux_enable_interrupts(gmux_data); | 562 | gmux_enable_interrupts(gmux_data); |
563 | 563 | ||
564 | return 0; | 564 | return 0; |
565 | 565 | ||
566 | err_register_handler: | 566 | err_register_handler: |
567 | if (gmux_data->gpe >= 0) | 567 | if (gmux_data->gpe >= 0) |
568 | acpi_disable_gpe(NULL, gmux_data->gpe); | 568 | acpi_disable_gpe(NULL, gmux_data->gpe); |
569 | err_enable_gpe: | 569 | err_enable_gpe: |
570 | if (gmux_data->gpe >= 0) | 570 | if (gmux_data->gpe >= 0) |
571 | acpi_remove_notify_handler(gmux_data->dhandle, | 571 | acpi_remove_notify_handler(gmux_data->dhandle, |
572 | ACPI_DEVICE_NOTIFY, | 572 | ACPI_DEVICE_NOTIFY, |
573 | &gmux_notify_handler); | 573 | &gmux_notify_handler); |
574 | err_notify: | 574 | err_notify: |
575 | backlight_device_unregister(bdev); | 575 | backlight_device_unregister(bdev); |
576 | err_release: | 576 | err_release: |
577 | release_region(gmux_data->iostart, gmux_data->iolen); | 577 | release_region(gmux_data->iostart, gmux_data->iolen); |
578 | err_free: | 578 | err_free: |
579 | kfree(gmux_data); | 579 | kfree(gmux_data); |
580 | return ret; | 580 | return ret; |
581 | } | 581 | } |
582 | 582 | ||
583 | static void gmux_remove(struct pnp_dev *pnp) | 583 | static void gmux_remove(struct pnp_dev *pnp) |
584 | { | 584 | { |
585 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); | 585 | struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); |
586 | 586 | ||
587 | vga_switcheroo_unregister_handler(); | 587 | vga_switcheroo_unregister_handler(); |
588 | gmux_disable_interrupts(gmux_data); | 588 | gmux_disable_interrupts(gmux_data); |
589 | if (gmux_data->gpe >= 0) { | 589 | if (gmux_data->gpe >= 0) { |
590 | acpi_disable_gpe(NULL, gmux_data->gpe); | 590 | acpi_disable_gpe(NULL, gmux_data->gpe); |
591 | acpi_remove_notify_handler(gmux_data->dhandle, | 591 | acpi_remove_notify_handler(gmux_data->dhandle, |
592 | ACPI_DEVICE_NOTIFY, | 592 | ACPI_DEVICE_NOTIFY, |
593 | &gmux_notify_handler); | 593 | &gmux_notify_handler); |
594 | } | 594 | } |
595 | 595 | ||
596 | backlight_device_unregister(gmux_data->bdev); | 596 | backlight_device_unregister(gmux_data->bdev); |
597 | 597 | ||
598 | release_region(gmux_data->iostart, gmux_data->iolen); | 598 | release_region(gmux_data->iostart, gmux_data->iolen); |
599 | apple_gmux_data = NULL; | 599 | apple_gmux_data = NULL; |
600 | kfree(gmux_data); | 600 | kfree(gmux_data); |
601 | 601 | ||
602 | acpi_video_dmi_demote_vendor(); | 602 | acpi_video_dmi_demote_vendor(); |
603 | acpi_video_register(); | 603 | acpi_video_register(); |
604 | apple_bl_register(); | 604 | apple_bl_register(); |
605 | } | 605 | } |
606 | 606 | ||
607 | static const struct pnp_device_id gmux_device_ids[] = { | 607 | static const struct pnp_device_id gmux_device_ids[] = { |
608 | {"APP000B", 0}, | 608 | {"APP000B", 0}, |
609 | {"", 0} | 609 | {"", 0} |
610 | }; | 610 | }; |
611 | 611 | ||
612 | static const struct dev_pm_ops gmux_dev_pm_ops = { | 612 | static const struct dev_pm_ops gmux_dev_pm_ops = { |
613 | .suspend = gmux_suspend, | 613 | .suspend = gmux_suspend, |
614 | .resume = gmux_resume, | 614 | .resume = gmux_resume, |
615 | }; | 615 | }; |
616 | 616 | ||
617 | static struct pnp_driver gmux_pnp_driver = { | 617 | static struct pnp_driver gmux_pnp_driver = { |
618 | .name = "apple-gmux", | 618 | .name = "apple-gmux", |
619 | .probe = gmux_probe, | 619 | .probe = gmux_probe, |
620 | .remove = gmux_remove, | 620 | .remove = gmux_remove, |
621 | .id_table = gmux_device_ids, | 621 | .id_table = gmux_device_ids, |
622 | .driver = { | 622 | .driver = { |
623 | .pm = &gmux_dev_pm_ops, | 623 | .pm = &gmux_dev_pm_ops, |
624 | }, | 624 | }, |
625 | }; | 625 | }; |
626 | 626 | ||
627 | static int __init apple_gmux_init(void) | 627 | static int __init apple_gmux_init(void) |
628 | { | 628 | { |
629 | return pnp_register_driver(&gmux_pnp_driver); | 629 | return pnp_register_driver(&gmux_pnp_driver); |
630 | } | 630 | } |
631 | 631 | ||
632 | static void __exit apple_gmux_exit(void) | 632 | static void __exit apple_gmux_exit(void) |
633 | { | 633 | { |
634 | pnp_unregister_driver(&gmux_pnp_driver); | 634 | pnp_unregister_driver(&gmux_pnp_driver); |
635 | } | 635 | } |
636 | 636 | ||
637 | module_init(apple_gmux_init); | 637 | module_init(apple_gmux_init); |
638 | module_exit(apple_gmux_exit); | 638 | module_exit(apple_gmux_exit); |
639 | 639 | ||
640 | MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>"); | 640 | MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>"); |
641 | MODULE_DESCRIPTION("Apple Gmux Driver"); | 641 | MODULE_DESCRIPTION("Apple Gmux Driver"); |
642 | MODULE_LICENSE("GPL"); | 642 | MODULE_LICENSE("GPL"); |
643 | MODULE_DEVICE_TABLE(pnp, gmux_device_ids); | 643 | MODULE_DEVICE_TABLE(pnp, gmux_device_ids); |
644 | 644 |
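The gmux_probe() path above is a typical consumer of the handle that ACPI_HANDLE() returns: it looks up the companion handle of the PNP device, evaluates the gmux-specific "GMGP" method to find the GPE number, and installs an ACPI notify handler. The stand-alone sketch below illustrates that pattern only; the example_* names are not part of this commit, and the error handling is simplified (the real driver keeps going without a GPE and unwinds the notify handler on failure).

#include <linux/acpi.h>
#include <linux/pnp.h>

/* Illustrative callback; the real driver uses gmux_notify_handler(). */
static void example_notify(acpi_handle handle, u32 event, void *context)
{
	pr_debug("ACPI notify event 0x%x\n", event);
}

static int example_wire_up_acpi(struct pnp_dev *pnp)
{
	acpi_handle handle = ACPI_HANDLE(&pnp->dev); /* was DEVICE_ACPI_HANDLE() */
	unsigned long long gpe;
	acpi_status status;

	if (!handle)
		return -ENODEV;

	/* "GMGP" is the gmux method that reports the GPE number. */
	status = acpi_evaluate_integer(handle, "GMGP", NULL, &gpe);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY,
					     example_notify, pnp);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	status = acpi_enable_gpe(NULL, (u32)gpe);
	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}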
drivers/pnp/pnpacpi/core.c
1 | /* | 1 | /* |
2 | * pnpacpi -- PnP ACPI driver | 2 | * pnpacpi -- PnP ACPI driver |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> | 4 | * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> |
5 | * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> | 5 | * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the | 8 | * under the terms of the GNU General Public License as published by the |
9 | * Free Software Foundation; either version 2, or (at your option) any | 9 | * Free Software Foundation; either version 2, or (at your option) any |
10 | * later version. | 10 | * later version. |
11 | * | 11 | * |
12 | * This program is distributed in the hope that it will be useful, but | 12 | * This program is distributed in the hope that it will be useful, but |
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/export.h> | 22 | #include <linux/export.h> |
23 | #include <linux/acpi.h> | 23 | #include <linux/acpi.h> |
24 | #include <linux/pnp.h> | 24 | #include <linux/pnp.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/mod_devicetable.h> | 26 | #include <linux/mod_devicetable.h> |
27 | #include <acpi/acpi_bus.h> | 27 | #include <acpi/acpi_bus.h> |
28 | 28 | ||
29 | #include "../base.h" | 29 | #include "../base.h" |
30 | #include "pnpacpi.h" | 30 | #include "pnpacpi.h" |
31 | 31 | ||
32 | static int num; | 32 | static int num; |
33 | 33 | ||
34 | /* We only need to blacklist devices that already have an ACPI driver that | 34 | /* We only need to blacklist devices that already have an ACPI driver that |
35 | * can't use the PNP layer. We don't need to blacklist devices that are | 35 | * can't use the PNP layer. We don't need to blacklist devices that are |
36 | * directly used by the kernel (PCI root, ...), as that is harmless and they | 36 | * directly used by the kernel (PCI root, ...), as that is harmless and they |
37 | * were already present in pnpbios. But there is an exception for devices | 37 | * were already present in pnpbios. But there is an exception for devices |
38 | * that have IRQs (PIC, Timer) because we call acpi_register_gsi(). | 38 | * that have IRQs (PIC, Timer) because we call acpi_register_gsi(). |
39 | * Finally, only devices that have a _CRS method need to be in this list. | 39 | * Finally, only devices that have a _CRS method need to be in this list. |
40 | */ | 40 | */ |
41 | static struct acpi_device_id excluded_id_list[] __initdata = { | 41 | static struct acpi_device_id excluded_id_list[] __initdata = { |
42 | {"PNP0C09", 0}, /* EC */ | 42 | {"PNP0C09", 0}, /* EC */ |
43 | {"PNP0C0F", 0}, /* Link device */ | 43 | {"PNP0C0F", 0}, /* Link device */ |
44 | {"PNP0000", 0}, /* PIC */ | 44 | {"PNP0000", 0}, /* PIC */ |
45 | {"PNP0100", 0}, /* Timer */ | 45 | {"PNP0100", 0}, /* Timer */ |
46 | {"", 0}, | 46 | {"", 0}, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static inline int __init is_exclusive_device(struct acpi_device *dev) | 49 | static inline int __init is_exclusive_device(struct acpi_device *dev) |
50 | { | 50 | { |
51 | return (!acpi_match_device_ids(dev, excluded_id_list)); | 51 | return (!acpi_match_device_ids(dev, excluded_id_list)); |
52 | } | 52 | } |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Compatible Device IDs | 55 | * Compatible Device IDs |
56 | */ | 56 | */ |
57 | #define TEST_HEX(c) \ | 57 | #define TEST_HEX(c) \ |
58 | if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ | 58 | if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ |
59 | return 0 | 59 | return 0 |
60 | #define TEST_ALPHA(c) \ | 60 | #define TEST_ALPHA(c) \ |
61 | if (!('A' <= (c) && (c) <= 'Z')) \ | 61 | if (!('A' <= (c) && (c) <= 'Z')) \ |
62 | return 0 | 62 | return 0 |
63 | static int __init ispnpidacpi(const char *id) | 63 | static int __init ispnpidacpi(const char *id) |
64 | { | 64 | { |
65 | TEST_ALPHA(id[0]); | 65 | TEST_ALPHA(id[0]); |
66 | TEST_ALPHA(id[1]); | 66 | TEST_ALPHA(id[1]); |
67 | TEST_ALPHA(id[2]); | 67 | TEST_ALPHA(id[2]); |
68 | TEST_HEX(id[3]); | 68 | TEST_HEX(id[3]); |
69 | TEST_HEX(id[4]); | 69 | TEST_HEX(id[4]); |
70 | TEST_HEX(id[5]); | 70 | TEST_HEX(id[5]); |
71 | TEST_HEX(id[6]); | 71 | TEST_HEX(id[6]); |
72 | if (id[7] != '\0') | 72 | if (id[7] != '\0') |
73 | return 0; | 73 | return 0; |
74 | return 1; | 74 | return 1; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int pnpacpi_get_resources(struct pnp_dev *dev) | 77 | static int pnpacpi_get_resources(struct pnp_dev *dev) |
78 | { | 78 | { |
79 | pnp_dbg(&dev->dev, "get resources\n"); | 79 | pnp_dbg(&dev->dev, "get resources\n"); |
80 | return pnpacpi_parse_allocated_resource(dev); | 80 | return pnpacpi_parse_allocated_resource(dev); |
81 | } | 81 | } |
82 | 82 | ||
83 | static int pnpacpi_set_resources(struct pnp_dev *dev) | 83 | static int pnpacpi_set_resources(struct pnp_dev *dev) |
84 | { | 84 | { |
85 | struct acpi_device *acpi_dev; | 85 | struct acpi_device *acpi_dev; |
86 | acpi_handle handle; | 86 | acpi_handle handle; |
87 | struct acpi_buffer buffer; | 87 | struct acpi_buffer buffer; |
88 | int ret; | 88 | int ret; |
89 | 89 | ||
90 | pnp_dbg(&dev->dev, "set resources\n"); | 90 | pnp_dbg(&dev->dev, "set resources\n"); |
91 | 91 | ||
92 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 92 | handle = ACPI_HANDLE(&dev->dev); |
93 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 93 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
94 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 94 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
95 | return -ENODEV; | 95 | return -ENODEV; |
96 | } | 96 | } |
97 | 97 | ||
98 | if (WARN_ON_ONCE(acpi_dev != dev->data)) | 98 | if (WARN_ON_ONCE(acpi_dev != dev->data)) |
99 | dev->data = acpi_dev; | 99 | dev->data = acpi_dev; |
100 | 100 | ||
101 | ret = pnpacpi_build_resource_template(dev, &buffer); | 101 | ret = pnpacpi_build_resource_template(dev, &buffer); |
102 | if (ret) | 102 | if (ret) |
103 | return ret; | 103 | return ret; |
104 | ret = pnpacpi_encode_resources(dev, &buffer); | 104 | ret = pnpacpi_encode_resources(dev, &buffer); |
105 | if (ret) { | 105 | if (ret) { |
106 | kfree(buffer.pointer); | 106 | kfree(buffer.pointer); |
107 | return ret; | 107 | return ret; |
108 | } | 108 | } |
109 | if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) | 109 | if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) |
110 | ret = -EINVAL; | 110 | ret = -EINVAL; |
111 | else if (acpi_bus_power_manageable(handle)) | 111 | else if (acpi_bus_power_manageable(handle)) |
112 | ret = acpi_bus_set_power(handle, ACPI_STATE_D0); | 112 | ret = acpi_bus_set_power(handle, ACPI_STATE_D0); |
113 | kfree(buffer.pointer); | 113 | kfree(buffer.pointer); |
114 | return ret; | 114 | return ret; |
115 | } | 115 | } |
116 | 116 | ||
117 | static int pnpacpi_disable_resources(struct pnp_dev *dev) | 117 | static int pnpacpi_disable_resources(struct pnp_dev *dev) |
118 | { | 118 | { |
119 | struct acpi_device *acpi_dev; | 119 | struct acpi_device *acpi_dev; |
120 | acpi_handle handle; | 120 | acpi_handle handle; |
121 | int ret; | 121 | int ret; |
122 | 122 | ||
123 | dev_dbg(&dev->dev, "disable resources\n"); | 123 | dev_dbg(&dev->dev, "disable resources\n"); |
124 | 124 | ||
125 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 125 | handle = ACPI_HANDLE(&dev->dev); |
126 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 126 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
127 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 127 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ | 131 | /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ |
132 | ret = 0; | 132 | ret = 0; |
133 | if (acpi_bus_power_manageable(handle)) | 133 | if (acpi_bus_power_manageable(handle)) |
134 | acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); | 134 | acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); |
135 | /* continue even if acpi_bus_set_power() fails */ | 135 | /* continue even if acpi_bus_set_power() fails */ |
136 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) | 136 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) |
137 | ret = -ENODEV; | 137 | ret = -ENODEV; |
138 | return ret; | 138 | return ret; |
139 | } | 139 | } |
140 | 140 | ||
141 | #ifdef CONFIG_ACPI_SLEEP | 141 | #ifdef CONFIG_ACPI_SLEEP |
142 | static bool pnpacpi_can_wakeup(struct pnp_dev *dev) | 142 | static bool pnpacpi_can_wakeup(struct pnp_dev *dev) |
143 | { | 143 | { |
144 | struct acpi_device *acpi_dev; | 144 | struct acpi_device *acpi_dev; |
145 | acpi_handle handle; | 145 | acpi_handle handle; |
146 | 146 | ||
147 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 147 | handle = ACPI_HANDLE(&dev->dev); |
148 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 148 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
149 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 149 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
150 | return false; | 150 | return false; |
151 | } | 151 | } |
152 | 152 | ||
153 | return acpi_bus_can_wakeup(handle); | 153 | return acpi_bus_can_wakeup(handle); |
154 | } | 154 | } |
155 | 155 | ||
156 | static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) | 156 | static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) |
157 | { | 157 | { |
158 | struct acpi_device *acpi_dev; | 158 | struct acpi_device *acpi_dev; |
159 | acpi_handle handle; | 159 | acpi_handle handle; |
160 | int error = 0; | 160 | int error = 0; |
161 | 161 | ||
162 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 162 | handle = ACPI_HANDLE(&dev->dev); |
163 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 163 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
164 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 164 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | 167 | ||
168 | if (device_can_wakeup(&dev->dev)) { | 168 | if (device_can_wakeup(&dev->dev)) { |
169 | error = acpi_pm_device_sleep_wake(&dev->dev, | 169 | error = acpi_pm_device_sleep_wake(&dev->dev, |
170 | device_may_wakeup(&dev->dev)); | 170 | device_may_wakeup(&dev->dev)); |
171 | if (error) | 171 | if (error) |
172 | return error; | 172 | return error; |
173 | } | 173 | } |
174 | 174 | ||
175 | if (acpi_bus_power_manageable(handle)) { | 175 | if (acpi_bus_power_manageable(handle)) { |
176 | int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL, | 176 | int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL, |
177 | ACPI_STATE_D3_COLD); | 177 | ACPI_STATE_D3_COLD); |
178 | if (power_state < 0) | 178 | if (power_state < 0) |
179 | power_state = (state.event == PM_EVENT_ON) ? | 179 | power_state = (state.event == PM_EVENT_ON) ? |
180 | ACPI_STATE_D0 : ACPI_STATE_D3_COLD; | 180 | ACPI_STATE_D0 : ACPI_STATE_D3_COLD; |
181 | 181 | ||
182 | /* | 182 | /* |
183 | * acpi_bus_set_power() often fails (keyboard port can't be | 183 | * acpi_bus_set_power() often fails (keyboard port can't be |
184 | * powered-down?), and in any case, our return value is ignored | 184 | * powered-down?), and in any case, our return value is ignored |
185 | * by pnp_bus_suspend(). Hence we don't revert the wakeup | 185 | * by pnp_bus_suspend(). Hence we don't revert the wakeup |
186 | * setting if the set_power fails. | 186 | * setting if the set_power fails. |
187 | */ | 187 | */ |
188 | error = acpi_bus_set_power(handle, power_state); | 188 | error = acpi_bus_set_power(handle, power_state); |
189 | } | 189 | } |
190 | 190 | ||
191 | return error; | 191 | return error; |
192 | } | 192 | } |
193 | 193 | ||
194 | static int pnpacpi_resume(struct pnp_dev *dev) | 194 | static int pnpacpi_resume(struct pnp_dev *dev) |
195 | { | 195 | { |
196 | struct acpi_device *acpi_dev; | 196 | struct acpi_device *acpi_dev; |
197 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 197 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
198 | int error = 0; | 198 | int error = 0; |
199 | 199 | ||
200 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 200 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
201 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 201 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
202 | return -ENODEV; | 202 | return -ENODEV; |
203 | } | 203 | } |
204 | 204 | ||
205 | if (device_may_wakeup(&dev->dev)) | 205 | if (device_may_wakeup(&dev->dev)) |
206 | acpi_pm_device_sleep_wake(&dev->dev, false); | 206 | acpi_pm_device_sleep_wake(&dev->dev, false); |
207 | 207 | ||
208 | if (acpi_bus_power_manageable(handle)) | 208 | if (acpi_bus_power_manageable(handle)) |
209 | error = acpi_bus_set_power(handle, ACPI_STATE_D0); | 209 | error = acpi_bus_set_power(handle, ACPI_STATE_D0); |
210 | 210 | ||
211 | return error; | 211 | return error; |
212 | } | 212 | } |
213 | #endif | 213 | #endif |
214 | 214 | ||
215 | struct pnp_protocol pnpacpi_protocol = { | 215 | struct pnp_protocol pnpacpi_protocol = { |
216 | .name = "Plug and Play ACPI", | 216 | .name = "Plug and Play ACPI", |
217 | .get = pnpacpi_get_resources, | 217 | .get = pnpacpi_get_resources, |
218 | .set = pnpacpi_set_resources, | 218 | .set = pnpacpi_set_resources, |
219 | .disable = pnpacpi_disable_resources, | 219 | .disable = pnpacpi_disable_resources, |
220 | #ifdef CONFIG_ACPI_SLEEP | 220 | #ifdef CONFIG_ACPI_SLEEP |
221 | .can_wakeup = pnpacpi_can_wakeup, | 221 | .can_wakeup = pnpacpi_can_wakeup, |
222 | .suspend = pnpacpi_suspend, | 222 | .suspend = pnpacpi_suspend, |
223 | .resume = pnpacpi_resume, | 223 | .resume = pnpacpi_resume, |
224 | #endif | 224 | #endif |
225 | }; | 225 | }; |
226 | EXPORT_SYMBOL(pnpacpi_protocol); | 226 | EXPORT_SYMBOL(pnpacpi_protocol); |
227 | 227 | ||
228 | static char *__init pnpacpi_get_id(struct acpi_device *device) | 228 | static char *__init pnpacpi_get_id(struct acpi_device *device) |
229 | { | 229 | { |
230 | struct acpi_hardware_id *id; | 230 | struct acpi_hardware_id *id; |
231 | 231 | ||
232 | list_for_each_entry(id, &device->pnp.ids, list) { | 232 | list_for_each_entry(id, &device->pnp.ids, list) { |
233 | if (ispnpidacpi(id->id)) | 233 | if (ispnpidacpi(id->id)) |
234 | return id->id; | 234 | return id->id; |
235 | } | 235 | } |
236 | 236 | ||
237 | return NULL; | 237 | return NULL; |
238 | } | 238 | } |
239 | 239 | ||
240 | static int __init pnpacpi_add_device(struct acpi_device *device) | 240 | static int __init pnpacpi_add_device(struct acpi_device *device) |
241 | { | 241 | { |
242 | struct pnp_dev *dev; | 242 | struct pnp_dev *dev; |
243 | char *pnpid; | 243 | char *pnpid; |
244 | struct acpi_hardware_id *id; | 244 | struct acpi_hardware_id *id; |
245 | 245 | ||
246 | /* Skip devices that are already bound */ | 246 | /* Skip devices that are already bound */ |
247 | if (device->physical_node_count) | 247 | if (device->physical_node_count) |
248 | return 0; | 248 | return 0; |
249 | 249 | ||
250 | /* | 250 | /* |
251 | * If a PNPACPI device is not present, the device | 251 | * If a PNPACPI device is not present, the device |
252 | * driver should not be loaded. | 252 | * driver should not be loaded. |
253 | */ | 253 | */ |
254 | if (!acpi_has_method(device->handle, "_CRS")) | 254 | if (!acpi_has_method(device->handle, "_CRS")) |
255 | return 0; | 255 | return 0; |
256 | 256 | ||
257 | pnpid = pnpacpi_get_id(device); | 257 | pnpid = pnpacpi_get_id(device); |
258 | if (!pnpid) | 258 | if (!pnpid) |
259 | return 0; | 259 | return 0; |
260 | 260 | ||
261 | if (is_exclusive_device(device) || !device->status.present) | 261 | if (is_exclusive_device(device) || !device->status.present) |
262 | return 0; | 262 | return 0; |
263 | 263 | ||
264 | dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); | 264 | dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); |
265 | if (!dev) | 265 | if (!dev) |
266 | return -ENOMEM; | 266 | return -ENOMEM; |
267 | 267 | ||
268 | dev->data = device; | 268 | dev->data = device; |
269 | /* .enabled means the device can decode the resources */ | 269 | /* .enabled means the device can decode the resources */ |
270 | dev->active = device->status.enabled; | 270 | dev->active = device->status.enabled; |
271 | if (acpi_has_method(device->handle, "_SRS")) | 271 | if (acpi_has_method(device->handle, "_SRS")) |
272 | dev->capabilities |= PNP_CONFIGURABLE; | 272 | dev->capabilities |= PNP_CONFIGURABLE; |
273 | dev->capabilities |= PNP_READ; | 273 | dev->capabilities |= PNP_READ; |
274 | if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE)) | 274 | if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE)) |
275 | dev->capabilities |= PNP_WRITE; | 275 | dev->capabilities |= PNP_WRITE; |
276 | if (device->flags.removable) | 276 | if (device->flags.removable) |
277 | dev->capabilities |= PNP_REMOVABLE; | 277 | dev->capabilities |= PNP_REMOVABLE; |
278 | if (acpi_has_method(device->handle, "_DIS")) | 278 | if (acpi_has_method(device->handle, "_DIS")) |
279 | dev->capabilities |= PNP_DISABLE; | 279 | dev->capabilities |= PNP_DISABLE; |
280 | 280 | ||
281 | if (strlen(acpi_device_name(device))) | 281 | if (strlen(acpi_device_name(device))) |
282 | strncpy(dev->name, acpi_device_name(device), sizeof(dev->name)); | 282 | strncpy(dev->name, acpi_device_name(device), sizeof(dev->name)); |
283 | else | 283 | else |
284 | strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name)); | 284 | strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name)); |
285 | 285 | ||
286 | if (dev->active) | 286 | if (dev->active) |
287 | pnpacpi_parse_allocated_resource(dev); | 287 | pnpacpi_parse_allocated_resource(dev); |
288 | 288 | ||
289 | if (dev->capabilities & PNP_CONFIGURABLE) | 289 | if (dev->capabilities & PNP_CONFIGURABLE) |
290 | pnpacpi_parse_resource_option_data(dev); | 290 | pnpacpi_parse_resource_option_data(dev); |
291 | 291 | ||
292 | list_for_each_entry(id, &device->pnp.ids, list) { | 292 | list_for_each_entry(id, &device->pnp.ids, list) { |
293 | if (!strcmp(id->id, pnpid)) | 293 | if (!strcmp(id->id, pnpid)) |
294 | continue; | 294 | continue; |
295 | if (!ispnpidacpi(id->id)) | 295 | if (!ispnpidacpi(id->id)) |
296 | continue; | 296 | continue; |
297 | pnp_add_id(dev, id->id); | 297 | pnp_add_id(dev, id->id); |
298 | } | 298 | } |
299 | 299 | ||
300 | /* clear out the damaged flags */ | 300 | /* clear out the damaged flags */ |
301 | if (!dev->active) | 301 | if (!dev->active) |
302 | pnp_init_resources(dev); | 302 | pnp_init_resources(dev); |
303 | pnp_add_device(dev); | 303 | pnp_add_device(dev); |
304 | num++; | 304 | num++; |
305 | 305 | ||
306 | return AE_OK; | 306 | return AE_OK; |
307 | } | 307 | } |
308 | 308 | ||
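A condensed view of how pnpacpi_add_device() above derives the PnP capability bits from the ACPI device's methods and flags. The pnpacpi_capabilities() helper is purely illustrative (it does not exist in this commit); it only restates the checks already visible in the function above.

static unsigned int pnpacpi_capabilities(struct acpi_device *device)
{
        unsigned int caps = PNP_READ;                   /* always readable            */

        if (acpi_has_method(device->handle, "_SRS"))
                caps |= PNP_CONFIGURABLE;               /* resources can be set       */
        if (device->flags.dynamic_status && (caps & PNP_CONFIGURABLE))
                caps |= PNP_WRITE;                      /* status may change at runtime */
        if (device->flags.removable)
                caps |= PNP_REMOVABLE;
        if (acpi_has_method(device->handle, "_DIS"))
                caps |= PNP_DISABLE;                    /* device can be disabled     */

        return caps;
}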
309 | static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, | 309 | static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, |
310 | u32 lvl, void *context, | 310 | u32 lvl, void *context, |
311 | void **rv) | 311 | void **rv) |
312 | { | 312 | { |
313 | struct acpi_device *device; | 313 | struct acpi_device *device; |
314 | 314 | ||
315 | if (!acpi_bus_get_device(handle, &device)) | 315 | if (!acpi_bus_get_device(handle, &device)) |
316 | pnpacpi_add_device(device); | 316 | pnpacpi_add_device(device); |
317 | else | 317 | else |
318 | return AE_CTRL_DEPTH; | 318 | return AE_CTRL_DEPTH; |
319 | return AE_OK; | 319 | return AE_OK; |
320 | } | 320 | } |
321 | 321 | ||
322 | static int __init acpi_pnp_match(struct device *dev, void *_pnp) | 322 | static int __init acpi_pnp_match(struct device *dev, void *_pnp) |
323 | { | 323 | { |
324 | struct acpi_device *acpi = to_acpi_device(dev); | 324 | struct acpi_device *acpi = to_acpi_device(dev); |
325 | struct pnp_dev *pnp = _pnp; | 325 | struct pnp_dev *pnp = _pnp; |
326 | 326 | ||
327 | /* true means it matched */ | 327 | /* true means it matched */ |
328 | return !acpi->physical_node_count | 328 | return !acpi->physical_node_count |
329 | && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); | 329 | && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); |
330 | } | 330 | } |
331 | 331 | ||
332 | static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) | 332 | static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) |
333 | { | 333 | { |
334 | struct device *adev; | 334 | struct device *adev; |
335 | struct acpi_device *acpi; | 335 | struct acpi_device *acpi; |
336 | 336 | ||
337 | adev = bus_find_device(&acpi_bus_type, NULL, | 337 | adev = bus_find_device(&acpi_bus_type, NULL, |
338 | to_pnp_dev(dev), acpi_pnp_match); | 338 | to_pnp_dev(dev), acpi_pnp_match); |
339 | if (!adev) | 339 | if (!adev) |
340 | return -ENODEV; | 340 | return -ENODEV; |
341 | 341 | ||
342 | acpi = to_acpi_device(adev); | 342 | acpi = to_acpi_device(adev); |
343 | *handle = acpi->handle; | 343 | *handle = acpi->handle; |
344 | put_device(adev); | 344 | put_device(adev); |
345 | return 0; | 345 | return 0; |
346 | } | 346 | } |
347 | 347 | ||
348 | /* complete initialization of a PNPACPI device includes having | 348 | /* complete initialization of a PNPACPI device includes having |
349 | * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. | 349 | * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. |
350 | */ | 350 | */ |
351 | static bool acpi_pnp_bus_match(struct device *dev) | 351 | static bool acpi_pnp_bus_match(struct device *dev) |
352 | { | 352 | { |
353 | return dev->bus == &pnp_bus_type; | 353 | return dev->bus == &pnp_bus_type; |
354 | } | 354 | } |
355 | 355 | ||
356 | static struct acpi_bus_type __initdata acpi_pnp_bus = { | 356 | static struct acpi_bus_type __initdata acpi_pnp_bus = { |
357 | .name = "PNP", | 357 | .name = "PNP", |
358 | .match = acpi_pnp_bus_match, | 358 | .match = acpi_pnp_bus_match, |
359 | .find_device = acpi_pnp_find_device, | 359 | .find_device = acpi_pnp_find_device, |
360 | }; | 360 | }; |
361 | 361 | ||
362 | int pnpacpi_disabled __initdata; | 362 | int pnpacpi_disabled __initdata; |
363 | static int __init pnpacpi_init(void) | 363 | static int __init pnpacpi_init(void) |
364 | { | 364 | { |
365 | if (acpi_disabled || pnpacpi_disabled) { | 365 | if (acpi_disabled || pnpacpi_disabled) { |
366 | printk(KERN_INFO "pnp: PnP ACPI: disabled\n"); | 366 | printk(KERN_INFO "pnp: PnP ACPI: disabled\n"); |
367 | return 0; | 367 | return 0; |
368 | } | 368 | } |
369 | printk(KERN_INFO "pnp: PnP ACPI init\n"); | 369 | printk(KERN_INFO "pnp: PnP ACPI init\n"); |
370 | pnp_register_protocol(&pnpacpi_protocol); | 370 | pnp_register_protocol(&pnpacpi_protocol); |
371 | register_acpi_bus_type(&acpi_pnp_bus); | 371 | register_acpi_bus_type(&acpi_pnp_bus); |
372 | acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL); | 372 | acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL); |
373 | printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num); | 373 | printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num); |
374 | unregister_acpi_bus_type(&acpi_pnp_bus); | 374 | unregister_acpi_bus_type(&acpi_pnp_bus); |
375 | pnp_platform_devices = 1; | 375 | pnp_platform_devices = 1; |
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | fs_initcall(pnpacpi_init); | 379 | fs_initcall(pnpacpi_init); |
380 | 380 | ||
381 | static int __init pnpacpi_setup(char *str) | 381 | static int __init pnpacpi_setup(char *str) |
382 | { | 382 | { |
383 | if (str == NULL) | 383 | if (str == NULL) |
384 | return 1; | 384 | return 1; |
385 | if (!strncmp(str, "off", 3)) | 385 | if (!strncmp(str, "off", 3)) |
386 | pnpacpi_disabled = 1; | 386 | pnpacpi_disabled = 1; |
387 | return 1; | 387 | return 1; |
388 | } | 388 | } |
389 | 389 | ||
390 | __setup("pnpacpi=", pnpacpi_setup); | 390 | __setup("pnpacpi=", pnpacpi_setup); |
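pnpacpi_setup() above backs the "pnpacpi=" kernel command-line option, so booting with "pnpacpi=off" makes pnpacpi_init() bail out early. A minimal stand-alone sketch of the effective behaviour; parse_pnpacpi_arg() is a hypothetical helper, since in the kernel the string is routed to pnpacpi_setup() by the __setup() machinery:

static int pnpacpi_disabled_example;

static void parse_pnpacpi_arg(const char *arg)
{
        /* mirrors the strncmp() check in pnpacpi_setup() */
        if (arg && !strncmp(arg, "off", 3))
                pnpacpi_disabled_example = 1;   /* pnpacpi_init() then only prints
                                                 * "pnp: PnP ACPI: disabled" and returns */
}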
391 | 391 |
drivers/usb/core/hub.c
1 | /* | 1 | /* |
2 | * USB hub driver. | 2 | * USB hub driver. |
3 | * | 3 | * |
4 | * (C) Copyright 1999 Linus Torvalds | 4 | * (C) Copyright 1999 Linus Torvalds |
5 | * (C) Copyright 1999 Johannes Erdfelt | 5 | * (C) Copyright 1999 Johannes Erdfelt |
6 | * (C) Copyright 1999 Gregory P. Smith | 6 | * (C) Copyright 1999 Gregory P. Smith |
7 | * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au) | 7 | * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au) |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/ioctl.h> | 19 | #include <linux/ioctl.h> |
20 | #include <linux/usb.h> | 20 | #include <linux/usb.h> |
21 | #include <linux/usbdevice_fs.h> | 21 | #include <linux/usbdevice_fs.h> |
22 | #include <linux/usb/hcd.h> | 22 | #include <linux/usb/hcd.h> |
23 | #include <linux/usb/otg.h> | 23 | #include <linux/usb/otg.h> |
24 | #include <linux/usb/quirks.h> | 24 | #include <linux/usb/quirks.h> |
25 | #include <linux/kthread.h> | 25 | #include <linux/kthread.h> |
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/freezer.h> | 27 | #include <linux/freezer.h> |
28 | #include <linux/random.h> | 28 | #include <linux/random.h> |
29 | #include <linux/pm_qos.h> | 29 | #include <linux/pm_qos.h> |
30 | 30 | ||
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/byteorder.h> | 32 | #include <asm/byteorder.h> |
33 | 33 | ||
34 | #include "hub.h" | 34 | #include "hub.h" |
35 | 35 | ||
36 | /* if we are in debug mode, always announce new devices */ | 36 | /* if we are in debug mode, always announce new devices */ |
37 | #ifdef DEBUG | 37 | #ifdef DEBUG |
38 | #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES | 38 | #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES |
39 | #define CONFIG_USB_ANNOUNCE_NEW_DEVICES | 39 | #define CONFIG_USB_ANNOUNCE_NEW_DEVICES |
40 | #endif | 40 | #endif |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #define USB_VENDOR_GENESYS_LOGIC 0x05e3 | 43 | #define USB_VENDOR_GENESYS_LOGIC 0x05e3 |
44 | #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 | 44 | #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 |
45 | 45 | ||
46 | static inline int hub_is_superspeed(struct usb_device *hdev) | 46 | static inline int hub_is_superspeed(struct usb_device *hdev) |
47 | { | 47 | { |
48 | return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS); | 48 | return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS); |
49 | } | 49 | } |
50 | 50 | ||
51 | /* Protect struct usb_device->state and ->children members | 51 | /* Protect struct usb_device->state and ->children members |
52 | * Note: Both are also protected by ->dev.sem, except that ->state can | 52 | * Note: Both are also protected by ->dev.sem, except that ->state can |
53 | * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */ | 53 | * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */ |
54 | static DEFINE_SPINLOCK(device_state_lock); | 54 | static DEFINE_SPINLOCK(device_state_lock); |
55 | 55 | ||
56 | /* khubd's worklist and its lock */ | 56 | /* khubd's worklist and its lock */ |
57 | static DEFINE_SPINLOCK(hub_event_lock); | 57 | static DEFINE_SPINLOCK(hub_event_lock); |
58 | static LIST_HEAD(hub_event_list); /* List of hubs needing servicing */ | 58 | static LIST_HEAD(hub_event_list); /* List of hubs needing servicing */ |
59 | 59 | ||
60 | /* Wakes up khubd */ | 60 | /* Wakes up khubd */ |
61 | static DECLARE_WAIT_QUEUE_HEAD(khubd_wait); | 61 | static DECLARE_WAIT_QUEUE_HEAD(khubd_wait); |
62 | 62 | ||
63 | static struct task_struct *khubd_task; | 63 | static struct task_struct *khubd_task; |
64 | 64 | ||
65 | /* cycle leds on hubs that aren't blinking for attention */ | 65 | /* cycle leds on hubs that aren't blinking for attention */ |
66 | static bool blinkenlights = 0; | 66 | static bool blinkenlights = 0; |
67 | module_param (blinkenlights, bool, S_IRUGO); | 67 | module_param (blinkenlights, bool, S_IRUGO); |
68 | MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs"); | 68 | MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs"); |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about | 71 | * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about |
72 | * 10 seconds to send reply for the initial 64-byte descriptor request. | 72 | * 10 seconds to send reply for the initial 64-byte descriptor request. |
73 | */ | 73 | */ |
74 | /* define initial 64-byte descriptor request timeout in milliseconds */ | 74 | /* define initial 64-byte descriptor request timeout in milliseconds */ |
75 | static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT; | 75 | static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT; |
76 | module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR); | 76 | module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR); |
77 | MODULE_PARM_DESC(initial_descriptor_timeout, | 77 | MODULE_PARM_DESC(initial_descriptor_timeout, |
78 | "initial 64-byte descriptor request timeout in milliseconds " | 78 | "initial 64-byte descriptor request timeout in milliseconds " |
79 | "(default 5000 - 5.0 seconds)"); | 79 | "(default 5000 - 5.0 seconds)"); |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * As of 2.6.10 we introduce a new USB device initialization scheme which | 82 | * As of 2.6.10 we introduce a new USB device initialization scheme which |
83 | * closely resembles the way Windows works. Hopefully it will be compatible | 83 | * closely resembles the way Windows works. Hopefully it will be compatible |
84 | * with a wider range of devices than the old scheme. However some previously | 84 | * with a wider range of devices than the old scheme. However some previously |
85 | * working devices may start giving rise to "device not accepting address" | 85 | * working devices may start giving rise to "device not accepting address" |
86 | * errors; if that happens the user can try the old scheme by adjusting the | 86 | * errors; if that happens the user can try the old scheme by adjusting the |
87 | * following module parameters. | 87 | * following module parameters. |
88 | * | 88 | * |
89 | * For maximum flexibility there are two boolean parameters to control the | 89 | * For maximum flexibility there are two boolean parameters to control the |
90 | * hub driver's behavior. On the first initialization attempt, if the | 90 | * hub driver's behavior. On the first initialization attempt, if the |
91 | * "old_scheme_first" parameter is set then the old scheme will be used, | 91 | * "old_scheme_first" parameter is set then the old scheme will be used, |
92 | * otherwise the new scheme is used. If that fails and "use_both_schemes" | 92 | * otherwise the new scheme is used. If that fails and "use_both_schemes" |
93 | * is set, then the driver will make another attempt, using the other scheme. | 93 | * is set, then the driver will make another attempt, using the other scheme. |
94 | */ | 94 | */ |
95 | static bool old_scheme_first = 0; | 95 | static bool old_scheme_first = 0; |
96 | module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR); | 96 | module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR); |
97 | MODULE_PARM_DESC(old_scheme_first, | 97 | MODULE_PARM_DESC(old_scheme_first, |
98 | "start with the old device initialization scheme"); | 98 | "start with the old device initialization scheme"); |
99 | 99 | ||
100 | static bool use_both_schemes = 1; | 100 | static bool use_both_schemes = 1; |
101 | module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR); | 101 | module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR); |
102 | MODULE_PARM_DESC(use_both_schemes, | 102 | MODULE_PARM_DESC(use_both_schemes, |
103 | "try the other device initialization scheme if the " | 103 | "try the other device initialization scheme if the " |
104 | "first one fails"); | 104 | "first one fails"); |
105 | 105 | ||
106 | /* Mutual exclusion for EHCI CF initialization. This interferes with | 106 | /* Mutual exclusion for EHCI CF initialization. This interferes with |
107 | * port reset on some companion controllers. | 107 | * port reset on some companion controllers. |
108 | */ | 108 | */ |
109 | DECLARE_RWSEM(ehci_cf_port_reset_rwsem); | 109 | DECLARE_RWSEM(ehci_cf_port_reset_rwsem); |
110 | EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); | 110 | EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); |
111 | 111 | ||
112 | #define HUB_DEBOUNCE_TIMEOUT 2000 | 112 | #define HUB_DEBOUNCE_TIMEOUT 2000 |
113 | #define HUB_DEBOUNCE_STEP 25 | 113 | #define HUB_DEBOUNCE_STEP 25 |
114 | #define HUB_DEBOUNCE_STABLE 100 | 114 | #define HUB_DEBOUNCE_STABLE 100 |
115 | 115 | ||
116 | static int usb_reset_and_verify_device(struct usb_device *udev); | 116 | static int usb_reset_and_verify_device(struct usb_device *udev); |
117 | 117 | ||
118 | static inline char *portspeed(struct usb_hub *hub, int portstatus) | 118 | static inline char *portspeed(struct usb_hub *hub, int portstatus) |
119 | { | 119 | { |
120 | if (hub_is_superspeed(hub->hdev)) | 120 | if (hub_is_superspeed(hub->hdev)) |
121 | return "5.0 Gb/s"; | 121 | return "5.0 Gb/s"; |
122 | if (portstatus & USB_PORT_STAT_HIGH_SPEED) | 122 | if (portstatus & USB_PORT_STAT_HIGH_SPEED) |
123 | return "480 Mb/s"; | 123 | return "480 Mb/s"; |
124 | else if (portstatus & USB_PORT_STAT_LOW_SPEED) | 124 | else if (portstatus & USB_PORT_STAT_LOW_SPEED) |
125 | return "1.5 Mb/s"; | 125 | return "1.5 Mb/s"; |
126 | else | 126 | else |
127 | return "12 Mb/s"; | 127 | return "12 Mb/s"; |
128 | } | 128 | } |
129 | 129 | ||
130 | /* Note that hdev or one of its children must be locked! */ | 130 | /* Note that hdev or one of its children must be locked! */ |
131 | struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) | 131 | struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) |
132 | { | 132 | { |
133 | if (!hdev || !hdev->actconfig || !hdev->maxchild) | 133 | if (!hdev || !hdev->actconfig || !hdev->maxchild) |
134 | return NULL; | 134 | return NULL; |
135 | return usb_get_intfdata(hdev->actconfig->interface[0]); | 135 | return usb_get_intfdata(hdev->actconfig->interface[0]); |
136 | } | 136 | } |
137 | 137 | ||
138 | int usb_device_supports_lpm(struct usb_device *udev) | 138 | int usb_device_supports_lpm(struct usb_device *udev) |
139 | { | 139 | { |
140 | /* USB 2.1 (and greater) devices indicate LPM support through | 140 | /* USB 2.1 (and greater) devices indicate LPM support through |
141 | * their USB 2.0 Extended Capabilities BOS descriptor. | 141 | * their USB 2.0 Extended Capabilities BOS descriptor. |
142 | */ | 142 | */ |
143 | if (udev->speed == USB_SPEED_HIGH) { | 143 | if (udev->speed == USB_SPEED_HIGH) { |
144 | if (udev->bos->ext_cap && | 144 | if (udev->bos->ext_cap && |
145 | (USB_LPM_SUPPORT & | 145 | (USB_LPM_SUPPORT & |
146 | le32_to_cpu(udev->bos->ext_cap->bmAttributes))) | 146 | le32_to_cpu(udev->bos->ext_cap->bmAttributes))) |
147 | return 1; | 147 | return 1; |
148 | return 0; | 148 | return 0; |
149 | } | 149 | } |
150 | 150 | ||
151 | /* All USB 3.0 devices must support LPM, but we need their max exit latency | 151 | /* All USB 3.0 devices must support LPM, but we need their max exit latency |
152 | * information from the SuperSpeed Extended Capabilities BOS descriptor. | 152 | * information from the SuperSpeed Extended Capabilities BOS descriptor. |
153 | */ | 153 | */ |
154 | if (!udev->bos->ss_cap) { | 154 | if (!udev->bos->ss_cap) { |
155 | dev_warn(&udev->dev, "No LPM exit latency info found. " | 155 | dev_warn(&udev->dev, "No LPM exit latency info found. " |
156 | "Power management will be impacted.\n"); | 156 | "Power management will be impacted.\n"); |
157 | return 0; | 157 | return 0; |
158 | } | 158 | } |
159 | 159 | ||
160 | /* udev is root hub */ | 160 | /* udev is root hub */ |
161 | if (!udev->parent) | 161 | if (!udev->parent) |
162 | return 1; | 162 | return 1; |
163 | 163 | ||
164 | if (udev->parent->lpm_capable) | 164 | if (udev->parent->lpm_capable) |
165 | return 1; | 165 | return 1; |
166 | 166 | ||
167 | dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. " | 167 | dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. " |
168 | "Power management will be impacted.\n"); | 168 | "Power management will be impacted.\n"); |
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | /* | 172 | /* |
173 | * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from | 173 | * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from |
174 | * either U1 or U2. | 174 | * either U1 or U2. |
175 | */ | 175 | */ |
176 | static void usb_set_lpm_mel(struct usb_device *udev, | 176 | static void usb_set_lpm_mel(struct usb_device *udev, |
177 | struct usb3_lpm_parameters *udev_lpm_params, | 177 | struct usb3_lpm_parameters *udev_lpm_params, |
178 | unsigned int udev_exit_latency, | 178 | unsigned int udev_exit_latency, |
179 | struct usb_hub *hub, | 179 | struct usb_hub *hub, |
180 | struct usb3_lpm_parameters *hub_lpm_params, | 180 | struct usb3_lpm_parameters *hub_lpm_params, |
181 | unsigned int hub_exit_latency) | 181 | unsigned int hub_exit_latency) |
182 | { | 182 | { |
183 | unsigned int total_mel; | 183 | unsigned int total_mel; |
184 | unsigned int device_mel; | 184 | unsigned int device_mel; |
185 | unsigned int hub_mel; | 185 | unsigned int hub_mel; |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Calculate the time it takes to transition all links from the roothub | 188 | * Calculate the time it takes to transition all links from the roothub |
189 | * to the parent hub into U0. The parent hub must then decode the | 189 | * to the parent hub into U0. The parent hub must then decode the |
190 | * packet (hub header decode latency) to figure out which port it was | 190 | * packet (hub header decode latency) to figure out which port it was |
191 | * bound for. | 191 | * bound for. |
192 | * | 192 | * |
193 | * The Hub Header decode latency is expressed in 0.1us intervals (0x1 | 193 | * The Hub Header decode latency is expressed in 0.1us intervals (0x1 |
194 | * means 0.1us). Multiply that by 100 to get nanoseconds. | 194 | * means 0.1us). Multiply that by 100 to get nanoseconds. |
195 | */ | 195 | */ |
196 | total_mel = hub_lpm_params->mel + | 196 | total_mel = hub_lpm_params->mel + |
197 | (hub->descriptor->u.ss.bHubHdrDecLat * 100); | 197 | (hub->descriptor->u.ss.bHubHdrDecLat * 100); |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * How long will it take to transition the downstream hub's port into | 200 | * How long will it take to transition the downstream hub's port into |
201 | * U0? The greater of either the hub exit latency or the device exit | 201 | * U0? The greater of either the hub exit latency or the device exit |
202 | * latency. | 202 | * latency. |
203 | * | 203 | * |
204 | * The BOS U1/U2 exit latencies are expressed in 1us intervals. | 204 | * The BOS U1/U2 exit latencies are expressed in 1us intervals. |
205 | * Multiply that by 1000 to get nanoseconds. | 205 | * Multiply that by 1000 to get nanoseconds. |
206 | */ | 206 | */ |
207 | device_mel = udev_exit_latency * 1000; | 207 | device_mel = udev_exit_latency * 1000; |
208 | hub_mel = hub_exit_latency * 1000; | 208 | hub_mel = hub_exit_latency * 1000; |
209 | if (device_mel > hub_mel) | 209 | if (device_mel > hub_mel) |
210 | total_mel += device_mel; | 210 | total_mel += device_mel; |
211 | else | 211 | else |
212 | total_mel += hub_mel; | 212 | total_mel += hub_mel; |
213 | 213 | ||
214 | udev_lpm_params->mel = total_mel; | 214 | udev_lpm_params->mel = total_mel; |
215 | } | 215 | } |
216 | 216 | ||
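A worked example of the MEL calculation in usb_set_lpm_mel() above, using made-up latencies (no real device implied):

/* Hypothetical numbers, plugged into usb_set_lpm_mel():
 *   hub_lpm_params->mel        = 1200 ns   (MEL already computed for the parent hub)
 *   bHubHdrDecLat              = 0x2       (0.2 us -> 0x2 * 100 = 200 ns)
 *   udev_exit_latency (U1)     = 3 us      -> device_mel = 3000 ns
 *   hub_exit_latency  (U1)     = 2 us      -> hub_mel    = 2000 ns
 *
 *   total_mel = 1200 + 200 + max(3000, 2000) = 4400 ns
 */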
217 | /* | 217 | /* |
218 | * Set the maximum Device to Host Exit Latency (PEL) for the device to initiate | 218 | * Set the maximum Device to Host Exit Latency (PEL) for the device to initiate |
219 | * a transition from either U1 or U2. | 219 | * a transition from either U1 or U2. |
220 | */ | 220 | */ |
221 | static void usb_set_lpm_pel(struct usb_device *udev, | 221 | static void usb_set_lpm_pel(struct usb_device *udev, |
222 | struct usb3_lpm_parameters *udev_lpm_params, | 222 | struct usb3_lpm_parameters *udev_lpm_params, |
223 | unsigned int udev_exit_latency, | 223 | unsigned int udev_exit_latency, |
224 | struct usb_hub *hub, | 224 | struct usb_hub *hub, |
225 | struct usb3_lpm_parameters *hub_lpm_params, | 225 | struct usb3_lpm_parameters *hub_lpm_params, |
226 | unsigned int hub_exit_latency, | 226 | unsigned int hub_exit_latency, |
227 | unsigned int port_to_port_exit_latency) | 227 | unsigned int port_to_port_exit_latency) |
228 | { | 228 | { |
229 | unsigned int first_link_pel; | 229 | unsigned int first_link_pel; |
230 | unsigned int hub_pel; | 230 | unsigned int hub_pel; |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * First, the device sends an LFPS to transition the link between the | 233 | * First, the device sends an LFPS to transition the link between the |
234 | * device and the parent hub into U0. The exit latency is the bigger of | 234 | * device and the parent hub into U0. The exit latency is the bigger of |
235 | * the device exit latency or the hub exit latency. | 235 | * the device exit latency or the hub exit latency. |
236 | */ | 236 | */ |
237 | if (udev_exit_latency > hub_exit_latency) | 237 | if (udev_exit_latency > hub_exit_latency) |
238 | first_link_pel = udev_exit_latency * 1000; | 238 | first_link_pel = udev_exit_latency * 1000; |
239 | else | 239 | else |
240 | first_link_pel = hub_exit_latency * 1000; | 240 | first_link_pel = hub_exit_latency * 1000; |
241 | 241 | ||
242 | /* | 242 | /* |
243 | * When the hub starts to receive the LFPS, there is a slight delay for | 243 | * When the hub starts to receive the LFPS, there is a slight delay for |
244 | * it to figure out that one of the ports is sending an LFPS. Then it | 244 | * it to figure out that one of the ports is sending an LFPS. Then it |
245 | * will forward the LFPS to its upstream link. The exit latency is the | 245 | * will forward the LFPS to its upstream link. The exit latency is the |
246 | * delay, plus the PEL that we calculated for this hub. | 246 | * delay, plus the PEL that we calculated for this hub. |
247 | */ | 247 | */ |
248 | hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel; | 248 | hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel; |
249 | 249 | ||
250 | /* | 250 | /* |
251 | * According to figure C-7 in the USB 3.0 spec, the PEL for this device | 251 | * According to figure C-7 in the USB 3.0 spec, the PEL for this device |
252 | * is the greater of the two exit latencies. | 252 | * is the greater of the two exit latencies. |
253 | */ | 253 | */ |
254 | if (first_link_pel > hub_pel) | 254 | if (first_link_pel > hub_pel) |
255 | udev_lpm_params->pel = first_link_pel; | 255 | udev_lpm_params->pel = first_link_pel; |
256 | else | 256 | else |
257 | udev_lpm_params->pel = hub_pel; | 257 | udev_lpm_params->pel = hub_pel; |
258 | } | 258 | } |
259 | 259 | ||
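A worked example of the PEL calculation in usb_set_lpm_pel() above, again with made-up values:

/* Hypothetical numbers, plugged into usb_set_lpm_pel():
 *   udev_exit_latency          = 3 us
 *   hub_exit_latency           = 2 us
 *   port_to_port_exit_latency  = 1 us
 *   hub_lpm_params->pel        = 2500 ns
 *
 *   first_link_pel = max(3, 2) * 1000  = 3000 ns
 *   hub_pel        = 1 * 1000 + 2500   = 3500 ns
 *   udev pel       = max(3000, 3500)   = 3500 ns
 */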
260 | /* | 260 | /* |
261 | * Set the System Exit Latency (SEL) to indicate the total worst-case time from | 261 | * Set the System Exit Latency (SEL) to indicate the total worst-case time from |
262 | * when a device initiates a transition to U0, until when it will receive the | 262 | * when a device initiates a transition to U0, until when it will receive the |
263 | * first packet from the host controller. | 263 | * first packet from the host controller. |
264 | * | 264 | * |
265 | * Section C.1.5.1 describes the four components to this: | 265 | * Section C.1.5.1 describes the four components to this: |
266 | * - t1: device PEL | 266 | * - t1: device PEL |
267 | * - t2: time for the ERDY to make it from the device to the host. | 267 | * - t2: time for the ERDY to make it from the device to the host. |
268 | * - t3: a host-specific delay to process the ERDY. | 268 | * - t3: a host-specific delay to process the ERDY. |
269 | * - t4: time for the packet to make it from the host to the device. | 269 | * - t4: time for the packet to make it from the host to the device. |
270 | * | 270 | * |
271 | * t3 is specific to both the xHCI host and the platform the host is integrated | 271 | * t3 is specific to both the xHCI host and the platform the host is integrated |
272 | * into. The Intel HW folks have said it's negligible, FIXME if a different | 272 | * into. The Intel HW folks have said it's negligible, FIXME if a different |
273 | * vendor says otherwise. | 273 | * vendor says otherwise. |
274 | */ | 274 | */ |
275 | static void usb_set_lpm_sel(struct usb_device *udev, | 275 | static void usb_set_lpm_sel(struct usb_device *udev, |
276 | struct usb3_lpm_parameters *udev_lpm_params) | 276 | struct usb3_lpm_parameters *udev_lpm_params) |
277 | { | 277 | { |
278 | struct usb_device *parent; | 278 | struct usb_device *parent; |
279 | unsigned int num_hubs; | 279 | unsigned int num_hubs; |
280 | unsigned int total_sel; | 280 | unsigned int total_sel; |
281 | 281 | ||
282 | /* t1 = device PEL */ | 282 | /* t1 = device PEL */ |
283 | total_sel = udev_lpm_params->pel; | 283 | total_sel = udev_lpm_params->pel; |
284 | /* How many external hubs are in between the device & the root port. */ | 284 | /* How many external hubs are in between the device & the root port. */ |
285 | for (parent = udev->parent, num_hubs = 0; parent->parent; | 285 | for (parent = udev->parent, num_hubs = 0; parent->parent; |
286 | parent = parent->parent) | 286 | parent = parent->parent) |
287 | num_hubs++; | 287 | num_hubs++; |
288 | /* t2 = 2.1us + 250ns * (num_hubs - 1) */ | 288 | /* t2 = 2.1us + 250ns * (num_hubs - 1) */ |
289 | if (num_hubs > 0) | 289 | if (num_hubs > 0) |
290 | total_sel += 2100 + 250 * (num_hubs - 1); | 290 | total_sel += 2100 + 250 * (num_hubs - 1); |
291 | 291 | ||
292 | /* t4 = 250ns * num_hubs */ | 292 | /* t4 = 250ns * num_hubs */ |
293 | total_sel += 250 * num_hubs; | 293 | total_sel += 250 * num_hubs; |
294 | 294 | ||
295 | udev_lpm_params->sel = total_sel; | 295 | udev_lpm_params->sel = total_sel; |
296 | } | 296 | } |
297 | 297 | ||
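A worked example of the SEL calculation in usb_set_lpm_sel() above, for a device sitting behind two external hubs (illustrative numbers only):

/* Hypothetical numbers, num_hubs = 2:
 *   t1 = udev_lpm_params->pel        = 3500 ns
 *   t2 = 2100 + 250 * (2 - 1)        = 2350 ns
 *   t3 = assumed negligible (see the comment above usb_set_lpm_sel())
 *   t4 = 250 * 2                     =  500 ns
 *
 *   sel = 3500 + 2350 + 500          = 6350 ns
 */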
298 | static void usb_set_lpm_parameters(struct usb_device *udev) | 298 | static void usb_set_lpm_parameters(struct usb_device *udev) |
299 | { | 299 | { |
300 | struct usb_hub *hub; | 300 | struct usb_hub *hub; |
301 | unsigned int port_to_port_delay; | 301 | unsigned int port_to_port_delay; |
302 | unsigned int udev_u1_del; | 302 | unsigned int udev_u1_del; |
303 | unsigned int udev_u2_del; | 303 | unsigned int udev_u2_del; |
304 | unsigned int hub_u1_del; | 304 | unsigned int hub_u1_del; |
305 | unsigned int hub_u2_del; | 305 | unsigned int hub_u2_del; |
306 | 306 | ||
307 | if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER) | 307 | if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER) |
308 | return; | 308 | return; |
309 | 309 | ||
310 | hub = usb_hub_to_struct_hub(udev->parent); | 310 | hub = usb_hub_to_struct_hub(udev->parent); |
311 | /* It doesn't take time to transition the roothub into U0, since it | 311 | /* It doesn't take time to transition the roothub into U0, since it |
312 | * doesn't have an upstream link. | 312 | * doesn't have an upstream link. |
313 | */ | 313 | */ |
314 | if (!hub) | 314 | if (!hub) |
315 | return; | 315 | return; |
316 | 316 | ||
317 | udev_u1_del = udev->bos->ss_cap->bU1devExitLat; | 317 | udev_u1_del = udev->bos->ss_cap->bU1devExitLat; |
318 | udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat); | 318 | udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat); |
319 | hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat; | 319 | hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat; |
320 | hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat); | 320 | hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat); |
321 | 321 | ||
322 | usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del, | 322 | usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del, |
323 | hub, &udev->parent->u1_params, hub_u1_del); | 323 | hub, &udev->parent->u1_params, hub_u1_del); |
324 | 324 | ||
325 | usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del, | 325 | usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del, |
326 | hub, &udev->parent->u2_params, hub_u2_del); | 326 | hub, &udev->parent->u2_params, hub_u2_del); |
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Appendix C, section C.2.2.2, says that there is a slight delay from | 329 | * Appendix C, section C.2.2.2, says that there is a slight delay from |
330 | * when the parent hub notices the downstream port is trying to | 330 | * when the parent hub notices the downstream port is trying to |
331 | * transition to U0 to when the hub initiates a U0 transition on its | 331 | * transition to U0 to when the hub initiates a U0 transition on its |
332 | * upstream port. The section says the delays are tPort2PortU1EL and | 332 | * upstream port. The section says the delays are tPort2PortU1EL and |
333 | * tPort2PortU2EL, but it doesn't define what they are. | 333 | * tPort2PortU2EL, but it doesn't define what they are. |
334 | * | 334 | * |
335 | * The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking | 335 | * The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking |
336 | * about the same delays. Use the maximum delay calculations from those | 336 | * about the same delays. Use the maximum delay calculations from those |
337 | * sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For | 337 | * sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For |
338 | * U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I | 338 | * U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I |
339 | * assume the device exit latencies they are talking about are the hub | 339 | * assume the device exit latencies they are talking about are the hub |
340 | * exit latencies. | 340 | * exit latencies. |
341 | * | 341 | * |
342 | * What do we do if the U2 exit latency is less than the U1 exit | 342 | * What do we do if the U2 exit latency is less than the U1 exit |
343 | * latency? It's possible, although not likely... | 343 | * latency? It's possible, although not likely... |
344 | */ | 344 | */ |
345 | port_to_port_delay = 1; | 345 | port_to_port_delay = 1; |
346 | 346 | ||
347 | usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del, | 347 | usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del, |
348 | hub, &udev->parent->u1_params, hub_u1_del, | 348 | hub, &udev->parent->u1_params, hub_u1_del, |
349 | port_to_port_delay); | 349 | port_to_port_delay); |
350 | 350 | ||
351 | if (hub_u2_del > hub_u1_del) | 351 | if (hub_u2_del > hub_u1_del) |
352 | port_to_port_delay = 1 + hub_u2_del - hub_u1_del; | 352 | port_to_port_delay = 1 + hub_u2_del - hub_u1_del; |
353 | else | 353 | else |
354 | port_to_port_delay = 1 + hub_u1_del; | 354 | port_to_port_delay = 1 + hub_u1_del; |
355 | 355 | ||
356 | usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del, | 356 | usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del, |
357 | hub, &udev->parent->u2_params, hub_u2_del, | 357 | hub, &udev->parent->u2_params, hub_u2_del, |
358 | port_to_port_delay); | 358 | port_to_port_delay); |
359 | 359 | ||
360 | /* Now that we've got PEL, calculate SEL. */ | 360 | /* Now that we've got PEL, calculate SEL. */ |
361 | usb_set_lpm_sel(udev, &udev->u1_params); | 361 | usb_set_lpm_sel(udev, &udev->u1_params); |
362 | usb_set_lpm_sel(udev, &udev->u2_params); | 362 | usb_set_lpm_sel(udev, &udev->u2_params); |
363 | } | 363 | } |
364 | 364 | ||
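A short worked example of the port-to-port delay chosen in usb_set_lpm_parameters() above, with made-up hub exit latencies:

/* Hypothetical numbers:
 *   hub_u1_del = 2 us, hub_u2_del = 5 us
 *
 *   U1 case: port_to_port_delay = 1 us (tHubPort2PortExitLat maximum)
 *   U2 case: hub_u2_del > hub_u1_del, so
 *            port_to_port_delay = 1 + 5 - 2 = 4 us
 *
 * usb_set_lpm_pel() then converts this to nanoseconds (4000 ns).
 */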
365 | /* USB 2.0 spec Section 11.24.4.5 */ | 365 | /* USB 2.0 spec Section 11.24.4.5 */ |
366 | static int get_hub_descriptor(struct usb_device *hdev, void *data) | 366 | static int get_hub_descriptor(struct usb_device *hdev, void *data) |
367 | { | 367 | { |
368 | int i, ret, size; | 368 | int i, ret, size; |
369 | unsigned dtype; | 369 | unsigned dtype; |
370 | 370 | ||
371 | if (hub_is_superspeed(hdev)) { | 371 | if (hub_is_superspeed(hdev)) { |
372 | dtype = USB_DT_SS_HUB; | 372 | dtype = USB_DT_SS_HUB; |
373 | size = USB_DT_SS_HUB_SIZE; | 373 | size = USB_DT_SS_HUB_SIZE; |
374 | } else { | 374 | } else { |
375 | dtype = USB_DT_HUB; | 375 | dtype = USB_DT_HUB; |
376 | size = sizeof(struct usb_hub_descriptor); | 376 | size = sizeof(struct usb_hub_descriptor); |
377 | } | 377 | } |
378 | 378 | ||
379 | for (i = 0; i < 3; i++) { | 379 | for (i = 0; i < 3; i++) { |
380 | ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), | 380 | ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), |
381 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, | 381 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, |
382 | dtype << 8, 0, data, size, | 382 | dtype << 8, 0, data, size, |
383 | USB_CTRL_GET_TIMEOUT); | 383 | USB_CTRL_GET_TIMEOUT); |
384 | if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) | 384 | if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) |
385 | return ret; | 385 | return ret; |
386 | } | 386 | } |
387 | return -EINVAL; | 387 | return -EINVAL; |
388 | } | 388 | } |
389 | 389 | ||
390 | /* | 390 | /* |
391 | * USB 2.0 spec Section 11.24.2.1 | 391 | * USB 2.0 spec Section 11.24.2.1 |
392 | */ | 392 | */ |
393 | static int clear_hub_feature(struct usb_device *hdev, int feature) | 393 | static int clear_hub_feature(struct usb_device *hdev, int feature) |
394 | { | 394 | { |
395 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 395 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
396 | USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000); | 396 | USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000); |
397 | } | 397 | } |
398 | 398 | ||
399 | /* | 399 | /* |
400 | * USB 2.0 spec Section 11.24.2.2 | 400 | * USB 2.0 spec Section 11.24.2.2 |
401 | */ | 401 | */ |
402 | int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature) | 402 | int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature) |
403 | { | 403 | { |
404 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 404 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
405 | USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1, | 405 | USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1, |
406 | NULL, 0, 1000); | 406 | NULL, 0, 1000); |
407 | } | 407 | } |
408 | 408 | ||
409 | /* | 409 | /* |
410 | * USB 2.0 spec Section 11.24.2.13 | 410 | * USB 2.0 spec Section 11.24.2.13 |
411 | */ | 411 | */ |
412 | static int set_port_feature(struct usb_device *hdev, int port1, int feature) | 412 | static int set_port_feature(struct usb_device *hdev, int port1, int feature) |
413 | { | 413 | { |
414 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 414 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
415 | USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1, | 415 | USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1, |
416 | NULL, 0, 1000); | 416 | NULL, 0, 1000); |
417 | } | 417 | } |
418 | 418 | ||
419 | /* | 419 | /* |
420 | * USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7 | 420 | * USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7 |
421 | * for info about using port indicators | 421 | * for info about using port indicators |
422 | */ | 422 | */ |
423 | static void set_port_led( | 423 | static void set_port_led( |
424 | struct usb_hub *hub, | 424 | struct usb_hub *hub, |
425 | int port1, | 425 | int port1, |
426 | int selector | 426 | int selector |
427 | ) | 427 | ) |
428 | { | 428 | { |
429 | int status = set_port_feature(hub->hdev, (selector << 8) | port1, | 429 | int status = set_port_feature(hub->hdev, (selector << 8) | port1, |
430 | USB_PORT_FEAT_INDICATOR); | 430 | USB_PORT_FEAT_INDICATOR); |
431 | if (status < 0) | 431 | if (status < 0) |
432 | dev_dbg (hub->intfdev, | 432 | dev_dbg (hub->intfdev, |
433 | "port %d indicator %s status %d\n", | 433 | "port %d indicator %s status %d\n", |
434 | port1, | 434 | port1, |
435 | ({ char *s; switch (selector) { | 435 | ({ char *s; switch (selector) { |
436 | case HUB_LED_AMBER: s = "amber"; break; | 436 | case HUB_LED_AMBER: s = "amber"; break; |
437 | case HUB_LED_GREEN: s = "green"; break; | 437 | case HUB_LED_GREEN: s = "green"; break; |
438 | case HUB_LED_OFF: s = "off"; break; | 438 | case HUB_LED_OFF: s = "off"; break; |
439 | case HUB_LED_AUTO: s = "auto"; break; | 439 | case HUB_LED_AUTO: s = "auto"; break; |
440 | default: s = "??"; break; | 440 | default: s = "??"; break; |
441 | } s; }), | 441 | } s; }), |
442 | status); | 442 | status); |
443 | } | 443 | } |
444 | 444 | ||
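set_port_led() above packs the indicator selector into the high byte of the SetPortFeature wIndex field (set_port_feature() places its second argument into wIndex). The selector values below are the HUB_LED_* constants as given in the USB 2.0 spec table 11-25; treat the exact numeric value as an assumption here:

/* Example: turn the amber indicator on for port 3.
 *   wIndex = (HUB_LED_AMBER << 8) | 3    -- assuming HUB_LED_AMBER == 1,
 *          = 0x0103
 *   wValue = USB_PORT_FEAT_INDICATOR
 */
set_port_led(hub, 3, HUB_LED_AMBER);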
445 | #define LED_CYCLE_PERIOD ((2*HZ)/3) | 445 | #define LED_CYCLE_PERIOD ((2*HZ)/3) |
446 | 446 | ||
447 | static void led_work (struct work_struct *work) | 447 | static void led_work (struct work_struct *work) |
448 | { | 448 | { |
449 | struct usb_hub *hub = | 449 | struct usb_hub *hub = |
450 | container_of(work, struct usb_hub, leds.work); | 450 | container_of(work, struct usb_hub, leds.work); |
451 | struct usb_device *hdev = hub->hdev; | 451 | struct usb_device *hdev = hub->hdev; |
452 | unsigned i; | 452 | unsigned i; |
453 | unsigned changed = 0; | 453 | unsigned changed = 0; |
454 | int cursor = -1; | 454 | int cursor = -1; |
455 | 455 | ||
456 | if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing) | 456 | if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing) |
457 | return; | 457 | return; |
458 | 458 | ||
459 | for (i = 0; i < hdev->maxchild; i++) { | 459 | for (i = 0; i < hdev->maxchild; i++) { |
460 | unsigned selector, mode; | 460 | unsigned selector, mode; |
461 | 461 | ||
462 | /* 30%-50% duty cycle */ | 462 | /* 30%-50% duty cycle */ |
463 | 463 | ||
464 | switch (hub->indicator[i]) { | 464 | switch (hub->indicator[i]) { |
465 | /* cycle marker */ | 465 | /* cycle marker */ |
466 | case INDICATOR_CYCLE: | 466 | case INDICATOR_CYCLE: |
467 | cursor = i; | 467 | cursor = i; |
468 | selector = HUB_LED_AUTO; | 468 | selector = HUB_LED_AUTO; |
469 | mode = INDICATOR_AUTO; | 469 | mode = INDICATOR_AUTO; |
470 | break; | 470 | break; |
471 | /* blinking green = sw attention */ | 471 | /* blinking green = sw attention */ |
472 | case INDICATOR_GREEN_BLINK: | 472 | case INDICATOR_GREEN_BLINK: |
473 | selector = HUB_LED_GREEN; | 473 | selector = HUB_LED_GREEN; |
474 | mode = INDICATOR_GREEN_BLINK_OFF; | 474 | mode = INDICATOR_GREEN_BLINK_OFF; |
475 | break; | 475 | break; |
476 | case INDICATOR_GREEN_BLINK_OFF: | 476 | case INDICATOR_GREEN_BLINK_OFF: |
477 | selector = HUB_LED_OFF; | 477 | selector = HUB_LED_OFF; |
478 | mode = INDICATOR_GREEN_BLINK; | 478 | mode = INDICATOR_GREEN_BLINK; |
479 | break; | 479 | break; |
480 | /* blinking amber = hw attention */ | 480 | /* blinking amber = hw attention */ |
481 | case INDICATOR_AMBER_BLINK: | 481 | case INDICATOR_AMBER_BLINK: |
482 | selector = HUB_LED_AMBER; | 482 | selector = HUB_LED_AMBER; |
483 | mode = INDICATOR_AMBER_BLINK_OFF; | 483 | mode = INDICATOR_AMBER_BLINK_OFF; |
484 | break; | 484 | break; |
485 | case INDICATOR_AMBER_BLINK_OFF: | 485 | case INDICATOR_AMBER_BLINK_OFF: |
486 | selector = HUB_LED_OFF; | 486 | selector = HUB_LED_OFF; |
487 | mode = INDICATOR_AMBER_BLINK; | 487 | mode = INDICATOR_AMBER_BLINK; |
488 | break; | 488 | break; |
489 | /* blink green/amber = reserved */ | 489 | /* blink green/amber = reserved */ |
490 | case INDICATOR_ALT_BLINK: | 490 | case INDICATOR_ALT_BLINK: |
491 | selector = HUB_LED_GREEN; | 491 | selector = HUB_LED_GREEN; |
492 | mode = INDICATOR_ALT_BLINK_OFF; | 492 | mode = INDICATOR_ALT_BLINK_OFF; |
493 | break; | 493 | break; |
494 | case INDICATOR_ALT_BLINK_OFF: | 494 | case INDICATOR_ALT_BLINK_OFF: |
495 | selector = HUB_LED_AMBER; | 495 | selector = HUB_LED_AMBER; |
496 | mode = INDICATOR_ALT_BLINK; | 496 | mode = INDICATOR_ALT_BLINK; |
497 | break; | 497 | break; |
498 | default: | 498 | default: |
499 | continue; | 499 | continue; |
500 | } | 500 | } |
501 | if (selector != HUB_LED_AUTO) | 501 | if (selector != HUB_LED_AUTO) |
502 | changed = 1; | 502 | changed = 1; |
503 | set_port_led(hub, i + 1, selector); | 503 | set_port_led(hub, i + 1, selector); |
504 | hub->indicator[i] = mode; | 504 | hub->indicator[i] = mode; |
505 | } | 505 | } |
506 | if (!changed && blinkenlights) { | 506 | if (!changed && blinkenlights) { |
507 | cursor++; | 507 | cursor++; |
508 | cursor %= hdev->maxchild; | 508 | cursor %= hdev->maxchild; |
509 | set_port_led(hub, cursor + 1, HUB_LED_GREEN); | 509 | set_port_led(hub, cursor + 1, HUB_LED_GREEN); |
510 | hub->indicator[cursor] = INDICATOR_CYCLE; | 510 | hub->indicator[cursor] = INDICATOR_CYCLE; |
511 | changed++; | 511 | changed++; |
512 | } | 512 | } |
513 | if (changed) | 513 | if (changed) |
514 | schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD); | 514 | schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD); |
515 | } | 515 | } |
516 | 516 | ||
517 | /* use a short timeout for hub/port status fetches */ | 517 | /* use a short timeout for hub/port status fetches */ |
518 | #define USB_STS_TIMEOUT 1000 | 518 | #define USB_STS_TIMEOUT 1000 |
519 | #define USB_STS_RETRIES 5 | 519 | #define USB_STS_RETRIES 5 |
520 | 520 | ||
521 | /* | 521 | /* |
522 | * USB 2.0 spec Section 11.24.2.6 | 522 | * USB 2.0 spec Section 11.24.2.6 |
523 | */ | 523 | */ |
524 | static int get_hub_status(struct usb_device *hdev, | 524 | static int get_hub_status(struct usb_device *hdev, |
525 | struct usb_hub_status *data) | 525 | struct usb_hub_status *data) |
526 | { | 526 | { |
527 | int i, status = -ETIMEDOUT; | 527 | int i, status = -ETIMEDOUT; |
528 | 528 | ||
529 | for (i = 0; i < USB_STS_RETRIES && | 529 | for (i = 0; i < USB_STS_RETRIES && |
530 | (status == -ETIMEDOUT || status == -EPIPE); i++) { | 530 | (status == -ETIMEDOUT || status == -EPIPE); i++) { |
531 | status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), | 531 | status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), |
532 | USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0, | 532 | USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0, |
533 | data, sizeof(*data), USB_STS_TIMEOUT); | 533 | data, sizeof(*data), USB_STS_TIMEOUT); |
534 | } | 534 | } |
535 | return status; | 535 | return status; |
536 | } | 536 | } |
537 | 537 | ||
538 | /* | 538 | /* |
539 | * USB 2.0 spec Section 11.24.2.7 | 539 | * USB 2.0 spec Section 11.24.2.7 |
540 | */ | 540 | */ |
541 | static int get_port_status(struct usb_device *hdev, int port1, | 541 | static int get_port_status(struct usb_device *hdev, int port1, |
542 | struct usb_port_status *data) | 542 | struct usb_port_status *data) |
543 | { | 543 | { |
544 | int i, status = -ETIMEDOUT; | 544 | int i, status = -ETIMEDOUT; |
545 | 545 | ||
546 | for (i = 0; i < USB_STS_RETRIES && | 546 | for (i = 0; i < USB_STS_RETRIES && |
547 | (status == -ETIMEDOUT || status == -EPIPE); i++) { | 547 | (status == -ETIMEDOUT || status == -EPIPE); i++) { |
548 | status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), | 548 | status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), |
549 | USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1, | 549 | USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1, |
550 | data, sizeof(*data), USB_STS_TIMEOUT); | 550 | data, sizeof(*data), USB_STS_TIMEOUT); |
551 | } | 551 | } |
552 | return status; | 552 | return status; |
553 | } | 553 | } |
554 | 554 | ||
555 | static int hub_port_status(struct usb_hub *hub, int port1, | 555 | static int hub_port_status(struct usb_hub *hub, int port1, |
556 | u16 *status, u16 *change) | 556 | u16 *status, u16 *change) |
557 | { | 557 | { |
558 | int ret; | 558 | int ret; |
559 | 559 | ||
560 | mutex_lock(&hub->status_mutex); | 560 | mutex_lock(&hub->status_mutex); |
561 | ret = get_port_status(hub->hdev, port1, &hub->status->port); | 561 | ret = get_port_status(hub->hdev, port1, &hub->status->port); |
562 | if (ret < 4) { | 562 | if (ret < 4) { |
563 | if (ret != -ENODEV) | 563 | if (ret != -ENODEV) |
564 | dev_err(hub->intfdev, | 564 | dev_err(hub->intfdev, |
565 | "%s failed (err = %d)\n", __func__, ret); | 565 | "%s failed (err = %d)\n", __func__, ret); |
566 | if (ret >= 0) | 566 | if (ret >= 0) |
567 | ret = -EIO; | 567 | ret = -EIO; |
568 | } else { | 568 | } else { |
569 | *status = le16_to_cpu(hub->status->port.wPortStatus); | 569 | *status = le16_to_cpu(hub->status->port.wPortStatus); |
570 | *change = le16_to_cpu(hub->status->port.wPortChange); | 570 | *change = le16_to_cpu(hub->status->port.wPortChange); |
571 | 571 | ||
572 | ret = 0; | 572 | ret = 0; |
573 | } | 573 | } |
574 | mutex_unlock(&hub->status_mutex); | 574 | mutex_unlock(&hub->status_mutex); |
575 | return ret; | 575 | return ret; |
576 | } | 576 | } |
577 | 577 | ||
578 | static void kick_khubd(struct usb_hub *hub) | 578 | static void kick_khubd(struct usb_hub *hub) |
579 | { | 579 | { |
580 | unsigned long flags; | 580 | unsigned long flags; |
581 | 581 | ||
582 | spin_lock_irqsave(&hub_event_lock, flags); | 582 | spin_lock_irqsave(&hub_event_lock, flags); |
583 | if (!hub->disconnected && list_empty(&hub->event_list)) { | 583 | if (!hub->disconnected && list_empty(&hub->event_list)) { |
584 | list_add_tail(&hub->event_list, &hub_event_list); | 584 | list_add_tail(&hub->event_list, &hub_event_list); |
585 | 585 | ||
586 | /* Suppress autosuspend until khubd runs */ | 586 | /* Suppress autosuspend until khubd runs */ |
587 | usb_autopm_get_interface_no_resume( | 587 | usb_autopm_get_interface_no_resume( |
588 | to_usb_interface(hub->intfdev)); | 588 | to_usb_interface(hub->intfdev)); |
589 | wake_up(&khubd_wait); | 589 | wake_up(&khubd_wait); |
590 | } | 590 | } |
591 | spin_unlock_irqrestore(&hub_event_lock, flags); | 591 | spin_unlock_irqrestore(&hub_event_lock, flags); |
592 | } | 592 | } |
593 | 593 | ||
594 | void usb_kick_khubd(struct usb_device *hdev) | 594 | void usb_kick_khubd(struct usb_device *hdev) |
595 | { | 595 | { |
596 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 596 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
597 | 597 | ||
598 | if (hub) | 598 | if (hub) |
599 | kick_khubd(hub); | 599 | kick_khubd(hub); |
600 | } | 600 | } |
601 | 601 | ||
602 | /* | 602 | /* |
603 | * Let the USB core know that a USB 3.0 device has sent a Function Wake Device | 603 | * Let the USB core know that a USB 3.0 device has sent a Function Wake Device |
604 | * Notification, which indicates it had initiated remote wakeup. | 604 | * Notification, which indicates it had initiated remote wakeup. |
605 | * | 605 | * |
606 | * USB 3.0 hubs do not report the port link state change from U3 to U0 when the | 606 | * USB 3.0 hubs do not report the port link state change from U3 to U0 when the |
607 | * device initiates resume, so the USB core will not receive notice of the | 607 | * device initiates resume, so the USB core will not receive notice of the |
608 | * resume through the normal hub interrupt URB. | 608 | * resume through the normal hub interrupt URB. |
609 | */ | 609 | */ |
610 | void usb_wakeup_notification(struct usb_device *hdev, | 610 | void usb_wakeup_notification(struct usb_device *hdev, |
611 | unsigned int portnum) | 611 | unsigned int portnum) |
612 | { | 612 | { |
613 | struct usb_hub *hub; | 613 | struct usb_hub *hub; |
614 | 614 | ||
615 | if (!hdev) | 615 | if (!hdev) |
616 | return; | 616 | return; |
617 | 617 | ||
618 | hub = usb_hub_to_struct_hub(hdev); | 618 | hub = usb_hub_to_struct_hub(hdev); |
619 | if (hub) { | 619 | if (hub) { |
620 | set_bit(portnum, hub->wakeup_bits); | 620 | set_bit(portnum, hub->wakeup_bits); |
621 | kick_khubd(hub); | 621 | kick_khubd(hub); |
622 | } | 622 | } |
623 | } | 623 | } |
624 | EXPORT_SYMBOL_GPL(usb_wakeup_notification); | 624 | EXPORT_SYMBOL_GPL(usb_wakeup_notification); |
625 | 625 | ||
626 | /* completion function, fires on port status changes and various faults */ | 626 | /* completion function, fires on port status changes and various faults */ |
627 | static void hub_irq(struct urb *urb) | 627 | static void hub_irq(struct urb *urb) |
628 | { | 628 | { |
629 | struct usb_hub *hub = urb->context; | 629 | struct usb_hub *hub = urb->context; |
630 | int status = urb->status; | 630 | int status = urb->status; |
631 | unsigned i; | 631 | unsigned i; |
632 | unsigned long bits; | 632 | unsigned long bits; |
633 | 633 | ||
634 | switch (status) { | 634 | switch (status) { |
635 | case -ENOENT: /* synchronous unlink */ | 635 | case -ENOENT: /* synchronous unlink */ |
636 | case -ECONNRESET: /* async unlink */ | 636 | case -ECONNRESET: /* async unlink */ |
637 | case -ESHUTDOWN: /* hardware going away */ | 637 | case -ESHUTDOWN: /* hardware going away */ |
638 | return; | 638 | return; |
639 | 639 | ||
640 | default: /* presumably an error */ | 640 | default: /* presumably an error */ |
641 | /* Cause a hub reset after 10 consecutive errors */ | 641 | /* Cause a hub reset after 10 consecutive errors */ |
642 | dev_dbg (hub->intfdev, "transfer --> %d\n", status); | 642 | dev_dbg (hub->intfdev, "transfer --> %d\n", status); |
643 | if ((++hub->nerrors < 10) || hub->error) | 643 | if ((++hub->nerrors < 10) || hub->error) |
644 | goto resubmit; | 644 | goto resubmit; |
645 | hub->error = status; | 645 | hub->error = status; |
646 | /* FALL THROUGH */ | 646 | /* FALL THROUGH */ |
647 | 647 | ||
648 | /* let khubd handle things */ | 648 | /* let khubd handle things */ |
649 | case 0: /* we got data: port status changed */ | 649 | case 0: /* we got data: port status changed */ |
650 | bits = 0; | 650 | bits = 0; |
651 | for (i = 0; i < urb->actual_length; ++i) | 651 | for (i = 0; i < urb->actual_length; ++i) |
652 | bits |= ((unsigned long) ((*hub->buffer)[i])) | 652 | bits |= ((unsigned long) ((*hub->buffer)[i])) |
653 | << (i*8); | 653 | << (i*8); |
654 | hub->event_bits[0] = bits; | 654 | hub->event_bits[0] = bits; |
655 | break; | 655 | break; |
656 | } | 656 | } |
657 | 657 | ||
658 | hub->nerrors = 0; | 658 | hub->nerrors = 0; |
659 | 659 | ||
660 | /* Something happened, let khubd figure it out */ | 660 | /* Something happened, let khubd figure it out */ |
661 | kick_khubd(hub); | 661 | kick_khubd(hub); |
662 | 662 | ||
663 | resubmit: | 663 | resubmit: |
664 | if (hub->quiescing) | 664 | if (hub->quiescing) |
665 | return; | 665 | return; |
666 | 666 | ||
667 | if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0 | 667 | if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0 |
668 | && status != -ENODEV && status != -EPERM) | 668 | && status != -ENODEV && status != -EPERM) |
669 | dev_err (hub->intfdev, "resubmit --> %d\n", status); | 669 | dev_err (hub->intfdev, "resubmit --> %d\n", status); |
670 | } | 670 | } |
671 | 671 | ||
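The status-change bitmap assembled in hub_irq() above follows the hub interrupt endpoint layout from the USB 2.0 spec (bit 0 = hub change, bit N = port N change). A small worked example with a made-up transfer:

/* Hypothetical 1-byte interrupt transfer: (*hub->buffer)[0] == 0x0a
 *
 *   bits = 0x0a = 0b00001010
 *     bit 1 set   -> status change on port 1
 *     bit 3 set   -> status change on port 3
 *     bit 0 clear -> no hub-level change
 *
 * hub->event_bits[0] becomes 0x0a, and khubd later walks these bits.
 */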
672 | /* USB 2.0 spec Section 11.24.2.3 */ | 672 | /* USB 2.0 spec Section 11.24.2.3 */ |
673 | static inline int | 673 | static inline int |
674 | hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) | 674 | hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) |
675 | { | 675 | { |
676 | /* Need to clear both directions for control ep */ | 676 | /* Need to clear both directions for control ep */ |
677 | if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) == | 677 | if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) == |
678 | USB_ENDPOINT_XFER_CONTROL) { | 678 | USB_ENDPOINT_XFER_CONTROL) { |
679 | int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 679 | int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
680 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, | 680 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, |
681 | devinfo ^ 0x8000, tt, NULL, 0, 1000); | 681 | devinfo ^ 0x8000, tt, NULL, 0, 1000); |
682 | if (status) | 682 | if (status) |
683 | return status; | 683 | return status; |
684 | } | 684 | } |
685 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 685 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
686 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, | 686 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, |
687 | tt, NULL, 0, 1000); | 687 | tt, NULL, 0, 1000); |
688 | } | 688 | } |
689 | 689 | ||
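The devinfo word handed to hub_clear_tt_buffer() above packs endpoint number, device address, endpoint type and direction into the Clear_TT_Buffer wValue. The bit layout quoted below follows USB 2.0 section 11.24.2.3 and the way the hub driver builds devinfo elsewhere; treat the exact bit positions as an assumption:

/* Assumed devinfo layout (Clear_TT_Buffer wValue):
 *   bits  3:0   endpoint number
 *   bits 10:4   device address
 *   bits 12:11  endpoint type (control/bulk)
 *   bit  15     direction (1 = IN)
 *
 * Example: IN bulk endpoint 2 on device address 5:
 *   devinfo = 2 | (5 << 4) | (USB_ENDPOINT_XFER_BULK << 11) | (1 << 15)
 *           = 0x9052
 *
 * For a control endpoint the function above issues the request twice,
 * XOR-ing 0x8000 to flip the direction bit so both halves are cleared.
 */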
690 | /* | 690 | /* |
691 | * enumeration blocks khubd for a long time. we use keventd instead, since | 691 | * enumeration blocks khubd for a long time. we use keventd instead, since |
692 | * long blocking there is the exception, not the rule. accordingly, HCDs | 692 | * long blocking there is the exception, not the rule. accordingly, HCDs |
693 | * talking to TTs must queue control transfers (not just bulk and iso), so | 693 | * talking to TTs must queue control transfers (not just bulk and iso), so |
694 | * both can talk to the same hub concurrently. | 694 | * both can talk to the same hub concurrently. |
695 | */ | 695 | */ |
696 | static void hub_tt_work(struct work_struct *work) | 696 | static void hub_tt_work(struct work_struct *work) |
697 | { | 697 | { |
698 | struct usb_hub *hub = | 698 | struct usb_hub *hub = |
699 | container_of(work, struct usb_hub, tt.clear_work); | 699 | container_of(work, struct usb_hub, tt.clear_work); |
700 | unsigned long flags; | 700 | unsigned long flags; |
701 | 701 | ||
702 | spin_lock_irqsave (&hub->tt.lock, flags); | 702 | spin_lock_irqsave (&hub->tt.lock, flags); |
703 | while (!list_empty(&hub->tt.clear_list)) { | 703 | while (!list_empty(&hub->tt.clear_list)) { |
704 | struct list_head *next; | 704 | struct list_head *next; |
705 | struct usb_tt_clear *clear; | 705 | struct usb_tt_clear *clear; |
706 | struct usb_device *hdev = hub->hdev; | 706 | struct usb_device *hdev = hub->hdev; |
707 | const struct hc_driver *drv; | 707 | const struct hc_driver *drv; |
708 | int status; | 708 | int status; |
709 | 709 | ||
710 | next = hub->tt.clear_list.next; | 710 | next = hub->tt.clear_list.next; |
711 | clear = list_entry (next, struct usb_tt_clear, clear_list); | 711 | clear = list_entry (next, struct usb_tt_clear, clear_list); |
712 | list_del (&clear->clear_list); | 712 | list_del (&clear->clear_list); |
713 | 713 | ||
714 | /* drop lock so HCD can concurrently report other TT errors */ | 714 | /* drop lock so HCD can concurrently report other TT errors */ |
715 | spin_unlock_irqrestore (&hub->tt.lock, flags); | 715 | spin_unlock_irqrestore (&hub->tt.lock, flags); |
716 | status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt); | 716 | status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt); |
717 | if (status && status != -ENODEV) | 717 | if (status && status != -ENODEV) |
718 | dev_err (&hdev->dev, | 718 | dev_err (&hdev->dev, |
719 | "clear tt %d (%04x) error %d\n", | 719 | "clear tt %d (%04x) error %d\n", |
720 | clear->tt, clear->devinfo, status); | 720 | clear->tt, clear->devinfo, status); |
721 | 721 | ||
722 | /* Tell the HCD, even if the operation failed */ | 722 | /* Tell the HCD, even if the operation failed */ |
723 | drv = clear->hcd->driver; | 723 | drv = clear->hcd->driver; |
724 | if (drv->clear_tt_buffer_complete) | 724 | if (drv->clear_tt_buffer_complete) |
725 | (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep); | 725 | (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep); |
726 | 726 | ||
727 | kfree(clear); | 727 | kfree(clear); |
728 | spin_lock_irqsave(&hub->tt.lock, flags); | 728 | spin_lock_irqsave(&hub->tt.lock, flags); |
729 | } | 729 | } |
730 | spin_unlock_irqrestore (&hub->tt.lock, flags); | 730 | spin_unlock_irqrestore (&hub->tt.lock, flags); |
731 | } | 731 | } |
732 | 732 | ||
733 | /** | 733 | /** |
734 | * usb_hub_set_port_power - control hub port's power state | 734 | * usb_hub_set_port_power - control hub port's power state |
735 | * @hdev: USB device belonging to the usb hub | 735 | * @hdev: USB device belonging to the usb hub |
736 | * @hub: target hub | 736 | * @hub: target hub |
737 | * @port1: port index | 737 | * @port1: port index |
738 | * @set: true to power the port on, false to power it off | 738 | * @set: true to power the port on, false to power it off |
739 | * | 739 | * |
740 | * Call this function to control a port's power state by setting or | 740 | * Call this function to control a port's power state by setting or |
741 | * clearing the port's PORT_POWER feature. | 741 | * clearing the port's PORT_POWER feature. |
742 | * | 742 | * |
743 | * Return: 0 if successful. A negative error code otherwise. | 743 | * Return: 0 if successful. A negative error code otherwise. |
744 | */ | 744 | */ |
745 | int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub, | 745 | int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub, |
746 | int port1, bool set) | 746 | int port1, bool set) |
747 | { | 747 | { |
748 | int ret; | 748 | int ret; |
749 | struct usb_port *port_dev = hub->ports[port1 - 1]; | 749 | struct usb_port *port_dev = hub->ports[port1 - 1]; |
750 | 750 | ||
751 | if (set) | 751 | if (set) |
752 | ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); | 752 | ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); |
753 | else | 753 | else |
754 | ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER); | 754 | ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER); |
755 | 755 | ||
756 | if (!ret) | 756 | if (!ret) |
757 | port_dev->power_is_on = set; | 757 | port_dev->power_is_on = set; |
758 | return ret; | 758 | return ret; |
759 | } | 759 | } |
760 | 760 | ||
761 | /** | 761 | /** |
762 | * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub | 762 | * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub |
763 | * @urb: a URB associated with the failed or incomplete split transaction | 763 | * @urb: a URB associated with the failed or incomplete split transaction |
764 | * | 764 | * |
765 | * High speed HCDs use this to tell the hub driver that some split control or | 765 | * High speed HCDs use this to tell the hub driver that some split control or |
766 | * bulk transaction failed in a way that requires clearing internal state of | 766 | * bulk transaction failed in a way that requires clearing internal state of |
767 | * a transaction translator. This is normally detected (and reported) from | 767 | * a transaction translator. This is normally detected (and reported) from |
768 | * interrupt context. | 768 | * interrupt context. |
769 | * | 769 | * |
770 | * It may not be possible for that hub to handle additional full (or low) | 770 | * It may not be possible for that hub to handle additional full (or low) |
771 | * speed transactions until that state is fully cleared out. | 771 | * speed transactions until that state is fully cleared out. |
772 | * | 772 | * |
773 | * Return: 0 if successful. A negative error code otherwise. | 773 | * Return: 0 if successful. A negative error code otherwise. |
774 | */ | 774 | */ |
775 | int usb_hub_clear_tt_buffer(struct urb *urb) | 775 | int usb_hub_clear_tt_buffer(struct urb *urb) |
776 | { | 776 | { |
777 | struct usb_device *udev = urb->dev; | 777 | struct usb_device *udev = urb->dev; |
778 | int pipe = urb->pipe; | 778 | int pipe = urb->pipe; |
779 | struct usb_tt *tt = udev->tt; | 779 | struct usb_tt *tt = udev->tt; |
780 | unsigned long flags; | 780 | unsigned long flags; |
781 | struct usb_tt_clear *clear; | 781 | struct usb_tt_clear *clear; |
782 | 782 | ||
783 | /* we've got to cope with an arbitrary number of pending TT clears, | 783 | /* we've got to cope with an arbitrary number of pending TT clears, |
784 | * since each TT has "at least two" buffers that can need it (and | 784 | * since each TT has "at least two" buffers that can need it (and |
785 | * there can be many TTs per hub). even if they're uncommon. | 785 | * there can be many TTs per hub). even if they're uncommon. |
786 | */ | 786 | */ |
787 | if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) { | 787 | if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) { |
788 | dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); | 788 | dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); |
789 | /* FIXME recover somehow ... RESET_TT? */ | 789 | /* FIXME recover somehow ... RESET_TT? */ |
790 | return -ENOMEM; | 790 | return -ENOMEM; |
791 | } | 791 | } |
792 | 792 | ||
793 | /* info that CLEAR_TT_BUFFER needs */ | 793 | /* info that CLEAR_TT_BUFFER needs */ |
794 | clear->tt = tt->multi ? udev->ttport : 1; | 794 | clear->tt = tt->multi ? udev->ttport : 1; |
795 | clear->devinfo = usb_pipeendpoint (pipe); | 795 | clear->devinfo = usb_pipeendpoint (pipe); |
796 | clear->devinfo |= udev->devnum << 4; | 796 | clear->devinfo |= udev->devnum << 4; |
797 | clear->devinfo |= usb_pipecontrol (pipe) | 797 | clear->devinfo |= usb_pipecontrol (pipe) |
798 | ? (USB_ENDPOINT_XFER_CONTROL << 11) | 798 | ? (USB_ENDPOINT_XFER_CONTROL << 11) |
799 | : (USB_ENDPOINT_XFER_BULK << 11); | 799 | : (USB_ENDPOINT_XFER_BULK << 11); |
800 | if (usb_pipein (pipe)) | 800 | if (usb_pipein (pipe)) |
801 | clear->devinfo |= 1 << 15; | 801 | clear->devinfo |= 1 << 15; |
802 | 802 | ||
803 | /* info for completion callback */ | 803 | /* info for completion callback */ |
804 | clear->hcd = bus_to_hcd(udev->bus); | 804 | clear->hcd = bus_to_hcd(udev->bus); |
805 | clear->ep = urb->ep; | 805 | clear->ep = urb->ep; |
806 | 806 | ||
807 | /* tell keventd to clear state for this TT */ | 807 | /* tell keventd to clear state for this TT */ |
808 | spin_lock_irqsave (&tt->lock, flags); | 808 | spin_lock_irqsave (&tt->lock, flags); |
809 | list_add_tail (&clear->clear_list, &tt->clear_list); | 809 | list_add_tail (&clear->clear_list, &tt->clear_list); |
810 | schedule_work(&tt->clear_work); | 810 | schedule_work(&tt->clear_work); |
811 | spin_unlock_irqrestore (&tt->lock, flags); | 811 | spin_unlock_irqrestore (&tt->lock, flags); |
812 | return 0; | 812 | return 0; |
813 | } | 813 | } |
814 | EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer); | 814 | EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer); |
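The devinfo word assembled above is the wValue that the Clear_TT_Buffer request expects: endpoint number in bits 0-3, device address in bits 4-10, endpoint type in bits 11-12, and the direction flag in bit 15, exactly as the driver code builds it. Below is a minimal user-space sketch of the same packing, for illustration only; the helper name and the stand-alone program are invented, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors how usb_hub_clear_tt_buffer() above packs
 * the Clear_TT_Buffer wValue ("devinfo").  The helper name and this
 * stand-alone program are invented; the field layout comes from the
 * driver code (and the USB 2.0 hub spec's Clear_TT_Buffer request).
 */
static uint16_t pack_tt_devinfo(unsigned ep, unsigned devnum,
				int is_control, int is_in)
{
	uint16_t devinfo = ep & 0x0f;		/* endpoint number, bits 0-3 */

	devinfo |= (devnum & 0x7f) << 4;	/* device address, bits 4-10 */
	devinfo |= (is_control ? 0 : 2) << 11;	/* endpoint type, bits 11-12:
						 * 0 = control, 2 = bulk */
	if (is_in)
		devinfo |= 1 << 15;		/* direction flag (IN) */
	return devinfo;
}

int main(void)
{
	/* e.g. a bulk IN endpoint 2 on the device at address 5 */
	printf("devinfo = 0x%04x\n", pack_tt_devinfo(2, 5, 0, 1));
	return 0;
}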
815 | 815 | ||
816 | /* If do_delay is false, return the number of milliseconds the caller | 816 | /* If do_delay is false, return the number of milliseconds the caller |
817 | * needs to delay. | 817 | * needs to delay. |
818 | */ | 818 | */ |
819 | static unsigned hub_power_on(struct usb_hub *hub, bool do_delay) | 819 | static unsigned hub_power_on(struct usb_hub *hub, bool do_delay) |
820 | { | 820 | { |
821 | int port1; | 821 | int port1; |
822 | unsigned pgood_delay = hub->descriptor->bPwrOn2PwrGood * 2; | 822 | unsigned pgood_delay = hub->descriptor->bPwrOn2PwrGood * 2; |
823 | unsigned delay; | 823 | unsigned delay; |
824 | u16 wHubCharacteristics = | 824 | u16 wHubCharacteristics = |
825 | le16_to_cpu(hub->descriptor->wHubCharacteristics); | 825 | le16_to_cpu(hub->descriptor->wHubCharacteristics); |
826 | 826 | ||
827 | /* Enable power on each port. Some hubs have reserved values | 827 | /* Enable power on each port. Some hubs have reserved values |
828 | * of LPSM (> 2) in their descriptors, even though they are | 828 | * of LPSM (> 2) in their descriptors, even though they are |
829 | * USB 2.0 hubs. Some hubs do not implement port-power switching | 829 | * USB 2.0 hubs. Some hubs do not implement port-power switching |
830 | * but only emulate it. In all cases, the ports won't work | 830 | * but only emulate it. In all cases, the ports won't work |
831 | * unless we send these messages to the hub. | 831 | * unless we send these messages to the hub. |
832 | */ | 832 | */ |
833 | if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2) | 833 | if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2) |
834 | dev_dbg(hub->intfdev, "enabling power on all ports\n"); | 834 | dev_dbg(hub->intfdev, "enabling power on all ports\n"); |
835 | else | 835 | else |
836 | dev_dbg(hub->intfdev, "trying to enable port power on " | 836 | dev_dbg(hub->intfdev, "trying to enable port power on " |
837 | "non-switchable hub\n"); | 837 | "non-switchable hub\n"); |
838 | for (port1 = 1; port1 <= hub->hdev->maxchild; port1++) | 838 | for (port1 = 1; port1 <= hub->hdev->maxchild; port1++) |
839 | if (hub->ports[port1 - 1]->power_is_on) | 839 | if (hub->ports[port1 - 1]->power_is_on) |
840 | set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER); | 840 | set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER); |
841 | else | 841 | else |
842 | usb_clear_port_feature(hub->hdev, port1, | 842 | usb_clear_port_feature(hub->hdev, port1, |
843 | USB_PORT_FEAT_POWER); | 843 | USB_PORT_FEAT_POWER); |
844 | 844 | ||
845 | /* Wait at least 100 msec for power to become stable */ | 845 | /* Wait at least 100 msec for power to become stable */ |
846 | delay = max(pgood_delay, (unsigned) 100); | 846 | delay = max(pgood_delay, (unsigned) 100); |
847 | if (do_delay) | 847 | if (do_delay) |
848 | msleep(delay); | 848 | msleep(delay); |
849 | return delay; | 849 | return delay; |
850 | } | 850 | } |
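The delay computed above comes straight from the hub descriptor: bPwrOn2PwrGood is expressed in units of 2 ms, so the driver doubles it and never waits less than 100 ms for port power to become stable. A small stand-alone sketch of the same arithmetic (plain user-space C, not driver code):

#include <stdio.h>

/*
 * Illustrative only: the same computation hub_power_on() does above.
 * bPwrOn2PwrGood from the hub descriptor is in units of 2 ms, and the
 * driver never waits less than 100 ms for port power to stabilize.
 */
static unsigned power_good_delay_ms(unsigned char bPwrOn2PwrGood)
{
	unsigned pgood_delay = bPwrOn2PwrGood * 2;

	return pgood_delay > 100 ? pgood_delay : 100;
}

int main(void)
{
	printf("%u ms\n", power_good_delay_ms(10));	/* 20 -> clamped to 100 */
	printf("%u ms\n", power_good_delay_ms(200));	/* 400 ms */
	return 0;
}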
851 | 851 | ||
852 | static int hub_hub_status(struct usb_hub *hub, | 852 | static int hub_hub_status(struct usb_hub *hub, |
853 | u16 *status, u16 *change) | 853 | u16 *status, u16 *change) |
854 | { | 854 | { |
855 | int ret; | 855 | int ret; |
856 | 856 | ||
857 | mutex_lock(&hub->status_mutex); | 857 | mutex_lock(&hub->status_mutex); |
858 | ret = get_hub_status(hub->hdev, &hub->status->hub); | 858 | ret = get_hub_status(hub->hdev, &hub->status->hub); |
859 | if (ret < 0) { | 859 | if (ret < 0) { |
860 | if (ret != -ENODEV) | 860 | if (ret != -ENODEV) |
861 | dev_err(hub->intfdev, | 861 | dev_err(hub->intfdev, |
862 | "%s failed (err = %d)\n", __func__, ret); | 862 | "%s failed (err = %d)\n", __func__, ret); |
863 | } else { | 863 | } else { |
864 | *status = le16_to_cpu(hub->status->hub.wHubStatus); | 864 | *status = le16_to_cpu(hub->status->hub.wHubStatus); |
865 | *change = le16_to_cpu(hub->status->hub.wHubChange); | 865 | *change = le16_to_cpu(hub->status->hub.wHubChange); |
866 | ret = 0; | 866 | ret = 0; |
867 | } | 867 | } |
868 | mutex_unlock(&hub->status_mutex); | 868 | mutex_unlock(&hub->status_mutex); |
869 | return ret; | 869 | return ret; |
870 | } | 870 | } |
871 | 871 | ||
872 | static int hub_set_port_link_state(struct usb_hub *hub, int port1, | 872 | static int hub_set_port_link_state(struct usb_hub *hub, int port1, |
873 | unsigned int link_status) | 873 | unsigned int link_status) |
874 | { | 874 | { |
875 | return set_port_feature(hub->hdev, | 875 | return set_port_feature(hub->hdev, |
876 | port1 | (link_status << 3), | 876 | port1 | (link_status << 3), |
877 | USB_PORT_FEAT_LINK_STATE); | 877 | USB_PORT_FEAT_LINK_STATE); |
878 | } | 878 | } |
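The expression "port1 | (link_status << 3)" above looks odd at first: the USB_SS_PORT_LS_* constants are the raw link-state field of wPortStatus (bits 5-8, i.e. selector << 5), while SetPortFeature(PORT_LINK_STATE) wants the port number in the low byte of wIndex and the link-state selector in the high byte. Shifting left by 3 turns selector << 5 into selector << 8. A hedged user-space sketch; the two #defines are restated here only for illustration and assume the usual selector encoding (U0 = 0, ..., SS.Disabled = 4, Rx.Detect = 5):

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed values: the link-state selector is stored as (selector << 5)
 * in wPortStatus, as the USB_SS_PORT_LS_* constants in ch11.h encode it.
 */
#define SS_PORT_LS_SS_DISABLED	(4 << 5)	/* 0x0080 */
#define SS_PORT_LS_RX_DETECT	(5 << 5)	/* 0x00a0 */

/* Same packing as hub_set_port_link_state(): port number in the low
 * byte, link-state selector in the high byte of wIndex. */
static uint16_t link_state_windex(int port1, uint16_t link_status)
{
	return port1 | (link_status << 3);
}

int main(void)
{
	printf("wIndex = 0x%04x\n",
	       link_state_windex(3, SS_PORT_LS_SS_DISABLED));	/* 0x0403 */
	printf("wIndex = 0x%04x\n",
	       link_state_windex(3, SS_PORT_LS_RX_DETECT));	/* 0x0503 */
	return 0;
}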
879 | 879 | ||
880 | /* | 880 | /* |
881 | * If USB 3.0 ports are placed into the Disabled state, they will no longer | 881 | * If USB 3.0 ports are placed into the Disabled state, they will no longer |
882 | * detect any device connects or disconnects. This is generally not what the | 882 | * detect any device connects or disconnects. This is generally not what the |
883 | * USB core wants, since it expects a disabled port to produce a port status | 883 | * USB core wants, since it expects a disabled port to produce a port status |
884 | * change event when a new device connects. | 884 | * change event when a new device connects. |
885 | * | 885 | * |
886 | * Instead, set the link state to Disabled, wait for the link to settle into | 886 | * Instead, set the link state to Disabled, wait for the link to settle into |
887 | * that state, clear any change bits, and then put the port into the RxDetect | 887 | * that state, clear any change bits, and then put the port into the RxDetect |
888 | * state. | 888 | * state. |
889 | */ | 889 | */ |
890 | static int hub_usb3_port_disable(struct usb_hub *hub, int port1) | 890 | static int hub_usb3_port_disable(struct usb_hub *hub, int port1) |
891 | { | 891 | { |
892 | int ret; | 892 | int ret; |
893 | int total_time; | 893 | int total_time; |
894 | u16 portchange, portstatus; | 894 | u16 portchange, portstatus; |
895 | 895 | ||
896 | if (!hub_is_superspeed(hub->hdev)) | 896 | if (!hub_is_superspeed(hub->hdev)) |
897 | return -EINVAL; | 897 | return -EINVAL; |
898 | 898 | ||
899 | ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); | 899 | ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); |
900 | if (ret) | 900 | if (ret) |
901 | return ret; | 901 | return ret; |
902 | 902 | ||
903 | /* Wait for the link to enter the disabled state. */ | 903 | /* Wait for the link to enter the disabled state. */ |
904 | for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { | 904 | for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { |
905 | ret = hub_port_status(hub, port1, &portstatus, &portchange); | 905 | ret = hub_port_status(hub, port1, &portstatus, &portchange); |
906 | if (ret < 0) | 906 | if (ret < 0) |
907 | return ret; | 907 | return ret; |
908 | 908 | ||
909 | if ((portstatus & USB_PORT_STAT_LINK_STATE) == | 909 | if ((portstatus & USB_PORT_STAT_LINK_STATE) == |
910 | USB_SS_PORT_LS_SS_DISABLED) | 910 | USB_SS_PORT_LS_SS_DISABLED) |
911 | break; | 911 | break; |
912 | if (total_time >= HUB_DEBOUNCE_TIMEOUT) | 912 | if (total_time >= HUB_DEBOUNCE_TIMEOUT) |
913 | break; | 913 | break; |
914 | msleep(HUB_DEBOUNCE_STEP); | 914 | msleep(HUB_DEBOUNCE_STEP); |
915 | } | 915 | } |
916 | if (total_time >= HUB_DEBOUNCE_TIMEOUT) | 916 | if (total_time >= HUB_DEBOUNCE_TIMEOUT) |
917 | dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n", | 917 | dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n", |
918 | port1, total_time); | 918 | port1, total_time); |
919 | 919 | ||
920 | return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); | 920 | return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); |
921 | } | 921 | } |
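The wait loop above is a bounded polling pattern: re-read the port status, stop as soon as the link reports SS.Disabled, and give up after HUB_DEBOUNCE_TIMEOUT, sleeping HUB_DEBOUNCE_STEP between reads. A stripped-down user-space sketch of the same shape; the step and timeout values and the link_disabled() callback are placeholders, not the driver's constants from hub.h:

#include <stdbool.h>
#include <stdio.h>

#define POLL_STEP_MS	25	/* placeholder for HUB_DEBOUNCE_STEP */
#define POLL_TIMEOUT_MS	1500	/* placeholder for HUB_DEBOUNCE_TIMEOUT */

/* Placeholder for "has the link reached SS.Disabled yet?" */
static bool link_disabled(int *reads_left)
{
	return --(*reads_left) <= 0;
}

static int wait_for_disabled(int reads_needed)
{
	int total_time;

	for (total_time = 0; ; total_time += POLL_STEP_MS) {
		if (link_disabled(&reads_needed))
			return 0;
		if (total_time >= POLL_TIMEOUT_MS)
			return -1;	/* same shape as the timeout warning above */
		/* the driver msleep()s POLL_STEP_MS here */
	}
}

int main(void)
{
	printf("fast: %d, never: %d\n",
	       wait_for_disabled(3), wait_for_disabled(1 << 30));
	return 0;
}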
922 | 922 | ||
923 | static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) | 923 | static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) |
924 | { | 924 | { |
925 | struct usb_device *hdev = hub->hdev; | 925 | struct usb_device *hdev = hub->hdev; |
926 | int ret = 0; | 926 | int ret = 0; |
927 | 927 | ||
928 | if (hub->ports[port1 - 1]->child && set_state) | 928 | if (hub->ports[port1 - 1]->child && set_state) |
929 | usb_set_device_state(hub->ports[port1 - 1]->child, | 929 | usb_set_device_state(hub->ports[port1 - 1]->child, |
930 | USB_STATE_NOTATTACHED); | 930 | USB_STATE_NOTATTACHED); |
931 | if (!hub->error) { | 931 | if (!hub->error) { |
932 | if (hub_is_superspeed(hub->hdev)) | 932 | if (hub_is_superspeed(hub->hdev)) |
933 | ret = hub_usb3_port_disable(hub, port1); | 933 | ret = hub_usb3_port_disable(hub, port1); |
934 | else | 934 | else |
935 | ret = usb_clear_port_feature(hdev, port1, | 935 | ret = usb_clear_port_feature(hdev, port1, |
936 | USB_PORT_FEAT_ENABLE); | 936 | USB_PORT_FEAT_ENABLE); |
937 | } | 937 | } |
938 | if (ret && ret != -ENODEV) | 938 | if (ret && ret != -ENODEV) |
939 | dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n", | 939 | dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n", |
940 | port1, ret); | 940 | port1, ret); |
941 | return ret; | 941 | return ret; |
942 | } | 942 | } |
943 | 943 | ||
944 | /* | 944 | /* |
945 | * Disable a port and mark a logical connect-change event, so that some | 945 | * Disable a port and mark a logical connect-change event, so that some |
946 | * time later khubd will disconnect() any existing usb_device on the port | 946 | * time later khubd will disconnect() any existing usb_device on the port |
947 | * and will re-enumerate if there actually is a device attached. | 947 | * and will re-enumerate if there actually is a device attached. |
948 | */ | 948 | */ |
949 | static void hub_port_logical_disconnect(struct usb_hub *hub, int port1) | 949 | static void hub_port_logical_disconnect(struct usb_hub *hub, int port1) |
950 | { | 950 | { |
951 | dev_dbg(hub->intfdev, "logical disconnect on port %d\n", port1); | 951 | dev_dbg(hub->intfdev, "logical disconnect on port %d\n", port1); |
952 | hub_port_disable(hub, port1, 1); | 952 | hub_port_disable(hub, port1, 1); |
953 | 953 | ||
954 | /* FIXME let caller ask to power down the port: | 954 | /* FIXME let caller ask to power down the port: |
955 | * - some devices won't enumerate without a VBUS power cycle | 955 | * - some devices won't enumerate without a VBUS power cycle |
956 | * - SRP saves power that way | 956 | * - SRP saves power that way |
957 | * - ... new call, TBD ... | 957 | * - ... new call, TBD ... |
958 | * That's easy if this hub can switch power per-port, and | 958 | * That's easy if this hub can switch power per-port, and |
959 | * khubd reactivates the port later (timer, SRP, etc). | 959 | * khubd reactivates the port later (timer, SRP, etc). |
960 | * Powerdown must be optional, because of reset/DFU. | 960 | * Powerdown must be optional, because of reset/DFU. |
961 | */ | 961 | */ |
962 | 962 | ||
963 | set_bit(port1, hub->change_bits); | 963 | set_bit(port1, hub->change_bits); |
964 | kick_khubd(hub); | 964 | kick_khubd(hub); |
965 | } | 965 | } |
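hub_port_logical_disconnect() illustrates the driver's deferral pattern: it does not tear the device down itself, it only sets the port's bit in hub->change_bits and wakes khubd, which later scans the bitmap and disconnects or re-enumerates. A simplified, single-threaded user-space sketch of that bitmap-and-worker idea; the names and the printf are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: a per-port "needs attention" bitmap plus a worker
 * that scans it, in the shape of hub->change_bits and khubd.
 */
static uint32_t change_bits;		/* bit N set == port N needs attention */

static void mark_port_changed(int port1)
{
	change_bits |= 1u << port1;
	/* kick_khubd(hub) would wake the hub worker here */
}

static void hub_worker_scan(int maxchild)
{
	int port1;

	for (port1 = 1; port1 <= maxchild; port1++)
		if (change_bits & (1u << port1)) {
			change_bits &= ~(1u << port1);
			printf("port %d: re-check connection\n", port1);
		}
}

int main(void)
{
	mark_port_changed(2);
	mark_port_changed(4);
	hub_worker_scan(4);
	return 0;
}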
966 | 966 | ||
967 | /** | 967 | /** |
968 | * usb_remove_device - disable a device's port on its parent hub | 968 | * usb_remove_device - disable a device's port on its parent hub |
969 | * @udev: device to be disabled and removed | 969 | * @udev: device to be disabled and removed |
970 | * Context: @udev locked, must be able to sleep. | 970 | * Context: @udev locked, must be able to sleep. |
971 | * | 971 | * |
972 | * After @udev's port has been disabled, khubd is notified and it will | 972 | * After @udev's port has been disabled, khubd is notified and it will |
973 | * see that the device has been disconnected. When the device is | 973 | * see that the device has been disconnected. When the device is |
974 | * physically unplugged and something is plugged in, the events will | 974 | * physically unplugged and something is plugged in, the events will |
975 | * be received and processed normally. | 975 | * be received and processed normally. |
976 | * | 976 | * |
977 | * Return: 0 if successful. A negative error code otherwise. | 977 | * Return: 0 if successful. A negative error code otherwise. |
978 | */ | 978 | */ |
979 | int usb_remove_device(struct usb_device *udev) | 979 | int usb_remove_device(struct usb_device *udev) |
980 | { | 980 | { |
981 | struct usb_hub *hub; | 981 | struct usb_hub *hub; |
982 | struct usb_interface *intf; | 982 | struct usb_interface *intf; |
983 | 983 | ||
984 | if (!udev->parent) /* Can't remove a root hub */ | 984 | if (!udev->parent) /* Can't remove a root hub */ |
985 | return -EINVAL; | 985 | return -EINVAL; |
986 | hub = usb_hub_to_struct_hub(udev->parent); | 986 | hub = usb_hub_to_struct_hub(udev->parent); |
987 | intf = to_usb_interface(hub->intfdev); | 987 | intf = to_usb_interface(hub->intfdev); |
988 | 988 | ||
989 | usb_autopm_get_interface(intf); | 989 | usb_autopm_get_interface(intf); |
990 | set_bit(udev->portnum, hub->removed_bits); | 990 | set_bit(udev->portnum, hub->removed_bits); |
991 | hub_port_logical_disconnect(hub, udev->portnum); | 991 | hub_port_logical_disconnect(hub, udev->portnum); |
992 | usb_autopm_put_interface(intf); | 992 | usb_autopm_put_interface(intf); |
993 | return 0; | 993 | return 0; |
994 | } | 994 | } |
995 | 995 | ||
996 | enum hub_activation_type { | 996 | enum hub_activation_type { |
997 | HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */ | 997 | HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */ |
998 | HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, | 998 | HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, |
999 | }; | 999 | }; |
1000 | 1000 | ||
1001 | static void hub_init_func2(struct work_struct *ws); | 1001 | static void hub_init_func2(struct work_struct *ws); |
1002 | static void hub_init_func3(struct work_struct *ws); | 1002 | static void hub_init_func3(struct work_struct *ws); |
1003 | 1003 | ||
1004 | static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | 1004 | static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) |
1005 | { | 1005 | { |
1006 | struct usb_device *hdev = hub->hdev; | 1006 | struct usb_device *hdev = hub->hdev; |
1007 | struct usb_hcd *hcd; | 1007 | struct usb_hcd *hcd; |
1008 | int ret; | 1008 | int ret; |
1009 | int port1; | 1009 | int port1; |
1010 | int status; | 1010 | int status; |
1011 | bool need_debounce_delay = false; | 1011 | bool need_debounce_delay = false; |
1012 | unsigned delay; | 1012 | unsigned delay; |
1013 | 1013 | ||
1014 | /* Continue a partial initialization */ | 1014 | /* Continue a partial initialization */ |
1015 | if (type == HUB_INIT2) | 1015 | if (type == HUB_INIT2) |
1016 | goto init2; | 1016 | goto init2; |
1017 | if (type == HUB_INIT3) | 1017 | if (type == HUB_INIT3) |
1018 | goto init3; | 1018 | goto init3; |
1019 | 1019 | ||
1020 | /* A superspeed hub, except for a root hub, has to use the Hub | 1020 | /* A superspeed hub, except for a root hub, has to use the Hub |
1021 | * Depth value as an offset into the route string to locate the | 1021 | * Depth value as an offset into the route string to locate the |
1022 | * bits it uses to determine the downstream port number. So the | 1022 | * bits it uses to determine the downstream port number. So the |
1023 | * hub driver should send a Set Hub Depth request to a superspeed | 1023 | * hub driver should send a Set Hub Depth request to a superspeed |
1024 | * hub after the hub has been configured, during initialization | 1024 | * hub after the hub has been configured, during initialization |
1025 | * or a reset procedure. | 1025 | * or a reset procedure. |
1026 | * | 1026 | * |
1027 | * After a resume, port power should still be on. | 1027 | * After a resume, port power should still be on. |
1028 | * For any other type of activation, turn it on. | 1028 | * For any other type of activation, turn it on. |
1029 | */ | 1029 | */ |
1030 | if (type != HUB_RESUME) { | 1030 | if (type != HUB_RESUME) { |
1031 | if (hdev->parent && hub_is_superspeed(hdev)) { | 1031 | if (hdev->parent && hub_is_superspeed(hdev)) { |
1032 | ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 1032 | ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
1033 | HUB_SET_DEPTH, USB_RT_HUB, | 1033 | HUB_SET_DEPTH, USB_RT_HUB, |
1034 | hdev->level - 1, 0, NULL, 0, | 1034 | hdev->level - 1, 0, NULL, 0, |
1035 | USB_CTRL_SET_TIMEOUT); | 1035 | USB_CTRL_SET_TIMEOUT); |
1036 | if (ret < 0) | 1036 | if (ret < 0) |
1037 | dev_err(hub->intfdev, | 1037 | dev_err(hub->intfdev, |
1038 | "set hub depth failed\n"); | 1038 | "set hub depth failed\n"); |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | /* Speed up system boot by using a delayed_work for the | 1041 | /* Speed up system boot by using a delayed_work for the |
1042 | * hub's initial power-up delays. This is pretty awkward | 1042 | * hub's initial power-up delays. This is pretty awkward |
1043 | * and the implementation looks like a home-brewed sort of | 1043 | * and the implementation looks like a home-brewed sort of |
1044 | * setjmp/longjmp, but it saves at least 100 ms for each | 1044 | * setjmp/longjmp, but it saves at least 100 ms for each |
1045 | * root hub (assuming usbcore is compiled into the kernel | 1045 | * root hub (assuming usbcore is compiled into the kernel |
1046 | * rather than as a module). It adds up. | 1046 | * rather than as a module). It adds up. |
1047 | * | 1047 | * |
1048 | * This can't be done for HUB_RESUME or HUB_RESET_RESUME | 1048 | * This can't be done for HUB_RESUME or HUB_RESET_RESUME |
1049 | * because for those activation types the ports have to be | 1049 | * because for those activation types the ports have to be |
1050 | * operational when we return. In theory this could be done | 1050 | * operational when we return. In theory this could be done |
1051 | * for HUB_POST_RESET, but it's easier not to. | 1051 | * for HUB_POST_RESET, but it's easier not to. |
1052 | */ | 1052 | */ |
1053 | if (type == HUB_INIT) { | 1053 | if (type == HUB_INIT) { |
1054 | delay = hub_power_on(hub, false); | 1054 | delay = hub_power_on(hub, false); |
1055 | PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2); | 1055 | PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2); |
1056 | schedule_delayed_work(&hub->init_work, | 1056 | schedule_delayed_work(&hub->init_work, |
1057 | msecs_to_jiffies(delay)); | 1057 | msecs_to_jiffies(delay)); |
1058 | 1058 | ||
1059 | /* Suppress autosuspend until init is done */ | 1059 | /* Suppress autosuspend until init is done */ |
1060 | usb_autopm_get_interface_no_resume( | 1060 | usb_autopm_get_interface_no_resume( |
1061 | to_usb_interface(hub->intfdev)); | 1061 | to_usb_interface(hub->intfdev)); |
1062 | return; /* Continues at init2: below */ | 1062 | return; /* Continues at init2: below */ |
1063 | } else if (type == HUB_RESET_RESUME) { | 1063 | } else if (type == HUB_RESET_RESUME) { |
1064 | /* The internal host controller state for the hub device | 1064 | /* The internal host controller state for the hub device |
1065 | * may be gone after a host power loss on system resume. | 1065 | * may be gone after a host power loss on system resume. |
1066 | * Update the device's info so the HW knows it's a hub. | 1066 | * Update the device's info so the HW knows it's a hub. |
1067 | */ | 1067 | */ |
1068 | hcd = bus_to_hcd(hdev->bus); | 1068 | hcd = bus_to_hcd(hdev->bus); |
1069 | if (hcd->driver->update_hub_device) { | 1069 | if (hcd->driver->update_hub_device) { |
1070 | ret = hcd->driver->update_hub_device(hcd, hdev, | 1070 | ret = hcd->driver->update_hub_device(hcd, hdev, |
1071 | &hub->tt, GFP_NOIO); | 1071 | &hub->tt, GFP_NOIO); |
1072 | if (ret < 0) { | 1072 | if (ret < 0) { |
1073 | dev_err(hub->intfdev, "Host not " | 1073 | dev_err(hub->intfdev, "Host not " |
1074 | "accepting hub info " | 1074 | "accepting hub info " |
1075 | "update.\n"); | 1075 | "update.\n"); |
1076 | dev_err(hub->intfdev, "LS/FS devices " | 1076 | dev_err(hub->intfdev, "LS/FS devices " |
1077 | "and hubs may not work " | 1077 | "and hubs may not work " |
1078 | "under this hub\n."); | 1078 | "under this hub\n."); |
1079 | } | 1079 | } |
1080 | } | 1080 | } |
1081 | hub_power_on(hub, true); | 1081 | hub_power_on(hub, true); |
1082 | } else { | 1082 | } else { |
1083 | hub_power_on(hub, true); | 1083 | hub_power_on(hub, true); |
1084 | } | 1084 | } |
1085 | } | 1085 | } |
1086 | init2: | 1086 | init2: |
1087 | 1087 | ||
1088 | /* Check each port and set hub->change_bits to let khubd know | 1088 | /* Check each port and set hub->change_bits to let khubd know |
1089 | * which ports need attention. | 1089 | * which ports need attention. |
1090 | */ | 1090 | */ |
1091 | for (port1 = 1; port1 <= hdev->maxchild; ++port1) { | 1091 | for (port1 = 1; port1 <= hdev->maxchild; ++port1) { |
1092 | struct usb_device *udev = hub->ports[port1 - 1]->child; | 1092 | struct usb_device *udev = hub->ports[port1 - 1]->child; |
1093 | u16 portstatus, portchange; | 1093 | u16 portstatus, portchange; |
1094 | 1094 | ||
1095 | portstatus = portchange = 0; | 1095 | portstatus = portchange = 0; |
1096 | status = hub_port_status(hub, port1, &portstatus, &portchange); | 1096 | status = hub_port_status(hub, port1, &portstatus, &portchange); |
1097 | if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) | 1097 | if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) |
1098 | dev_dbg(hub->intfdev, | 1098 | dev_dbg(hub->intfdev, |
1099 | "port %d: status %04x change %04x\n", | 1099 | "port %d: status %04x change %04x\n", |
1100 | port1, portstatus, portchange); | 1100 | port1, portstatus, portchange); |
1101 | 1101 | ||
1102 | /* After anything other than HUB_RESUME (i.e., initialization | 1102 | /* After anything other than HUB_RESUME (i.e., initialization |
1103 | * or any sort of reset), every port should be disabled. | 1103 | * or any sort of reset), every port should be disabled. |
1104 | * Unconnected ports should likewise be disabled (paranoia), | 1104 | * Unconnected ports should likewise be disabled (paranoia), |
1105 | * and so should ports for which we have no usb_device. | 1105 | * and so should ports for which we have no usb_device. |
1106 | */ | 1106 | */ |
1107 | if ((portstatus & USB_PORT_STAT_ENABLE) && ( | 1107 | if ((portstatus & USB_PORT_STAT_ENABLE) && ( |
1108 | type != HUB_RESUME || | 1108 | type != HUB_RESUME || |
1109 | !(portstatus & USB_PORT_STAT_CONNECTION) || | 1109 | !(portstatus & USB_PORT_STAT_CONNECTION) || |
1110 | !udev || | 1110 | !udev || |
1111 | udev->state == USB_STATE_NOTATTACHED)) { | 1111 | udev->state == USB_STATE_NOTATTACHED)) { |
1112 | /* | 1112 | /* |
1113 | * USB3 protocol ports will automatically transition to the | 1113 | * USB3 protocol ports will automatically transition to the |
1114 | * Enabled state when they detect a USB 3.0 device attach. | 1114 | * Enabled state when they detect a USB 3.0 device attach. |
1115 | * Do not disable USB3 protocol ports; just pretend | 1115 | * Do not disable USB3 protocol ports; just pretend |
1116 | * power was lost. | 1116 | * power was lost. |
1117 | */ | 1117 | */ |
1118 | portstatus &= ~USB_PORT_STAT_ENABLE; | 1118 | portstatus &= ~USB_PORT_STAT_ENABLE; |
1119 | if (!hub_is_superspeed(hdev)) | 1119 | if (!hub_is_superspeed(hdev)) |
1120 | usb_clear_port_feature(hdev, port1, | 1120 | usb_clear_port_feature(hdev, port1, |
1121 | USB_PORT_FEAT_ENABLE); | 1121 | USB_PORT_FEAT_ENABLE); |
1122 | } | 1122 | } |
1123 | 1123 | ||
1124 | /* Clear status-change flags; we'll debounce later */ | 1124 | /* Clear status-change flags; we'll debounce later */ |
1125 | if (portchange & USB_PORT_STAT_C_CONNECTION) { | 1125 | if (portchange & USB_PORT_STAT_C_CONNECTION) { |
1126 | need_debounce_delay = true; | 1126 | need_debounce_delay = true; |
1127 | usb_clear_port_feature(hub->hdev, port1, | 1127 | usb_clear_port_feature(hub->hdev, port1, |
1128 | USB_PORT_FEAT_C_CONNECTION); | 1128 | USB_PORT_FEAT_C_CONNECTION); |
1129 | } | 1129 | } |
1130 | if (portchange & USB_PORT_STAT_C_ENABLE) { | 1130 | if (portchange & USB_PORT_STAT_C_ENABLE) { |
1131 | need_debounce_delay = true; | 1131 | need_debounce_delay = true; |
1132 | usb_clear_port_feature(hub->hdev, port1, | 1132 | usb_clear_port_feature(hub->hdev, port1, |
1133 | USB_PORT_FEAT_C_ENABLE); | 1133 | USB_PORT_FEAT_C_ENABLE); |
1134 | } | 1134 | } |
1135 | if (portchange & USB_PORT_STAT_C_RESET) { | 1135 | if (portchange & USB_PORT_STAT_C_RESET) { |
1136 | need_debounce_delay = true; | 1136 | need_debounce_delay = true; |
1137 | usb_clear_port_feature(hub->hdev, port1, | 1137 | usb_clear_port_feature(hub->hdev, port1, |
1138 | USB_PORT_FEAT_C_RESET); | 1138 | USB_PORT_FEAT_C_RESET); |
1139 | } | 1139 | } |
1140 | if ((portchange & USB_PORT_STAT_C_BH_RESET) && | 1140 | if ((portchange & USB_PORT_STAT_C_BH_RESET) && |
1141 | hub_is_superspeed(hub->hdev)) { | 1141 | hub_is_superspeed(hub->hdev)) { |
1142 | need_debounce_delay = true; | 1142 | need_debounce_delay = true; |
1143 | usb_clear_port_feature(hub->hdev, port1, | 1143 | usb_clear_port_feature(hub->hdev, port1, |
1144 | USB_PORT_FEAT_C_BH_PORT_RESET); | 1144 | USB_PORT_FEAT_C_BH_PORT_RESET); |
1145 | } | 1145 | } |
1146 | /* We can forget about a "removed" device when there's a | 1146 | /* We can forget about a "removed" device when there's a |
1147 | * physical disconnect or the connect status changes. | 1147 | * physical disconnect or the connect status changes. |
1148 | */ | 1148 | */ |
1149 | if (!(portstatus & USB_PORT_STAT_CONNECTION) || | 1149 | if (!(portstatus & USB_PORT_STAT_CONNECTION) || |
1150 | (portchange & USB_PORT_STAT_C_CONNECTION)) | 1150 | (portchange & USB_PORT_STAT_C_CONNECTION)) |
1151 | clear_bit(port1, hub->removed_bits); | 1151 | clear_bit(port1, hub->removed_bits); |
1152 | 1152 | ||
1153 | if (!udev || udev->state == USB_STATE_NOTATTACHED) { | 1153 | if (!udev || udev->state == USB_STATE_NOTATTACHED) { |
1154 | /* Tell khubd to disconnect the device or | 1154 | /* Tell khubd to disconnect the device or |
1155 | * check for a new connection | 1155 | * check for a new connection |
1156 | */ | 1156 | */ |
1157 | if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) | 1157 | if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) |
1158 | set_bit(port1, hub->change_bits); | 1158 | set_bit(port1, hub->change_bits); |
1159 | 1159 | ||
1160 | } else if (portstatus & USB_PORT_STAT_ENABLE) { | 1160 | } else if (portstatus & USB_PORT_STAT_ENABLE) { |
1161 | bool port_resumed = (portstatus & | 1161 | bool port_resumed = (portstatus & |
1162 | USB_PORT_STAT_LINK_STATE) == | 1162 | USB_PORT_STAT_LINK_STATE) == |
1163 | USB_SS_PORT_LS_U0; | 1163 | USB_SS_PORT_LS_U0; |
1164 | /* The power session apparently survived the resume. | 1164 | /* The power session apparently survived the resume. |
1165 | * If there was an overcurrent or suspend change | 1165 | * If there was an overcurrent or suspend change |
1166 | * (i.e., remote wakeup request), have khubd | 1166 | * (i.e., remote wakeup request), have khubd |
1167 | * take care of it. Look at the port link state | 1167 | * take care of it. Look at the port link state |
1168 | * for USB 3.0 hubs, since they don't have a suspend | 1168 | * for USB 3.0 hubs, since they don't have a suspend |
1169 | * change bit, and they don't set the port link change | 1169 | * change bit, and they don't set the port link change |
1170 | * bit on device-initiated resume. | 1170 | * bit on device-initiated resume. |
1171 | */ | 1171 | */ |
1172 | if (portchange || (hub_is_superspeed(hub->hdev) && | 1172 | if (portchange || (hub_is_superspeed(hub->hdev) && |
1173 | port_resumed)) | 1173 | port_resumed)) |
1174 | set_bit(port1, hub->change_bits); | 1174 | set_bit(port1, hub->change_bits); |
1175 | 1175 | ||
1176 | } else if (udev->persist_enabled) { | 1176 | } else if (udev->persist_enabled) { |
1177 | struct usb_port *port_dev = hub->ports[port1 - 1]; | 1177 | struct usb_port *port_dev = hub->ports[port1 - 1]; |
1178 | 1178 | ||
1179 | #ifdef CONFIG_PM | 1179 | #ifdef CONFIG_PM |
1180 | udev->reset_resume = 1; | 1180 | udev->reset_resume = 1; |
1181 | #endif | 1181 | #endif |
1182 | /* Don't set the change_bits when the device | 1182 | /* Don't set the change_bits when the device |
1183 | * was powered off. | 1183 | * was powered off. |
1184 | */ | 1184 | */ |
1185 | if (port_dev->power_is_on) | 1185 | if (port_dev->power_is_on) |
1186 | set_bit(port1, hub->change_bits); | 1186 | set_bit(port1, hub->change_bits); |
1187 | 1187 | ||
1188 | } else { | 1188 | } else { |
1189 | /* The power session is gone; tell khubd */ | 1189 | /* The power session is gone; tell khubd */ |
1190 | usb_set_device_state(udev, USB_STATE_NOTATTACHED); | 1190 | usb_set_device_state(udev, USB_STATE_NOTATTACHED); |
1191 | set_bit(port1, hub->change_bits); | 1191 | set_bit(port1, hub->change_bits); |
1192 | } | 1192 | } |
1193 | } | 1193 | } |
1194 | 1194 | ||
1195 | /* If no port-status-change flags were set, we don't need any | 1195 | /* If no port-status-change flags were set, we don't need any |
1196 | * debouncing. If flags were set we can try to debounce the | 1196 | * debouncing. If flags were set we can try to debounce the |
1197 | * ports all at once right now, instead of letting khubd do them | 1197 | * ports all at once right now, instead of letting khubd do them |
1198 | * one at a time later on. | 1198 | * one at a time later on. |
1199 | * | 1199 | * |
1200 | * If any port-status changes do occur during this delay, khubd | 1200 | * If any port-status changes do occur during this delay, khubd |
1201 | * will see them later and handle them normally. | 1201 | * will see them later and handle them normally. |
1202 | */ | 1202 | */ |
1203 | if (need_debounce_delay) { | 1203 | if (need_debounce_delay) { |
1204 | delay = HUB_DEBOUNCE_STABLE; | 1204 | delay = HUB_DEBOUNCE_STABLE; |
1205 | 1205 | ||
1206 | /* Don't do a long sleep inside a workqueue routine */ | 1206 | /* Don't do a long sleep inside a workqueue routine */ |
1207 | if (type == HUB_INIT2) { | 1207 | if (type == HUB_INIT2) { |
1208 | PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3); | 1208 | PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3); |
1209 | schedule_delayed_work(&hub->init_work, | 1209 | schedule_delayed_work(&hub->init_work, |
1210 | msecs_to_jiffies(delay)); | 1210 | msecs_to_jiffies(delay)); |
1211 | return; /* Continues at init3: below */ | 1211 | return; /* Continues at init3: below */ |
1212 | } else { | 1212 | } else { |
1213 | msleep(delay); | 1213 | msleep(delay); |
1214 | } | 1214 | } |
1215 | } | 1215 | } |
1216 | init3: | 1216 | init3: |
1217 | hub->quiescing = 0; | 1217 | hub->quiescing = 0; |
1218 | 1218 | ||
1219 | status = usb_submit_urb(hub->urb, GFP_NOIO); | 1219 | status = usb_submit_urb(hub->urb, GFP_NOIO); |
1220 | if (status < 0) | 1220 | if (status < 0) |
1221 | dev_err(hub->intfdev, "activate --> %d\n", status); | 1221 | dev_err(hub->intfdev, "activate --> %d\n", status); |
1222 | if (hub->has_indicators && blinkenlights) | 1222 | if (hub->has_indicators && blinkenlights) |
1223 | schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD); | 1223 | schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD); |
1224 | 1224 | ||
1225 | /* Scan all ports that need attention */ | 1225 | /* Scan all ports that need attention */ |
1226 | kick_khubd(hub); | 1226 | kick_khubd(hub); |
1227 | 1227 | ||
1228 | /* Allow autosuspend if it was suppressed */ | 1228 | /* Allow autosuspend if it was suppressed */ |
1229 | if (type <= HUB_INIT3) | 1229 | if (type <= HUB_INIT3) |
1230 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); | 1230 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); |
1231 | } | 1231 | } |
1232 | 1232 | ||
1233 | /* Implement the continuations for the delays above */ | 1233 | /* Implement the continuations for the delays above */ |
1234 | static void hub_init_func2(struct work_struct *ws) | 1234 | static void hub_init_func2(struct work_struct *ws) |
1235 | { | 1235 | { |
1236 | struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); | 1236 | struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); |
1237 | 1237 | ||
1238 | hub_activate(hub, HUB_INIT2); | 1238 | hub_activate(hub, HUB_INIT2); |
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | static void hub_init_func3(struct work_struct *ws) | 1241 | static void hub_init_func3(struct work_struct *ws) |
1242 | { | 1242 | { |
1243 | struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); | 1243 | struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); |
1244 | 1244 | ||
1245 | hub_activate(hub, HUB_INIT3); | 1245 | hub_activate(hub, HUB_INIT3); |
1246 | } | 1246 | } |
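hub_init_func2/3 are the continuations the long comment in hub_activate() describes: instead of sleeping inside the work item, HUB_INIT stops after powering the ports and reschedules itself as HUB_INIT2, which in turn may reschedule as HUB_INIT3 to cover the debounce delay. A loose user-space sketch of that resumable-stages idea; the stages, delays and printf are illustrative, not the driver's exact steps:

#include <stdio.h>

/*
 * Illustrative only: each stage returns the delay it needs, and the
 * caller re-enters at the next stage after that delay (the driver does
 * this with PREPARE_DELAYED_WORK/schedule_delayed_work).
 */
enum stage { STAGE_INIT, STAGE_INIT2, STAGE_INIT3, STAGE_DONE };

struct ctx { enum stage next; };

/* Returns the number of ms to wait before calling again, 0 when done. */
static unsigned activate_step(struct ctx *c)
{
	switch (c->next) {
	case STAGE_INIT:
		/* power on ports, then wait for power-good */
		c->next = STAGE_INIT2;
		return 100;
	case STAGE_INIT2:
		/* clear stale change bits, then debounce if needed */
		c->next = STAGE_INIT3;
		return 100;
	case STAGE_INIT3:
	default:
		/* submit the status URB, kick the hub worker */
		c->next = STAGE_DONE;
		return 0;
	}
}

int main(void)
{
	struct ctx c = { STAGE_INIT };
	unsigned delay;

	while ((delay = activate_step(&c)) != 0)
		printf("would sleep %u ms before the next stage\n", delay);
	return 0;
}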
1247 | 1247 | ||
1248 | enum hub_quiescing_type { | 1248 | enum hub_quiescing_type { |
1249 | HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND | 1249 | HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND |
1250 | }; | 1250 | }; |
1251 | 1251 | ||
1252 | static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) | 1252 | static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) |
1253 | { | 1253 | { |
1254 | struct usb_device *hdev = hub->hdev; | 1254 | struct usb_device *hdev = hub->hdev; |
1255 | int i; | 1255 | int i; |
1256 | 1256 | ||
1257 | cancel_delayed_work_sync(&hub->init_work); | 1257 | cancel_delayed_work_sync(&hub->init_work); |
1258 | 1258 | ||
1259 | /* khubd and related activity won't re-trigger */ | 1259 | /* khubd and related activity won't re-trigger */ |
1260 | hub->quiescing = 1; | 1260 | hub->quiescing = 1; |
1261 | 1261 | ||
1262 | if (type != HUB_SUSPEND) { | 1262 | if (type != HUB_SUSPEND) { |
1263 | /* Disconnect all the children */ | 1263 | /* Disconnect all the children */ |
1264 | for (i = 0; i < hdev->maxchild; ++i) { | 1264 | for (i = 0; i < hdev->maxchild; ++i) { |
1265 | if (hub->ports[i]->child) | 1265 | if (hub->ports[i]->child) |
1266 | usb_disconnect(&hub->ports[i]->child); | 1266 | usb_disconnect(&hub->ports[i]->child); |
1267 | } | 1267 | } |
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | /* Stop khubd and related activity */ | 1270 | /* Stop khubd and related activity */ |
1271 | usb_kill_urb(hub->urb); | 1271 | usb_kill_urb(hub->urb); |
1272 | if (hub->has_indicators) | 1272 | if (hub->has_indicators) |
1273 | cancel_delayed_work_sync(&hub->leds); | 1273 | cancel_delayed_work_sync(&hub->leds); |
1274 | if (hub->tt.hub) | 1274 | if (hub->tt.hub) |
1275 | flush_work(&hub->tt.clear_work); | 1275 | flush_work(&hub->tt.clear_work); |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | /* caller has locked the hub device */ | 1278 | /* caller has locked the hub device */ |
1279 | static int hub_pre_reset(struct usb_interface *intf) | 1279 | static int hub_pre_reset(struct usb_interface *intf) |
1280 | { | 1280 | { |
1281 | struct usb_hub *hub = usb_get_intfdata(intf); | 1281 | struct usb_hub *hub = usb_get_intfdata(intf); |
1282 | 1282 | ||
1283 | hub_quiesce(hub, HUB_PRE_RESET); | 1283 | hub_quiesce(hub, HUB_PRE_RESET); |
1284 | return 0; | 1284 | return 0; |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | /* caller has locked the hub device */ | 1287 | /* caller has locked the hub device */ |
1288 | static int hub_post_reset(struct usb_interface *intf) | 1288 | static int hub_post_reset(struct usb_interface *intf) |
1289 | { | 1289 | { |
1290 | struct usb_hub *hub = usb_get_intfdata(intf); | 1290 | struct usb_hub *hub = usb_get_intfdata(intf); |
1291 | 1291 | ||
1292 | hub_activate(hub, HUB_POST_RESET); | 1292 | hub_activate(hub, HUB_POST_RESET); |
1293 | return 0; | 1293 | return 0; |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | static int hub_configure(struct usb_hub *hub, | 1296 | static int hub_configure(struct usb_hub *hub, |
1297 | struct usb_endpoint_descriptor *endpoint) | 1297 | struct usb_endpoint_descriptor *endpoint) |
1298 | { | 1298 | { |
1299 | struct usb_hcd *hcd; | 1299 | struct usb_hcd *hcd; |
1300 | struct usb_device *hdev = hub->hdev; | 1300 | struct usb_device *hdev = hub->hdev; |
1301 | struct device *hub_dev = hub->intfdev; | 1301 | struct device *hub_dev = hub->intfdev; |
1302 | u16 hubstatus, hubchange; | 1302 | u16 hubstatus, hubchange; |
1303 | u16 wHubCharacteristics; | 1303 | u16 wHubCharacteristics; |
1304 | unsigned int pipe; | 1304 | unsigned int pipe; |
1305 | int maxp, ret, i; | 1305 | int maxp, ret, i; |
1306 | char *message = "out of memory"; | 1306 | char *message = "out of memory"; |
1307 | unsigned unit_load; | 1307 | unsigned unit_load; |
1308 | unsigned full_load; | 1308 | unsigned full_load; |
1309 | 1309 | ||
1310 | hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL); | 1310 | hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL); |
1311 | if (!hub->buffer) { | 1311 | if (!hub->buffer) { |
1312 | ret = -ENOMEM; | 1312 | ret = -ENOMEM; |
1313 | goto fail; | 1313 | goto fail; |
1314 | } | 1314 | } |
1315 | 1315 | ||
1316 | hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL); | 1316 | hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL); |
1317 | if (!hub->status) { | 1317 | if (!hub->status) { |
1318 | ret = -ENOMEM; | 1318 | ret = -ENOMEM; |
1319 | goto fail; | 1319 | goto fail; |
1320 | } | 1320 | } |
1321 | mutex_init(&hub->status_mutex); | 1321 | mutex_init(&hub->status_mutex); |
1322 | 1322 | ||
1323 | hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); | 1323 | hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); |
1324 | if (!hub->descriptor) { | 1324 | if (!hub->descriptor) { |
1325 | ret = -ENOMEM; | 1325 | ret = -ENOMEM; |
1326 | goto fail; | 1326 | goto fail; |
1327 | } | 1327 | } |
1328 | 1328 | ||
1329 | /* Request the entire hub descriptor. | 1329 | /* Request the entire hub descriptor. |
1330 | * hub->descriptor can handle USB_MAXCHILDREN ports, | 1330 | * hub->descriptor can handle USB_MAXCHILDREN ports, |
1331 | * but the hub can/will return fewer bytes here. | 1331 | * but the hub can/will return fewer bytes here. |
1332 | */ | 1332 | */ |
1333 | ret = get_hub_descriptor(hdev, hub->descriptor); | 1333 | ret = get_hub_descriptor(hdev, hub->descriptor); |
1334 | if (ret < 0) { | 1334 | if (ret < 0) { |
1335 | message = "can't read hub descriptor"; | 1335 | message = "can't read hub descriptor"; |
1336 | goto fail; | 1336 | goto fail; |
1337 | } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { | 1337 | } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { |
1338 | message = "hub has too many ports!"; | 1338 | message = "hub has too many ports!"; |
1339 | ret = -ENODEV; | 1339 | ret = -ENODEV; |
1340 | goto fail; | 1340 | goto fail; |
1341 | } else if (hub->descriptor->bNbrPorts == 0) { | 1341 | } else if (hub->descriptor->bNbrPorts == 0) { |
1342 | message = "hub doesn't have any ports!"; | 1342 | message = "hub doesn't have any ports!"; |
1343 | ret = -ENODEV; | 1343 | ret = -ENODEV; |
1344 | goto fail; | 1344 | goto fail; |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | hdev->maxchild = hub->descriptor->bNbrPorts; | 1347 | hdev->maxchild = hub->descriptor->bNbrPorts; |
1348 | dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild, | 1348 | dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild, |
1349 | (hdev->maxchild == 1) ? "" : "s"); | 1349 | (hdev->maxchild == 1) ? "" : "s"); |
1350 | 1350 | ||
1351 | hub->ports = kzalloc(hdev->maxchild * sizeof(struct usb_port *), | 1351 | hub->ports = kzalloc(hdev->maxchild * sizeof(struct usb_port *), |
1352 | GFP_KERNEL); | 1352 | GFP_KERNEL); |
1353 | if (!hub->ports) { | 1353 | if (!hub->ports) { |
1354 | ret = -ENOMEM; | 1354 | ret = -ENOMEM; |
1355 | goto fail; | 1355 | goto fail; |
1356 | } | 1356 | } |
1357 | 1357 | ||
1358 | wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); | 1358 | wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); |
1359 | if (hub_is_superspeed(hdev)) { | 1359 | if (hub_is_superspeed(hdev)) { |
1360 | unit_load = 150; | 1360 | unit_load = 150; |
1361 | full_load = 900; | 1361 | full_load = 900; |
1362 | } else { | 1362 | } else { |
1363 | unit_load = 100; | 1363 | unit_load = 100; |
1364 | full_load = 500; | 1364 | full_load = 500; |
1365 | } | 1365 | } |
1366 | 1366 | ||
1367 | /* FIXME for USB 3.0, skip for now */ | 1367 | /* FIXME for USB 3.0, skip for now */ |
1368 | if ((wHubCharacteristics & HUB_CHAR_COMPOUND) && | 1368 | if ((wHubCharacteristics & HUB_CHAR_COMPOUND) && |
1369 | !(hub_is_superspeed(hdev))) { | 1369 | !(hub_is_superspeed(hdev))) { |
1370 | int i; | 1370 | int i; |
1371 | char portstr[USB_MAXCHILDREN + 1]; | 1371 | char portstr[USB_MAXCHILDREN + 1]; |
1372 | 1372 | ||
1373 | for (i = 0; i < hdev->maxchild; i++) | 1373 | for (i = 0; i < hdev->maxchild; i++) |
1374 | portstr[i] = hub->descriptor->u.hs.DeviceRemovable | 1374 | portstr[i] = hub->descriptor->u.hs.DeviceRemovable |
1375 | [((i + 1) / 8)] & (1 << ((i + 1) % 8)) | 1375 | [((i + 1) / 8)] & (1 << ((i + 1) % 8)) |
1376 | ? 'F' : 'R'; | 1376 | ? 'F' : 'R'; |
1377 | portstr[hdev->maxchild] = 0; | 1377 | portstr[hdev->maxchild] = 0; |
1378 | dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr); | 1378 | dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr); |
1379 | } else | 1379 | } else |
1380 | dev_dbg(hub_dev, "standalone hub\n"); | 1380 | dev_dbg(hub_dev, "standalone hub\n"); |
1381 | 1381 | ||
1382 | switch (wHubCharacteristics & HUB_CHAR_LPSM) { | 1382 | switch (wHubCharacteristics & HUB_CHAR_LPSM) { |
1383 | case HUB_CHAR_COMMON_LPSM: | 1383 | case HUB_CHAR_COMMON_LPSM: |
1384 | dev_dbg(hub_dev, "ganged power switching\n"); | 1384 | dev_dbg(hub_dev, "ganged power switching\n"); |
1385 | break; | 1385 | break; |
1386 | case HUB_CHAR_INDV_PORT_LPSM: | 1386 | case HUB_CHAR_INDV_PORT_LPSM: |
1387 | dev_dbg(hub_dev, "individual port power switching\n"); | 1387 | dev_dbg(hub_dev, "individual port power switching\n"); |
1388 | break; | 1388 | break; |
1389 | case HUB_CHAR_NO_LPSM: | 1389 | case HUB_CHAR_NO_LPSM: |
1390 | case HUB_CHAR_LPSM: | 1390 | case HUB_CHAR_LPSM: |
1391 | dev_dbg(hub_dev, "no power switching (usb 1.0)\n"); | 1391 | dev_dbg(hub_dev, "no power switching (usb 1.0)\n"); |
1392 | break; | 1392 | break; |
1393 | } | 1393 | } |
1394 | 1394 | ||
1395 | switch (wHubCharacteristics & HUB_CHAR_OCPM) { | 1395 | switch (wHubCharacteristics & HUB_CHAR_OCPM) { |
1396 | case HUB_CHAR_COMMON_OCPM: | 1396 | case HUB_CHAR_COMMON_OCPM: |
1397 | dev_dbg(hub_dev, "global over-current protection\n"); | 1397 | dev_dbg(hub_dev, "global over-current protection\n"); |
1398 | break; | 1398 | break; |
1399 | case HUB_CHAR_INDV_PORT_OCPM: | 1399 | case HUB_CHAR_INDV_PORT_OCPM: |
1400 | dev_dbg(hub_dev, "individual port over-current protection\n"); | 1400 | dev_dbg(hub_dev, "individual port over-current protection\n"); |
1401 | break; | 1401 | break; |
1402 | case HUB_CHAR_NO_OCPM: | 1402 | case HUB_CHAR_NO_OCPM: |
1403 | case HUB_CHAR_OCPM: | 1403 | case HUB_CHAR_OCPM: |
1404 | dev_dbg(hub_dev, "no over-current protection\n"); | 1404 | dev_dbg(hub_dev, "no over-current protection\n"); |
1405 | break; | 1405 | break; |
1406 | } | 1406 | } |
1407 | 1407 | ||
1408 | spin_lock_init (&hub->tt.lock); | 1408 | spin_lock_init (&hub->tt.lock); |
1409 | INIT_LIST_HEAD (&hub->tt.clear_list); | 1409 | INIT_LIST_HEAD (&hub->tt.clear_list); |
1410 | INIT_WORK(&hub->tt.clear_work, hub_tt_work); | 1410 | INIT_WORK(&hub->tt.clear_work, hub_tt_work); |
1411 | switch (hdev->descriptor.bDeviceProtocol) { | 1411 | switch (hdev->descriptor.bDeviceProtocol) { |
1412 | case USB_HUB_PR_FS: | 1412 | case USB_HUB_PR_FS: |
1413 | break; | 1413 | break; |
1414 | case USB_HUB_PR_HS_SINGLE_TT: | 1414 | case USB_HUB_PR_HS_SINGLE_TT: |
1415 | dev_dbg(hub_dev, "Single TT\n"); | 1415 | dev_dbg(hub_dev, "Single TT\n"); |
1416 | hub->tt.hub = hdev; | 1416 | hub->tt.hub = hdev; |
1417 | break; | 1417 | break; |
1418 | case USB_HUB_PR_HS_MULTI_TT: | 1418 | case USB_HUB_PR_HS_MULTI_TT: |
1419 | ret = usb_set_interface(hdev, 0, 1); | 1419 | ret = usb_set_interface(hdev, 0, 1); |
1420 | if (ret == 0) { | 1420 | if (ret == 0) { |
1421 | dev_dbg(hub_dev, "TT per port\n"); | 1421 | dev_dbg(hub_dev, "TT per port\n"); |
1422 | hub->tt.multi = 1; | 1422 | hub->tt.multi = 1; |
1423 | } else | 1423 | } else |
1424 | dev_err(hub_dev, "Using single TT (err %d)\n", | 1424 | dev_err(hub_dev, "Using single TT (err %d)\n", |
1425 | ret); | 1425 | ret); |
1426 | hub->tt.hub = hdev; | 1426 | hub->tt.hub = hdev; |
1427 | break; | 1427 | break; |
1428 | case USB_HUB_PR_SS: | 1428 | case USB_HUB_PR_SS: |
1429 | /* USB 3.0 hubs don't have a TT */ | 1429 | /* USB 3.0 hubs don't have a TT */ |
1430 | break; | 1430 | break; |
1431 | default: | 1431 | default: |
1432 | dev_dbg(hub_dev, "Unrecognized hub protocol %d\n", | 1432 | dev_dbg(hub_dev, "Unrecognized hub protocol %d\n", |
1433 | hdev->descriptor.bDeviceProtocol); | 1433 | hdev->descriptor.bDeviceProtocol); |
1434 | break; | 1434 | break; |
1435 | } | 1435 | } |
1436 | 1436 | ||
1437 | /* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */ | 1437 | /* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */ |
1438 | switch (wHubCharacteristics & HUB_CHAR_TTTT) { | 1438 | switch (wHubCharacteristics & HUB_CHAR_TTTT) { |
1439 | case HUB_TTTT_8_BITS: | 1439 | case HUB_TTTT_8_BITS: |
1440 | if (hdev->descriptor.bDeviceProtocol != 0) { | 1440 | if (hdev->descriptor.bDeviceProtocol != 0) { |
1441 | hub->tt.think_time = 666; | 1441 | hub->tt.think_time = 666; |
1442 | dev_dbg(hub_dev, "TT requires at most %d " | 1442 | dev_dbg(hub_dev, "TT requires at most %d " |
1443 | "FS bit times (%d ns)\n", | 1443 | "FS bit times (%d ns)\n", |
1444 | 8, hub->tt.think_time); | 1444 | 8, hub->tt.think_time); |
1445 | } | 1445 | } |
1446 | break; | 1446 | break; |
1447 | case HUB_TTTT_16_BITS: | 1447 | case HUB_TTTT_16_BITS: |
1448 | hub->tt.think_time = 666 * 2; | 1448 | hub->tt.think_time = 666 * 2; |
1449 | dev_dbg(hub_dev, "TT requires at most %d " | 1449 | dev_dbg(hub_dev, "TT requires at most %d " |
1450 | "FS bit times (%d ns)\n", | 1450 | "FS bit times (%d ns)\n", |
1451 | 16, hub->tt.think_time); | 1451 | 16, hub->tt.think_time); |
1452 | break; | 1452 | break; |
1453 | case HUB_TTTT_24_BITS: | 1453 | case HUB_TTTT_24_BITS: |
1454 | hub->tt.think_time = 666 * 3; | 1454 | hub->tt.think_time = 666 * 3; |
1455 | dev_dbg(hub_dev, "TT requires at most %d " | 1455 | dev_dbg(hub_dev, "TT requires at most %d " |
1456 | "FS bit times (%d ns)\n", | 1456 | "FS bit times (%d ns)\n", |
1457 | 24, hub->tt.think_time); | 1457 | 24, hub->tt.think_time); |
1458 | break; | 1458 | break; |
1459 | case HUB_TTTT_32_BITS: | 1459 | case HUB_TTTT_32_BITS: |
1460 | hub->tt.think_time = 666 * 4; | 1460 | hub->tt.think_time = 666 * 4; |
1461 | dev_dbg(hub_dev, "TT requires at most %d " | 1461 | dev_dbg(hub_dev, "TT requires at most %d " |
1462 | "FS bit times (%d ns)\n", | 1462 | "FS bit times (%d ns)\n", |
1463 | 32, hub->tt.think_time); | 1463 | 32, hub->tt.think_time); |
1464 | break; | 1464 | break; |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | /* probe() zeroes hub->indicator[] */ | 1467 | /* probe() zeroes hub->indicator[] */ |
1468 | if (wHubCharacteristics & HUB_CHAR_PORTIND) { | 1468 | if (wHubCharacteristics & HUB_CHAR_PORTIND) { |
1469 | hub->has_indicators = 1; | 1469 | hub->has_indicators = 1; |
1470 | dev_dbg(hub_dev, "Port indicators are supported\n"); | 1470 | dev_dbg(hub_dev, "Port indicators are supported\n"); |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | dev_dbg(hub_dev, "power on to power good time: %dms\n", | 1473 | dev_dbg(hub_dev, "power on to power good time: %dms\n", |
1474 | hub->descriptor->bPwrOn2PwrGood * 2); | 1474 | hub->descriptor->bPwrOn2PwrGood * 2); |
1475 | 1475 | ||
1476 | /* power budgeting mostly matters with bus-powered hubs, | 1476 | /* power budgeting mostly matters with bus-powered hubs, |
1477 | * and battery-powered root hubs (may provide just 8 mA). | 1477 | * and battery-powered root hubs (may provide just 8 mA). |
1478 | */ | 1478 | */ |
1479 | ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus); | 1479 | ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus); |
1480 | if (ret) { | 1480 | if (ret) { |
1481 | message = "can't get hub status"; | 1481 | message = "can't get hub status"; |
1482 | goto fail; | 1482 | goto fail; |
1483 | } | 1483 | } |
1484 | hcd = bus_to_hcd(hdev->bus); | 1484 | hcd = bus_to_hcd(hdev->bus); |
1485 | if (hdev == hdev->bus->root_hub) { | 1485 | if (hdev == hdev->bus->root_hub) { |
1486 | if (hcd->power_budget > 0) | 1486 | if (hcd->power_budget > 0) |
1487 | hdev->bus_mA = hcd->power_budget; | 1487 | hdev->bus_mA = hcd->power_budget; |
1488 | else | 1488 | else |
1489 | hdev->bus_mA = full_load * hdev->maxchild; | 1489 | hdev->bus_mA = full_load * hdev->maxchild; |
1490 | if (hdev->bus_mA >= full_load) | 1490 | if (hdev->bus_mA >= full_load) |
1491 | hub->mA_per_port = full_load; | 1491 | hub->mA_per_port = full_load; |
1492 | else { | 1492 | else { |
1493 | hub->mA_per_port = hdev->bus_mA; | 1493 | hub->mA_per_port = hdev->bus_mA; |
1494 | hub->limited_power = 1; | 1494 | hub->limited_power = 1; |
1495 | } | 1495 | } |
1496 | } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { | 1496 | } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { |
1497 | int remaining = hdev->bus_mA - | 1497 | int remaining = hdev->bus_mA - |
1498 | hub->descriptor->bHubContrCurrent; | 1498 | hub->descriptor->bHubContrCurrent; |
1499 | 1499 | ||
1500 | dev_dbg(hub_dev, "hub controller current requirement: %dmA\n", | 1500 | dev_dbg(hub_dev, "hub controller current requirement: %dmA\n", |
1501 | hub->descriptor->bHubContrCurrent); | 1501 | hub->descriptor->bHubContrCurrent); |
1502 | hub->limited_power = 1; | 1502 | hub->limited_power = 1; |
1503 | 1503 | ||
1504 | if (remaining < hdev->maxchild * unit_load) | 1504 | if (remaining < hdev->maxchild * unit_load) |
1505 | dev_warn(hub_dev, | 1505 | dev_warn(hub_dev, |
1506 | "insufficient power available " | 1506 | "insufficient power available " |
1507 | "to use all downstream ports\n"); | 1507 | "to use all downstream ports\n"); |
1508 | hub->mA_per_port = unit_load; /* 7.2.1 */ | 1508 | hub->mA_per_port = unit_load; /* 7.2.1 */ |
1509 | 1509 | ||
1510 | } else { /* Self-powered external hub */ | 1510 | } else { /* Self-powered external hub */ |
1511 | /* FIXME: What about battery-powered external hubs that | 1511 | /* FIXME: What about battery-powered external hubs that |
1512 | * provide less current per port? */ | 1512 | * provide less current per port? */ |
1513 | hub->mA_per_port = full_load; | 1513 | hub->mA_per_port = full_load; |
1514 | } | 1514 | } |
1515 | if (hub->mA_per_port < full_load) | 1515 | if (hub->mA_per_port < full_load) |
1516 | dev_dbg(hub_dev, "%umA bus power budget for each child\n", | 1516 | dev_dbg(hub_dev, "%umA bus power budget for each child\n", |
1517 | hub->mA_per_port); | 1517 | hub->mA_per_port); |
1518 | 1518 | ||
1519 | /* Update the HCD's internal representation of this hub before khubd | 1519 | /* Update the HCD's internal representation of this hub before khubd |
1520 | * starts getting port status changes for devices under the hub. | 1520 | * starts getting port status changes for devices under the hub. |
1521 | */ | 1521 | */ |
1522 | if (hcd->driver->update_hub_device) { | 1522 | if (hcd->driver->update_hub_device) { |
1523 | ret = hcd->driver->update_hub_device(hcd, hdev, | 1523 | ret = hcd->driver->update_hub_device(hcd, hdev, |
1524 | &hub->tt, GFP_KERNEL); | 1524 | &hub->tt, GFP_KERNEL); |
1525 | if (ret < 0) { | 1525 | if (ret < 0) { |
1526 | message = "can't update HCD hub info"; | 1526 | message = "can't update HCD hub info"; |
1527 | goto fail; | 1527 | goto fail; |
1528 | } | 1528 | } |
1529 | } | 1529 | } |
1530 | 1530 | ||
1531 | ret = hub_hub_status(hub, &hubstatus, &hubchange); | 1531 | ret = hub_hub_status(hub, &hubstatus, &hubchange); |
1532 | if (ret < 0) { | 1532 | if (ret < 0) { |
1533 | message = "can't get hub status"; | 1533 | message = "can't get hub status"; |
1534 | goto fail; | 1534 | goto fail; |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | /* local power status reports aren't always correct */ | 1537 | /* local power status reports aren't always correct */ |
1538 | if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER) | 1538 | if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER) |
1539 | dev_dbg(hub_dev, "local power source is %s\n", | 1539 | dev_dbg(hub_dev, "local power source is %s\n", |
1540 | (hubstatus & HUB_STATUS_LOCAL_POWER) | 1540 | (hubstatus & HUB_STATUS_LOCAL_POWER) |
1541 | ? "lost (inactive)" : "good"); | 1541 | ? "lost (inactive)" : "good"); |
1542 | 1542 | ||
1543 | if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0) | 1543 | if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0) |
1544 | dev_dbg(hub_dev, "%sover-current condition exists\n", | 1544 | dev_dbg(hub_dev, "%sover-current condition exists\n", |
1545 | (hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no "); | 1545 | (hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no "); |
1546 | 1546 | ||
1547 | /* set up the interrupt endpoint | 1547 | /* set up the interrupt endpoint |
1548 | * We use the EP's maxpacket size instead of (PORTS+1+7)/8 | 1548 | * We use the EP's maxpacket size instead of (PORTS+1+7)/8 |
1549 | * bytes as USB2.0[11.12.3] says because some hubs are known | 1549 | * bytes as USB2.0[11.12.3] says because some hubs are known |
1550 | * to send more data (and thus cause overflow). For root hubs, | 1550 | * to send more data (and thus cause overflow). For root hubs, |
1551 | * maxpktsize is defined in hcd.c's fake endpoint descriptors | 1551 | * maxpktsize is defined in hcd.c's fake endpoint descriptors |
1552 | * to be big enough for at least USB_MAXCHILDREN ports. */ | 1552 | * to be big enough for at least USB_MAXCHILDREN ports. */ |
1553 | pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress); | 1553 | pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress); |
1554 | maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe)); | 1554 | maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe)); |
1555 | 1555 | ||
1556 | if (maxp > sizeof(*hub->buffer)) | 1556 | if (maxp > sizeof(*hub->buffer)) |
1557 | maxp = sizeof(*hub->buffer); | 1557 | maxp = sizeof(*hub->buffer); |
1558 | 1558 | ||
1559 | hub->urb = usb_alloc_urb(0, GFP_KERNEL); | 1559 | hub->urb = usb_alloc_urb(0, GFP_KERNEL); |
1560 | if (!hub->urb) { | 1560 | if (!hub->urb) { |
1561 | ret = -ENOMEM; | 1561 | ret = -ENOMEM; |
1562 | goto fail; | 1562 | goto fail; |
1563 | } | 1563 | } |
1564 | 1564 | ||
1565 | usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq, | 1565 | usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq, |
1566 | hub, endpoint->bInterval); | 1566 | hub, endpoint->bInterval); |
1567 | 1567 | ||
1568 | /* maybe cycle the hub leds */ | 1568 | /* maybe cycle the hub leds */ |
1569 | if (hub->has_indicators && blinkenlights) | 1569 | if (hub->has_indicators && blinkenlights) |
1570 | hub->indicator[0] = INDICATOR_CYCLE; | 1570 | hub->indicator[0] = INDICATOR_CYCLE; |
1571 | 1571 | ||
1572 | for (i = 0; i < hdev->maxchild; i++) { | 1572 | for (i = 0; i < hdev->maxchild; i++) { |
1573 | ret = usb_hub_create_port_device(hub, i + 1); | 1573 | ret = usb_hub_create_port_device(hub, i + 1); |
1574 | if (ret < 0) { | 1574 | if (ret < 0) { |
1575 | dev_err(hub->intfdev, | 1575 | dev_err(hub->intfdev, |
1576 | "couldn't create port%d device.\n", i + 1); | 1576 | "couldn't create port%d device.\n", i + 1); |
1577 | hdev->maxchild = i; | 1577 | hdev->maxchild = i; |
1578 | goto fail_keep_maxchild; | 1578 | goto fail_keep_maxchild; |
1579 | } | 1579 | } |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | usb_hub_adjust_deviceremovable(hdev, hub->descriptor); | 1582 | usb_hub_adjust_deviceremovable(hdev, hub->descriptor); |
1583 | 1583 | ||
1584 | hub_activate(hub, HUB_INIT); | 1584 | hub_activate(hub, HUB_INIT); |
1585 | return 0; | 1585 | return 0; |
1586 | 1586 | ||
1587 | fail: | 1587 | fail: |
1588 | hdev->maxchild = 0; | 1588 | hdev->maxchild = 0; |
1589 | fail_keep_maxchild: | 1589 | fail_keep_maxchild: |
1590 | dev_err (hub_dev, "config failed, %s (err %d)\n", | 1590 | dev_err (hub_dev, "config failed, %s (err %d)\n", |
1591 | message, ret); | 1591 | message, ret); |
1592 | /* hub_disconnect() frees urb and descriptor */ | 1592 | /* hub_disconnect() frees urb and descriptor */ |
1593 | return ret; | 1593 | return ret; |
1594 | } | 1594 | } |
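For reference, hub_configure() above picks a per-port current budget in three cases: the root hub is capped at one full load per port, a bus-powered external hub gets one unit load per port (USB 2.0, section 7.2.1), and a self-powered external hub is assumed to supply a full load. A minimal sketch of that decision in isolation follows; the function name and the USB 2.0 load constants are assumptions for illustration, not part of this commit.

#include <stdbool.h>

#define SKETCH_UNIT_LOAD_mA	100	/* one USB 2.0 unit load (assumed) */
#define SKETCH_FULL_LOAD_mA	500	/* five unit loads (assumed) */

/* Restates the mA_per_port choice made in hub_configure(); editorial sketch. */
static unsigned int sketch_mA_per_port(bool is_root_hub, bool self_powered,
				       unsigned int bus_mA)
{
	if (is_root_hub)
		/* root hub: never advertise more than one full load per port */
		return bus_mA >= SKETCH_FULL_LOAD_mA ?
				SKETCH_FULL_LOAD_mA : bus_mA;
	if (!self_powered)
		/* bus-powered external hub: one unit load per port */
		return SKETCH_UNIT_LOAD_mA;
	/* self-powered external hub: assume a full load per port */
	return SKETCH_FULL_LOAD_mA;
}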
1595 | 1595 | ||
1596 | static void hub_release(struct kref *kref) | 1596 | static void hub_release(struct kref *kref) |
1597 | { | 1597 | { |
1598 | struct usb_hub *hub = container_of(kref, struct usb_hub, kref); | 1598 | struct usb_hub *hub = container_of(kref, struct usb_hub, kref); |
1599 | 1599 | ||
1600 | usb_put_intf(to_usb_interface(hub->intfdev)); | 1600 | usb_put_intf(to_usb_interface(hub->intfdev)); |
1601 | kfree(hub); | 1601 | kfree(hub); |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | static unsigned highspeed_hubs; | 1604 | static unsigned highspeed_hubs; |
1605 | 1605 | ||
1606 | static void hub_disconnect(struct usb_interface *intf) | 1606 | static void hub_disconnect(struct usb_interface *intf) |
1607 | { | 1607 | { |
1608 | struct usb_hub *hub = usb_get_intfdata(intf); | 1608 | struct usb_hub *hub = usb_get_intfdata(intf); |
1609 | struct usb_device *hdev = interface_to_usbdev(intf); | 1609 | struct usb_device *hdev = interface_to_usbdev(intf); |
1610 | int i; | 1610 | int i; |
1611 | 1611 | ||
1612 | /* Take the hub off the event list and don't let it be added again */ | 1612 | /* Take the hub off the event list and don't let it be added again */ |
1613 | spin_lock_irq(&hub_event_lock); | 1613 | spin_lock_irq(&hub_event_lock); |
1614 | if (!list_empty(&hub->event_list)) { | 1614 | if (!list_empty(&hub->event_list)) { |
1615 | list_del_init(&hub->event_list); | 1615 | list_del_init(&hub->event_list); |
1616 | usb_autopm_put_interface_no_suspend(intf); | 1616 | usb_autopm_put_interface_no_suspend(intf); |
1617 | } | 1617 | } |
1618 | hub->disconnected = 1; | 1618 | hub->disconnected = 1; |
1619 | spin_unlock_irq(&hub_event_lock); | 1619 | spin_unlock_irq(&hub_event_lock); |
1620 | 1620 | ||
1621 | /* Disconnect all children and quiesce the hub */ | 1621 | /* Disconnect all children and quiesce the hub */ |
1622 | hub->error = 0; | 1622 | hub->error = 0; |
1623 | hub_quiesce(hub, HUB_DISCONNECT); | 1623 | hub_quiesce(hub, HUB_DISCONNECT); |
1624 | 1624 | ||
1625 | usb_set_intfdata (intf, NULL); | 1625 | usb_set_intfdata (intf, NULL); |
1626 | 1626 | ||
1627 | for (i = 0; i < hdev->maxchild; i++) | 1627 | for (i = 0; i < hdev->maxchild; i++) |
1628 | usb_hub_remove_port_device(hub, i + 1); | 1628 | usb_hub_remove_port_device(hub, i + 1); |
1629 | hub->hdev->maxchild = 0; | 1629 | hub->hdev->maxchild = 0; |
1630 | 1630 | ||
1631 | if (hub->hdev->speed == USB_SPEED_HIGH) | 1631 | if (hub->hdev->speed == USB_SPEED_HIGH) |
1632 | highspeed_hubs--; | 1632 | highspeed_hubs--; |
1633 | 1633 | ||
1634 | usb_free_urb(hub->urb); | 1634 | usb_free_urb(hub->urb); |
1635 | kfree(hub->ports); | 1635 | kfree(hub->ports); |
1636 | kfree(hub->descriptor); | 1636 | kfree(hub->descriptor); |
1637 | kfree(hub->status); | 1637 | kfree(hub->status); |
1638 | kfree(hub->buffer); | 1638 | kfree(hub->buffer); |
1639 | 1639 | ||
1640 | pm_suspend_ignore_children(&intf->dev, false); | 1640 | pm_suspend_ignore_children(&intf->dev, false); |
1641 | kref_put(&hub->kref, hub_release); | 1641 | kref_put(&hub->kref, hub_release); |
1642 | } | 1642 | } |
1643 | 1643 | ||
1644 | static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) | 1644 | static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) |
1645 | { | 1645 | { |
1646 | struct usb_host_interface *desc; | 1646 | struct usb_host_interface *desc; |
1647 | struct usb_endpoint_descriptor *endpoint; | 1647 | struct usb_endpoint_descriptor *endpoint; |
1648 | struct usb_device *hdev; | 1648 | struct usb_device *hdev; |
1649 | struct usb_hub *hub; | 1649 | struct usb_hub *hub; |
1650 | 1650 | ||
1651 | desc = intf->cur_altsetting; | 1651 | desc = intf->cur_altsetting; |
1652 | hdev = interface_to_usbdev(intf); | 1652 | hdev = interface_to_usbdev(intf); |
1653 | 1653 | ||
1654 | /* | 1654 | /* |
1655 | * Set the default autosuspend delay to 0 to speed up bus suspend, | 1655 | * Set the default autosuspend delay to 0 to speed up bus suspend, |
1656 | * based on the following considerations: | 1656 | * based on the following considerations: |
1657 | * | 1657 | * |
1658 | * - Unlike other drivers, the hub driver does not rely on the | 1658 | * - Unlike other drivers, the hub driver does not rely on the |
1659 | * autosuspend delay to provide enough time to handle a wakeup | 1659 | * autosuspend delay to provide enough time to handle a wakeup |
1660 | * event, and the submitted status URB is just to check future | 1660 | * event, and the submitted status URB is just to check future |
1661 | * changes on hub downstream ports, so it is safe to do it. | 1661 | * changes on hub downstream ports, so it is safe to do it. |
1662 | * | 1662 | * |
1663 | * - The patch might cause one or more auto suspend/resume cycles for | 1663 | * - The patch might cause one or more auto suspend/resume cycles for |
1664 | * the very rare devices below when they are plugged into a hub | 1664 | * the very rare devices below when they are plugged into a hub |
1665 | * for the first time: | 1665 | * for the first time: |
1666 | * | 1666 | * |
1667 | * devices having trouble initializing, and disconnect | 1667 | * devices having trouble initializing, and disconnect |
1668 | * themselves from the bus and then reconnect a second | 1668 | * themselves from the bus and then reconnect a second |
1669 | * or so later | 1669 | * or so later |
1670 | * | 1670 | * |
1671 | * devices used just for downloading firmware, which disconnect | 1671 | * devices used just for downloading firmware, which disconnect |
1672 | * themselves after completing it | 1672 | * themselves after completing it |
1673 | * | 1673 | * |
1674 | * For these quite rare devices, their drivers may change the | 1674 | * For these quite rare devices, their drivers may change the |
1675 | * autosuspend delay of their parent hub in probe() to an | 1675 | * autosuspend delay of their parent hub in probe() to an |
1676 | * appropriate value to avoid the subtle problem if someone | 1676 | * appropriate value to avoid the subtle problem if someone |
1677 | * does care about it. | 1677 | * does care about it. |
1678 | * | 1678 | * |
1679 | * - The patch may cause one or more auto suspend/resume on | 1679 | * - The patch may cause one or more auto suspend/resume on |
1680 | * hub during running 'lsusb', but it is probably too | 1680 | * hub during running 'lsusb', but it is probably too |
1681 | * infrequent to worry about. | 1681 | * infrequent to worry about. |
1682 | * | 1682 | * |
1683 | * - Changing the autosuspend delay of the hub avoids an unnecessary | 1683 | * - Changing the autosuspend delay of the hub avoids an unnecessary |
1684 | * autosuspend timer for the hub, and may also decrease the power | 1684 | * autosuspend timer for the hub, and may also decrease the power |
1685 | * consumption of the USB bus. | 1685 | * consumption of the USB bus. |
1686 | */ | 1686 | */ |
1687 | pm_runtime_set_autosuspend_delay(&hdev->dev, 0); | 1687 | pm_runtime_set_autosuspend_delay(&hdev->dev, 0); |
1688 | 1688 | ||
1689 | /* Hubs have proper suspend/resume support. */ | 1689 | /* Hubs have proper suspend/resume support. */ |
1690 | usb_enable_autosuspend(hdev); | 1690 | usb_enable_autosuspend(hdev); |
1691 | 1691 | ||
1692 | if (hdev->level == MAX_TOPO_LEVEL) { | 1692 | if (hdev->level == MAX_TOPO_LEVEL) { |
1693 | dev_err(&intf->dev, | 1693 | dev_err(&intf->dev, |
1694 | "Unsupported bus topology: hub nested too deep\n"); | 1694 | "Unsupported bus topology: hub nested too deep\n"); |
1695 | return -E2BIG; | 1695 | return -E2BIG; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | #ifdef CONFIG_USB_OTG_BLACKLIST_HUB | 1698 | #ifdef CONFIG_USB_OTG_BLACKLIST_HUB |
1699 | if (hdev->parent) { | 1699 | if (hdev->parent) { |
1700 | dev_warn(&intf->dev, "ignoring external hub\n"); | 1700 | dev_warn(&intf->dev, "ignoring external hub\n"); |
1701 | return -ENODEV; | 1701 | return -ENODEV; |
1702 | } | 1702 | } |
1703 | #endif | 1703 | #endif |
1704 | 1704 | ||
1705 | /* Some hubs have a subclass of 1, which AFAICT according to the */ | 1705 | /* Some hubs have a subclass of 1, which AFAICT according to the */ |
1706 | /* specs is not defined, but it works */ | 1706 | /* specs is not defined, but it works */ |
1707 | if ((desc->desc.bInterfaceSubClass != 0) && | 1707 | if ((desc->desc.bInterfaceSubClass != 0) && |
1708 | (desc->desc.bInterfaceSubClass != 1)) { | 1708 | (desc->desc.bInterfaceSubClass != 1)) { |
1709 | descriptor_error: | 1709 | descriptor_error: |
1710 | dev_err (&intf->dev, "bad descriptor, ignoring hub\n"); | 1710 | dev_err (&intf->dev, "bad descriptor, ignoring hub\n"); |
1711 | return -EIO; | 1711 | return -EIO; |
1712 | } | 1712 | } |
1713 | 1713 | ||
1714 | /* Multiple endpoints? What kind of mutant ninja-hub is this? */ | 1714 | /* Multiple endpoints? What kind of mutant ninja-hub is this? */ |
1715 | if (desc->desc.bNumEndpoints != 1) | 1715 | if (desc->desc.bNumEndpoints != 1) |
1716 | goto descriptor_error; | 1716 | goto descriptor_error; |
1717 | 1717 | ||
1718 | endpoint = &desc->endpoint[0].desc; | 1718 | endpoint = &desc->endpoint[0].desc; |
1719 | 1719 | ||
1720 | /* If it's not an interrupt in endpoint, we'd better punt! */ | 1720 | /* If it's not an interrupt in endpoint, we'd better punt! */ |
1721 | if (!usb_endpoint_is_int_in(endpoint)) | 1721 | if (!usb_endpoint_is_int_in(endpoint)) |
1722 | goto descriptor_error; | 1722 | goto descriptor_error; |
1723 | 1723 | ||
1724 | /* We found a hub */ | 1724 | /* We found a hub */ |
1725 | dev_info (&intf->dev, "USB hub found\n"); | 1725 | dev_info (&intf->dev, "USB hub found\n"); |
1726 | 1726 | ||
1727 | hub = kzalloc(sizeof(*hub), GFP_KERNEL); | 1727 | hub = kzalloc(sizeof(*hub), GFP_KERNEL); |
1728 | if (!hub) { | 1728 | if (!hub) { |
1729 | dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n"); | 1729 | dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n"); |
1730 | return -ENOMEM; | 1730 | return -ENOMEM; |
1731 | } | 1731 | } |
1732 | 1732 | ||
1733 | kref_init(&hub->kref); | 1733 | kref_init(&hub->kref); |
1734 | INIT_LIST_HEAD(&hub->event_list); | 1734 | INIT_LIST_HEAD(&hub->event_list); |
1735 | hub->intfdev = &intf->dev; | 1735 | hub->intfdev = &intf->dev; |
1736 | hub->hdev = hdev; | 1736 | hub->hdev = hdev; |
1737 | INIT_DELAYED_WORK(&hub->leds, led_work); | 1737 | INIT_DELAYED_WORK(&hub->leds, led_work); |
1738 | INIT_DELAYED_WORK(&hub->init_work, NULL); | 1738 | INIT_DELAYED_WORK(&hub->init_work, NULL); |
1739 | usb_get_intf(intf); | 1739 | usb_get_intf(intf); |
1740 | 1740 | ||
1741 | usb_set_intfdata (intf, hub); | 1741 | usb_set_intfdata (intf, hub); |
1742 | intf->needs_remote_wakeup = 1; | 1742 | intf->needs_remote_wakeup = 1; |
1743 | pm_suspend_ignore_children(&intf->dev, true); | 1743 | pm_suspend_ignore_children(&intf->dev, true); |
1744 | 1744 | ||
1745 | if (hdev->speed == USB_SPEED_HIGH) | 1745 | if (hdev->speed == USB_SPEED_HIGH) |
1746 | highspeed_hubs++; | 1746 | highspeed_hubs++; |
1747 | 1747 | ||
1748 | if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND) | 1748 | if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND) |
1749 | hub->quirk_check_port_auto_suspend = 1; | 1749 | hub->quirk_check_port_auto_suspend = 1; |
1750 | 1750 | ||
1751 | if (hub_configure(hub, endpoint) >= 0) | 1751 | if (hub_configure(hub, endpoint) >= 0) |
1752 | return 0; | 1752 | return 0; |
1753 | 1753 | ||
1754 | hub_disconnect (intf); | 1754 | hub_disconnect (intf); |
1755 | return -ENODEV; | 1755 | return -ENODEV; |
1756 | } | 1756 | } |
1757 | 1757 | ||
1758 | static int | 1758 | static int |
1759 | hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data) | 1759 | hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data) |
1760 | { | 1760 | { |
1761 | struct usb_device *hdev = interface_to_usbdev (intf); | 1761 | struct usb_device *hdev = interface_to_usbdev (intf); |
1762 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 1762 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
1763 | 1763 | ||
1764 | /* assert ifno == 0 (part of hub spec) */ | 1764 | /* assert ifno == 0 (part of hub spec) */ |
1765 | switch (code) { | 1765 | switch (code) { |
1766 | case USBDEVFS_HUB_PORTINFO: { | 1766 | case USBDEVFS_HUB_PORTINFO: { |
1767 | struct usbdevfs_hub_portinfo *info = user_data; | 1767 | struct usbdevfs_hub_portinfo *info = user_data; |
1768 | int i; | 1768 | int i; |
1769 | 1769 | ||
1770 | spin_lock_irq(&device_state_lock); | 1770 | spin_lock_irq(&device_state_lock); |
1771 | if (hdev->devnum <= 0) | 1771 | if (hdev->devnum <= 0) |
1772 | info->nports = 0; | 1772 | info->nports = 0; |
1773 | else { | 1773 | else { |
1774 | info->nports = hdev->maxchild; | 1774 | info->nports = hdev->maxchild; |
1775 | for (i = 0; i < info->nports; i++) { | 1775 | for (i = 0; i < info->nports; i++) { |
1776 | if (hub->ports[i]->child == NULL) | 1776 | if (hub->ports[i]->child == NULL) |
1777 | info->port[i] = 0; | 1777 | info->port[i] = 0; |
1778 | else | 1778 | else |
1779 | info->port[i] = | 1779 | info->port[i] = |
1780 | hub->ports[i]->child->devnum; | 1780 | hub->ports[i]->child->devnum; |
1781 | } | 1781 | } |
1782 | } | 1782 | } |
1783 | spin_unlock_irq(&device_state_lock); | 1783 | spin_unlock_irq(&device_state_lock); |
1784 | 1784 | ||
1785 | return info->nports + 1; | 1785 | return info->nports + 1; |
1786 | } | 1786 | } |
1787 | 1787 | ||
1788 | default: | 1788 | default: |
1789 | return -ENOSYS; | 1789 | return -ENOSYS; |
1790 | } | 1790 | } |
1791 | } | 1791 | } |
1792 | 1792 | ||
1793 | /* | 1793 | /* |
1794 | * Allow user programs to claim ports on a hub. When a device is attached | 1794 | * Allow user programs to claim ports on a hub. When a device is attached |
1795 | * to one of these "claimed" ports, the program will "own" the device. | 1795 | * to one of these "claimed" ports, the program will "own" the device. |
1796 | */ | 1796 | */ |
1797 | static int find_port_owner(struct usb_device *hdev, unsigned port1, | 1797 | static int find_port_owner(struct usb_device *hdev, unsigned port1, |
1798 | struct dev_state ***ppowner) | 1798 | struct dev_state ***ppowner) |
1799 | { | 1799 | { |
1800 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 1800 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
1801 | 1801 | ||
1802 | if (hdev->state == USB_STATE_NOTATTACHED) | 1802 | if (hdev->state == USB_STATE_NOTATTACHED) |
1803 | return -ENODEV; | 1803 | return -ENODEV; |
1804 | if (port1 == 0 || port1 > hdev->maxchild) | 1804 | if (port1 == 0 || port1 > hdev->maxchild) |
1805 | return -EINVAL; | 1805 | return -EINVAL; |
1806 | 1806 | ||
1807 | /* Devices not managed by the hub driver | 1807 | /* Devices not managed by the hub driver |
1808 | * will always have maxchild equal to 0. | 1808 | * will always have maxchild equal to 0. |
1809 | */ | 1809 | */ |
1810 | *ppowner = &(hub->ports[port1 - 1]->port_owner); | 1810 | *ppowner = &(hub->ports[port1 - 1]->port_owner); |
1811 | return 0; | 1811 | return 0; |
1812 | } | 1812 | } |
1813 | 1813 | ||
1814 | /* In the following three functions, the caller must hold hdev's lock */ | 1814 | /* In the following three functions, the caller must hold hdev's lock */ |
1815 | int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, | 1815 | int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, |
1816 | struct dev_state *owner) | 1816 | struct dev_state *owner) |
1817 | { | 1817 | { |
1818 | int rc; | 1818 | int rc; |
1819 | struct dev_state **powner; | 1819 | struct dev_state **powner; |
1820 | 1820 | ||
1821 | rc = find_port_owner(hdev, port1, &powner); | 1821 | rc = find_port_owner(hdev, port1, &powner); |
1822 | if (rc) | 1822 | if (rc) |
1823 | return rc; | 1823 | return rc; |
1824 | if (*powner) | 1824 | if (*powner) |
1825 | return -EBUSY; | 1825 | return -EBUSY; |
1826 | *powner = owner; | 1826 | *powner = owner; |
1827 | return rc; | 1827 | return rc; |
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | int usb_hub_release_port(struct usb_device *hdev, unsigned port1, | 1830 | int usb_hub_release_port(struct usb_device *hdev, unsigned port1, |
1831 | struct dev_state *owner) | 1831 | struct dev_state *owner) |
1832 | { | 1832 | { |
1833 | int rc; | 1833 | int rc; |
1834 | struct dev_state **powner; | 1834 | struct dev_state **powner; |
1835 | 1835 | ||
1836 | rc = find_port_owner(hdev, port1, &powner); | 1836 | rc = find_port_owner(hdev, port1, &powner); |
1837 | if (rc) | 1837 | if (rc) |
1838 | return rc; | 1838 | return rc; |
1839 | if (*powner != owner) | 1839 | if (*powner != owner) |
1840 | return -ENOENT; | 1840 | return -ENOENT; |
1841 | *powner = NULL; | 1841 | *powner = NULL; |
1842 | return rc; | 1842 | return rc; |
1843 | } | 1843 | } |
1844 | 1844 | ||
1845 | void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner) | 1845 | void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner) |
1846 | { | 1846 | { |
1847 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 1847 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
1848 | int n; | 1848 | int n; |
1849 | 1849 | ||
1850 | for (n = 0; n < hdev->maxchild; n++) { | 1850 | for (n = 0; n < hdev->maxchild; n++) { |
1851 | if (hub->ports[n]->port_owner == owner) | 1851 | if (hub->ports[n]->port_owner == owner) |
1852 | hub->ports[n]->port_owner = NULL; | 1852 | hub->ports[n]->port_owner = NULL; |
1853 | } | 1853 | } |
1854 | 1854 | ||
1855 | } | 1855 | } |
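The claim/release helpers above must be called with hdev's device lock held, as the comment before usb_hub_claim_port() notes. A brief usage sketch, assuming a hypothetical caller that already obtained a struct dev_state owner (e.g. from usbfs); the wrapper name is invented for illustration and is not part of this commit:

/* Hypothetical wrapper showing the locking contract; editorial sketch. */
static int sketch_claim_then_release(struct usb_device *hdev, unsigned port1,
				     struct dev_state *owner)
{
	int rc;

	usb_lock_device(hdev);		/* required by usb_hub_claim_port() */
	rc = usb_hub_claim_port(hdev, port1, owner);
	if (rc == 0)
		rc = usb_hub_release_port(hdev, port1, owner);
	usb_unlock_device(hdev);
	return rc;
}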
1856 | 1856 | ||
1857 | /* The caller must hold udev's lock */ | 1857 | /* The caller must hold udev's lock */ |
1858 | bool usb_device_is_owned(struct usb_device *udev) | 1858 | bool usb_device_is_owned(struct usb_device *udev) |
1859 | { | 1859 | { |
1860 | struct usb_hub *hub; | 1860 | struct usb_hub *hub; |
1861 | 1861 | ||
1862 | if (udev->state == USB_STATE_NOTATTACHED || !udev->parent) | 1862 | if (udev->state == USB_STATE_NOTATTACHED || !udev->parent) |
1863 | return false; | 1863 | return false; |
1864 | hub = usb_hub_to_struct_hub(udev->parent); | 1864 | hub = usb_hub_to_struct_hub(udev->parent); |
1865 | return !!hub->ports[udev->portnum - 1]->port_owner; | 1865 | return !!hub->ports[udev->portnum - 1]->port_owner; |
1866 | } | 1866 | } |
1867 | 1867 | ||
1868 | static void recursively_mark_NOTATTACHED(struct usb_device *udev) | 1868 | static void recursively_mark_NOTATTACHED(struct usb_device *udev) |
1869 | { | 1869 | { |
1870 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); | 1870 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); |
1871 | int i; | 1871 | int i; |
1872 | 1872 | ||
1873 | for (i = 0; i < udev->maxchild; ++i) { | 1873 | for (i = 0; i < udev->maxchild; ++i) { |
1874 | if (hub->ports[i]->child) | 1874 | if (hub->ports[i]->child) |
1875 | recursively_mark_NOTATTACHED(hub->ports[i]->child); | 1875 | recursively_mark_NOTATTACHED(hub->ports[i]->child); |
1876 | } | 1876 | } |
1877 | if (udev->state == USB_STATE_SUSPENDED) | 1877 | if (udev->state == USB_STATE_SUSPENDED) |
1878 | udev->active_duration -= jiffies; | 1878 | udev->active_duration -= jiffies; |
1879 | udev->state = USB_STATE_NOTATTACHED; | 1879 | udev->state = USB_STATE_NOTATTACHED; |
1880 | } | 1880 | } |
1881 | 1881 | ||
1882 | /** | 1882 | /** |
1883 | * usb_set_device_state - change a device's current state (usbcore, hcds) | 1883 | * usb_set_device_state - change a device's current state (usbcore, hcds) |
1884 | * @udev: pointer to device whose state should be changed | 1884 | * @udev: pointer to device whose state should be changed |
1885 | * @new_state: new state value to be stored | 1885 | * @new_state: new state value to be stored |
1886 | * | 1886 | * |
1887 | * udev->state is _not_ fully protected by the device lock. Although | 1887 | * udev->state is _not_ fully protected by the device lock. Although |
1888 | * most transitions are made only while holding the lock, the state can | 1888 | * most transitions are made only while holding the lock, the state can |
1889 | * change to USB_STATE_NOTATTACHED at almost any time. This | 1889 | * change to USB_STATE_NOTATTACHED at almost any time. This |
1890 | * is so that devices can be marked as disconnected as soon as possible, | 1890 | * is so that devices can be marked as disconnected as soon as possible, |
1891 | * without having to wait for any semaphores to be released. As a result, | 1891 | * without having to wait for any semaphores to be released. As a result, |
1892 | * all changes to any device's state must be protected by the | 1892 | * all changes to any device's state must be protected by the |
1893 | * device_state_lock spinlock. | 1893 | * device_state_lock spinlock. |
1894 | * | 1894 | * |
1895 | * Once a device has been added to the device tree, all changes to its state | 1895 | * Once a device has been added to the device tree, all changes to its state |
1896 | * should be made using this routine. The state should _not_ be set directly. | 1896 | * should be made using this routine. The state should _not_ be set directly. |
1897 | * | 1897 | * |
1898 | * If udev->state is already USB_STATE_NOTATTACHED then no change is made. | 1898 | * If udev->state is already USB_STATE_NOTATTACHED then no change is made. |
1899 | * Otherwise udev->state is set to new_state, and if new_state is | 1899 | * Otherwise udev->state is set to new_state, and if new_state is |
1900 | * USB_STATE_NOTATTACHED then all of udev's descendants' states are also set | 1900 | * USB_STATE_NOTATTACHED then all of udev's descendants' states are also set |
1901 | * to USB_STATE_NOTATTACHED. | 1901 | * to USB_STATE_NOTATTACHED. |
1902 | */ | 1902 | */ |
1903 | void usb_set_device_state(struct usb_device *udev, | 1903 | void usb_set_device_state(struct usb_device *udev, |
1904 | enum usb_device_state new_state) | 1904 | enum usb_device_state new_state) |
1905 | { | 1905 | { |
1906 | unsigned long flags; | 1906 | unsigned long flags; |
1907 | int wakeup = -1; | 1907 | int wakeup = -1; |
1908 | 1908 | ||
1909 | spin_lock_irqsave(&device_state_lock, flags); | 1909 | spin_lock_irqsave(&device_state_lock, flags); |
1910 | if (udev->state == USB_STATE_NOTATTACHED) | 1910 | if (udev->state == USB_STATE_NOTATTACHED) |
1911 | ; /* do nothing */ | 1911 | ; /* do nothing */ |
1912 | else if (new_state != USB_STATE_NOTATTACHED) { | 1912 | else if (new_state != USB_STATE_NOTATTACHED) { |
1913 | 1913 | ||
1914 | /* root hub wakeup capabilities are managed out-of-band | 1914 | /* root hub wakeup capabilities are managed out-of-band |
1915 | * and may involve silicon errata ... ignore them here. | 1915 | * and may involve silicon errata ... ignore them here. |
1916 | */ | 1916 | */ |
1917 | if (udev->parent) { | 1917 | if (udev->parent) { |
1918 | if (udev->state == USB_STATE_SUSPENDED | 1918 | if (udev->state == USB_STATE_SUSPENDED |
1919 | || new_state == USB_STATE_SUSPENDED) | 1919 | || new_state == USB_STATE_SUSPENDED) |
1920 | ; /* No change to wakeup settings */ | 1920 | ; /* No change to wakeup settings */ |
1921 | else if (new_state == USB_STATE_CONFIGURED) | 1921 | else if (new_state == USB_STATE_CONFIGURED) |
1922 | wakeup = udev->actconfig->desc.bmAttributes | 1922 | wakeup = udev->actconfig->desc.bmAttributes |
1923 | & USB_CONFIG_ATT_WAKEUP; | 1923 | & USB_CONFIG_ATT_WAKEUP; |
1924 | else | 1924 | else |
1925 | wakeup = 0; | 1925 | wakeup = 0; |
1926 | } | 1926 | } |
1927 | if (udev->state == USB_STATE_SUSPENDED && | 1927 | if (udev->state == USB_STATE_SUSPENDED && |
1928 | new_state != USB_STATE_SUSPENDED) | 1928 | new_state != USB_STATE_SUSPENDED) |
1929 | udev->active_duration -= jiffies; | 1929 | udev->active_duration -= jiffies; |
1930 | else if (new_state == USB_STATE_SUSPENDED && | 1930 | else if (new_state == USB_STATE_SUSPENDED && |
1931 | udev->state != USB_STATE_SUSPENDED) | 1931 | udev->state != USB_STATE_SUSPENDED) |
1932 | udev->active_duration += jiffies; | 1932 | udev->active_duration += jiffies; |
1933 | udev->state = new_state; | 1933 | udev->state = new_state; |
1934 | } else | 1934 | } else |
1935 | recursively_mark_NOTATTACHED(udev); | 1935 | recursively_mark_NOTATTACHED(udev); |
1936 | spin_unlock_irqrestore(&device_state_lock, flags); | 1936 | spin_unlock_irqrestore(&device_state_lock, flags); |
1937 | if (wakeup >= 0) | 1937 | if (wakeup >= 0) |
1938 | device_set_wakeup_capable(&udev->dev, wakeup); | 1938 | device_set_wakeup_capable(&udev->dev, wakeup); |
1939 | } | 1939 | } |
1940 | EXPORT_SYMBOL_GPL(usb_set_device_state); | 1940 | EXPORT_SYMBOL_GPL(usb_set_device_state); |
1941 | 1941 | ||
1942 | /* | 1942 | /* |
1943 | * Choose a device number. | 1943 | * Choose a device number. |
1944 | * | 1944 | * |
1945 | * Device numbers are used as filenames in usbfs. On USB-1.1 and | 1945 | * Device numbers are used as filenames in usbfs. On USB-1.1 and |
1946 | * USB-2.0 buses they are also used as device addresses, however on | 1946 | * USB-2.0 buses they are also used as device addresses, however on |
1947 | * USB-3.0 buses the address is assigned by the controller hardware | 1947 | * USB-3.0 buses the address is assigned by the controller hardware |
1948 | * and it usually is not the same as the device number. | 1948 | * and it usually is not the same as the device number. |
1949 | * | 1949 | * |
1950 | * WUSB devices are simple: they have no hubs behind, so the mapping | 1950 | * WUSB devices are simple: they have no hubs behind, so the mapping |
1951 | * device <-> virtual port number becomes 1:1. Why? To simplify the | 1951 | * device <-> virtual port number becomes 1:1. Why? To simplify the |
1952 | * life of the device connection logic in | 1952 | * life of the device connection logic in |
1953 | * drivers/usb/wusbcore/devconnect.c. When we do the initial secret | 1953 | * drivers/usb/wusbcore/devconnect.c. When we do the initial secret |
1954 | * handshake we need to assign a temporary address in the unauthorized | 1954 | * handshake we need to assign a temporary address in the unauthorized |
1955 | * space. For simplicity we use the first virtual port number found to | 1955 | * space. For simplicity we use the first virtual port number found to |
1956 | * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()] | 1956 | * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()] |
1957 | * and that becomes its address [X < 128] or its unauthorized address | 1957 | * and that becomes its address [X < 128] or its unauthorized address |
1958 | * [X | 0x80]. | 1958 | * [X | 0x80]. |
1959 | * | 1959 | * |
1960 | * We add 1 as an offset to the one-based USB-stack port number | 1960 | * We add 1 as an offset to the one-based USB-stack port number |
1961 | * (zero-based wusb virtual port index) for two reasons: (a) dev addr | 1961 | * (zero-based wusb virtual port index) for two reasons: (a) dev addr |
1962 | * 0 is reserved by USB for default address; (b) Linux's USB stack | 1962 | * 0 is reserved by USB for default address; (b) Linux's USB stack |
1963 | * always uses #1 for the root hub of the controller. So the USB stack's | 1963 | * always uses #1 for the root hub of the controller. So the USB stack's |
1964 | * port #1, which is wusb virtual-port #0, has address #2. | 1964 | * port #1, which is wusb virtual-port #0, has address #2. |
1965 | * | 1965 | * |
1966 | * Devices connected under xHCI are not as simple. The host controller | 1966 | * Devices connected under xHCI are not as simple. The host controller |
1967 | * supports virtualization, so the hardware assigns device addresses and | 1967 | * supports virtualization, so the hardware assigns device addresses and |
1968 | * the HCD must setup data structures before issuing a set address | 1968 | * the HCD must setup data structures before issuing a set address |
1969 | * command to the hardware. | 1969 | * command to the hardware. |
1970 | */ | 1970 | */ |
1971 | static void choose_devnum(struct usb_device *udev) | 1971 | static void choose_devnum(struct usb_device *udev) |
1972 | { | 1972 | { |
1973 | int devnum; | 1973 | int devnum; |
1974 | struct usb_bus *bus = udev->bus; | 1974 | struct usb_bus *bus = udev->bus; |
1975 | 1975 | ||
1976 | /* If khubd ever becomes multithreaded, this will need a lock */ | 1976 | /* If khubd ever becomes multithreaded, this will need a lock */ |
1977 | if (udev->wusb) { | 1977 | if (udev->wusb) { |
1978 | devnum = udev->portnum + 1; | 1978 | devnum = udev->portnum + 1; |
1979 | BUG_ON(test_bit(devnum, bus->devmap.devicemap)); | 1979 | BUG_ON(test_bit(devnum, bus->devmap.devicemap)); |
1980 | } else { | 1980 | } else { |
1981 | /* Try to allocate the next devnum beginning at | 1981 | /* Try to allocate the next devnum beginning at |
1982 | * bus->devnum_next. */ | 1982 | * bus->devnum_next. */ |
1983 | devnum = find_next_zero_bit(bus->devmap.devicemap, 128, | 1983 | devnum = find_next_zero_bit(bus->devmap.devicemap, 128, |
1984 | bus->devnum_next); | 1984 | bus->devnum_next); |
1985 | if (devnum >= 128) | 1985 | if (devnum >= 128) |
1986 | devnum = find_next_zero_bit(bus->devmap.devicemap, | 1986 | devnum = find_next_zero_bit(bus->devmap.devicemap, |
1987 | 128, 1); | 1987 | 128, 1); |
1988 | bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1); | 1988 | bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1); |
1989 | } | 1989 | } |
1990 | if (devnum < 128) { | 1990 | if (devnum < 128) { |
1991 | set_bit(devnum, bus->devmap.devicemap); | 1991 | set_bit(devnum, bus->devmap.devicemap); |
1992 | udev->devnum = devnum; | 1992 | udev->devnum = devnum; |
1993 | } | 1993 | } |
1994 | } | 1994 | } |
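choose_devnum() above allocates device numbers round-robin from a 128-bit per-bus map, wrapping back to 1 (address 0 is reserved) when the scan from devnum_next finds nothing free. A minimal sketch of just that allocation pattern, using the kernel's find_next_zero_bit()/set_bit() as the original does; the helper name and the explicit failure return are assumptions for illustration:

/* Hypothetical helper illustrating the round-robin scan; editorial sketch. */
static int sketch_alloc_devnum(unsigned long *devicemap, int *devnum_next)
{
	int devnum = find_next_zero_bit(devicemap, 128, *devnum_next);

	if (devnum >= 128)			/* wrap; devnum 0 is reserved */
		devnum = find_next_zero_bit(devicemap, 128, 1);
	*devnum_next = (devnum >= 127 ? 1 : devnum + 1);
	if (devnum >= 128)
		return -1;			/* no free address on this bus */
	set_bit(devnum, devicemap);
	return devnum;
}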
1995 | 1995 | ||
1996 | static void release_devnum(struct usb_device *udev) | 1996 | static void release_devnum(struct usb_device *udev) |
1997 | { | 1997 | { |
1998 | if (udev->devnum > 0) { | 1998 | if (udev->devnum > 0) { |
1999 | clear_bit(udev->devnum, udev->bus->devmap.devicemap); | 1999 | clear_bit(udev->devnum, udev->bus->devmap.devicemap); |
2000 | udev->devnum = -1; | 2000 | udev->devnum = -1; |
2001 | } | 2001 | } |
2002 | } | 2002 | } |
2003 | 2003 | ||
2004 | static void update_devnum(struct usb_device *udev, int devnum) | 2004 | static void update_devnum(struct usb_device *udev, int devnum) |
2005 | { | 2005 | { |
2006 | /* The address for a WUSB device is managed by wusbcore. */ | 2006 | /* The address for a WUSB device is managed by wusbcore. */ |
2007 | if (!udev->wusb) | 2007 | if (!udev->wusb) |
2008 | udev->devnum = devnum; | 2008 | udev->devnum = devnum; |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | static void hub_free_dev(struct usb_device *udev) | 2011 | static void hub_free_dev(struct usb_device *udev) |
2012 | { | 2012 | { |
2013 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 2013 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
2014 | 2014 | ||
2015 | /* Root hubs aren't real devices, so don't free HCD resources */ | 2015 | /* Root hubs aren't real devices, so don't free HCD resources */ |
2016 | if (hcd->driver->free_dev && udev->parent) | 2016 | if (hcd->driver->free_dev && udev->parent) |
2017 | hcd->driver->free_dev(hcd, udev); | 2017 | hcd->driver->free_dev(hcd, udev); |
2018 | } | 2018 | } |
2019 | 2019 | ||
2020 | /** | 2020 | /** |
2021 | * usb_disconnect - disconnect a device (usbcore-internal) | 2021 | * usb_disconnect - disconnect a device (usbcore-internal) |
2022 | * @pdev: pointer to device being disconnected | 2022 | * @pdev: pointer to device being disconnected |
2023 | * Context: !in_interrupt () | 2023 | * Context: !in_interrupt () |
2024 | * | 2024 | * |
2025 | * Something got disconnected. Get rid of it and all of its children. | 2025 | * Something got disconnected. Get rid of it and all of its children. |
2026 | * | 2026 | * |
2027 | * If *pdev is a normal device then the parent hub must already be locked. | 2027 | * If *pdev is a normal device then the parent hub must already be locked. |
2028 | * If *pdev is a root hub then the caller must hold the usb_bus_list_lock, | 2028 | * If *pdev is a root hub then the caller must hold the usb_bus_list_lock, |
2029 | * which protects the set of root hubs as well as the list of buses. | 2029 | * which protects the set of root hubs as well as the list of buses. |
2030 | * | 2030 | * |
2031 | * Only hub drivers (including virtual root hub drivers for host | 2031 | * Only hub drivers (including virtual root hub drivers for host |
2032 | * controllers) should ever call this. | 2032 | * controllers) should ever call this. |
2033 | * | 2033 | * |
2034 | * This call is synchronous, and may not be used in an interrupt context. | 2034 | * This call is synchronous, and may not be used in an interrupt context. |
2035 | */ | 2035 | */ |
2036 | void usb_disconnect(struct usb_device **pdev) | 2036 | void usb_disconnect(struct usb_device **pdev) |
2037 | { | 2037 | { |
2038 | struct usb_device *udev = *pdev; | 2038 | struct usb_device *udev = *pdev; |
2039 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); | 2039 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); |
2040 | int i; | 2040 | int i; |
2041 | 2041 | ||
2042 | /* mark the device as inactive, so any further urb submissions for | 2042 | /* mark the device as inactive, so any further urb submissions for |
2043 | * this device (and any of its children) will fail immediately. | 2043 | * this device (and any of its children) will fail immediately. |
2044 | * this quiesces everything except pending urbs. | 2044 | * this quiesces everything except pending urbs. |
2045 | */ | 2045 | */ |
2046 | usb_set_device_state(udev, USB_STATE_NOTATTACHED); | 2046 | usb_set_device_state(udev, USB_STATE_NOTATTACHED); |
2047 | dev_info(&udev->dev, "USB disconnect, device number %d\n", | 2047 | dev_info(&udev->dev, "USB disconnect, device number %d\n", |
2048 | udev->devnum); | 2048 | udev->devnum); |
2049 | 2049 | ||
2050 | usb_lock_device(udev); | 2050 | usb_lock_device(udev); |
2051 | 2051 | ||
2052 | /* Free up all the children before we remove this device */ | 2052 | /* Free up all the children before we remove this device */ |
2053 | for (i = 0; i < udev->maxchild; i++) { | 2053 | for (i = 0; i < udev->maxchild; i++) { |
2054 | if (hub->ports[i]->child) | 2054 | if (hub->ports[i]->child) |
2055 | usb_disconnect(&hub->ports[i]->child); | 2055 | usb_disconnect(&hub->ports[i]->child); |
2056 | } | 2056 | } |
2057 | 2057 | ||
2058 | /* deallocate hcd/hardware state ... nuking all pending urbs and | 2058 | /* deallocate hcd/hardware state ... nuking all pending urbs and |
2059 | * cleaning up all state associated with the current configuration | 2059 | * cleaning up all state associated with the current configuration |
2060 | * so that the hardware is now fully quiesced. | 2060 | * so that the hardware is now fully quiesced. |
2061 | */ | 2061 | */ |
2062 | dev_dbg (&udev->dev, "unregistering device\n"); | 2062 | dev_dbg (&udev->dev, "unregistering device\n"); |
2063 | usb_disable_device(udev, 0); | 2063 | usb_disable_device(udev, 0); |
2064 | usb_hcd_synchronize_unlinks(udev); | 2064 | usb_hcd_synchronize_unlinks(udev); |
2065 | 2065 | ||
2066 | if (udev->parent) { | 2066 | if (udev->parent) { |
2067 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); | 2067 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); |
2068 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; | 2068 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; |
2069 | 2069 | ||
2070 | sysfs_remove_link(&udev->dev.kobj, "port"); | 2070 | sysfs_remove_link(&udev->dev.kobj, "port"); |
2071 | sysfs_remove_link(&port_dev->dev.kobj, "device"); | 2071 | sysfs_remove_link(&port_dev->dev.kobj, "device"); |
2072 | 2072 | ||
2073 | if (!port_dev->did_runtime_put) | 2073 | if (!port_dev->did_runtime_put) |
2074 | pm_runtime_put(&port_dev->dev); | 2074 | pm_runtime_put(&port_dev->dev); |
2075 | else | 2075 | else |
2076 | port_dev->did_runtime_put = false; | 2076 | port_dev->did_runtime_put = false; |
2077 | } | 2077 | } |
2078 | 2078 | ||
2079 | usb_remove_ep_devs(&udev->ep0); | 2079 | usb_remove_ep_devs(&udev->ep0); |
2080 | usb_unlock_device(udev); | 2080 | usb_unlock_device(udev); |
2081 | 2081 | ||
2082 | /* Unregister the device. The device driver is responsible | 2082 | /* Unregister the device. The device driver is responsible |
2083 | * for de-configuring the device and invoking the remove-device | 2083 | * for de-configuring the device and invoking the remove-device |
2084 | * notifier chain (used by usbfs and possibly others). | 2084 | * notifier chain (used by usbfs and possibly others). |
2085 | */ | 2085 | */ |
2086 | device_del(&udev->dev); | 2086 | device_del(&udev->dev); |
2087 | 2087 | ||
2088 | /* Free the device number and delete the parent's children[] | 2088 | /* Free the device number and delete the parent's children[] |
2089 | * (or root_hub) pointer. | 2089 | * (or root_hub) pointer. |
2090 | */ | 2090 | */ |
2091 | release_devnum(udev); | 2091 | release_devnum(udev); |
2092 | 2092 | ||
2093 | /* Avoid races with recursively_mark_NOTATTACHED() */ | 2093 | /* Avoid races with recursively_mark_NOTATTACHED() */ |
2094 | spin_lock_irq(&device_state_lock); | 2094 | spin_lock_irq(&device_state_lock); |
2095 | *pdev = NULL; | 2095 | *pdev = NULL; |
2096 | spin_unlock_irq(&device_state_lock); | 2096 | spin_unlock_irq(&device_state_lock); |
2097 | 2097 | ||
2098 | hub_free_dev(udev); | 2098 | hub_free_dev(udev); |
2099 | 2099 | ||
2100 | put_device(&udev->dev); | 2100 | put_device(&udev->dev); |
2101 | } | 2101 | } |
2102 | 2102 | ||
2103 | #ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES | 2103 | #ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES |
2104 | static void show_string(struct usb_device *udev, char *id, char *string) | 2104 | static void show_string(struct usb_device *udev, char *id, char *string) |
2105 | { | 2105 | { |
2106 | if (!string) | 2106 | if (!string) |
2107 | return; | 2107 | return; |
2108 | dev_info(&udev->dev, "%s: %s\n", id, string); | 2108 | dev_info(&udev->dev, "%s: %s\n", id, string); |
2109 | } | 2109 | } |
2110 | 2110 | ||
2111 | static void announce_device(struct usb_device *udev) | 2111 | static void announce_device(struct usb_device *udev) |
2112 | { | 2112 | { |
2113 | dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n", | 2113 | dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n", |
2114 | le16_to_cpu(udev->descriptor.idVendor), | 2114 | le16_to_cpu(udev->descriptor.idVendor), |
2115 | le16_to_cpu(udev->descriptor.idProduct)); | 2115 | le16_to_cpu(udev->descriptor.idProduct)); |
2116 | dev_info(&udev->dev, | 2116 | dev_info(&udev->dev, |
2117 | "New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n", | 2117 | "New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n", |
2118 | udev->descriptor.iManufacturer, | 2118 | udev->descriptor.iManufacturer, |
2119 | udev->descriptor.iProduct, | 2119 | udev->descriptor.iProduct, |
2120 | udev->descriptor.iSerialNumber); | 2120 | udev->descriptor.iSerialNumber); |
2121 | show_string(udev, "Product", udev->product); | 2121 | show_string(udev, "Product", udev->product); |
2122 | show_string(udev, "Manufacturer", udev->manufacturer); | 2122 | show_string(udev, "Manufacturer", udev->manufacturer); |
2123 | show_string(udev, "SerialNumber", udev->serial); | 2123 | show_string(udev, "SerialNumber", udev->serial); |
2124 | } | 2124 | } |
2125 | #else | 2125 | #else |
2126 | static inline void announce_device(struct usb_device *udev) { } | 2126 | static inline void announce_device(struct usb_device *udev) { } |
2127 | #endif | 2127 | #endif |
2128 | 2128 | ||
2129 | #ifdef CONFIG_USB_OTG | 2129 | #ifdef CONFIG_USB_OTG |
2130 | #include "otg_whitelist.h" | 2130 | #include "otg_whitelist.h" |
2131 | #endif | 2131 | #endif |
2132 | 2132 | ||
2133 | /** | 2133 | /** |
2134 | * usb_enumerate_device_otg - FIXME (usbcore-internal) | 2134 | * usb_enumerate_device_otg - FIXME (usbcore-internal) |
2135 | * @udev: newly addressed device (in ADDRESS state) | 2135 | * @udev: newly addressed device (in ADDRESS state) |
2136 | * | 2136 | * |
2137 | * Finish enumeration for On-The-Go devices | 2137 | * Finish enumeration for On-The-Go devices |
2138 | * | 2138 | * |
2139 | * Return: 0 if successful. A negative error code otherwise. | 2139 | * Return: 0 if successful. A negative error code otherwise. |
2140 | */ | 2140 | */ |
2141 | static int usb_enumerate_device_otg(struct usb_device *udev) | 2141 | static int usb_enumerate_device_otg(struct usb_device *udev) |
2142 | { | 2142 | { |
2143 | int err = 0; | 2143 | int err = 0; |
2144 | 2144 | ||
2145 | #ifdef CONFIG_USB_OTG | 2145 | #ifdef CONFIG_USB_OTG |
2146 | /* | 2146 | /* |
2147 | * OTG-aware devices on OTG-capable root hubs may be able to use SRP, | 2147 | * OTG-aware devices on OTG-capable root hubs may be able to use SRP, |
2148 | * to wake us after we've powered off VBUS; and HNP, switching roles | 2148 | * to wake us after we've powered off VBUS; and HNP, switching roles |
2149 | * "host" to "peripheral". The OTG descriptor helps figure this out. | 2149 | * "host" to "peripheral". The OTG descriptor helps figure this out. |
2150 | */ | 2150 | */ |
2151 | if (!udev->bus->is_b_host | 2151 | if (!udev->bus->is_b_host |
2152 | && udev->config | 2152 | && udev->config |
2153 | && udev->parent == udev->bus->root_hub) { | 2153 | && udev->parent == udev->bus->root_hub) { |
2154 | struct usb_otg_descriptor *desc = NULL; | 2154 | struct usb_otg_descriptor *desc = NULL; |
2155 | struct usb_bus *bus = udev->bus; | 2155 | struct usb_bus *bus = udev->bus; |
2156 | 2156 | ||
2157 | /* descriptor may appear anywhere in config */ | 2157 | /* descriptor may appear anywhere in config */ |
2158 | if (__usb_get_extra_descriptor (udev->rawdescriptors[0], | 2158 | if (__usb_get_extra_descriptor (udev->rawdescriptors[0], |
2159 | le16_to_cpu(udev->config[0].desc.wTotalLength), | 2159 | le16_to_cpu(udev->config[0].desc.wTotalLength), |
2160 | USB_DT_OTG, (void **) &desc) == 0) { | 2160 | USB_DT_OTG, (void **) &desc) == 0) { |
2161 | if (desc->bmAttributes & USB_OTG_HNP) { | 2161 | if (desc->bmAttributes & USB_OTG_HNP) { |
2162 | unsigned port1 = udev->portnum; | 2162 | unsigned port1 = udev->portnum; |
2163 | 2163 | ||
2164 | dev_info(&udev->dev, | 2164 | dev_info(&udev->dev, |
2165 | "Dual-Role OTG device on %sHNP port\n", | 2165 | "Dual-Role OTG device on %sHNP port\n", |
2166 | (port1 == bus->otg_port) | 2166 | (port1 == bus->otg_port) |
2167 | ? "" : "non-"); | 2167 | ? "" : "non-"); |
2168 | 2168 | ||
2169 | /* enable HNP before suspend, it's simpler */ | 2169 | /* enable HNP before suspend, it's simpler */ |
2170 | if (port1 == bus->otg_port) | 2170 | if (port1 == bus->otg_port) |
2171 | bus->b_hnp_enable = 1; | 2171 | bus->b_hnp_enable = 1; |
2172 | err = usb_control_msg(udev, | 2172 | err = usb_control_msg(udev, |
2173 | usb_sndctrlpipe(udev, 0), | 2173 | usb_sndctrlpipe(udev, 0), |
2174 | USB_REQ_SET_FEATURE, 0, | 2174 | USB_REQ_SET_FEATURE, 0, |
2175 | bus->b_hnp_enable | 2175 | bus->b_hnp_enable |
2176 | ? USB_DEVICE_B_HNP_ENABLE | 2176 | ? USB_DEVICE_B_HNP_ENABLE |
2177 | : USB_DEVICE_A_ALT_HNP_SUPPORT, | 2177 | : USB_DEVICE_A_ALT_HNP_SUPPORT, |
2178 | 0, NULL, 0, USB_CTRL_SET_TIMEOUT); | 2178 | 0, NULL, 0, USB_CTRL_SET_TIMEOUT); |
2179 | if (err < 0) { | 2179 | if (err < 0) { |
2180 | /* OTG MESSAGE: report errors here, | 2180 | /* OTG MESSAGE: report errors here, |
2181 | * customize to match your product. | 2181 | * customize to match your product. |
2182 | */ | 2182 | */ |
2183 | dev_info(&udev->dev, | 2183 | dev_info(&udev->dev, |
2184 | "can't set HNP mode: %d\n", | 2184 | "can't set HNP mode: %d\n", |
2185 | err); | 2185 | err); |
2186 | bus->b_hnp_enable = 0; | 2186 | bus->b_hnp_enable = 0; |
2187 | } | 2187 | } |
2188 | } | 2188 | } |
2189 | } | 2189 | } |
2190 | } | 2190 | } |
2191 | 2191 | ||
2192 | if (!is_targeted(udev)) { | 2192 | if (!is_targeted(udev)) { |
2193 | 2193 | ||
2194 | /* Maybe it can talk to us, though we can't talk to it. | 2194 | /* Maybe it can talk to us, though we can't talk to it. |
2195 | * (Includes HNP test device.) | 2195 | * (Includes HNP test device.) |
2196 | */ | 2196 | */ |
2197 | if (udev->bus->b_hnp_enable || udev->bus->is_b_host) { | 2197 | if (udev->bus->b_hnp_enable || udev->bus->is_b_host) { |
2198 | err = usb_port_suspend(udev, PMSG_SUSPEND); | 2198 | err = usb_port_suspend(udev, PMSG_SUSPEND); |
2199 | if (err < 0) | 2199 | if (err < 0) |
2200 | dev_dbg(&udev->dev, "HNP fail, %d\n", err); | 2200 | dev_dbg(&udev->dev, "HNP fail, %d\n", err); |
2201 | } | 2201 | } |
2202 | err = -ENOTSUPP; | 2202 | err = -ENOTSUPP; |
2203 | goto fail; | 2203 | goto fail; |
2204 | } | 2204 | } |
2205 | fail: | 2205 | fail: |
2206 | #endif | 2206 | #endif |
2207 | return err; | 2207 | return err; |
2208 | } | 2208 | } |
2209 | 2209 | ||
2210 | 2210 | ||
2211 | /** | 2211 | /** |
2212 | * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) | 2212 | * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) |
2213 | * @udev: newly addressed device (in ADDRESS state) | 2213 | * @udev: newly addressed device (in ADDRESS state) |
2214 | * | 2214 | * |
2215 | * This is only called by usb_new_device() and usb_authorize_device() | 2215 | * This is only called by usb_new_device() and usb_authorize_device() |
2216 | * and FIXME -- all comments that apply to them apply here wrt the | 2216 | * and FIXME -- all comments that apply to them apply here wrt the |
2217 | * environment. | 2217 | * environment. |
2218 | * | 2218 | * |
2219 | * If the device is WUSB and not authorized, we don't attempt to read | 2219 | * If the device is WUSB and not authorized, we don't attempt to read |
2220 | * the string descriptors, as they will be errored out by the device | 2220 | * the string descriptors, as they will be errored out by the device |
2221 | * until it has been authorized. | 2221 | * until it has been authorized. |
2222 | * | 2222 | * |
2223 | * Return: 0 if successful. A negative error code otherwise. | 2223 | * Return: 0 if successful. A negative error code otherwise. |
2224 | */ | 2224 | */ |
2225 | static int usb_enumerate_device(struct usb_device *udev) | 2225 | static int usb_enumerate_device(struct usb_device *udev) |
2226 | { | 2226 | { |
2227 | int err; | 2227 | int err; |
2228 | 2228 | ||
2229 | if (udev->config == NULL) { | 2229 | if (udev->config == NULL) { |
2230 | err = usb_get_configuration(udev); | 2230 | err = usb_get_configuration(udev); |
2231 | if (err < 0) { | 2231 | if (err < 0) { |
2232 | if (err != -ENODEV) | 2232 | if (err != -ENODEV) |
2233 | dev_err(&udev->dev, "can't read configurations, error %d\n", | 2233 | dev_err(&udev->dev, "can't read configurations, error %d\n", |
2234 | err); | 2234 | err); |
2235 | return err; | 2235 | return err; |
2236 | } | 2236 | } |
2237 | } | 2237 | } |
2238 | if (udev->wusb == 1 && udev->authorized == 0) { | 2238 | if (udev->wusb == 1 && udev->authorized == 0) { |
2239 | udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); | 2239 | udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); |
2240 | udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); | 2240 | udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); |
2241 | udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); | 2241 | udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); |
2242 | } else { | 2242 | } else { |
2243 | /* read the standard strings and cache them if present */ | 2243 | /* read the standard strings and cache them if present */ |
2244 | udev->product = usb_cache_string(udev, udev->descriptor.iProduct); | 2244 | udev->product = usb_cache_string(udev, udev->descriptor.iProduct); |
2245 | udev->manufacturer = usb_cache_string(udev, | 2245 | udev->manufacturer = usb_cache_string(udev, |
2246 | udev->descriptor.iManufacturer); | 2246 | udev->descriptor.iManufacturer); |
2247 | udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); | 2247 | udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); |
2248 | } | 2248 | } |
2249 | err = usb_enumerate_device_otg(udev); | 2249 | err = usb_enumerate_device_otg(udev); |
2250 | if (err < 0) | 2250 | if (err < 0) |
2251 | return err; | 2251 | return err; |
2252 | 2252 | ||
2253 | usb_detect_interface_quirks(udev); | 2253 | usb_detect_interface_quirks(udev); |
2254 | 2254 | ||
2255 | return 0; | 2255 | return 0; |
2256 | } | 2256 | } |
2257 | 2257 | ||
2258 | static void set_usb_port_removable(struct usb_device *udev) | 2258 | static void set_usb_port_removable(struct usb_device *udev) |
2259 | { | 2259 | { |
2260 | struct usb_device *hdev = udev->parent; | 2260 | struct usb_device *hdev = udev->parent; |
2261 | struct usb_hub *hub; | 2261 | struct usb_hub *hub; |
2262 | u8 port = udev->portnum; | 2262 | u8 port = udev->portnum; |
2263 | u16 wHubCharacteristics; | 2263 | u16 wHubCharacteristics; |
2264 | bool removable = true; | 2264 | bool removable = true; |
2265 | 2265 | ||
2266 | if (!hdev) | 2266 | if (!hdev) |
2267 | return; | 2267 | return; |
2268 | 2268 | ||
2269 | hub = usb_hub_to_struct_hub(udev->parent); | 2269 | hub = usb_hub_to_struct_hub(udev->parent); |
2270 | 2270 | ||
2271 | wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); | 2271 | wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); |
2272 | 2272 | ||
2273 | if (!(wHubCharacteristics & HUB_CHAR_COMPOUND)) | 2273 | if (!(wHubCharacteristics & HUB_CHAR_COMPOUND)) |
2274 | return; | 2274 | return; |
2275 | 2275 | ||
2276 | if (hub_is_superspeed(hdev)) { | 2276 | if (hub_is_superspeed(hdev)) { |
2277 | if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable) | 2277 | if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable) |
2278 | & (1 << port)) | 2278 | & (1 << port)) |
2279 | removable = false; | 2279 | removable = false; |
2280 | } else { | 2280 | } else { |
2281 | if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8))) | 2281 | if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8))) |
2282 | removable = false; | 2282 | removable = false; |
2283 | } | 2283 | } |
2284 | 2284 | ||
2285 | if (removable) | 2285 | if (removable) |
2286 | udev->removable = USB_DEVICE_REMOVABLE; | 2286 | udev->removable = USB_DEVICE_REMOVABLE; |
2287 | else | 2287 | else |
2288 | udev->removable = USB_DEVICE_FIXED; | 2288 | udev->removable = USB_DEVICE_FIXED; |
2289 | } | 2289 | } |
2290 | 2290 | ||
2291 | /** | 2291 | /** |
2292 | * usb_new_device - perform initial device setup (usbcore-internal) | 2292 | * usb_new_device - perform initial device setup (usbcore-internal) |
2293 | * @udev: newly addressed device (in ADDRESS state) | 2293 | * @udev: newly addressed device (in ADDRESS state) |
2294 | * | 2294 | * |
2295 | * This is called with devices which have been detected but not fully | 2295 | * This is called with devices which have been detected but not fully |
2296 | * enumerated. The device descriptor is available, but not descriptors | 2296 | * enumerated. The device descriptor is available, but not descriptors |
2297 | * for any device configuration. The caller must have locked either | 2297 | * for any device configuration. The caller must have locked either |
2298 | * the parent hub (if udev is a normal device) or else the | 2298 | * the parent hub (if udev is a normal device) or else the |
2299 | * usb_bus_list_lock (if udev is a root hub). The parent's pointer to | 2299 | * usb_bus_list_lock (if udev is a root hub). The parent's pointer to |
2300 | * udev has already been installed, but udev is not yet visible through | 2300 | * udev has already been installed, but udev is not yet visible through |
2301 | * sysfs or other filesystem code. | 2301 | * sysfs or other filesystem code. |
2302 | * | 2302 | * |
2303 | * This call is synchronous, and may not be used in an interrupt context. | 2303 | * This call is synchronous, and may not be used in an interrupt context. |
2304 | * | 2304 | * |
2305 | * Only the hub driver or root-hub registrar should ever call this. | 2305 | * Only the hub driver or root-hub registrar should ever call this. |
2306 | * | 2306 | * |
2307 | * Return: Whether the device is configured properly or not. Zero if the | 2307 | * Return: Whether the device is configured properly or not. Zero if the |
2308 | * interface was registered with the driver core; else a negative errno | 2308 | * interface was registered with the driver core; else a negative errno |
2309 | * value. | 2309 | * value. |
2310 | * | 2310 | * |
2311 | */ | 2311 | */ |
2312 | int usb_new_device(struct usb_device *udev) | 2312 | int usb_new_device(struct usb_device *udev) |
2313 | { | 2313 | { |
2314 | int err; | 2314 | int err; |
2315 | 2315 | ||
2316 | if (udev->parent) { | 2316 | if (udev->parent) { |
2317 | /* Initialize non-root-hub device wakeup to disabled; | 2317 | /* Initialize non-root-hub device wakeup to disabled; |
2318 | * device (un)configuration controls whether it is wakeup capable; | 2318 | * device (un)configuration controls whether it is wakeup capable; |
2319 | * sysfs power/wakeup controls whether wakeup is enabled/disabled | 2319 | * sysfs power/wakeup controls whether wakeup is enabled/disabled |
2320 | */ | 2320 | */ |
2321 | device_init_wakeup(&udev->dev, 0); | 2321 | device_init_wakeup(&udev->dev, 0); |
2322 | } | 2322 | } |
2323 | 2323 | ||
2324 | /* Tell the runtime-PM framework the device is active */ | 2324 | /* Tell the runtime-PM framework the device is active */ |
2325 | pm_runtime_set_active(&udev->dev); | 2325 | pm_runtime_set_active(&udev->dev); |
2326 | pm_runtime_get_noresume(&udev->dev); | 2326 | pm_runtime_get_noresume(&udev->dev); |
2327 | pm_runtime_use_autosuspend(&udev->dev); | 2327 | pm_runtime_use_autosuspend(&udev->dev); |
2328 | pm_runtime_enable(&udev->dev); | 2328 | pm_runtime_enable(&udev->dev); |
2329 | 2329 | ||
2330 | /* By default, forbid autosuspend for all devices. It will be | 2330 | /* By default, forbid autosuspend for all devices. It will be |
2331 | * allowed for hubs during binding. | 2331 | * allowed for hubs during binding. |
2332 | */ | 2332 | */ |
2333 | usb_disable_autosuspend(udev); | 2333 | usb_disable_autosuspend(udev); |
2334 | 2334 | ||
2335 | err = usb_enumerate_device(udev); /* Read descriptors */ | 2335 | err = usb_enumerate_device(udev); /* Read descriptors */ |
2336 | if (err < 0) | 2336 | if (err < 0) |
2337 | goto fail; | 2337 | goto fail; |
2338 | dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n", | 2338 | dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n", |
2339 | udev->devnum, udev->bus->busnum, | 2339 | udev->devnum, udev->bus->busnum, |
2340 | (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); | 2340 | (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); |
2341 | /* export the usbdev device-node for libusb */ | 2341 | /* export the usbdev device-node for libusb */ |
2342 | udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, | 2342 | udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, |
2343 | (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); | 2343 | (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); |
2344 | 2344 | ||
2345 | /* Tell the world! */ | 2345 | /* Tell the world! */ |
2346 | announce_device(udev); | 2346 | announce_device(udev); |
2347 | 2347 | ||
2348 | if (udev->serial) | 2348 | if (udev->serial) |
2349 | add_device_randomness(udev->serial, strlen(udev->serial)); | 2349 | add_device_randomness(udev->serial, strlen(udev->serial)); |
2350 | if (udev->product) | 2350 | if (udev->product) |
2351 | add_device_randomness(udev->product, strlen(udev->product)); | 2351 | add_device_randomness(udev->product, strlen(udev->product)); |
2352 | if (udev->manufacturer) | 2352 | if (udev->manufacturer) |
2353 | add_device_randomness(udev->manufacturer, | 2353 | add_device_randomness(udev->manufacturer, |
2354 | strlen(udev->manufacturer)); | 2354 | strlen(udev->manufacturer)); |
2355 | 2355 | ||
2356 | device_enable_async_suspend(&udev->dev); | 2356 | device_enable_async_suspend(&udev->dev); |
2357 | 2357 | ||
2358 | /* | 2358 | /* |
2359 | * check whether the hub marks this port as non-removable. Do it | 2359 | * check whether the hub marks this port as non-removable. Do it |
2360 | * now so that platform-specific data can override it in | 2360 | * now so that platform-specific data can override it in |
2361 | * device_add() | 2361 | * device_add() |
2362 | */ | 2362 | */ |
2363 | if (udev->parent) | 2363 | if (udev->parent) |
2364 | set_usb_port_removable(udev); | 2364 | set_usb_port_removable(udev); |
2365 | 2365 | ||
2366 | /* Register the device. The device driver is responsible | 2366 | /* Register the device. The device driver is responsible |
2367 | * for configuring the device and invoking the add-device | 2367 | * for configuring the device and invoking the add-device |
2368 | * notifier chain (used by usbfs and possibly others). | 2368 | * notifier chain (used by usbfs and possibly others). |
2369 | */ | 2369 | */ |
2370 | err = device_add(&udev->dev); | 2370 | err = device_add(&udev->dev); |
2371 | if (err) { | 2371 | if (err) { |
2372 | dev_err(&udev->dev, "can't device_add, error %d\n", err); | 2372 | dev_err(&udev->dev, "can't device_add, error %d\n", err); |
2373 | goto fail; | 2373 | goto fail; |
2374 | } | 2374 | } |
2375 | 2375 | ||
2376 | /* Create link files between child device and usb port device. */ | 2376 | /* Create link files between child device and usb port device. */ |
2377 | if (udev->parent) { | 2377 | if (udev->parent) { |
2378 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); | 2378 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); |
2379 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; | 2379 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; |
2380 | 2380 | ||
2381 | err = sysfs_create_link(&udev->dev.kobj, | 2381 | err = sysfs_create_link(&udev->dev.kobj, |
2382 | &port_dev->dev.kobj, "port"); | 2382 | &port_dev->dev.kobj, "port"); |
2383 | if (err) | 2383 | if (err) |
2384 | goto fail; | 2384 | goto fail; |
2385 | 2385 | ||
2386 | err = sysfs_create_link(&port_dev->dev.kobj, | 2386 | err = sysfs_create_link(&port_dev->dev.kobj, |
2387 | &udev->dev.kobj, "device"); | 2387 | &udev->dev.kobj, "device"); |
2388 | if (err) { | 2388 | if (err) { |
2389 | sysfs_remove_link(&udev->dev.kobj, "port"); | 2389 | sysfs_remove_link(&udev->dev.kobj, "port"); |
2390 | goto fail; | 2390 | goto fail; |
2391 | } | 2391 | } |
2392 | 2392 | ||
2393 | pm_runtime_get_sync(&port_dev->dev); | 2393 | pm_runtime_get_sync(&port_dev->dev); |
2394 | } | 2394 | } |
2395 | 2395 | ||
2396 | (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev); | 2396 | (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev); |
2397 | usb_mark_last_busy(udev); | 2397 | usb_mark_last_busy(udev); |
2398 | pm_runtime_put_sync_autosuspend(&udev->dev); | 2398 | pm_runtime_put_sync_autosuspend(&udev->dev); |
2399 | return err; | 2399 | return err; |
2400 | 2400 | ||
2401 | fail: | 2401 | fail: |
2402 | usb_set_device_state(udev, USB_STATE_NOTATTACHED); | 2402 | usb_set_device_state(udev, USB_STATE_NOTATTACHED); |
2403 | pm_runtime_disable(&udev->dev); | 2403 | pm_runtime_disable(&udev->dev); |
2404 | pm_runtime_set_suspended(&udev->dev); | 2404 | pm_runtime_set_suspended(&udev->dev); |
2405 | return err; | 2405 | return err; |
2406 | } | 2406 | } |
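The usbdev node exported by usb_new_device() above uses a fixed mapping: each bus owns a block of 128 minor numbers under USB_DEVICE_MAJOR, so minor = (busnum - 1) * 128 + (devnum - 1). A small illustrative program (not part of this commit) that reproduces the arithmetic:

    #include <stdio.h>

    /* Same computation as the MKDEV() call in usb_new_device(). */
    static int usbdev_minor(int busnum, int devnum)
    {
            return (busnum - 1) * 128 + (devnum - 1);
    }

    int main(void)
    {
            /* e.g. bus 3, device 5 -> minor 260, the node libusb opens */
            printf("minor = %d\n", usbdev_minor(3, 5));
            return 0;
    }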
2407 | 2407 | ||
2408 | 2408 | ||
2409 | /** | 2409 | /** |
2410 | * usb_deauthorize_device - deauthorize a device (usbcore-internal) | 2410 | * usb_deauthorize_device - deauthorize a device (usbcore-internal) |
2411 | * @usb_dev: USB device | 2411 | * @usb_dev: USB device |
2412 | * | 2412 | * |
2413 | * Move the USB device to a very basic state where interfaces are disabled | 2413 | * Move the USB device to a very basic state where interfaces are disabled |
2414 | * and the device is in fact unconfigured and unusable. | 2414 | * and the device is in fact unconfigured and unusable. |
2415 | * | 2415 | * |
2416 | * We share a lock (that we have) with device_del(), so we need to | 2416 | * We share a lock (that we have) with device_del(), so we need to |
2417 | * defer its call. | 2417 | * defer its call. |
2418 | * | 2418 | * |
2419 | * Return: 0. | 2419 | * Return: 0. |
2420 | */ | 2420 | */ |
2421 | int usb_deauthorize_device(struct usb_device *usb_dev) | 2421 | int usb_deauthorize_device(struct usb_device *usb_dev) |
2422 | { | 2422 | { |
2423 | usb_lock_device(usb_dev); | 2423 | usb_lock_device(usb_dev); |
2424 | if (usb_dev->authorized == 0) | 2424 | if (usb_dev->authorized == 0) |
2425 | goto out_unauthorized; | 2425 | goto out_unauthorized; |
2426 | 2426 | ||
2427 | usb_dev->authorized = 0; | 2427 | usb_dev->authorized = 0; |
2428 | usb_set_configuration(usb_dev, -1); | 2428 | usb_set_configuration(usb_dev, -1); |
2429 | 2429 | ||
2430 | kfree(usb_dev->product); | 2430 | kfree(usb_dev->product); |
2431 | usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); | 2431 | usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); |
2432 | kfree(usb_dev->manufacturer); | 2432 | kfree(usb_dev->manufacturer); |
2433 | usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); | 2433 | usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); |
2434 | kfree(usb_dev->serial); | 2434 | kfree(usb_dev->serial); |
2435 | usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); | 2435 | usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); |
2436 | 2436 | ||
2437 | usb_destroy_configuration(usb_dev); | 2437 | usb_destroy_configuration(usb_dev); |
2438 | usb_dev->descriptor.bNumConfigurations = 0; | 2438 | usb_dev->descriptor.bNumConfigurations = 0; |
2439 | 2439 | ||
2440 | out_unauthorized: | 2440 | out_unauthorized: |
2441 | usb_unlock_device(usb_dev); | 2441 | usb_unlock_device(usb_dev); |
2442 | return 0; | 2442 | return 0; |
2443 | } | 2443 | } |
2444 | 2444 | ||
2445 | 2445 | ||
2446 | int usb_authorize_device(struct usb_device *usb_dev) | 2446 | int usb_authorize_device(struct usb_device *usb_dev) |
2447 | { | 2447 | { |
2448 | int result = 0, c; | 2448 | int result = 0, c; |
2449 | 2449 | ||
2450 | usb_lock_device(usb_dev); | 2450 | usb_lock_device(usb_dev); |
2451 | if (usb_dev->authorized == 1) | 2451 | if (usb_dev->authorized == 1) |
2452 | goto out_authorized; | 2452 | goto out_authorized; |
2453 | 2453 | ||
2454 | result = usb_autoresume_device(usb_dev); | 2454 | result = usb_autoresume_device(usb_dev); |
2455 | if (result < 0) { | 2455 | if (result < 0) { |
2456 | dev_err(&usb_dev->dev, | 2456 | dev_err(&usb_dev->dev, |
2457 | "can't autoresume for authorization: %d\n", result); | 2457 | "can't autoresume for authorization: %d\n", result); |
2458 | goto error_autoresume; | 2458 | goto error_autoresume; |
2459 | } | 2459 | } |
2460 | result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor)); | 2460 | result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor)); |
2461 | if (result < 0) { | 2461 | if (result < 0) { |
2462 | dev_err(&usb_dev->dev, "can't re-read device descriptor for " | 2462 | dev_err(&usb_dev->dev, "can't re-read device descriptor for " |
2463 | "authorization: %d\n", result); | 2463 | "authorization: %d\n", result); |
2464 | goto error_device_descriptor; | 2464 | goto error_device_descriptor; |
2465 | } | 2465 | } |
2466 | 2466 | ||
2467 | kfree(usb_dev->product); | 2467 | kfree(usb_dev->product); |
2468 | usb_dev->product = NULL; | 2468 | usb_dev->product = NULL; |
2469 | kfree(usb_dev->manufacturer); | 2469 | kfree(usb_dev->manufacturer); |
2470 | usb_dev->manufacturer = NULL; | 2470 | usb_dev->manufacturer = NULL; |
2471 | kfree(usb_dev->serial); | 2471 | kfree(usb_dev->serial); |
2472 | usb_dev->serial = NULL; | 2472 | usb_dev->serial = NULL; |
2473 | 2473 | ||
2474 | usb_dev->authorized = 1; | 2474 | usb_dev->authorized = 1; |
2475 | result = usb_enumerate_device(usb_dev); | 2475 | result = usb_enumerate_device(usb_dev); |
2476 | if (result < 0) | 2476 | if (result < 0) |
2477 | goto error_enumerate; | 2477 | goto error_enumerate; |
2478 | /* Choose and set the configuration. This registers the interfaces | 2478 | /* Choose and set the configuration. This registers the interfaces |
2479 | * with the driver core and lets interface drivers bind to them. | 2479 | * with the driver core and lets interface drivers bind to them. |
2480 | */ | 2480 | */ |
2481 | c = usb_choose_configuration(usb_dev); | 2481 | c = usb_choose_configuration(usb_dev); |
2482 | if (c >= 0) { | 2482 | if (c >= 0) { |
2483 | result = usb_set_configuration(usb_dev, c); | 2483 | result = usb_set_configuration(usb_dev, c); |
2484 | if (result) { | 2484 | if (result) { |
2485 | dev_err(&usb_dev->dev, | 2485 | dev_err(&usb_dev->dev, |
2486 | "can't set config #%d, error %d\n", c, result); | 2486 | "can't set config #%d, error %d\n", c, result); |
2487 | /* This need not be fatal. The user can try to | 2487 | /* This need not be fatal. The user can try to |
2488 | * set other configurations. */ | 2488 | * set other configurations. */ |
2489 | } | 2489 | } |
2490 | } | 2490 | } |
2491 | dev_info(&usb_dev->dev, "authorized to connect\n"); | 2491 | dev_info(&usb_dev->dev, "authorized to connect\n"); |
2492 | 2492 | ||
2493 | error_enumerate: | 2493 | error_enumerate: |
2494 | error_device_descriptor: | 2494 | error_device_descriptor: |
2495 | usb_autosuspend_device(usb_dev); | 2495 | usb_autosuspend_device(usb_dev); |
2496 | error_autoresume: | 2496 | error_autoresume: |
2497 | out_authorized: | 2497 | out_authorized: |
2498 | usb_unlock_device(usb_dev); /* complements locktree */ | 2498 | usb_unlock_device(usb_dev); /* complements locktree */ |
2499 | return result; | 2499 | return result; |
2500 | } | 2500 | } |
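usb_authorize_device() and usb_deauthorize_device() are normally reached from user space through the per-device "authorized" sysfs attribute. A minimal user-space sketch; the device path shown is only an example and writing the attribute typically requires root privileges:

    #include <stdio.h>

    int main(void)
    {
            /* Writing '1' authorizes the device, '0' deauthorizes it. */
            FILE *f = fopen("/sys/bus/usb/devices/1-1/authorized", "w");

            if (!f)
                    return 1;
            fputc('1', f);
            fclose(f);
            return 0;
    }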
2501 | 2501 | ||
2502 | 2502 | ||
2503 | /* Returns 1 if @hub is a WUSB root hub, 0 otherwise */ | 2503 | /* Returns 1 if @hub is a WUSB root hub, 0 otherwise */ |
2504 | static unsigned hub_is_wusb(struct usb_hub *hub) | 2504 | static unsigned hub_is_wusb(struct usb_hub *hub) |
2505 | { | 2505 | { |
2506 | struct usb_hcd *hcd; | 2506 | struct usb_hcd *hcd; |
2507 | if (hub->hdev->parent != NULL) /* not a root hub? */ | 2507 | if (hub->hdev->parent != NULL) /* not a root hub? */ |
2508 | return 0; | 2508 | return 0; |
2509 | hcd = container_of(hub->hdev->bus, struct usb_hcd, self); | 2509 | hcd = container_of(hub->hdev->bus, struct usb_hcd, self); |
2510 | return hcd->wireless; | 2510 | return hcd->wireless; |
2511 | } | 2511 | } |
2512 | 2512 | ||
2513 | 2513 | ||
2514 | #define PORT_RESET_TRIES 5 | 2514 | #define PORT_RESET_TRIES 5 |
2515 | #define SET_ADDRESS_TRIES 2 | 2515 | #define SET_ADDRESS_TRIES 2 |
2516 | #define GET_DESCRIPTOR_TRIES 2 | 2516 | #define GET_DESCRIPTOR_TRIES 2 |
2517 | #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) | 2517 | #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) |
2518 | #define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first) | 2518 | #define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first) |
2519 | 2519 | ||
2520 | #define HUB_ROOT_RESET_TIME 50 /* times are in msec */ | 2520 | #define HUB_ROOT_RESET_TIME 50 /* times are in msec */ |
2521 | #define HUB_SHORT_RESET_TIME 10 | 2521 | #define HUB_SHORT_RESET_TIME 10 |
2522 | #define HUB_BH_RESET_TIME 50 | 2522 | #define HUB_BH_RESET_TIME 50 |
2523 | #define HUB_LONG_RESET_TIME 200 | 2523 | #define HUB_LONG_RESET_TIME 200 |
2524 | #define HUB_RESET_TIMEOUT 800 | 2524 | #define HUB_RESET_TIMEOUT 800 |
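These macros drive the enumeration retry loop later in this file: with use_both_schemes set, SET_CONFIG_TRIES allows four attempts, and USE_NEW_SCHEME() decides whether a given attempt uses the "new" initialization scheme (read the first 64 descriptor bytes before SET_ADDRESS) or the "old" one, with the old_scheme_first module parameter choosing which pair of attempts comes first. A small sketch of that mapping, assuming the default old_scheme_first = 0:

    /* Illustrative only: attempts 0 and 1 use the new scheme, attempts
     * 2 and 3 fall back to the old one, mirroring USE_NEW_SCHEME(). */
    static const int old_scheme_first = 0;

    static int use_new_scheme(int attempt)
    {
            return (attempt / 2) == old_scheme_first;
    }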
2525 | 2525 | ||
2526 | static int hub_port_reset(struct usb_hub *hub, int port1, | 2526 | static int hub_port_reset(struct usb_hub *hub, int port1, |
2527 | struct usb_device *udev, unsigned int delay, bool warm); | 2527 | struct usb_device *udev, unsigned int delay, bool warm); |
2528 | 2528 | ||
2529 | /* Is a USB 3.0 port in the Inactive or Compliance Mode state? | 2529 | /* Is a USB 3.0 port in the Inactive or Compliance Mode state? |
2530 | * A port warm reset is required to recover | 2530 | * A port warm reset is required to recover |
2531 | */ | 2531 | */ |
2532 | static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus) | 2532 | static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus) |
2533 | { | 2533 | { |
2534 | return hub_is_superspeed(hub->hdev) && | 2534 | return hub_is_superspeed(hub->hdev) && |
2535 | (((portstatus & USB_PORT_STAT_LINK_STATE) == | 2535 | (((portstatus & USB_PORT_STAT_LINK_STATE) == |
2536 | USB_SS_PORT_LS_SS_INACTIVE) || | 2536 | USB_SS_PORT_LS_SS_INACTIVE) || |
2537 | ((portstatus & USB_PORT_STAT_LINK_STATE) == | 2537 | ((portstatus & USB_PORT_STAT_LINK_STATE) == |
2538 | USB_SS_PORT_LS_COMP_MOD)) ; | 2538 | USB_SS_PORT_LS_COMP_MOD)) ; |
2539 | } | 2539 | } |
2540 | 2540 | ||
2541 | static int hub_port_wait_reset(struct usb_hub *hub, int port1, | 2541 | static int hub_port_wait_reset(struct usb_hub *hub, int port1, |
2542 | struct usb_device *udev, unsigned int delay, bool warm) | 2542 | struct usb_device *udev, unsigned int delay, bool warm) |
2543 | { | 2543 | { |
2544 | int delay_time, ret; | 2544 | int delay_time, ret; |
2545 | u16 portstatus; | 2545 | u16 portstatus; |
2546 | u16 portchange; | 2546 | u16 portchange; |
2547 | 2547 | ||
2548 | for (delay_time = 0; | 2548 | for (delay_time = 0; |
2549 | delay_time < HUB_RESET_TIMEOUT; | 2549 | delay_time < HUB_RESET_TIMEOUT; |
2550 | delay_time += delay) { | 2550 | delay_time += delay) { |
2551 | /* wait to give the device a chance to reset */ | 2551 | /* wait to give the device a chance to reset */ |
2552 | msleep(delay); | 2552 | msleep(delay); |
2553 | 2553 | ||
2554 | /* read and decode port status */ | 2554 | /* read and decode port status */ |
2555 | ret = hub_port_status(hub, port1, &portstatus, &portchange); | 2555 | ret = hub_port_status(hub, port1, &portstatus, &portchange); |
2556 | if (ret < 0) | 2556 | if (ret < 0) |
2557 | return ret; | 2557 | return ret; |
2558 | 2558 | ||
2559 | /* The port state is unknown until the reset completes. */ | 2559 | /* The port state is unknown until the reset completes. */ |
2560 | if (!(portstatus & USB_PORT_STAT_RESET)) | 2560 | if (!(portstatus & USB_PORT_STAT_RESET)) |
2561 | break; | 2561 | break; |
2562 | 2562 | ||
2563 | /* switch to the long delay after two short delay failures */ | 2563 | /* switch to the long delay after two short delay failures */ |
2564 | if (delay_time >= 2 * HUB_SHORT_RESET_TIME) | 2564 | if (delay_time >= 2 * HUB_SHORT_RESET_TIME) |
2565 | delay = HUB_LONG_RESET_TIME; | 2565 | delay = HUB_LONG_RESET_TIME; |
2566 | 2566 | ||
2567 | dev_dbg (hub->intfdev, | 2567 | dev_dbg (hub->intfdev, |
2568 | "port %d not %sreset yet, waiting %dms\n", | 2568 | "port %d not %sreset yet, waiting %dms\n", |
2569 | port1, warm ? "warm " : "", delay); | 2569 | port1, warm ? "warm " : "", delay); |
2570 | } | 2570 | } |
2571 | 2571 | ||
2572 | if ((portstatus & USB_PORT_STAT_RESET)) | 2572 | if ((portstatus & USB_PORT_STAT_RESET)) |
2573 | return -EBUSY; | 2573 | return -EBUSY; |
2574 | 2574 | ||
2575 | if (hub_port_warm_reset_required(hub, portstatus)) | 2575 | if (hub_port_warm_reset_required(hub, portstatus)) |
2576 | return -ENOTCONN; | 2576 | return -ENOTCONN; |
2577 | 2577 | ||
2578 | /* Device went away? */ | 2578 | /* Device went away? */ |
2579 | if (!(portstatus & USB_PORT_STAT_CONNECTION)) | 2579 | if (!(portstatus & USB_PORT_STAT_CONNECTION)) |
2580 | return -ENOTCONN; | 2580 | return -ENOTCONN; |
2581 | 2581 | ||
2582 | /* bomb out completely if the connection bounced. A USB 3.0 | 2582 | /* bomb out completely if the connection bounced. A USB 3.0 |
2583 | * connection may bounce if multiple warm resets were issued, | 2583 | * connection may bounce if multiple warm resets were issued, |
2584 | * but the device may have successfully re-connected. Ignore it. | 2584 | * but the device may have successfully re-connected. Ignore it. |
2585 | */ | 2585 | */ |
2586 | if (!hub_is_superspeed(hub->hdev) && | 2586 | if (!hub_is_superspeed(hub->hdev) && |
2587 | (portchange & USB_PORT_STAT_C_CONNECTION)) | 2587 | (portchange & USB_PORT_STAT_C_CONNECTION)) |
2588 | return -ENOTCONN; | 2588 | return -ENOTCONN; |
2589 | 2589 | ||
2590 | if (!(portstatus & USB_PORT_STAT_ENABLE)) | 2590 | if (!(portstatus & USB_PORT_STAT_ENABLE)) |
2591 | return -EBUSY; | 2591 | return -EBUSY; |
2592 | 2592 | ||
2593 | if (!udev) | 2593 | if (!udev) |
2594 | return 0; | 2594 | return 0; |
2595 | 2595 | ||
2596 | if (hub_is_wusb(hub)) | 2596 | if (hub_is_wusb(hub)) |
2597 | udev->speed = USB_SPEED_WIRELESS; | 2597 | udev->speed = USB_SPEED_WIRELESS; |
2598 | else if (hub_is_superspeed(hub->hdev)) | 2598 | else if (hub_is_superspeed(hub->hdev)) |
2599 | udev->speed = USB_SPEED_SUPER; | 2599 | udev->speed = USB_SPEED_SUPER; |
2600 | else if (portstatus & USB_PORT_STAT_HIGH_SPEED) | 2600 | else if (portstatus & USB_PORT_STAT_HIGH_SPEED) |
2601 | udev->speed = USB_SPEED_HIGH; | 2601 | udev->speed = USB_SPEED_HIGH; |
2602 | else if (portstatus & USB_PORT_STAT_LOW_SPEED) | 2602 | else if (portstatus & USB_PORT_STAT_LOW_SPEED) |
2603 | udev->speed = USB_SPEED_LOW; | 2603 | udev->speed = USB_SPEED_LOW; |
2604 | else | 2604 | else |
2605 | udev->speed = USB_SPEED_FULL; | 2605 | udev->speed = USB_SPEED_FULL; |
2606 | return 0; | 2606 | return 0; |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | static void hub_port_finish_reset(struct usb_hub *hub, int port1, | 2609 | static void hub_port_finish_reset(struct usb_hub *hub, int port1, |
2610 | struct usb_device *udev, int *status) | 2610 | struct usb_device *udev, int *status) |
2611 | { | 2611 | { |
2612 | switch (*status) { | 2612 | switch (*status) { |
2613 | case 0: | 2613 | case 0: |
2614 | /* TRSTRCY = 10 ms; plus some extra */ | 2614 | /* TRSTRCY = 10 ms; plus some extra */ |
2615 | msleep(10 + 40); | 2615 | msleep(10 + 40); |
2616 | if (udev) { | 2616 | if (udev) { |
2617 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 2617 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
2618 | 2618 | ||
2619 | update_devnum(udev, 0); | 2619 | update_devnum(udev, 0); |
2620 | /* The xHC may think the device is already reset, | 2620 | /* The xHC may think the device is already reset, |
2621 | * so ignore the status. | 2621 | * so ignore the status. |
2622 | */ | 2622 | */ |
2623 | if (hcd->driver->reset_device) | 2623 | if (hcd->driver->reset_device) |
2624 | hcd->driver->reset_device(hcd, udev); | 2624 | hcd->driver->reset_device(hcd, udev); |
2625 | } | 2625 | } |
2626 | /* FALL THROUGH */ | 2626 | /* FALL THROUGH */ |
2627 | case -ENOTCONN: | 2627 | case -ENOTCONN: |
2628 | case -ENODEV: | 2628 | case -ENODEV: |
2629 | usb_clear_port_feature(hub->hdev, | 2629 | usb_clear_port_feature(hub->hdev, |
2630 | port1, USB_PORT_FEAT_C_RESET); | 2630 | port1, USB_PORT_FEAT_C_RESET); |
2631 | if (hub_is_superspeed(hub->hdev)) { | 2631 | if (hub_is_superspeed(hub->hdev)) { |
2632 | usb_clear_port_feature(hub->hdev, port1, | 2632 | usb_clear_port_feature(hub->hdev, port1, |
2633 | USB_PORT_FEAT_C_BH_PORT_RESET); | 2633 | USB_PORT_FEAT_C_BH_PORT_RESET); |
2634 | usb_clear_port_feature(hub->hdev, port1, | 2634 | usb_clear_port_feature(hub->hdev, port1, |
2635 | USB_PORT_FEAT_C_PORT_LINK_STATE); | 2635 | USB_PORT_FEAT_C_PORT_LINK_STATE); |
2636 | usb_clear_port_feature(hub->hdev, port1, | 2636 | usb_clear_port_feature(hub->hdev, port1, |
2637 | USB_PORT_FEAT_C_CONNECTION); | 2637 | USB_PORT_FEAT_C_CONNECTION); |
2638 | } | 2638 | } |
2639 | if (udev) | 2639 | if (udev) |
2640 | usb_set_device_state(udev, *status | 2640 | usb_set_device_state(udev, *status |
2641 | ? USB_STATE_NOTATTACHED | 2641 | ? USB_STATE_NOTATTACHED |
2642 | : USB_STATE_DEFAULT); | 2642 | : USB_STATE_DEFAULT); |
2643 | break; | 2643 | break; |
2644 | } | 2644 | } |
2645 | } | 2645 | } |
2646 | 2646 | ||
2647 | /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */ | 2647 | /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */ |
2648 | static int hub_port_reset(struct usb_hub *hub, int port1, | 2648 | static int hub_port_reset(struct usb_hub *hub, int port1, |
2649 | struct usb_device *udev, unsigned int delay, bool warm) | 2649 | struct usb_device *udev, unsigned int delay, bool warm) |
2650 | { | 2650 | { |
2651 | int i, status; | 2651 | int i, status; |
2652 | u16 portchange, portstatus; | 2652 | u16 portchange, portstatus; |
2653 | 2653 | ||
2654 | if (!hub_is_superspeed(hub->hdev)) { | 2654 | if (!hub_is_superspeed(hub->hdev)) { |
2655 | if (warm) { | 2655 | if (warm) { |
2656 | dev_err(hub->intfdev, "only USB3 hubs support " | 2656 | dev_err(hub->intfdev, "only USB3 hubs support " |
2657 | "warm reset\n"); | 2657 | "warm reset\n"); |
2658 | return -EINVAL; | 2658 | return -EINVAL; |
2659 | } | 2659 | } |
2660 | /* Block EHCI CF initialization during the port reset. | 2660 | /* Block EHCI CF initialization during the port reset. |
2661 | * Some companion controllers don't like it when they mix. | 2661 | * Some companion controllers don't like it when they mix. |
2662 | */ | 2662 | */ |
2663 | down_read(&ehci_cf_port_reset_rwsem); | 2663 | down_read(&ehci_cf_port_reset_rwsem); |
2664 | } else if (!warm) { | 2664 | } else if (!warm) { |
2665 | /* | 2665 | /* |
2666 | * If the caller hasn't explicitly requested a warm reset, | 2666 | * If the caller hasn't explicitly requested a warm reset, |
2667 | * double check and see if one is needed. | 2667 | * double check and see if one is needed. |
2668 | */ | 2668 | */ |
2669 | status = hub_port_status(hub, port1, | 2669 | status = hub_port_status(hub, port1, |
2670 | &portstatus, &portchange); | 2670 | &portstatus, &portchange); |
2671 | if (status < 0) | 2671 | if (status < 0) |
2672 | goto done; | 2672 | goto done; |
2673 | 2673 | ||
2674 | if (hub_port_warm_reset_required(hub, portstatus)) | 2674 | if (hub_port_warm_reset_required(hub, portstatus)) |
2675 | warm = true; | 2675 | warm = true; |
2676 | } | 2676 | } |
2677 | 2677 | ||
2678 | /* Reset the port */ | 2678 | /* Reset the port */ |
2679 | for (i = 0; i < PORT_RESET_TRIES; i++) { | 2679 | for (i = 0; i < PORT_RESET_TRIES; i++) { |
2680 | status = set_port_feature(hub->hdev, port1, (warm ? | 2680 | status = set_port_feature(hub->hdev, port1, (warm ? |
2681 | USB_PORT_FEAT_BH_PORT_RESET : | 2681 | USB_PORT_FEAT_BH_PORT_RESET : |
2682 | USB_PORT_FEAT_RESET)); | 2682 | USB_PORT_FEAT_RESET)); |
2683 | if (status == -ENODEV) { | 2683 | if (status == -ENODEV) { |
2684 | ; /* The hub is gone */ | 2684 | ; /* The hub is gone */ |
2685 | } else if (status) { | 2685 | } else if (status) { |
2686 | dev_err(hub->intfdev, | 2686 | dev_err(hub->intfdev, |
2687 | "cannot %sreset port %d (err = %d)\n", | 2687 | "cannot %sreset port %d (err = %d)\n", |
2688 | warm ? "warm " : "", port1, status); | 2688 | warm ? "warm " : "", port1, status); |
2689 | } else { | 2689 | } else { |
2690 | status = hub_port_wait_reset(hub, port1, udev, delay, | 2690 | status = hub_port_wait_reset(hub, port1, udev, delay, |
2691 | warm); | 2691 | warm); |
2692 | if (status && status != -ENOTCONN && status != -ENODEV) | 2692 | if (status && status != -ENOTCONN && status != -ENODEV) |
2693 | dev_dbg(hub->intfdev, | 2693 | dev_dbg(hub->intfdev, |
2694 | "port_wait_reset: err = %d\n", | 2694 | "port_wait_reset: err = %d\n", |
2695 | status); | 2695 | status); |
2696 | } | 2696 | } |
2697 | 2697 | ||
2698 | /* Check for disconnect or reset */ | 2698 | /* Check for disconnect or reset */ |
2699 | if (status == 0 || status == -ENOTCONN || status == -ENODEV) { | 2699 | if (status == 0 || status == -ENOTCONN || status == -ENODEV) { |
2700 | hub_port_finish_reset(hub, port1, udev, &status); | 2700 | hub_port_finish_reset(hub, port1, udev, &status); |
2701 | 2701 | ||
2702 | if (!hub_is_superspeed(hub->hdev)) | 2702 | if (!hub_is_superspeed(hub->hdev)) |
2703 | goto done; | 2703 | goto done; |
2704 | 2704 | ||
2705 | /* | 2705 | /* |
2706 | * If a USB 3.0 device migrates from reset to an error | 2706 | * If a USB 3.0 device migrates from reset to an error |
2707 | * state, re-issue the warm reset. | 2707 | * state, re-issue the warm reset. |
2708 | */ | 2708 | */ |
2709 | if (hub_port_status(hub, port1, | 2709 | if (hub_port_status(hub, port1, |
2710 | &portstatus, &portchange) < 0) | 2710 | &portstatus, &portchange) < 0) |
2711 | goto done; | 2711 | goto done; |
2712 | 2712 | ||
2713 | if (!hub_port_warm_reset_required(hub, portstatus)) | 2713 | if (!hub_port_warm_reset_required(hub, portstatus)) |
2714 | goto done; | 2714 | goto done; |
2715 | 2715 | ||
2716 | /* | 2716 | /* |
2717 | * If the port is in SS.Inactive or Compliance Mode, the | 2717 | * If the port is in SS.Inactive or Compliance Mode, the |
2718 | * hot or warm reset failed. Try another warm reset. | 2718 | * hot or warm reset failed. Try another warm reset. |
2719 | */ | 2719 | */ |
2720 | if (!warm) { | 2720 | if (!warm) { |
2721 | dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n", | 2721 | dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n", |
2722 | port1); | 2722 | port1); |
2723 | warm = true; | 2723 | warm = true; |
2724 | } | 2724 | } |
2725 | } | 2725 | } |
2726 | 2726 | ||
2727 | dev_dbg (hub->intfdev, | 2727 | dev_dbg (hub->intfdev, |
2728 | "port %d not enabled, trying %sreset again...\n", | 2728 | "port %d not enabled, trying %sreset again...\n", |
2729 | port1, warm ? "warm " : ""); | 2729 | port1, warm ? "warm " : ""); |
2730 | delay = HUB_LONG_RESET_TIME; | 2730 | delay = HUB_LONG_RESET_TIME; |
2731 | } | 2731 | } |
2732 | 2732 | ||
2733 | dev_err (hub->intfdev, | 2733 | dev_err (hub->intfdev, |
2734 | "Cannot enable port %i. Maybe the USB cable is bad?\n", | 2734 | "Cannot enable port %i. Maybe the USB cable is bad?\n", |
2735 | port1); | 2735 | port1); |
2736 | 2736 | ||
2737 | done: | 2737 | done: |
2738 | if (!hub_is_superspeed(hub->hdev)) | 2738 | if (!hub_is_superspeed(hub->hdev)) |
2739 | up_read(&ehci_cf_port_reset_rwsem); | 2739 | up_read(&ehci_cf_port_reset_rwsem); |
2740 | 2740 | ||
2741 | return status; | 2741 | return status; |
2742 | } | 2742 | } |
2743 | 2743 | ||
2744 | /* Check if a port is powered on */ | 2744 | /* Check if a port is powered on */ |
2745 | static int port_is_power_on(struct usb_hub *hub, unsigned portstatus) | 2745 | static int port_is_power_on(struct usb_hub *hub, unsigned portstatus) |
2746 | { | 2746 | { |
2747 | int ret = 0; | 2747 | int ret = 0; |
2748 | 2748 | ||
2749 | if (hub_is_superspeed(hub->hdev)) { | 2749 | if (hub_is_superspeed(hub->hdev)) { |
2750 | if (portstatus & USB_SS_PORT_STAT_POWER) | 2750 | if (portstatus & USB_SS_PORT_STAT_POWER) |
2751 | ret = 1; | 2751 | ret = 1; |
2752 | } else { | 2752 | } else { |
2753 | if (portstatus & USB_PORT_STAT_POWER) | 2753 | if (portstatus & USB_PORT_STAT_POWER) |
2754 | ret = 1; | 2754 | ret = 1; |
2755 | } | 2755 | } |
2756 | 2756 | ||
2757 | return ret; | 2757 | return ret; |
2758 | } | 2758 | } |
2759 | 2759 | ||
2760 | #ifdef CONFIG_PM | 2760 | #ifdef CONFIG_PM |
2761 | 2761 | ||
2762 | /* Check if a port is suspended (USB 2.0 port) or in the U3 state (USB 3.0 port) */ | 2762 | /* Check if a port is suspended (USB 2.0 port) or in the U3 state (USB 3.0 port) */ |
2763 | static int port_is_suspended(struct usb_hub *hub, unsigned portstatus) | 2763 | static int port_is_suspended(struct usb_hub *hub, unsigned portstatus) |
2764 | { | 2764 | { |
2765 | int ret = 0; | 2765 | int ret = 0; |
2766 | 2766 | ||
2767 | if (hub_is_superspeed(hub->hdev)) { | 2767 | if (hub_is_superspeed(hub->hdev)) { |
2768 | if ((portstatus & USB_PORT_STAT_LINK_STATE) | 2768 | if ((portstatus & USB_PORT_STAT_LINK_STATE) |
2769 | == USB_SS_PORT_LS_U3) | 2769 | == USB_SS_PORT_LS_U3) |
2770 | ret = 1; | 2770 | ret = 1; |
2771 | } else { | 2771 | } else { |
2772 | if (portstatus & USB_PORT_STAT_SUSPEND) | 2772 | if (portstatus & USB_PORT_STAT_SUSPEND) |
2773 | ret = 1; | 2773 | ret = 1; |
2774 | } | 2774 | } |
2775 | 2775 | ||
2776 | return ret; | 2776 | return ret; |
2777 | } | 2777 | } |
2778 | 2778 | ||
2779 | /* Determine whether the device on a port is ready for a normal resume, | 2779 | /* Determine whether the device on a port is ready for a normal resume, |
2780 | * is ready for a reset-resume, or should be disconnected. | 2780 | * is ready for a reset-resume, or should be disconnected. |
2781 | */ | 2781 | */ |
2782 | static int check_port_resume_type(struct usb_device *udev, | 2782 | static int check_port_resume_type(struct usb_device *udev, |
2783 | struct usb_hub *hub, int port1, | 2783 | struct usb_hub *hub, int port1, |
2784 | int status, unsigned portchange, unsigned portstatus) | 2784 | int status, unsigned portchange, unsigned portstatus) |
2785 | { | 2785 | { |
2786 | /* Is the device still present? */ | 2786 | /* Is the device still present? */ |
2787 | if (status || port_is_suspended(hub, portstatus) || | 2787 | if (status || port_is_suspended(hub, portstatus) || |
2788 | !port_is_power_on(hub, portstatus) || | 2788 | !port_is_power_on(hub, portstatus) || |
2789 | !(portstatus & USB_PORT_STAT_CONNECTION)) { | 2789 | !(portstatus & USB_PORT_STAT_CONNECTION)) { |
2790 | if (status >= 0) | 2790 | if (status >= 0) |
2791 | status = -ENODEV; | 2791 | status = -ENODEV; |
2792 | } | 2792 | } |
2793 | 2793 | ||
2794 | /* Can't do a normal resume if the port isn't enabled, | 2794 | /* Can't do a normal resume if the port isn't enabled, |
2795 | * so try a reset-resume instead. | 2795 | * so try a reset-resume instead. |
2796 | */ | 2796 | */ |
2797 | else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) { | 2797 | else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) { |
2798 | if (udev->persist_enabled) | 2798 | if (udev->persist_enabled) |
2799 | udev->reset_resume = 1; | 2799 | udev->reset_resume = 1; |
2800 | else | 2800 | else |
2801 | status = -ENODEV; | 2801 | status = -ENODEV; |
2802 | } | 2802 | } |
2803 | 2803 | ||
2804 | if (status) { | 2804 | if (status) { |
2805 | dev_dbg(hub->intfdev, | 2805 | dev_dbg(hub->intfdev, |
2806 | "port %d status %04x.%04x after resume, %d\n", | 2806 | "port %d status %04x.%04x after resume, %d\n", |
2807 | port1, portchange, portstatus, status); | 2807 | port1, portchange, portstatus, status); |
2808 | } else if (udev->reset_resume) { | 2808 | } else if (udev->reset_resume) { |
2809 | 2809 | ||
2810 | /* Late port handoff can set status-change bits */ | 2810 | /* Late port handoff can set status-change bits */ |
2811 | if (portchange & USB_PORT_STAT_C_CONNECTION) | 2811 | if (portchange & USB_PORT_STAT_C_CONNECTION) |
2812 | usb_clear_port_feature(hub->hdev, port1, | 2812 | usb_clear_port_feature(hub->hdev, port1, |
2813 | USB_PORT_FEAT_C_CONNECTION); | 2813 | USB_PORT_FEAT_C_CONNECTION); |
2814 | if (portchange & USB_PORT_STAT_C_ENABLE) | 2814 | if (portchange & USB_PORT_STAT_C_ENABLE) |
2815 | usb_clear_port_feature(hub->hdev, port1, | 2815 | usb_clear_port_feature(hub->hdev, port1, |
2816 | USB_PORT_FEAT_C_ENABLE); | 2816 | USB_PORT_FEAT_C_ENABLE); |
2817 | } | 2817 | } |
2818 | 2818 | ||
2819 | return status; | 2819 | return status; |
2820 | } | 2820 | } |
2821 | 2821 | ||
2822 | int usb_disable_ltm(struct usb_device *udev) | 2822 | int usb_disable_ltm(struct usb_device *udev) |
2823 | { | 2823 | { |
2824 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 2824 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
2825 | 2825 | ||
2826 | /* Check if the roothub and device support LTM. */ | 2826 | /* Check if the roothub and device support LTM. */ |
2827 | if (!usb_device_supports_ltm(hcd->self.root_hub) || | 2827 | if (!usb_device_supports_ltm(hcd->self.root_hub) || |
2828 | !usb_device_supports_ltm(udev)) | 2828 | !usb_device_supports_ltm(udev)) |
2829 | return 0; | 2829 | return 0; |
2830 | 2830 | ||
2831 | /* Clear Feature LTM Enable can only be sent if the device is | 2831 | /* Clear Feature LTM Enable can only be sent if the device is |
2832 | * configured. | 2832 | * configured. |
2833 | */ | 2833 | */ |
2834 | if (!udev->actconfig) | 2834 | if (!udev->actconfig) |
2835 | return 0; | 2835 | return 0; |
2836 | 2836 | ||
2837 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2837 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
2838 | USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, | 2838 | USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, |
2839 | USB_DEVICE_LTM_ENABLE, 0, NULL, 0, | 2839 | USB_DEVICE_LTM_ENABLE, 0, NULL, 0, |
2840 | USB_CTRL_SET_TIMEOUT); | 2840 | USB_CTRL_SET_TIMEOUT); |
2841 | } | 2841 | } |
2842 | EXPORT_SYMBOL_GPL(usb_disable_ltm); | 2842 | EXPORT_SYMBOL_GPL(usb_disable_ltm); |
2843 | 2843 | ||
2844 | void usb_enable_ltm(struct usb_device *udev) | 2844 | void usb_enable_ltm(struct usb_device *udev) |
2845 | { | 2845 | { |
2846 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 2846 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
2847 | 2847 | ||
2848 | /* Check if the roothub and device support LTM. */ | 2848 | /* Check if the roothub and device support LTM. */ |
2849 | if (!usb_device_supports_ltm(hcd->self.root_hub) || | 2849 | if (!usb_device_supports_ltm(hcd->self.root_hub) || |
2850 | !usb_device_supports_ltm(udev)) | 2850 | !usb_device_supports_ltm(udev)) |
2851 | return; | 2851 | return; |
2852 | 2852 | ||
2853 | /* Set Feature LTM Enable can only be sent if the device is | 2853 | /* Set Feature LTM Enable can only be sent if the device is |
2854 | * configured. | 2854 | * configured. |
2855 | */ | 2855 | */ |
2856 | if (!udev->actconfig) | 2856 | if (!udev->actconfig) |
2857 | return; | 2857 | return; |
2858 | 2858 | ||
2859 | usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2859 | usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
2860 | USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, | 2860 | USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, |
2861 | USB_DEVICE_LTM_ENABLE, 0, NULL, 0, | 2861 | USB_DEVICE_LTM_ENABLE, 0, NULL, 0, |
2862 | USB_CTRL_SET_TIMEOUT); | 2862 | USB_CTRL_SET_TIMEOUT); |
2863 | } | 2863 | } |
2864 | EXPORT_SYMBOL_GPL(usb_enable_ltm); | 2864 | EXPORT_SYMBOL_GPL(usb_enable_ltm); |
2865 | 2865 | ||
2866 | /* | 2866 | /* |
2867 | * usb_enable_remote_wakeup - enable remote wakeup for a device | 2867 | * usb_enable_remote_wakeup - enable remote wakeup for a device |
2868 | * @udev: target device | 2868 | * @udev: target device |
2869 | * | 2869 | * |
2870 | * For USB-2 devices: Set the device's remote wakeup feature. | 2870 | * For USB-2 devices: Set the device's remote wakeup feature. |
2871 | * | 2871 | * |
2872 | * For USB-3 devices: Assume there's only one function on the device and | 2872 | * For USB-3 devices: Assume there's only one function on the device and |
2873 | * enable remote wake for the first interface. FIXME if the interface | 2873 | * enable remote wake for the first interface. FIXME if the interface |
2874 | * association descriptor shows there's more than one function. | 2874 | * association descriptor shows there's more than one function. |
2875 | */ | 2875 | */ |
2876 | static int usb_enable_remote_wakeup(struct usb_device *udev) | 2876 | static int usb_enable_remote_wakeup(struct usb_device *udev) |
2877 | { | 2877 | { |
2878 | if (udev->speed < USB_SPEED_SUPER) | 2878 | if (udev->speed < USB_SPEED_SUPER) |
2879 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2879 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
2880 | USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, | 2880 | USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, |
2881 | USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, | 2881 | USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, |
2882 | USB_CTRL_SET_TIMEOUT); | 2882 | USB_CTRL_SET_TIMEOUT); |
2883 | else | 2883 | else |
2884 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2884 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
2885 | USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE, | 2885 | USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE, |
2886 | USB_INTRF_FUNC_SUSPEND, | 2886 | USB_INTRF_FUNC_SUSPEND, |
2887 | USB_INTRF_FUNC_SUSPEND_RW | | 2887 | USB_INTRF_FUNC_SUSPEND_RW | |
2888 | USB_INTRF_FUNC_SUSPEND_LP, | 2888 | USB_INTRF_FUNC_SUSPEND_LP, |
2889 | NULL, 0, USB_CTRL_SET_TIMEOUT); | 2889 | NULL, 0, USB_CTRL_SET_TIMEOUT); |
2890 | } | 2890 | } |
2891 | 2891 | ||
2892 | /* | 2892 | /* |
2893 | * usb_disable_remote_wakeup - disable remote wakeup for a device | 2893 | * usb_disable_remote_wakeup - disable remote wakeup for a device |
2894 | * @udev: target device | 2894 | * @udev: target device |
2895 | * | 2895 | * |
2896 | * For USB-2 devices: Clear the device's remote wakeup feature. | 2896 | * For USB-2 devices: Clear the device's remote wakeup feature. |
2897 | * | 2897 | * |
2898 | * For USB-3 devices: Assume there's only one function on the device and | 2898 | * For USB-3 devices: Assume there's only one function on the device and |
2899 | * disable remote wake for the first interface. FIXME if the interface | 2899 | * disable remote wake for the first interface. FIXME if the interface |
2900 | * association descriptor shows there's more than one function. | 2900 | * association descriptor shows there's more than one function. |
2901 | */ | 2901 | */ |
2902 | static int usb_disable_remote_wakeup(struct usb_device *udev) | 2902 | static int usb_disable_remote_wakeup(struct usb_device *udev) |
2903 | { | 2903 | { |
2904 | if (udev->speed < USB_SPEED_SUPER) | 2904 | if (udev->speed < USB_SPEED_SUPER) |
2905 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2905 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
2906 | USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, | 2906 | USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, |
2907 | USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, | 2907 | USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, |
2908 | USB_CTRL_SET_TIMEOUT); | 2908 | USB_CTRL_SET_TIMEOUT); |
2909 | else | 2909 | else |
2910 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 2910 | return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
2911 | USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, | 2911 | USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, |
2912 | USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, | 2912 | USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, |
2913 | USB_CTRL_SET_TIMEOUT); | 2913 | USB_CTRL_SET_TIMEOUT); |
2914 | } | 2914 | } |
2915 | 2915 | ||
2916 | /* Count of wakeup-enabled devices at or below udev */ | 2916 | /* Count of wakeup-enabled devices at or below udev */ |
2917 | static unsigned wakeup_enabled_descendants(struct usb_device *udev) | 2917 | static unsigned wakeup_enabled_descendants(struct usb_device *udev) |
2918 | { | 2918 | { |
2919 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); | 2919 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); |
2920 | 2920 | ||
2921 | return udev->do_remote_wakeup + | 2921 | return udev->do_remote_wakeup + |
2922 | (hub ? hub->wakeup_enabled_descendants : 0); | 2922 | (hub ? hub->wakeup_enabled_descendants : 0); |
2923 | } | 2923 | } |
2924 | 2924 | ||
2925 | /* | 2925 | /* |
2926 | * usb_port_suspend - suspend a usb device's upstream port | 2926 | * usb_port_suspend - suspend a usb device's upstream port |
2927 | * @udev: device that's no longer in active use, not a root hub | 2927 | * @udev: device that's no longer in active use, not a root hub |
2928 | * Context: must be able to sleep; device not locked; pm locks held | 2928 | * Context: must be able to sleep; device not locked; pm locks held |
2929 | * | 2929 | * |
2930 | * Suspends a USB device that isn't in active use, conserving power. | 2930 | * Suspends a USB device that isn't in active use, conserving power. |
2931 | * Devices may wake out of a suspend, if anything important happens, | 2931 | * Devices may wake out of a suspend, if anything important happens, |
2932 | * using the remote wakeup mechanism. They may also be taken out of | 2932 | * using the remote wakeup mechanism. They may also be taken out of |
2933 | * suspend by the host, using usb_port_resume(). It's also routine | 2933 | * suspend by the host, using usb_port_resume(). It's also routine |
2934 | * to disconnect devices while they are suspended. | 2934 | * to disconnect devices while they are suspended. |
2935 | * | 2935 | * |
2936 | * This only affects the USB hardware for a device; its interfaces | 2936 | * This only affects the USB hardware for a device; its interfaces |
2937 | * (and, for hubs, child devices) must already have been suspended. | 2937 | * (and, for hubs, child devices) must already have been suspended. |
2938 | * | 2938 | * |
2939 | * Selective port suspend reduces power; most suspended devices draw | 2939 | * Selective port suspend reduces power; most suspended devices draw |
2940 | * less than 500 uA. It's also used in OTG, along with remote wakeup. | 2940 | * less than 500 uA. It's also used in OTG, along with remote wakeup. |
2941 | * All devices below the suspended port are also suspended. | 2941 | * All devices below the suspended port are also suspended. |
2942 | * | 2942 | * |
2943 | * Devices leave suspend state when the host wakes them up. Some devices | 2943 | * Devices leave suspend state when the host wakes them up. Some devices |
2944 | * also support "remote wakeup", where the device can activate the USB | 2944 | * also support "remote wakeup", where the device can activate the USB |
2945 | * tree above them to deliver data, such as a keypress or packet. In | 2945 | * tree above them to deliver data, such as a keypress or packet. In |
2946 | * some cases, this wakes the USB host. | 2946 | * some cases, this wakes the USB host. |
2947 | * | 2947 | * |
2948 | * Suspending OTG devices may trigger HNP, if that's been enabled | 2948 | * Suspending OTG devices may trigger HNP, if that's been enabled |
2949 | * between a pair of dual-role devices. That will change roles, such | 2949 | * between a pair of dual-role devices. That will change roles, such |
2950 | * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral. | 2950 | * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral. |
2951 | * | 2951 | * |
2952 | * Devices on USB hub ports have only one "suspend" state, corresponding | 2952 | * Devices on USB hub ports have only one "suspend" state, corresponding |
2953 | * to ACPI D2, "may cause the device to lose some context". | 2953 | * to ACPI D2, "may cause the device to lose some context". |
2954 | * State transitions include: | 2954 | * State transitions include: |
2955 | * | 2955 | * |
2956 | * - suspend, resume ... when the VBUS power link stays live | 2956 | * - suspend, resume ... when the VBUS power link stays live |
2957 | * - suspend, disconnect ... VBUS lost | 2957 | * - suspend, disconnect ... VBUS lost |
2958 | * | 2958 | * |
2959 | * Once VBUS drop breaks the circuit, the port it's using has to go through | 2959 | * Once VBUS drop breaks the circuit, the port it's using has to go through |
2960 | * normal re-enumeration procedures, starting with enabling VBUS power. | 2960 | * normal re-enumeration procedures, starting with enabling VBUS power. |
2961 | * Other than re-initializing the hub (plug/unplug, except for root hubs), | 2961 | * Other than re-initializing the hub (plug/unplug, except for root hubs), |
2962 | * Linux (2.6) currently has NO mechanisms to initiate that: no khubd | 2962 | * Linux (2.6) currently has NO mechanisms to initiate that: no khubd |
2963 | * timer, no SRP, no requests through sysfs. | 2963 | * timer, no SRP, no requests through sysfs. |
2964 | * | 2964 | * |
2965 | * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get | 2965 | * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get |
2966 | * suspended until their bus goes into global suspend (i.e., the root | 2966 | * suspended until their bus goes into global suspend (i.e., the root |
2967 | * hub is suspended). Nevertheless, we change @udev->state to | 2967 | * hub is suspended). Nevertheless, we change @udev->state to |
2968 | * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual | 2968 | * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual |
2969 | * upstream port setting is stored in @udev->port_is_suspended. | 2969 | * upstream port setting is stored in @udev->port_is_suspended. |
2970 | * | 2970 | * |
2971 | * Returns 0 on success, else negative errno. | 2971 | * Returns 0 on success, else negative errno. |
2972 | */ | 2972 | */ |
2973 | int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | 2973 | int usb_port_suspend(struct usb_device *udev, pm_message_t msg) |
2974 | { | 2974 | { |
2975 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); | 2975 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); |
2976 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; | 2976 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; |
2977 | int port1 = udev->portnum; | 2977 | int port1 = udev->portnum; |
2978 | int status; | 2978 | int status; |
2979 | bool really_suspend = true; | 2979 | bool really_suspend = true; |
2980 | 2980 | ||
2981 | /* enable remote wakeup when appropriate; this lets the device | 2981 | /* enable remote wakeup when appropriate; this lets the device |
2982 | * wake up the upstream hub (including maybe the root hub). | 2982 | * wake up the upstream hub (including maybe the root hub). |
2983 | * | 2983 | * |
2984 | * NOTE: OTG devices may issue remote wakeup (or SRP) even when | 2984 | * NOTE: OTG devices may issue remote wakeup (or SRP) even when |
2985 | * we don't explicitly enable it here. | 2985 | * we don't explicitly enable it here. |
2986 | */ | 2986 | */ |
2987 | if (udev->do_remote_wakeup) { | 2987 | if (udev->do_remote_wakeup) { |
2988 | status = usb_enable_remote_wakeup(udev); | 2988 | status = usb_enable_remote_wakeup(udev); |
2989 | if (status) { | 2989 | if (status) { |
2990 | dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", | 2990 | dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", |
2991 | status); | 2991 | status); |
2992 | /* bail if autosuspend is requested */ | 2992 | /* bail if autosuspend is requested */ |
2993 | if (PMSG_IS_AUTO(msg)) | 2993 | if (PMSG_IS_AUTO(msg)) |
2994 | goto err_wakeup; | 2994 | goto err_wakeup; |
2995 | } | 2995 | } |
2996 | } | 2996 | } |
2997 | 2997 | ||
2998 | /* disable USB2 hardware LPM */ | 2998 | /* disable USB2 hardware LPM */ |
2999 | if (udev->usb2_hw_lpm_enabled == 1) | 2999 | if (udev->usb2_hw_lpm_enabled == 1) |
3000 | usb_set_usb2_hardware_lpm(udev, 0); | 3000 | usb_set_usb2_hardware_lpm(udev, 0); |
3001 | 3001 | ||
3002 | if (usb_disable_ltm(udev)) { | 3002 | if (usb_disable_ltm(udev)) { |
3003 | dev_err(&udev->dev, "Failed to disable LTM before suspend\n."); | 3003 | dev_err(&udev->dev, "Failed to disable LTM before suspend\n."); |
3004 | status = -ENOMEM; | 3004 | status = -ENOMEM; |
3005 | if (PMSG_IS_AUTO(msg)) | 3005 | if (PMSG_IS_AUTO(msg)) |
3006 | goto err_ltm; | 3006 | goto err_ltm; |
3007 | } | 3007 | } |
3008 | if (usb_unlocked_disable_lpm(udev)) { | 3008 | if (usb_unlocked_disable_lpm(udev)) { |
3009 | dev_err(&udev->dev, "Failed to disable LPM before suspend\n."); | 3009 | dev_err(&udev->dev, "Failed to disable LPM before suspend\n."); |
3010 | status = -ENOMEM; | 3010 | status = -ENOMEM; |
3011 | if (PMSG_IS_AUTO(msg)) | 3011 | if (PMSG_IS_AUTO(msg)) |
3012 | goto err_lpm3; | 3012 | goto err_lpm3; |
3013 | } | 3013 | } |
3014 | 3014 | ||
3015 | /* see 7.1.7.6 */ | 3015 | /* see 7.1.7.6 */ |
3016 | if (hub_is_superspeed(hub->hdev)) | 3016 | if (hub_is_superspeed(hub->hdev)) |
3017 | status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); | 3017 | status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); |
3018 | 3018 | ||
3019 | /* | 3019 | /* |
3020 | * For system suspend, we do not need to enable the suspend feature | 3020 | * For system suspend, we do not need to enable the suspend feature |
3021 | * on individual USB-2 ports. The devices will automatically go | 3021 | * on individual USB-2 ports. The devices will automatically go |
3022 | * into suspend a few ms after the root hub stops sending packets. | 3022 | * into suspend a few ms after the root hub stops sending packets. |
3023 | * The USB 2.0 spec calls this "global suspend". | 3023 | * The USB 2.0 spec calls this "global suspend". |
3024 | * | 3024 | * |
3025 | * However, many USB hubs have a bug: They don't relay wakeup requests | 3025 | * However, many USB hubs have a bug: They don't relay wakeup requests |
3026 | * from a downstream port if the port's suspend feature isn't on. | 3026 | * from a downstream port if the port's suspend feature isn't on. |
3027 | * Therefore we will turn on the suspend feature if udev or any of its | 3027 | * Therefore we will turn on the suspend feature if udev or any of its |
3028 | * descendants is enabled for remote wakeup. | 3028 | * descendants is enabled for remote wakeup. |
3029 | */ | 3029 | */ |
3030 | else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) | 3030 | else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) |
3031 | status = set_port_feature(hub->hdev, port1, | 3031 | status = set_port_feature(hub->hdev, port1, |
3032 | USB_PORT_FEAT_SUSPEND); | 3032 | USB_PORT_FEAT_SUSPEND); |
3033 | else { | 3033 | else { |
3034 | really_suspend = false; | 3034 | really_suspend = false; |
3035 | status = 0; | 3035 | status = 0; |
3036 | } | 3036 | } |
3037 | if (status) { | 3037 | if (status) { |
3038 | dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", | 3038 | dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", |
3039 | port1, status); | 3039 | port1, status); |
3040 | 3040 | ||
3041 | /* Try to enable USB3 LPM and LTM again */ | 3041 | /* Try to enable USB3 LPM and LTM again */ |
3042 | usb_unlocked_enable_lpm(udev); | 3042 | usb_unlocked_enable_lpm(udev); |
3043 | err_lpm3: | 3043 | err_lpm3: |
3044 | usb_enable_ltm(udev); | 3044 | usb_enable_ltm(udev); |
3045 | err_ltm: | 3045 | err_ltm: |
3046 | /* Try to enable USB2 hardware LPM again */ | 3046 | /* Try to enable USB2 hardware LPM again */ |
3047 | if (udev->usb2_hw_lpm_capable == 1) | 3047 | if (udev->usb2_hw_lpm_capable == 1) |
3048 | usb_set_usb2_hardware_lpm(udev, 1); | 3048 | usb_set_usb2_hardware_lpm(udev, 1); |
3049 | 3049 | ||
3050 | if (udev->do_remote_wakeup) | 3050 | if (udev->do_remote_wakeup) |
3051 | (void) usb_disable_remote_wakeup(udev); | 3051 | (void) usb_disable_remote_wakeup(udev); |
3052 | err_wakeup: | 3052 | err_wakeup: |
3053 | 3053 | ||
3054 | /* System sleep transitions should never fail */ | 3054 | /* System sleep transitions should never fail */ |
3055 | if (!PMSG_IS_AUTO(msg)) | 3055 | if (!PMSG_IS_AUTO(msg)) |
3056 | status = 0; | 3056 | status = 0; |
3057 | } else { | 3057 | } else { |
3058 | dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", | 3058 | dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", |
3059 | (PMSG_IS_AUTO(msg) ? "auto-" : ""), | 3059 | (PMSG_IS_AUTO(msg) ? "auto-" : ""), |
3060 | udev->do_remote_wakeup); | 3060 | udev->do_remote_wakeup); |
3061 | if (really_suspend) { | 3061 | if (really_suspend) { |
3062 | udev->port_is_suspended = 1; | 3062 | udev->port_is_suspended = 1; |
3063 | 3063 | ||
3064 | /* device has up to 10 msec to fully suspend */ | 3064 | /* device has up to 10 msec to fully suspend */ |
3065 | msleep(10); | 3065 | msleep(10); |
3066 | } | 3066 | } |
3067 | usb_set_device_state(udev, USB_STATE_SUSPENDED); | 3067 | usb_set_device_state(udev, USB_STATE_SUSPENDED); |
3068 | } | 3068 | } |
3069 | 3069 | ||
3070 | if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) { | 3070 | if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) { |
3071 | pm_runtime_put_sync(&port_dev->dev); | 3071 | pm_runtime_put_sync(&port_dev->dev); |
3072 | port_dev->did_runtime_put = true; | 3072 | port_dev->did_runtime_put = true; |
3073 | } | 3073 | } |
3074 | 3074 | ||
3075 | usb_mark_last_busy(hub->hdev); | 3075 | usb_mark_last_busy(hub->hdev); |
3076 | return status; | 3076 | return status; |
3077 | } | 3077 | } |
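Note: the failure branch of usb_port_suspend() above relies on staged goto unwinding. The err_lpm3/err_ltm/err_wakeup labels are jump targets for earlier failures in the function, so each path re-enables only the power-management features that had actually been turned off before the failure. A minimal, generic sketch of that idiom, with made-up names that are not taken from hub.c:

    /* Illustrative only: each failure jumps to the label that undoes
     * exactly the steps that already completed, nothing more.
     */
    static int do_three_steps(struct example_dev *d)
    {
            int err;

            err = step_a(d);
            if (err)
                    goto err_a;
            err = step_b(d);
            if (err)
                    goto err_b;
            err = step_c(d);
            if (err)
                    goto err_c;
            return 0;

     err_c:
            undo_b(d);
     err_b:
            undo_a(d);
     err_a:
            return err;
    }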
3078 | 3078 | ||
3079 | /* | 3079 | /* |
3080 | * If the USB "suspend" state is in use (rather than "global suspend"), | 3080 | * If the USB "suspend" state is in use (rather than "global suspend"), |
3081 | * many devices will be individually taken out of suspend state using | 3081 | * many devices will be individually taken out of suspend state using |
3082 | * special "resume" signaling. This routine kicks in shortly after | 3082 | * special "resume" signaling. This routine kicks in shortly after |
3083 | * hardware resume signaling is finished, either because of selective | 3083 | * hardware resume signaling is finished, either because of selective |
3084 | * resume (by host) or remote wakeup (by device) ... now see what changed | 3084 | * resume (by host) or remote wakeup (by device) ... now see what changed |
3085 | * in the tree that's rooted at this device. | 3085 | * in the tree that's rooted at this device. |
3086 | * | 3086 | * |
3087 | * If @udev->reset_resume is set then the device is reset before the | 3087 | * If @udev->reset_resume is set then the device is reset before the |
3088 | * status check is done. | 3088 | * status check is done. |
3089 | */ | 3089 | */ |
3090 | static int finish_port_resume(struct usb_device *udev) | 3090 | static int finish_port_resume(struct usb_device *udev) |
3091 | { | 3091 | { |
3092 | int status = 0; | 3092 | int status = 0; |
3093 | u16 devstatus = 0; | 3093 | u16 devstatus = 0; |
3094 | 3094 | ||
3095 | /* caller owns the udev device lock */ | 3095 | /* caller owns the udev device lock */ |
3096 | dev_dbg(&udev->dev, "%s\n", | 3096 | dev_dbg(&udev->dev, "%s\n", |
3097 | udev->reset_resume ? "finish reset-resume" : "finish resume"); | 3097 | udev->reset_resume ? "finish reset-resume" : "finish resume"); |
3098 | 3098 | ||
3099 | /* usb ch9 identifies four variants of SUSPENDED, based on what | 3099 | /* usb ch9 identifies four variants of SUSPENDED, based on what |
3100 | * state the device resumes to. Linux currently won't see the | 3100 | * state the device resumes to. Linux currently won't see the |
3101 | * first two on the host side; they'd be inside hub_port_init() | 3101 | * first two on the host side; they'd be inside hub_port_init() |
3102 | * during many timeouts, but khubd can't suspend until later. | 3102 | * during many timeouts, but khubd can't suspend until later. |
3103 | */ | 3103 | */ |
3104 | usb_set_device_state(udev, udev->actconfig | 3104 | usb_set_device_state(udev, udev->actconfig |
3105 | ? USB_STATE_CONFIGURED | 3105 | ? USB_STATE_CONFIGURED |
3106 | : USB_STATE_ADDRESS); | 3106 | : USB_STATE_ADDRESS); |
3107 | 3107 | ||
3108 | /* 10.5.4.5 says not to reset a suspended port if the attached | 3108 | /* 10.5.4.5 says not to reset a suspended port if the attached |
3109 | * device is enabled for remote wakeup. Hence the reset | 3109 | * device is enabled for remote wakeup. Hence the reset |
3110 | * operation is carried out here, after the port has been | 3110 | * operation is carried out here, after the port has been |
3111 | * resumed. | 3111 | * resumed. |
3112 | */ | 3112 | */ |
3113 | if (udev->reset_resume) | 3113 | if (udev->reset_resume) |
3114 | retry_reset_resume: | 3114 | retry_reset_resume: |
3115 | status = usb_reset_and_verify_device(udev); | 3115 | status = usb_reset_and_verify_device(udev); |
3116 | 3116 | ||
3117 | /* 10.5.4.5 says be sure devices in the tree are still there. | 3117 | /* 10.5.4.5 says be sure devices in the tree are still there. |
3118 | * For now let's assume the device didn't go crazy on resume, | 3118 | * For now let's assume the device didn't go crazy on resume, |
3119 | * and device drivers will know about any resume quirks. | 3119 | * and device drivers will know about any resume quirks. |
3120 | */ | 3120 | */ |
3121 | if (status == 0) { | 3121 | if (status == 0) { |
3122 | devstatus = 0; | 3122 | devstatus = 0; |
3123 | status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus); | 3123 | status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus); |
3124 | 3124 | ||
3125 | /* If a normal resume failed, try doing a reset-resume */ | 3125 | /* If a normal resume failed, try doing a reset-resume */ |
3126 | if (status && !udev->reset_resume && udev->persist_enabled) { | 3126 | if (status && !udev->reset_resume && udev->persist_enabled) { |
3127 | dev_dbg(&udev->dev, "retry with reset-resume\n"); | 3127 | dev_dbg(&udev->dev, "retry with reset-resume\n"); |
3128 | udev->reset_resume = 1; | 3128 | udev->reset_resume = 1; |
3129 | goto retry_reset_resume; | 3129 | goto retry_reset_resume; |
3130 | } | 3130 | } |
3131 | } | 3131 | } |
3132 | 3132 | ||
3133 | if (status) { | 3133 | if (status) { |
3134 | dev_dbg(&udev->dev, "gone after usb resume? status %d\n", | 3134 | dev_dbg(&udev->dev, "gone after usb resume? status %d\n", |
3135 | status); | 3135 | status); |
3136 | /* | 3136 | /* |
3137 | * There are a few quirky devices which violate the standard | 3137 | * There are a few quirky devices which violate the standard |
3138 | * by claiming to have remote wakeup enabled after a reset, | 3138 | * by claiming to have remote wakeup enabled after a reset, |
3139 | * which crash if the feature is cleared, hence check for | 3139 | * which crash if the feature is cleared, hence check for |
3140 | * udev->reset_resume | 3140 | * udev->reset_resume |
3141 | */ | 3141 | */ |
3142 | } else if (udev->actconfig && !udev->reset_resume) { | 3142 | } else if (udev->actconfig && !udev->reset_resume) { |
3143 | if (udev->speed < USB_SPEED_SUPER) { | 3143 | if (udev->speed < USB_SPEED_SUPER) { |
3144 | if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) | 3144 | if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) |
3145 | status = usb_disable_remote_wakeup(udev); | 3145 | status = usb_disable_remote_wakeup(udev); |
3146 | } else { | 3146 | } else { |
3147 | status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, | 3147 | status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, |
3148 | &devstatus); | 3148 | &devstatus); |
3149 | if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP | 3149 | if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP |
3150 | | USB_INTRF_STAT_FUNC_RW)) | 3150 | | USB_INTRF_STAT_FUNC_RW)) |
3151 | status = usb_disable_remote_wakeup(udev); | 3151 | status = usb_disable_remote_wakeup(udev); |
3152 | } | 3152 | } |
3153 | 3153 | ||
3154 | if (status) | 3154 | if (status) |
3155 | dev_dbg(&udev->dev, | 3155 | dev_dbg(&udev->dev, |
3156 | "disable remote wakeup, status %d\n", | 3156 | "disable remote wakeup, status %d\n", |
3157 | status); | 3157 | status); |
3158 | status = 0; | 3158 | status = 0; |
3159 | } | 3159 | } |
3160 | return status; | 3160 | return status; |
3161 | } | 3161 | } |
3162 | 3162 | ||
3163 | /* | 3163 | /* |
3164 | * usb_port_resume - re-activate a suspended usb device's upstream port | 3164 | * usb_port_resume - re-activate a suspended usb device's upstream port |
3165 | * @udev: device to re-activate, not a root hub | 3165 | * @udev: device to re-activate, not a root hub |
3166 | * Context: must be able to sleep; device not locked; pm locks held | 3166 | * Context: must be able to sleep; device not locked; pm locks held |
3167 | * | 3167 | * |
3168 | * This will re-activate the suspended device, increasing power usage | 3168 | * This will re-activate the suspended device, increasing power usage |
3169 | * while letting drivers communicate again with its endpoints. | 3169 | * while letting drivers communicate again with its endpoints. |
3170 | * USB resume explicitly guarantees that the power session between | 3170 | * USB resume explicitly guarantees that the power session between |
3171 | * the host and the device is the same as it was when the device | 3171 | * the host and the device is the same as it was when the device |
3172 | * suspended. | 3172 | * suspended. |
3173 | * | 3173 | * |
3174 | * If @udev->reset_resume is set then this routine won't check that the | 3174 | * If @udev->reset_resume is set then this routine won't check that the |
3175 | * port is still enabled. Furthermore, finish_port_resume() above will | 3175 | * port is still enabled. Furthermore, finish_port_resume() above will |
3176 | * reset @udev. The end result is that a broken power session can be | 3176 | * reset @udev. The end result is that a broken power session can be |
3177 | * recovered and @udev will appear to persist across a loss of VBUS power. | 3177 | * recovered and @udev will appear to persist across a loss of VBUS power. |
3178 | * | 3178 | * |
3179 | * For example, if a host controller doesn't maintain VBUS suspend current | 3179 | * For example, if a host controller doesn't maintain VBUS suspend current |
3180 | * during a system sleep or is reset when the system wakes up, all the USB | 3180 | * during a system sleep or is reset when the system wakes up, all the USB |
3181 | * power sessions below it will be broken. This is especially troublesome | 3181 | * power sessions below it will be broken. This is especially troublesome |
3182 | * for mass-storage devices containing mounted filesystems, since the | 3182 | * for mass-storage devices containing mounted filesystems, since the |
3183 | * device will appear to have disconnected and all the memory mappings | 3183 | * device will appear to have disconnected and all the memory mappings |
3184 | * to it will be lost. Using the USB_PERSIST facility, the device can be | 3184 | * to it will be lost. Using the USB_PERSIST facility, the device can be |
3185 | * made to appear as if it had not disconnected. | 3185 | * made to appear as if it had not disconnected. |
3186 | * | 3186 | * |
3187 | * This facility can be dangerous. Although usb_reset_and_verify_device() makes | 3187 | * This facility can be dangerous. Although usb_reset_and_verify_device() makes |
3188 | * every effort to ensure that the same device is present after the | 3188 | * every effort to ensure that the same device is present after the |
3189 | * reset as before, it cannot provide a 100% guarantee. Furthermore it's | 3189 | * reset as before, it cannot provide a 100% guarantee. Furthermore it's |
3190 | * quite possible for a device to remain unaltered but its media to be | 3190 | * quite possible for a device to remain unaltered but its media to be |
3191 | * changed. If the user replaces a flash memory card while the system is | 3191 | * changed. If the user replaces a flash memory card while the system is |
3192 | * asleep, he will have only himself to blame when the filesystem on the | 3192 | * asleep, he will have only himself to blame when the filesystem on the |
3193 | * new card is corrupted and the system crashes. | 3193 | * new card is corrupted and the system crashes. |
3194 | * | 3194 | * |
3195 | * Returns 0 on success, else negative errno. | 3195 | * Returns 0 on success, else negative errno. |
3196 | */ | 3196 | */ |
3197 | int usb_port_resume(struct usb_device *udev, pm_message_t msg) | 3197 | int usb_port_resume(struct usb_device *udev, pm_message_t msg) |
3198 | { | 3198 | { |
3199 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); | 3199 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); |
3200 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; | 3200 | struct usb_port *port_dev = hub->ports[udev->portnum - 1]; |
3201 | int port1 = udev->portnum; | 3201 | int port1 = udev->portnum; |
3202 | int status; | 3202 | int status; |
3203 | u16 portchange, portstatus; | 3203 | u16 portchange, portstatus; |
3204 | 3204 | ||
3205 | if (port_dev->did_runtime_put) { | 3205 | if (port_dev->did_runtime_put) { |
3206 | status = pm_runtime_get_sync(&port_dev->dev); | 3206 | status = pm_runtime_get_sync(&port_dev->dev); |
3207 | port_dev->did_runtime_put = false; | 3207 | port_dev->did_runtime_put = false; |
3208 | if (status < 0) { | 3208 | if (status < 0) { |
3209 | dev_dbg(&udev->dev, "can't resume usb port, status %d\n", | 3209 | dev_dbg(&udev->dev, "can't resume usb port, status %d\n", |
3210 | status); | 3210 | status); |
3211 | return status; | 3211 | return status; |
3212 | } | 3212 | } |
3213 | } | 3213 | } |
3214 | 3214 | ||
3215 | /* Skip the initial Clear-Suspend step for a remote wakeup */ | 3215 | /* Skip the initial Clear-Suspend step for a remote wakeup */ |
3216 | status = hub_port_status(hub, port1, &portstatus, &portchange); | 3216 | status = hub_port_status(hub, port1, &portstatus, &portchange); |
3217 | if (status == 0 && !port_is_suspended(hub, portstatus)) | 3217 | if (status == 0 && !port_is_suspended(hub, portstatus)) |
3218 | goto SuspendCleared; | 3218 | goto SuspendCleared; |
3219 | 3219 | ||
3220 | /* dev_dbg(hub->intfdev, "resume port %d\n", port1); */ | 3220 | /* dev_dbg(hub->intfdev, "resume port %d\n", port1); */ |
3221 | 3221 | ||
3222 | set_bit(port1, hub->busy_bits); | 3222 | set_bit(port1, hub->busy_bits); |
3223 | 3223 | ||
3224 | /* see 7.1.7.7; affects power usage, but not budgeting */ | 3224 | /* see 7.1.7.7; affects power usage, but not budgeting */ |
3225 | if (hub_is_superspeed(hub->hdev)) | 3225 | if (hub_is_superspeed(hub->hdev)) |
3226 | status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0); | 3226 | status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0); |
3227 | else | 3227 | else |
3228 | status = usb_clear_port_feature(hub->hdev, | 3228 | status = usb_clear_port_feature(hub->hdev, |
3229 | port1, USB_PORT_FEAT_SUSPEND); | 3229 | port1, USB_PORT_FEAT_SUSPEND); |
3230 | if (status) { | 3230 | if (status) { |
3231 | dev_dbg(hub->intfdev, "can't resume port %d, status %d\n", | 3231 | dev_dbg(hub->intfdev, "can't resume port %d, status %d\n", |
3232 | port1, status); | 3232 | port1, status); |
3233 | } else { | 3233 | } else { |
3234 | /* drive resume for at least 20 msec */ | 3234 | /* drive resume for at least 20 msec */ |
3235 | dev_dbg(&udev->dev, "usb %sresume\n", | 3235 | dev_dbg(&udev->dev, "usb %sresume\n", |
3236 | (PMSG_IS_AUTO(msg) ? "auto-" : "")); | 3236 | (PMSG_IS_AUTO(msg) ? "auto-" : "")); |
3237 | msleep(25); | 3237 | msleep(25); |
3238 | 3238 | ||
3239 | /* Virtual root hubs can trigger on GET_PORT_STATUS to | 3239 | /* Virtual root hubs can trigger on GET_PORT_STATUS to |
3240 | * stop resume signaling. Then finish the resume | 3240 | * stop resume signaling. Then finish the resume |
3241 | * sequence. | 3241 | * sequence. |
3242 | */ | 3242 | */ |
3243 | status = hub_port_status(hub, port1, &portstatus, &portchange); | 3243 | status = hub_port_status(hub, port1, &portstatus, &portchange); |
3244 | 3244 | ||
3245 | /* TRSMRCY = 10 msec */ | 3245 | /* TRSMRCY = 10 msec */ |
3246 | msleep(10); | 3246 | msleep(10); |
3247 | } | 3247 | } |
3248 | 3248 | ||
3249 | SuspendCleared: | 3249 | SuspendCleared: |
3250 | if (status == 0) { | 3250 | if (status == 0) { |
3251 | udev->port_is_suspended = 0; | 3251 | udev->port_is_suspended = 0; |
3252 | if (hub_is_superspeed(hub->hdev)) { | 3252 | if (hub_is_superspeed(hub->hdev)) { |
3253 | if (portchange & USB_PORT_STAT_C_LINK_STATE) | 3253 | if (portchange & USB_PORT_STAT_C_LINK_STATE) |
3254 | usb_clear_port_feature(hub->hdev, port1, | 3254 | usb_clear_port_feature(hub->hdev, port1, |
3255 | USB_PORT_FEAT_C_PORT_LINK_STATE); | 3255 | USB_PORT_FEAT_C_PORT_LINK_STATE); |
3256 | } else { | 3256 | } else { |
3257 | if (portchange & USB_PORT_STAT_C_SUSPEND) | 3257 | if (portchange & USB_PORT_STAT_C_SUSPEND) |
3258 | usb_clear_port_feature(hub->hdev, port1, | 3258 | usb_clear_port_feature(hub->hdev, port1, |
3259 | USB_PORT_FEAT_C_SUSPEND); | 3259 | USB_PORT_FEAT_C_SUSPEND); |
3260 | } | 3260 | } |
3261 | } | 3261 | } |
3262 | 3262 | ||
3263 | clear_bit(port1, hub->busy_bits); | 3263 | clear_bit(port1, hub->busy_bits); |
3264 | 3264 | ||
3265 | status = check_port_resume_type(udev, | 3265 | status = check_port_resume_type(udev, |
3266 | hub, port1, status, portchange, portstatus); | 3266 | hub, port1, status, portchange, portstatus); |
3267 | if (status == 0) | 3267 | if (status == 0) |
3268 | status = finish_port_resume(udev); | 3268 | status = finish_port_resume(udev); |
3269 | if (status < 0) { | 3269 | if (status < 0) { |
3270 | dev_dbg(&udev->dev, "can't resume, status %d\n", status); | 3270 | dev_dbg(&udev->dev, "can't resume, status %d\n", status); |
3271 | hub_port_logical_disconnect(hub, port1); | 3271 | hub_port_logical_disconnect(hub, port1); |
3272 | } else { | 3272 | } else { |
3273 | /* Try to enable USB2 hardware LPM */ | 3273 | /* Try to enable USB2 hardware LPM */ |
3274 | if (udev->usb2_hw_lpm_capable == 1) | 3274 | if (udev->usb2_hw_lpm_capable == 1) |
3275 | usb_set_usb2_hardware_lpm(udev, 1); | 3275 | usb_set_usb2_hardware_lpm(udev, 1); |
3276 | 3276 | ||
3277 | /* Try to enable USB3 LTM and LPM */ | 3277 | /* Try to enable USB3 LTM and LPM */ |
3278 | usb_enable_ltm(udev); | 3278 | usb_enable_ltm(udev); |
3279 | usb_unlocked_enable_lpm(udev); | 3279 | usb_unlocked_enable_lpm(udev); |
3280 | } | 3280 | } |
3281 | 3281 | ||
3282 | return status; | 3282 | return status; |
3283 | } | 3283 | } |
3284 | 3284 | ||
3285 | #ifdef CONFIG_PM_RUNTIME | 3285 | #ifdef CONFIG_PM_RUNTIME |
3286 | 3286 | ||
3287 | /* caller has locked udev */ | 3287 | /* caller has locked udev */ |
3288 | int usb_remote_wakeup(struct usb_device *udev) | 3288 | int usb_remote_wakeup(struct usb_device *udev) |
3289 | { | 3289 | { |
3290 | int status = 0; | 3290 | int status = 0; |
3291 | 3291 | ||
3292 | if (udev->state == USB_STATE_SUSPENDED) { | 3292 | if (udev->state == USB_STATE_SUSPENDED) { |
3293 | dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); | 3293 | dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); |
3294 | status = usb_autoresume_device(udev); | 3294 | status = usb_autoresume_device(udev); |
3295 | if (status == 0) { | 3295 | if (status == 0) { |
3296 | /* Let the drivers do their thing, then... */ | 3296 | /* Let the drivers do their thing, then... */ |
3297 | usb_autosuspend_device(udev); | 3297 | usb_autosuspend_device(udev); |
3298 | } | 3298 | } |
3299 | } | 3299 | } |
3300 | return status; | 3300 | return status; |
3301 | } | 3301 | } |
3302 | 3302 | ||
3303 | #endif | 3303 | #endif |
3304 | 3304 | ||
3305 | static int check_ports_changed(struct usb_hub *hub) | 3305 | static int check_ports_changed(struct usb_hub *hub) |
3306 | { | 3306 | { |
3307 | int port1; | 3307 | int port1; |
3308 | 3308 | ||
3309 | for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) { | 3309 | for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) { |
3310 | u16 portstatus, portchange; | 3310 | u16 portstatus, portchange; |
3311 | int status; | 3311 | int status; |
3312 | 3312 | ||
3313 | status = hub_port_status(hub, port1, &portstatus, &portchange); | 3313 | status = hub_port_status(hub, port1, &portstatus, &portchange); |
3314 | if (!status && portchange) | 3314 | if (!status && portchange) |
3315 | return 1; | 3315 | return 1; |
3316 | } | 3316 | } |
3317 | return 0; | 3317 | return 0; |
3318 | } | 3318 | } |
3319 | 3319 | ||
3320 | static int hub_suspend(struct usb_interface *intf, pm_message_t msg) | 3320 | static int hub_suspend(struct usb_interface *intf, pm_message_t msg) |
3321 | { | 3321 | { |
3322 | struct usb_hub *hub = usb_get_intfdata (intf); | 3322 | struct usb_hub *hub = usb_get_intfdata (intf); |
3323 | struct usb_device *hdev = hub->hdev; | 3323 | struct usb_device *hdev = hub->hdev; |
3324 | unsigned port1; | 3324 | unsigned port1; |
3325 | int status; | 3325 | int status; |
3326 | 3326 | ||
3327 | /* | 3327 | /* |
3328 | * Warn if children aren't already suspended. | 3328 | * Warn if children aren't already suspended. |
3329 | * Also, add up the number of wakeup-enabled descendants. | 3329 | * Also, add up the number of wakeup-enabled descendants. |
3330 | */ | 3330 | */ |
3331 | hub->wakeup_enabled_descendants = 0; | 3331 | hub->wakeup_enabled_descendants = 0; |
3332 | for (port1 = 1; port1 <= hdev->maxchild; port1++) { | 3332 | for (port1 = 1; port1 <= hdev->maxchild; port1++) { |
3333 | struct usb_device *udev; | 3333 | struct usb_device *udev; |
3334 | 3334 | ||
3335 | udev = hub->ports[port1 - 1]->child; | 3335 | udev = hub->ports[port1 - 1]->child; |
3336 | if (udev && udev->can_submit) { | 3336 | if (udev && udev->can_submit) { |
3337 | dev_warn(&intf->dev, "port %d nyet suspended\n", port1); | 3337 | dev_warn(&intf->dev, "port %d nyet suspended\n", port1); |
3338 | if (PMSG_IS_AUTO(msg)) | 3338 | if (PMSG_IS_AUTO(msg)) |
3339 | return -EBUSY; | 3339 | return -EBUSY; |
3340 | } | 3340 | } |
3341 | if (udev) | 3341 | if (udev) |
3342 | hub->wakeup_enabled_descendants += | 3342 | hub->wakeup_enabled_descendants += |
3343 | wakeup_enabled_descendants(udev); | 3343 | wakeup_enabled_descendants(udev); |
3344 | } | 3344 | } |
3345 | 3345 | ||
3346 | if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { | 3346 | if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { |
3347 | /* check if there are changes pending on hub ports */ | 3347 | /* check if there are changes pending on hub ports */ |
3348 | if (check_ports_changed(hub)) { | 3348 | if (check_ports_changed(hub)) { |
3349 | if (PMSG_IS_AUTO(msg)) | 3349 | if (PMSG_IS_AUTO(msg)) |
3350 | return -EBUSY; | 3350 | return -EBUSY; |
3351 | pm_wakeup_event(&hdev->dev, 2000); | 3351 | pm_wakeup_event(&hdev->dev, 2000); |
3352 | } | 3352 | } |
3353 | } | 3353 | } |
3354 | 3354 | ||
3355 | if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) { | 3355 | if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) { |
3356 | /* Enable hub to send remote wakeup for all ports. */ | 3356 | /* Enable hub to send remote wakeup for all ports. */ |
3357 | for (port1 = 1; port1 <= hdev->maxchild; port1++) { | 3357 | for (port1 = 1; port1 <= hdev->maxchild; port1++) { |
3358 | status = set_port_feature(hdev, | 3358 | status = set_port_feature(hdev, |
3359 | port1 | | 3359 | port1 | |
3360 | USB_PORT_FEAT_REMOTE_WAKE_CONNECT | | 3360 | USB_PORT_FEAT_REMOTE_WAKE_CONNECT | |
3361 | USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT | | 3361 | USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT | |
3362 | USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT, | 3362 | USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT, |
3363 | USB_PORT_FEAT_REMOTE_WAKE_MASK); | 3363 | USB_PORT_FEAT_REMOTE_WAKE_MASK); |
3364 | } | 3364 | } |
3365 | } | 3365 | } |
3366 | 3366 | ||
3367 | dev_dbg(&intf->dev, "%s\n", __func__); | 3367 | dev_dbg(&intf->dev, "%s\n", __func__); |
3368 | 3368 | ||
3369 | /* stop khubd and related activity */ | 3369 | /* stop khubd and related activity */ |
3370 | hub_quiesce(hub, HUB_SUSPEND); | 3370 | hub_quiesce(hub, HUB_SUSPEND); |
3371 | return 0; | 3371 | return 0; |
3372 | } | 3372 | } |
3373 | 3373 | ||
3374 | static int hub_resume(struct usb_interface *intf) | 3374 | static int hub_resume(struct usb_interface *intf) |
3375 | { | 3375 | { |
3376 | struct usb_hub *hub = usb_get_intfdata(intf); | 3376 | struct usb_hub *hub = usb_get_intfdata(intf); |
3377 | 3377 | ||
3378 | dev_dbg(&intf->dev, "%s\n", __func__); | 3378 | dev_dbg(&intf->dev, "%s\n", __func__); |
3379 | hub_activate(hub, HUB_RESUME); | 3379 | hub_activate(hub, HUB_RESUME); |
3380 | return 0; | 3380 | return 0; |
3381 | } | 3381 | } |
3382 | 3382 | ||
3383 | static int hub_reset_resume(struct usb_interface *intf) | 3383 | static int hub_reset_resume(struct usb_interface *intf) |
3384 | { | 3384 | { |
3385 | struct usb_hub *hub = usb_get_intfdata(intf); | 3385 | struct usb_hub *hub = usb_get_intfdata(intf); |
3386 | 3386 | ||
3387 | dev_dbg(&intf->dev, "%s\n", __func__); | 3387 | dev_dbg(&intf->dev, "%s\n", __func__); |
3388 | hub_activate(hub, HUB_RESET_RESUME); | 3388 | hub_activate(hub, HUB_RESET_RESUME); |
3389 | return 0; | 3389 | return 0; |
3390 | } | 3390 | } |
3391 | 3391 | ||
3392 | /** | 3392 | /** |
3393 | * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power | 3393 | * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power |
3394 | * @rhdev: struct usb_device for the root hub | 3394 | * @rhdev: struct usb_device for the root hub |
3395 | * | 3395 | * |
3396 | * The USB host controller driver calls this function when its root hub | 3396 | * The USB host controller driver calls this function when its root hub |
3397 | * is resumed and Vbus power has been interrupted or the controller | 3397 | * is resumed and Vbus power has been interrupted or the controller |
3398 | * has been reset. The routine marks @rhdev as having lost power. | 3398 | * has been reset. The routine marks @rhdev as having lost power. |
3399 | * When the hub driver is resumed it will take notice and carry out | 3399 | * When the hub driver is resumed it will take notice and carry out |
3400 | * power-session recovery for all the "USB-PERSIST"-enabled child devices; | 3400 | * power-session recovery for all the "USB-PERSIST"-enabled child devices; |
3401 | * the others will be disconnected. | 3401 | * the others will be disconnected. |
3402 | */ | 3402 | */ |
3403 | void usb_root_hub_lost_power(struct usb_device *rhdev) | 3403 | void usb_root_hub_lost_power(struct usb_device *rhdev) |
3404 | { | 3404 | { |
3405 | dev_warn(&rhdev->dev, "root hub lost power or was reset\n"); | 3405 | dev_warn(&rhdev->dev, "root hub lost power or was reset\n"); |
3406 | rhdev->reset_resume = 1; | 3406 | rhdev->reset_resume = 1; |
3407 | } | 3407 | } |
3408 | EXPORT_SYMBOL_GPL(usb_root_hub_lost_power); | 3408 | EXPORT_SYMBOL_GPL(usb_root_hub_lost_power); |
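As the comment says, usb_root_hub_lost_power() is intended to be called from a host controller driver's resume path once it detects that Vbus was interrupted or the controller was reset. A hypothetical fragment, purely for illustration (not from any real HCD):

    /* Illustrative sketch: flag the root hub for reset-resume so the hub
     * driver performs USB-PERSIST recovery for the child devices.
     */
    static int example_hcd_resume(struct usb_hcd *hcd, bool power_lost)
    {
            if (power_lost)
                    usb_root_hub_lost_power(hcd->self.root_hub);
            return 0;
    }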
3409 | 3409 | ||
3410 | static const char * const usb3_lpm_names[] = { | 3410 | static const char * const usb3_lpm_names[] = { |
3411 | "U0", | 3411 | "U0", |
3412 | "U1", | 3412 | "U1", |
3413 | "U2", | 3413 | "U2", |
3414 | "U3", | 3414 | "U3", |
3415 | }; | 3415 | }; |
3416 | 3416 | ||
3417 | /* | 3417 | /* |
3418 | * Send a Set SEL control transfer to the device, prior to enabling | 3418 | * Send a Set SEL control transfer to the device, prior to enabling |
3419 | * device-initiated U1 or U2. This lets the device know the exit latencies from | 3419 | * device-initiated U1 or U2. This lets the device know the exit latencies from |
3420 | * the time the device initiates a U1 or U2 exit, to the time it will receive a | 3420 | * the time the device initiates a U1 or U2 exit, to the time it will receive a |
3421 | * packet from the host. | 3421 | * packet from the host. |
3422 | * | 3422 | * |
3423 | * This function will fail if the SEL or PEL values for udev are greater than | 3423 | * This function will fail if the SEL or PEL values for udev are greater than |
3424 | * the maximum allowed values for the link state to be enabled. | 3424 | * the maximum allowed values for the link state to be enabled. |
3425 | */ | 3425 | */ |
3426 | static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state) | 3426 | static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state) |
3427 | { | 3427 | { |
3428 | struct usb_set_sel_req *sel_values; | 3428 | struct usb_set_sel_req *sel_values; |
3429 | unsigned long long u1_sel; | 3429 | unsigned long long u1_sel; |
3430 | unsigned long long u1_pel; | 3430 | unsigned long long u1_pel; |
3431 | unsigned long long u2_sel; | 3431 | unsigned long long u2_sel; |
3432 | unsigned long long u2_pel; | 3432 | unsigned long long u2_pel; |
3433 | int ret; | 3433 | int ret; |
3434 | 3434 | ||
3435 | if (udev->state != USB_STATE_CONFIGURED) | 3435 | if (udev->state != USB_STATE_CONFIGURED) |
3436 | return 0; | 3436 | return 0; |
3437 | 3437 | ||
3438 | /* Convert SEL and PEL stored in ns to us */ | 3438 | /* Convert SEL and PEL stored in ns to us */ |
3439 | u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); | 3439 | u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
3440 | u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); | 3440 | u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
3441 | u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); | 3441 | u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
3442 | u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); | 3442 | u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); |
3443 | 3443 | ||
3444 | /* | 3444 | /* |
3445 | * Make sure that the calculated SEL and PEL values for the link | 3445 | * Make sure that the calculated SEL and PEL values for the link |
3446 | * state we're enabling aren't bigger than the max SEL/PEL | 3446 | * state we're enabling aren't bigger than the max SEL/PEL |
3447 | * value that will fit in the SET SEL control transfer. | 3447 | * value that will fit in the SET SEL control transfer. |
3448 | * Otherwise the device would get an incorrect idea of the exit | 3448 | * Otherwise the device would get an incorrect idea of the exit |
3449 | * latency for the link state, and could start a device-initiated | 3449 | * latency for the link state, and could start a device-initiated |
3450 | * U1/U2 when the exit latencies are too high. | 3450 | * U1/U2 when the exit latencies are too high. |
3451 | */ | 3451 | */ |
3452 | if ((state == USB3_LPM_U1 && | 3452 | if ((state == USB3_LPM_U1 && |
3453 | (u1_sel > USB3_LPM_MAX_U1_SEL_PEL || | 3453 | (u1_sel > USB3_LPM_MAX_U1_SEL_PEL || |
3454 | u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) || | 3454 | u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) || |
3455 | (state == USB3_LPM_U2 && | 3455 | (state == USB3_LPM_U2 && |
3456 | (u2_sel > USB3_LPM_MAX_U2_SEL_PEL || | 3456 | (u2_sel > USB3_LPM_MAX_U2_SEL_PEL || |
3457 | u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) { | 3457 | u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) { |
3458 | dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n", | 3458 | dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n", |
3459 | usb3_lpm_names[state], u1_sel, u1_pel); | 3459 | usb3_lpm_names[state], u1_sel, u1_pel); |
3460 | return -EINVAL; | 3460 | return -EINVAL; |
3461 | } | 3461 | } |
3462 | 3462 | ||
3463 | /* | 3463 | /* |
3464 | * If we're enabling device-initiated LPM for one link state, | 3464 | * If we're enabling device-initiated LPM for one link state, |
3465 | * but the other link state has a too high SEL or PEL value, | 3465 | * but the other link state has a too high SEL or PEL value, |
3466 | * just set those values to the max in the Set SEL request. | 3466 | * just set those values to the max in the Set SEL request. |
3467 | */ | 3467 | */ |
3468 | if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL) | 3468 | if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL) |
3469 | u1_sel = USB3_LPM_MAX_U1_SEL_PEL; | 3469 | u1_sel = USB3_LPM_MAX_U1_SEL_PEL; |
3470 | 3470 | ||
3471 | if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL) | 3471 | if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL) |
3472 | u1_pel = USB3_LPM_MAX_U1_SEL_PEL; | 3472 | u1_pel = USB3_LPM_MAX_U1_SEL_PEL; |
3473 | 3473 | ||
3474 | if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL) | 3474 | if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL) |
3475 | u2_sel = USB3_LPM_MAX_U2_SEL_PEL; | 3475 | u2_sel = USB3_LPM_MAX_U2_SEL_PEL; |
3476 | 3476 | ||
3477 | if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL) | 3477 | if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL) |
3478 | u2_pel = USB3_LPM_MAX_U2_SEL_PEL; | 3478 | u2_pel = USB3_LPM_MAX_U2_SEL_PEL; |
3479 | 3479 | ||
3480 | /* | 3480 | /* |
3481 | * usb_enable_lpm() can be called as part of a failed device reset, | 3481 | * usb_enable_lpm() can be called as part of a failed device reset, |
3482 | * which may be initiated by an error path of a mass storage driver. | 3482 | * which may be initiated by an error path of a mass storage driver. |
3483 | * Therefore, use GFP_NOIO. | 3483 | * Therefore, use GFP_NOIO. |
3484 | */ | 3484 | */ |
3485 | sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO); | 3485 | sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO); |
3486 | if (!sel_values) | 3486 | if (!sel_values) |
3487 | return -ENOMEM; | 3487 | return -ENOMEM; |
3488 | 3488 | ||
3489 | sel_values->u1_sel = u1_sel; | 3489 | sel_values->u1_sel = u1_sel; |
3490 | sel_values->u1_pel = u1_pel; | 3490 | sel_values->u1_pel = u1_pel; |
3491 | sel_values->u2_sel = cpu_to_le16(u2_sel); | 3491 | sel_values->u2_sel = cpu_to_le16(u2_sel); |
3492 | sel_values->u2_pel = cpu_to_le16(u2_pel); | 3492 | sel_values->u2_pel = cpu_to_le16(u2_pel); |
3493 | 3493 | ||
3494 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 3494 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
3495 | USB_REQ_SET_SEL, | 3495 | USB_REQ_SET_SEL, |
3496 | USB_RECIP_DEVICE, | 3496 | USB_RECIP_DEVICE, |
3497 | 0, 0, | 3497 | 0, 0, |
3498 | sel_values, sizeof *(sel_values), | 3498 | sel_values, sizeof *(sel_values), |
3499 | USB_CTRL_SET_TIMEOUT); | 3499 | USB_CTRL_SET_TIMEOUT); |
3500 | kfree(sel_values); | 3500 | kfree(sel_values); |
3501 | return ret; | 3501 | return ret; |
3502 | } | 3502 | } |
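A worked example of the conversion in usb_req_set_sel(), with made-up numbers: a stored u1_params.sel of 4300 ns becomes DIV_ROUND_UP(4300, 1000) = 5 us. Values for the link state being enabled that exceed the field maximum cause the -EINVAL bail-out above; values for the *other* link state are merely clamped, as in this sketch:

    /* Hypothetical values, for illustration only */
    u1_sel = DIV_ROUND_UP(4300, 1000);          /* 4300 ns -> 5 us */
    if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL)       /* clamp to the field max */
            u1_sel = USB3_LPM_MAX_U1_SEL_PEL;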
3503 | 3503 | ||
3504 | /* | 3504 | /* |
3505 | * Enable or disable device-initiated U1 or U2 transitions. | 3505 | * Enable or disable device-initiated U1 or U2 transitions. |
3506 | */ | 3506 | */ |
3507 | static int usb_set_device_initiated_lpm(struct usb_device *udev, | 3507 | static int usb_set_device_initiated_lpm(struct usb_device *udev, |
3508 | enum usb3_link_state state, bool enable) | 3508 | enum usb3_link_state state, bool enable) |
3509 | { | 3509 | { |
3510 | int ret; | 3510 | int ret; |
3511 | int feature; | 3511 | int feature; |
3512 | 3512 | ||
3513 | switch (state) { | 3513 | switch (state) { |
3514 | case USB3_LPM_U1: | 3514 | case USB3_LPM_U1: |
3515 | feature = USB_DEVICE_U1_ENABLE; | 3515 | feature = USB_DEVICE_U1_ENABLE; |
3516 | break; | 3516 | break; |
3517 | case USB3_LPM_U2: | 3517 | case USB3_LPM_U2: |
3518 | feature = USB_DEVICE_U2_ENABLE; | 3518 | feature = USB_DEVICE_U2_ENABLE; |
3519 | break; | 3519 | break; |
3520 | default: | 3520 | default: |
3521 | dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n", | 3521 | dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n", |
3522 | __func__, enable ? "enable" : "disable"); | 3522 | __func__, enable ? "enable" : "disable"); |
3523 | return -EINVAL; | 3523 | return -EINVAL; |
3524 | } | 3524 | } |
3525 | 3525 | ||
3526 | if (udev->state != USB_STATE_CONFIGURED) { | 3526 | if (udev->state != USB_STATE_CONFIGURED) { |
3527 | dev_dbg(&udev->dev, "%s: Can't %s %s state " | 3527 | dev_dbg(&udev->dev, "%s: Can't %s %s state " |
3528 | "for unconfigured device.\n", | 3528 | "for unconfigured device.\n", |
3529 | __func__, enable ? "enable" : "disable", | 3529 | __func__, enable ? "enable" : "disable", |
3530 | usb3_lpm_names[state]); | 3530 | usb3_lpm_names[state]); |
3531 | return 0; | 3531 | return 0; |
3532 | } | 3532 | } |
3533 | 3533 | ||
3534 | if (enable) { | 3534 | if (enable) { |
3535 | /* | 3535 | /* |
3536 | * Now send the control transfer to enable device-initiated LPM | 3536 | * Now send the control transfer to enable device-initiated LPM |
3537 | * for either U1 or U2. | 3537 | * for either U1 or U2. |
3538 | */ | 3538 | */ |
3539 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 3539 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
3540 | USB_REQ_SET_FEATURE, | 3540 | USB_REQ_SET_FEATURE, |
3541 | USB_RECIP_DEVICE, | 3541 | USB_RECIP_DEVICE, |
3542 | feature, | 3542 | feature, |
3543 | 0, NULL, 0, | 3543 | 0, NULL, 0, |
3544 | USB_CTRL_SET_TIMEOUT); | 3544 | USB_CTRL_SET_TIMEOUT); |
3545 | } else { | 3545 | } else { |
3546 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 3546 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
3547 | USB_REQ_CLEAR_FEATURE, | 3547 | USB_REQ_CLEAR_FEATURE, |
3548 | USB_RECIP_DEVICE, | 3548 | USB_RECIP_DEVICE, |
3549 | feature, | 3549 | feature, |
3550 | 0, NULL, 0, | 3550 | 0, NULL, 0, |
3551 | USB_CTRL_SET_TIMEOUT); | 3551 | USB_CTRL_SET_TIMEOUT); |
3552 | } | 3552 | } |
3553 | if (ret < 0) { | 3553 | if (ret < 0) { |
3554 | dev_warn(&udev->dev, "%s of device-initiated %s failed.\n", | 3554 | dev_warn(&udev->dev, "%s of device-initiated %s failed.\n", |
3555 | enable ? "Enable" : "Disable", | 3555 | enable ? "Enable" : "Disable", |
3556 | usb3_lpm_names[state]); | 3556 | usb3_lpm_names[state]); |
3557 | return -EBUSY; | 3557 | return -EBUSY; |
3558 | } | 3558 | } |
3559 | return 0; | 3559 | return 0; |
3560 | } | 3560 | } |
3561 | 3561 | ||
3562 | static int usb_set_lpm_timeout(struct usb_device *udev, | 3562 | static int usb_set_lpm_timeout(struct usb_device *udev, |
3563 | enum usb3_link_state state, int timeout) | 3563 | enum usb3_link_state state, int timeout) |
3564 | { | 3564 | { |
3565 | int ret; | 3565 | int ret; |
3566 | int feature; | 3566 | int feature; |
3567 | 3567 | ||
3568 | switch (state) { | 3568 | switch (state) { |
3569 | case USB3_LPM_U1: | 3569 | case USB3_LPM_U1: |
3570 | feature = USB_PORT_FEAT_U1_TIMEOUT; | 3570 | feature = USB_PORT_FEAT_U1_TIMEOUT; |
3571 | break; | 3571 | break; |
3572 | case USB3_LPM_U2: | 3572 | case USB3_LPM_U2: |
3573 | feature = USB_PORT_FEAT_U2_TIMEOUT; | 3573 | feature = USB_PORT_FEAT_U2_TIMEOUT; |
3574 | break; | 3574 | break; |
3575 | default: | 3575 | default: |
3576 | dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n", | 3576 | dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n", |
3577 | __func__); | 3577 | __func__); |
3578 | return -EINVAL; | 3578 | return -EINVAL; |
3579 | } | 3579 | } |
3580 | 3580 | ||
3581 | if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT && | 3581 | if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT && |
3582 | timeout != USB3_LPM_DEVICE_INITIATED) { | 3582 | timeout != USB3_LPM_DEVICE_INITIATED) { |
3583 | dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, " | 3583 | dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, " |
3584 | "which is a reserved value.\n", | 3584 | "which is a reserved value.\n", |
3585 | usb3_lpm_names[state], timeout); | 3585 | usb3_lpm_names[state], timeout); |
3586 | return -EINVAL; | 3586 | return -EINVAL; |
3587 | } | 3587 | } |
3588 | 3588 | ||
3589 | ret = set_port_feature(udev->parent, | 3589 | ret = set_port_feature(udev->parent, |
3590 | USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum, | 3590 | USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum, |
3591 | feature); | 3591 | feature); |
3592 | if (ret < 0) { | 3592 | if (ret < 0) { |
3593 | dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, " | 3593 | dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, " |
3594 | "error code %i\n", usb3_lpm_names[state], | 3594 | "error code %i\n", usb3_lpm_names[state], |
3595 | timeout, ret); | 3595 | timeout, ret); |
3596 | return -EBUSY; | 3596 | return -EBUSY; |
3597 | } | 3597 | } |
3598 | if (state == USB3_LPM_U1) | 3598 | if (state == USB3_LPM_U1) |
3599 | udev->u1_params.timeout = timeout; | 3599 | udev->u1_params.timeout = timeout; |
3600 | else | 3600 | else |
3601 | udev->u2_params.timeout = timeout; | 3601 | udev->u2_params.timeout = timeout; |
3602 | return 0; | 3602 | return 0; |
3603 | } | 3603 | } |
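Note how usb_set_lpm_timeout() packs the timeout together with the port number into wIndex for the Set Port Feature request. Assuming the usual USB_PORT_LPM_TIMEOUT() layout (timeout in the high byte, port number in the low byte) — an assumption, not verified here — a 10 us U1 timeout on port 3 would look roughly like:

    /* Illustrative only; the exact macro layout is assumed */
    u16 windex = USB_PORT_LPM_TIMEOUT(0x0a) | 3;    /* == 0x0a03 */
    set_port_feature(udev->parent, windex, USB_PORT_FEAT_U1_TIMEOUT);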
3604 | 3604 | ||
3605 | /* | 3605 | /* |
3606 | * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated | 3606 | * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated |
3607 | * U1/U2 entry. | 3607 | * U1/U2 entry. |
3608 | * | 3608 | * |
3609 | * We will attempt to enable U1 or U2, but there are no guarantees that the | 3609 | * We will attempt to enable U1 or U2, but there are no guarantees that the |
3610 | * control transfers to set the hub timeout or enable device-initiated U1/U2 | 3610 | * control transfers to set the hub timeout or enable device-initiated U1/U2 |
3611 | * will be successful. | 3611 | * will be successful. |
3612 | * | 3612 | * |
3613 | * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI | 3613 | * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI |
3614 | * driver know about it. If that call fails, it should be harmless, and just | 3614 | * driver know about it. If that call fails, it should be harmless, and just |
3615 | * take up slightly more bus bandwidth for unnecessary U1/U2 exit latency. | 3615 | * take up slightly more bus bandwidth for unnecessary U1/U2 exit latency. |
3616 | */ | 3616 | */ |
3617 | static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, | 3617 | static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, |
3618 | enum usb3_link_state state) | 3618 | enum usb3_link_state state) |
3619 | { | 3619 | { |
3620 | int timeout, ret; | 3620 | int timeout, ret; |
3621 | __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; | 3621 | __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; |
3622 | __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; | 3622 | __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; |
3623 | 3623 | ||
3624 | /* If the device says it doesn't have *any* exit latency to come out of | 3624 | /* If the device says it doesn't have *any* exit latency to come out of |
3625 | * U1 or U2, it's probably lying. Assume it doesn't implement that link | 3625 | * U1 or U2, it's probably lying. Assume it doesn't implement that link |
3626 | * state. | 3626 | * state. |
3627 | */ | 3627 | */ |
3628 | if ((state == USB3_LPM_U1 && u1_mel == 0) || | 3628 | if ((state == USB3_LPM_U1 && u1_mel == 0) || |
3629 | (state == USB3_LPM_U2 && u2_mel == 0)) | 3629 | (state == USB3_LPM_U2 && u2_mel == 0)) |
3630 | return; | 3630 | return; |
3631 | 3631 | ||
3632 | /* | 3632 | /* |
3633 | * First, let the device know about the exit latencies | 3633 | * First, let the device know about the exit latencies |
3634 | * associated with the link state we're about to enable. | 3634 | * associated with the link state we're about to enable. |
3635 | */ | 3635 | */ |
3636 | ret = usb_req_set_sel(udev, state); | 3636 | ret = usb_req_set_sel(udev, state); |
3637 | if (ret < 0) { | 3637 | if (ret < 0) { |
3638 | dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n", | 3638 | dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n", |
3639 | usb3_lpm_names[state]); | 3639 | usb3_lpm_names[state]); |
3640 | return; | 3640 | return; |
3641 | } | 3641 | } |
3642 | 3642 | ||
3643 | /* We allow the host controller to set the U1/U2 timeout internally | 3643 | /* We allow the host controller to set the U1/U2 timeout internally |
3644 | * first, so that it can change its schedule to account for the | 3644 | * first, so that it can change its schedule to account for the |
3645 | * additional latency to send data to a device in a lower power | 3645 | * additional latency to send data to a device in a lower power |
3646 | * link state. | 3646 | * link state. |
3647 | */ | 3647 | */ |
3648 | timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state); | 3648 | timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state); |
3649 | 3649 | ||
3650 | /* xHCI host controller doesn't want to enable this LPM state. */ | 3650 | /* xHCI host controller doesn't want to enable this LPM state. */ |
3651 | if (timeout == 0) | 3651 | if (timeout == 0) |
3652 | return; | 3652 | return; |
3653 | 3653 | ||
3654 | if (timeout < 0) { | 3654 | if (timeout < 0) { |
3655 | dev_warn(&udev->dev, "Could not enable %s link state, " | 3655 | dev_warn(&udev->dev, "Could not enable %s link state, " |
3656 | "xHCI error %i.\n", usb3_lpm_names[state], | 3656 | "xHCI error %i.\n", usb3_lpm_names[state], |
3657 | timeout); | 3657 | timeout); |
3658 | return; | 3658 | return; |
3659 | } | 3659 | } |
3660 | 3660 | ||
3661 | if (usb_set_lpm_timeout(udev, state, timeout)) | 3661 | if (usb_set_lpm_timeout(udev, state, timeout)) |
3662 | /* If we can't set the parent hub U1/U2 timeout, | 3662 | /* If we can't set the parent hub U1/U2 timeout, |
3663 | * device-initiated LPM won't be allowed either, so let the xHCI | 3663 | * device-initiated LPM won't be allowed either, so let the xHCI |
3664 | * host know that this link state won't be enabled. | 3664 | * host know that this link state won't be enabled. |
3665 | */ | 3665 | */ |
3666 | hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); | 3666 | hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); |
3667 | 3667 | ||
3668 | /* Only a configured device will accept the Set Feature U1/U2_ENABLE */ | 3668 | /* Only a configured device will accept the Set Feature U1/U2_ENABLE */ |
3669 | else if (udev->actconfig) | 3669 | else if (udev->actconfig) |
3670 | usb_set_device_initiated_lpm(udev, state, true); | 3670 | usb_set_device_initiated_lpm(udev, state, true); |
3671 | 3671 | ||
3672 | } | 3672 | } |
3673 | 3673 | ||
3674 | /* | 3674 | /* |
3675 | * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated | 3675 | * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated |
3676 | * U1/U2 entry. | 3676 | * U1/U2 entry. |
3677 | * | 3677 | * |
3678 | * If this function returns -EBUSY, the parent hub will still allow U1/U2 entry. | 3678 | * If this function returns -EBUSY, the parent hub will still allow U1/U2 entry. |
3679 | * If zero is returned, the parent will not allow the link to go into U1/U2. | 3679 | * If zero is returned, the parent will not allow the link to go into U1/U2. |
3680 | * | 3680 | * |
3681 | * If zero is returned, device-initiated U1/U2 entry may still be enabled, but | 3681 | * If zero is returned, device-initiated U1/U2 entry may still be enabled, but |
3682 | * it won't have an effect on the bus link state because the parent hub will | 3682 | * it won't have an effect on the bus link state because the parent hub will |
3683 | * still disallow device-initiated U1/U2 entry. | 3683 | * still disallow device-initiated U1/U2 entry. |
3684 | * | 3684 | * |
3685 | * If zero is returned, the xHCI host controller may still think U1/U2 entry is | 3685 | * If zero is returned, the xHCI host controller may still think U1/U2 entry is |
3686 | * possible. The result is that slightly more bus bandwidth will be taken up | 3686 | * possible. The result is that slightly more bus bandwidth will be taken up |
3687 | * (to account for U1/U2 exit latency), but it should be harmless. | 3687 | * (to account for U1/U2 exit latency), but it should be harmless. |
3688 | */ | 3688 | */ |
3689 | static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, | 3689 | static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, |
3690 | enum usb3_link_state state) | 3690 | enum usb3_link_state state) |
3691 | { | 3691 | { |
3692 | int feature; | 3692 | int feature; |
3693 | 3693 | ||
3694 | switch (state) { | 3694 | switch (state) { |
3695 | case USB3_LPM_U1: | 3695 | case USB3_LPM_U1: |
3696 | feature = USB_PORT_FEAT_U1_TIMEOUT; | 3696 | feature = USB_PORT_FEAT_U1_TIMEOUT; |
3697 | break; | 3697 | break; |
3698 | case USB3_LPM_U2: | 3698 | case USB3_LPM_U2: |
3699 | feature = USB_PORT_FEAT_U2_TIMEOUT; | 3699 | feature = USB_PORT_FEAT_U2_TIMEOUT; |
3700 | break; | 3700 | break; |
3701 | default: | 3701 | default: |
3702 | dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n", | 3702 | dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n", |
3703 | __func__); | 3703 | __func__); |
3704 | return -EINVAL; | 3704 | return -EINVAL; |
3705 | } | 3705 | } |
3706 | 3706 | ||
3707 | if (usb_set_lpm_timeout(udev, state, 0)) | 3707 | if (usb_set_lpm_timeout(udev, state, 0)) |
3708 | return -EBUSY; | 3708 | return -EBUSY; |
3709 | 3709 | ||
3710 | usb_set_device_initiated_lpm(udev, state, false); | 3710 | usb_set_device_initiated_lpm(udev, state, false); |
3711 | 3711 | ||
3712 | if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state)) | 3712 | if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state)) |
3713 | dev_warn(&udev->dev, "Could not disable xHCI %s timeout, " | 3713 | dev_warn(&udev->dev, "Could not disable xHCI %s timeout, " |
3714 | "bus schedule bandwidth may be impacted.\n", | 3714 | "bus schedule bandwidth may be impacted.\n", |
3715 | usb3_lpm_names[state]); | 3715 | usb3_lpm_names[state]); |
3716 | return 0; | 3716 | return 0; |
3717 | } | 3717 | } |
3718 | 3718 | ||
3719 | /* | 3719 | /* |
3720 | * Disable hub-initiated and device-initiated U1 and U2 entry. | 3720 | * Disable hub-initiated and device-initiated U1 and U2 entry. |
3721 | * Caller must own the bandwidth_mutex. | 3721 | * Caller must own the bandwidth_mutex. |
3722 | * | 3722 | * |
3723 | * This will call usb_enable_lpm() on failure, which will decrement | 3723 | * This will call usb_enable_lpm() on failure, which will decrement |
3724 | * lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero. | 3724 | * lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero. |
3725 | */ | 3725 | */ |
3726 | int usb_disable_lpm(struct usb_device *udev) | 3726 | int usb_disable_lpm(struct usb_device *udev) |
3727 | { | 3727 | { |
3728 | struct usb_hcd *hcd; | 3728 | struct usb_hcd *hcd; |
3729 | 3729 | ||
3730 | if (!udev || !udev->parent || | 3730 | if (!udev || !udev->parent || |
3731 | udev->speed != USB_SPEED_SUPER || | 3731 | udev->speed != USB_SPEED_SUPER || |
3732 | !udev->lpm_capable) | 3732 | !udev->lpm_capable) |
3733 | return 0; | 3733 | return 0; |
3734 | 3734 | ||
3735 | hcd = bus_to_hcd(udev->bus); | 3735 | hcd = bus_to_hcd(udev->bus); |
3736 | if (!hcd || !hcd->driver->disable_usb3_lpm_timeout) | 3736 | if (!hcd || !hcd->driver->disable_usb3_lpm_timeout) |
3737 | return 0; | 3737 | return 0; |
3738 | 3738 | ||
3739 | udev->lpm_disable_count++; | 3739 | udev->lpm_disable_count++; |
3740 | if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) | 3740 | if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) |
3741 | return 0; | 3741 | return 0; |
3742 | 3742 | ||
3743 | /* If LPM is enabled, attempt to disable it. */ | 3743 | /* If LPM is enabled, attempt to disable it. */ |
3744 | if (usb_disable_link_state(hcd, udev, USB3_LPM_U1)) | 3744 | if (usb_disable_link_state(hcd, udev, USB3_LPM_U1)) |
3745 | goto enable_lpm; | 3745 | goto enable_lpm; |
3746 | if (usb_disable_link_state(hcd, udev, USB3_LPM_U2)) | 3746 | if (usb_disable_link_state(hcd, udev, USB3_LPM_U2)) |
3747 | goto enable_lpm; | 3747 | goto enable_lpm; |
3748 | 3748 | ||
3749 | return 0; | 3749 | return 0; |
3750 | 3750 | ||
3751 | enable_lpm: | 3751 | enable_lpm: |
3752 | usb_enable_lpm(udev); | 3752 | usb_enable_lpm(udev); |
3753 | return -EBUSY; | 3753 | return -EBUSY; |
3754 | } | 3754 | } |
3755 | EXPORT_SYMBOL_GPL(usb_disable_lpm); | 3755 | EXPORT_SYMBOL_GPL(usb_disable_lpm); |
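Callers normally pair the disable and enable entry points so the lpm_disable_count reference stays balanced; LPM is only re-enabled once the count drops back to zero. A typical caller pattern, shown here only as an illustration:

    ret = usb_unlocked_disable_lpm(udev);
    if (ret)
            return ret;
    /* ... work that must not race with U1/U2 entry ... */
    usb_unlocked_enable_lpm(udev);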
3756 | 3756 | ||
3757 | /* Grab the bandwidth_mutex before calling usb_disable_lpm() */ | 3757 | /* Grab the bandwidth_mutex before calling usb_disable_lpm() */ |
3758 | int usb_unlocked_disable_lpm(struct usb_device *udev) | 3758 | int usb_unlocked_disable_lpm(struct usb_device *udev) |
3759 | { | 3759 | { |
3760 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 3760 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
3761 | int ret; | 3761 | int ret; |
3762 | 3762 | ||
3763 | if (!hcd) | 3763 | if (!hcd) |
3764 | return -EINVAL; | 3764 | return -EINVAL; |
3765 | 3765 | ||
3766 | mutex_lock(hcd->bandwidth_mutex); | 3766 | mutex_lock(hcd->bandwidth_mutex); |
3767 | ret = usb_disable_lpm(udev); | 3767 | ret = usb_disable_lpm(udev); |
3768 | mutex_unlock(hcd->bandwidth_mutex); | 3768 | mutex_unlock(hcd->bandwidth_mutex); |
3769 | 3769 | ||
3770 | return ret; | 3770 | return ret; |
3771 | } | 3771 | } |
3772 | EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); | 3772 | EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); |
3773 | 3773 | ||
3774 | /* | 3774 | /* |
3775 | * Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The | 3775 | * Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The |
3776 | * xHCI host policy may prevent U1 or U2 from being enabled. | 3776 | * xHCI host policy may prevent U1 or U2 from being enabled. |
3777 | * | 3777 | * |
3778 | * Other callers may have disabled link PM, so U1 and U2 entry will be disabled | 3778 | * Other callers may have disabled link PM, so U1 and U2 entry will be disabled |
3779 | * until the lpm_disable_count drops to zero. Caller must own the | 3779 | * until the lpm_disable_count drops to zero. Caller must own the |
3780 | * bandwidth_mutex. | 3780 | * bandwidth_mutex. |
3781 | */ | 3781 | */ |
3782 | void usb_enable_lpm(struct usb_device *udev) | 3782 | void usb_enable_lpm(struct usb_device *udev) |
3783 | { | 3783 | { |
3784 | struct usb_hcd *hcd; | 3784 | struct usb_hcd *hcd; |
3785 | 3785 | ||
3786 | if (!udev || !udev->parent || | 3786 | if (!udev || !udev->parent || |
3787 | udev->speed != USB_SPEED_SUPER || | 3787 | udev->speed != USB_SPEED_SUPER || |
3788 | !udev->lpm_capable) | 3788 | !udev->lpm_capable) |
3789 | return; | 3789 | return; |
3790 | 3790 | ||
3791 | udev->lpm_disable_count--; | 3791 | udev->lpm_disable_count--; |
3792 | hcd = bus_to_hcd(udev->bus); | 3792 | hcd = bus_to_hcd(udev->bus); |
3793 | /* Double check that we can both enable and disable LPM. | 3793 | /* Double check that we can both enable and disable LPM. |
3794 | * Device must be configured to accept set feature U1/U2 timeout. | 3794 | * Device must be configured to accept set feature U1/U2 timeout. |
3795 | */ | 3795 | */ |
3796 | if (!hcd || !hcd->driver->enable_usb3_lpm_timeout || | 3796 | if (!hcd || !hcd->driver->enable_usb3_lpm_timeout || |
3797 | !hcd->driver->disable_usb3_lpm_timeout) | 3797 | !hcd->driver->disable_usb3_lpm_timeout) |
3798 | return; | 3798 | return; |
3799 | 3799 | ||
3800 | if (udev->lpm_disable_count > 0) | 3800 | if (udev->lpm_disable_count > 0) |
3801 | return; | 3801 | return; |
3802 | 3802 | ||
3803 | usb_enable_link_state(hcd, udev, USB3_LPM_U1); | 3803 | usb_enable_link_state(hcd, udev, USB3_LPM_U1); |
3804 | usb_enable_link_state(hcd, udev, USB3_LPM_U2); | 3804 | usb_enable_link_state(hcd, udev, USB3_LPM_U2); |
3805 | } | 3805 | } |
3806 | EXPORT_SYMBOL_GPL(usb_enable_lpm); | 3806 | EXPORT_SYMBOL_GPL(usb_enable_lpm); |
3807 | 3807 | ||
3808 | /* Grab the bandwidth_mutex before calling usb_enable_lpm() */ | 3808 | /* Grab the bandwidth_mutex before calling usb_enable_lpm() */ |
3809 | void usb_unlocked_enable_lpm(struct usb_device *udev) | 3809 | void usb_unlocked_enable_lpm(struct usb_device *udev) |
3810 | { | 3810 | { |
3811 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 3811 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
3812 | 3812 | ||
3813 | if (!hcd) | 3813 | if (!hcd) |
3814 | return; | 3814 | return; |
3815 | 3815 | ||
3816 | mutex_lock(hcd->bandwidth_mutex); | 3816 | mutex_lock(hcd->bandwidth_mutex); |
3817 | usb_enable_lpm(udev); | 3817 | usb_enable_lpm(udev); |
3818 | mutex_unlock(hcd->bandwidth_mutex); | 3818 | mutex_unlock(hcd->bandwidth_mutex); |
3819 | } | 3819 | } |
3820 | EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); | 3820 | EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); |
3821 | 3821 | ||
3822 | 3822 | ||
3823 | #else /* CONFIG_PM */ | 3823 | #else /* CONFIG_PM */ |
3824 | 3824 | ||
3825 | #define hub_suspend NULL | 3825 | #define hub_suspend NULL |
3826 | #define hub_resume NULL | 3826 | #define hub_resume NULL |
3827 | #define hub_reset_resume NULL | 3827 | #define hub_reset_resume NULL |
3828 | 3828 | ||
3829 | int usb_disable_lpm(struct usb_device *udev) | 3829 | int usb_disable_lpm(struct usb_device *udev) |
3830 | { | 3830 | { |
3831 | return 0; | 3831 | return 0; |
3832 | } | 3832 | } |
3833 | EXPORT_SYMBOL_GPL(usb_disable_lpm); | 3833 | EXPORT_SYMBOL_GPL(usb_disable_lpm); |
3834 | 3834 | ||
3835 | void usb_enable_lpm(struct usb_device *udev) { } | 3835 | void usb_enable_lpm(struct usb_device *udev) { } |
3836 | EXPORT_SYMBOL_GPL(usb_enable_lpm); | 3836 | EXPORT_SYMBOL_GPL(usb_enable_lpm); |
3837 | 3837 | ||
3838 | int usb_unlocked_disable_lpm(struct usb_device *udev) | 3838 | int usb_unlocked_disable_lpm(struct usb_device *udev) |
3839 | { | 3839 | { |
3840 | return 0; | 3840 | return 0; |
3841 | } | 3841 | } |
3842 | EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); | 3842 | EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); |
3843 | 3843 | ||
3844 | void usb_unlocked_enable_lpm(struct usb_device *udev) { } | 3844 | void usb_unlocked_enable_lpm(struct usb_device *udev) { } |
3845 | EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); | 3845 | EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); |
3846 | 3846 | ||
3847 | int usb_disable_ltm(struct usb_device *udev) | 3847 | int usb_disable_ltm(struct usb_device *udev) |
3848 | { | 3848 | { |
3849 | return 0; | 3849 | return 0; |
3850 | } | 3850 | } |
3851 | EXPORT_SYMBOL_GPL(usb_disable_ltm); | 3851 | EXPORT_SYMBOL_GPL(usb_disable_ltm); |
3852 | 3852 | ||
3853 | void usb_enable_ltm(struct usb_device *udev) { } | 3853 | void usb_enable_ltm(struct usb_device *udev) { } |
3854 | EXPORT_SYMBOL_GPL(usb_enable_ltm); | 3854 | EXPORT_SYMBOL_GPL(usb_enable_ltm); |
3855 | 3855 | ||
3856 | #endif /* CONFIG_PM */ | 3856 | #endif /* CONFIG_PM */ |
3857 | 3857 | ||
3858 | 3858 | ||
3859 | /* USB 2.0 spec, 7.1.7.3 / fig 7-29: | 3859 | /* USB 2.0 spec, 7.1.7.3 / fig 7-29: |
3860 | * | 3860 | * |
3861 | * Between connect detection and reset signaling there must be a delay | 3861 | * Between connect detection and reset signaling there must be a delay |
3862 | * of 100ms at least for debounce and power-settling. The corresponding | 3862 | * of 100ms at least for debounce and power-settling. The corresponding |
3863 | * timer shall restart whenever the downstream port detects a disconnect. | 3863 | * timer shall restart whenever the downstream port detects a disconnect. |
3864 | * | 3864 | * |
3865 | * Apparently there are some bluetooth and irda-dongles and a number of | 3865 | * Apparently there are some bluetooth and irda-dongles and a number of |
3866 | * low-speed devices for which this debounce period may last over a second. | 3866 | * low-speed devices for which this debounce period may last over a second. |
3867 | * Not covered by the spec - but easy to deal with. | 3867 | * Not covered by the spec - but easy to deal with. |
3868 | * | 3868 | * |
3869 | * This implementation uses a 1500ms total debounce timeout; if the | 3869 | * This implementation uses a 1500ms total debounce timeout; if the |
3870 | * connection isn't stable by then it returns -ETIMEDOUT. It checks | 3870 | * connection isn't stable by then it returns -ETIMEDOUT. It checks |
3871 | * every 25ms for transient disconnects. When the port status has been | 3871 | * every 25ms for transient disconnects. When the port status has been |
3872 | * unchanged for 100ms it returns the port status. | 3872 | * unchanged for 100ms it returns the port status. |
3873 | */ | 3873 | */ |
3874 | int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected) | 3874 | int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected) |
3875 | { | 3875 | { |
3876 | int ret; | 3876 | int ret; |
3877 | int total_time, stable_time = 0; | 3877 | int total_time, stable_time = 0; |
3878 | u16 portchange, portstatus; | 3878 | u16 portchange, portstatus; |
3879 | unsigned connection = 0xffff; | 3879 | unsigned connection = 0xffff; |
3880 | 3880 | ||
3881 | for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { | 3881 | for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { |
3882 | ret = hub_port_status(hub, port1, &portstatus, &portchange); | 3882 | ret = hub_port_status(hub, port1, &portstatus, &portchange); |
3883 | if (ret < 0) | 3883 | if (ret < 0) |
3884 | return ret; | 3884 | return ret; |
3885 | 3885 | ||
3886 | if (!(portchange & USB_PORT_STAT_C_CONNECTION) && | 3886 | if (!(portchange & USB_PORT_STAT_C_CONNECTION) && |
3887 | (portstatus & USB_PORT_STAT_CONNECTION) == connection) { | 3887 | (portstatus & USB_PORT_STAT_CONNECTION) == connection) { |
3888 | if (!must_be_connected || | 3888 | if (!must_be_connected || |
3889 | (connection == USB_PORT_STAT_CONNECTION)) | 3889 | (connection == USB_PORT_STAT_CONNECTION)) |
3890 | stable_time += HUB_DEBOUNCE_STEP; | 3890 | stable_time += HUB_DEBOUNCE_STEP; |
3891 | if (stable_time >= HUB_DEBOUNCE_STABLE) | 3891 | if (stable_time >= HUB_DEBOUNCE_STABLE) |
3892 | break; | 3892 | break; |
3893 | } else { | 3893 | } else { |
3894 | stable_time = 0; | 3894 | stable_time = 0; |
3895 | connection = portstatus & USB_PORT_STAT_CONNECTION; | 3895 | connection = portstatus & USB_PORT_STAT_CONNECTION; |
3896 | } | 3896 | } |
3897 | 3897 | ||
3898 | if (portchange & USB_PORT_STAT_C_CONNECTION) { | 3898 | if (portchange & USB_PORT_STAT_C_CONNECTION) { |
3899 | usb_clear_port_feature(hub->hdev, port1, | 3899 | usb_clear_port_feature(hub->hdev, port1, |
3900 | USB_PORT_FEAT_C_CONNECTION); | 3900 | USB_PORT_FEAT_C_CONNECTION); |
3901 | } | 3901 | } |
3902 | 3902 | ||
3903 | if (total_time >= HUB_DEBOUNCE_TIMEOUT) | 3903 | if (total_time >= HUB_DEBOUNCE_TIMEOUT) |
3904 | break; | 3904 | break; |
3905 | msleep(HUB_DEBOUNCE_STEP); | 3905 | msleep(HUB_DEBOUNCE_STEP); |
3906 | } | 3906 | } |
3907 | 3907 | ||
3908 | dev_dbg (hub->intfdev, | 3908 | dev_dbg (hub->intfdev, |
3909 | "debounce: port %d: total %dms stable %dms status 0x%x\n", | 3909 | "debounce: port %d: total %dms stable %dms status 0x%x\n", |
3910 | port1, total_time, stable_time, portstatus); | 3910 | port1, total_time, stable_time, portstatus); |
3911 | 3911 | ||
3912 | if (stable_time < HUB_DEBOUNCE_STABLE) | 3912 | if (stable_time < HUB_DEBOUNCE_STABLE) |
3913 | return -ETIMEDOUT; | 3913 | return -ETIMEDOUT; |
3914 | return portstatus; | 3914 | return portstatus; |
3915 | } | 3915 | } |
3916 | 3916 | ||
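The 1500 ms / 25 ms / 100 ms figures in the comment above correspond to the HUB_DEBOUNCE_* constants from drivers/usb/core/hub.h, which this diff does not show. As a sketch of what that header is assumed to provide (values inferred from the comment, wrapper shapes inferred from the hub_port_debounce_be_stable() call made later in this file):

	/* assumed definitions from drivers/usb/core/hub.h */
	#define HUB_DEBOUNCE_TIMEOUT	1500	/* total budget, ms */
	#define HUB_DEBOUNCE_STEP	  25	/* poll interval, ms */
	#define HUB_DEBOUNCE_STABLE	 100	/* required quiet period, ms */

	static inline int hub_port_debounce_be_connected(struct usb_hub *hub,
			int port1)
	{
		return hub_port_debounce(hub, port1, true);
	}

	static inline int hub_port_debounce_be_stable(struct usb_hub *hub,
			int port1)
	{
		return hub_port_debounce(hub, port1, false);
	}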
3917 | void usb_ep0_reinit(struct usb_device *udev) | 3917 | void usb_ep0_reinit(struct usb_device *udev) |
3918 | { | 3918 | { |
3919 | usb_disable_endpoint(udev, 0 + USB_DIR_IN, true); | 3919 | usb_disable_endpoint(udev, 0 + USB_DIR_IN, true); |
3920 | usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true); | 3920 | usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true); |
3921 | usb_enable_endpoint(udev, &udev->ep0, true); | 3921 | usb_enable_endpoint(udev, &udev->ep0, true); |
3922 | } | 3922 | } |
3923 | EXPORT_SYMBOL_GPL(usb_ep0_reinit); | 3923 | EXPORT_SYMBOL_GPL(usb_ep0_reinit); |
3924 | 3924 | ||
3925 | #define usb_sndaddr0pipe() (PIPE_CONTROL << 30) | 3925 | #define usb_sndaddr0pipe() (PIPE_CONTROL << 30) |
3926 | #define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN) | 3926 | #define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN) |
3927 | 3927 | ||
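These two macros are the address-zero special case of the general pipe builders in <linux/usb.h>: with devnum and endpoint both zero, the device and endpoint fields drop out and only the pipe type (plus the direction bit for IN) remains. For comparison, the generic helpers look approximately like this (reproduced from <linux/usb.h>, not part of hub.c):

	static inline unsigned int __create_pipe(struct usb_device *dev,
			unsigned int endpoint)
	{
		return (dev->devnum << 8) | (endpoint << 15);
	}

	#define usb_sndctrlpipe(dev, endpoint)	\
		((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint))
	#define usb_rcvctrlpipe(dev, endpoint)	\
		((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)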
3928 | static int hub_set_address(struct usb_device *udev, int devnum) | 3928 | static int hub_set_address(struct usb_device *udev, int devnum) |
3929 | { | 3929 | { |
3930 | int retval; | 3930 | int retval; |
3931 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 3931 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
3932 | 3932 | ||
3933 | /* | 3933 | /* |
3934 | * The host controller will choose the device address, | 3934 | * The host controller will choose the device address, |
3935 | * instead of the core having chosen it earlier | 3935 | * instead of the core having chosen it earlier |
3936 | */ | 3936 | */ |
3937 | if (!hcd->driver->address_device && devnum <= 1) | 3937 | if (!hcd->driver->address_device && devnum <= 1) |
3938 | return -EINVAL; | 3938 | return -EINVAL; |
3939 | if (udev->state == USB_STATE_ADDRESS) | 3939 | if (udev->state == USB_STATE_ADDRESS) |
3940 | return 0; | 3940 | return 0; |
3941 | if (udev->state != USB_STATE_DEFAULT) | 3941 | if (udev->state != USB_STATE_DEFAULT) |
3942 | return -EINVAL; | 3942 | return -EINVAL; |
3943 | if (hcd->driver->address_device) | 3943 | if (hcd->driver->address_device) |
3944 | retval = hcd->driver->address_device(hcd, udev); | 3944 | retval = hcd->driver->address_device(hcd, udev); |
3945 | else | 3945 | else |
3946 | retval = usb_control_msg(udev, usb_sndaddr0pipe(), | 3946 | retval = usb_control_msg(udev, usb_sndaddr0pipe(), |
3947 | USB_REQ_SET_ADDRESS, 0, devnum, 0, | 3947 | USB_REQ_SET_ADDRESS, 0, devnum, 0, |
3948 | NULL, 0, USB_CTRL_SET_TIMEOUT); | 3948 | NULL, 0, USB_CTRL_SET_TIMEOUT); |
3949 | if (retval == 0) { | 3949 | if (retval == 0) { |
3950 | update_devnum(udev, devnum); | 3950 | update_devnum(udev, devnum); |
3951 | /* Device now using proper address. */ | 3951 | /* Device now using proper address. */ |
3952 | usb_set_device_state(udev, USB_STATE_ADDRESS); | 3952 | usb_set_device_state(udev, USB_STATE_ADDRESS); |
3953 | usb_ep0_reinit(udev); | 3953 | usb_ep0_reinit(udev); |
3954 | } | 3954 | } |
3955 | return retval; | 3955 | return retval; |
3956 | } | 3956 | } |
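For readers mapping the usb_control_msg() call above onto the bus, the SETUP packet it produces for SET_ADDRESS (USB 2.0 spec, 9.4.6) works out as follows; this is an illustrative comment only, not code from hub.c:

	/*
	 * bmRequestType = 0x00			host-to-device, standard, device
	 * bRequest      = USB_REQ_SET_ADDRESS	(0x05)
	 * wValue        = devnum		the address being assigned
	 * wIndex        = 0
	 * wLength       = 0			no data stage
	 *
	 * The request goes to the default address 0 (usb_sndaddr0pipe()),
	 * and the device only answers at the new address after the status
	 * stage completes, hence the msleep() settle time at the call site
	 * in hub_port_init().
	 */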
3957 | 3957 | ||
3958 | /* | 3958 | /* |
3959 | * There are reports of USB 3.0 devices that say they support USB 2.0 Link PM | 3959 | * There are reports of USB 3.0 devices that say they support USB 2.0 Link PM |
3960 | * when they're plugged into a USB 2.0 port, but they don't work when LPM is | 3960 | * when they're plugged into a USB 2.0 port, but they don't work when LPM is |
3961 | * enabled. | 3961 | * enabled. |
3962 | * | 3962 | * |
3963 | * Only enable USB 2.0 Link PM if the port is internal (hardwired), or the | 3963 | * Only enable USB 2.0 Link PM if the port is internal (hardwired), or the |
3964 | * device says it supports the new USB 2.0 Link PM errata by setting the BESL | 3964 | * device says it supports the new USB 2.0 Link PM errata by setting the BESL |
3965 | * support bit in the BOS descriptor. | 3965 | * support bit in the BOS descriptor. |
3966 | */ | 3966 | */ |
3967 | static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) | 3967 | static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) |
3968 | { | 3968 | { |
3969 | int connect_type; | 3969 | int connect_type; |
3970 | 3970 | ||
3971 | if (!udev->usb2_hw_lpm_capable) | 3971 | if (!udev->usb2_hw_lpm_capable) |
3972 | return; | 3972 | return; |
3973 | 3973 | ||
3974 | connect_type = usb_get_hub_port_connect_type(udev->parent, | 3974 | connect_type = usb_get_hub_port_connect_type(udev->parent, |
3975 | udev->portnum); | 3975 | udev->portnum); |
3976 | 3976 | ||
3977 | if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) || | 3977 | if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) || |
3978 | connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { | 3978 | connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { |
3979 | udev->usb2_hw_lpm_allowed = 1; | 3979 | udev->usb2_hw_lpm_allowed = 1; |
3980 | usb_set_usb2_hardware_lpm(udev, 1); | 3980 | usb_set_usb2_hardware_lpm(udev, 1); |
3981 | } | 3981 | } |
3982 | } | 3982 | } |
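The bmAttributes word tested in hub_set_initial_usb2_lpm_policy() comes from the USB 2.0 Extension capability of the BOS descriptor (udev->bos->ext_cap). The flag is assumed to be defined in <uapi/linux/usb/ch9.h> alongside struct usb_ext_cap_descriptor, roughly as:

	/* assumed bmAttributes bits of the USB 2.0 Extension descriptor */
	#define USB_LPM_SUPPORT		(1 << 1)	/* supports LPM */
	#define USB_BESL_SUPPORT	(1 << 2)	/* supports BESL */
	#define USB_BESL_BASELINE_VALID	(1 << 3)	/* baseline BESL valid */
	#define USB_BESL_DEEP_VALID	(1 << 4)	/* deep BESL valid */

Only USB_BESL_SUPPORT matters here: a device advertising it is trusted to implement the USB 2.0 Link PM errata correctly, so hardware LPM is switched on even for a port that is not hardwired.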
3983 | 3983 | ||
3984 | /* Reset device, (re)assign address, get device descriptor. | 3984 | /* Reset device, (re)assign address, get device descriptor. |
3985 | * Device connection must be stable, no more debouncing needed. | 3985 | * Device connection must be stable, no more debouncing needed. |
3986 | * Returns device in USB_STATE_ADDRESS, except on error. | 3986 | * Returns device in USB_STATE_ADDRESS, except on error. |
3987 | * | 3987 | * |
3988 | * If this is called for an already-existing device (as part of | 3988 | * If this is called for an already-existing device (as part of |
3989 | * usb_reset_and_verify_device), the caller must own the device lock. For a | 3989 | * usb_reset_and_verify_device), the caller must own the device lock. For a |
3990 | * newly detected device that is not accessible through any global | 3990 | * newly detected device that is not accessible through any global |
3991 | * pointers, it's not necessary to lock the device. | 3991 | * pointers, it's not necessary to lock the device. |
3992 | */ | 3992 | */ |
3993 | static int | 3993 | static int |
3994 | hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, | 3994 | hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, |
3995 | int retry_counter) | 3995 | int retry_counter) |
3996 | { | 3996 | { |
3997 | static DEFINE_MUTEX(usb_address0_mutex); | 3997 | static DEFINE_MUTEX(usb_address0_mutex); |
3998 | 3998 | ||
3999 | struct usb_device *hdev = hub->hdev; | 3999 | struct usb_device *hdev = hub->hdev; |
4000 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); | 4000 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); |
4001 | int i, j, retval; | 4001 | int i, j, retval; |
4002 | unsigned delay = HUB_SHORT_RESET_TIME; | 4002 | unsigned delay = HUB_SHORT_RESET_TIME; |
4003 | enum usb_device_speed oldspeed = udev->speed; | 4003 | enum usb_device_speed oldspeed = udev->speed; |
4004 | const char *speed; | 4004 | const char *speed; |
4005 | int devnum = udev->devnum; | 4005 | int devnum = udev->devnum; |
4006 | 4006 | ||
4007 | /* root hub ports have a slightly longer reset period | 4007 | /* root hub ports have a slightly longer reset period |
4008 | * (from USB 2.0 spec, section 7.1.7.5) | 4008 | * (from USB 2.0 spec, section 7.1.7.5) |
4009 | */ | 4009 | */ |
4010 | if (!hdev->parent) { | 4010 | if (!hdev->parent) { |
4011 | delay = HUB_ROOT_RESET_TIME; | 4011 | delay = HUB_ROOT_RESET_TIME; |
4012 | if (port1 == hdev->bus->otg_port) | 4012 | if (port1 == hdev->bus->otg_port) |
4013 | hdev->bus->b_hnp_enable = 0; | 4013 | hdev->bus->b_hnp_enable = 0; |
4014 | } | 4014 | } |
4015 | 4015 | ||
4016 | /* Some low speed devices have problems with the quick delay, so */ | 4016 | /* Some low speed devices have problems with the quick delay, so */ |
4017 | /* be a bit pessimistic with those devices. RHbug #23670 */ | 4017 | /* be a bit pessimistic with those devices. RHbug #23670 */ |
4018 | if (oldspeed == USB_SPEED_LOW) | 4018 | if (oldspeed == USB_SPEED_LOW) |
4019 | delay = HUB_LONG_RESET_TIME; | 4019 | delay = HUB_LONG_RESET_TIME; |
4020 | 4020 | ||
4021 | mutex_lock(&usb_address0_mutex); | 4021 | mutex_lock(&usb_address0_mutex); |
4022 | 4022 | ||
4023 | /* Reset the device; full speed may morph to high speed */ | 4023 | /* Reset the device; full speed may morph to high speed */ |
4024 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ | 4024 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ |
4025 | retval = hub_port_reset(hub, port1, udev, delay, false); | 4025 | retval = hub_port_reset(hub, port1, udev, delay, false); |
4026 | if (retval < 0) /* error or disconnect */ | 4026 | if (retval < 0) /* error or disconnect */ |
4027 | goto fail; | 4027 | goto fail; |
4028 | /* success, speed is known */ | 4028 | /* success, speed is known */ |
4029 | 4029 | ||
4030 | retval = -ENODEV; | 4030 | retval = -ENODEV; |
4031 | 4031 | ||
4032 | if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { | 4032 | if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { |
4033 | dev_dbg(&udev->dev, "device reset changed speed!\n"); | 4033 | dev_dbg(&udev->dev, "device reset changed speed!\n"); |
4034 | goto fail; | 4034 | goto fail; |
4035 | } | 4035 | } |
4036 | oldspeed = udev->speed; | 4036 | oldspeed = udev->speed; |
4037 | 4037 | ||
4038 | /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... | 4038 | /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... |
4039 | * it's a fixed size except for full speed devices. | 4039 | * it's a fixed size except for full speed devices. |
4040 | * For Wireless USB devices, ep0 max packet is always 512 (though | 4040 | * For Wireless USB devices, ep0 max packet is always 512 (though |
4041 | * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. | 4041 | * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. |
4042 | */ | 4042 | */ |
4043 | switch (udev->speed) { | 4043 | switch (udev->speed) { |
4044 | case USB_SPEED_SUPER: | 4044 | case USB_SPEED_SUPER: |
4045 | case USB_SPEED_WIRELESS: /* fixed at 512 */ | 4045 | case USB_SPEED_WIRELESS: /* fixed at 512 */ |
4046 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); | 4046 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); |
4047 | break; | 4047 | break; |
4048 | case USB_SPEED_HIGH: /* fixed at 64 */ | 4048 | case USB_SPEED_HIGH: /* fixed at 64 */ |
4049 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); | 4049 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); |
4050 | break; | 4050 | break; |
4051 | case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ | 4051 | case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ |
4052 | /* to determine the ep0 maxpacket size, try to read | 4052 | /* to determine the ep0 maxpacket size, try to read |
4053 | * the device descriptor to get bMaxPacketSize0 and | 4053 | * the device descriptor to get bMaxPacketSize0 and |
4054 | * then correct our initial guess. | 4054 | * then correct our initial guess. |
4055 | */ | 4055 | */ |
4056 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); | 4056 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); |
4057 | break; | 4057 | break; |
4058 | case USB_SPEED_LOW: /* fixed at 8 */ | 4058 | case USB_SPEED_LOW: /* fixed at 8 */ |
4059 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); | 4059 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); |
4060 | break; | 4060 | break; |
4061 | default: | 4061 | default: |
4062 | goto fail; | 4062 | goto fail; |
4063 | } | 4063 | } |
4064 | 4064 | ||
4065 | if (udev->speed == USB_SPEED_WIRELESS) | 4065 | if (udev->speed == USB_SPEED_WIRELESS) |
4066 | speed = "variable speed Wireless"; | 4066 | speed = "variable speed Wireless"; |
4067 | else | 4067 | else |
4068 | speed = usb_speed_string(udev->speed); | 4068 | speed = usb_speed_string(udev->speed); |
4069 | 4069 | ||
4070 | if (udev->speed != USB_SPEED_SUPER) | 4070 | if (udev->speed != USB_SPEED_SUPER) |
4071 | dev_info(&udev->dev, | 4071 | dev_info(&udev->dev, |
4072 | "%s %s USB device number %d using %s\n", | 4072 | "%s %s USB device number %d using %s\n", |
4073 | (udev->config) ? "reset" : "new", speed, | 4073 | (udev->config) ? "reset" : "new", speed, |
4074 | devnum, udev->bus->controller->driver->name); | 4074 | devnum, udev->bus->controller->driver->name); |
4075 | 4075 | ||
4076 | /* Set up TT records, if needed */ | 4076 | /* Set up TT records, if needed */ |
4077 | if (hdev->tt) { | 4077 | if (hdev->tt) { |
4078 | udev->tt = hdev->tt; | 4078 | udev->tt = hdev->tt; |
4079 | udev->ttport = hdev->ttport; | 4079 | udev->ttport = hdev->ttport; |
4080 | } else if (udev->speed != USB_SPEED_HIGH | 4080 | } else if (udev->speed != USB_SPEED_HIGH |
4081 | && hdev->speed == USB_SPEED_HIGH) { | 4081 | && hdev->speed == USB_SPEED_HIGH) { |
4082 | if (!hub->tt.hub) { | 4082 | if (!hub->tt.hub) { |
4083 | dev_err(&udev->dev, "parent hub has no TT\n"); | 4083 | dev_err(&udev->dev, "parent hub has no TT\n"); |
4084 | retval = -EINVAL; | 4084 | retval = -EINVAL; |
4085 | goto fail; | 4085 | goto fail; |
4086 | } | 4086 | } |
4087 | udev->tt = &hub->tt; | 4087 | udev->tt = &hub->tt; |
4088 | udev->ttport = port1; | 4088 | udev->ttport = port1; |
4089 | } | 4089 | } |
4090 | 4090 | ||
4091 | /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way? | 4091 | /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way? |
4092 | * Because device hardware and firmware is sometimes buggy in | 4092 | * Because device hardware and firmware is sometimes buggy in |
4093 | * this area, and this is how Linux has done it for ages. | 4093 | * this area, and this is how Linux has done it for ages. |
4094 | * Change it cautiously. | 4094 | * Change it cautiously. |
4095 | * | 4095 | * |
4096 | * NOTE: If USE_NEW_SCHEME() is true we will start by issuing | 4096 | * NOTE: If USE_NEW_SCHEME() is true we will start by issuing |
4097 | * a 64-byte GET_DESCRIPTOR request. This is what Windows does, | 4097 | * a 64-byte GET_DESCRIPTOR request. This is what Windows does, |
4098 | * so it may help with some non-standards-compliant devices. | 4098 | * so it may help with some non-standards-compliant devices. |
4099 | * Otherwise we start with SET_ADDRESS and then try to read the | 4099 | * Otherwise we start with SET_ADDRESS and then try to read the |
4100 | * first 8 bytes of the device descriptor to get the ep0 maxpacket | 4100 | * first 8 bytes of the device descriptor to get the ep0 maxpacket |
4101 | * value. | 4101 | * value. |
4102 | */ | 4102 | */ |
4103 | for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) { | 4103 | for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) { |
4104 | if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) { | 4104 | if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) { |
4105 | struct usb_device_descriptor *buf; | 4105 | struct usb_device_descriptor *buf; |
4106 | int r = 0; | 4106 | int r = 0; |
4107 | 4107 | ||
4108 | #define GET_DESCRIPTOR_BUFSIZE 64 | 4108 | #define GET_DESCRIPTOR_BUFSIZE 64 |
4109 | buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); | 4109 | buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); |
4110 | if (!buf) { | 4110 | if (!buf) { |
4111 | retval = -ENOMEM; | 4111 | retval = -ENOMEM; |
4112 | continue; | 4112 | continue; |
4113 | } | 4113 | } |
4114 | 4114 | ||
4115 | /* Retry on all errors; some devices are flaky. | 4115 | /* Retry on all errors; some devices are flaky. |
4116 | * 255 is for WUSB devices; we actually need to use | 4116 | * 255 is for WUSB devices; we actually need to use |
4117 | * 512 (WUSB1.0[4.8.1]). | 4117 | * 512 (WUSB1.0[4.8.1]). |
4118 | */ | 4118 | */ |
4119 | for (j = 0; j < 3; ++j) { | 4119 | for (j = 0; j < 3; ++j) { |
4120 | buf->bMaxPacketSize0 = 0; | 4120 | buf->bMaxPacketSize0 = 0; |
4121 | r = usb_control_msg(udev, usb_rcvaddr0pipe(), | 4121 | r = usb_control_msg(udev, usb_rcvaddr0pipe(), |
4122 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, | 4122 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, |
4123 | USB_DT_DEVICE << 8, 0, | 4123 | USB_DT_DEVICE << 8, 0, |
4124 | buf, GET_DESCRIPTOR_BUFSIZE, | 4124 | buf, GET_DESCRIPTOR_BUFSIZE, |
4125 | initial_descriptor_timeout); | 4125 | initial_descriptor_timeout); |
4126 | switch (buf->bMaxPacketSize0) { | 4126 | switch (buf->bMaxPacketSize0) { |
4127 | case 8: case 16: case 32: case 64: case 255: | 4127 | case 8: case 16: case 32: case 64: case 255: |
4128 | if (buf->bDescriptorType == | 4128 | if (buf->bDescriptorType == |
4129 | USB_DT_DEVICE) { | 4129 | USB_DT_DEVICE) { |
4130 | r = 0; | 4130 | r = 0; |
4131 | break; | 4131 | break; |
4132 | } | 4132 | } |
4133 | /* FALL THROUGH */ | 4133 | /* FALL THROUGH */ |
4134 | default: | 4134 | default: |
4135 | if (r == 0) | 4135 | if (r == 0) |
4136 | r = -EPROTO; | 4136 | r = -EPROTO; |
4137 | break; | 4137 | break; |
4138 | } | 4138 | } |
4139 | if (r == 0) | 4139 | if (r == 0) |
4140 | break; | 4140 | break; |
4141 | } | 4141 | } |
4142 | udev->descriptor.bMaxPacketSize0 = | 4142 | udev->descriptor.bMaxPacketSize0 = |
4143 | buf->bMaxPacketSize0; | 4143 | buf->bMaxPacketSize0; |
4144 | kfree(buf); | 4144 | kfree(buf); |
4145 | 4145 | ||
4146 | retval = hub_port_reset(hub, port1, udev, delay, false); | 4146 | retval = hub_port_reset(hub, port1, udev, delay, false); |
4147 | if (retval < 0) /* error or disconnect */ | 4147 | if (retval < 0) /* error or disconnect */ |
4148 | goto fail; | 4148 | goto fail; |
4149 | if (oldspeed != udev->speed) { | 4149 | if (oldspeed != udev->speed) { |
4150 | dev_dbg(&udev->dev, | 4150 | dev_dbg(&udev->dev, |
4151 | "device reset changed speed!\n"); | 4151 | "device reset changed speed!\n"); |
4152 | retval = -ENODEV; | 4152 | retval = -ENODEV; |
4153 | goto fail; | 4153 | goto fail; |
4154 | } | 4154 | } |
4155 | if (r) { | 4155 | if (r) { |
4156 | if (r != -ENODEV) | 4156 | if (r != -ENODEV) |
4157 | dev_err(&udev->dev, "device descriptor read/64, error %d\n", | 4157 | dev_err(&udev->dev, "device descriptor read/64, error %d\n", |
4158 | r); | 4158 | r); |
4159 | retval = -EMSGSIZE; | 4159 | retval = -EMSGSIZE; |
4160 | continue; | 4160 | continue; |
4161 | } | 4161 | } |
4162 | #undef GET_DESCRIPTOR_BUFSIZE | 4162 | #undef GET_DESCRIPTOR_BUFSIZE |
4163 | } | 4163 | } |
4164 | 4164 | ||
4165 | /* | 4165 | /* |
4166 | * If device is WUSB, we already assigned an | 4166 | * If device is WUSB, we already assigned an |
4167 | * unauthorized address in the Connect Ack sequence; | 4167 | * unauthorized address in the Connect Ack sequence; |
4168 | * authorization will assign the final address. | 4168 | * authorization will assign the final address. |
4169 | */ | 4169 | */ |
4170 | if (udev->wusb == 0) { | 4170 | if (udev->wusb == 0) { |
4171 | for (j = 0; j < SET_ADDRESS_TRIES; ++j) { | 4171 | for (j = 0; j < SET_ADDRESS_TRIES; ++j) { |
4172 | retval = hub_set_address(udev, devnum); | 4172 | retval = hub_set_address(udev, devnum); |
4173 | if (retval >= 0) | 4173 | if (retval >= 0) |
4174 | break; | 4174 | break; |
4175 | msleep(200); | 4175 | msleep(200); |
4176 | } | 4176 | } |
4177 | if (retval < 0) { | 4177 | if (retval < 0) { |
4178 | if (retval != -ENODEV) | 4178 | if (retval != -ENODEV) |
4179 | dev_err(&udev->dev, "device not accepting address %d, error %d\n", | 4179 | dev_err(&udev->dev, "device not accepting address %d, error %d\n", |
4180 | devnum, retval); | 4180 | devnum, retval); |
4181 | goto fail; | 4181 | goto fail; |
4182 | } | 4182 | } |
4183 | if (udev->speed == USB_SPEED_SUPER) { | 4183 | if (udev->speed == USB_SPEED_SUPER) { |
4184 | devnum = udev->devnum; | 4184 | devnum = udev->devnum; |
4185 | dev_info(&udev->dev, | 4185 | dev_info(&udev->dev, |
4186 | "%s SuperSpeed USB device number %d using %s\n", | 4186 | "%s SuperSpeed USB device number %d using %s\n", |
4187 | (udev->config) ? "reset" : "new", | 4187 | (udev->config) ? "reset" : "new", |
4188 | devnum, udev->bus->controller->driver->name); | 4188 | devnum, udev->bus->controller->driver->name); |
4189 | } | 4189 | } |
4190 | 4190 | ||
4191 | /* cope with hardware quirkiness: | 4191 | /* cope with hardware quirkiness: |
4192 | * - let SET_ADDRESS settle, some device hardware wants it | 4192 | * - let SET_ADDRESS settle, some device hardware wants it |
4193 | * - read ep0 maxpacket even for high and low speed, | 4193 | * - read ep0 maxpacket even for high and low speed, |
4194 | */ | 4194 | */ |
4195 | msleep(10); | 4195 | msleep(10); |
4196 | if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) | 4196 | if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) |
4197 | break; | 4197 | break; |
4198 | } | 4198 | } |
4199 | 4199 | ||
4200 | retval = usb_get_device_descriptor(udev, 8); | 4200 | retval = usb_get_device_descriptor(udev, 8); |
4201 | if (retval < 8) { | 4201 | if (retval < 8) { |
4202 | if (retval != -ENODEV) | 4202 | if (retval != -ENODEV) |
4203 | dev_err(&udev->dev, | 4203 | dev_err(&udev->dev, |
4204 | "device descriptor read/8, error %d\n", | 4204 | "device descriptor read/8, error %d\n", |
4205 | retval); | 4205 | retval); |
4206 | if (retval >= 0) | 4206 | if (retval >= 0) |
4207 | retval = -EMSGSIZE; | 4207 | retval = -EMSGSIZE; |
4208 | } else { | 4208 | } else { |
4209 | retval = 0; | 4209 | retval = 0; |
4210 | break; | 4210 | break; |
4211 | } | 4211 | } |
4212 | } | 4212 | } |
4213 | if (retval) | 4213 | if (retval) |
4214 | goto fail; | 4214 | goto fail; |
4215 | 4215 | ||
4216 | if (hcd->phy && !hdev->parent) | 4216 | if (hcd->phy && !hdev->parent) |
4217 | usb_phy_notify_connect(hcd->phy, udev->speed); | 4217 | usb_phy_notify_connect(hcd->phy, udev->speed); |
4218 | 4218 | ||
4219 | /* | 4219 | /* |
4220 | * Some SuperSpeed devices finish link training and attach to a | 4220 | * Some SuperSpeed devices finish link training and attach to a |
4221 | * SuperSpeed hub port, yet the device descriptor read back from | 4221 | * SuperSpeed hub port, yet the device descriptor read back from |
4222 | * them claims they aren't SuperSpeed devices. Warm-resetting the | 4222 | * them claims they aren't SuperSpeed devices. Warm-resetting the |
4223 | * port such a device is attached to can fix it. | 4223 | * port such a device is attached to can fix it. |
4224 | */ | 4224 | */ |
4225 | if ((udev->speed == USB_SPEED_SUPER) && | 4225 | if ((udev->speed == USB_SPEED_SUPER) && |
4226 | (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { | 4226 | (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { |
4227 | dev_err(&udev->dev, "got a wrong device descriptor, " | 4227 | dev_err(&udev->dev, "got a wrong device descriptor, " |
4228 | "warm reset device\n"); | 4228 | "warm reset device\n"); |
4229 | hub_port_reset(hub, port1, udev, | 4229 | hub_port_reset(hub, port1, udev, |
4230 | HUB_BH_RESET_TIME, true); | 4230 | HUB_BH_RESET_TIME, true); |
4231 | retval = -EINVAL; | 4231 | retval = -EINVAL; |
4232 | goto fail; | 4232 | goto fail; |
4233 | } | 4233 | } |
4234 | 4234 | ||
4235 | if (udev->descriptor.bMaxPacketSize0 == 0xff || | 4235 | if (udev->descriptor.bMaxPacketSize0 == 0xff || |
4236 | udev->speed == USB_SPEED_SUPER) | 4236 | udev->speed == USB_SPEED_SUPER) |
4237 | i = 512; | 4237 | i = 512; |
4238 | else | 4238 | else |
4239 | i = udev->descriptor.bMaxPacketSize0; | 4239 | i = udev->descriptor.bMaxPacketSize0; |
4240 | if (usb_endpoint_maxp(&udev->ep0.desc) != i) { | 4240 | if (usb_endpoint_maxp(&udev->ep0.desc) != i) { |
4241 | if (udev->speed == USB_SPEED_LOW || | 4241 | if (udev->speed == USB_SPEED_LOW || |
4242 | !(i == 8 || i == 16 || i == 32 || i == 64)) { | 4242 | !(i == 8 || i == 16 || i == 32 || i == 64)) { |
4243 | dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i); | 4243 | dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i); |
4244 | retval = -EMSGSIZE; | 4244 | retval = -EMSGSIZE; |
4245 | goto fail; | 4245 | goto fail; |
4246 | } | 4246 | } |
4247 | if (udev->speed == USB_SPEED_FULL) | 4247 | if (udev->speed == USB_SPEED_FULL) |
4248 | dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); | 4248 | dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); |
4249 | else | 4249 | else |
4250 | dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i); | 4250 | dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i); |
4251 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); | 4251 | udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); |
4252 | usb_ep0_reinit(udev); | 4252 | usb_ep0_reinit(udev); |
4253 | } | 4253 | } |
4254 | 4254 | ||
4255 | retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE); | 4255 | retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE); |
4256 | if (retval < (signed)sizeof(udev->descriptor)) { | 4256 | if (retval < (signed)sizeof(udev->descriptor)) { |
4257 | if (retval != -ENODEV) | 4257 | if (retval != -ENODEV) |
4258 | dev_err(&udev->dev, "device descriptor read/all, error %d\n", | 4258 | dev_err(&udev->dev, "device descriptor read/all, error %d\n", |
4259 | retval); | 4259 | retval); |
4260 | if (retval >= 0) | 4260 | if (retval >= 0) |
4261 | retval = -ENOMSG; | 4261 | retval = -ENOMSG; |
4262 | goto fail; | 4262 | goto fail; |
4263 | } | 4263 | } |
4264 | 4264 | ||
4265 | if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { | 4265 | if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { |
4266 | retval = usb_get_bos_descriptor(udev); | 4266 | retval = usb_get_bos_descriptor(udev); |
4267 | if (!retval) { | 4267 | if (!retval) { |
4268 | udev->lpm_capable = usb_device_supports_lpm(udev); | 4268 | udev->lpm_capable = usb_device_supports_lpm(udev); |
4269 | usb_set_lpm_parameters(udev); | 4269 | usb_set_lpm_parameters(udev); |
4270 | } | 4270 | } |
4271 | } | 4271 | } |
4272 | 4272 | ||
4273 | retval = 0; | 4273 | retval = 0; |
4274 | /* notify HCD that we have a device connected and addressed */ | 4274 | /* notify HCD that we have a device connected and addressed */ |
4275 | if (hcd->driver->update_device) | 4275 | if (hcd->driver->update_device) |
4276 | hcd->driver->update_device(hcd, udev); | 4276 | hcd->driver->update_device(hcd, udev); |
4277 | hub_set_initial_usb2_lpm_policy(udev); | 4277 | hub_set_initial_usb2_lpm_policy(udev); |
4278 | fail: | 4278 | fail: |
4279 | if (retval) { | 4279 | if (retval) { |
4280 | hub_port_disable(hub, port1, 0); | 4280 | hub_port_disable(hub, port1, 0); |
4281 | update_devnum(udev, devnum); /* for disconnect processing */ | 4281 | update_devnum(udev, devnum); /* for disconnect processing */ |
4282 | } | 4282 | } |
4283 | mutex_unlock(&usb_address0_mutex); | 4283 | mutex_unlock(&usb_address0_mutex); |
4284 | return retval; | 4284 | return retval; |
4285 | } | 4285 | } |
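The old-scheme/new-scheme retry logic referenced in the comment inside hub_port_init() is steered by macros and module parameters defined near the top of hub.c, outside this excerpt. A rough sketch of what they are assumed to expand to (names and expressions recalled, not taken from this diff):

	/* assumed definitions from earlier in drivers/usb/core/hub.c */
	static bool old_scheme_first;		/* module parameter, default 0 */
	static bool use_both_schemes = 1;	/* module parameter */

	#define GET_DESCRIPTOR_TRIES	2
	#define SET_ADDRESS_TRIES	2
	#define SET_CONFIG_TRIES	(2 * (use_both_schemes + 1))
	#define USE_NEW_SCHEME(i)	((i) / 2 == (int)old_scheme_first)

With those defaults, the first pair of enumeration attempts uses the Windows-style 64-byte GET_DESCRIPTOR first and the second pair falls back to issuing SET_ADDRESS first.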
4286 | 4286 | ||
4287 | static void | 4287 | static void |
4288 | check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) | 4288 | check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) |
4289 | { | 4289 | { |
4290 | struct usb_qualifier_descriptor *qual; | 4290 | struct usb_qualifier_descriptor *qual; |
4291 | int status; | 4291 | int status; |
4292 | 4292 | ||
4293 | qual = kmalloc (sizeof *qual, GFP_KERNEL); | 4293 | qual = kmalloc (sizeof *qual, GFP_KERNEL); |
4294 | if (qual == NULL) | 4294 | if (qual == NULL) |
4295 | return; | 4295 | return; |
4296 | 4296 | ||
4297 | status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0, | 4297 | status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0, |
4298 | qual, sizeof *qual); | 4298 | qual, sizeof *qual); |
4299 | if (status == sizeof *qual) { | 4299 | if (status == sizeof *qual) { |
4300 | dev_info(&udev->dev, "not running at top speed; " | 4300 | dev_info(&udev->dev, "not running at top speed; " |
4301 | "connect to a high speed hub\n"); | 4301 | "connect to a high speed hub\n"); |
4302 | /* hub LEDs are probably harder to miss than syslog */ | 4302 | /* hub LEDs are probably harder to miss than syslog */ |
4303 | if (hub->has_indicators) { | 4303 | if (hub->has_indicators) { |
4304 | hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; | 4304 | hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; |
4305 | schedule_delayed_work (&hub->leds, 0); | 4305 | schedule_delayed_work (&hub->leds, 0); |
4306 | } | 4306 | } |
4307 | } | 4307 | } |
4308 | kfree(qual); | 4308 | kfree(qual); |
4309 | } | 4309 | } |
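check_highspeed() probes the device_qualifier descriptor (USB 2.0 spec, 9.6.2): a full-speed-only device has no such descriptor and stalls the request, so a successful full-length read means the device is high-speed capable yet currently running at full speed, which is exactly what the dev_info() message reports. For reference, the descriptor layout as declared in <uapi/linux/usb/ch9.h> is approximately:

	struct usb_qualifier_descriptor {
		__u8   bLength;
		__u8   bDescriptorType;		/* USB_DT_DEVICE_QUALIFIER */
		__le16 bcdUSB;
		__u8   bDeviceClass;
		__u8   bDeviceSubClass;
		__u8   bDeviceProtocol;
		__u8   bMaxPacketSize0;
		__u8   bNumConfigurations;
		__u8   bRESERVED;
	} __attribute__ ((packed));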
4310 | 4310 | ||
4311 | static unsigned | 4311 | static unsigned |
4312 | hub_power_remaining (struct usb_hub *hub) | 4312 | hub_power_remaining (struct usb_hub *hub) |
4313 | { | 4313 | { |
4314 | struct usb_device *hdev = hub->hdev; | 4314 | struct usb_device *hdev = hub->hdev; |
4315 | int remaining; | 4315 | int remaining; |
4316 | int port1; | 4316 | int port1; |
4317 | 4317 | ||
4318 | if (!hub->limited_power) | 4318 | if (!hub->limited_power) |
4319 | return 0; | 4319 | return 0; |
4320 | 4320 | ||
4321 | remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent; | 4321 | remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent; |
4322 | for (port1 = 1; port1 <= hdev->maxchild; ++port1) { | 4322 | for (port1 = 1; port1 <= hdev->maxchild; ++port1) { |
4323 | struct usb_device *udev = hub->ports[port1 - 1]->child; | 4323 | struct usb_device *udev = hub->ports[port1 - 1]->child; |
4324 | int delta; | 4324 | int delta; |
4325 | unsigned unit_load; | 4325 | unsigned unit_load; |
4326 | 4326 | ||
4327 | if (!udev) | 4327 | if (!udev) |
4328 | continue; | 4328 | continue; |
4329 | if (hub_is_superspeed(udev)) | 4329 | if (hub_is_superspeed(udev)) |
4330 | unit_load = 150; | 4330 | unit_load = 150; |
4331 | else | 4331 | else |
4332 | unit_load = 100; | 4332 | unit_load = 100; |
4333 | 4333 | ||
4334 | /* | 4334 | /* |
4335 | * Unconfigured devices may not use more than one unit load, | 4335 | * Unconfigured devices may not use more than one unit load, |
4336 | * or 8mA for OTG ports | 4336 | * or 8mA for OTG ports |
4337 | */ | 4337 | */ |
4338 | if (udev->actconfig) | 4338 | if (udev->actconfig) |
4339 | delta = usb_get_max_power(udev, udev->actconfig); | 4339 | delta = usb_get_max_power(udev, udev->actconfig); |
4340 | else if (port1 != udev->bus->otg_port || hdev->parent) | 4340 | else if (port1 != udev->bus->otg_port || hdev->parent) |
4341 | delta = unit_load; | 4341 | delta = unit_load; |
4342 | else | 4342 | else |
4343 | delta = 8; | 4343 | delta = 8; |
4344 | if (delta > hub->mA_per_port) | 4344 | if (delta > hub->mA_per_port) |
4345 | dev_warn(&udev->dev, | 4345 | dev_warn(&udev->dev, |
4346 | "%dmA is over %umA budget for port %d!\n", | 4346 | "%dmA is over %umA budget for port %d!\n", |
4347 | delta, hub->mA_per_port, port1); | 4347 | delta, hub->mA_per_port, port1); |
4348 | remaining -= delta; | 4348 | remaining -= delta; |
4349 | } | 4349 | } |
4350 | if (remaining < 0) { | 4350 | if (remaining < 0) { |
4351 | dev_warn(hub->intfdev, "%dmA over power budget!\n", | 4351 | dev_warn(hub->intfdev, "%dmA over power budget!\n", |
4352 | -remaining); | 4352 | -remaining); |
4353 | remaining = 0; | 4353 | remaining = 0; |
4354 | } | 4354 | } |
4355 | return remaining; | 4355 | return remaining; |
4356 | } | 4356 | } |
4357 | 4357 | ||
4358 | /* Handle physical or logical connection change events. | 4358 | /* Handle physical or logical connection change events. |
4359 | * This routine is called when: | 4359 | * This routine is called when: |
4360 | * a port connection-change occurs; | 4360 | * a port connection-change occurs; |
4361 | * a port enable-change occurs (often caused by EMI); | 4361 | * a port enable-change occurs (often caused by EMI); |
4362 | * usb_reset_and_verify_device() encounters changed descriptors (as from | 4362 | * usb_reset_and_verify_device() encounters changed descriptors (as from |
4363 | * a firmware download) | 4363 | * a firmware download) |
4364 | * caller already locked the hub | 4364 | * caller already locked the hub |
4365 | */ | 4365 | */ |
4366 | static void hub_port_connect_change(struct usb_hub *hub, int port1, | 4366 | static void hub_port_connect_change(struct usb_hub *hub, int port1, |
4367 | u16 portstatus, u16 portchange) | 4367 | u16 portstatus, u16 portchange) |
4368 | { | 4368 | { |
4369 | struct usb_device *hdev = hub->hdev; | 4369 | struct usb_device *hdev = hub->hdev; |
4370 | struct device *hub_dev = hub->intfdev; | 4370 | struct device *hub_dev = hub->intfdev; |
4371 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); | 4371 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); |
4372 | unsigned wHubCharacteristics = | 4372 | unsigned wHubCharacteristics = |
4373 | le16_to_cpu(hub->descriptor->wHubCharacteristics); | 4373 | le16_to_cpu(hub->descriptor->wHubCharacteristics); |
4374 | struct usb_device *udev; | 4374 | struct usb_device *udev; |
4375 | int status, i; | 4375 | int status, i; |
4376 | unsigned unit_load; | 4376 | unsigned unit_load; |
4377 | 4377 | ||
4378 | dev_dbg (hub_dev, | 4378 | dev_dbg (hub_dev, |
4379 | "port %d, status %04x, change %04x, %s\n", | 4379 | "port %d, status %04x, change %04x, %s\n", |
4380 | port1, portstatus, portchange, portspeed(hub, portstatus)); | 4380 | port1, portstatus, portchange, portspeed(hub, portstatus)); |
4381 | 4381 | ||
4382 | if (hub->has_indicators) { | 4382 | if (hub->has_indicators) { |
4383 | set_port_led(hub, port1, HUB_LED_AUTO); | 4383 | set_port_led(hub, port1, HUB_LED_AUTO); |
4384 | hub->indicator[port1-1] = INDICATOR_AUTO; | 4384 | hub->indicator[port1-1] = INDICATOR_AUTO; |
4385 | } | 4385 | } |
4386 | 4386 | ||
4387 | #ifdef CONFIG_USB_OTG | 4387 | #ifdef CONFIG_USB_OTG |
4388 | /* during HNP, don't repeat the debounce */ | 4388 | /* during HNP, don't repeat the debounce */ |
4389 | if (hdev->bus->is_b_host) | 4389 | if (hdev->bus->is_b_host) |
4390 | portchange &= ~(USB_PORT_STAT_C_CONNECTION | | 4390 | portchange &= ~(USB_PORT_STAT_C_CONNECTION | |
4391 | USB_PORT_STAT_C_ENABLE); | 4391 | USB_PORT_STAT_C_ENABLE); |
4392 | #endif | 4392 | #endif |
4393 | 4393 | ||
4394 | /* Try to resuscitate an existing device */ | 4394 | /* Try to resuscitate an existing device */ |
4395 | udev = hub->ports[port1 - 1]->child; | 4395 | udev = hub->ports[port1 - 1]->child; |
4396 | if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && | 4396 | if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && |
4397 | udev->state != USB_STATE_NOTATTACHED) { | 4397 | udev->state != USB_STATE_NOTATTACHED) { |
4398 | usb_lock_device(udev); | 4398 | usb_lock_device(udev); |
4399 | if (portstatus & USB_PORT_STAT_ENABLE) { | 4399 | if (portstatus & USB_PORT_STAT_ENABLE) { |
4400 | status = 0; /* Nothing to do */ | 4400 | status = 0; /* Nothing to do */ |
4401 | 4401 | ||
4402 | #ifdef CONFIG_PM_RUNTIME | 4402 | #ifdef CONFIG_PM_RUNTIME |
4403 | } else if (udev->state == USB_STATE_SUSPENDED && | 4403 | } else if (udev->state == USB_STATE_SUSPENDED && |
4404 | udev->persist_enabled) { | 4404 | udev->persist_enabled) { |
4405 | /* For a suspended device, treat this as a | 4405 | /* For a suspended device, treat this as a |
4406 | * remote wakeup event. | 4406 | * remote wakeup event. |
4407 | */ | 4407 | */ |
4408 | status = usb_remote_wakeup(udev); | 4408 | status = usb_remote_wakeup(udev); |
4409 | #endif | 4409 | #endif |
4410 | 4410 | ||
4411 | } else { | 4411 | } else { |
4412 | status = -ENODEV; /* Don't resuscitate */ | 4412 | status = -ENODEV; /* Don't resuscitate */ |
4413 | } | 4413 | } |
4414 | usb_unlock_device(udev); | 4414 | usb_unlock_device(udev); |
4415 | 4415 | ||
4416 | if (status == 0) { | 4416 | if (status == 0) { |
4417 | clear_bit(port1, hub->change_bits); | 4417 | clear_bit(port1, hub->change_bits); |
4418 | return; | 4418 | return; |
4419 | } | 4419 | } |
4420 | } | 4420 | } |
4421 | 4421 | ||
4422 | /* Disconnect any existing devices under this port */ | 4422 | /* Disconnect any existing devices under this port */ |
4423 | if (udev) { | 4423 | if (udev) { |
4424 | if (hcd->phy && !hdev->parent && | 4424 | if (hcd->phy && !hdev->parent && |
4425 | !(portstatus & USB_PORT_STAT_CONNECTION)) | 4425 | !(portstatus & USB_PORT_STAT_CONNECTION)) |
4426 | usb_phy_notify_disconnect(hcd->phy, udev->speed); | 4426 | usb_phy_notify_disconnect(hcd->phy, udev->speed); |
4427 | usb_disconnect(&hub->ports[port1 - 1]->child); | 4427 | usb_disconnect(&hub->ports[port1 - 1]->child); |
4428 | } | 4428 | } |
4429 | clear_bit(port1, hub->change_bits); | 4429 | clear_bit(port1, hub->change_bits); |
4430 | 4430 | ||
4431 | /* We can forget about a "removed" device when there's a physical | 4431 | /* We can forget about a "removed" device when there's a physical |
4432 | * disconnect or the connect status changes. | 4432 | * disconnect or the connect status changes. |
4433 | */ | 4433 | */ |
4434 | if (!(portstatus & USB_PORT_STAT_CONNECTION) || | 4434 | if (!(portstatus & USB_PORT_STAT_CONNECTION) || |
4435 | (portchange & USB_PORT_STAT_C_CONNECTION)) | 4435 | (portchange & USB_PORT_STAT_C_CONNECTION)) |
4436 | clear_bit(port1, hub->removed_bits); | 4436 | clear_bit(port1, hub->removed_bits); |
4437 | 4437 | ||
4438 | if (portchange & (USB_PORT_STAT_C_CONNECTION | | 4438 | if (portchange & (USB_PORT_STAT_C_CONNECTION | |
4439 | USB_PORT_STAT_C_ENABLE)) { | 4439 | USB_PORT_STAT_C_ENABLE)) { |
4440 | status = hub_port_debounce_be_stable(hub, port1); | 4440 | status = hub_port_debounce_be_stable(hub, port1); |
4441 | if (status < 0) { | 4441 | if (status < 0) { |
4442 | if (status != -ENODEV && printk_ratelimit()) | 4442 | if (status != -ENODEV && printk_ratelimit()) |
4443 | dev_err(hub_dev, "connect-debounce failed, " | 4443 | dev_err(hub_dev, "connect-debounce failed, " |
4444 | "port %d disabled\n", port1); | 4444 | "port %d disabled\n", port1); |
4445 | portstatus &= ~USB_PORT_STAT_CONNECTION; | 4445 | portstatus &= ~USB_PORT_STAT_CONNECTION; |
4446 | } else { | 4446 | } else { |
4447 | portstatus = status; | 4447 | portstatus = status; |
4448 | } | 4448 | } |
4449 | } | 4449 | } |
4450 | 4450 | ||
4451 | /* Return now if debouncing failed or nothing is connected or | 4451 | /* Return now if debouncing failed or nothing is connected or |
4452 | * the device was "removed". | 4452 | * the device was "removed". |
4453 | */ | 4453 | */ |
4454 | if (!(portstatus & USB_PORT_STAT_CONNECTION) || | 4454 | if (!(portstatus & USB_PORT_STAT_CONNECTION) || |
4455 | test_bit(port1, hub->removed_bits)) { | 4455 | test_bit(port1, hub->removed_bits)) { |
4456 | 4456 | ||
4457 | /* maybe switch power back on (e.g. root hub was reset) */ | 4457 | /* maybe switch power back on (e.g. root hub was reset) */ |
4458 | if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 | 4458 | if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 |
4459 | && !port_is_power_on(hub, portstatus)) | 4459 | && !port_is_power_on(hub, portstatus)) |
4460 | set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); | 4460 | set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); |
4461 | 4461 | ||
4462 | if (portstatus & USB_PORT_STAT_ENABLE) | 4462 | if (portstatus & USB_PORT_STAT_ENABLE) |
4463 | goto done; | 4463 | goto done; |
4464 | return; | 4464 | return; |
4465 | } | 4465 | } |
4466 | if (hub_is_superspeed(hub->hdev)) | 4466 | if (hub_is_superspeed(hub->hdev)) |
4467 | unit_load = 150; | 4467 | unit_load = 150; |
4468 | else | 4468 | else |
4469 | unit_load = 100; | 4469 | unit_load = 100; |
4470 | 4470 | ||
4471 | status = 0; | 4471 | status = 0; |
4472 | for (i = 0; i < SET_CONFIG_TRIES; i++) { | 4472 | for (i = 0; i < SET_CONFIG_TRIES; i++) { |
4473 | 4473 | ||
4474 | /* reallocate for each attempt, since references | 4474 | /* reallocate for each attempt, since references |
4475 | * to the previous one can escape in various ways | 4475 | * to the previous one can escape in various ways |
4476 | */ | 4476 | */ |
4477 | udev = usb_alloc_dev(hdev, hdev->bus, port1); | 4477 | udev = usb_alloc_dev(hdev, hdev->bus, port1); |
4478 | if (!udev) { | 4478 | if (!udev) { |
4479 | dev_err (hub_dev, | 4479 | dev_err (hub_dev, |
4480 | "couldn't allocate port %d usb_device\n", | 4480 | "couldn't allocate port %d usb_device\n", |
4481 | port1); | 4481 | port1); |
4482 | goto done; | 4482 | goto done; |
4483 | } | 4483 | } |
4484 | 4484 | ||
4485 | usb_set_device_state(udev, USB_STATE_POWERED); | 4485 | usb_set_device_state(udev, USB_STATE_POWERED); |
4486 | udev->bus_mA = hub->mA_per_port; | 4486 | udev->bus_mA = hub->mA_per_port; |
4487 | udev->level = hdev->level + 1; | 4487 | udev->level = hdev->level + 1; |
4488 | udev->wusb = hub_is_wusb(hub); | 4488 | udev->wusb = hub_is_wusb(hub); |
4489 | 4489 | ||
4490 | /* Only USB 3.0 devices are connected to SuperSpeed hubs. */ | 4490 | /* Only USB 3.0 devices are connected to SuperSpeed hubs. */ |
4491 | if (hub_is_superspeed(hub->hdev)) | 4491 | if (hub_is_superspeed(hub->hdev)) |
4492 | udev->speed = USB_SPEED_SUPER; | 4492 | udev->speed = USB_SPEED_SUPER; |
4493 | else | 4493 | else |
4494 | udev->speed = USB_SPEED_UNKNOWN; | 4494 | udev->speed = USB_SPEED_UNKNOWN; |
4495 | 4495 | ||
4496 | choose_devnum(udev); | 4496 | choose_devnum(udev); |
4497 | if (udev->devnum <= 0) { | 4497 | if (udev->devnum <= 0) { |
4498 | status = -ENOTCONN; /* Don't retry */ | 4498 | status = -ENOTCONN; /* Don't retry */ |
4499 | goto loop; | 4499 | goto loop; |
4500 | } | 4500 | } |
4501 | 4501 | ||
4502 | /* reset (non-USB 3.0 devices) and get descriptor */ | 4502 | /* reset (non-USB 3.0 devices) and get descriptor */ |
4503 | status = hub_port_init(hub, udev, port1, i); | 4503 | status = hub_port_init(hub, udev, port1, i); |
4504 | if (status < 0) | 4504 | if (status < 0) |
4505 | goto loop; | 4505 | goto loop; |
4506 | 4506 | ||
4507 | usb_detect_quirks(udev); | 4507 | usb_detect_quirks(udev); |
4508 | if (udev->quirks & USB_QUIRK_DELAY_INIT) | 4508 | if (udev->quirks & USB_QUIRK_DELAY_INIT) |
4509 | msleep(1000); | 4509 | msleep(1000); |
4510 | 4510 | ||
4511 | /* consecutive bus-powered hubs aren't reliable; they can | 4511 | /* consecutive bus-powered hubs aren't reliable; they can |
4512 | * violate the voltage drop budget. If the new child has | 4512 | * violate the voltage drop budget. If the new child has |
4513 | * a "powered" LED, users should notice we didn't enable it | 4513 | * a "powered" LED, users should notice we didn't enable it |
4514 | * (without reading syslog), even without per-port LEDs | 4514 | * (without reading syslog), even without per-port LEDs |
4515 | * on the parent. | 4515 | * on the parent. |
4516 | */ | 4516 | */ |
4517 | if (udev->descriptor.bDeviceClass == USB_CLASS_HUB | 4517 | if (udev->descriptor.bDeviceClass == USB_CLASS_HUB |
4518 | && udev->bus_mA <= unit_load) { | 4518 | && udev->bus_mA <= unit_load) { |
4519 | u16 devstat; | 4519 | u16 devstat; |
4520 | 4520 | ||
4521 | status = usb_get_status(udev, USB_RECIP_DEVICE, 0, | 4521 | status = usb_get_status(udev, USB_RECIP_DEVICE, 0, |
4522 | &devstat); | 4522 | &devstat); |
4523 | if (status) { | 4523 | if (status) { |
4524 | dev_dbg(&udev->dev, "get status %d ?\n", status); | 4524 | dev_dbg(&udev->dev, "get status %d ?\n", status); |
4525 | goto loop_disable; | 4525 | goto loop_disable; |
4526 | } | 4526 | } |
4527 | if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) { | 4527 | if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) { |
4528 | dev_err(&udev->dev, | 4528 | dev_err(&udev->dev, |
4529 | "can't connect bus-powered hub " | 4529 | "can't connect bus-powered hub " |
4530 | "to this port\n"); | 4530 | "to this port\n"); |
4531 | if (hub->has_indicators) { | 4531 | if (hub->has_indicators) { |
4532 | hub->indicator[port1-1] = | 4532 | hub->indicator[port1-1] = |
4533 | INDICATOR_AMBER_BLINK; | 4533 | INDICATOR_AMBER_BLINK; |
4534 | schedule_delayed_work (&hub->leds, 0); | 4534 | schedule_delayed_work (&hub->leds, 0); |
4535 | } | 4535 | } |
4536 | status = -ENOTCONN; /* Don't retry */ | 4536 | status = -ENOTCONN; /* Don't retry */ |
4537 | goto loop_disable; | 4537 | goto loop_disable; |
4538 | } | 4538 | } |
4539 | } | 4539 | } |
4540 | 4540 | ||
4541 | /* check for devices running slower than they could */ | 4541 | /* check for devices running slower than they could */ |
4542 | if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200 | 4542 | if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200 |
4543 | && udev->speed == USB_SPEED_FULL | 4543 | && udev->speed == USB_SPEED_FULL |
4544 | && highspeed_hubs != 0) | 4544 | && highspeed_hubs != 0) |
4545 | check_highspeed (hub, udev, port1); | 4545 | check_highspeed (hub, udev, port1); |
4546 | 4546 | ||
4547 | /* Store the parent's children[] pointer. At this point | 4547 | /* Store the parent's children[] pointer. At this point |
4548 | * udev becomes globally accessible, although presumably | 4548 | * udev becomes globally accessible, although presumably |
4549 | * no one will look at it until hdev is unlocked. | 4549 | * no one will look at it until hdev is unlocked. |
4550 | */ | 4550 | */ |
4551 | status = 0; | 4551 | status = 0; |
4552 | 4552 | ||
4553 | /* We mustn't add new devices if the parent hub has | 4553 | /* We mustn't add new devices if the parent hub has |
4554 | * been disconnected; we would race with the | 4554 | * been disconnected; we would race with the |
4555 | * recursively_mark_NOTATTACHED() routine. | 4555 | * recursively_mark_NOTATTACHED() routine. |
4556 | */ | 4556 | */ |
4557 | spin_lock_irq(&device_state_lock); | 4557 | spin_lock_irq(&device_state_lock); |
4558 | if (hdev->state == USB_STATE_NOTATTACHED) | 4558 | if (hdev->state == USB_STATE_NOTATTACHED) |
4559 | status = -ENOTCONN; | 4559 | status = -ENOTCONN; |
4560 | else | 4560 | else |
4561 | hub->ports[port1 - 1]->child = udev; | 4561 | hub->ports[port1 - 1]->child = udev; |
4562 | spin_unlock_irq(&device_state_lock); | 4562 | spin_unlock_irq(&device_state_lock); |
4563 | 4563 | ||
4564 | /* Run it through the hoops (find a driver, etc) */ | 4564 | /* Run it through the hoops (find a driver, etc) */ |
4565 | if (!status) { | 4565 | if (!status) { |
4566 | status = usb_new_device(udev); | 4566 | status = usb_new_device(udev); |
4567 | if (status) { | 4567 | if (status) { |
4568 | spin_lock_irq(&device_state_lock); | 4568 | spin_lock_irq(&device_state_lock); |
4569 | hub->ports[port1 - 1]->child = NULL; | 4569 | hub->ports[port1 - 1]->child = NULL; |
4570 | spin_unlock_irq(&device_state_lock); | 4570 | spin_unlock_irq(&device_state_lock); |
4571 | } | 4571 | } |
4572 | } | 4572 | } |
4573 | 4573 | ||
4574 | if (status) | 4574 | if (status) |
4575 | goto loop_disable; | 4575 | goto loop_disable; |
4576 | 4576 | ||
4577 | status = hub_power_remaining(hub); | 4577 | status = hub_power_remaining(hub); |
4578 | if (status) | 4578 | if (status) |
4579 | dev_dbg(hub_dev, "%dmA power budget left\n", status); | 4579 | dev_dbg(hub_dev, "%dmA power budget left\n", status); |
4580 | 4580 | ||
4581 | return; | 4581 | return; |
4582 | 4582 | ||
4583 | loop_disable: | 4583 | loop_disable: |
4584 | hub_port_disable(hub, port1, 1); | 4584 | hub_port_disable(hub, port1, 1); |
4585 | loop: | 4585 | loop: |
4586 | usb_ep0_reinit(udev); | 4586 | usb_ep0_reinit(udev); |
4587 | release_devnum(udev); | 4587 | release_devnum(udev); |
4588 | hub_free_dev(udev); | 4588 | hub_free_dev(udev); |
4589 | usb_put_dev(udev); | 4589 | usb_put_dev(udev); |
4590 | if ((status == -ENOTCONN) || (status == -ENOTSUPP)) | 4590 | if ((status == -ENOTCONN) || (status == -ENOTSUPP)) |
4591 | break; | 4591 | break; |
4592 | } | 4592 | } |
4593 | if (hub->hdev->parent || | 4593 | if (hub->hdev->parent || |
4594 | !hcd->driver->port_handed_over || | 4594 | !hcd->driver->port_handed_over || |
4595 | !(hcd->driver->port_handed_over)(hcd, port1)) { | 4595 | !(hcd->driver->port_handed_over)(hcd, port1)) { |
4596 | if (status != -ENOTCONN && status != -ENODEV) | 4596 | if (status != -ENOTCONN && status != -ENODEV) |
4597 | dev_err(hub_dev, "unable to enumerate USB device on port %d\n", | 4597 | dev_err(hub_dev, "unable to enumerate USB device on port %d\n", |
4598 | port1); | 4598 | port1); |
4599 | } | 4599 | } |
4600 | 4600 | ||
4601 | done: | 4601 | done: |
4602 | hub_port_disable(hub, port1, 1); | 4602 | hub_port_disable(hub, port1, 1); |
4603 | if (hcd->driver->relinquish_port && !hub->hdev->parent) | 4603 | if (hcd->driver->relinquish_port && !hub->hdev->parent) |
4604 | hcd->driver->relinquish_port(hcd, port1); | 4604 | hcd->driver->relinquish_port(hcd, port1); |
4605 | } | 4605 | } |
4606 | 4606 | ||
4607 | /* Returns 1 if there was a remote wakeup and a connect status change. */ | 4607 | /* Returns 1 if there was a remote wakeup and a connect status change. */ |
4608 | static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, | 4608 | static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, |
4609 | u16 portstatus, u16 portchange) | 4609 | u16 portstatus, u16 portchange) |
4610 | { | 4610 | { |
4611 | struct usb_device *hdev; | 4611 | struct usb_device *hdev; |
4612 | struct usb_device *udev; | 4612 | struct usb_device *udev; |
4613 | int connect_change = 0; | 4613 | int connect_change = 0; |
4614 | int ret; | 4614 | int ret; |
4615 | 4615 | ||
4616 | hdev = hub->hdev; | 4616 | hdev = hub->hdev; |
4617 | udev = hub->ports[port - 1]->child; | 4617 | udev = hub->ports[port - 1]->child; |
4618 | if (!hub_is_superspeed(hdev)) { | 4618 | if (!hub_is_superspeed(hdev)) { |
4619 | if (!(portchange & USB_PORT_STAT_C_SUSPEND)) | 4619 | if (!(portchange & USB_PORT_STAT_C_SUSPEND)) |
4620 | return 0; | 4620 | return 0; |
4621 | usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND); | 4621 | usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND); |
4622 | } else { | 4622 | } else { |
4623 | if (!udev || udev->state != USB_STATE_SUSPENDED || | 4623 | if (!udev || udev->state != USB_STATE_SUSPENDED || |
4624 | (portstatus & USB_PORT_STAT_LINK_STATE) != | 4624 | (portstatus & USB_PORT_STAT_LINK_STATE) != |
4625 | USB_SS_PORT_LS_U0) | 4625 | USB_SS_PORT_LS_U0) |
4626 | return 0; | 4626 | return 0; |
4627 | } | 4627 | } |
4628 | 4628 | ||
4629 | if (udev) { | 4629 | if (udev) { |
4630 | /* TRSMRCY = 10 msec */ | 4630 | /* TRSMRCY = 10 msec */ |
4631 | msleep(10); | 4631 | msleep(10); |
4632 | 4632 | ||
4633 | usb_lock_device(udev); | 4633 | usb_lock_device(udev); |
4634 | ret = usb_remote_wakeup(udev); | 4634 | ret = usb_remote_wakeup(udev); |
4635 | usb_unlock_device(udev); | 4635 | usb_unlock_device(udev); |
4636 | if (ret < 0) | 4636 | if (ret < 0) |
4637 | connect_change = 1; | 4637 | connect_change = 1; |
4638 | } else { | 4638 | } else { |
4639 | ret = -ENODEV; | 4639 | ret = -ENODEV; |
4640 | hub_port_disable(hub, port, 1); | 4640 | hub_port_disable(hub, port, 1); |
4641 | } | 4641 | } |
4642 | dev_dbg(hub->intfdev, "resume on port %d, status %d\n", | 4642 | dev_dbg(hub->intfdev, "resume on port %d, status %d\n", |
4643 | port, ret); | 4643 | port, ret); |
4644 | return connect_change; | 4644 | return connect_change; |
4645 | } | 4645 | } |
4646 | 4646 | ||
4647 | static void hub_events(void) | 4647 | static void hub_events(void) |
4648 | { | 4648 | { |
4649 | struct list_head *tmp; | 4649 | struct list_head *tmp; |
4650 | struct usb_device *hdev; | 4650 | struct usb_device *hdev; |
4651 | struct usb_interface *intf; | 4651 | struct usb_interface *intf; |
4652 | struct usb_hub *hub; | 4652 | struct usb_hub *hub; |
4653 | struct device *hub_dev; | 4653 | struct device *hub_dev; |
4654 | u16 hubstatus; | 4654 | u16 hubstatus; |
4655 | u16 hubchange; | 4655 | u16 hubchange; |
4656 | u16 portstatus; | 4656 | u16 portstatus; |
4657 | u16 portchange; | 4657 | u16 portchange; |
4658 | int i, ret; | 4658 | int i, ret; |
4659 | int connect_change, wakeup_change; | 4659 | int connect_change, wakeup_change; |
4660 | 4660 | ||
4661 | /* | 4661 | /* |
4662 | * We restart the list every time to avoid a deadlock with | 4662 | * We restart the list every time to avoid a deadlock with |
4663 | * deleting hubs downstream from this one. This should be | 4663 | * deleting hubs downstream from this one. This should be |
4664 | * safe since we delete the hub from the event list. | 4664 | * safe since we delete the hub from the event list. |
4665 | * Not the most efficient, but avoids deadlocks. | 4665 | * Not the most efficient, but avoids deadlocks. |
4666 | */ | 4666 | */ |
4667 | while (1) { | 4667 | while (1) { |
4668 | 4668 | ||
4669 | /* Grab the first entry at the beginning of the list */ | 4669 | /* Grab the first entry at the beginning of the list */ |
4670 | spin_lock_irq(&hub_event_lock); | 4670 | spin_lock_irq(&hub_event_lock); |
4671 | if (list_empty(&hub_event_list)) { | 4671 | if (list_empty(&hub_event_list)) { |
4672 | spin_unlock_irq(&hub_event_lock); | 4672 | spin_unlock_irq(&hub_event_lock); |
4673 | break; | 4673 | break; |
4674 | } | 4674 | } |
4675 | 4675 | ||
4676 | tmp = hub_event_list.next; | 4676 | tmp = hub_event_list.next; |
4677 | list_del_init(tmp); | 4677 | list_del_init(tmp); |
4678 | 4678 | ||
4679 | hub = list_entry(tmp, struct usb_hub, event_list); | 4679 | hub = list_entry(tmp, struct usb_hub, event_list); |
4680 | kref_get(&hub->kref); | 4680 | kref_get(&hub->kref); |
4681 | spin_unlock_irq(&hub_event_lock); | 4681 | spin_unlock_irq(&hub_event_lock); |
4682 | 4682 | ||
4683 | hdev = hub->hdev; | 4683 | hdev = hub->hdev; |
4684 | hub_dev = hub->intfdev; | 4684 | hub_dev = hub->intfdev; |
4685 | intf = to_usb_interface(hub_dev); | 4685 | intf = to_usb_interface(hub_dev); |
4686 | dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", | 4686 | dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", |
4687 | hdev->state, hdev->maxchild, | 4687 | hdev->state, hdev->maxchild, |
4688 | /* NOTE: expects max 15 ports... */ | 4688 | /* NOTE: expects max 15 ports... */ |
4689 | (u16) hub->change_bits[0], | 4689 | (u16) hub->change_bits[0], |
4690 | (u16) hub->event_bits[0]); | 4690 | (u16) hub->event_bits[0]); |
4691 | 4691 | ||
4692 | /* Lock the device, then check to see if we were | 4692 | /* Lock the device, then check to see if we were |
4693 | * disconnected while waiting for the lock to succeed. */ | 4693 | * disconnected while waiting for the lock to succeed. */ |
4694 | usb_lock_device(hdev); | 4694 | usb_lock_device(hdev); |
4695 | if (unlikely(hub->disconnected)) | 4695 | if (unlikely(hub->disconnected)) |
4696 | goto loop_disconnected; | 4696 | goto loop_disconnected; |
4697 | 4697 | ||
4698 | /* If the hub has died, clean up after it */ | 4698 | /* If the hub has died, clean up after it */ |
4699 | if (hdev->state == USB_STATE_NOTATTACHED) { | 4699 | if (hdev->state == USB_STATE_NOTATTACHED) { |
4700 | hub->error = -ENODEV; | 4700 | hub->error = -ENODEV; |
4701 | hub_quiesce(hub, HUB_DISCONNECT); | 4701 | hub_quiesce(hub, HUB_DISCONNECT); |
4702 | goto loop; | 4702 | goto loop; |
4703 | } | 4703 | } |
4704 | 4704 | ||
4705 | /* Autoresume */ | 4705 | /* Autoresume */ |
4706 | ret = usb_autopm_get_interface(intf); | 4706 | ret = usb_autopm_get_interface(intf); |
4707 | if (ret) { | 4707 | if (ret) { |
4708 | dev_dbg(hub_dev, "Can't autoresume: %d\n", ret); | 4708 | dev_dbg(hub_dev, "Can't autoresume: %d\n", ret); |
4709 | goto loop; | 4709 | goto loop; |
4710 | } | 4710 | } |
4711 | 4711 | ||
4712 | /* If this is an inactive hub, do nothing */ | 4712 | /* If this is an inactive hub, do nothing */ |
4713 | if (hub->quiescing) | 4713 | if (hub->quiescing) |
4714 | goto loop_autopm; | 4714 | goto loop_autopm; |
4715 | 4715 | ||
4716 | if (hub->error) { | 4716 | if (hub->error) { |
4717 | dev_dbg (hub_dev, "resetting for error %d\n", | 4717 | dev_dbg (hub_dev, "resetting for error %d\n", |
4718 | hub->error); | 4718 | hub->error); |
4719 | 4719 | ||
4720 | ret = usb_reset_device(hdev); | 4720 | ret = usb_reset_device(hdev); |
4721 | if (ret) { | 4721 | if (ret) { |
4722 | dev_dbg (hub_dev, | 4722 | dev_dbg (hub_dev, |
4723 | "error resetting hub: %d\n", ret); | 4723 | "error resetting hub: %d\n", ret); |
4724 | goto loop_autopm; | 4724 | goto loop_autopm; |
4725 | } | 4725 | } |
4726 | 4726 | ||
4727 | hub->nerrors = 0; | 4727 | hub->nerrors = 0; |
4728 | hub->error = 0; | 4728 | hub->error = 0; |
4729 | } | 4729 | } |
4730 | 4730 | ||
4731 | /* deal with port status changes */ | 4731 | /* deal with port status changes */ |
4732 | for (i = 1; i <= hdev->maxchild; i++) { | 4732 | for (i = 1; i <= hdev->maxchild; i++) { |
4733 | if (test_bit(i, hub->busy_bits)) | 4733 | if (test_bit(i, hub->busy_bits)) |
4734 | continue; | 4734 | continue; |
4735 | connect_change = test_bit(i, hub->change_bits); | 4735 | connect_change = test_bit(i, hub->change_bits); |
4736 | wakeup_change = test_and_clear_bit(i, hub->wakeup_bits); | 4736 | wakeup_change = test_and_clear_bit(i, hub->wakeup_bits); |
4737 | if (!test_and_clear_bit(i, hub->event_bits) && | 4737 | if (!test_and_clear_bit(i, hub->event_bits) && |
4738 | !connect_change && !wakeup_change) | 4738 | !connect_change && !wakeup_change) |
4739 | continue; | 4739 | continue; |
4740 | 4740 | ||
4741 | ret = hub_port_status(hub, i, | 4741 | ret = hub_port_status(hub, i, |
4742 | &portstatus, &portchange); | 4742 | &portstatus, &portchange); |
4743 | if (ret < 0) | 4743 | if (ret < 0) |
4744 | continue; | 4744 | continue; |
4745 | 4745 | ||
4746 | if (portchange & USB_PORT_STAT_C_CONNECTION) { | 4746 | if (portchange & USB_PORT_STAT_C_CONNECTION) { |
4747 | usb_clear_port_feature(hdev, i, | 4747 | usb_clear_port_feature(hdev, i, |
4748 | USB_PORT_FEAT_C_CONNECTION); | 4748 | USB_PORT_FEAT_C_CONNECTION); |
4749 | connect_change = 1; | 4749 | connect_change = 1; |
4750 | } | 4750 | } |
4751 | 4751 | ||
4752 | if (portchange & USB_PORT_STAT_C_ENABLE) { | 4752 | if (portchange & USB_PORT_STAT_C_ENABLE) { |
4753 | if (!connect_change) | 4753 | if (!connect_change) |
4754 | dev_dbg (hub_dev, | 4754 | dev_dbg (hub_dev, |
4755 | "port %d enable change, " | 4755 | "port %d enable change, " |
4756 | "status %08x\n", | 4756 | "status %08x\n", |
4757 | i, portstatus); | 4757 | i, portstatus); |
4758 | usb_clear_port_feature(hdev, i, | 4758 | usb_clear_port_feature(hdev, i, |
4759 | USB_PORT_FEAT_C_ENABLE); | 4759 | USB_PORT_FEAT_C_ENABLE); |
4760 | 4760 | ||
4761 | /* | 4761 | /* |
4762 | * EM interference sometimes causes badly | 4762 | * EM interference sometimes causes badly |
4763 | * shielded USB devices to be shut down by | 4763 | * shielded USB devices to be shut down by |
4764 | * the hub; this hack enables them again. | 4764 | * the hub; this hack enables them again. |
4765 | * Works at least with the mouse driver. | 4765 | * Works at least with the mouse driver. |
4766 | */ | 4766 | */ |
4767 | if (!(portstatus & USB_PORT_STAT_ENABLE) | 4767 | if (!(portstatus & USB_PORT_STAT_ENABLE) |
4768 | && !connect_change | 4768 | && !connect_change |
4769 | && hub->ports[i - 1]->child) { | 4769 | && hub->ports[i - 1]->child) { |
4770 | dev_err (hub_dev, | 4770 | dev_err (hub_dev, |
4771 | "port %i " | 4771 | "port %i " |
4772 | "disabled by hub (EMI?), " | 4772 | "disabled by hub (EMI?), " |
4773 | "re-enabling...\n", | 4773 | "re-enabling...\n", |
4774 | i); | 4774 | i); |
4775 | connect_change = 1; | 4775 | connect_change = 1; |
4776 | } | 4776 | } |
4777 | } | 4777 | } |
4778 | 4778 | ||
4779 | if (hub_handle_remote_wakeup(hub, i, | 4779 | if (hub_handle_remote_wakeup(hub, i, |
4780 | portstatus, portchange)) | 4780 | portstatus, portchange)) |
4781 | connect_change = 1; | 4781 | connect_change = 1; |
4782 | 4782 | ||
4783 | if (portchange & USB_PORT_STAT_C_OVERCURRENT) { | 4783 | if (portchange & USB_PORT_STAT_C_OVERCURRENT) { |
4784 | u16 status = 0; | 4784 | u16 status = 0; |
4785 | u16 unused; | 4785 | u16 unused; |
4786 | 4786 | ||
4787 | dev_dbg(hub_dev, "over-current change on port " | 4787 | dev_dbg(hub_dev, "over-current change on port " |
4788 | "%d\n", i); | 4788 | "%d\n", i); |
4789 | usb_clear_port_feature(hdev, i, | 4789 | usb_clear_port_feature(hdev, i, |
4790 | USB_PORT_FEAT_C_OVER_CURRENT); | 4790 | USB_PORT_FEAT_C_OVER_CURRENT); |
4791 | msleep(100); /* Cool down */ | 4791 | msleep(100); /* Cool down */ |
4792 | hub_power_on(hub, true); | 4792 | hub_power_on(hub, true); |
4793 | hub_port_status(hub, i, &status, &unused); | 4793 | hub_port_status(hub, i, &status, &unused); |
4794 | if (status & USB_PORT_STAT_OVERCURRENT) | 4794 | if (status & USB_PORT_STAT_OVERCURRENT) |
4795 | dev_err(hub_dev, "over-current " | 4795 | dev_err(hub_dev, "over-current " |
4796 | "condition on port %d\n", i); | 4796 | "condition on port %d\n", i); |
4797 | } | 4797 | } |
4798 | 4798 | ||
4799 | if (portchange & USB_PORT_STAT_C_RESET) { | 4799 | if (portchange & USB_PORT_STAT_C_RESET) { |
4800 | dev_dbg (hub_dev, | 4800 | dev_dbg (hub_dev, |
4801 | "reset change on port %d\n", | 4801 | "reset change on port %d\n", |
4802 | i); | 4802 | i); |
4803 | usb_clear_port_feature(hdev, i, | 4803 | usb_clear_port_feature(hdev, i, |
4804 | USB_PORT_FEAT_C_RESET); | 4804 | USB_PORT_FEAT_C_RESET); |
4805 | } | 4805 | } |
4806 | if ((portchange & USB_PORT_STAT_C_BH_RESET) && | 4806 | if ((portchange & USB_PORT_STAT_C_BH_RESET) && |
4807 | hub_is_superspeed(hub->hdev)) { | 4807 | hub_is_superspeed(hub->hdev)) { |
4808 | dev_dbg(hub_dev, | 4808 | dev_dbg(hub_dev, |
4809 | "warm reset change on port %d\n", | 4809 | "warm reset change on port %d\n", |
4810 | i); | 4810 | i); |
4811 | usb_clear_port_feature(hdev, i, | 4811 | usb_clear_port_feature(hdev, i, |
4812 | USB_PORT_FEAT_C_BH_PORT_RESET); | 4812 | USB_PORT_FEAT_C_BH_PORT_RESET); |
4813 | } | 4813 | } |
4814 | if (portchange & USB_PORT_STAT_C_LINK_STATE) { | 4814 | if (portchange & USB_PORT_STAT_C_LINK_STATE) { |
4815 | usb_clear_port_feature(hub->hdev, i, | 4815 | usb_clear_port_feature(hub->hdev, i, |
4816 | USB_PORT_FEAT_C_PORT_LINK_STATE); | 4816 | USB_PORT_FEAT_C_PORT_LINK_STATE); |
4817 | } | 4817 | } |
4818 | if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) { | 4818 | if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) { |
4819 | dev_warn(hub_dev, | 4819 | dev_warn(hub_dev, |
4820 | "config error on port %d\n", | 4820 | "config error on port %d\n", |
4821 | i); | 4821 | i); |
4822 | usb_clear_port_feature(hub->hdev, i, | 4822 | usb_clear_port_feature(hub->hdev, i, |
4823 | USB_PORT_FEAT_C_PORT_CONFIG_ERROR); | 4823 | USB_PORT_FEAT_C_PORT_CONFIG_ERROR); |
4824 | } | 4824 | } |
4825 | 4825 | ||
4826 | /* Warm reset a USB3 protocol port if it's in | 4826 | /* Warm reset a USB3 protocol port if it's in |
4827 | * SS.Inactive state. | 4827 | * SS.Inactive state. |
4828 | */ | 4828 | */ |
4829 | if (hub_port_warm_reset_required(hub, portstatus)) { | 4829 | if (hub_port_warm_reset_required(hub, portstatus)) { |
4830 | int status; | 4830 | int status; |
4831 | struct usb_device *udev = | 4831 | struct usb_device *udev = |
4832 | hub->ports[i - 1]->child; | 4832 | hub->ports[i - 1]->child; |
4833 | 4833 | ||
4834 | dev_dbg(hub_dev, "warm reset port %d\n", i); | 4834 | dev_dbg(hub_dev, "warm reset port %d\n", i); |
4835 | if (!udev || !(portstatus & | 4835 | if (!udev || !(portstatus & |
4836 | USB_PORT_STAT_CONNECTION)) { | 4836 | USB_PORT_STAT_CONNECTION)) { |
4837 | status = hub_port_reset(hub, i, | 4837 | status = hub_port_reset(hub, i, |
4838 | NULL, HUB_BH_RESET_TIME, | 4838 | NULL, HUB_BH_RESET_TIME, |
4839 | true); | 4839 | true); |
4840 | if (status < 0) | 4840 | if (status < 0) |
4841 | hub_port_disable(hub, i, 1); | 4841 | hub_port_disable(hub, i, 1); |
4842 | } else { | 4842 | } else { |
4843 | usb_lock_device(udev); | 4843 | usb_lock_device(udev); |
4844 | status = usb_reset_device(udev); | 4844 | status = usb_reset_device(udev); |
4845 | usb_unlock_device(udev); | 4845 | usb_unlock_device(udev); |
4846 | connect_change = 0; | 4846 | connect_change = 0; |
4847 | } | 4847 | } |
4848 | } | 4848 | } |
4849 | 4849 | ||
4850 | if (connect_change) | 4850 | if (connect_change) |
4851 | hub_port_connect_change(hub, i, | 4851 | hub_port_connect_change(hub, i, |
4852 | portstatus, portchange); | 4852 | portstatus, portchange); |
4853 | } /* end for i */ | 4853 | } /* end for i */ |
4854 | 4854 | ||
4855 | /* deal with hub status changes */ | 4855 | /* deal with hub status changes */ |
4856 | if (test_and_clear_bit(0, hub->event_bits) == 0) | 4856 | if (test_and_clear_bit(0, hub->event_bits) == 0) |
4857 | ; /* do nothing */ | 4857 | ; /* do nothing */ |
4858 | else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0) | 4858 | else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0) |
4859 | dev_err (hub_dev, "get_hub_status failed\n"); | 4859 | dev_err (hub_dev, "get_hub_status failed\n"); |
4860 | else { | 4860 | else { |
4861 | if (hubchange & HUB_CHANGE_LOCAL_POWER) { | 4861 | if (hubchange & HUB_CHANGE_LOCAL_POWER) { |
4862 | dev_dbg (hub_dev, "power change\n"); | 4862 | dev_dbg (hub_dev, "power change\n"); |
4863 | clear_hub_feature(hdev, C_HUB_LOCAL_POWER); | 4863 | clear_hub_feature(hdev, C_HUB_LOCAL_POWER); |
4864 | if (hubstatus & HUB_STATUS_LOCAL_POWER) | 4864 | if (hubstatus & HUB_STATUS_LOCAL_POWER) |
4865 | /* FIXME: Is this always true? */ | 4865 | /* FIXME: Is this always true? */ |
4866 | hub->limited_power = 1; | 4866 | hub->limited_power = 1; |
4867 | else | 4867 | else |
4868 | hub->limited_power = 0; | 4868 | hub->limited_power = 0; |
4869 | } | 4869 | } |
4870 | if (hubchange & HUB_CHANGE_OVERCURRENT) { | 4870 | if (hubchange & HUB_CHANGE_OVERCURRENT) { |
4871 | u16 status = 0; | 4871 | u16 status = 0; |
4872 | u16 unused; | 4872 | u16 unused; |
4873 | 4873 | ||
4874 | dev_dbg(hub_dev, "over-current change\n"); | 4874 | dev_dbg(hub_dev, "over-current change\n"); |
4875 | clear_hub_feature(hdev, C_HUB_OVER_CURRENT); | 4875 | clear_hub_feature(hdev, C_HUB_OVER_CURRENT); |
4876 | msleep(500); /* Cool down */ | 4876 | msleep(500); /* Cool down */ |
4877 | hub_power_on(hub, true); | 4877 | hub_power_on(hub, true); |
4878 | hub_hub_status(hub, &status, &unused); | 4878 | hub_hub_status(hub, &status, &unused); |
4879 | if (status & HUB_STATUS_OVERCURRENT) | 4879 | if (status & HUB_STATUS_OVERCURRENT) |
4880 | dev_err(hub_dev, "over-current " | 4880 | dev_err(hub_dev, "over-current " |
4881 | "condition\n"); | 4881 | "condition\n"); |
4882 | } | 4882 | } |
4883 | } | 4883 | } |
4884 | 4884 | ||
4885 | loop_autopm: | 4885 | loop_autopm: |
4886 | /* Balance the usb_autopm_get_interface() above */ | 4886 | /* Balance the usb_autopm_get_interface() above */ |
4887 | usb_autopm_put_interface_no_suspend(intf); | 4887 | usb_autopm_put_interface_no_suspend(intf); |
4888 | loop: | 4888 | loop: |
4889 | /* Balance the usb_autopm_get_interface_no_resume() in | 4889 | /* Balance the usb_autopm_get_interface_no_resume() in |
4890 | * kick_khubd() and allow autosuspend. | 4890 | * kick_khubd() and allow autosuspend. |
4891 | */ | 4891 | */ |
4892 | usb_autopm_put_interface(intf); | 4892 | usb_autopm_put_interface(intf); |
4893 | loop_disconnected: | 4893 | loop_disconnected: |
4894 | usb_unlock_device(hdev); | 4894 | usb_unlock_device(hdev); |
4895 | kref_put(&hub->kref, hub_release); | 4895 | kref_put(&hub->kref, hub_release); |
4896 | 4896 | ||
4897 | } /* end while (1) */ | 4897 | } /* end while (1) */ |
4898 | } | 4898 | } |
4899 | 4899 | ||
4900 | static int hub_thread(void *__unused) | 4900 | static int hub_thread(void *__unused) |
4901 | { | 4901 | { |
4902 | /* khubd needs to be freezable to avoid interfering with USB-PERSIST | 4902 | /* khubd needs to be freezable to avoid interfering with USB-PERSIST |
4903 | * port handover. Otherwise it might see that a full-speed device | 4903 | * port handover. Otherwise it might see that a full-speed device |
4904 | * was gone before the EHCI controller had handed its port over to | 4904 | * was gone before the EHCI controller had handed its port over to |
4905 | * the companion full-speed controller. | 4905 | * the companion full-speed controller. |
4906 | */ | 4906 | */ |
4907 | set_freezable(); | 4907 | set_freezable(); |
4908 | 4908 | ||
4909 | do { | 4909 | do { |
4910 | hub_events(); | 4910 | hub_events(); |
4911 | wait_event_freezable(khubd_wait, | 4911 | wait_event_freezable(khubd_wait, |
4912 | !list_empty(&hub_event_list) || | 4912 | !list_empty(&hub_event_list) || |
4913 | kthread_should_stop()); | 4913 | kthread_should_stop()); |
4914 | } while (!kthread_should_stop() || !list_empty(&hub_event_list)); | 4914 | } while (!kthread_should_stop() || !list_empty(&hub_event_list)); |
4915 | 4915 | ||
4916 | pr_debug("%s: khubd exiting\n", usbcore_name); | 4916 | pr_debug("%s: khubd exiting\n", usbcore_name); |
4917 | return 0; | 4917 | return 0; |
4918 | } | 4918 | } |
4919 | 4919 | ||
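The comments in hub_events() refer to kick_khubd(), which queues a hub onto hub_event_list and wakes this thread; that function is outside this hunk. A simplified sketch of the producer side (not the verbatim kernel code, and omitting the autopm reference that the "loop:" label above balances) looks roughly like this:

static void kick_khubd(struct usb_hub *hub)
{
	unsigned long flags;

	spin_lock_irqsave(&hub_event_lock, flags);
	if (!hub->disconnected && list_empty(&hub->event_list)) {
		list_add_tail(&hub->event_list, &hub_event_list);
		/* wakes the wait_event_freezable() in hub_thread() */
		wake_up(&khubd_wait);
	}
	spin_unlock_irqrestore(&hub_event_lock, flags);
}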
4920 | static const struct usb_device_id hub_id_table[] = { | 4920 | static const struct usb_device_id hub_id_table[] = { |
4921 | { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | 4921 | { .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
4922 | | USB_DEVICE_ID_MATCH_INT_CLASS, | 4922 | | USB_DEVICE_ID_MATCH_INT_CLASS, |
4923 | .idVendor = USB_VENDOR_GENESYS_LOGIC, | 4923 | .idVendor = USB_VENDOR_GENESYS_LOGIC, |
4924 | .bInterfaceClass = USB_CLASS_HUB, | 4924 | .bInterfaceClass = USB_CLASS_HUB, |
4925 | .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND}, | 4925 | .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND}, |
4926 | { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, | 4926 | { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, |
4927 | .bDeviceClass = USB_CLASS_HUB}, | 4927 | .bDeviceClass = USB_CLASS_HUB}, |
4928 | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, | 4928 | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, |
4929 | .bInterfaceClass = USB_CLASS_HUB}, | 4929 | .bInterfaceClass = USB_CLASS_HUB}, |
4930 | { } /* Terminating entry */ | 4930 | { } /* Terminating entry */ |
4931 | }; | 4931 | }; |
4932 | 4932 | ||
4933 | MODULE_DEVICE_TABLE (usb, hub_id_table); | 4933 | MODULE_DEVICE_TABLE (usb, hub_id_table); |
4934 | 4934 | ||
4935 | static struct usb_driver hub_driver = { | 4935 | static struct usb_driver hub_driver = { |
4936 | .name = "hub", | 4936 | .name = "hub", |
4937 | .probe = hub_probe, | 4937 | .probe = hub_probe, |
4938 | .disconnect = hub_disconnect, | 4938 | .disconnect = hub_disconnect, |
4939 | .suspend = hub_suspend, | 4939 | .suspend = hub_suspend, |
4940 | .resume = hub_resume, | 4940 | .resume = hub_resume, |
4941 | .reset_resume = hub_reset_resume, | 4941 | .reset_resume = hub_reset_resume, |
4942 | .pre_reset = hub_pre_reset, | 4942 | .pre_reset = hub_pre_reset, |
4943 | .post_reset = hub_post_reset, | 4943 | .post_reset = hub_post_reset, |
4944 | .unlocked_ioctl = hub_ioctl, | 4944 | .unlocked_ioctl = hub_ioctl, |
4945 | .id_table = hub_id_table, | 4945 | .id_table = hub_id_table, |
4946 | .supports_autosuspend = 1, | 4946 | .supports_autosuspend = 1, |
4947 | }; | 4947 | }; |
4948 | 4948 | ||
4949 | int usb_hub_init(void) | 4949 | int usb_hub_init(void) |
4950 | { | 4950 | { |
4951 | if (usb_register(&hub_driver) < 0) { | 4951 | if (usb_register(&hub_driver) < 0) { |
4952 | printk(KERN_ERR "%s: can't register hub driver\n", | 4952 | printk(KERN_ERR "%s: can't register hub driver\n", |
4953 | usbcore_name); | 4953 | usbcore_name); |
4954 | return -1; | 4954 | return -1; |
4955 | } | 4955 | } |
4956 | 4956 | ||
4957 | khubd_task = kthread_run(hub_thread, NULL, "khubd"); | 4957 | khubd_task = kthread_run(hub_thread, NULL, "khubd"); |
4958 | if (!IS_ERR(khubd_task)) | 4958 | if (!IS_ERR(khubd_task)) |
4959 | return 0; | 4959 | return 0; |
4960 | 4960 | ||
4961 | /* Fall through if kthread_run() failed */ | 4961 | /* Fall through if kthread_run() failed */ |
4962 | usb_deregister(&hub_driver); | 4962 | usb_deregister(&hub_driver); |
4963 | printk(KERN_ERR "%s: can't start khubd\n", usbcore_name); | 4963 | printk(KERN_ERR "%s: can't start khubd\n", usbcore_name); |
4964 | 4964 | ||
4965 | return -1; | 4965 | return -1; |
4966 | } | 4966 | } |
4967 | 4967 | ||
4968 | void usb_hub_cleanup(void) | 4968 | void usb_hub_cleanup(void) |
4969 | { | 4969 | { |
4970 | kthread_stop(khubd_task); | 4970 | kthread_stop(khubd_task); |
4971 | 4971 | ||
4972 | /* | 4972 | /* |
4973 | * Hub resources are freed for us by usb_deregister. It calls | 4973 | * Hub resources are freed for us by usb_deregister. It calls |
4974 | * usb_driver_purge on every device which in turn calls that | 4974 | * usb_driver_purge on every device which in turn calls that |
4975 | * device's disconnect function if it is using this driver. | 4975 | * device's disconnect function if it is using this driver. |
4976 | * The hub_disconnect function takes care of releasing the | 4976 | * The hub_disconnect function takes care of releasing the |
4977 | * individual hub resources. -greg | 4977 | * individual hub resources. -greg |
4978 | */ | 4978 | */ |
4979 | usb_deregister(&hub_driver); | 4979 | usb_deregister(&hub_driver); |
4980 | } /* usb_hub_cleanup() */ | 4980 | } /* usb_hub_cleanup() */ |
4981 | 4981 | ||
4982 | static int descriptors_changed(struct usb_device *udev, | 4982 | static int descriptors_changed(struct usb_device *udev, |
4983 | struct usb_device_descriptor *old_device_descriptor, | 4983 | struct usb_device_descriptor *old_device_descriptor, |
4984 | struct usb_host_bos *old_bos) | 4984 | struct usb_host_bos *old_bos) |
4985 | { | 4985 | { |
4986 | int changed = 0; | 4986 | int changed = 0; |
4987 | unsigned index; | 4987 | unsigned index; |
4988 | unsigned serial_len = 0; | 4988 | unsigned serial_len = 0; |
4989 | unsigned len; | 4989 | unsigned len; |
4990 | unsigned old_length; | 4990 | unsigned old_length; |
4991 | int length; | 4991 | int length; |
4992 | char *buf; | 4992 | char *buf; |
4993 | 4993 | ||
4994 | if (memcmp(&udev->descriptor, old_device_descriptor, | 4994 | if (memcmp(&udev->descriptor, old_device_descriptor, |
4995 | sizeof(*old_device_descriptor)) != 0) | 4995 | sizeof(*old_device_descriptor)) != 0) |
4996 | return 1; | 4996 | return 1; |
4997 | 4997 | ||
4998 | if ((old_bos && !udev->bos) || (!old_bos && udev->bos)) | 4998 | if ((old_bos && !udev->bos) || (!old_bos && udev->bos)) |
4999 | return 1; | 4999 | return 1; |
5000 | if (udev->bos) { | 5000 | if (udev->bos) { |
5001 | len = le16_to_cpu(udev->bos->desc->wTotalLength); | 5001 | len = le16_to_cpu(udev->bos->desc->wTotalLength); |
5002 | if (len != le16_to_cpu(old_bos->desc->wTotalLength)) | 5002 | if (len != le16_to_cpu(old_bos->desc->wTotalLength)) |
5003 | return 1; | 5003 | return 1; |
5004 | if (memcmp(udev->bos->desc, old_bos->desc, len)) | 5004 | if (memcmp(udev->bos->desc, old_bos->desc, len)) |
5005 | return 1; | 5005 | return 1; |
5006 | } | 5006 | } |
5007 | 5007 | ||
5008 | /* Since the idVendor, idProduct, and bcdDevice values in the | 5008 | /* Since the idVendor, idProduct, and bcdDevice values in the |
5009 | * device descriptor haven't changed, we will assume the | 5009 | * device descriptor haven't changed, we will assume the |
5010 | * Manufacturer and Product strings haven't changed either. | 5010 | * Manufacturer and Product strings haven't changed either. |
5011 | * But the SerialNumber string could be different (e.g., a | 5011 | * But the SerialNumber string could be different (e.g., a |
5012 | * different flash card of the same brand). | 5012 | * different flash card of the same brand). |
5013 | */ | 5013 | */ |
5014 | if (udev->serial) | 5014 | if (udev->serial) |
5015 | serial_len = strlen(udev->serial) + 1; | 5015 | serial_len = strlen(udev->serial) + 1; |
5016 | 5016 | ||
5017 | len = serial_len; | 5017 | len = serial_len; |
5018 | for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { | 5018 | for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { |
5019 | old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); | 5019 | old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); |
5020 | len = max(len, old_length); | 5020 | len = max(len, old_length); |
5021 | } | 5021 | } |
5022 | 5022 | ||
5023 | buf = kmalloc(len, GFP_NOIO); | 5023 | buf = kmalloc(len, GFP_NOIO); |
5024 | if (buf == NULL) { | 5024 | if (buf == NULL) { |
5025 | dev_err(&udev->dev, "no mem to re-read configs after reset\n"); | 5025 | dev_err(&udev->dev, "no mem to re-read configs after reset\n"); |
5026 | /* assume the worst */ | 5026 | /* assume the worst */ |
5027 | return 1; | 5027 | return 1; |
5028 | } | 5028 | } |
5029 | for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { | 5029 | for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { |
5030 | old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); | 5030 | old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); |
5031 | length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf, | 5031 | length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf, |
5032 | old_length); | 5032 | old_length); |
5033 | if (length != old_length) { | 5033 | if (length != old_length) { |
5034 | dev_dbg(&udev->dev, "config index %d, error %d\n", | 5034 | dev_dbg(&udev->dev, "config index %d, error %d\n", |
5035 | index, length); | 5035 | index, length); |
5036 | changed = 1; | 5036 | changed = 1; |
5037 | break; | 5037 | break; |
5038 | } | 5038 | } |
5039 | if (memcmp (buf, udev->rawdescriptors[index], old_length) | 5039 | if (memcmp (buf, udev->rawdescriptors[index], old_length) |
5040 | != 0) { | 5040 | != 0) { |
5041 | dev_dbg(&udev->dev, "config index %d changed (#%d)\n", | 5041 | dev_dbg(&udev->dev, "config index %d changed (#%d)\n", |
5042 | index, | 5042 | index, |
5043 | ((struct usb_config_descriptor *) buf)-> | 5043 | ((struct usb_config_descriptor *) buf)-> |
5044 | bConfigurationValue); | 5044 | bConfigurationValue); |
5045 | changed = 1; | 5045 | changed = 1; |
5046 | break; | 5046 | break; |
5047 | } | 5047 | } |
5048 | } | 5048 | } |
5049 | 5049 | ||
5050 | if (!changed && serial_len) { | 5050 | if (!changed && serial_len) { |
5051 | length = usb_string(udev, udev->descriptor.iSerialNumber, | 5051 | length = usb_string(udev, udev->descriptor.iSerialNumber, |
5052 | buf, serial_len); | 5052 | buf, serial_len); |
5053 | if (length + 1 != serial_len) { | 5053 | if (length + 1 != serial_len) { |
5054 | dev_dbg(&udev->dev, "serial string error %d\n", | 5054 | dev_dbg(&udev->dev, "serial string error %d\n", |
5055 | length); | 5055 | length); |
5056 | changed = 1; | 5056 | changed = 1; |
5057 | } else if (memcmp(buf, udev->serial, length) != 0) { | 5057 | } else if (memcmp(buf, udev->serial, length) != 0) { |
5058 | dev_dbg(&udev->dev, "serial string changed\n"); | 5058 | dev_dbg(&udev->dev, "serial string changed\n"); |
5059 | changed = 1; | 5059 | changed = 1; |
5060 | } | 5060 | } |
5061 | } | 5061 | } |
5062 | 5062 | ||
5063 | kfree(buf); | 5063 | kfree(buf); |
5064 | return changed; | 5064 | return changed; |
5065 | } | 5065 | } |
5066 | 5066 | ||
5067 | /** | 5067 | /** |
5068 | * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device | 5068 | * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device |
5069 | * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) | 5069 | * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) |
5070 | * | 5070 | * |
5071 | * WARNING - don't use this routine to reset a composite device | 5071 | * WARNING - don't use this routine to reset a composite device |
5072 | * (one with multiple interfaces owned by separate drivers)! | 5072 | * (one with multiple interfaces owned by separate drivers)! |
5073 | * Use usb_reset_device() instead. | 5073 | * Use usb_reset_device() instead. |
5074 | * | 5074 | * |
5075 | * Do a port reset, reassign the device's address, and establish its | 5075 | * Do a port reset, reassign the device's address, and establish its |
5076 | * former operating configuration. If the reset fails, or the device's | 5076 | * former operating configuration. If the reset fails, or the device's |
5077 | * descriptors change from their values before the reset, or the original | 5077 | * descriptors change from their values before the reset, or the original |
5078 | * configuration and altsettings cannot be restored, a flag will be set | 5078 | * configuration and altsettings cannot be restored, a flag will be set |
5079 | * telling khubd to pretend the device has been disconnected and then | 5079 | * telling khubd to pretend the device has been disconnected and then |
5080 | * re-connected. All drivers will be unbound, and the device will be | 5080 | * re-connected. All drivers will be unbound, and the device will be |
5081 | * re-enumerated and probed all over again. | 5081 | * re-enumerated and probed all over again. |
5082 | * | 5082 | * |
5083 | * Return: 0 if the reset succeeded, -ENODEV if the device has been | 5083 | * Return: 0 if the reset succeeded, -ENODEV if the device has been |
5084 | * flagged for logical disconnection, or some other negative error code | 5084 | * flagged for logical disconnection, or some other negative error code |
5085 | * if the reset wasn't even attempted. | 5085 | * if the reset wasn't even attempted. |
5086 | * | 5086 | * |
5087 | * Note: | 5087 | * Note: |
5088 | * The caller must own the device lock. For example, it's safe to use | 5088 | * The caller must own the device lock. For example, it's safe to use |
5089 | * this from a driver probe() routine after downloading new firmware. | 5089 | * this from a driver probe() routine after downloading new firmware. |
5090 | * For calls that might not occur during probe(), drivers should lock | 5090 | * For calls that might not occur during probe(), drivers should lock |
5091 | * the device using usb_lock_device_for_reset(). | 5091 | * the device using usb_lock_device_for_reset(). |
5092 | * | 5092 | * |
5093 | * Locking exception: This routine may also be called from within an | 5093 | * Locking exception: This routine may also be called from within an |
5094 | * autoresume handler. Such usage won't conflict with other tasks | 5094 | * autoresume handler. Such usage won't conflict with other tasks |
5095 | * holding the device lock because these tasks should always call | 5095 | * holding the device lock because these tasks should always call |
5096 | * usb_autopm_resume_device(), thereby preventing any unwanted autoresume. | 5096 | * usb_autopm_resume_device(), thereby preventing any unwanted autoresume. |
5097 | */ | 5097 | */ |
5098 | static int usb_reset_and_verify_device(struct usb_device *udev) | 5098 | static int usb_reset_and_verify_device(struct usb_device *udev) |
5099 | { | 5099 | { |
5100 | struct usb_device *parent_hdev = udev->parent; | 5100 | struct usb_device *parent_hdev = udev->parent; |
5101 | struct usb_hub *parent_hub; | 5101 | struct usb_hub *parent_hub; |
5102 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 5102 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
5103 | struct usb_device_descriptor descriptor = udev->descriptor; | 5103 | struct usb_device_descriptor descriptor = udev->descriptor; |
5104 | struct usb_host_bos *bos; | 5104 | struct usb_host_bos *bos; |
5105 | int i, ret = 0; | 5105 | int i, ret = 0; |
5106 | int port1 = udev->portnum; | 5106 | int port1 = udev->portnum; |
5107 | 5107 | ||
5108 | if (udev->state == USB_STATE_NOTATTACHED || | 5108 | if (udev->state == USB_STATE_NOTATTACHED || |
5109 | udev->state == USB_STATE_SUSPENDED) { | 5109 | udev->state == USB_STATE_SUSPENDED) { |
5110 | dev_dbg(&udev->dev, "device reset not allowed in state %d\n", | 5110 | dev_dbg(&udev->dev, "device reset not allowed in state %d\n", |
5111 | udev->state); | 5111 | udev->state); |
5112 | return -EINVAL; | 5112 | return -EINVAL; |
5113 | } | 5113 | } |
5114 | 5114 | ||
5115 | if (!parent_hdev) { | 5115 | if (!parent_hdev) { |
5116 | /* this requires hcd-specific logic; see ohci_restart() */ | 5116 | /* this requires hcd-specific logic; see ohci_restart() */ |
5117 | dev_dbg(&udev->dev, "%s for root hub!\n", __func__); | 5117 | dev_dbg(&udev->dev, "%s for root hub!\n", __func__); |
5118 | return -EISDIR; | 5118 | return -EISDIR; |
5119 | } | 5119 | } |
5120 | parent_hub = usb_hub_to_struct_hub(parent_hdev); | 5120 | parent_hub = usb_hub_to_struct_hub(parent_hdev); |
5121 | 5121 | ||
5122 | /* Disable USB2 hardware LPM. | 5122 | /* Disable USB2 hardware LPM. |
5123 | * It will be re-enabled by the enumeration process. | 5123 | * It will be re-enabled by the enumeration process. |
5124 | */ | 5124 | */ |
5125 | if (udev->usb2_hw_lpm_enabled == 1) | 5125 | if (udev->usb2_hw_lpm_enabled == 1) |
5126 | usb_set_usb2_hardware_lpm(udev, 0); | 5126 | usb_set_usb2_hardware_lpm(udev, 0); |
5127 | 5127 | ||
5128 | bos = udev->bos; | 5128 | bos = udev->bos; |
5129 | udev->bos = NULL; | 5129 | udev->bos = NULL; |
5130 | 5130 | ||
5131 | /* Disable LPM and LTM while we reset the device and reinstall the alt | 5131 | /* Disable LPM and LTM while we reset the device and reinstall the alt |
5132 | * settings. Device-initiated LPM settings, and system exit latency | 5132 | * settings. Device-initiated LPM settings, and system exit latency |
5133 | * settings are cleared when the device is reset, so we have to set | 5133 | * settings are cleared when the device is reset, so we have to set |
5134 | * them up again. | 5134 | * them up again. |
5135 | */ | 5135 | */ |
5136 | ret = usb_unlocked_disable_lpm(udev); | 5136 | ret = usb_unlocked_disable_lpm(udev); |
5137 | if (ret) { | 5137 | if (ret) { |
5138 | dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__); | 5138 | dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__); |
5139 | goto re_enumerate; | 5139 | goto re_enumerate; |
5140 | } | 5140 | } |
5141 | ret = usb_disable_ltm(udev); | 5141 | ret = usb_disable_ltm(udev); |
5142 | if (ret) { | 5142 | if (ret) { |
5143 | dev_err(&udev->dev, "%s Failed to disable LTM\n.", | 5143 | dev_err(&udev->dev, "%s Failed to disable LTM\n.", |
5144 | __func__); | 5144 | __func__); |
5145 | goto re_enumerate; | 5145 | goto re_enumerate; |
5146 | } | 5146 | } |
5147 | 5147 | ||
5148 | set_bit(port1, parent_hub->busy_bits); | 5148 | set_bit(port1, parent_hub->busy_bits); |
5149 | for (i = 0; i < SET_CONFIG_TRIES; ++i) { | 5149 | for (i = 0; i < SET_CONFIG_TRIES; ++i) { |
5150 | 5150 | ||
5151 | /* ep0 maxpacket size may change; let the HCD know about it. | 5151 | /* ep0 maxpacket size may change; let the HCD know about it. |
5152 | * Other endpoints will be handled by re-enumeration. */ | 5152 | * Other endpoints will be handled by re-enumeration. */ |
5153 | usb_ep0_reinit(udev); | 5153 | usb_ep0_reinit(udev); |
5154 | ret = hub_port_init(parent_hub, udev, port1, i); | 5154 | ret = hub_port_init(parent_hub, udev, port1, i); |
5155 | if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) | 5155 | if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) |
5156 | break; | 5156 | break; |
5157 | } | 5157 | } |
5158 | clear_bit(port1, parent_hub->busy_bits); | 5158 | clear_bit(port1, parent_hub->busy_bits); |
5159 | 5159 | ||
5160 | if (ret < 0) | 5160 | if (ret < 0) |
5161 | goto re_enumerate; | 5161 | goto re_enumerate; |
5162 | 5162 | ||
5163 | /* Device might have changed firmware (DFU or similar) */ | 5163 | /* Device might have changed firmware (DFU or similar) */ |
5164 | if (descriptors_changed(udev, &descriptor, bos)) { | 5164 | if (descriptors_changed(udev, &descriptor, bos)) { |
5165 | dev_info(&udev->dev, "device firmware changed\n"); | 5165 | dev_info(&udev->dev, "device firmware changed\n"); |
5166 | udev->descriptor = descriptor; /* for disconnect() calls */ | 5166 | udev->descriptor = descriptor; /* for disconnect() calls */ |
5167 | goto re_enumerate; | 5167 | goto re_enumerate; |
5168 | } | 5168 | } |
5169 | 5169 | ||
5170 | /* Restore the device's previous configuration */ | 5170 | /* Restore the device's previous configuration */ |
5171 | if (!udev->actconfig) | 5171 | if (!udev->actconfig) |
5172 | goto done; | 5172 | goto done; |
5173 | 5173 | ||
5174 | mutex_lock(hcd->bandwidth_mutex); | 5174 | mutex_lock(hcd->bandwidth_mutex); |
5175 | ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL); | 5175 | ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL); |
5176 | if (ret < 0) { | 5176 | if (ret < 0) { |
5177 | dev_warn(&udev->dev, | 5177 | dev_warn(&udev->dev, |
5178 | "Busted HC? Not enough HCD resources for " | 5178 | "Busted HC? Not enough HCD resources for " |
5179 | "old configuration.\n"); | 5179 | "old configuration.\n"); |
5180 | mutex_unlock(hcd->bandwidth_mutex); | 5180 | mutex_unlock(hcd->bandwidth_mutex); |
5181 | goto re_enumerate; | 5181 | goto re_enumerate; |
5182 | } | 5182 | } |
5183 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 5183 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
5184 | USB_REQ_SET_CONFIGURATION, 0, | 5184 | USB_REQ_SET_CONFIGURATION, 0, |
5185 | udev->actconfig->desc.bConfigurationValue, 0, | 5185 | udev->actconfig->desc.bConfigurationValue, 0, |
5186 | NULL, 0, USB_CTRL_SET_TIMEOUT); | 5186 | NULL, 0, USB_CTRL_SET_TIMEOUT); |
5187 | if (ret < 0) { | 5187 | if (ret < 0) { |
5188 | dev_err(&udev->dev, | 5188 | dev_err(&udev->dev, |
5189 | "can't restore configuration #%d (error=%d)\n", | 5189 | "can't restore configuration #%d (error=%d)\n", |
5190 | udev->actconfig->desc.bConfigurationValue, ret); | 5190 | udev->actconfig->desc.bConfigurationValue, ret); |
5191 | mutex_unlock(hcd->bandwidth_mutex); | 5191 | mutex_unlock(hcd->bandwidth_mutex); |
5192 | goto re_enumerate; | 5192 | goto re_enumerate; |
5193 | } | 5193 | } |
5194 | mutex_unlock(hcd->bandwidth_mutex); | 5194 | mutex_unlock(hcd->bandwidth_mutex); |
5195 | usb_set_device_state(udev, USB_STATE_CONFIGURED); | 5195 | usb_set_device_state(udev, USB_STATE_CONFIGURED); |
5196 | 5196 | ||
5197 | /* Put interfaces back into the same altsettings as before. | 5197 | /* Put interfaces back into the same altsettings as before. |
5198 | * Don't bother to send the Set-Interface request for interfaces | 5198 | * Don't bother to send the Set-Interface request for interfaces |
5199 | * that were already in altsetting 0; besides being unnecessary, | 5199 | * that were already in altsetting 0; besides being unnecessary, |
5200 | * many devices can't handle it. Instead just reset the host-side | 5200 | * many devices can't handle it. Instead just reset the host-side |
5201 | * endpoint state. | 5201 | * endpoint state. |
5202 | */ | 5202 | */ |
5203 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { | 5203 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { |
5204 | struct usb_host_config *config = udev->actconfig; | 5204 | struct usb_host_config *config = udev->actconfig; |
5205 | struct usb_interface *intf = config->interface[i]; | 5205 | struct usb_interface *intf = config->interface[i]; |
5206 | struct usb_interface_descriptor *desc; | 5206 | struct usb_interface_descriptor *desc; |
5207 | 5207 | ||
5208 | desc = &intf->cur_altsetting->desc; | 5208 | desc = &intf->cur_altsetting->desc; |
5209 | if (desc->bAlternateSetting == 0) { | 5209 | if (desc->bAlternateSetting == 0) { |
5210 | usb_disable_interface(udev, intf, true); | 5210 | usb_disable_interface(udev, intf, true); |
5211 | usb_enable_interface(udev, intf, true); | 5211 | usb_enable_interface(udev, intf, true); |
5212 | ret = 0; | 5212 | ret = 0; |
5213 | } else { | 5213 | } else { |
5214 | /* Let the bandwidth allocation function know that this | 5214 | /* Let the bandwidth allocation function know that this |
5215 | * device has been reset, and it will have to use | 5215 | * device has been reset, and it will have to use |
5216 | * alternate setting 0 as the current alternate setting. | 5216 | * alternate setting 0 as the current alternate setting. |
5217 | */ | 5217 | */ |
5218 | intf->resetting_device = 1; | 5218 | intf->resetting_device = 1; |
5219 | ret = usb_set_interface(udev, desc->bInterfaceNumber, | 5219 | ret = usb_set_interface(udev, desc->bInterfaceNumber, |
5220 | desc->bAlternateSetting); | 5220 | desc->bAlternateSetting); |
5221 | intf->resetting_device = 0; | 5221 | intf->resetting_device = 0; |
5222 | } | 5222 | } |
5223 | if (ret < 0) { | 5223 | if (ret < 0) { |
5224 | dev_err(&udev->dev, "failed to restore interface %d " | 5224 | dev_err(&udev->dev, "failed to restore interface %d " |
5225 | "altsetting %d (error=%d)\n", | 5225 | "altsetting %d (error=%d)\n", |
5226 | desc->bInterfaceNumber, | 5226 | desc->bInterfaceNumber, |
5227 | desc->bAlternateSetting, | 5227 | desc->bAlternateSetting, |
5228 | ret); | 5228 | ret); |
5229 | goto re_enumerate; | 5229 | goto re_enumerate; |
5230 | } | 5230 | } |
5231 | } | 5231 | } |
5232 | 5232 | ||
5233 | done: | 5233 | done: |
5234 | /* Now that the alt settings are re-installed, enable LTM and LPM. */ | 5234 | /* Now that the alt settings are re-installed, enable LTM and LPM. */ |
5235 | usb_set_usb2_hardware_lpm(udev, 1); | 5235 | usb_set_usb2_hardware_lpm(udev, 1); |
5236 | usb_unlocked_enable_lpm(udev); | 5236 | usb_unlocked_enable_lpm(udev); |
5237 | usb_enable_ltm(udev); | 5237 | usb_enable_ltm(udev); |
5238 | usb_release_bos_descriptor(udev); | 5238 | usb_release_bos_descriptor(udev); |
5239 | udev->bos = bos; | 5239 | udev->bos = bos; |
5240 | return 0; | 5240 | return 0; |
5241 | 5241 | ||
5242 | re_enumerate: | 5242 | re_enumerate: |
5243 | /* LPM state doesn't matter when we're about to destroy the device. */ | 5243 | /* LPM state doesn't matter when we're about to destroy the device. */ |
5244 | hub_port_logical_disconnect(parent_hub, port1); | 5244 | hub_port_logical_disconnect(parent_hub, port1); |
5245 | usb_release_bos_descriptor(udev); | 5245 | usb_release_bos_descriptor(udev); |
5246 | udev->bos = bos; | 5246 | udev->bos = bos; |
5247 | return -ENODEV; | 5247 | return -ENODEV; |
5248 | } | 5248 | } |
5249 | 5249 | ||
5250 | /** | 5250 | /** |
5251 | * usb_reset_device - warn interface drivers and perform a USB port reset | 5251 | * usb_reset_device - warn interface drivers and perform a USB port reset |
5252 | * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) | 5252 | * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) |
5253 | * | 5253 | * |
5254 | * Warns all drivers bound to registered interfaces (using their pre_reset | 5254 | * Warns all drivers bound to registered interfaces (using their pre_reset |
5255 | * method), performs the port reset, and then lets the drivers know that | 5255 | * method), performs the port reset, and then lets the drivers know that |
5256 | * the reset is over (using their post_reset method). | 5256 | * the reset is over (using their post_reset method). |
5257 | * | 5257 | * |
5258 | * Return: The same as for usb_reset_and_verify_device(). | 5258 | * Return: The same as for usb_reset_and_verify_device(). |
5259 | * | 5259 | * |
5260 | * Note: | 5260 | * Note: |
5261 | * The caller must own the device lock. For example, it's safe to use | 5261 | * The caller must own the device lock. For example, it's safe to use |
5262 | * this from a driver probe() routine after downloading new firmware. | 5262 | * this from a driver probe() routine after downloading new firmware. |
5263 | * For calls that might not occur during probe(), drivers should lock | 5263 | * For calls that might not occur during probe(), drivers should lock |
5264 | * the device using usb_lock_device_for_reset(). | 5264 | * the device using usb_lock_device_for_reset(). |
5265 | * | 5265 | * |
5266 | * If an interface is currently being probed or disconnected, we assume | 5266 | * If an interface is currently being probed or disconnected, we assume |
5267 | * its driver knows how to handle resets. For all other interfaces, | 5267 | * its driver knows how to handle resets. For all other interfaces, |
5268 | * if the driver doesn't have pre_reset and post_reset methods then | 5268 | * if the driver doesn't have pre_reset and post_reset methods then |
5269 | * we attempt to unbind it and rebind afterward. | 5269 | * we attempt to unbind it and rebind afterward. |
5270 | */ | 5270 | */ |
5271 | int usb_reset_device(struct usb_device *udev) | 5271 | int usb_reset_device(struct usb_device *udev) |
5272 | { | 5272 | { |
5273 | int ret; | 5273 | int ret; |
5274 | int i; | 5274 | int i; |
5275 | unsigned int noio_flag; | 5275 | unsigned int noio_flag; |
5276 | struct usb_host_config *config = udev->actconfig; | 5276 | struct usb_host_config *config = udev->actconfig; |
5277 | 5277 | ||
5278 | if (udev->state == USB_STATE_NOTATTACHED || | 5278 | if (udev->state == USB_STATE_NOTATTACHED || |
5279 | udev->state == USB_STATE_SUSPENDED) { | 5279 | udev->state == USB_STATE_SUSPENDED) { |
5280 | dev_dbg(&udev->dev, "device reset not allowed in state %d\n", | 5280 | dev_dbg(&udev->dev, "device reset not allowed in state %d\n", |
5281 | udev->state); | 5281 | udev->state); |
5282 | return -EINVAL; | 5282 | return -EINVAL; |
5283 | } | 5283 | } |
5284 | 5284 | ||
5285 | /* | 5285 | /* |
5286 | * Don't allocate memory with GFP_KERNEL in current | 5286 | * Don't allocate memory with GFP_KERNEL in current |
5287 | * context to avoid possible deadlock if usb mass | 5287 | * context to avoid possible deadlock if usb mass |
5288 | * storage interface or usbnet interface (iSCSI case) | 5288 | * storage interface or usbnet interface (iSCSI case) |
5289 | * is included in the current configuration. The easiest | 5289 | * is included in the current configuration. The easiest |
5290 | * approach is to do it for every device reset, | 5290 | * approach is to do it for every device reset, |
5291 | * because the device 'memalloc_noio' flag may have | 5291 | * because the device 'memalloc_noio' flag may have |
5292 | * not been set before resetting the USB device. | 5292 | * not been set before resetting the USB device. |
5293 | */ | 5293 | */ |
5294 | noio_flag = memalloc_noio_save(); | 5294 | noio_flag = memalloc_noio_save(); |
5295 | 5295 | ||
5296 | /* Prevent autosuspend during the reset */ | 5296 | /* Prevent autosuspend during the reset */ |
5297 | usb_autoresume_device(udev); | 5297 | usb_autoresume_device(udev); |
5298 | 5298 | ||
5299 | if (config) { | 5299 | if (config) { |
5300 | for (i = 0; i < config->desc.bNumInterfaces; ++i) { | 5300 | for (i = 0; i < config->desc.bNumInterfaces; ++i) { |
5301 | struct usb_interface *cintf = config->interface[i]; | 5301 | struct usb_interface *cintf = config->interface[i]; |
5302 | struct usb_driver *drv; | 5302 | struct usb_driver *drv; |
5303 | int unbind = 0; | 5303 | int unbind = 0; |
5304 | 5304 | ||
5305 | if (cintf->dev.driver) { | 5305 | if (cintf->dev.driver) { |
5306 | drv = to_usb_driver(cintf->dev.driver); | 5306 | drv = to_usb_driver(cintf->dev.driver); |
5307 | if (drv->pre_reset && drv->post_reset) | 5307 | if (drv->pre_reset && drv->post_reset) |
5308 | unbind = (drv->pre_reset)(cintf); | 5308 | unbind = (drv->pre_reset)(cintf); |
5309 | else if (cintf->condition == | 5309 | else if (cintf->condition == |
5310 | USB_INTERFACE_BOUND) | 5310 | USB_INTERFACE_BOUND) |
5311 | unbind = 1; | 5311 | unbind = 1; |
5312 | if (unbind) | 5312 | if (unbind) |
5313 | usb_forced_unbind_intf(cintf); | 5313 | usb_forced_unbind_intf(cintf); |
5314 | } | 5314 | } |
5315 | } | 5315 | } |
5316 | } | 5316 | } |
5317 | 5317 | ||
5318 | ret = usb_reset_and_verify_device(udev); | 5318 | ret = usb_reset_and_verify_device(udev); |
5319 | 5319 | ||
5320 | if (config) { | 5320 | if (config) { |
5321 | for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) { | 5321 | for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) { |
5322 | struct usb_interface *cintf = config->interface[i]; | 5322 | struct usb_interface *cintf = config->interface[i]; |
5323 | struct usb_driver *drv; | 5323 | struct usb_driver *drv; |
5324 | int rebind = cintf->needs_binding; | 5324 | int rebind = cintf->needs_binding; |
5325 | 5325 | ||
5326 | if (!rebind && cintf->dev.driver) { | 5326 | if (!rebind && cintf->dev.driver) { |
5327 | drv = to_usb_driver(cintf->dev.driver); | 5327 | drv = to_usb_driver(cintf->dev.driver); |
5328 | if (drv->post_reset) | 5328 | if (drv->post_reset) |
5329 | rebind = (drv->post_reset)(cintf); | 5329 | rebind = (drv->post_reset)(cintf); |
5330 | else if (cintf->condition == | 5330 | else if (cintf->condition == |
5331 | USB_INTERFACE_BOUND) | 5331 | USB_INTERFACE_BOUND) |
5332 | rebind = 1; | 5332 | rebind = 1; |
5333 | } | 5333 | } |
5334 | if (ret == 0 && rebind) | 5334 | if (ret == 0 && rebind) |
5335 | usb_rebind_intf(cintf); | 5335 | usb_rebind_intf(cintf); |
5336 | } | 5336 | } |
5337 | } | 5337 | } |
5338 | 5338 | ||
5339 | usb_autosuspend_device(udev); | 5339 | usb_autosuspend_device(udev); |
5340 | memalloc_noio_restore(noio_flag); | 5340 | memalloc_noio_restore(noio_flag); |
5341 | return ret; | 5341 | return ret; |
5342 | } | 5342 | } |
5343 | EXPORT_SYMBOL_GPL(usb_reset_device); | 5343 | EXPORT_SYMBOL_GPL(usb_reset_device); |
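As the kerneldoc above notes, usb_reset_device() requires the caller to hold the device lock; outside probe() that lock is normally taken with usb_lock_device_for_reset(). A hedged sketch of the usual driver-side pattern (the helper name is hypothetical, not code from this commit):

/* Reset the device that owns 'intf' from process context. */
static int my_recover_device(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	ret = usb_lock_device_for_reset(udev, intf);
	if (ret < 0)
		return ret;

	/* pre_reset()/post_reset() of bound drivers run inside here */
	ret = usb_reset_device(udev);
	usb_unlock_device(udev);
	return ret;
}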
5344 | 5344 | ||
5345 | 5345 | ||
5346 | /** | 5346 | /** |
5347 | * usb_queue_reset_device - Reset a USB device from an atomic context | 5347 | * usb_queue_reset_device - Reset a USB device from an atomic context |
5348 | * @iface: USB interface belonging to the device to reset | 5348 | * @iface: USB interface belonging to the device to reset |
5349 | * | 5349 | * |
5350 | * This function can be used to reset a USB device from an atomic | 5350 | * This function can be used to reset a USB device from an atomic |
5351 | * context, where usb_reset_device() won't work (as it blocks). | 5351 | * context, where usb_reset_device() won't work (as it blocks). |
5352 | * | 5352 | * |
5353 | * Doing a reset via this method is functionally equivalent to calling | 5353 | * Doing a reset via this method is functionally equivalent to calling |
5354 | * usb_reset_device(), except for the fact that it is delayed to a | 5354 | * usb_reset_device(), except for the fact that it is delayed to a |
5355 | * workqueue. This means that any drivers bound to other interfaces | 5355 | * workqueue. This means that any drivers bound to other interfaces |
5356 | * might be unbound, as well as users from usbfs in user space. | 5356 | * might be unbound, as well as users from usbfs in user space. |
5357 | * | 5357 | * |
5358 | * Corner cases: | 5358 | * Corner cases: |
5359 | * | 5359 | * |
5360 | * - Scheduling two resets at the same time from two different drivers | 5360 | * - Scheduling two resets at the same time from two different drivers |
5361 | * attached to two different interfaces of the same device is | 5361 | * attached to two different interfaces of the same device is |
5362 | * possible; depending on how the driver attached to each interface | 5362 | * possible; depending on how the driver attached to each interface |
5363 | * handles ->pre_reset(), the second reset might happen or not. | 5363 | * handles ->pre_reset(), the second reset might happen or not. |
5364 | * | 5364 | * |
5365 | * - If a driver is unbound and it had a pending reset, the reset will | 5365 | * - If a driver is unbound and it had a pending reset, the reset will |
5366 | * be cancelled. | 5366 | * be cancelled. |
5367 | * | 5367 | * |
5368 | * - This function can be called during .probe() or .disconnect() | 5368 | * - This function can be called during .probe() or .disconnect() |
5369 | * times. On return from .disconnect(), any pending resets will be | 5369 | * times. On return from .disconnect(), any pending resets will be |
5370 | * cancelled. | 5370 | * cancelled. |
5371 | * | 5371 | * |
5372 | * There is no need to lock/unlock the @reset_ws as schedule_work() | 5372 | * There is no need to lock/unlock the @reset_ws as schedule_work() |
5373 | * does its own. | 5373 | * does its own. |
5374 | * | 5374 | * |
5375 | * NOTE: We don't do any reference count tracking because it is not | 5375 | * NOTE: We don't do any reference count tracking because it is not |
5376 | * needed. The lifecycle of the work_struct is tied to the | 5376 | * needed. The lifecycle of the work_struct is tied to the |
5377 | * usb_interface. Before destroying the interface we cancel the | 5377 | * usb_interface. Before destroying the interface we cancel the |
5378 | * work_struct, so the fact that work_struct is queued and/or | 5378 | * work_struct, so the fact that work_struct is queued and/or |
5379 | * running means the interface (and thus, the device) exist and | 5379 | * running means the interface (and thus, the device) exist and |
5380 | * are referenced. | 5380 | * are referenced. |
5381 | */ | 5381 | */ |
5382 | void usb_queue_reset_device(struct usb_interface *iface) | 5382 | void usb_queue_reset_device(struct usb_interface *iface) |
5383 | { | 5383 | { |
5384 | schedule_work(&iface->reset_ws); | 5384 | schedule_work(&iface->reset_ws); |
5385 | } | 5385 | } |
5386 | EXPORT_SYMBOL_GPL(usb_queue_reset_device); | 5386 | EXPORT_SYMBOL_GPL(usb_queue_reset_device); |
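Because usb_queue_reset_device() only schedules the interface's reset_ws work item, it may be called where usb_reset_device() cannot, for example from an URB completion handler running in interrupt context. An illustrative sketch (the driver-private struct and its fields are hypothetical):

static void my_urb_complete(struct urb *urb)
{
	struct my_priv *priv = urb->context;	/* hypothetical driver data */

	if (urb->status == -EPROTO || urb->status == -EILSEQ) {
		/* can't sleep here, so queue the reset instead */
		usb_queue_reset_device(priv->intf);
		return;
	}

	/* ... normal completion handling and resubmission ... */
}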
5387 | 5387 | ||
5388 | /** | 5388 | /** |
5389 | * usb_hub_find_child - Get the pointer of child device | 5389 | * usb_hub_find_child - Get the pointer of child device |
5390 | * attached to the port which is specified by @port1. | 5390 | * attached to the port which is specified by @port1. |
5391 | * @hdev: USB device belonging to the usb hub | 5391 | * @hdev: USB device belonging to the usb hub |
5392 | * @port1: port num to indicate which port the child device | 5392 | * @port1: port num to indicate which port the child device |
5393 | * is attached to. | 5393 | * is attached to. |
5394 | * | 5394 | * |
5395 | * USB drivers call this function to get hub's child device | 5395 | * USB drivers call this function to get hub's child device |
5396 | * pointer. | 5396 | * pointer. |
5397 | * | 5397 | * |
5398 | * Return: The attached child's usb_device pointer, or %NULL if the | 5398 | * Return: The attached child's usb_device pointer, or %NULL if the |
5399 | * port number is invalid or no device is attached to the port. | 5399 | * port number is invalid or no device is attached to the port. |
5400 | */ | 5400 | */ |
5401 | struct usb_device *usb_hub_find_child(struct usb_device *hdev, | 5401 | struct usb_device *usb_hub_find_child(struct usb_device *hdev, |
5402 | int port1) | 5402 | int port1) |
5403 | { | 5403 | { |
5404 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 5404 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
5405 | 5405 | ||
5406 | if (port1 < 1 || port1 > hdev->maxchild) | 5406 | if (port1 < 1 || port1 > hdev->maxchild) |
5407 | return NULL; | 5407 | return NULL; |
5408 | return hub->ports[port1 - 1]->child; | 5408 | return hub->ports[port1 - 1]->child; |
5409 | } | 5409 | } |
5410 | EXPORT_SYMBOL_GPL(usb_hub_find_child); | 5410 | EXPORT_SYMBOL_GPL(usb_hub_find_child); |
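A short sketch of walking a hub's ports with usb_hub_find_child(); ports without an attached device yield NULL (the usb_hub_for_each_child() macro in <linux/usb.h> wraps the same loop). The helper name below is hypothetical:

static void my_list_children(struct usb_device *hdev)
{
	struct usb_device *child;
	int port1;

	for (port1 = 1; port1 <= hdev->maxchild; port1++) {
		child = usb_hub_find_child(hdev, port1);
		if (child)
			dev_info(&hdev->dev, "port %d: %04x:%04x\n", port1,
				 le16_to_cpu(child->descriptor.idVendor),
				 le16_to_cpu(child->descriptor.idProduct));
	}
}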
5411 | 5411 | ||
5412 | /** | 5412 | /** |
5413 | * usb_set_hub_port_connect_type - set hub port connect type. | 5413 | * usb_set_hub_port_connect_type - set hub port connect type. |
5414 | * @hdev: USB device belonging to the usb hub | 5414 | * @hdev: USB device belonging to the usb hub |
5415 | * @port1: port num of the port | 5415 | * @port1: port num of the port |
5416 | * @type: connect type of the port | 5416 | * @type: connect type of the port |
5417 | */ | 5417 | */ |
5418 | void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1, | 5418 | void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1, |
5419 | enum usb_port_connect_type type) | 5419 | enum usb_port_connect_type type) |
5420 | { | 5420 | { |
5421 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 5421 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
5422 | 5422 | ||
5423 | if (hub) | 5423 | if (hub) |
5424 | hub->ports[port1 - 1]->connect_type = type; | 5424 | hub->ports[port1 - 1]->connect_type = type; |
5425 | } | 5425 | } |
5426 | 5426 | ||
5427 | /** | 5427 | /** |
5428 | * usb_get_hub_port_connect_type - Get the port's connect type | 5428 | * usb_get_hub_port_connect_type - Get the port's connect type |
5429 | * @hdev: USB device belonging to the usb hub | 5429 | * @hdev: USB device belonging to the usb hub |
5430 | * @port1: port num of the port | 5430 | * @port1: port num of the port |
5431 | * | 5431 | * |
5432 | * Return: The connect type of the port if successful. Or | 5432 | * Return: The connect type of the port if successful. Or |
5433 | * USB_PORT_CONNECT_TYPE_UNKNOWN if input params are invalid. | 5433 | * USB_PORT_CONNECT_TYPE_UNKNOWN if input params are invalid. |
5434 | */ | 5434 | */ |
5435 | enum usb_port_connect_type | 5435 | enum usb_port_connect_type |
5436 | usb_get_hub_port_connect_type(struct usb_device *hdev, int port1) | 5436 | usb_get_hub_port_connect_type(struct usb_device *hdev, int port1) |
5437 | { | 5437 | { |
5438 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 5438 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
5439 | 5439 | ||
5440 | if (!hub) | 5440 | if (!hub) |
5441 | return USB_PORT_CONNECT_TYPE_UNKNOWN; | 5441 | return USB_PORT_CONNECT_TYPE_UNKNOWN; |
5442 | 5442 | ||
5443 | return hub->ports[port1 - 1]->connect_type; | 5443 | return hub->ports[port1 - 1]->connect_type; |
5444 | } | 5444 | } |
5445 | 5445 | ||
5446 | void usb_hub_adjust_deviceremovable(struct usb_device *hdev, | 5446 | void usb_hub_adjust_deviceremovable(struct usb_device *hdev, |
5447 | struct usb_hub_descriptor *desc) | 5447 | struct usb_hub_descriptor *desc) |
5448 | { | 5448 | { |
5449 | enum usb_port_connect_type connect_type; | 5449 | enum usb_port_connect_type connect_type; |
5450 | int i; | 5450 | int i; |
5451 | 5451 | ||
5452 | if (!hub_is_superspeed(hdev)) { | 5452 | if (!hub_is_superspeed(hdev)) { |
5453 | for (i = 1; i <= hdev->maxchild; i++) { | 5453 | for (i = 1; i <= hdev->maxchild; i++) { |
5454 | connect_type = usb_get_hub_port_connect_type(hdev, i); | 5454 | connect_type = usb_get_hub_port_connect_type(hdev, i); |
5455 | 5455 | ||
5456 | if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { | 5456 | if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { |
5457 | u8 mask = 1 << (i%8); | 5457 | u8 mask = 1 << (i%8); |
5458 | 5458 | ||
5459 | if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) { | 5459 | if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) { |
5460 | dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n", | 5460 | dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n", |
5461 | i); | 5461 | i); |
5462 | desc->u.hs.DeviceRemovable[i/8] |= mask; | 5462 | desc->u.hs.DeviceRemovable[i/8] |= mask; |
5463 | } | 5463 | } |
5464 | } | 5464 | } |
5465 | } | 5465 | } |
5466 | } else { | 5466 | } else { |
5467 | u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable); | 5467 | u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable); |
5468 | 5468 | ||
5469 | for (i = 1; i <= hdev->maxchild; i++) { | 5469 | for (i = 1; i <= hdev->maxchild; i++) { |
5470 | connect_type = usb_get_hub_port_connect_type(hdev, i); | 5470 | connect_type = usb_get_hub_port_connect_type(hdev, i); |
5471 | 5471 | ||
5472 | if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { | 5472 | if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { |
5473 | u16 mask = 1 << i; | 5473 | u16 mask = 1 << i; |
5474 | 5474 | ||
5475 | if (!(port_removable & mask)) { | 5475 | if (!(port_removable & mask)) { |
5476 | dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n", | 5476 | dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n", |
5477 | i); | 5477 | i); |
5478 | port_removable |= mask; | 5478 | port_removable |= mask; |
5479 | } | 5479 | } |
5480 | } | 5480 | } |
5481 | } | 5481 | } |
5482 | 5482 | ||
5483 | desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable); | 5483 | desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable); |
5484 | } | 5484 | } |
5485 | } | 5485 | } |
5486 | 5486 | ||
5487 | #ifdef CONFIG_ACPI | 5487 | #ifdef CONFIG_ACPI |
5488 | /** | 5488 | /** |
5489 | * usb_get_hub_port_acpi_handle - Get the usb port's acpi handle | 5489 | * usb_get_hub_port_acpi_handle - Get the usb port's acpi handle |
5490 | * @hdev: USB device belonging to the usb hub | 5490 | * @hdev: USB device belonging to the usb hub |
5491 | * @port1: port number of the port | 5491 | * @port1: port number of the port |
5492 | * | 5492 | * |
5493 | * Return: Port's acpi handle if successful, %NULL if params are | 5493 | * Return: Port's acpi handle if successful, %NULL if params are |
5494 | * invalid. | 5494 | * invalid. |
5495 | */ | 5495 | */ |
5496 | acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, | 5496 | acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, |
5497 | int port1) | 5497 | int port1) |
5498 | { | 5498 | { |
5499 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); | 5499 | struct usb_hub *hub = usb_hub_to_struct_hub(hdev); |
5500 | 5500 | ||
5501 | if (!hub) | 5501 | if (!hub) |
5502 | return NULL; | 5502 | return NULL; |
5503 | 5503 | ||
5504 | return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); | 5504 | return ACPI_HANDLE(&hub->ports[port1 - 1]->dev); |
5505 | } | 5505 | } |
5506 | #endif | 5506 | #endif |
5507 | 5507 |
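
The hub.c hunk above is a pure one-for-one substitution: usb_get_hub_port_acpi_handle() now reads the port's handle through ACPI_HANDLE() instead of the removed DEVICE_ACPI_HANDLE() macro. As a minimal sketch (the helper below is hypothetical and not part of this commit), any driver-side call site converts the same way:

/* Hypothetical helper, for illustration only: after this commit a driver
 * asks for a device's ACPI handle with ACPI_HANDLE(dev); the old
 * DEVICE_ACPI_HANDLE(dev) spelling is gone from <linux/acpi.h>. */
#include <linux/acpi.h>
#include <linux/device.h>

static bool example_has_acpi_handle(struct device *dev)
{
	acpi_handle handle = ACPI_HANDLE(dev);	/* was DEVICE_ACPI_HANDLE(dev) */

	return handle != NULL;
}
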
drivers/usb/core/usb-acpi.c
1 | /* | 1 | /* |
2 | * USB-ACPI glue code | 2 | * USB-ACPI glue code |
3 | * | 3 | * |
4 | * Copyright 2012 Red Hat <mjg@redhat.com> | 4 | * Copyright 2012 Red Hat <mjg@redhat.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation, version 2. | 8 | * Software Foundation, version 2. |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/usb.h> | 12 | #include <linux/usb.h> |
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <linux/usb/hcd.h> | 18 | #include <linux/usb/hcd.h> |
19 | #include <acpi/acpi_bus.h> | 19 | #include <acpi/acpi_bus.h> |
20 | 20 | ||
21 | #include "usb.h" | 21 | #include "usb.h" |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * usb_acpi_power_manageable - check whether usb port has | 24 | * usb_acpi_power_manageable - check whether usb port has |
25 | * an acpi power resource. | 25 | * an acpi power resource. |
26 | * @hdev: USB device belonging to the usb hub | 26 | * @hdev: USB device belonging to the usb hub |
27 | * @index: zero-based port index | 27 | * @index: zero-based port index |
28 | * | 28 | * |
29 | * Return true if the port has an acpi power resource, false otherwise. | 29 | * Return true if the port has an acpi power resource, false otherwise. |
30 | */ | 30 | */ |
31 | bool usb_acpi_power_manageable(struct usb_device *hdev, int index) | 31 | bool usb_acpi_power_manageable(struct usb_device *hdev, int index) |
32 | { | 32 | { |
33 | acpi_handle port_handle; | 33 | acpi_handle port_handle; |
34 | int port1 = index + 1; | 34 | int port1 = index + 1; |
35 | 35 | ||
36 | port_handle = usb_get_hub_port_acpi_handle(hdev, | 36 | port_handle = usb_get_hub_port_acpi_handle(hdev, |
37 | port1); | 37 | port1); |
38 | if (port_handle) | 38 | if (port_handle) |
39 | return acpi_bus_power_manageable(port_handle); | 39 | return acpi_bus_power_manageable(port_handle); |
40 | else | 40 | else |
41 | return false; | 41 | return false; |
42 | } | 42 | } |
43 | EXPORT_SYMBOL_GPL(usb_acpi_power_manageable); | 43 | EXPORT_SYMBOL_GPL(usb_acpi_power_manageable); |
44 | 44 | ||
45 | /** | 45 | /** |
46 | * usb_acpi_set_power_state - control usb port's power via acpi power | 46 | * usb_acpi_set_power_state - control usb port's power via acpi power |
47 | * resource | 47 | * resource |
48 | * @hdev: USB device belonging to the usb hub | 48 | * @hdev: USB device belonging to the usb hub |
49 | * @index: zero-based port index | 49 | * @index: zero-based port index |
50 | * @enable: power state expected to be set | 50 | * @enable: power state expected to be set |
51 | * | 51 | * |
52 | * Call usb_acpi_power_manageable() to check whether the usb port | 52 | * Call usb_acpi_power_manageable() to check whether the usb port |
53 | * has an acpi power resource before invoking this function. | 53 | * has an acpi power resource before invoking this function. |
54 | * | 54 | * |
55 | * Returns 0 on success, else negative errno. | 55 | * Returns 0 on success, else negative errno. |
56 | */ | 56 | */ |
57 | int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable) | 57 | int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable) |
58 | { | 58 | { |
59 | acpi_handle port_handle; | 59 | acpi_handle port_handle; |
60 | unsigned char state; | 60 | unsigned char state; |
61 | int port1 = index + 1; | 61 | int port1 = index + 1; |
62 | int error = -EINVAL; | 62 | int error = -EINVAL; |
63 | 63 | ||
64 | port_handle = (acpi_handle)usb_get_hub_port_acpi_handle(hdev, | 64 | port_handle = (acpi_handle)usb_get_hub_port_acpi_handle(hdev, |
65 | port1); | 65 | port1); |
66 | if (!port_handle) | 66 | if (!port_handle) |
67 | return error; | 67 | return error; |
68 | 68 | ||
69 | if (enable) | 69 | if (enable) |
70 | state = ACPI_STATE_D0; | 70 | state = ACPI_STATE_D0; |
71 | else | 71 | else |
72 | state = ACPI_STATE_D3_COLD; | 72 | state = ACPI_STATE_D3_COLD; |
73 | 73 | ||
74 | error = acpi_bus_set_power(port_handle, state); | 74 | error = acpi_bus_set_power(port_handle, state); |
75 | if (!error) | 75 | if (!error) |
76 | dev_dbg(&hdev->dev, "The power of hub port %d was set to %d\n", | 76 | dev_dbg(&hdev->dev, "The power of hub port %d was set to %d\n", |
77 | port1, enable); | 77 | port1, enable); |
78 | else | 78 | else |
79 | dev_dbg(&hdev->dev, "The power of hub port failed to be set\n"); | 79 | dev_dbg(&hdev->dev, "The power of hub port failed to be set\n"); |
80 | 80 | ||
81 | return error; | 81 | return error; |
82 | } | 82 | } |
83 | EXPORT_SYMBOL_GPL(usb_acpi_set_power_state); | 83 | EXPORT_SYMBOL_GPL(usb_acpi_set_power_state); |
84 | 84 | ||
85 | static int usb_acpi_check_port_connect_type(struct usb_device *hdev, | 85 | static int usb_acpi_check_port_connect_type(struct usb_device *hdev, |
86 | acpi_handle handle, int port1) | 86 | acpi_handle handle, int port1) |
87 | { | 87 | { |
88 | acpi_status status; | 88 | acpi_status status; |
89 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 89 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
90 | union acpi_object *upc; | 90 | union acpi_object *upc; |
91 | struct acpi_pld_info *pld; | 91 | struct acpi_pld_info *pld; |
92 | int ret = 0; | 92 | int ret = 0; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * Accoding to ACPI Spec 9.13. PLD indicates whether usb port is | 95 | * Accoding to ACPI Spec 9.13. PLD indicates whether usb port is |
96 | * user visible and _UPC indicates whether it is connectable. If | 96 | * user visible and _UPC indicates whether it is connectable. If |
97 | * the port was visible and connectable, it could be freely connected | 97 | * the port was visible and connectable, it could be freely connected |
98 | * and disconnected with USB devices. If no visible and connectable, | 98 | * and disconnected with USB devices. If no visible and connectable, |
99 | * a usb device is directly hard-wired to the port. If no visible and | 99 | * a usb device is directly hard-wired to the port. If no visible and |
100 | * no connectable, the port would be not used. | 100 | * no connectable, the port would be not used. |
101 | */ | 101 | */ |
102 | status = acpi_get_physical_device_location(handle, &pld); | 102 | status = acpi_get_physical_device_location(handle, &pld); |
103 | if (ACPI_FAILURE(status)) | 103 | if (ACPI_FAILURE(status)) |
104 | return -ENODEV; | 104 | return -ENODEV; |
105 | 105 | ||
106 | status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer); | 106 | status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer); |
107 | upc = buffer.pointer; | 107 | upc = buffer.pointer; |
108 | if (!upc || (upc->type != ACPI_TYPE_PACKAGE) | 108 | if (!upc || (upc->type != ACPI_TYPE_PACKAGE) |
109 | || upc->package.count != 4) { | 109 | || upc->package.count != 4) { |
110 | ret = -EINVAL; | 110 | ret = -EINVAL; |
111 | goto out; | 111 | goto out; |
112 | } | 112 | } |
113 | 113 | ||
114 | if (upc->package.elements[0].integer.value) | 114 | if (upc->package.elements[0].integer.value) |
115 | if (pld->user_visible) | 115 | if (pld->user_visible) |
116 | usb_set_hub_port_connect_type(hdev, port1, | 116 | usb_set_hub_port_connect_type(hdev, port1, |
117 | USB_PORT_CONNECT_TYPE_HOT_PLUG); | 117 | USB_PORT_CONNECT_TYPE_HOT_PLUG); |
118 | else | 118 | else |
119 | usb_set_hub_port_connect_type(hdev, port1, | 119 | usb_set_hub_port_connect_type(hdev, port1, |
120 | USB_PORT_CONNECT_TYPE_HARD_WIRED); | 120 | USB_PORT_CONNECT_TYPE_HARD_WIRED); |
121 | else if (!pld->user_visible) | 121 | else if (!pld->user_visible) |
122 | usb_set_hub_port_connect_type(hdev, port1, USB_PORT_NOT_USED); | 122 | usb_set_hub_port_connect_type(hdev, port1, USB_PORT_NOT_USED); |
123 | 123 | ||
124 | out: | 124 | out: |
125 | ACPI_FREE(pld); | 125 | ACPI_FREE(pld); |
126 | kfree(upc); | 126 | kfree(upc); |
127 | return ret; | 127 | return ret; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) | 130 | static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) |
131 | { | 131 | { |
132 | struct usb_device *udev; | 132 | struct usb_device *udev; |
133 | acpi_handle *parent_handle; | 133 | acpi_handle *parent_handle; |
134 | int port_num; | 134 | int port_num; |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * In the ACPI DSDT table, only the usb root hub and usb ports are | 137 | * In the ACPI DSDT table, only the usb root hub and usb ports are |
138 | * acpi device nodes. The hierarchy looks like the following. | 138 | * acpi device nodes. The hierarchy looks like the following. |
139 | * Device (EHC1) | 139 | * Device (EHC1) |
140 | * Device (HUBN) | 140 | * Device (HUBN) |
141 | * Device (PR01) | 141 | * Device (PR01) |
142 | * Device (PR11) | 142 | * Device (PR11) |
143 | * Device (PR12) | 143 | * Device (PR12) |
144 | * Device (PR13) | 144 | * Device (PR13) |
145 | * ... | 145 | * ... |
146 | * So the binding process is divided into two parts: binding the | 146 | * So the binding process is divided into two parts: binding the |
147 | * root hub and binding the usb ports. | 147 | * root hub and binding the usb ports. |
148 | */ | 148 | */ |
149 | if (is_usb_device(dev)) { | 149 | if (is_usb_device(dev)) { |
150 | udev = to_usb_device(dev); | 150 | udev = to_usb_device(dev); |
151 | if (udev->parent) { | 151 | if (udev->parent) { |
152 | enum usb_port_connect_type type; | 152 | enum usb_port_connect_type type; |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * Set the usb device's removability according to the usb | 155 | * Set the usb device's removability according to the usb |
156 | * port's connect type. | 156 | * port's connect type. |
157 | */ | 157 | */ |
158 | type = usb_get_hub_port_connect_type(udev->parent, | 158 | type = usb_get_hub_port_connect_type(udev->parent, |
159 | udev->portnum); | 159 | udev->portnum); |
160 | switch (type) { | 160 | switch (type) { |
161 | case USB_PORT_CONNECT_TYPE_HOT_PLUG: | 161 | case USB_PORT_CONNECT_TYPE_HOT_PLUG: |
162 | udev->removable = USB_DEVICE_REMOVABLE; | 162 | udev->removable = USB_DEVICE_REMOVABLE; |
163 | break; | 163 | break; |
164 | case USB_PORT_CONNECT_TYPE_HARD_WIRED: | 164 | case USB_PORT_CONNECT_TYPE_HARD_WIRED: |
165 | udev->removable = USB_DEVICE_FIXED; | 165 | udev->removable = USB_DEVICE_FIXED; |
166 | break; | 166 | break; |
167 | default: | 167 | default: |
168 | udev->removable = USB_DEVICE_REMOVABLE_UNKNOWN; | 168 | udev->removable = USB_DEVICE_REMOVABLE_UNKNOWN; |
169 | break; | 169 | break; |
170 | } | 170 | } |
171 | 171 | ||
172 | return -ENODEV; | 172 | return -ENODEV; |
173 | } | 173 | } |
174 | 174 | ||
175 | /* root hub's parent is the usb hcd. */ | 175 | /* root hub's parent is the usb hcd. */ |
176 | parent_handle = DEVICE_ACPI_HANDLE(dev->parent); | 176 | parent_handle = ACPI_HANDLE(dev->parent); |
177 | *handle = acpi_get_child(parent_handle, udev->portnum); | 177 | *handle = acpi_get_child(parent_handle, udev->portnum); |
178 | if (!*handle) | 178 | if (!*handle) |
179 | return -ENODEV; | 179 | return -ENODEV; |
180 | return 0; | 180 | return 0; |
181 | } else if (is_usb_port(dev)) { | 181 | } else if (is_usb_port(dev)) { |
182 | sscanf(dev_name(dev), "port%d", &port_num); | 182 | sscanf(dev_name(dev), "port%d", &port_num); |
183 | /* Get the struct usb_device pointer of the port's hub */ | 183 | /* Get the struct usb_device pointer of the port's hub */ |
184 | udev = to_usb_device(dev->parent->parent); | 184 | udev = to_usb_device(dev->parent->parent); |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * The root hub ports' parent is the root hub. The non-root-hub | 187 | * The root hub ports' parent is the root hub. The non-root-hub |
188 | * ports' parent is the parent hub port which the hub is | 188 | * ports' parent is the parent hub port which the hub is |
189 | * connected to. | 189 | * connected to. |
190 | */ | 190 | */ |
191 | if (!udev->parent) { | 191 | if (!udev->parent) { |
192 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 192 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
193 | int raw_port_num; | 193 | int raw_port_num; |
194 | 194 | ||
195 | raw_port_num = usb_hcd_find_raw_port_number(hcd, | 195 | raw_port_num = usb_hcd_find_raw_port_number(hcd, |
196 | port_num); | 196 | port_num); |
197 | *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), | 197 | *handle = acpi_get_child(ACPI_HANDLE(&udev->dev), |
198 | raw_port_num); | 198 | raw_port_num); |
199 | if (!*handle) | 199 | if (!*handle) |
200 | return -ENODEV; | 200 | return -ENODEV; |
201 | } else { | 201 | } else { |
202 | parent_handle = | 202 | parent_handle = |
203 | usb_get_hub_port_acpi_handle(udev->parent, | 203 | usb_get_hub_port_acpi_handle(udev->parent, |
204 | udev->portnum); | 204 | udev->portnum); |
205 | if (!parent_handle) | 205 | if (!parent_handle) |
206 | return -ENODEV; | 206 | return -ENODEV; |
207 | 207 | ||
208 | *handle = acpi_get_child(parent_handle, port_num); | 208 | *handle = acpi_get_child(parent_handle, port_num); |
209 | if (!*handle) | 209 | if (!*handle) |
210 | return -ENODEV; | 210 | return -ENODEV; |
211 | } | 211 | } |
212 | usb_acpi_check_port_connect_type(udev, *handle, port_num); | 212 | usb_acpi_check_port_connect_type(udev, *handle, port_num); |
213 | } else | 213 | } else |
214 | return -ENODEV; | 214 | return -ENODEV; |
215 | 215 | ||
216 | return 0; | 216 | return 0; |
217 | } | 217 | } |
218 | 218 | ||
219 | static bool usb_acpi_bus_match(struct device *dev) | 219 | static bool usb_acpi_bus_match(struct device *dev) |
220 | { | 220 | { |
221 | return is_usb_device(dev) || is_usb_port(dev); | 221 | return is_usb_device(dev) || is_usb_port(dev); |
222 | } | 222 | } |
223 | 223 | ||
224 | static struct acpi_bus_type usb_acpi_bus = { | 224 | static struct acpi_bus_type usb_acpi_bus = { |
225 | .name = "USB", | 225 | .name = "USB", |
226 | .match = usb_acpi_bus_match, | 226 | .match = usb_acpi_bus_match, |
227 | .find_device = usb_acpi_find_device, | 227 | .find_device = usb_acpi_find_device, |
228 | }; | 228 | }; |
229 | 229 | ||
230 | int usb_acpi_register(void) | 230 | int usb_acpi_register(void) |
231 | { | 231 | { |
232 | return register_acpi_bus_type(&usb_acpi_bus); | 232 | return register_acpi_bus_type(&usb_acpi_bus); |
233 | } | 233 | } |
234 | 234 | ||
235 | void usb_acpi_unregister(void) | 235 | void usb_acpi_unregister(void) |
236 | { | 236 | { |
237 | unregister_acpi_bus_type(&usb_acpi_bus); | 237 | unregister_acpi_bus_type(&usb_acpi_bus); |
238 | } | 238 | } |
239 | 239 |
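
Taken together, the two exported helpers above form the usual call pattern: check usb_acpi_power_manageable() first, then toggle the port with usb_acpi_set_power_state(). A hedged sketch follows (the function name and port choice are illustrative, not part of this commit; it assumes the declarations from the USB core's internal usb.h header are visible):

/* Illustrative sketch: power down hub port 3 (zero-based index 2) using
 * the helpers exported by usb-acpi.c above. */
#include <linux/errno.h>
#include <linux/usb.h>
#include "usb.h"	/* declarations of the usb_acpi_* helpers (assumed) */

static int example_power_off_port3(struct usb_device *hdev)
{
	int index = 2;	/* zero-based index, i.e. port number 3 */

	if (!usb_acpi_power_manageable(hdev, index))
		return -ENODEV;	/* no ACPI power resource for this port */

	return usb_acpi_set_power_state(hdev, index, false);
}
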
drivers/xen/pci.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2009, Intel Corporation. | 2 | * Copyright (c) 2009, Intel Corporation. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | 5 | * under the terms and conditions of the GNU General Public License, |
6 | * version 2, as published by the Free Software Foundation. | 6 | * version 2, as published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | * more details. | 11 | * more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License along with | 13 | * You should have received a copy of the GNU General Public License along with |
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
16 | * | 16 | * |
17 | * Author: Weidong Han <weidong.han@intel.com> | 17 | * Author: Weidong Han <weidong.han@intel.com> |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/acpi.h> | 21 | #include <linux/acpi.h> |
22 | #include <xen/xen.h> | 22 | #include <xen/xen.h> |
23 | #include <xen/interface/physdev.h> | 23 | #include <xen/interface/physdev.h> |
24 | #include <xen/interface/xen.h> | 24 | #include <xen/interface/xen.h> |
25 | 25 | ||
26 | #include <asm/xen/hypervisor.h> | 26 | #include <asm/xen/hypervisor.h> |
27 | #include <asm/xen/hypercall.h> | 27 | #include <asm/xen/hypercall.h> |
28 | #include "../pci/pci.h" | 28 | #include "../pci/pci.h" |
29 | 29 | ||
30 | static bool __read_mostly pci_seg_supported = true; | 30 | static bool __read_mostly pci_seg_supported = true; |
31 | 31 | ||
32 | static int xen_add_device(struct device *dev) | 32 | static int xen_add_device(struct device *dev) |
33 | { | 33 | { |
34 | int r; | 34 | int r; |
35 | struct pci_dev *pci_dev = to_pci_dev(dev); | 35 | struct pci_dev *pci_dev = to_pci_dev(dev); |
36 | #ifdef CONFIG_PCI_IOV | 36 | #ifdef CONFIG_PCI_IOV |
37 | struct pci_dev *physfn = pci_dev->physfn; | 37 | struct pci_dev *physfn = pci_dev->physfn; |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | if (pci_seg_supported) { | 40 | if (pci_seg_supported) { |
41 | struct physdev_pci_device_add add = { | 41 | struct physdev_pci_device_add add = { |
42 | .seg = pci_domain_nr(pci_dev->bus), | 42 | .seg = pci_domain_nr(pci_dev->bus), |
43 | .bus = pci_dev->bus->number, | 43 | .bus = pci_dev->bus->number, |
44 | .devfn = pci_dev->devfn | 44 | .devfn = pci_dev->devfn |
45 | }; | 45 | }; |
46 | #ifdef CONFIG_ACPI | 46 | #ifdef CONFIG_ACPI |
47 | acpi_handle handle; | 47 | acpi_handle handle; |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_PCI_IOV | 50 | #ifdef CONFIG_PCI_IOV |
51 | if (pci_dev->is_virtfn) { | 51 | if (pci_dev->is_virtfn) { |
52 | add.flags = XEN_PCI_DEV_VIRTFN; | 52 | add.flags = XEN_PCI_DEV_VIRTFN; |
53 | add.physfn.bus = physfn->bus->number; | 53 | add.physfn.bus = physfn->bus->number; |
54 | add.physfn.devfn = physfn->devfn; | 54 | add.physfn.devfn = physfn->devfn; |
55 | } else | 55 | } else |
56 | #endif | 56 | #endif |
57 | if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) | 57 | if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) |
58 | add.flags = XEN_PCI_DEV_EXTFN; | 58 | add.flags = XEN_PCI_DEV_EXTFN; |
59 | 59 | ||
60 | #ifdef CONFIG_ACPI | 60 | #ifdef CONFIG_ACPI |
61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); | 61 | handle = ACPI_HANDLE(&pci_dev->dev); |
62 | if (!handle && pci_dev->bus->bridge) | 62 | if (!handle && pci_dev->bus->bridge) |
63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); | 63 | handle = ACPI_HANDLE(pci_dev->bus->bridge); |
64 | #ifdef CONFIG_PCI_IOV | 64 | #ifdef CONFIG_PCI_IOV |
65 | if (!handle && pci_dev->is_virtfn) | 65 | if (!handle && pci_dev->is_virtfn) |
66 | handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); | 66 | handle = ACPI_HANDLE(physfn->bus->bridge); |
67 | #endif | 67 | #endif |
68 | if (handle) { | 68 | if (handle) { |
69 | acpi_status status; | 69 | acpi_status status; |
70 | 70 | ||
71 | do { | 71 | do { |
72 | unsigned long long pxm; | 72 | unsigned long long pxm; |
73 | 73 | ||
74 | status = acpi_evaluate_integer(handle, "_PXM", | 74 | status = acpi_evaluate_integer(handle, "_PXM", |
75 | NULL, &pxm); | 75 | NULL, &pxm); |
76 | if (ACPI_SUCCESS(status)) { | 76 | if (ACPI_SUCCESS(status)) { |
77 | add.optarr[0] = pxm; | 77 | add.optarr[0] = pxm; |
78 | add.flags |= XEN_PCI_DEV_PXM; | 78 | add.flags |= XEN_PCI_DEV_PXM; |
79 | break; | 79 | break; |
80 | } | 80 | } |
81 | status = acpi_get_parent(handle, &handle); | 81 | status = acpi_get_parent(handle, &handle); |
82 | } while (ACPI_SUCCESS(status)); | 82 | } while (ACPI_SUCCESS(status)); |
83 | } | 83 | } |
84 | #endif /* CONFIG_ACPI */ | 84 | #endif /* CONFIG_ACPI */ |
85 | 85 | ||
86 | r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add); | 86 | r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add); |
87 | if (r != -ENOSYS) | 87 | if (r != -ENOSYS) |
88 | return r; | 88 | return r; |
89 | pci_seg_supported = false; | 89 | pci_seg_supported = false; |
90 | } | 90 | } |
91 | 91 | ||
92 | if (pci_domain_nr(pci_dev->bus)) | 92 | if (pci_domain_nr(pci_dev->bus)) |
93 | r = -ENOSYS; | 93 | r = -ENOSYS; |
94 | #ifdef CONFIG_PCI_IOV | 94 | #ifdef CONFIG_PCI_IOV |
95 | else if (pci_dev->is_virtfn) { | 95 | else if (pci_dev->is_virtfn) { |
96 | struct physdev_manage_pci_ext manage_pci_ext = { | 96 | struct physdev_manage_pci_ext manage_pci_ext = { |
97 | .bus = pci_dev->bus->number, | 97 | .bus = pci_dev->bus->number, |
98 | .devfn = pci_dev->devfn, | 98 | .devfn = pci_dev->devfn, |
99 | .is_virtfn = 1, | 99 | .is_virtfn = 1, |
100 | .physfn.bus = physfn->bus->number, | 100 | .physfn.bus = physfn->bus->number, |
101 | .physfn.devfn = physfn->devfn, | 101 | .physfn.devfn = physfn->devfn, |
102 | }; | 102 | }; |
103 | 103 | ||
104 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext, | 104 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext, |
105 | &manage_pci_ext); | 105 | &manage_pci_ext); |
106 | } | 106 | } |
107 | #endif | 107 | #endif |
108 | else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) { | 108 | else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) { |
109 | struct physdev_manage_pci_ext manage_pci_ext = { | 109 | struct physdev_manage_pci_ext manage_pci_ext = { |
110 | .bus = pci_dev->bus->number, | 110 | .bus = pci_dev->bus->number, |
111 | .devfn = pci_dev->devfn, | 111 | .devfn = pci_dev->devfn, |
112 | .is_extfn = 1, | 112 | .is_extfn = 1, |
113 | }; | 113 | }; |
114 | 114 | ||
115 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext, | 115 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext, |
116 | &manage_pci_ext); | 116 | &manage_pci_ext); |
117 | } else { | 117 | } else { |
118 | struct physdev_manage_pci manage_pci = { | 118 | struct physdev_manage_pci manage_pci = { |
119 | .bus = pci_dev->bus->number, | 119 | .bus = pci_dev->bus->number, |
120 | .devfn = pci_dev->devfn, | 120 | .devfn = pci_dev->devfn, |
121 | }; | 121 | }; |
122 | 122 | ||
123 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add, | 123 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add, |
124 | &manage_pci); | 124 | &manage_pci); |
125 | } | 125 | } |
126 | 126 | ||
127 | return r; | 127 | return r; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int xen_remove_device(struct device *dev) | 130 | static int xen_remove_device(struct device *dev) |
131 | { | 131 | { |
132 | int r; | 132 | int r; |
133 | struct pci_dev *pci_dev = to_pci_dev(dev); | 133 | struct pci_dev *pci_dev = to_pci_dev(dev); |
134 | 134 | ||
135 | if (pci_seg_supported) { | 135 | if (pci_seg_supported) { |
136 | struct physdev_pci_device device = { | 136 | struct physdev_pci_device device = { |
137 | .seg = pci_domain_nr(pci_dev->bus), | 137 | .seg = pci_domain_nr(pci_dev->bus), |
138 | .bus = pci_dev->bus->number, | 138 | .bus = pci_dev->bus->number, |
139 | .devfn = pci_dev->devfn | 139 | .devfn = pci_dev->devfn |
140 | }; | 140 | }; |
141 | 141 | ||
142 | r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove, | 142 | r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove, |
143 | &device); | 143 | &device); |
144 | } else if (pci_domain_nr(pci_dev->bus)) | 144 | } else if (pci_domain_nr(pci_dev->bus)) |
145 | r = -ENOSYS; | 145 | r = -ENOSYS; |
146 | else { | 146 | else { |
147 | struct physdev_manage_pci manage_pci = { | 147 | struct physdev_manage_pci manage_pci = { |
148 | .bus = pci_dev->bus->number, | 148 | .bus = pci_dev->bus->number, |
149 | .devfn = pci_dev->devfn | 149 | .devfn = pci_dev->devfn |
150 | }; | 150 | }; |
151 | 151 | ||
152 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove, | 152 | r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove, |
153 | &manage_pci); | 153 | &manage_pci); |
154 | } | 154 | } |
155 | 155 | ||
156 | return r; | 156 | return r; |
157 | } | 157 | } |
158 | 158 | ||
159 | static int xen_pci_notifier(struct notifier_block *nb, | 159 | static int xen_pci_notifier(struct notifier_block *nb, |
160 | unsigned long action, void *data) | 160 | unsigned long action, void *data) |
161 | { | 161 | { |
162 | struct device *dev = data; | 162 | struct device *dev = data; |
163 | int r = 0; | 163 | int r = 0; |
164 | 164 | ||
165 | switch (action) { | 165 | switch (action) { |
166 | case BUS_NOTIFY_ADD_DEVICE: | 166 | case BUS_NOTIFY_ADD_DEVICE: |
167 | r = xen_add_device(dev); | 167 | r = xen_add_device(dev); |
168 | break; | 168 | break; |
169 | case BUS_NOTIFY_DEL_DEVICE: | 169 | case BUS_NOTIFY_DEL_DEVICE: |
170 | r = xen_remove_device(dev); | 170 | r = xen_remove_device(dev); |
171 | break; | 171 | break; |
172 | default: | 172 | default: |
173 | return NOTIFY_DONE; | 173 | return NOTIFY_DONE; |
174 | } | 174 | } |
175 | if (r) | 175 | if (r) |
176 | dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n", | 176 | dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n", |
177 | action == BUS_NOTIFY_ADD_DEVICE ? "add" : | 177 | action == BUS_NOTIFY_ADD_DEVICE ? "add" : |
178 | (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?")); | 178 | (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?")); |
179 | return NOTIFY_OK; | 179 | return NOTIFY_OK; |
180 | } | 180 | } |
181 | 181 | ||
182 | static struct notifier_block device_nb = { | 182 | static struct notifier_block device_nb = { |
183 | .notifier_call = xen_pci_notifier, | 183 | .notifier_call = xen_pci_notifier, |
184 | }; | 184 | }; |
185 | 185 | ||
186 | static int __init register_xen_pci_notifier(void) | 186 | static int __init register_xen_pci_notifier(void) |
187 | { | 187 | { |
188 | if (!xen_initial_domain()) | 188 | if (!xen_initial_domain()) |
189 | return 0; | 189 | return 0; |
190 | 190 | ||
191 | return bus_register_notifier(&pci_bus_type, &device_nb); | 191 | return bus_register_notifier(&pci_bus_type, &device_nb); |
192 | } | 192 | } |
193 | 193 | ||
194 | arch_initcall(register_xen_pci_notifier); | 194 | arch_initcall(register_xen_pci_notifier); |
195 | 195 |
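
The xen_add_device() hunk shows the other pattern this commit touches: ACPI_HANDLE() on the device itself, a fall-back to the bridge handle, then a walk up the ACPI namespace looking for _PXM. Below is a small sketch of just that walk, mirroring the loop above; the function name is hypothetical and the caller is assumed to already hold a valid handle:

/* Illustrative only: climb the ACPI namespace from @handle until a node
 * that implements _PXM (proximity domain) is found, as xen_add_device()
 * does above.  Returns 0 and fills @pxm on success, -ENODEV otherwise. */
#include <linux/acpi.h>

static int example_find_pxm(acpi_handle handle, unsigned long long *pxm)
{
	acpi_status status;

	do {
		status = acpi_evaluate_integer(handle, "_PXM", NULL, pxm);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_get_parent(handle, &handle);
	} while (ACPI_SUCCESS(status));

	return -ENODEV;
}
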
include/linux/acpi.h
1 | /* | 1 | /* |
2 | * acpi.h - ACPI Interface | 2 | * acpi.h - ACPI Interface |
3 | * | 3 | * |
4 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | 4 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
5 | * | 5 | * |
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 | * | 21 | * |
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #ifndef _LINUX_ACPI_H | 25 | #ifndef _LINUX_ACPI_H |
26 | #define _LINUX_ACPI_H | 26 | #define _LINUX_ACPI_H |
27 | 27 | ||
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/ioport.h> /* for struct resource */ | 29 | #include <linux/ioport.h> /* for struct resource */ |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | 31 | ||
32 | #ifdef CONFIG_ACPI | 32 | #ifdef CONFIG_ACPI |
33 | 33 | ||
34 | #ifndef _LINUX | 34 | #ifndef _LINUX |
35 | #define _LINUX | 35 | #define _LINUX |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #include <linux/list.h> | 38 | #include <linux/list.h> |
39 | #include <linux/mod_devicetable.h> | 39 | #include <linux/mod_devicetable.h> |
40 | 40 | ||
41 | #include <acpi/acpi.h> | 41 | #include <acpi/acpi.h> |
42 | #include <acpi/acpi_bus.h> | 42 | #include <acpi/acpi_bus.h> |
43 | #include <acpi/acpi_drivers.h> | 43 | #include <acpi/acpi_drivers.h> |
44 | #include <acpi/acpi_numa.h> | 44 | #include <acpi/acpi_numa.h> |
45 | #include <asm/acpi.h> | 45 | #include <asm/acpi.h> |
46 | 46 | ||
47 | static inline acpi_handle acpi_device_handle(struct acpi_device *adev) | 47 | static inline acpi_handle acpi_device_handle(struct acpi_device *adev) |
48 | { | 48 | { |
49 | return adev ? adev->handle : NULL; | 49 | return adev ? adev->handle : NULL; |
50 | } | 50 | } |
51 | 51 | ||
52 | #define ACPI_COMPANION(dev) ((dev)->acpi_node.companion) | 52 | #define ACPI_COMPANION(dev) ((dev)->acpi_node.companion) |
53 | #define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev) | 53 | #define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev) |
54 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) | 54 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) |
55 | 55 | ||
56 | enum acpi_irq_model_id { | 56 | enum acpi_irq_model_id { |
57 | ACPI_IRQ_MODEL_PIC = 0, | 57 | ACPI_IRQ_MODEL_PIC = 0, |
58 | ACPI_IRQ_MODEL_IOAPIC, | 58 | ACPI_IRQ_MODEL_IOAPIC, |
59 | ACPI_IRQ_MODEL_IOSAPIC, | 59 | ACPI_IRQ_MODEL_IOSAPIC, |
60 | ACPI_IRQ_MODEL_PLATFORM, | 60 | ACPI_IRQ_MODEL_PLATFORM, |
61 | ACPI_IRQ_MODEL_COUNT | 61 | ACPI_IRQ_MODEL_COUNT |
62 | }; | 62 | }; |
63 | 63 | ||
64 | extern enum acpi_irq_model_id acpi_irq_model; | 64 | extern enum acpi_irq_model_id acpi_irq_model; |
65 | 65 | ||
66 | enum acpi_interrupt_id { | 66 | enum acpi_interrupt_id { |
67 | ACPI_INTERRUPT_PMI = 1, | 67 | ACPI_INTERRUPT_PMI = 1, |
68 | ACPI_INTERRUPT_INIT, | 68 | ACPI_INTERRUPT_INIT, |
69 | ACPI_INTERRUPT_CPEI, | 69 | ACPI_INTERRUPT_CPEI, |
70 | ACPI_INTERRUPT_COUNT | 70 | ACPI_INTERRUPT_COUNT |
71 | }; | 71 | }; |
72 | 72 | ||
73 | #define ACPI_SPACE_MEM 0 | 73 | #define ACPI_SPACE_MEM 0 |
74 | 74 | ||
75 | enum acpi_address_range_id { | 75 | enum acpi_address_range_id { |
76 | ACPI_ADDRESS_RANGE_MEMORY = 1, | 76 | ACPI_ADDRESS_RANGE_MEMORY = 1, |
77 | ACPI_ADDRESS_RANGE_RESERVED = 2, | 77 | ACPI_ADDRESS_RANGE_RESERVED = 2, |
78 | ACPI_ADDRESS_RANGE_ACPI = 3, | 78 | ACPI_ADDRESS_RANGE_ACPI = 3, |
79 | ACPI_ADDRESS_RANGE_NVS = 4, | 79 | ACPI_ADDRESS_RANGE_NVS = 4, |
80 | ACPI_ADDRESS_RANGE_COUNT | 80 | ACPI_ADDRESS_RANGE_COUNT |
81 | }; | 81 | }; |
82 | 82 | ||
83 | 83 | ||
84 | /* Table Handlers */ | 84 | /* Table Handlers */ |
85 | 85 | ||
86 | typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); | 86 | typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); |
87 | 87 | ||
88 | typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, | 88 | typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, |
89 | const unsigned long end); | 89 | const unsigned long end); |
90 | 90 | ||
91 | #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE | 91 | #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE |
92 | void acpi_initrd_override(void *data, size_t size); | 92 | void acpi_initrd_override(void *data, size_t size); |
93 | #else | 93 | #else |
94 | static inline void acpi_initrd_override(void *data, size_t size) | 94 | static inline void acpi_initrd_override(void *data, size_t size) |
95 | { | 95 | { |
96 | } | 96 | } |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | char * __acpi_map_table (unsigned long phys_addr, unsigned long size); | 99 | char * __acpi_map_table (unsigned long phys_addr, unsigned long size); |
100 | void __acpi_unmap_table(char *map, unsigned long size); | 100 | void __acpi_unmap_table(char *map, unsigned long size); |
101 | int early_acpi_boot_init(void); | 101 | int early_acpi_boot_init(void); |
102 | int acpi_boot_init (void); | 102 | int acpi_boot_init (void); |
103 | void acpi_boot_table_init (void); | 103 | void acpi_boot_table_init (void); |
104 | int acpi_mps_check (void); | 104 | int acpi_mps_check (void); |
105 | int acpi_numa_init (void); | 105 | int acpi_numa_init (void); |
106 | 106 | ||
107 | int acpi_table_init (void); | 107 | int acpi_table_init (void); |
108 | int acpi_table_parse(char *id, acpi_tbl_table_handler handler); | 108 | int acpi_table_parse(char *id, acpi_tbl_table_handler handler); |
109 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, | 109 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, |
110 | int entry_id, | 110 | int entry_id, |
111 | acpi_tbl_entry_handler handler, | 111 | acpi_tbl_entry_handler handler, |
112 | unsigned int max_entries); | 112 | unsigned int max_entries); |
113 | int acpi_table_parse_madt(enum acpi_madt_type id, | 113 | int acpi_table_parse_madt(enum acpi_madt_type id, |
114 | acpi_tbl_entry_handler handler, | 114 | acpi_tbl_entry_handler handler, |
115 | unsigned int max_entries); | 115 | unsigned int max_entries); |
116 | int acpi_parse_mcfg (struct acpi_table_header *header); | 116 | int acpi_parse_mcfg (struct acpi_table_header *header); |
117 | void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); | 117 | void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); |
118 | 118 | ||
119 | /* the following four functions are architecture-dependent */ | 119 | /* the following four functions are architecture-dependent */ |
120 | void acpi_numa_slit_init (struct acpi_table_slit *slit); | 120 | void acpi_numa_slit_init (struct acpi_table_slit *slit); |
121 | void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); | 121 | void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); |
122 | void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); | 122 | void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); |
123 | int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); | 123 | int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); |
124 | void acpi_numa_arch_fixup(void); | 124 | void acpi_numa_arch_fixup(void); |
125 | 125 | ||
126 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 126 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
127 | /* Arch dependent functions for cpu hotplug support */ | 127 | /* Arch dependent functions for cpu hotplug support */ |
128 | int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); | 128 | int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); |
129 | int acpi_unmap_lsapic(int cpu); | 129 | int acpi_unmap_lsapic(int cpu); |
130 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 130 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
131 | 131 | ||
132 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); | 132 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); |
133 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); | 133 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); |
134 | void acpi_irq_stats_init(void); | 134 | void acpi_irq_stats_init(void); |
135 | extern u32 acpi_irq_handled; | 135 | extern u32 acpi_irq_handled; |
136 | extern u32 acpi_irq_not_handled; | 136 | extern u32 acpi_irq_not_handled; |
137 | 137 | ||
138 | extern int sbf_port; | 138 | extern int sbf_port; |
139 | extern unsigned long acpi_realmode_flags; | 139 | extern unsigned long acpi_realmode_flags; |
140 | 140 | ||
141 | int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); | 141 | int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); |
142 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); | 142 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); |
143 | int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); | 143 | int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); |
144 | 144 | ||
145 | #ifdef CONFIG_X86_IO_APIC | 145 | #ifdef CONFIG_X86_IO_APIC |
146 | extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); | 146 | extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); |
147 | #else | 147 | #else |
148 | #define acpi_get_override_irq(gsi, trigger, polarity) (-1) | 148 | #define acpi_get_override_irq(gsi, trigger, polarity) (-1) |
149 | #endif | 149 | #endif |
150 | /* | 150 | /* |
151 | * This function undoes the effect of one call to acpi_register_gsi(). | 151 | * This function undoes the effect of one call to acpi_register_gsi(). |
152 | * If this matches the last registration, any IRQ resources for gsi | 152 | * If this matches the last registration, any IRQ resources for gsi |
153 | * are freed. | 153 | * are freed. |
154 | */ | 154 | */ |
155 | void acpi_unregister_gsi (u32 gsi); | 155 | void acpi_unregister_gsi (u32 gsi); |
156 | 156 | ||
157 | struct pci_dev; | 157 | struct pci_dev; |
158 | 158 | ||
159 | int acpi_pci_irq_enable (struct pci_dev *dev); | 159 | int acpi_pci_irq_enable (struct pci_dev *dev); |
160 | void acpi_penalize_isa_irq(int irq, int active); | 160 | void acpi_penalize_isa_irq(int irq, int active); |
161 | 161 | ||
162 | void acpi_pci_irq_disable (struct pci_dev *dev); | 162 | void acpi_pci_irq_disable (struct pci_dev *dev); |
163 | 163 | ||
164 | extern int ec_read(u8 addr, u8 *val); | 164 | extern int ec_read(u8 addr, u8 *val); |
165 | extern int ec_write(u8 addr, u8 val); | 165 | extern int ec_write(u8 addr, u8 val); |
166 | extern int ec_transaction(u8 command, | 166 | extern int ec_transaction(u8 command, |
167 | const u8 *wdata, unsigned wdata_len, | 167 | const u8 *wdata, unsigned wdata_len, |
168 | u8 *rdata, unsigned rdata_len); | 168 | u8 *rdata, unsigned rdata_len); |
169 | extern acpi_handle ec_get_handle(void); | 169 | extern acpi_handle ec_get_handle(void); |
170 | 170 | ||
171 | #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) | 171 | #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) |
172 | 172 | ||
173 | typedef void (*wmi_notify_handler) (u32 value, void *context); | 173 | typedef void (*wmi_notify_handler) (u32 value, void *context); |
174 | 174 | ||
175 | extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, | 175 | extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, |
176 | u32 method_id, | 176 | u32 method_id, |
177 | const struct acpi_buffer *in, | 177 | const struct acpi_buffer *in, |
178 | struct acpi_buffer *out); | 178 | struct acpi_buffer *out); |
179 | extern acpi_status wmi_query_block(const char *guid, u8 instance, | 179 | extern acpi_status wmi_query_block(const char *guid, u8 instance, |
180 | struct acpi_buffer *out); | 180 | struct acpi_buffer *out); |
181 | extern acpi_status wmi_set_block(const char *guid, u8 instance, | 181 | extern acpi_status wmi_set_block(const char *guid, u8 instance, |
182 | const struct acpi_buffer *in); | 182 | const struct acpi_buffer *in); |
183 | extern acpi_status wmi_install_notify_handler(const char *guid, | 183 | extern acpi_status wmi_install_notify_handler(const char *guid, |
184 | wmi_notify_handler handler, void *data); | 184 | wmi_notify_handler handler, void *data); |
185 | extern acpi_status wmi_remove_notify_handler(const char *guid); | 185 | extern acpi_status wmi_remove_notify_handler(const char *guid); |
186 | extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); | 186 | extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); |
187 | extern bool wmi_has_guid(const char *guid); | 187 | extern bool wmi_has_guid(const char *guid); |
188 | 188 | ||
189 | #endif /* CONFIG_ACPI_WMI */ | 189 | #endif /* CONFIG_ACPI_WMI */ |
190 | 190 | ||
191 | #define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 | 191 | #define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 |
192 | #define ACPI_VIDEO_DEVICE_POSTING 0x0002 | 192 | #define ACPI_VIDEO_DEVICE_POSTING 0x0002 |
193 | #define ACPI_VIDEO_ROM_AVAILABLE 0x0004 | 193 | #define ACPI_VIDEO_ROM_AVAILABLE 0x0004 |
194 | #define ACPI_VIDEO_BACKLIGHT 0x0008 | 194 | #define ACPI_VIDEO_BACKLIGHT 0x0008 |
195 | #define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 | 195 | #define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 |
196 | #define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 | 196 | #define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 |
197 | #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 | 197 | #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 |
198 | #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 | 198 | #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 |
199 | #define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 | 199 | #define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 |
200 | #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 | 200 | #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 |
201 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 | 201 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 |
202 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 | 202 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 |
203 | 203 | ||
204 | #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) | 204 | #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) |
205 | 205 | ||
206 | extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle); | 206 | extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle); |
207 | extern long acpi_is_video_device(acpi_handle handle); | 207 | extern long acpi_is_video_device(acpi_handle handle); |
208 | extern void acpi_video_dmi_promote_vendor(void); | 208 | extern void acpi_video_dmi_promote_vendor(void); |
209 | extern void acpi_video_dmi_demote_vendor(void); | 209 | extern void acpi_video_dmi_demote_vendor(void); |
210 | extern int acpi_video_backlight_support(void); | 210 | extern int acpi_video_backlight_support(void); |
211 | extern int acpi_video_display_switch_support(void); | 211 | extern int acpi_video_display_switch_support(void); |
212 | 212 | ||
213 | #else | 213 | #else |
214 | 214 | ||
215 | static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle) | 215 | static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle) |
216 | { | 216 | { |
217 | return 0; | 217 | return 0; |
218 | } | 218 | } |
219 | 219 | ||
220 | static inline long acpi_is_video_device(acpi_handle handle) | 220 | static inline long acpi_is_video_device(acpi_handle handle) |
221 | { | 221 | { |
222 | return 0; | 222 | return 0; |
223 | } | 223 | } |
224 | 224 | ||
225 | static inline void acpi_video_dmi_promote_vendor(void) | 225 | static inline void acpi_video_dmi_promote_vendor(void) |
226 | { | 226 | { |
227 | } | 227 | } |
228 | 228 | ||
229 | static inline void acpi_video_dmi_demote_vendor(void) | 229 | static inline void acpi_video_dmi_demote_vendor(void) |
230 | { | 230 | { |
231 | } | 231 | } |
232 | 232 | ||
233 | static inline int acpi_video_backlight_support(void) | 233 | static inline int acpi_video_backlight_support(void) |
234 | { | 234 | { |
235 | return 0; | 235 | return 0; |
236 | } | 236 | } |
237 | 237 | ||
238 | static inline int acpi_video_display_switch_support(void) | 238 | static inline int acpi_video_display_switch_support(void) |
239 | { | 239 | { |
240 | return 0; | 240 | return 0; |
241 | } | 241 | } |
242 | 242 | ||
243 | #endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */ | 243 | #endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */ |
244 | 244 | ||
245 | extern int acpi_blacklisted(void); | 245 | extern int acpi_blacklisted(void); |
246 | extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); | 246 | extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); |
247 | extern void acpi_osi_setup(char *str); | 247 | extern void acpi_osi_setup(char *str); |
248 | 248 | ||
249 | #ifdef CONFIG_ACPI_NUMA | 249 | #ifdef CONFIG_ACPI_NUMA |
250 | int acpi_get_pxm(acpi_handle handle); | 250 | int acpi_get_pxm(acpi_handle handle); |
251 | int acpi_get_node(acpi_handle *handle); | 251 | int acpi_get_node(acpi_handle *handle); |
252 | #else | 252 | #else |
253 | static inline int acpi_get_pxm(acpi_handle handle) | 253 | static inline int acpi_get_pxm(acpi_handle handle) |
254 | { | 254 | { |
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | static inline int acpi_get_node(acpi_handle *handle) | 257 | static inline int acpi_get_node(acpi_handle *handle) |
258 | { | 258 | { |
259 | return 0; | 259 | return 0; |
260 | } | 260 | } |
261 | #endif | 261 | #endif |
262 | extern int acpi_paddr_to_node(u64 start_addr, u64 size); | 262 | extern int acpi_paddr_to_node(u64 start_addr, u64 size); |
263 | 263 | ||
264 | extern int pnpacpi_disabled; | 264 | extern int pnpacpi_disabled; |
265 | 265 | ||
266 | #define PXM_INVAL (-1) | 266 | #define PXM_INVAL (-1) |
267 | 267 | ||
268 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); | 268 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); |
269 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); | 269 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); |
270 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | 270 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, |
271 | struct resource *res); | 271 | struct resource *res); |
272 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, | 272 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, |
273 | struct resource *res); | 273 | struct resource *res); |
274 | unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); | 274 | unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); |
275 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | 275 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, |
276 | struct resource *res); | 276 | struct resource *res); |
277 | 277 | ||
278 | struct resource_list_entry { | 278 | struct resource_list_entry { |
279 | struct list_head node; | 279 | struct list_head node; |
280 | struct resource res; | 280 | struct resource res; |
281 | }; | 281 | }; |
282 | 282 | ||
283 | void acpi_dev_free_resource_list(struct list_head *list); | 283 | void acpi_dev_free_resource_list(struct list_head *list); |
284 | int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, | 284 | int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, |
285 | int (*preproc)(struct acpi_resource *, void *), | 285 | int (*preproc)(struct acpi_resource *, void *), |
286 | void *preproc_data); | 286 | void *preproc_data); |
287 | 287 | ||
288 | int acpi_check_resource_conflict(const struct resource *res); | 288 | int acpi_check_resource_conflict(const struct resource *res); |
289 | 289 | ||
290 | int acpi_check_region(resource_size_t start, resource_size_t n, | 290 | int acpi_check_region(resource_size_t start, resource_size_t n, |
291 | const char *name); | 291 | const char *name); |
292 | 292 | ||
293 | int acpi_resources_are_enforced(void); | 293 | int acpi_resources_are_enforced(void); |
294 | 294 | ||
295 | #ifdef CONFIG_HIBERNATION | 295 | #ifdef CONFIG_HIBERNATION |
296 | void __init acpi_no_s4_hw_signature(void); | 296 | void __init acpi_no_s4_hw_signature(void); |
297 | #endif | 297 | #endif |
298 | 298 | ||
299 | #ifdef CONFIG_PM_SLEEP | 299 | #ifdef CONFIG_PM_SLEEP |
300 | void __init acpi_old_suspend_ordering(void); | 300 | void __init acpi_old_suspend_ordering(void); |
301 | void __init acpi_nvs_nosave(void); | 301 | void __init acpi_nvs_nosave(void); |
302 | void __init acpi_nvs_nosave_s3(void); | 302 | void __init acpi_nvs_nosave_s3(void); |
303 | #endif /* CONFIG_PM_SLEEP */ | 303 | #endif /* CONFIG_PM_SLEEP */ |
304 | 304 | ||
305 | struct acpi_osc_context { | 305 | struct acpi_osc_context { |
306 | char *uuid_str; /* UUID string */ | 306 | char *uuid_str; /* UUID string */ |
307 | int rev; | 307 | int rev; |
308 | struct acpi_buffer cap; /* list of DWORD capabilities */ | 308 | struct acpi_buffer cap; /* list of DWORD capabilities */ |
309 | struct acpi_buffer ret; /* free by caller if success */ | 309 | struct acpi_buffer ret; /* free by caller if success */ |
310 | }; | 310 | }; |
311 | 311 | ||
312 | acpi_status acpi_str_to_uuid(char *str, u8 *uuid); | 312 | acpi_status acpi_str_to_uuid(char *str, u8 *uuid); |
313 | acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | 313 | acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); |
314 | 314 | ||
315 | /* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */ | 315 | /* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */ |
316 | #define OSC_QUERY_DWORD 0 /* DWORD 1 */ | 316 | #define OSC_QUERY_DWORD 0 /* DWORD 1 */ |
317 | #define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ | 317 | #define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ |
318 | #define OSC_CONTROL_DWORD 2 /* DWORD 3 */ | 318 | #define OSC_CONTROL_DWORD 2 /* DWORD 3 */ |
319 | 319 | ||
320 | /* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ | 320 | /* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ |
321 | #define OSC_QUERY_ENABLE 0x00000001 /* input */ | 321 | #define OSC_QUERY_ENABLE 0x00000001 /* input */ |
322 | #define OSC_REQUEST_ERROR 0x00000002 /* return */ | 322 | #define OSC_REQUEST_ERROR 0x00000002 /* return */ |
323 | #define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ | 323 | #define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ |
324 | #define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ | 324 | #define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ |
325 | #define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ | 325 | #define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ |
326 | 326 | ||
327 | /* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ | 327 | /* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ |
328 | #define OSC_SB_PAD_SUPPORT 0x00000001 | 328 | #define OSC_SB_PAD_SUPPORT 0x00000001 |
329 | #define OSC_SB_PPC_OST_SUPPORT 0x00000002 | 329 | #define OSC_SB_PPC_OST_SUPPORT 0x00000002 |
330 | #define OSC_SB_PR3_SUPPORT 0x00000004 | 330 | #define OSC_SB_PR3_SUPPORT 0x00000004 |
331 | #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 | 331 | #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 |
332 | #define OSC_SB_APEI_SUPPORT 0x00000010 | 332 | #define OSC_SB_APEI_SUPPORT 0x00000010 |
333 | #define OSC_SB_CPC_SUPPORT 0x00000020 | 333 | #define OSC_SB_CPC_SUPPORT 0x00000020 |
334 | 334 | ||
335 | extern bool osc_sb_apei_support_acked; | 335 | extern bool osc_sb_apei_support_acked; |
336 | 336 | ||
337 | /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ | 337 | /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ |
338 | #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 | 338 | #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 |
339 | #define OSC_PCI_ASPM_SUPPORT 0x00000002 | 339 | #define OSC_PCI_ASPM_SUPPORT 0x00000002 |
340 | #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 | 340 | #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 |
341 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 | 341 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 |
342 | #define OSC_PCI_MSI_SUPPORT 0x00000010 | 342 | #define OSC_PCI_MSI_SUPPORT 0x00000010 |
343 | #define OSC_PCI_SUPPORT_MASKS 0x0000001f | 343 | #define OSC_PCI_SUPPORT_MASKS 0x0000001f |
344 | 344 | ||
345 | /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ | 345 | /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ |
346 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 | 346 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 |
347 | #define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 | 347 | #define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 |
348 | #define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 | 348 | #define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 |
349 | #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 | 349 | #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 |
350 | #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 | 350 | #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 |
351 | #define OSC_PCI_CONTROL_MASKS 0x0000001f | 351 | #define OSC_PCI_CONTROL_MASKS 0x0000001f |
352 | 352 | ||
353 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, | 353 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, |
354 | u32 *mask, u32 req); | 354 | u32 *mask, u32 req); |
355 | 355 | ||
356 | /* Enable _OST when all relevant hotplug operations are enabled */ | 356 | /* Enable _OST when all relevant hotplug operations are enabled */ |
357 | #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ | 357 | #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ |
358 | defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ | 358 | defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ |
359 | defined(CONFIG_ACPI_CONTAINER) | 359 | defined(CONFIG_ACPI_CONTAINER) |
360 | #define ACPI_HOTPLUG_OST | 360 | #define ACPI_HOTPLUG_OST |
361 | #endif | 361 | #endif |
362 | 362 | ||
363 | /* _OST Source Event Code (OSPM Action) */ | 363 | /* _OST Source Event Code (OSPM Action) */ |
364 | #define ACPI_OST_EC_OSPM_SHUTDOWN 0x100 | 364 | #define ACPI_OST_EC_OSPM_SHUTDOWN 0x100 |
365 | #define ACPI_OST_EC_OSPM_EJECT 0x103 | 365 | #define ACPI_OST_EC_OSPM_EJECT 0x103 |
366 | #define ACPI_OST_EC_OSPM_INSERTION 0x200 | 366 | #define ACPI_OST_EC_OSPM_INSERTION 0x200 |
367 | 367 | ||
368 | /* _OST General Processing Status Code */ | 368 | /* _OST General Processing Status Code */ |
369 | #define ACPI_OST_SC_SUCCESS 0x0 | 369 | #define ACPI_OST_SC_SUCCESS 0x0 |
370 | #define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 | 370 | #define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 |
371 | #define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 | 371 | #define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 |
372 | 372 | ||
373 | /* _OST OS Shutdown Processing (0x100) Status Code */ | 373 | /* _OST OS Shutdown Processing (0x100) Status Code */ |
374 | #define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 | 374 | #define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 |
375 | #define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 | 375 | #define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 |
376 | #define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 | 376 | #define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 |
377 | #define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 | 377 | #define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 |
378 | 378 | ||
379 | /* _OST Ejection Request (0x3, 0x103) Status Code */ | 379 | /* _OST Ejection Request (0x3, 0x103) Status Code */ |
380 | #define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 | 380 | #define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 |
381 | #define ACPI_OST_SC_DEVICE_IN_USE 0x81 | 381 | #define ACPI_OST_SC_DEVICE_IN_USE 0x81 |
382 | #define ACPI_OST_SC_DEVICE_BUSY 0x82 | 382 | #define ACPI_OST_SC_DEVICE_BUSY 0x82 |
383 | #define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 | 383 | #define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 |
384 | #define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 | 384 | #define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 |
385 | 385 | ||
386 | /* _OST Insertion Request (0x200) Status Code */ | 386 | /* _OST Insertion Request (0x200) Status Code */ |
387 | #define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 | 387 | #define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 |
388 | #define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 | 388 | #define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 |
389 | #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 | 389 | #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 |
390 | 390 | ||
391 | extern void acpi_early_init(void); | 391 | extern void acpi_early_init(void); |
392 | 392 | ||
393 | extern int acpi_nvs_register(__u64 start, __u64 size); | 393 | extern int acpi_nvs_register(__u64 start, __u64 size); |
394 | 394 | ||
395 | extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | 395 | extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), |
396 | void *data); | 396 | void *data); |
397 | 397 | ||
398 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | 398 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, |
399 | const struct device *dev); | 399 | const struct device *dev); |
400 | 400 | ||
401 | static inline bool acpi_driver_match_device(struct device *dev, | 401 | static inline bool acpi_driver_match_device(struct device *dev, |
402 | const struct device_driver *drv) | 402 | const struct device_driver *drv) |
403 | { | 403 | { |
404 | return !!acpi_match_device(drv->acpi_match_table, dev); | 404 | return !!acpi_match_device(drv->acpi_match_table, dev); |
405 | } | 405 | } |
406 | 406 | ||
407 | #define ACPI_PTR(_ptr) (_ptr) | 407 | #define ACPI_PTR(_ptr) (_ptr) |
408 | 408 | ||
409 | #else /* !CONFIG_ACPI */ | 409 | #else /* !CONFIG_ACPI */ |
410 | 410 | ||
411 | #define acpi_disabled 1 | 411 | #define acpi_disabled 1 |
412 | 412 | ||
413 | #define ACPI_COMPANION(dev) (NULL) | 413 | #define ACPI_COMPANION(dev) (NULL) |
414 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) | 414 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) |
415 | #define ACPI_HANDLE(dev) (NULL) | 415 | #define ACPI_HANDLE(dev) (NULL) |
416 | 416 | ||
417 | static inline void acpi_early_init(void) { } | 417 | static inline void acpi_early_init(void) { } |
418 | 418 | ||
419 | static inline int early_acpi_boot_init(void) | 419 | static inline int early_acpi_boot_init(void) |
420 | { | 420 | { |
421 | return 0; | 421 | return 0; |
422 | } | 422 | } |
423 | static inline int acpi_boot_init(void) | 423 | static inline int acpi_boot_init(void) |
424 | { | 424 | { |
425 | return 0; | 425 | return 0; |
426 | } | 426 | } |
427 | 427 | ||
428 | static inline void acpi_boot_table_init(void) | 428 | static inline void acpi_boot_table_init(void) |
429 | { | 429 | { |
430 | return; | 430 | return; |
431 | } | 431 | } |
432 | 432 | ||
433 | static inline int acpi_mps_check(void) | 433 | static inline int acpi_mps_check(void) |
434 | { | 434 | { |
435 | return 0; | 435 | return 0; |
436 | } | 436 | } |
437 | 437 | ||
438 | static inline int acpi_check_resource_conflict(struct resource *res) | 438 | static inline int acpi_check_resource_conflict(struct resource *res) |
439 | { | 439 | { |
440 | return 0; | 440 | return 0; |
441 | } | 441 | } |
442 | 442 | ||
443 | static inline int acpi_check_region(resource_size_t start, resource_size_t n, | 443 | static inline int acpi_check_region(resource_size_t start, resource_size_t n, |
444 | const char *name) | 444 | const char *name) |
445 | { | 445 | { |
446 | return 0; | 446 | return 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | struct acpi_table_header; | 449 | struct acpi_table_header; |
450 | static inline int acpi_table_parse(char *id, | 450 | static inline int acpi_table_parse(char *id, |
451 | int (*handler)(struct acpi_table_header *)) | 451 | int (*handler)(struct acpi_table_header *)) |
452 | { | 452 | { |
453 | return -1; | 453 | return -1; |
454 | } | 454 | } |
455 | 455 | ||
456 | static inline int acpi_nvs_register(__u64 start, __u64 size) | 456 | static inline int acpi_nvs_register(__u64 start, __u64 size) |
457 | { | 457 | { |
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
460 | 460 | ||
461 | static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | 461 | static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), |
462 | void *data) | 462 | void *data) |
463 | { | 463 | { |
464 | return 0; | 464 | return 0; |
465 | } | 465 | } |
466 | 466 | ||
467 | struct acpi_device_id; | 467 | struct acpi_device_id; |
468 | 468 | ||
469 | static inline const struct acpi_device_id *acpi_match_device( | 469 | static inline const struct acpi_device_id *acpi_match_device( |
470 | const struct acpi_device_id *ids, const struct device *dev) | 470 | const struct acpi_device_id *ids, const struct device *dev) |
471 | { | 471 | { |
472 | return NULL; | 472 | return NULL; |
473 | } | 473 | } |
474 | 474 | ||
475 | static inline bool acpi_driver_match_device(struct device *dev, | 475 | static inline bool acpi_driver_match_device(struct device *dev, |
476 | const struct device_driver *drv) | 476 | const struct device_driver *drv) |
477 | { | 477 | { |
478 | return false; | 478 | return false; |
479 | } | 479 | } |
480 | 480 | ||
481 | #define ACPI_PTR(_ptr) (NULL) | 481 | #define ACPI_PTR(_ptr) (NULL) |
482 | 482 | ||
483 | #endif /* !CONFIG_ACPI */ | 483 | #endif /* !CONFIG_ACPI */ |
484 | 484 | ||
485 | #define DEVICE_ACPI_HANDLE(dev) ACPI_HANDLE(dev) | ||
486 | |||
487 | #ifdef CONFIG_ACPI | 485 | #ifdef CONFIG_ACPI |
488 | void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, | 486 | void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, |
489 | u32 pm1a_ctrl, u32 pm1b_ctrl)); | 487 | u32 pm1a_ctrl, u32 pm1b_ctrl)); |
490 | 488 | ||
491 | acpi_status acpi_os_prepare_sleep(u8 sleep_state, | 489 | acpi_status acpi_os_prepare_sleep(u8 sleep_state, |
492 | u32 pm1a_control, u32 pm1b_control); | 490 | u32 pm1a_control, u32 pm1b_control); |
493 | 491 | ||
494 | void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, | 492 | void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, |
495 | u32 val_a, u32 val_b)); | 493 | u32 val_a, u32 val_b)); |
496 | 494 | ||
497 | acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, | 495 | acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, |
498 | u32 val_a, u32 val_b); | 496 | u32 val_a, u32 val_b); |
499 | 497 | ||
500 | #ifdef CONFIG_X86 | 498 | #ifdef CONFIG_X86 |
501 | void arch_reserve_mem_area(acpi_physical_address addr, size_t size); | 499 | void arch_reserve_mem_area(acpi_physical_address addr, size_t size); |
502 | #else | 500 | #else |
503 | static inline void arch_reserve_mem_area(acpi_physical_address addr, | 501 | static inline void arch_reserve_mem_area(acpi_physical_address addr, |
504 | size_t size) | 502 | size_t size) |
505 | { | 503 | { |
506 | } | 504 | } |
507 | #endif /* CONFIG_X86 */ | 505 | #endif /* CONFIG_X86 */ |
508 | #else | 506 | #else |
509 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) | 507 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) |
510 | #endif | 508 | #endif |
511 | 509 | ||
512 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) | 510 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) |
513 | int acpi_dev_runtime_suspend(struct device *dev); | 511 | int acpi_dev_runtime_suspend(struct device *dev); |
514 | int acpi_dev_runtime_resume(struct device *dev); | 512 | int acpi_dev_runtime_resume(struct device *dev); |
515 | int acpi_subsys_runtime_suspend(struct device *dev); | 513 | int acpi_subsys_runtime_suspend(struct device *dev); |
516 | int acpi_subsys_runtime_resume(struct device *dev); | 514 | int acpi_subsys_runtime_resume(struct device *dev); |
517 | #else | 515 | #else |
518 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } | 516 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } |
519 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } | 517 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } |
520 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } | 518 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } |
521 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } | 519 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } |
522 | #endif | 520 | #endif |
523 | 521 | ||
524 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) | 522 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) |
525 | int acpi_dev_suspend_late(struct device *dev); | 523 | int acpi_dev_suspend_late(struct device *dev); |
526 | int acpi_dev_resume_early(struct device *dev); | 524 | int acpi_dev_resume_early(struct device *dev); |
527 | int acpi_subsys_prepare(struct device *dev); | 525 | int acpi_subsys_prepare(struct device *dev); |
528 | int acpi_subsys_suspend_late(struct device *dev); | 526 | int acpi_subsys_suspend_late(struct device *dev); |
529 | int acpi_subsys_resume_early(struct device *dev); | 527 | int acpi_subsys_resume_early(struct device *dev); |
530 | #else | 528 | #else |
531 | static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } | 529 | static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } |
532 | static inline int acpi_dev_resume_early(struct device *dev) { return 0; } | 530 | static inline int acpi_dev_resume_early(struct device *dev) { return 0; } |
533 | static inline int acpi_subsys_prepare(struct device *dev) { return 0; } | 531 | static inline int acpi_subsys_prepare(struct device *dev) { return 0; } |
534 | static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } | 532 | static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } |
535 | static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } | 533 | static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } |
536 | #endif | 534 | #endif |
537 | 535 | ||
538 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) | 536 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) |
539 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); | 537 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); |
540 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | 538 | int acpi_dev_pm_attach(struct device *dev, bool power_on); |
541 | void acpi_dev_pm_detach(struct device *dev, bool power_off); | 539 | void acpi_dev_pm_detach(struct device *dev, bool power_off); |
542 | #else | 540 | #else |
543 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | 541 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) |
544 | { | 542 | { |
545 | return NULL; | 543 | return NULL; |
546 | } | 544 | } |
547 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | 545 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) |
548 | { | 546 | { |
549 | return -ENODEV; | 547 | return -ENODEV; |
550 | } | 548 | } |
551 | static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} | 549 | static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} |
552 | #endif | 550 | #endif |
553 | 551 | ||
554 | #ifdef CONFIG_ACPI | 552 | #ifdef CONFIG_ACPI |
555 | __printf(3, 4) | 553 | __printf(3, 4) |
556 | void acpi_handle_printk(const char *level, acpi_handle handle, | 554 | void acpi_handle_printk(const char *level, acpi_handle handle, |
557 | const char *fmt, ...); | 555 | const char *fmt, ...); |
558 | #else /* !CONFIG_ACPI */ | 556 | #else /* !CONFIG_ACPI */ |
559 | static inline __printf(3, 4) void | 557 | static inline __printf(3, 4) void |
560 | acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} | 558 | acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} |
561 | #endif /* !CONFIG_ACPI */ | 559 | #endif /* !CONFIG_ACPI */ |
562 | 560 | ||
563 | /* | 561 | /* |
564 | * acpi_handle_<level>: Print message with ACPI prefix and object path | 562 | * acpi_handle_<level>: Print message with ACPI prefix and object path |
565 | * | 563 | * |
566 | * These interfaces acquire the global namespace mutex to obtain an object | 564 | * These interfaces acquire the global namespace mutex to obtain an object |
567 | * path. In interrupt context, it shows the object path as <n/a>. | 565 | * path. In interrupt context, it shows the object path as <n/a>. |
568 | */ | 566 | */ |
569 | #define acpi_handle_emerg(handle, fmt, ...) \ | 567 | #define acpi_handle_emerg(handle, fmt, ...) \ |
570 | acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) | 568 | acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) |
571 | #define acpi_handle_alert(handle, fmt, ...) \ | 569 | #define acpi_handle_alert(handle, fmt, ...) \ |
572 | acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) | 570 | acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) |
573 | #define acpi_handle_crit(handle, fmt, ...) \ | 571 | #define acpi_handle_crit(handle, fmt, ...) \ |
574 | acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) | 572 | acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) |
575 | #define acpi_handle_err(handle, fmt, ...) \ | 573 | #define acpi_handle_err(handle, fmt, ...) \ |
576 | acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) | 574 | acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) |
577 | #define acpi_handle_warn(handle, fmt, ...) \ | 575 | #define acpi_handle_warn(handle, fmt, ...) \ |
578 | acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) | 576 | acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) |
579 | #define acpi_handle_notice(handle, fmt, ...) \ | 577 | #define acpi_handle_notice(handle, fmt, ...) \ |
580 | acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) | 578 | acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) |
581 | #define acpi_handle_info(handle, fmt, ...) \ | 579 | #define acpi_handle_info(handle, fmt, ...) \ |
582 | acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) | 580 | acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) |
583 | 581 | ||
584 | /* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */ | 582 | /* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */ |
585 | #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) | 583 | #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) |
586 | #define acpi_handle_debug(handle, fmt, ...) \ | 584 | #define acpi_handle_debug(handle, fmt, ...) \ |
587 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) | 585 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) |
588 | #else | 586 | #else |
589 | #define acpi_handle_debug(handle, fmt, ...) \ | 587 | #define acpi_handle_debug(handle, fmt, ...) \ |
590 | ({ \ | 588 | ({ \ |
591 | if (0) \ | 589 | if (0) \ |
592 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ | 590 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ |
593 | 0; \ | 591 | 0; \ |
594 | }) | 592 | }) |
595 | #endif | 593 | #endif |
596 | 594 | ||
597 | #endif /*_LINUX_ACPI_H*/ | 595 | #endif /*_LINUX_ACPI_H*/ |
598 | 596 |
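For context around the include/linux/acpi.h hunk above (not part of the diff itself): with the DEVICE_ACPI_HANDLE() definition removed, ACPI_HANDLE(dev) is the single accessor a driver uses to reach its device's ACPI handle, and the (NULL) stub in the !CONFIG_ACPI branch keeps such callers compiling when ACPI support is configured out. A minimal, hypothetical caller might look like the sketch below; the probe function and its use of acpi_handle_info() are illustrative and do not appear in this patch.

    /* Hypothetical driver snippet: check the handle before any ACPI call. */
    #include <linux/acpi.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    static int example_probe(struct device *dev)
    {
            /* NULL when CONFIG_ACPI is off or the device has no ACPI companion */
            acpi_handle handle = ACPI_HANDLE(dev);

            if (!handle)
                    return -ENODEV;

            acpi_handle_info(handle, "bound to ACPI companion\n");
            return 0;
    }

Because the !CONFIG_ACPI stub evaluates to NULL, the same code path degrades cleanly to "no ACPI companion" without any #ifdef in the driver.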
include/linux/pci-acpi.h
1 | /* | 1 | /* |
2 | * File pci-acpi.h | 2 | * File pci-acpi.h |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Intel | 4 | * Copyright (C) 2004 Intel |
5 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) | 5 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _PCI_ACPI_H_ | 8 | #ifndef _PCI_ACPI_H_ |
9 | #define _PCI_ACPI_H_ | 9 | #define _PCI_ACPI_H_ |
10 | 10 | ||
11 | #include <linux/acpi.h> | 11 | #include <linux/acpi.h> |
12 | 12 | ||
13 | #ifdef CONFIG_ACPI | 13 | #ifdef CONFIG_ACPI |
14 | extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | 14 | extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, |
15 | struct pci_bus *pci_bus); | 15 | struct pci_bus *pci_bus); |
16 | extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev); | 16 | extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev); |
17 | extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | 17 | extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, |
18 | struct pci_dev *pci_dev); | 18 | struct pci_dev *pci_dev); |
19 | extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev); | 19 | extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev); |
20 | extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); | 20 | extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); |
21 | 21 | ||
22 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) | 22 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) |
23 | { | 23 | { |
24 | struct pci_bus *pbus = pdev->bus; | 24 | struct pci_bus *pbus = pdev->bus; |
25 | 25 | ||
26 | /* Find a PCI root bus */ | 26 | /* Find a PCI root bus */ |
27 | while (!pci_is_root_bus(pbus)) | 27 | while (!pci_is_root_bus(pbus)) |
28 | pbus = pbus->parent; | 28 | pbus = pbus->parent; |
29 | 29 | ||
30 | return DEVICE_ACPI_HANDLE(pbus->bridge); | 30 | return ACPI_HANDLE(pbus->bridge); |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) | 33 | static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) |
34 | { | 34 | { |
35 | struct device *dev; | 35 | struct device *dev; |
36 | 36 | ||
37 | if (pci_is_root_bus(pbus)) | 37 | if (pci_is_root_bus(pbus)) |
38 | dev = pbus->bridge; | 38 | dev = pbus->bridge; |
39 | else | 39 | else |
40 | dev = &pbus->self->dev; | 40 | dev = &pbus->self->dev; |
41 | 41 | ||
42 | return DEVICE_ACPI_HANDLE(dev); | 42 | return ACPI_HANDLE(dev); |
43 | } | 43 | } |
44 | 44 | ||
45 | void acpi_pci_add_bus(struct pci_bus *bus); | 45 | void acpi_pci_add_bus(struct pci_bus *bus); |
46 | void acpi_pci_remove_bus(struct pci_bus *bus); | 46 | void acpi_pci_remove_bus(struct pci_bus *bus); |
47 | 47 | ||
48 | #ifdef CONFIG_ACPI_PCI_SLOT | 48 | #ifdef CONFIG_ACPI_PCI_SLOT |
49 | void acpi_pci_slot_init(void); | 49 | void acpi_pci_slot_init(void); |
50 | void acpi_pci_slot_enumerate(struct pci_bus *bus); | 50 | void acpi_pci_slot_enumerate(struct pci_bus *bus); |
51 | void acpi_pci_slot_remove(struct pci_bus *bus); | 51 | void acpi_pci_slot_remove(struct pci_bus *bus); |
52 | #else | 52 | #else |
53 | static inline void acpi_pci_slot_init(void) { } | 53 | static inline void acpi_pci_slot_init(void) { } |
54 | static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { } | 54 | static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { } |
55 | static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } | 55 | static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_HOTPLUG_PCI_ACPI | 58 | #ifdef CONFIG_HOTPLUG_PCI_ACPI |
59 | void acpiphp_init(void); | 59 | void acpiphp_init(void); |
60 | void acpiphp_enumerate_slots(struct pci_bus *bus); | 60 | void acpiphp_enumerate_slots(struct pci_bus *bus); |
61 | void acpiphp_remove_slots(struct pci_bus *bus); | 61 | void acpiphp_remove_slots(struct pci_bus *bus); |
62 | void acpiphp_check_host_bridge(acpi_handle handle); | 62 | void acpiphp_check_host_bridge(acpi_handle handle); |
63 | #else | 63 | #else |
64 | static inline void acpiphp_init(void) { } | 64 | static inline void acpiphp_init(void) { } |
65 | static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { } | 65 | static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { } |
66 | static inline void acpiphp_remove_slots(struct pci_bus *bus) { } | 66 | static inline void acpiphp_remove_slots(struct pci_bus *bus) { } |
67 | static inline void acpiphp_check_host_bridge(acpi_handle handle) { } | 67 | static inline void acpiphp_check_host_bridge(acpi_handle handle) { } |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | #else /* CONFIG_ACPI */ | 70 | #else /* CONFIG_ACPI */ |
71 | static inline void acpi_pci_add_bus(struct pci_bus *bus) { } | 71 | static inline void acpi_pci_add_bus(struct pci_bus *bus) { } |
72 | static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } | 72 | static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } |
73 | #endif /* CONFIG_ACPI */ | 73 | #endif /* CONFIG_ACPI */ |
74 | 74 | ||
75 | #ifdef CONFIG_ACPI_APEI | 75 | #ifdef CONFIG_ACPI_APEI |
76 | extern bool aer_acpi_firmware_first(void); | 76 | extern bool aer_acpi_firmware_first(void); |
77 | #else | 77 | #else |
78 | static inline bool aer_acpi_firmware_first(void) { return false; } | 78 | static inline bool aer_acpi_firmware_first(void) { return false; } |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | #endif /* _PCI_ACPI_H_ */ | 81 | #endif /* _PCI_ACPI_H_ */ |
82 | 82 |
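For context (not part of the diff): the two pci-acpi.h hunks above show the whole caller-side pattern of this commit, a one-line macro substitution with no behavioural change. A representative before/after in a hypothetical caller, purely for illustration:

    /* Before this commit: */
    acpi_handle handle = DEVICE_ACPI_HANDLE(&pdev->dev);

    /* After this commit: */
    acpi_handle handle = ACPI_HANDLE(&pdev->dev);

The remaining files listed at the top of the page carry the same substitution in their respective callers.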