Commit 1bac92cac1d8417bf023571b1608b9793c4ecf86
Committed by John W. Linville
1 parent 61e3f32c11
Exists in master and in 6 other branches
net/rfkill/core.c: use kstrtoul, etc
Use kstrtoul, etc. instead of the now deprecated strict_strtoul, etc.

A semantic patch rule for the kstrtoul case is as follows:
(http://coccinelle.lip6.fr/)

// <smpl>
@@
expression a,b;
{int,long} *c;
@@

-strict_strtoul
+kstrtoul
 (a,b,c)
// </smpl>

Signed-off-by: Julia Lawall <julia@diku.dk>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
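For readers unfamiliar with the newer API: kstrtoul() takes the same arguments as strict_strtoul() (string, base, result pointer), returns 0 on success or a negative errno (-EINVAL or -ERANGE), and rejects trailing characters other than a single optional newline, which is exactly what a sysfs store wants. A minimal sketch of the pattern, mirroring the stores changed below (parse_state() is a hypothetical helper for illustration, not part of this commit):

#include <linux/kernel.h>	/* kstrtoul() is declared here in this era */

/* Hypothetical helper illustrating the kstrtoul() pattern. */
static int parse_state(const char *buf, unsigned long *state)
{
	int err;

	err = kstrtoul(buf, 0, state);	/* base 0 auto-detects 0x/0 prefixes */
	if (err)
		return err;		/* -EINVAL or -ERANGE from kstrtoul() */

	if (*state > 1)
		return -EINVAL;		/* this attribute only accepts 0 or 1 */

	return 0;
}

The semantic patch above can be applied to a source tree with Coccinelle's spatch tool.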
Showing 1 changed file with 2 additions and 2 deletions
net/rfkill/core.c
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _prev, this may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states. This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif


bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	default:
		BUG();
	}
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

-	err = strict_strtoul(buf, 0, &state);
+	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

-	err = strict_strtoul(buf, 0, &state);
+	err = kstrtoul(buf, 0, &state);
692 | if (err) | 692 | if (err) |
693 | return err; | 693 | return err; |
694 | 694 | ||
695 | if (state != RFKILL_USER_STATE_SOFT_BLOCKED && | 695 | if (state != RFKILL_USER_STATE_SOFT_BLOCKED && |
696 | state != RFKILL_USER_STATE_UNBLOCKED) | 696 | state != RFKILL_USER_STATE_UNBLOCKED) |
697 | return -EINVAL; | 697 | return -EINVAL; |
698 | 698 | ||
699 | mutex_lock(&rfkill_global_mutex); | 699 | mutex_lock(&rfkill_global_mutex); |
700 | rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); | 700 | rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); |
701 | mutex_unlock(&rfkill_global_mutex); | 701 | mutex_unlock(&rfkill_global_mutex); |
702 | 702 | ||
703 | return err ?: count; | 703 | return err ?: count; |
704 | } | 704 | } |
705 | 705 | ||
706 | static ssize_t rfkill_claim_show(struct device *dev, | 706 | static ssize_t rfkill_claim_show(struct device *dev, |
707 | struct device_attribute *attr, | 707 | struct device_attribute *attr, |
708 | char *buf) | 708 | char *buf) |
709 | { | 709 | { |
710 | return sprintf(buf, "%d\n", 0); | 710 | return sprintf(buf, "%d\n", 0); |
711 | } | 711 | } |
712 | 712 | ||
713 | static ssize_t rfkill_claim_store(struct device *dev, | 713 | static ssize_t rfkill_claim_store(struct device *dev, |
714 | struct device_attribute *attr, | 714 | struct device_attribute *attr, |
715 | const char *buf, size_t count) | 715 | const char *buf, size_t count) |
716 | { | 716 | { |
717 | return -EOPNOTSUPP; | 717 | return -EOPNOTSUPP; |
718 | } | 718 | } |
719 | 719 | ||
720 | static struct device_attribute rfkill_dev_attrs[] = { | 720 | static struct device_attribute rfkill_dev_attrs[] = { |
721 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | 721 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), |
722 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | 722 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), |
723 | __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), | 723 | __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), |
724 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), | 724 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), |
725 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | 725 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), |
726 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | 726 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), |
727 | __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store), | 727 | __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store), |
728 | __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL), | 728 | __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL), |
729 | __ATTR_NULL | 729 | __ATTR_NULL |
730 | }; | 730 | }; |
731 | 731 | ||
732 | static void rfkill_release(struct device *dev) | 732 | static void rfkill_release(struct device *dev) |
733 | { | 733 | { |
734 | struct rfkill *rfkill = to_rfkill(dev); | 734 | struct rfkill *rfkill = to_rfkill(dev); |
735 | 735 | ||
736 | kfree(rfkill); | 736 | kfree(rfkill); |
737 | } | 737 | } |
738 | 738 | ||
739 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | 739 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) |
740 | { | 740 | { |
741 | struct rfkill *rfkill = to_rfkill(dev); | 741 | struct rfkill *rfkill = to_rfkill(dev); |
742 | unsigned long flags; | 742 | unsigned long flags; |
743 | u32 state; | 743 | u32 state; |
744 | int error; | 744 | int error; |
745 | 745 | ||
746 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | 746 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); |
747 | if (error) | 747 | if (error) |
748 | return error; | 748 | return error; |
749 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | 749 | error = add_uevent_var(env, "RFKILL_TYPE=%s", |
750 | rfkill_get_type_str(rfkill->type)); | 750 | rfkill_get_type_str(rfkill->type)); |
751 | if (error) | 751 | if (error) |
752 | return error; | 752 | return error; |
753 | spin_lock_irqsave(&rfkill->lock, flags); | 753 | spin_lock_irqsave(&rfkill->lock, flags); |
754 | state = rfkill->state; | 754 | state = rfkill->state; |
755 | spin_unlock_irqrestore(&rfkill->lock, flags); | 755 | spin_unlock_irqrestore(&rfkill->lock, flags); |
756 | error = add_uevent_var(env, "RFKILL_STATE=%d", | 756 | error = add_uevent_var(env, "RFKILL_STATE=%d", |
757 | user_state_from_blocked(state)); | 757 | user_state_from_blocked(state)); |
758 | return error; | 758 | return error; |
759 | } | 759 | } |
760 | 760 | ||
761 | void rfkill_pause_polling(struct rfkill *rfkill) | 761 | void rfkill_pause_polling(struct rfkill *rfkill) |
762 | { | 762 | { |
763 | BUG_ON(!rfkill); | 763 | BUG_ON(!rfkill); |
764 | 764 | ||
765 | if (!rfkill->ops->poll) | 765 | if (!rfkill->ops->poll) |
766 | return; | 766 | return; |
767 | 767 | ||
768 | cancel_delayed_work_sync(&rfkill->poll_work); | 768 | cancel_delayed_work_sync(&rfkill->poll_work); |
769 | } | 769 | } |
770 | EXPORT_SYMBOL(rfkill_pause_polling); | 770 | EXPORT_SYMBOL(rfkill_pause_polling); |
771 | 771 | ||
772 | void rfkill_resume_polling(struct rfkill *rfkill) | 772 | void rfkill_resume_polling(struct rfkill *rfkill) |
773 | { | 773 | { |
774 | BUG_ON(!rfkill); | 774 | BUG_ON(!rfkill); |
775 | 775 | ||
776 | if (!rfkill->ops->poll) | 776 | if (!rfkill->ops->poll) |
777 | return; | 777 | return; |
778 | 778 | ||
779 | schedule_work(&rfkill->poll_work.work); | 779 | schedule_work(&rfkill->poll_work.work); |
780 | } | 780 | } |
781 | EXPORT_SYMBOL(rfkill_resume_polling); | 781 | EXPORT_SYMBOL(rfkill_resume_polling); |
782 | 782 | ||
783 | static int rfkill_suspend(struct device *dev, pm_message_t state) | 783 | static int rfkill_suspend(struct device *dev, pm_message_t state) |
784 | { | 784 | { |
785 | struct rfkill *rfkill = to_rfkill(dev); | 785 | struct rfkill *rfkill = to_rfkill(dev); |
786 | 786 | ||
787 | rfkill_pause_polling(rfkill); | 787 | rfkill_pause_polling(rfkill); |
788 | 788 | ||
789 | return 0; | 789 | return 0; |
790 | } | 790 | } |
791 | 791 | ||
792 | static int rfkill_resume(struct device *dev) | 792 | static int rfkill_resume(struct device *dev) |
793 | { | 793 | { |
794 | struct rfkill *rfkill = to_rfkill(dev); | 794 | struct rfkill *rfkill = to_rfkill(dev); |
795 | bool cur; | 795 | bool cur; |
796 | 796 | ||
797 | if (!rfkill->persistent) { | 797 | if (!rfkill->persistent) { |
798 | cur = !!(rfkill->state & RFKILL_BLOCK_SW); | 798 | cur = !!(rfkill->state & RFKILL_BLOCK_SW); |
799 | rfkill_set_block(rfkill, cur); | 799 | rfkill_set_block(rfkill, cur); |
800 | } | 800 | } |
801 | 801 | ||
802 | rfkill_resume_polling(rfkill); | 802 | rfkill_resume_polling(rfkill); |
803 | 803 | ||
804 | return 0; | 804 | return 0; |
805 | } | 805 | } |
806 | 806 | ||
807 | static struct class rfkill_class = { | 807 | static struct class rfkill_class = { |
808 | .name = "rfkill", | 808 | .name = "rfkill", |
809 | .dev_release = rfkill_release, | 809 | .dev_release = rfkill_release, |
810 | .dev_attrs = rfkill_dev_attrs, | 810 | .dev_attrs = rfkill_dev_attrs, |
811 | .dev_uevent = rfkill_dev_uevent, | 811 | .dev_uevent = rfkill_dev_uevent, |
812 | .suspend = rfkill_suspend, | 812 | .suspend = rfkill_suspend, |
813 | .resume = rfkill_resume, | 813 | .resume = rfkill_resume, |
814 | }; | 814 | }; |
815 | 815 | ||
816 | bool rfkill_blocked(struct rfkill *rfkill) | 816 | bool rfkill_blocked(struct rfkill *rfkill) |
817 | { | 817 | { |
818 | unsigned long flags; | 818 | unsigned long flags; |
819 | u32 state; | 819 | u32 state; |
820 | 820 | ||
821 | spin_lock_irqsave(&rfkill->lock, flags); | 821 | spin_lock_irqsave(&rfkill->lock, flags); |
822 | state = rfkill->state; | 822 | state = rfkill->state; |
823 | spin_unlock_irqrestore(&rfkill->lock, flags); | 823 | spin_unlock_irqrestore(&rfkill->lock, flags); |
824 | 824 | ||
825 | return !!(state & RFKILL_BLOCK_ANY); | 825 | return !!(state & RFKILL_BLOCK_ANY); |
826 | } | 826 | } |
827 | EXPORT_SYMBOL(rfkill_blocked); | 827 | EXPORT_SYMBOL(rfkill_blocked); |
828 | 828 | ||
829 | 829 | ||
830 | struct rfkill * __must_check rfkill_alloc(const char *name, | 830 | struct rfkill * __must_check rfkill_alloc(const char *name, |
831 | struct device *parent, | 831 | struct device *parent, |
832 | const enum rfkill_type type, | 832 | const enum rfkill_type type, |
833 | const struct rfkill_ops *ops, | 833 | const struct rfkill_ops *ops, |
834 | void *ops_data) | 834 | void *ops_data) |
835 | { | 835 | { |
836 | struct rfkill *rfkill; | 836 | struct rfkill *rfkill; |
837 | struct device *dev; | 837 | struct device *dev; |
838 | 838 | ||
839 | if (WARN_ON(!ops)) | 839 | if (WARN_ON(!ops)) |
840 | return NULL; | 840 | return NULL; |
841 | 841 | ||
842 | if (WARN_ON(!ops->set_block)) | 842 | if (WARN_ON(!ops->set_block)) |
843 | return NULL; | 843 | return NULL; |
844 | 844 | ||
845 | if (WARN_ON(!name)) | 845 | if (WARN_ON(!name)) |
846 | return NULL; | 846 | return NULL; |
847 | 847 | ||
848 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | 848 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) |
849 | return NULL; | 849 | return NULL; |
850 | 850 | ||
851 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | 851 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); |
852 | if (!rfkill) | 852 | if (!rfkill) |
853 | return NULL; | 853 | return NULL; |
854 | 854 | ||
855 | spin_lock_init(&rfkill->lock); | 855 | spin_lock_init(&rfkill->lock); |
856 | INIT_LIST_HEAD(&rfkill->node); | 856 | INIT_LIST_HEAD(&rfkill->node); |
857 | rfkill->type = type; | 857 | rfkill->type = type; |
858 | rfkill->name = name; | 858 | rfkill->name = name; |
859 | rfkill->ops = ops; | 859 | rfkill->ops = ops; |
860 | rfkill->data = ops_data; | 860 | rfkill->data = ops_data; |
861 | 861 | ||
862 | dev = &rfkill->dev; | 862 | dev = &rfkill->dev; |
863 | dev->class = &rfkill_class; | 863 | dev->class = &rfkill_class; |
864 | dev->parent = parent; | 864 | dev->parent = parent; |
865 | device_initialize(dev); | 865 | device_initialize(dev); |
866 | 866 | ||
867 | return rfkill; | 867 | return rfkill; |
868 | } | 868 | } |
869 | EXPORT_SYMBOL(rfkill_alloc); | 869 | EXPORT_SYMBOL(rfkill_alloc); |
870 | 870 | ||
871 | static void rfkill_poll(struct work_struct *work) | 871 | static void rfkill_poll(struct work_struct *work) |
872 | { | 872 | { |
873 | struct rfkill *rfkill; | 873 | struct rfkill *rfkill; |
874 | 874 | ||
875 | rfkill = container_of(work, struct rfkill, poll_work.work); | 875 | rfkill = container_of(work, struct rfkill, poll_work.work); |
876 | 876 | ||
877 | /* | 877 | /* |
878 | * Poll hardware state -- driver will use one of the | 878 | * Poll hardware state -- driver will use one of the |
879 | * rfkill_set{,_hw,_sw}_state functions and use its | 879 | * rfkill_set{,_hw,_sw}_state functions and use its |
880 | * return value to update the current status. | 880 | * return value to update the current status. |
881 | */ | 881 | */ |
882 | rfkill->ops->poll(rfkill, rfkill->data); | 882 | rfkill->ops->poll(rfkill, rfkill->data); |
883 | 883 | ||
884 | schedule_delayed_work(&rfkill->poll_work, | 884 | schedule_delayed_work(&rfkill->poll_work, |
885 | round_jiffies_relative(POLL_INTERVAL)); | 885 | round_jiffies_relative(POLL_INTERVAL)); |
886 | } | 886 | } |
887 |
888 | static void rfkill_uevent_work(struct work_struct *work)
889 | {
890 |         struct rfkill *rfkill;
891 |
892 |         rfkill = container_of(work, struct rfkill, uevent_work);
893 |
894 |         mutex_lock(&rfkill_global_mutex);
895 |         rfkill_event(rfkill);
896 |         mutex_unlock(&rfkill_global_mutex);
897 | }
898 |
899 | static void rfkill_sync_work(struct work_struct *work)
900 | {
901 |         struct rfkill *rfkill;
902 |         bool cur;
903 |
904 |         rfkill = container_of(work, struct rfkill, sync_work);
905 |
906 |         mutex_lock(&rfkill_global_mutex);
907 |         cur = rfkill_global_states[rfkill->type].cur;
908 |         rfkill_set_block(rfkill, cur);
909 |         mutex_unlock(&rfkill_global_mutex);
910 | }
911 |
912 | int __must_check rfkill_register(struct rfkill *rfkill)
913 | {
914 |         static unsigned long rfkill_no;
915 |         struct device *dev = &rfkill->dev;
916 |         int error;
917 |
918 |         BUG_ON(!rfkill);
919 |
920 |         mutex_lock(&rfkill_global_mutex);
921 |
922 |         if (rfkill->registered) {
923 |                 error = -EALREADY;
924 |                 goto unlock;
925 |         }
926 |
927 |         rfkill->idx = rfkill_no;
928 |         dev_set_name(dev, "rfkill%lu", rfkill_no);
929 |         rfkill_no++;
930 |
931 |         list_add_tail(&rfkill->node, &rfkill_list);
932 |
933 |         error = device_add(dev);
934 |         if (error)
935 |                 goto remove;
936 |
937 |         error = rfkill_led_trigger_register(rfkill);
938 |         if (error)
939 |                 goto devdel;
940 |
941 |         rfkill->registered = true;
942 |
943 |         INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
944 |         INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
945 |         INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
946 |
947 |         if (rfkill->ops->poll)
948 |                 schedule_delayed_work(&rfkill->poll_work,
949 |                         round_jiffies_relative(POLL_INTERVAL));
950 |
951 |         if (!rfkill->persistent || rfkill_epo_lock_active) {
952 |                 schedule_work(&rfkill->sync_work);
953 |         } else {
954 | #ifdef CONFIG_RFKILL_INPUT
955 |                 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
956 |
957 |                 if (!atomic_read(&rfkill_input_disabled))
958 |                         __rfkill_switch_all(rfkill->type, soft_blocked);
959 | #endif
960 |         }
961 |
962 |         rfkill_send_events(rfkill, RFKILL_OP_ADD);
963 |
964 |         mutex_unlock(&rfkill_global_mutex);
965 |         return 0;
966 |
967 | devdel:
968 |         device_del(&rfkill->dev);
969 | remove:
970 |         list_del_init(&rfkill->node);
971 | unlock:
972 |         mutex_unlock(&rfkill_global_mutex);
973 |         return error;
974 | }
975 | EXPORT_SYMBOL(rfkill_register);
976 |
977 | void rfkill_unregister(struct rfkill *rfkill)
978 | {
979 |         BUG_ON(!rfkill);
980 |
981 |         if (rfkill->ops->poll)
982 |                 cancel_delayed_work_sync(&rfkill->poll_work);
983 |
984 |         cancel_work_sync(&rfkill->uevent_work);
985 |         cancel_work_sync(&rfkill->sync_work);
986 |
987 |         rfkill->registered = false;
988 |
989 |         device_del(&rfkill->dev);
990 |
991 |         mutex_lock(&rfkill_global_mutex);
992 |         rfkill_send_events(rfkill, RFKILL_OP_DEL);
993 |         list_del_init(&rfkill->node);
994 |         mutex_unlock(&rfkill_global_mutex);
995 |
996 |         rfkill_led_trigger_unregister(rfkill);
997 | }
998 | EXPORT_SYMBOL(rfkill_unregister);
999 |
1000 | void rfkill_destroy(struct rfkill *rfkill)
1001 | {
1002 |         if (rfkill)
1003 |                 put_device(&rfkill->dev);
1004 | }
1005 | EXPORT_SYMBOL(rfkill_destroy);
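
Together with rfkill_alloc() above, rfkill_register(), rfkill_unregister() and rfkill_destroy() form the whole driver-facing lifecycle: registration adds the class device, starts polling if ops->poll is set, and syncs the new switch with the global state; teardown is the exact mirror image. A hedged sketch of a typical probe/remove pair -- foo_ops, foo_priv, foo_set_block and the WLAN type choice are illustrative, not from this file:

    static const struct rfkill_ops foo_ops = {
            .set_block = foo_set_block,   /* mandatory: int (*)(void *, bool) */
            .poll      = foo_rfkill_poll, /* optional, see rfkill_poll() above */
    };

    static int foo_probe(struct device *dev, struct foo_priv *priv)
    {
            int err;

            priv->rfkill = rfkill_alloc("foo-wlan", dev, RFKILL_TYPE_WLAN,
                                        &foo_ops, priv);
            if (!priv->rfkill)
                    return -ENOMEM;

            err = rfkill_register(priv->rfkill);
            if (err) {
                    rfkill_destroy(priv->rfkill); /* drops the dev reference */
                    return err;
            }
            return 0;
    }

    static void foo_remove(struct foo_priv *priv)
    {
            rfkill_unregister(priv->rfkill);
            rfkill_destroy(priv->rfkill);
    }
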
1006 |
1007 | static int rfkill_fop_open(struct inode *inode, struct file *file)
1008 | {
1009 |         struct rfkill_data *data;
1010 |         struct rfkill *rfkill;
1011 |         struct rfkill_int_event *ev, *tmp;
1012 |
1013 |         data = kzalloc(sizeof(*data), GFP_KERNEL);
1014 |         if (!data)
1015 |                 return -ENOMEM;
1016 |
1017 |         INIT_LIST_HEAD(&data->events);
1018 |         mutex_init(&data->mtx);
1019 |         init_waitqueue_head(&data->read_wait);
1020 |
1021 |         mutex_lock(&rfkill_global_mutex);
1022 |         mutex_lock(&data->mtx);
1023 |         /*
1024 |          * start getting events from elsewhere but hold mtx to get
1025 |          * startup events added first
1026 |          */
1027 |
1028 |         list_for_each_entry(rfkill, &rfkill_list, node) {
1029 |                 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
1030 |                 if (!ev)
1031 |                         goto free;
1032 |                 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
1033 |                 list_add_tail(&ev->list, &data->events);
1034 |         }
1035 |         list_add(&data->list, &rfkill_fds);
1036 |         mutex_unlock(&data->mtx);
1037 |         mutex_unlock(&rfkill_global_mutex);
1038 |
1039 |         file->private_data = data;
1040 |
1041 |         return nonseekable_open(inode, file);
1042 |
1043 | free:
1044 |         mutex_unlock(&data->mtx);
1045 |         mutex_unlock(&rfkill_global_mutex);
1046 |         mutex_destroy(&data->mtx);
1047 |         list_for_each_entry_safe(ev, tmp, &data->events, list)
1048 |                 kfree(ev);
1049 |         kfree(data);
1050 |         return -ENOMEM;
1051 | }
1052 |
1053 | static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
1054 | {
1055 |         struct rfkill_data *data = file->private_data;
1056 |         unsigned int res = POLLOUT | POLLWRNORM;
1057 |
1058 |         poll_wait(file, &data->read_wait, wait);
1059 |
1060 |         mutex_lock(&data->mtx);
1061 |         if (!list_empty(&data->events))
1062 |                 res = POLLIN | POLLRDNORM;
1063 |         mutex_unlock(&data->mtx);
1064 |
1065 |         return res;
1066 | }
1067 |
1068 | static bool rfkill_readable(struct rfkill_data *data)
1069 | {
1070 |         bool r;
1071 |
1072 |         mutex_lock(&data->mtx);
1073 |         r = !list_empty(&data->events);
1074 |         mutex_unlock(&data->mtx);
1075 |
1076 |         return r;
1077 | }
1078 |
1079 | static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
1080 |                                size_t count, loff_t *pos)
1081 | {
1082 |         struct rfkill_data *data = file->private_data;
1083 |         struct rfkill_int_event *ev;
1084 |         unsigned long sz;
1085 |         int ret;
1086 |
1087 |         mutex_lock(&data->mtx);
1088 |
1089 |         while (list_empty(&data->events)) {
1090 |                 if (file->f_flags & O_NONBLOCK) {
1091 |                         ret = -EAGAIN;
1092 |                         goto out;
1093 |                 }
1094 |                 mutex_unlock(&data->mtx);
1095 |                 ret = wait_event_interruptible(data->read_wait,
1096 |                                                rfkill_readable(data));
1097 |                 mutex_lock(&data->mtx);
1098 |
1099 |                 if (ret)
1100 |                         goto out;
1101 |         }
1102 |
1103 |         ev = list_first_entry(&data->events, struct rfkill_int_event,
1104 |                               list);
1105 |
1106 |         sz = min_t(unsigned long, sizeof(ev->ev), count);
1107 |         ret = sz;
1108 |         if (copy_to_user(buf, &ev->ev, sz))
1109 |                 ret = -EFAULT;
1110 |
1111 |         list_del(&ev->list);
1112 |         kfree(ev);
1113 | out:
1114 |         mutex_unlock(&data->mtx);
1115 |         return ret;
1116 | }
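
From userspace, every open of /dev/rfkill gets a private event queue that rfkill_fop_open() pre-seeds with one RFKILL_OP_ADD event per registered switch, so a reader first learns the current state of every switch and then blocks (or polls) for changes. A minimal monitor sketch using only the uapi from <linux/rfkill.h>:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/rfkill.h>

    int main(void)
    {
            struct rfkill_event ev;
            struct pollfd p = { .events = POLLIN };
            ssize_t len;

            p.fd = open("/dev/rfkill", O_RDONLY);
            if (p.fd < 0)
                    return 1;

            while (poll(&p, 1, -1) > 0) {
                    len = read(p.fd, &ev, sizeof(ev));
                    if (len < (ssize_t)RFKILL_EVENT_SIZE_V1)
                            break;  /* short read or error */
                    printf("idx %u type %u op %u soft %u hard %u\n",
                           ev.idx, ev.type, ev.op, ev.soft, ev.hard);
            }
            close(p.fd);
            return 0;
    }
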
1117 |
1118 | static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
1119 |                                 size_t count, loff_t *pos)
1120 | {
1121 |         struct rfkill *rfkill;
1122 |         struct rfkill_event ev;
1123 |
1124 |         /* we don't need the 'hard' variable but accept it */
1125 |         if (count < RFKILL_EVENT_SIZE_V1 - 1)
1126 |                 return -EINVAL;
1127 |
1128 |         /*
1129 |          * Copy as much data as we can accept into our 'ev' buffer,
1130 |          * but tell userspace how much we've copied so it can determine
1131 |          * our API version even in a write() call, if it cares.
1132 |          */
1133 |         count = min(count, sizeof(ev));
1134 |         if (copy_from_user(&ev, buf, count))
1135 |                 return -EFAULT;
1136 |
1137 |         if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
1138 |                 return -EINVAL;
1139 |
1140 |         if (ev.type >= NUM_RFKILL_TYPES)
1141 |                 return -EINVAL;
1142 |
1143 |         mutex_lock(&rfkill_global_mutex);
1144 |
1145 |         if (ev.op == RFKILL_OP_CHANGE_ALL) {
1146 |                 if (ev.type == RFKILL_TYPE_ALL) {
1147 |                         enum rfkill_type i;
1148 |                         for (i = 0; i < NUM_RFKILL_TYPES; i++)
1149 |                                 rfkill_global_states[i].cur = ev.soft;
1150 |                 } else {
1151 |                         rfkill_global_states[ev.type].cur = ev.soft;
1152 |                 }
1153 |         }
1154 |
1155 |         list_for_each_entry(rfkill, &rfkill_list, node) {
1156 |                 if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
1157 |                         continue;
1158 |
1159 |                 if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
1160 |                         continue;
1161 |
1162 |                 rfkill_set_block(rfkill, ev.soft);
1163 |         }
1164 |         mutex_unlock(&rfkill_global_mutex);
1165 |
1166 |         return count;
1167 | }
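
Note how rfkill_fop_write() accepts anything down to one byte short of a v1 event ('hard' is read-only from userspace) and clamps longer writes to sizeof(ev), returning the byte count it actually consumed so callers can detect the kernel's event size. A sketch of the classic consumer, soft-blocking every switch of every type:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/rfkill.h>

    int rfkill_block_all(void)
    {
            struct rfkill_event ev;
            int fd = open("/dev/rfkill", O_WRONLY);

            if (fd < 0)
                    return -1;

            memset(&ev, 0, sizeof(ev));
            ev.op = RFKILL_OP_CHANGE_ALL;  /* ev.idx is ignored for this op */
            ev.type = RFKILL_TYPE_ALL;
            ev.soft = 1;                   /* 1 = soft-blocked */

            if (write(fd, &ev, sizeof(ev)) < 0) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }
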
1168 |
1169 | static int rfkill_fop_release(struct inode *inode, struct file *file)
1170 | {
1171 |         struct rfkill_data *data = file->private_data;
1172 |         struct rfkill_int_event *ev, *tmp;
1173 |
1174 |         mutex_lock(&rfkill_global_mutex);
1175 |         list_del(&data->list);
1176 |         mutex_unlock(&rfkill_global_mutex);
1177 |
1178 |         mutex_destroy(&data->mtx);
1179 |         list_for_each_entry_safe(ev, tmp, &data->events, list)
1180 |                 kfree(ev);
1181 |
1182 | #ifdef CONFIG_RFKILL_INPUT
1183 |         if (data->input_handler)
1184 |                 if (atomic_dec_return(&rfkill_input_disabled) == 0)
1185 |                         printk(KERN_DEBUG "rfkill: input handler enabled\n");
1186 | #endif
1187 |
1188 |         kfree(data);
1189 |
1190 |         return 0;
1191 | }
1192 |
1193 | #ifdef CONFIG_RFKILL_INPUT
1194 | static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
1195 |                              unsigned long arg)
1196 | {
1197 |         struct rfkill_data *data = file->private_data;
1198 |
1199 |         if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
1200 |                 return -ENOSYS;
1201 |
1202 |         if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
1203 |                 return -ENOSYS;
1204 |
1205 |         mutex_lock(&data->mtx);
1206 |
1207 |         if (!data->input_handler) {
1208 |                 if (atomic_inc_return(&rfkill_input_disabled) == 1)
1209 |                         printk(KERN_DEBUG "rfkill: input handler disabled\n");
1210 |                 data->input_handler = true;
1211 |         }
1212 |
1213 |         mutex_unlock(&data->mtx);
1214 |
1215 |         return 0;
1216 | }
1217 | #endif
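
The single ioctl lets an event consumer that implements its own policy (e.g. a desktop daemon handling the rfkill hotkeys itself) suspend the in-kernel input handler for as long as its file descriptor stays open; rfkill_fop_release() re-enables it via the rfkill_input_disabled counter. The uapi command is RFKILL_IOCTL_NOINPUT, a no-argument _IO from the same header:

    ioctl(fd, RFKILL_IOCTL_NOINPUT);  /* fd: an open /dev/rfkill descriptor */
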
1218 |
1219 | static const struct file_operations rfkill_fops = {
1220 |         .owner          = THIS_MODULE,
1221 |         .open           = rfkill_fop_open,
1222 |         .read           = rfkill_fop_read,
1223 |         .write          = rfkill_fop_write,
1224 |         .poll           = rfkill_fop_poll,
1225 |         .release        = rfkill_fop_release,
1226 | #ifdef CONFIG_RFKILL_INPUT
1227 |         .unlocked_ioctl = rfkill_fop_ioctl,
1228 |         .compat_ioctl   = rfkill_fop_ioctl,
1229 | #endif
1230 |         .llseek         = no_llseek,
1231 | };
1232 |
1233 | static struct miscdevice rfkill_miscdev = {
1234 |         .name   = "rfkill",
1235 |         .fops   = &rfkill_fops,
1236 |         .minor  = MISC_DYNAMIC_MINOR,
1237 | };
1238 |
1239 | static int __init rfkill_init(void)
1240 | {
1241 |         int error;
1242 |         int i;
1243 |
1244 |         for (i = 0; i < NUM_RFKILL_TYPES; i++)
1245 |                 rfkill_global_states[i].cur = !rfkill_default_state;
1246 |
1247 |         error = class_register(&rfkill_class);
1248 |         if (error)
1249 |                 goto out;
1250 |
1251 |         error = misc_register(&rfkill_miscdev);
1252 |         if (error) {
1253 |                 class_unregister(&rfkill_class);
1254 |                 goto out;
1255 |         }
1256 |
1257 | #ifdef CONFIG_RFKILL_INPUT
1258 |         error = rfkill_handler_init();
1259 |         if (error) {
1260 |                 misc_deregister(&rfkill_miscdev);
1261 |                 class_unregister(&rfkill_class);
1262 |                 goto out;
1263 |         }
1264 | #endif
1265 |
1266 | out:
1267 |         return error;
1268 | }
1269 | subsys_initcall(rfkill_init);
1270 |
1271 | static void __exit rfkill_exit(void)
1272 | {
1273 | #ifdef CONFIG_RFKILL_INPUT
1274 |         rfkill_handler_exit();
1275 | #endif
1276 |         misc_deregister(&rfkill_miscdev);
1277 |         class_unregister(&rfkill_class);
1278 | }
1279 | module_exit(rfkill_exit);
1280 |