Commit 671e470ed04865ca148b83f46319d14547481340

Authored by David Vrabel
Parent: 5b37717a23

uwb: fix oops when terminating an already terminated reservation

Calling uwb_rsv_terminate() on a reservation already in UWB_RSV_STATE_NONE
should do nothing.
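
For illustration, a hypothetical PAL teardown path (not part of this patch; example_pal_shutdown() is a made-up helper) where the terminate call must be a no-op because the reservation may already have reached UWB_RSV_STATE_NONE via a timeout or a remote termination:

	/* Sketch only: assumes the PAL holds the last reference to rsv. */
	static void example_pal_shutdown(struct uwb_rsv *rsv)
	{
		uwb_rsv_terminate(rsv);	/* safe even if already terminated */
		uwb_rsv_destroy(rsv);	/* drop the PAL's reference */
	}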

Signed-off-by: David Vrabel <david.vrabel@csr.com>

Showing 1 changed file with 2 additions and 1 deletion. Inline diff:

/*
 * UWB reservation management.
 *
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/uwb.h>
#include <linux/random.h>

#include "uwb-internal.h"

static void uwb_rsv_timer(unsigned long arg);

static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE] = "none ",
	[UWB_RSV_STATE_O_INITIATED] = "o initiated ",
	[UWB_RSV_STATE_O_PENDING] = "o pending ",
	[UWB_RSV_STATE_O_MODIFIED] = "o modified ",
	[UWB_RSV_STATE_O_ESTABLISHED] = "o established ",
	[UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ",
	[UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding",
	[UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining",
	[UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ",
	[UWB_RSV_STATE_T_ACCEPTED] = "t accepted ",
	[UWB_RSV_STATE_T_CONFLICT] = "t conflict ",
	[UWB_RSV_STATE_T_PENDING] = "t pending ",
	[UWB_RSV_STATE_T_DENIED] = "t denied ",
	[UWB_RSV_STATE_T_RESIZED] = "t resized ",
	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
	[UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend",
	[UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ",
};

static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD] = "hard",
	[UWB_DRP_TYPE_SOFT] = "soft",
	[UWB_DRP_TYPE_PRIVATE] = "private",
	[UWB_DRP_TYPE_PCA] = "pca",
};

bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
{
	static const bool has_two_drp_ies[] = {
		[UWB_RSV_STATE_O_INITIATED] = false,
		[UWB_RSV_STATE_O_PENDING] = false,
		[UWB_RSV_STATE_O_MODIFIED] = false,
		[UWB_RSV_STATE_O_ESTABLISHED] = false,
		[UWB_RSV_STATE_O_TO_BE_MOVED] = false,
		[UWB_RSV_STATE_O_MOVE_COMBINING] = false,
		[UWB_RSV_STATE_O_MOVE_REDUCING] = false,
		[UWB_RSV_STATE_O_MOVE_EXPANDING] = true,
		[UWB_RSV_STATE_T_ACCEPTED] = false,
		[UWB_RSV_STATE_T_CONFLICT] = false,
		[UWB_RSV_STATE_T_PENDING] = false,
		[UWB_RSV_STATE_T_DENIED] = false,
		[UWB_RSV_STATE_T_RESIZED] = false,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
		[UWB_RSV_STATE_T_EXPANDING_PENDING] = true,
		[UWB_RSV_STATE_T_EXPANDING_DENIED] = true,
	};

	return has_two_drp_ies[rsv->state];
}

/**
 * uwb_rsv_state_str - return a string for a reservation state
 * @state: the reservation state.
 */
const char *uwb_rsv_state_str(enum uwb_rsv_state state)
{
	if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
		return "unknown";
	return rsv_states[state];
}
EXPORT_SYMBOL_GPL(uwb_rsv_state_str);

/**
 * uwb_rsv_type_str - return a string for a reservation type
 * @type: the reservation type
 */
const char *uwb_rsv_type_str(enum uwb_drp_type type)
{
	if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
		return "invalid";
	return rsv_types[type];
}
EXPORT_SYMBOL_GPL(uwb_rsv_type_str);

void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
}

static void uwb_rsv_release(struct kref *kref)
{
	struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);

	kfree(rsv);
}

void uwb_rsv_get(struct uwb_rsv *rsv)
{
	kref_get(&rsv->kref);
}

void uwb_rsv_put(struct uwb_rsv *rsv)
{
	kref_put(&rsv->kref, uwb_rsv_release);
}

/*
 * Get a free stream index for a reservation.
 *
 * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
 * the stream is allocated from a pool of per-RC stream indexes,
 * otherwise a unique stream index for the target is selected.
 */
static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;
	int stream;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return -EINVAL;
	}

	stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
	if (stream >= UWB_NUM_STREAMS)
		return -EBUSY;

	rsv->stream = stream;
	set_bit(stream, streams_bm);

	dev_dbg(dev, "get stream %d\n", rsv->stream);

	return 0;
}

static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return;
	}

	clear_bit(rsv->stream, streams_bm);

	dev_dbg(dev, "put stream %d\n", rsv->stream);
}

void uwb_rsv_backoff_win_timer(unsigned long arg)
{
	struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
	struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
	struct device *dev = &rc->uwb_dev.dev;

	bow->can_reserve_extra_mases = true;
	if (bow->total_expired <= 4) {
		bow->total_expired++;
	} else {
		/* after 4 backoff windows have expired we can exit from
		 * the backoff procedure */
		bow->total_expired = 0;
		bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	}
	dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n);

	/* try to relocate all the "to be moved" relocations */
	uwb_rsv_handle_drp_avail_change(rc);
}

void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned timeout_us;

	dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);

	bow->can_reserve_extra_mases = false;

	if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
		return;

	bow->window <<= 1;
	bow->n = random32() & (bow->window - 1);
	dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n);

	/* reset the timer associated variables */
	timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
	bow->total_expired = 0;
	mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}

static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within 1
	 * superframe and should not be terminated if no response is
	 * received.
	 */
	if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;

	}

	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}

/*
 * Update a reservation's state, and schedule an update of the
 * transmitted DRP IEs.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;

	uwb_rsv_dump("SU", rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}

static void uwb_rsv_callback(struct uwb_rsv *rsv)
{
	if (rsv->callback)
		rsv->callback(rsv);
}

void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_O_MOVE_EXPANDING:
		case UWB_RSV_STATE_O_MOVE_COMBINING:
		case UWB_RSV_STATE_O_MOVE_REDUCING:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		case UWB_RSV_STATE_T_RESIZED:
		case UWB_RSV_STATE_NONE:
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	uwb_rsv_dump("SC", rsv);

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* in the companion there are the MASes to drop */
		bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->state == UWB_RSV_STATE_O_MODIFIED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
			rsv->needs_release_companion_mas = false;
		}
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		rsv->needs_release_companion_mas = true;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		rsv->mas.safe += mv->companion_mas.safe;
		rsv->mas.unsafe += mv->companion_mas.unsafe;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		rsv->needs_release_companion_mas = true;
		rsv->mas.safe = mv->final_mas.safe;
		rsv->mas.unsafe = mv->final_mas.unsafe;
		bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	case UWB_RSV_STATE_T_CONFLICT:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
		break;
	case UWB_RSV_STATE_T_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		rsv->needs_release_companion_mas = true;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}

static void uwb_rsv_handle_timeout_work(struct work_struct *work)
{
	struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
					   handle_timeout_work);
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	uwb_rsv_dump("TO", rsv);

	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->is_multicast)
			goto unlock;
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		/*
		 * The timeout could be for the main or the companion
		 * DRP, assume it's for the companion and drop that
		 * first. A further timeout is required to drop the
		 * main.
		 */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
		goto unlock;
	default:
		break;
	}

	uwb_rsv_remove(rsv);

unlock:
	mutex_unlock(&rc->rsvs_mutex);
}

static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv;

	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
	if (!rsv)
		return NULL;

	INIT_LIST_HEAD(&rsv->rc_node);
	INIT_LIST_HEAD(&rsv->pal_node);
	kref_init(&rsv->kref);
	init_timer(&rsv->timer);
	rsv->timer.function = uwb_rsv_timer;
	rsv->timer.data = (unsigned long)rsv;

	rsv->rc = rc;
	INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);

	return rsv;
}

/**
 * uwb_rsv_create - allocate and initialize a UWB reservation structure
 * @rc: the radio controller
 * @cb: callback to use when the reservation completes or terminates
 * @pal_priv: data private to the PAL to be passed in the callback
 *
 * The callback is called when the state of the reservation changes from:
 *
 *   - pending to accepted
 *   - pending to denied
 *   - accepted to terminated
 *   - pending to terminated
 */
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;

	return rsv;
}
EXPORT_SYMBOL_GPL(uwb_rsv_create);

void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	uwb_rsv_dump("RM", rsv);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	if (rsv->needs_release_companion_mas)
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
	uwb_drp_avail_release(rsv->rc, &rsv->mas);

	if (uwb_rsv_is_owner(rsv))
		uwb_rsv_put_stream(rsv);

	del_timer_sync(&rsv->timer);
	uwb_dev_put(rsv->owner);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);

	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);
}

/**
 * uwb_rsv_destroy - free a UWB reservation structure
 * @rsv: the reservation to free
 *
 * The reservation must already be terminated.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	uwb_rsv_put(rsv);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);

/**
 * uwb_rsv_establish - start a reservation establishment
 * @rsv: the reservation
 *
 * The PAL should fill in @rsv's owner, target, type, max_mas,
 * min_mas, max_interval and is_multicast fields. If the target is a
 * uwb_dev it must be referenced.
 *
 * The reservation's callback will be called when the reservation is
 * accepted, denied or times out.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_mas_bm available;
	int ret;

	mutex_lock(&rc->rsvs_mutex);
	ret = uwb_rsv_get_stream(rsv);
	if (ret)
		goto out;

	rsv->tiebreaker = random32() & 1;
	/* get available mas bitmap */
	uwb_drp_available(rc, &available);

	ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
	if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
		ret = -EBUSY;
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
	if (ret != 0) {
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	uwb_rsv_get(rsv);
	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);

/**
 * uwb_rsv_modify - modify an already established reservation
 * @rsv: the reservation to modify
 * @max_mas: new maximum MAS to reserve
 * @min_mas: new minimum MAS to reserve
 * @max_interval: new max_interval to use
 *
 * FIXME: implement this once there are PALs that use it.
 */
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);

/*
 * move an already established reservation (rc->rsvs_mutex must be
 * held when this function is called)
 */
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv;
	int ret = 0;

	if (bow->can_reserve_extra_mases == false)
		return -EBUSY;

	mv = &rsv->mv;

	if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {

		if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
			/* We want to move the reservation */
			bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
	} else {
		dev_dbg(dev, "new allocation not found\n");
	}

	return ret;
}

/* Try to move every reservation in state O_ESTABLISHED, giving the
 * MAS allocator algorithm an availability that is the real one plus
 * the allocation already established for the reservation. */
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv *rsv;
	struct uwb_mas_bm mas;

	if (bow->can_reserve_extra_mases == false)
		return;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
		    rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
			uwb_drp_available(rc, &mas);
			bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_rsv_try_move(rsv, &mas);
		}
	}

}

/**
 * uwb_rsv_terminate - terminate an established reservation
 * @rsv: the reservation to terminate
 *
 * A reservation is terminated by removing the DRP IE from the beacon,
 * the other end will consider the reservation to be terminated when
 * it does not see the DRP IE for at least mMaxLostBeacons.
 *
 * If applicable, the reference to the target uwb_dev will be released.
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

-	uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+	if (rsv->state != UWB_RSV_STATE_NONE)
+		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);

/**
 * uwb_rsv_accept - accept a new reservation from a peer
 * @rsv: the reservation
 * @cb: call back for reservation changes
 * @pal_priv: data to be passed in the above call back
 *
 * Reservation requests from peers are denied unless a PAL accepts them
 * by calling this function.
 *
 * The PAL must call uwb_rsv_destroy() for all accepted reservations before
 * calling uwb_pal_unregister().
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	uwb_rsv_get(rsv);

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	rsv->state = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);

/*
 * Is a received DRP IE for this reservation?
 */
static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
			  struct uwb_ie_drp *drp_ie)
{
	struct uwb_dev_addr *rsv_src;
	int stream;

	stream = uwb_ie_drp_stream_index(drp_ie);

	if (rsv->stream != stream)
		return false;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEVADDR:
		return rsv->stream == stream;
	case UWB_RSV_TARGET_DEV:
		if (uwb_ie_drp_owner(drp_ie))
			rsv_src = &rsv->owner->dev_addr;
		else
			rsv_src = &rsv->target.dev->dev_addr;
		return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
	}
	return false;
}

static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->rc = rc;
	rsv->owner = src;
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev = &rc->uwb_dev;
	uwb_dev_get(&rc->uwb_dev);
	rsv->type = uwb_ie_drp_type(drp_ie);
	rsv->stream = uwb_ie_drp_stream_index(drp_ie);
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/*
	 * See if any PALs are interested in this reservation. If not,
	 * deny the request.
	 */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	mutex_lock(&rc->uwb_dev.mutex);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(pal, rsv);
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	mutex_unlock(&rc->uwb_dev.mutex);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	state = rsv->state;
	rsv->state = UWB_RSV_STATE_NONE;

	/* FIXME: do something sensible here */
	if (state == UWB_RSV_STATE_T_ACCEPTED
	    && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
		/* FIXME: do something sensible here */
	} else {
		uwb_rsv_set_state(rsv, state);
	}

	return rsv;
}

/**
 * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation
 * @rsv: the reservation.
 * @mas: returns the available MAS.
 *
 * The usable MAS of a reservation may be less than the negotiated MAS
 * if alien BPs are present.
 */
void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
{
	bitmap_zero(mas->bm, UWB_NUM_MAS);
	bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
}
EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);

/**
 * uwb_rsv_find - find a reservation for a received DRP IE.
 * @rc: the radio controller
 * @src: source of the DRP IE
 * @drp_ie: the DRP IE
 *
 * If the reservation cannot be found and the DRP IE is from a peer
 * attempting to establish a new reservation, create a new reservation
 * and add it to the list.
 */
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
			     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (uwb_rsv_match(rsv, src, drp_ie))
			return rsv;
	}

	if (uwb_ie_drp_owner(drp_ie))
		return uwb_rsv_new_target(rc, src, drp_ie);

	return NULL;
}

/*
 * Go through all the reservations and check for timeouts and (if
 * necessary) update their DRP IEs.
 *
 * FIXME: look at building the SET_DRP_IE command here rather than
 * having to rescan the list in uwb_rc_send_all_drp_ie().
 */
static bool uwb_rsv_update_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;
	bool ie_updated = false;

	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (!rsv->ie_valid) {
			uwb_drp_ie_update(rsv);
			ie_updated = true;
		}
	}

	return ie_updated;
}

void uwb_rsv_queue_update(struct uwb_rc *rc)
{
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
}

/**
 * uwb_rsv_sched_update - schedule an update of the DRP IEs
 * @rc: the radio controller.
 *
 * To improve performance and ensure correctness with [ECMA-368] the
 * number of SET-DRP-IE commands that are done is limited.
 *
 * DRP IE updates come from two sources: DRP events from the hardware
 * which all occur at the beginning of the superframe ('synchronous'
 * events) and reservation establishment/termination requests from
 * PALs or timers ('asynchronous' events).
 *
 * A delayed work ensures that all the synchronous events result in
 * one SET-DRP-IE command.
 *
 * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
 * flags) will prevent an asynchronous event from starting a SET-DRP-IE
 * command if one is currently awaiting a response.
 *
 * FIXME: this does leave a window where an asynchronous event can delay
 * the SET-DRP-IE for a synchronous event by one superframe.
 */
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
	spin_lock(&rc->rsvs_lock);
	if (!delayed_work_pending(&rc->rsv_update_work)) {
		if (rc->set_drp_ie_pending > 0) {
			rc->set_drp_ie_pending++;
			goto unlock;
		}
		uwb_rsv_queue_update(rc);
	}
unlock:
	spin_unlock(&rc->rsvs_lock);
}

/*
 * Update DRP IEs and, if necessary, the DRP Availability IE and send
 * the updated IEs to the radio controller.
 */
static void uwb_rsv_update_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_update_work.work);
	bool ie_updated;

	mutex_lock(&rc->rsvs_mutex);

	ie_updated = uwb_rsv_update_all(rc);

	if (!rc->drp_avail.ie_valid) {
		uwb_drp_avail_ie_update(rc);
		ie_updated = true;
	}

	if (ie_updated && (rc->set_drp_ie_pending == 0))
		uwb_rc_send_all_drp_ie(rc);

	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_rsv_alien_bp_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_alien_bp_work.work);
	struct uwb_rsv *rsv;

	mutex_lock(&rc->rsvs_mutex);

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
			rsv->callback(rsv);
		}
	}

	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_rsv_timer(unsigned long arg)
{
	struct uwb_rsv *rsv = (struct uwb_rsv *)arg;

	queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}

/**
 * uwb_rsv_remove_all - remove all reservations
 * @rc: the radio controller
 *
 * A DRP IE update is not done.
 */
void uwb_rsv_remove_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		uwb_rsv_remove(rsv);
	}
	/* Cancel any postponed update. */
	rc->set_drp_ie_pending = 0;
	mutex_unlock(&rc->rsvs_mutex);

	cancel_delayed_work_sync(&rc->rsv_update_work);
}

void uwb_rsv_init(struct uwb_rc *rc)
{
	INIT_LIST_HEAD(&rc->reservations);
	INIT_LIST_HEAD(&rc->cnflt_alien_list);
	mutex_init(&rc->rsvs_mutex);
	spin_lock_init(&rc->rsvs_lock);
	INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
	INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
	rc->bow.can_reserve_extra_mases = true;
	rc->bow.total_expired = 0;
	rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	init_timer(&rc->bow.timer);
	rc->bow.timer.function = uwb_rsv_backoff_win_timer;
	rc->bow.timer.data = (unsigned long)&rc->bow;

	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}

int uwb_rsv_setup(struct uwb_rc *rc)
{
	char name[16];

	snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
	rc->rsv_workq = create_singlethread_workqueue(name);
	if (rc->rsv_workq == NULL)
		return -ENOMEM;

	return 0;
}

void uwb_rsv_cleanup(struct uwb_rc *rc)
{
	uwb_rsv_remove_all(rc);
	destroy_workqueue(rc->rsv_workq);
}