Commit c8577819009473311d5f74112e136f17d7859ee1

Authored by David S. Miller

Merge tag 'master-2014-11-20' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless

John W. Linville says:

====================
pull request: wireless 2014-11-20

Please pull this little batch of fixes intended for the 3.18 stream!

For the mac80211 patch, Johannes says:

"Here's another last minute fix, for minstrel HT crashing
depending on the value of some uninitialised stack."
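
(A minimal sketch of that bug class, with invented names rather than the
actual minstrel HT code: a stack variable is read before anything is
written to it, so behaviour depends on whatever the previous stack frame
left behind.)

	static int pick_best_rate(const int *tp, int n)
	{
		int i, best;			/* BUG: never initialised */

		for (i = 0; i < n; i++)
			if (tp[i] > tp[best])	/* first pass indexes with garbage */
				best = i;
		return best;			/* fix: int best = 0; */
	}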

On top of that...

Ben Greear fixes an ath9k regression in which a BSSID mask is
miscalculated.
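
(The mask derivation can be seen in ath9k_vif_iter() in the listing below:
every mask bit at which some interface address differs from the hardware
MAC is cleared, so the hardware only matches on the bits all local
addresses share. A worked example with invented addresses:

	hw_macaddr  = 00:03:7f:11:22:01
	second vif  = 00:03:7f:11:22:02
	xor         = 00:00:00:00:00:03
	mask &= ~xor  ->  ff:ff:ff:ff:ff:fc)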

Dmitry Torokhov corrects an error handling routine in brcmfmac which
was checking an unsigned variable for a negative value.
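
(A sketch of that bug class, illustrative only, with a hypothetical
some_read(): an unsigned type can never be negative, so the check is dead
code and failures slip through unnoticed.

	u32 len = some_read(buf);	/* returns a negative errno on failure */
	if (len < 0)			/* always false: len is unsigned */
		return -EIO;

The fix is to keep such return values in a signed type, e.g. s32, until
the error check has been done.)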

Johannes Berg avoids a build problem in brcmfmac for arches where
linux/unaligned/access_ok.h and asm/unaligned.h conflict.
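
(The portable pattern is to include asm/unaligned.h, which selects the
right per-architecture accessor implementation, rather than including a
linux/unaligned/*.h header directly. A minimal sketch:

	#include <linux/types.h>
	#include <asm/unaligned.h>	/* not linux/unaligned/access_ok.h */

	static u32 parse_le32(const u8 *buf)
	{
		return get_unaligned_le32(buf);	/* safe even if buf is unaligned */
	})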

Mathy Vanhoef addresses another brcmfmac issue so as to eliminate a
use-after-free of the URB transfer buffer if a timeout occurs.
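
(A sketch of the timeout-path pattern, illustrative rather than the actual
brcmfmac change, with urb, buf, done and TIMEOUT_MS as placeholders: an
in-flight URB must be cancelled before its transfer buffer is freed, or
the completion path touches freed memory.

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(TIMEOUT_MS))) {
		usb_kill_urb(urb);	/* synchronously cancel first */
		kfree(buf);		/* only now is the buffer safe to free */
		return -ETIMEDOUT;
	})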

Please let me know if there are problems!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 5 changed files (inline diff):

drivers/net/wireless/ath/ath9k/main.c
1 /* 1 /*
2 * Copyright (c) 2008-2011 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies. 6 * copyright notice and this permission notice appear in all copies.
7 * 7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17 #include <linux/nl80211.h> 17 #include <linux/nl80211.h>
18 #include <linux/delay.h> 18 #include <linux/delay.h>
19 #include "ath9k.h" 19 #include "ath9k.h"
20 #include "btcoex.h" 20 #include "btcoex.h"
21 21
22 u8 ath9k_parse_mpdudensity(u8 mpdudensity) 22 u8 ath9k_parse_mpdudensity(u8 mpdudensity)
23 { 23 {
24 /* 24 /*
25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
26 * 0 for no restriction 26 * 0 for no restriction
27 * 1 for 1/4 us 27 * 1 for 1/4 us
28 * 2 for 1/2 us 28 * 2 for 1/2 us
29 * 3 for 1 us 29 * 3 for 1 us
30 * 4 for 2 us 30 * 4 for 2 us
31 * 5 for 4 us 31 * 5 for 4 us
32 * 6 for 8 us 32 * 6 for 8 us
33 * 7 for 16 us 33 * 7 for 16 us
34 */ 34 */
35 switch (mpdudensity) { 35 switch (mpdudensity) {
36 case 0: 36 case 0:
37 return 0; 37 return 0;
38 case 1: 38 case 1:
39 case 2: 39 case 2:
40 case 3: 40 case 3:
41 /* Our lower layer calculations limit our precision to 41 /* Our lower layer calculations limit our precision to
42 1 microsecond */ 42 1 microsecond */
43 return 1; 43 return 1;
44 case 4: 44 case 4:
45 return 2; 45 return 2;
46 case 5: 46 case 5:
47 return 4; 47 return 4;
48 case 6: 48 case 6:
49 return 8; 49 return 8;
50 case 7: 50 case 7:
51 return 16; 51 return 16;
52 default: 52 default:
53 return 0; 53 return 0;
54 } 54 }
55 } 55 }
56 56
57 static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq) 57 static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
58 { 58 {
59 bool pending = false; 59 bool pending = false;
60 60
61 spin_lock_bh(&txq->axq_lock); 61 spin_lock_bh(&txq->axq_lock);
62 62
63 if (txq->axq_depth) { 63 if (txq->axq_depth) {
64 pending = true; 64 pending = true;
65 goto out; 65 goto out;
66 } 66 }
67 67
68 if (txq->mac80211_qnum >= 0) { 68 if (txq->mac80211_qnum >= 0) {
69 struct list_head *list; 69 struct list_head *list;
70 70
71 list = &sc->cur_chan->acq[txq->mac80211_qnum]; 71 list = &sc->cur_chan->acq[txq->mac80211_qnum];
72 if (!list_empty(list)) 72 if (!list_empty(list))
73 pending = true; 73 pending = true;
74 } 74 }
75 out: 75 out:
76 spin_unlock_bh(&txq->axq_lock); 76 spin_unlock_bh(&txq->axq_lock);
77 return pending; 77 return pending;
78 } 78 }
79 79
80 static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 80 static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
81 { 81 {
82 unsigned long flags; 82 unsigned long flags;
83 bool ret; 83 bool ret;
84 84
85 spin_lock_irqsave(&sc->sc_pm_lock, flags); 85 spin_lock_irqsave(&sc->sc_pm_lock, flags);
86 ret = ath9k_hw_setpower(sc->sc_ah, mode); 86 ret = ath9k_hw_setpower(sc->sc_ah, mode);
87 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 87 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
88 88
89 return ret; 89 return ret;
90 } 90 }
91 91
92 void ath_ps_full_sleep(unsigned long data) 92 void ath_ps_full_sleep(unsigned long data)
93 { 93 {
94 struct ath_softc *sc = (struct ath_softc *) data; 94 struct ath_softc *sc = (struct ath_softc *) data;
95 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 95 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
96 bool reset; 96 bool reset;
97 97
98 spin_lock(&common->cc_lock); 98 spin_lock(&common->cc_lock);
99 ath_hw_cycle_counters_update(common); 99 ath_hw_cycle_counters_update(common);
100 spin_unlock(&common->cc_lock); 100 spin_unlock(&common->cc_lock);
101 101
102 ath9k_hw_setrxabort(sc->sc_ah, 1); 102 ath9k_hw_setrxabort(sc->sc_ah, 1);
103 ath9k_hw_stopdmarecv(sc->sc_ah, &reset); 103 ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
104 104
105 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 105 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
106 } 106 }
107 107
108 void ath9k_ps_wakeup(struct ath_softc *sc) 108 void ath9k_ps_wakeup(struct ath_softc *sc)
109 { 109 {
110 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 110 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
111 unsigned long flags; 111 unsigned long flags;
112 enum ath9k_power_mode power_mode; 112 enum ath9k_power_mode power_mode;
113 113
114 spin_lock_irqsave(&sc->sc_pm_lock, flags); 114 spin_lock_irqsave(&sc->sc_pm_lock, flags);
115 if (++sc->ps_usecount != 1) 115 if (++sc->ps_usecount != 1)
116 goto unlock; 116 goto unlock;
117 117
118 del_timer_sync(&sc->sleep_timer); 118 del_timer_sync(&sc->sleep_timer);
119 power_mode = sc->sc_ah->power_mode; 119 power_mode = sc->sc_ah->power_mode;
120 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 120 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
121 121
122 /* 122 /*
123 * While the hardware is asleep, the cycle counters contain no 123 * While the hardware is asleep, the cycle counters contain no
124 * useful data. Better clear them now so that they don't mess up 124 * useful data. Better clear them now so that they don't mess up
125 * survey data results. 125 * survey data results.
126 */ 126 */
127 if (power_mode != ATH9K_PM_AWAKE) { 127 if (power_mode != ATH9K_PM_AWAKE) {
128 spin_lock(&common->cc_lock); 128 spin_lock(&common->cc_lock);
129 ath_hw_cycle_counters_update(common); 129 ath_hw_cycle_counters_update(common);
130 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 130 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
131 memset(&common->cc_ani, 0, sizeof(common->cc_ani)); 131 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
132 spin_unlock(&common->cc_lock); 132 spin_unlock(&common->cc_lock);
133 } 133 }
134 134
135 unlock: 135 unlock:
136 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 136 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
137 } 137 }
138 138
139 void ath9k_ps_restore(struct ath_softc *sc) 139 void ath9k_ps_restore(struct ath_softc *sc)
140 { 140 {
141 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 141 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
142 enum ath9k_power_mode mode; 142 enum ath9k_power_mode mode;
143 unsigned long flags; 143 unsigned long flags;
144 144
145 spin_lock_irqsave(&sc->sc_pm_lock, flags); 145 spin_lock_irqsave(&sc->sc_pm_lock, flags);
146 if (--sc->ps_usecount != 0) 146 if (--sc->ps_usecount != 0)
147 goto unlock; 147 goto unlock;
148 148
149 if (sc->ps_idle) { 149 if (sc->ps_idle) {
150 mod_timer(&sc->sleep_timer, jiffies + HZ / 10); 150 mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
151 goto unlock; 151 goto unlock;
152 } 152 }
153 153
154 if (sc->ps_enabled && 154 if (sc->ps_enabled &&
155 !(sc->ps_flags & (PS_WAIT_FOR_BEACON | 155 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
156 PS_WAIT_FOR_CAB | 156 PS_WAIT_FOR_CAB |
157 PS_WAIT_FOR_PSPOLL_DATA | 157 PS_WAIT_FOR_PSPOLL_DATA |
158 PS_WAIT_FOR_TX_ACK | 158 PS_WAIT_FOR_TX_ACK |
159 PS_WAIT_FOR_ANI))) { 159 PS_WAIT_FOR_ANI))) {
160 mode = ATH9K_PM_NETWORK_SLEEP; 160 mode = ATH9K_PM_NETWORK_SLEEP;
161 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah)) 161 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
162 ath9k_btcoex_stop_gen_timer(sc); 162 ath9k_btcoex_stop_gen_timer(sc);
163 } else { 163 } else {
164 goto unlock; 164 goto unlock;
165 } 165 }
166 166
167 spin_lock(&common->cc_lock); 167 spin_lock(&common->cc_lock);
168 ath_hw_cycle_counters_update(common); 168 ath_hw_cycle_counters_update(common);
169 spin_unlock(&common->cc_lock); 169 spin_unlock(&common->cc_lock);
170 170
171 ath9k_hw_setpower(sc->sc_ah, mode); 171 ath9k_hw_setpower(sc->sc_ah, mode);
172 172
173 unlock: 173 unlock:
174 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 174 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
175 } 175 }
176 176
177 static void __ath_cancel_work(struct ath_softc *sc) 177 static void __ath_cancel_work(struct ath_softc *sc)
178 { 178 {
179 cancel_work_sync(&sc->paprd_work); 179 cancel_work_sync(&sc->paprd_work);
180 cancel_delayed_work_sync(&sc->tx_complete_work); 180 cancel_delayed_work_sync(&sc->tx_complete_work);
181 cancel_delayed_work_sync(&sc->hw_pll_work); 181 cancel_delayed_work_sync(&sc->hw_pll_work);
182 182
183 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 183 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
184 if (ath9k_hw_mci_is_enabled(sc->sc_ah)) 184 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
185 cancel_work_sync(&sc->mci_work); 185 cancel_work_sync(&sc->mci_work);
186 #endif 186 #endif
187 } 187 }
188 188
189 void ath_cancel_work(struct ath_softc *sc) 189 void ath_cancel_work(struct ath_softc *sc)
190 { 190 {
191 __ath_cancel_work(sc); 191 __ath_cancel_work(sc);
192 cancel_work_sync(&sc->hw_reset_work); 192 cancel_work_sync(&sc->hw_reset_work);
193 } 193 }
194 194
195 void ath_restart_work(struct ath_softc *sc) 195 void ath_restart_work(struct ath_softc *sc)
196 { 196 {
197 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 197 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
198 198
199 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) 199 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
200 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, 200 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
201 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); 201 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
202 202
203 ath_start_ani(sc); 203 ath_start_ani(sc);
204 } 204 }
205 205
206 static bool ath_prepare_reset(struct ath_softc *sc) 206 static bool ath_prepare_reset(struct ath_softc *sc)
207 { 207 {
208 struct ath_hw *ah = sc->sc_ah; 208 struct ath_hw *ah = sc->sc_ah;
209 bool ret = true; 209 bool ret = true;
210 210
211 ieee80211_stop_queues(sc->hw); 211 ieee80211_stop_queues(sc->hw);
212 ath_stop_ani(sc); 212 ath_stop_ani(sc);
213 ath9k_hw_disable_interrupts(ah); 213 ath9k_hw_disable_interrupts(ah);
214 214
215 if (!ath_drain_all_txq(sc)) 215 if (!ath_drain_all_txq(sc))
216 ret = false; 216 ret = false;
217 217
218 if (!ath_stoprecv(sc)) 218 if (!ath_stoprecv(sc))
219 ret = false; 219 ret = false;
220 220
221 return ret; 221 return ret;
222 } 222 }
223 223
224 static bool ath_complete_reset(struct ath_softc *sc, bool start) 224 static bool ath_complete_reset(struct ath_softc *sc, bool start)
225 { 225 {
226 struct ath_hw *ah = sc->sc_ah; 226 struct ath_hw *ah = sc->sc_ah;
227 struct ath_common *common = ath9k_hw_common(ah); 227 struct ath_common *common = ath9k_hw_common(ah);
228 unsigned long flags; 228 unsigned long flags;
229 229
230 ath9k_calculate_summary_state(sc, sc->cur_chan); 230 ath9k_calculate_summary_state(sc, sc->cur_chan);
231 ath_startrecv(sc); 231 ath_startrecv(sc);
232 ath9k_cmn_update_txpow(ah, sc->curtxpow, 232 ath9k_cmn_update_txpow(ah, sc->curtxpow,
233 sc->cur_chan->txpower, &sc->curtxpow); 233 sc->cur_chan->txpower, &sc->curtxpow);
234 clear_bit(ATH_OP_HW_RESET, &common->op_flags); 234 clear_bit(ATH_OP_HW_RESET, &common->op_flags);
235 235
236 if (!sc->cur_chan->offchannel && start) { 236 if (!sc->cur_chan->offchannel && start) {
237 /* restore per chanctx TSF timer */ 237 /* restore per chanctx TSF timer */
238 if (sc->cur_chan->tsf_val) { 238 if (sc->cur_chan->tsf_val) {
239 u32 offset; 239 u32 offset;
240 240
241 offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, 241 offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
242 NULL); 242 NULL);
243 ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset); 243 ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
244 } 244 }
245 245
246 246
247 if (!test_bit(ATH_OP_BEACONS, &common->op_flags)) 247 if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
248 goto work; 248 goto work;
249 249
250 if (ah->opmode == NL80211_IFTYPE_STATION && 250 if (ah->opmode == NL80211_IFTYPE_STATION &&
251 test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) { 251 test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
252 spin_lock_irqsave(&sc->sc_pm_lock, flags); 252 spin_lock_irqsave(&sc->sc_pm_lock, flags);
253 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 253 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
254 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 254 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
255 } else { 255 } else {
256 ath9k_set_beacon(sc); 256 ath9k_set_beacon(sc);
257 } 257 }
258 work: 258 work:
259 ath_restart_work(sc); 259 ath_restart_work(sc);
260 ath_txq_schedule_all(sc); 260 ath_txq_schedule_all(sc);
261 } 261 }
262 262
263 sc->gtt_cnt = 0; 263 sc->gtt_cnt = 0;
264 264
265 ath9k_hw_set_interrupts(ah); 265 ath9k_hw_set_interrupts(ah);
266 ath9k_hw_enable_interrupts(ah); 266 ath9k_hw_enable_interrupts(ah);
267 ieee80211_wake_queues(sc->hw); 267 ieee80211_wake_queues(sc->hw);
268 ath9k_p2p_ps_timer(sc); 268 ath9k_p2p_ps_timer(sc);
269 269
270 return true; 270 return true;
271 } 271 }
272 272
273 int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) 273 int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
274 { 274 {
275 struct ath_hw *ah = sc->sc_ah; 275 struct ath_hw *ah = sc->sc_ah;
276 struct ath_common *common = ath9k_hw_common(ah); 276 struct ath_common *common = ath9k_hw_common(ah);
277 struct ath9k_hw_cal_data *caldata = NULL; 277 struct ath9k_hw_cal_data *caldata = NULL;
278 bool fastcc = true; 278 bool fastcc = true;
279 int r; 279 int r;
280 280
281 __ath_cancel_work(sc); 281 __ath_cancel_work(sc);
282 282
283 tasklet_disable(&sc->intr_tq); 283 tasklet_disable(&sc->intr_tq);
284 spin_lock_bh(&sc->sc_pcu_lock); 284 spin_lock_bh(&sc->sc_pcu_lock);
285 285
286 if (!sc->cur_chan->offchannel) { 286 if (!sc->cur_chan->offchannel) {
287 fastcc = false; 287 fastcc = false;
288 caldata = &sc->cur_chan->caldata; 288 caldata = &sc->cur_chan->caldata;
289 } 289 }
290 290
291 if (!hchan) { 291 if (!hchan) {
292 fastcc = false; 292 fastcc = false;
293 hchan = ah->curchan; 293 hchan = ah->curchan;
294 } 294 }
295 295
296 if (!ath_prepare_reset(sc)) 296 if (!ath_prepare_reset(sc))
297 fastcc = false; 297 fastcc = false;
298 298
299 if (ath9k_is_chanctx_enabled()) 299 if (ath9k_is_chanctx_enabled())
300 fastcc = false; 300 fastcc = false;
301 301
302 spin_lock_bh(&sc->chan_lock); 302 spin_lock_bh(&sc->chan_lock);
303 sc->cur_chandef = sc->cur_chan->chandef; 303 sc->cur_chandef = sc->cur_chan->chandef;
304 spin_unlock_bh(&sc->chan_lock); 304 spin_unlock_bh(&sc->chan_lock);
305 305
306 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", 306 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
307 hchan->channel, IS_CHAN_HT40(hchan), fastcc); 307 hchan->channel, IS_CHAN_HT40(hchan), fastcc);
308 308
309 r = ath9k_hw_reset(ah, hchan, caldata, fastcc); 309 r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
310 if (r) { 310 if (r) {
311 ath_err(common, 311 ath_err(common,
312 "Unable to reset channel, reset status %d\n", r); 312 "Unable to reset channel, reset status %d\n", r);
313 313
314 ath9k_hw_enable_interrupts(ah); 314 ath9k_hw_enable_interrupts(ah);
315 ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); 315 ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
316 316
317 goto out; 317 goto out;
318 } 318 }
319 319
320 if (ath9k_hw_mci_is_enabled(sc->sc_ah) && 320 if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
321 sc->cur_chan->offchannel) 321 sc->cur_chan->offchannel)
322 ath9k_mci_set_txpower(sc, true, false); 322 ath9k_mci_set_txpower(sc, true, false);
323 323
324 if (!ath_complete_reset(sc, true)) 324 if (!ath_complete_reset(sc, true))
325 r = -EIO; 325 r = -EIO;
326 326
327 out: 327 out:
328 spin_unlock_bh(&sc->sc_pcu_lock); 328 spin_unlock_bh(&sc->sc_pcu_lock);
329 tasklet_enable(&sc->intr_tq); 329 tasklet_enable(&sc->intr_tq);
330 330
331 return r; 331 return r;
332 } 332 }
333 333
334 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, 334 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
335 struct ieee80211_vif *vif) 335 struct ieee80211_vif *vif)
336 { 336 {
337 struct ath_node *an; 337 struct ath_node *an;
338 an = (struct ath_node *)sta->drv_priv; 338 an = (struct ath_node *)sta->drv_priv;
339 339
340 an->sc = sc; 340 an->sc = sc;
341 an->sta = sta; 341 an->sta = sta;
342 an->vif = vif; 342 an->vif = vif;
343 memset(&an->key_idx, 0, sizeof(an->key_idx)); 343 memset(&an->key_idx, 0, sizeof(an->key_idx));
344 344
345 ath_tx_node_init(sc, an); 345 ath_tx_node_init(sc, an);
346 346
347 ath_dynack_node_init(sc->sc_ah, an); 347 ath_dynack_node_init(sc->sc_ah, an);
348 } 348 }
349 349
350 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) 350 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
351 { 351 {
352 struct ath_node *an = (struct ath_node *)sta->drv_priv; 352 struct ath_node *an = (struct ath_node *)sta->drv_priv;
353 ath_tx_node_cleanup(sc, an); 353 ath_tx_node_cleanup(sc, an);
354 354
355 ath_dynack_node_deinit(sc->sc_ah, an); 355 ath_dynack_node_deinit(sc->sc_ah, an);
356 } 356 }
357 357
358 void ath9k_tasklet(unsigned long data) 358 void ath9k_tasklet(unsigned long data)
359 { 359 {
360 struct ath_softc *sc = (struct ath_softc *)data; 360 struct ath_softc *sc = (struct ath_softc *)data;
361 struct ath_hw *ah = sc->sc_ah; 361 struct ath_hw *ah = sc->sc_ah;
362 struct ath_common *common = ath9k_hw_common(ah); 362 struct ath_common *common = ath9k_hw_common(ah);
363 enum ath_reset_type type; 363 enum ath_reset_type type;
364 unsigned long flags; 364 unsigned long flags;
365 u32 status = sc->intrstatus; 365 u32 status = sc->intrstatus;
366 u32 rxmask; 366 u32 rxmask;
367 367
368 ath9k_ps_wakeup(sc); 368 ath9k_ps_wakeup(sc);
369 spin_lock(&sc->sc_pcu_lock); 369 spin_lock(&sc->sc_pcu_lock);
370 370
371 if (status & ATH9K_INT_FATAL) { 371 if (status & ATH9K_INT_FATAL) {
372 type = RESET_TYPE_FATAL_INT; 372 type = RESET_TYPE_FATAL_INT;
373 ath9k_queue_reset(sc, type); 373 ath9k_queue_reset(sc, type);
374 374
375 /* 375 /*
376 * Increment the ref. counter here so that 376 * Increment the ref. counter here so that
377 * interrupts are enabled in the reset routine. 377 * interrupts are enabled in the reset routine.
378 */ 378 */
379 atomic_inc(&ah->intr_ref_cnt); 379 atomic_inc(&ah->intr_ref_cnt);
380 ath_dbg(common, RESET, "FATAL: Skipping interrupts\n"); 380 ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
381 goto out; 381 goto out;
382 } 382 }
383 383
384 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) && 384 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
385 (status & ATH9K_INT_BB_WATCHDOG)) { 385 (status & ATH9K_INT_BB_WATCHDOG)) {
386 spin_lock(&common->cc_lock); 386 spin_lock(&common->cc_lock);
387 ath_hw_cycle_counters_update(common); 387 ath_hw_cycle_counters_update(common);
388 ar9003_hw_bb_watchdog_dbg_info(ah); 388 ar9003_hw_bb_watchdog_dbg_info(ah);
389 spin_unlock(&common->cc_lock); 389 spin_unlock(&common->cc_lock);
390 390
391 if (ar9003_hw_bb_watchdog_check(ah)) { 391 if (ar9003_hw_bb_watchdog_check(ah)) {
392 type = RESET_TYPE_BB_WATCHDOG; 392 type = RESET_TYPE_BB_WATCHDOG;
393 ath9k_queue_reset(sc, type); 393 ath9k_queue_reset(sc, type);
394 394
395 /* 395 /*
396 * Increment the ref. counter here so that 396 * Increment the ref. counter here so that
397 * interrupts are enabled in the reset routine. 397 * interrupts are enabled in the reset routine.
398 */ 398 */
399 atomic_inc(&ah->intr_ref_cnt); 399 atomic_inc(&ah->intr_ref_cnt);
400 ath_dbg(common, RESET, 400 ath_dbg(common, RESET,
401 "BB_WATCHDOG: Skipping interrupts\n"); 401 "BB_WATCHDOG: Skipping interrupts\n");
402 goto out; 402 goto out;
403 } 403 }
404 } 404 }
405 405
406 if (status & ATH9K_INT_GTT) { 406 if (status & ATH9K_INT_GTT) {
407 sc->gtt_cnt++; 407 sc->gtt_cnt++;
408 408
409 if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) { 409 if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
410 type = RESET_TYPE_TX_GTT; 410 type = RESET_TYPE_TX_GTT;
411 ath9k_queue_reset(sc, type); 411 ath9k_queue_reset(sc, type);
412 atomic_inc(&ah->intr_ref_cnt); 412 atomic_inc(&ah->intr_ref_cnt);
413 ath_dbg(common, RESET, 413 ath_dbg(common, RESET,
414 "GTT: Skipping interrupts\n"); 414 "GTT: Skipping interrupts\n");
415 goto out; 415 goto out;
416 } 416 }
417 } 417 }
418 418
419 spin_lock_irqsave(&sc->sc_pm_lock, flags); 419 spin_lock_irqsave(&sc->sc_pm_lock, flags);
420 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 420 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
421 /* 421 /*
422 * TSF sync does not look correct; remain awake to sync with 422 * TSF sync does not look correct; remain awake to sync with
423 * the next Beacon. 423 * the next Beacon.
424 */ 424 */
425 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); 425 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
426 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; 426 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
427 } 427 }
428 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 428 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
429 429
430 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 430 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
431 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | 431 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
432 ATH9K_INT_RXORN); 432 ATH9K_INT_RXORN);
433 else 433 else
434 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 434 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
435 435
436 if (status & rxmask) { 436 if (status & rxmask) {
437 /* Check for high priority Rx first */ 437 /* Check for high priority Rx first */
438 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 438 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
439 (status & ATH9K_INT_RXHP)) 439 (status & ATH9K_INT_RXHP))
440 ath_rx_tasklet(sc, 0, true); 440 ath_rx_tasklet(sc, 0, true);
441 441
442 ath_rx_tasklet(sc, 0, false); 442 ath_rx_tasklet(sc, 0, false);
443 } 443 }
444 444
445 if (status & ATH9K_INT_TX) { 445 if (status & ATH9K_INT_TX) {
446 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 446 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
447 /* 447 /*
448 * For EDMA chips, TX completion is enabled for the 448 * For EDMA chips, TX completion is enabled for the
449 * beacon queue, so if a beacon has been transmitted 449 * beacon queue, so if a beacon has been transmitted
450 * successfully after a GTT interrupt, the GTT counter 450 * successfully after a GTT interrupt, the GTT counter
451 * gets reset to zero here. 451 * gets reset to zero here.
452 */ 452 */
453 sc->gtt_cnt = 0; 453 sc->gtt_cnt = 0;
454 454
455 ath_tx_edma_tasklet(sc); 455 ath_tx_edma_tasklet(sc);
456 } else { 456 } else {
457 ath_tx_tasklet(sc); 457 ath_tx_tasklet(sc);
458 } 458 }
459 459
460 wake_up(&sc->tx_wait); 460 wake_up(&sc->tx_wait);
461 } 461 }
462 462
463 if (status & ATH9K_INT_GENTIMER) 463 if (status & ATH9K_INT_GENTIMER)
464 ath_gen_timer_isr(sc->sc_ah); 464 ath_gen_timer_isr(sc->sc_ah);
465 465
466 ath9k_btcoex_handle_interrupt(sc, status); 466 ath9k_btcoex_handle_interrupt(sc, status);
467 467
468 /* re-enable hardware interrupt */ 468 /* re-enable hardware interrupt */
469 ath9k_hw_enable_interrupts(ah); 469 ath9k_hw_enable_interrupts(ah);
470 out: 470 out:
471 spin_unlock(&sc->sc_pcu_lock); 471 spin_unlock(&sc->sc_pcu_lock);
472 ath9k_ps_restore(sc); 472 ath9k_ps_restore(sc);
473 } 473 }
474 474
475 irqreturn_t ath_isr(int irq, void *dev) 475 irqreturn_t ath_isr(int irq, void *dev)
476 { 476 {
477 #define SCHED_INTR ( \ 477 #define SCHED_INTR ( \
478 ATH9K_INT_FATAL | \ 478 ATH9K_INT_FATAL | \
479 ATH9K_INT_BB_WATCHDOG | \ 479 ATH9K_INT_BB_WATCHDOG | \
480 ATH9K_INT_RXORN | \ 480 ATH9K_INT_RXORN | \
481 ATH9K_INT_RXEOL | \ 481 ATH9K_INT_RXEOL | \
482 ATH9K_INT_RX | \ 482 ATH9K_INT_RX | \
483 ATH9K_INT_RXLP | \ 483 ATH9K_INT_RXLP | \
484 ATH9K_INT_RXHP | \ 484 ATH9K_INT_RXHP | \
485 ATH9K_INT_TX | \ 485 ATH9K_INT_TX | \
486 ATH9K_INT_BMISS | \ 486 ATH9K_INT_BMISS | \
487 ATH9K_INT_CST | \ 487 ATH9K_INT_CST | \
488 ATH9K_INT_GTT | \ 488 ATH9K_INT_GTT | \
489 ATH9K_INT_TSFOOR | \ 489 ATH9K_INT_TSFOOR | \
490 ATH9K_INT_GENTIMER | \ 490 ATH9K_INT_GENTIMER | \
491 ATH9K_INT_MCI) 491 ATH9K_INT_MCI)
492 492
493 struct ath_softc *sc = dev; 493 struct ath_softc *sc = dev;
494 struct ath_hw *ah = sc->sc_ah; 494 struct ath_hw *ah = sc->sc_ah;
495 struct ath_common *common = ath9k_hw_common(ah); 495 struct ath_common *common = ath9k_hw_common(ah);
496 enum ath9k_int status; 496 enum ath9k_int status;
497 u32 sync_cause = 0; 497 u32 sync_cause = 0;
498 bool sched = false; 498 bool sched = false;
499 499
500 /* 500 /*
501 * The hardware is not ready/present, don't 501 * The hardware is not ready/present, don't
502 * touch anything. Note this can happen early 502 * touch anything. Note this can happen early
503 * on if the IRQ is shared. 503 * on if the IRQ is shared.
504 */ 504 */
505 if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) 505 if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
506 return IRQ_NONE; 506 return IRQ_NONE;
507 507
508 /* shared irq, not for us */ 508 /* shared irq, not for us */
509 509
510 if (!ath9k_hw_intrpend(ah)) 510 if (!ath9k_hw_intrpend(ah))
511 return IRQ_NONE; 511 return IRQ_NONE;
512 512
513 if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) { 513 if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
514 ath9k_hw_kill_interrupts(ah); 514 ath9k_hw_kill_interrupts(ah);
515 return IRQ_HANDLED; 515 return IRQ_HANDLED;
516 } 516 }
517 517
518 /* 518 /*
519 * Figure out the reason(s) for the interrupt. Note 519 * Figure out the reason(s) for the interrupt. Note
520 * that the hal returns a pseudo-ISR that may include 520 * that the hal returns a pseudo-ISR that may include
521 * bits we haven't explicitly enabled so we mask the 521 * bits we haven't explicitly enabled so we mask the
522 * value to insure we only process bits we requested. 522 * value to insure we only process bits we requested.
523 */ 523 */
524 ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */ 524 ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
525 ath9k_debug_sync_cause(sc, sync_cause); 525 ath9k_debug_sync_cause(sc, sync_cause);
526 status &= ah->imask; /* discard unasked-for bits */ 526 status &= ah->imask; /* discard unasked-for bits */
527 527
528 /* 528 /*
529 * If there are no status bits set, then this interrupt was not 529 * If there are no status bits set, then this interrupt was not
530 * for me (should have been caught above). 530 * for me (should have been caught above).
531 */ 531 */
532 if (!status) 532 if (!status)
533 return IRQ_NONE; 533 return IRQ_NONE;
534 534
535 /* Cache the status */ 535 /* Cache the status */
536 sc->intrstatus = status; 536 sc->intrstatus = status;
537 537
538 if (status & SCHED_INTR) 538 if (status & SCHED_INTR)
539 sched = true; 539 sched = true;
540 540
541 /* 541 /*
542 * If a FATAL or RXORN interrupt is received, we have to reset the 542 * If a FATAL or RXORN interrupt is received, we have to reset the
543 * chip immediately. 543 * chip immediately.
544 */ 544 */
545 if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) && 545 if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
546 !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))) 546 !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
547 goto chip_reset; 547 goto chip_reset;
548 548
549 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) && 549 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
550 (status & ATH9K_INT_BB_WATCHDOG)) 550 (status & ATH9K_INT_BB_WATCHDOG))
551 goto chip_reset; 551 goto chip_reset;
552 552
553 #ifdef CONFIG_ATH9K_WOW 553 #ifdef CONFIG_ATH9K_WOW
554 if (status & ATH9K_INT_BMISS) { 554 if (status & ATH9K_INT_BMISS) {
555 if (atomic_read(&sc->wow_sleep_proc_intr) == 0) { 555 if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
556 atomic_inc(&sc->wow_got_bmiss_intr); 556 atomic_inc(&sc->wow_got_bmiss_intr);
557 atomic_dec(&sc->wow_sleep_proc_intr); 557 atomic_dec(&sc->wow_sleep_proc_intr);
558 } 558 }
559 } 559 }
560 #endif 560 #endif
561 561
562 if (status & ATH9K_INT_SWBA) 562 if (status & ATH9K_INT_SWBA)
563 tasklet_schedule(&sc->bcon_tasklet); 563 tasklet_schedule(&sc->bcon_tasklet);
564 564
565 if (status & ATH9K_INT_TXURN) 565 if (status & ATH9K_INT_TXURN)
566 ath9k_hw_updatetxtriglevel(ah, true); 566 ath9k_hw_updatetxtriglevel(ah, true);
567 567
568 if (status & ATH9K_INT_RXEOL) { 568 if (status & ATH9K_INT_RXEOL) {
569 ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 569 ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
570 ath9k_hw_set_interrupts(ah); 570 ath9k_hw_set_interrupts(ah);
571 } 571 }
572 572
573 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 573 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
574 if (status & ATH9K_INT_TIM_TIMER) { 574 if (status & ATH9K_INT_TIM_TIMER) {
575 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle)) 575 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
576 goto chip_reset; 576 goto chip_reset;
577 /* Clear RxAbort bit so that we can 577 /* Clear RxAbort bit so that we can
578 * receive frames */ 578 * receive frames */
579 ath9k_setpower(sc, ATH9K_PM_AWAKE); 579 ath9k_setpower(sc, ATH9K_PM_AWAKE);
580 spin_lock(&sc->sc_pm_lock); 580 spin_lock(&sc->sc_pm_lock);
581 ath9k_hw_setrxabort(sc->sc_ah, 0); 581 ath9k_hw_setrxabort(sc->sc_ah, 0);
582 sc->ps_flags |= PS_WAIT_FOR_BEACON; 582 sc->ps_flags |= PS_WAIT_FOR_BEACON;
583 spin_unlock(&sc->sc_pm_lock); 583 spin_unlock(&sc->sc_pm_lock);
584 } 584 }
585 585
586 chip_reset: 586 chip_reset:
587 587
588 ath_debug_stat_interrupt(sc, status); 588 ath_debug_stat_interrupt(sc, status);
589 589
590 if (sched) { 590 if (sched) {
591 /* turn off every interrupt */ 591 /* turn off every interrupt */
592 ath9k_hw_disable_interrupts(ah); 592 ath9k_hw_disable_interrupts(ah);
593 tasklet_schedule(&sc->intr_tq); 593 tasklet_schedule(&sc->intr_tq);
594 } 594 }
595 595
596 return IRQ_HANDLED; 596 return IRQ_HANDLED;
597 597
598 #undef SCHED_INTR 598 #undef SCHED_INTR
599 } 599 }
600 600
601 int ath_reset(struct ath_softc *sc) 601 int ath_reset(struct ath_softc *sc)
602 { 602 {
603 int r; 603 int r;
604 604
605 ath9k_ps_wakeup(sc); 605 ath9k_ps_wakeup(sc);
606 r = ath_reset_internal(sc, NULL); 606 r = ath_reset_internal(sc, NULL);
607 ath9k_ps_restore(sc); 607 ath9k_ps_restore(sc);
608 608
609 return r; 609 return r;
610 } 610 }
611 611
612 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type) 612 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
613 { 613 {
614 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 614 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
615 #ifdef CONFIG_ATH9K_DEBUGFS 615 #ifdef CONFIG_ATH9K_DEBUGFS
616 RESET_STAT_INC(sc, type); 616 RESET_STAT_INC(sc, type);
617 #endif 617 #endif
618 set_bit(ATH_OP_HW_RESET, &common->op_flags); 618 set_bit(ATH_OP_HW_RESET, &common->op_flags);
619 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 619 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
620 } 620 }
621 621
622 void ath_reset_work(struct work_struct *work) 622 void ath_reset_work(struct work_struct *work)
623 { 623 {
624 struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work); 624 struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
625 625
626 ath_reset(sc); 626 ath_reset(sc);
627 } 627 }
628 628
629 /**********************/ 629 /**********************/
630 /* mac80211 callbacks */ 630 /* mac80211 callbacks */
631 /**********************/ 631 /**********************/
632 632
633 static int ath9k_start(struct ieee80211_hw *hw) 633 static int ath9k_start(struct ieee80211_hw *hw)
634 { 634 {
635 struct ath_softc *sc = hw->priv; 635 struct ath_softc *sc = hw->priv;
636 struct ath_hw *ah = sc->sc_ah; 636 struct ath_hw *ah = sc->sc_ah;
637 struct ath_common *common = ath9k_hw_common(ah); 637 struct ath_common *common = ath9k_hw_common(ah);
638 struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan; 638 struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan;
639 struct ath_chanctx *ctx = sc->cur_chan; 639 struct ath_chanctx *ctx = sc->cur_chan;
640 struct ath9k_channel *init_channel; 640 struct ath9k_channel *init_channel;
641 int r; 641 int r;
642 642
643 ath_dbg(common, CONFIG, 643 ath_dbg(common, CONFIG,
644 "Starting driver with initial channel: %d MHz\n", 644 "Starting driver with initial channel: %d MHz\n",
645 curchan->center_freq); 645 curchan->center_freq);
646 646
647 ath9k_ps_wakeup(sc); 647 ath9k_ps_wakeup(sc);
648 mutex_lock(&sc->mutex); 648 mutex_lock(&sc->mutex);
649 649
650 init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef); 650 init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef);
651 sc->cur_chandef = hw->conf.chandef; 651 sc->cur_chandef = hw->conf.chandef;
652 652
653 /* Reset SERDES registers */ 653 /* Reset SERDES registers */
654 ath9k_hw_configpcipowersave(ah, false); 654 ath9k_hw_configpcipowersave(ah, false);
655 655
656 /* 656 /*
657 * The basic interface to setting the hardware in a good 657 * The basic interface to setting the hardware in a good
658 * state is ``reset''. On return the hardware is known to 658 * state is ``reset''. On return the hardware is known to
659 * be powered up and with interrupts disabled. This must 659 * be powered up and with interrupts disabled. This must
660 * be followed by initialization of the appropriate bits 660 * be followed by initialization of the appropriate bits
661 * and then setup of the interrupt mask. 661 * and then setup of the interrupt mask.
662 */ 662 */
663 spin_lock_bh(&sc->sc_pcu_lock); 663 spin_lock_bh(&sc->sc_pcu_lock);
664 664
665 atomic_set(&ah->intr_ref_cnt, -1); 665 atomic_set(&ah->intr_ref_cnt, -1);
666 666
667 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 667 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
668 if (r) { 668 if (r) {
669 ath_err(common, 669 ath_err(common,
670 "Unable to reset hardware; reset status %d (freq %u MHz)\n", 670 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
671 r, curchan->center_freq); 671 r, curchan->center_freq);
672 ah->reset_power_on = false; 672 ah->reset_power_on = false;
673 } 673 }
674 674
675 /* Setup our intr mask. */ 675 /* Setup our intr mask. */
676 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL | 676 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
677 ATH9K_INT_RXORN | ATH9K_INT_FATAL | 677 ATH9K_INT_RXORN | ATH9K_INT_FATAL |
678 ATH9K_INT_GLOBAL; 678 ATH9K_INT_GLOBAL;
679 679
680 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 680 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
681 ah->imask |= ATH9K_INT_RXHP | 681 ah->imask |= ATH9K_INT_RXHP |
682 ATH9K_INT_RXLP; 682 ATH9K_INT_RXLP;
683 else 683 else
684 ah->imask |= ATH9K_INT_RX; 684 ah->imask |= ATH9K_INT_RX;
685 685
686 if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) 686 if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
687 ah->imask |= ATH9K_INT_BB_WATCHDOG; 687 ah->imask |= ATH9K_INT_BB_WATCHDOG;
688 688
689 /* 689 /*
690 * Enable GTT interrupts only for AR9003/AR9004 chips 690 * Enable GTT interrupts only for AR9003/AR9004 chips
691 * for now. 691 * for now.
692 */ 692 */
693 if (AR_SREV_9300_20_OR_LATER(ah)) 693 if (AR_SREV_9300_20_OR_LATER(ah))
694 ah->imask |= ATH9K_INT_GTT; 694 ah->imask |= ATH9K_INT_GTT;
695 695
696 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 696 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
697 ah->imask |= ATH9K_INT_CST; 697 ah->imask |= ATH9K_INT_CST;
698 698
699 ath_mci_enable(sc); 699 ath_mci_enable(sc);
700 700
701 clear_bit(ATH_OP_INVALID, &common->op_flags); 701 clear_bit(ATH_OP_INVALID, &common->op_flags);
702 sc->sc_ah->is_monitoring = false; 702 sc->sc_ah->is_monitoring = false;
703 703
704 if (!ath_complete_reset(sc, false)) 704 if (!ath_complete_reset(sc, false))
705 ah->reset_power_on = false; 705 ah->reset_power_on = false;
706 706
707 if (ah->led_pin >= 0) { 707 if (ah->led_pin >= 0) {
708 ath9k_hw_cfg_output(ah, ah->led_pin, 708 ath9k_hw_cfg_output(ah, ah->led_pin,
709 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 709 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
710 ath9k_hw_set_gpio(ah, ah->led_pin, 0); 710 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
711 } 711 }
712 712
713 /* 713 /*
714 * Reset key cache to sane defaults (all entries cleared) instead of 714 * Reset key cache to sane defaults (all entries cleared) instead of
715 * semi-random values after suspend/resume. 715 * semi-random values after suspend/resume.
716 */ 716 */
717 ath9k_cmn_init_crypto(sc->sc_ah); 717 ath9k_cmn_init_crypto(sc->sc_ah);
718 718
719 ath9k_hw_reset_tsf(ah); 719 ath9k_hw_reset_tsf(ah);
720 720
721 spin_unlock_bh(&sc->sc_pcu_lock); 721 spin_unlock_bh(&sc->sc_pcu_lock);
722 722
723 mutex_unlock(&sc->mutex); 723 mutex_unlock(&sc->mutex);
724 724
725 ath9k_ps_restore(sc); 725 ath9k_ps_restore(sc);
726 726
727 return 0; 727 return 0;
728 } 728 }
729 729
730 static void ath9k_tx(struct ieee80211_hw *hw, 730 static void ath9k_tx(struct ieee80211_hw *hw,
731 struct ieee80211_tx_control *control, 731 struct ieee80211_tx_control *control,
732 struct sk_buff *skb) 732 struct sk_buff *skb)
733 { 733 {
734 struct ath_softc *sc = hw->priv; 734 struct ath_softc *sc = hw->priv;
735 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 735 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
736 struct ath_tx_control txctl; 736 struct ath_tx_control txctl;
737 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 737 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
738 unsigned long flags; 738 unsigned long flags;
739 739
740 if (sc->ps_enabled) { 740 if (sc->ps_enabled) {
741 /* 741 /*
742 * mac80211 does not set PM field for normal data frames, so we 742 * mac80211 does not set PM field for normal data frames, so we
743 * need to update that based on the current PS mode. 743 * need to update that based on the current PS mode.
744 */ 744 */
745 if (ieee80211_is_data(hdr->frame_control) && 745 if (ieee80211_is_data(hdr->frame_control) &&
746 !ieee80211_is_nullfunc(hdr->frame_control) && 746 !ieee80211_is_nullfunc(hdr->frame_control) &&
747 !ieee80211_has_pm(hdr->frame_control)) { 747 !ieee80211_has_pm(hdr->frame_control)) {
748 ath_dbg(common, PS, 748 ath_dbg(common, PS,
749 "Add PM=1 for a TX frame while in PS mode\n"); 749 "Add PM=1 for a TX frame while in PS mode\n");
750 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 750 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
751 } 751 }
752 } 752 }
753 753
754 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) { 754 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
755 /* 755 /*
756 * We are using PS-Poll and mac80211 can request TX while in 756 * We are using PS-Poll and mac80211 can request TX while in
757 * power save mode. Need to wake up hardware for the TX to be 757 * power save mode. Need to wake up hardware for the TX to be
758 * completed and if needed, also for RX of buffered frames. 758 * completed and if needed, also for RX of buffered frames.
759 */ 759 */
760 ath9k_ps_wakeup(sc); 760 ath9k_ps_wakeup(sc);
761 spin_lock_irqsave(&sc->sc_pm_lock, flags); 761 spin_lock_irqsave(&sc->sc_pm_lock, flags);
762 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 762 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
763 ath9k_hw_setrxabort(sc->sc_ah, 0); 763 ath9k_hw_setrxabort(sc->sc_ah, 0);
764 if (ieee80211_is_pspoll(hdr->frame_control)) { 764 if (ieee80211_is_pspoll(hdr->frame_control)) {
765 ath_dbg(common, PS, 765 ath_dbg(common, PS,
766 "Sending PS-Poll to pick a buffered frame\n"); 766 "Sending PS-Poll to pick a buffered frame\n");
767 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA; 767 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
768 } else { 768 } else {
769 ath_dbg(common, PS, "Wake up to complete TX\n"); 769 ath_dbg(common, PS, "Wake up to complete TX\n");
770 sc->ps_flags |= PS_WAIT_FOR_TX_ACK; 770 sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
771 } 771 }
772 /* 772 /*
773 * The actual restore operation will happen only after 773 * The actual restore operation will happen only after
774 * the ps_flags bit is cleared. We are just dropping 774 * the ps_flags bit is cleared. We are just dropping
775 * the ps_usecount here. 775 * the ps_usecount here.
776 */ 776 */
777 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 777 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
778 ath9k_ps_restore(sc); 778 ath9k_ps_restore(sc);
779 } 779 }
780 780
781 /* 781 /*
782 * Cannot tx while the hardware is in full sleep, it first needs a full 782 * Cannot tx while the hardware is in full sleep, it first needs a full
783 * chip reset to recover from that 783 * chip reset to recover from that
784 */ 784 */
785 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) { 785 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
786 ath_err(common, "TX while HW is in FULL_SLEEP mode\n"); 786 ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
787 goto exit; 787 goto exit;
788 } 788 }
789 789
790 memset(&txctl, 0, sizeof(struct ath_tx_control)); 790 memset(&txctl, 0, sizeof(struct ath_tx_control));
791 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; 791 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
792 txctl.sta = control->sta; 792 txctl.sta = control->sta;
793 793
794 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); 794 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
795 795
796 if (ath_tx_start(hw, skb, &txctl) != 0) { 796 if (ath_tx_start(hw, skb, &txctl) != 0) {
797 ath_dbg(common, XMIT, "TX failed\n"); 797 ath_dbg(common, XMIT, "TX failed\n");
798 TX_STAT_INC(txctl.txq->axq_qnum, txfailed); 798 TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
799 goto exit; 799 goto exit;
800 } 800 }
801 801
802 return; 802 return;
803 exit: 803 exit:
804 ieee80211_free_txskb(hw, skb); 804 ieee80211_free_txskb(hw, skb);
805 } 805 }
806 806
807 static void ath9k_stop(struct ieee80211_hw *hw) 807 static void ath9k_stop(struct ieee80211_hw *hw)
808 { 808 {
809 struct ath_softc *sc = hw->priv; 809 struct ath_softc *sc = hw->priv;
810 struct ath_hw *ah = sc->sc_ah; 810 struct ath_hw *ah = sc->sc_ah;
811 struct ath_common *common = ath9k_hw_common(ah); 811 struct ath_common *common = ath9k_hw_common(ah);
812 bool prev_idle; 812 bool prev_idle;
813 813
814 ath9k_deinit_channel_context(sc); 814 ath9k_deinit_channel_context(sc);
815 815
816 mutex_lock(&sc->mutex); 816 mutex_lock(&sc->mutex);
817 817
818 ath_cancel_work(sc); 818 ath_cancel_work(sc);
819 819
820 if (test_bit(ATH_OP_INVALID, &common->op_flags)) { 820 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
821 ath_dbg(common, ANY, "Device not present\n"); 821 ath_dbg(common, ANY, "Device not present\n");
822 mutex_unlock(&sc->mutex); 822 mutex_unlock(&sc->mutex);
823 return; 823 return;
824 } 824 }
825 825
826 /* Ensure HW is awake when we try to shut it down. */ 826 /* Ensure HW is awake when we try to shut it down. */
827 ath9k_ps_wakeup(sc); 827 ath9k_ps_wakeup(sc);
828 828
829 spin_lock_bh(&sc->sc_pcu_lock); 829 spin_lock_bh(&sc->sc_pcu_lock);
830 830
831 /* prevent tasklets to enable interrupts once we disable them */ 831 /* prevent tasklets to enable interrupts once we disable them */
832 ah->imask &= ~ATH9K_INT_GLOBAL; 832 ah->imask &= ~ATH9K_INT_GLOBAL;
833 833
834 /* make sure h/w will not generate any interrupt 834 /* make sure h/w will not generate any interrupt
835 * before setting the invalid flag. */ 835 * before setting the invalid flag. */
836 ath9k_hw_disable_interrupts(ah); 836 ath9k_hw_disable_interrupts(ah);
837 837
838 spin_unlock_bh(&sc->sc_pcu_lock); 838 spin_unlock_bh(&sc->sc_pcu_lock);
839 839
840 /* we can now sync irq and kill any running tasklets, since we already 840 /* we can now sync irq and kill any running tasklets, since we already
841 * disabled interrupts and not holding a spin lock */ 841 * disabled interrupts and not holding a spin lock */
842 synchronize_irq(sc->irq); 842 synchronize_irq(sc->irq);
843 tasklet_kill(&sc->intr_tq); 843 tasklet_kill(&sc->intr_tq);
844 tasklet_kill(&sc->bcon_tasklet); 844 tasklet_kill(&sc->bcon_tasklet);
845 845
846 prev_idle = sc->ps_idle; 846 prev_idle = sc->ps_idle;
847 sc->ps_idle = true; 847 sc->ps_idle = true;
848 848
849 spin_lock_bh(&sc->sc_pcu_lock); 849 spin_lock_bh(&sc->sc_pcu_lock);
850 850
851 if (ah->led_pin >= 0) { 851 if (ah->led_pin >= 0) {
852 ath9k_hw_set_gpio(ah, ah->led_pin, 1); 852 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
853 ath9k_hw_cfg_gpio_input(ah, ah->led_pin); 853 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
854 } 854 }
855 855
856 ath_prepare_reset(sc); 856 ath_prepare_reset(sc);
857 857
858 if (sc->rx.frag) { 858 if (sc->rx.frag) {
859 dev_kfree_skb_any(sc->rx.frag); 859 dev_kfree_skb_any(sc->rx.frag);
860 sc->rx.frag = NULL; 860 sc->rx.frag = NULL;
861 } 861 }
862 862
863 if (!ah->curchan) 863 if (!ah->curchan)
864 ah->curchan = ath9k_cmn_get_channel(hw, ah, 864 ah->curchan = ath9k_cmn_get_channel(hw, ah,
865 &sc->cur_chan->chandef); 865 &sc->cur_chan->chandef);
866 866
867 ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 867 ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
868 ath9k_hw_phy_disable(ah); 868 ath9k_hw_phy_disable(ah);
869 869
870 ath9k_hw_configpcipowersave(ah, true); 870 ath9k_hw_configpcipowersave(ah, true);
871 871
872 spin_unlock_bh(&sc->sc_pcu_lock); 872 spin_unlock_bh(&sc->sc_pcu_lock);
873 873
874 ath9k_ps_restore(sc); 874 ath9k_ps_restore(sc);
875 875
876 set_bit(ATH_OP_INVALID, &common->op_flags); 876 set_bit(ATH_OP_INVALID, &common->op_flags);
877 sc->ps_idle = prev_idle; 877 sc->ps_idle = prev_idle;
878 878
879 mutex_unlock(&sc->mutex); 879 mutex_unlock(&sc->mutex);
880 880
881 ath_dbg(common, CONFIG, "Driver halt\n"); 881 ath_dbg(common, CONFIG, "Driver halt\n");
882 } 882 }
883 883
884 static bool ath9k_uses_beacons(int type) 884 static bool ath9k_uses_beacons(int type)
885 { 885 {
886 switch (type) { 886 switch (type) {
887 case NL80211_IFTYPE_AP: 887 case NL80211_IFTYPE_AP:
888 case NL80211_IFTYPE_ADHOC: 888 case NL80211_IFTYPE_ADHOC:
889 case NL80211_IFTYPE_MESH_POINT: 889 case NL80211_IFTYPE_MESH_POINT:
890 return true; 890 return true;
891 default: 891 default:
892 return false; 892 return false;
893 } 893 }
894 } 894 }
895 895
896 static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, 896 static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
897 u8 *mac, struct ieee80211_vif *vif) 897 u8 *mac, struct ieee80211_vif *vif)
898 { 898 {
899 struct ath_vif *avp = (struct ath_vif *)vif->drv_priv; 899 struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
900 int i; 900 int i;
901 901
902 if (iter_data->has_hw_macaddr) { 902 if (iter_data->has_hw_macaddr) {
903 for (i = 0; i < ETH_ALEN; i++) 903 for (i = 0; i < ETH_ALEN; i++)
904 iter_data->mask[i] &= 904 iter_data->mask[i] &=
905 ~(iter_data->hw_macaddr[i] ^ mac[i]); 905 ~(iter_data->hw_macaddr[i] ^ mac[i]);
906 } else { 906 } else {
907 memcpy(iter_data->hw_macaddr, mac, ETH_ALEN); 907 memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
908 iter_data->has_hw_macaddr = true; 908 iter_data->has_hw_macaddr = true;
909 } 909 }
910 910
911 if (!vif->bss_conf.use_short_slot) 911 if (!vif->bss_conf.use_short_slot)
912 iter_data->slottime = ATH9K_SLOT_TIME_20; 912 iter_data->slottime = ATH9K_SLOT_TIME_20;
913 913
914 switch (vif->type) { 914 switch (vif->type) {
915 case NL80211_IFTYPE_AP: 915 case NL80211_IFTYPE_AP:
916 iter_data->naps++; 916 iter_data->naps++;
917 break; 917 break;
918 case NL80211_IFTYPE_STATION: 918 case NL80211_IFTYPE_STATION:
919 iter_data->nstations++; 919 iter_data->nstations++;
920 if (avp->assoc && !iter_data->primary_sta) 920 if (avp->assoc && !iter_data->primary_sta)
921 iter_data->primary_sta = vif; 921 iter_data->primary_sta = vif;
922 break; 922 break;
923 case NL80211_IFTYPE_ADHOC: 923 case NL80211_IFTYPE_ADHOC:
924 iter_data->nadhocs++; 924 iter_data->nadhocs++;
925 if (vif->bss_conf.enable_beacon) 925 if (vif->bss_conf.enable_beacon)
926 iter_data->beacons = true; 926 iter_data->beacons = true;
927 break; 927 break;
928 case NL80211_IFTYPE_MESH_POINT: 928 case NL80211_IFTYPE_MESH_POINT:
929 iter_data->nmeshes++; 929 iter_data->nmeshes++;
930 if (vif->bss_conf.enable_beacon) 930 if (vif->bss_conf.enable_beacon)
931 iter_data->beacons = true; 931 iter_data->beacons = true;
932 break; 932 break;
933 case NL80211_IFTYPE_WDS: 933 case NL80211_IFTYPE_WDS:
934 iter_data->nwds++; 934 iter_data->nwds++;
935 break; 935 break;
936 default: 936 default:
937 break; 937 break;
938 } 938 }
939 } 939 }
940 940
941 static void ath9k_update_bssid_mask(struct ath_softc *sc, 941 static void ath9k_update_bssid_mask(struct ath_softc *sc,
942 struct ath_chanctx *ctx, 942 struct ath_chanctx *ctx,
943 struct ath9k_vif_iter_data *iter_data) 943 struct ath9k_vif_iter_data *iter_data)
944 { 944 {
945 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 945 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
946 struct ath_vif *avp; 946 struct ath_vif *avp;
947 int i; 947 int i;
948 948
949 if (!ath9k_is_chanctx_enabled()) 949 if (!ath9k_is_chanctx_enabled())
950 return; 950 return;
951 951
952 list_for_each_entry(avp, &ctx->vifs, list) { 952 list_for_each_entry(avp, &ctx->vifs, list) {
953 if (ctx->nvifs_assigned != 1) 953 if (ctx->nvifs_assigned != 1)
954 continue; 954 continue;
955 955
956 if (!avp->vif->p2p || !iter_data->has_hw_macaddr) 956 if (!avp->vif->p2p || !iter_data->has_hw_macaddr)
957 continue; 957 continue;
958 958
959 ether_addr_copy(common->curbssid, avp->bssid); 959 ether_addr_copy(common->curbssid, avp->bssid);
960 960
961 /* perm_addr will be used as the p2p device address. */ 961 /* perm_addr will be used as the p2p device address. */
962 for (i = 0; i < ETH_ALEN; i++) 962 for (i = 0; i < ETH_ALEN; i++)
963 iter_data->mask[i] &= 963 iter_data->mask[i] &=
964 ~(iter_data->hw_macaddr[i] ^ 964 ~(iter_data->hw_macaddr[i] ^
965 sc->hw->wiphy->perm_addr[i]); 965 sc->hw->wiphy->perm_addr[i]);
966 } 966 }
967 } 967 }
968 968
969 /* Called with sc->mutex held. */ 969 /* Called with sc->mutex held. */
970 void ath9k_calculate_iter_data(struct ath_softc *sc, 970 void ath9k_calculate_iter_data(struct ath_softc *sc,
971 struct ath_chanctx *ctx, 971 struct ath_chanctx *ctx,
972 struct ath9k_vif_iter_data *iter_data) 972 struct ath9k_vif_iter_data *iter_data)
973 { 973 {
974 struct ath_vif *avp; 974 struct ath_vif *avp;
975 975
976 /* 976 /*
977 * Pick the MAC address of the first interface as the new hardware 977 * The hardware will use primary station addr together with the
978 * MAC address. The hardware will use it together with the BSSID mask 978 * BSSID mask when matching addresses.
979 * when matching addresses.
980 */ 979 */
981 memset(iter_data, 0, sizeof(*iter_data)); 980 memset(iter_data, 0, sizeof(*iter_data));
982 memset(&iter_data->mask, 0xff, ETH_ALEN); 981 memset(&iter_data->mask, 0xff, ETH_ALEN);
983 iter_data->slottime = ATH9K_SLOT_TIME_9; 982 iter_data->slottime = ATH9K_SLOT_TIME_9;
984 983
985 list_for_each_entry(avp, &ctx->vifs, list) 984 list_for_each_entry(avp, &ctx->vifs, list)
986 ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif); 985 ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);
987 986
988 ath9k_update_bssid_mask(sc, ctx, iter_data); 987 ath9k_update_bssid_mask(sc, ctx, iter_data);
989 } 988 }
990 989
991 static void ath9k_set_assoc_state(struct ath_softc *sc, 990 static void ath9k_set_assoc_state(struct ath_softc *sc,
992 struct ieee80211_vif *vif, bool changed) 991 struct ieee80211_vif *vif, bool changed)
993 { 992 {
994 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 993 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
995 struct ath_vif *avp = (struct ath_vif *)vif->drv_priv; 994 struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
996 unsigned long flags; 995 unsigned long flags;
997 996
998 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags); 997 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
999 998
1000 ether_addr_copy(common->curbssid, avp->bssid); 999 ether_addr_copy(common->curbssid, avp->bssid);
1001 common->curaid = avp->aid; 1000 common->curaid = avp->aid;
1002 ath9k_hw_write_associd(sc->sc_ah); 1001 ath9k_hw_write_associd(sc->sc_ah);
1003 1002
1004 if (changed) { 1003 if (changed) {
1005 common->last_rssi = ATH_RSSI_DUMMY_MARKER; 1004 common->last_rssi = ATH_RSSI_DUMMY_MARKER;
1006 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1005 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1007 1006
1008 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1007 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1009 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 1008 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
1010 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1009 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1011 } 1010 }
1012 1011
1013 if (ath9k_hw_mci_is_enabled(sc->sc_ah)) 1012 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
1014 ath9k_mci_update_wlan_channels(sc, false); 1013 ath9k_mci_update_wlan_channels(sc, false);
1015 1014
1016 ath_dbg(common, CONFIG, 1015 ath_dbg(common, CONFIG,
1017 "Primary Station interface: %pM, BSSID: %pM\n", 1016 "Primary Station interface: %pM, BSSID: %pM\n",
1018 vif->addr, common->curbssid); 1017 vif->addr, common->curbssid);
1019 } 1018 }

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
static void ath9k_set_offchannel_state(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_vif *vif = NULL;

	ath9k_ps_wakeup(sc);

	if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START)
		vif = sc->offchannel.scan_vif;
	else
		vif = sc->offchannel.roc_vif;

	if (WARN_ON(!vif))
		goto exit;

	eth_zero_addr(common->curbssid);
	eth_broadcast_addr(common->bssidmask);
	ether_addr_copy(common->macaddr, vif->addr);
	common->curaid = 0;
	ah->opmode = vif->type;
	ah->imask &= ~ATH9K_INT_SWBA;
	ah->imask &= ~ATH9K_INT_TSFOOR;
	ah->slottime = ATH9K_SLOT_TIME_9;

	ath_hw_setbssidmask(common);
	ath9k_hw_setopmode(ah);
	ath9k_hw_write_associd(sc->sc_ah);
	ath9k_hw_set_interrupts(ah);
	ath9k_hw_init_global_settings(ah);

exit:
	ath9k_ps_restore(sc);
}
#endif

/* Called with sc->mutex held. */
void ath9k_calculate_summary_state(struct ath_softc *sc,
				   struct ath_chanctx *ctx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_vif_iter_data iter_data;
	struct ath_beacon_config *cur_conf;

	ath_chanctx_check_active(sc, ctx);

	if (ctx != sc->cur_chan)
		return;

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
	if (ctx == &sc->offchannel.chan)
		return ath9k_set_offchannel_state(sc);
#endif

	ath9k_ps_wakeup(sc);
	ath9k_calculate_iter_data(sc, ctx, &iter_data);

	if (iter_data.has_hw_macaddr)
		ether_addr_copy(common->macaddr, iter_data.hw_macaddr);

	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
	ath_hw_setbssidmask(common);

	if (iter_data.naps > 0) {
		cur_conf = &ctx->beacon;
		ath9k_hw_set_tsfadjust(ah, true);
		ah->opmode = NL80211_IFTYPE_AP;
		if (cur_conf->enable_beacon)
			iter_data.beacons = true;
	} else {
		ath9k_hw_set_tsfadjust(ah, false);

		if (iter_data.nmeshes)
			ah->opmode = NL80211_IFTYPE_MESH_POINT;
		else if (iter_data.nwds)
			ah->opmode = NL80211_IFTYPE_AP;
		else if (iter_data.nadhocs)
			ah->opmode = NL80211_IFTYPE_ADHOC;
		else
			ah->opmode = NL80211_IFTYPE_STATION;
	}

	ath9k_hw_setopmode(ah);

	ctx->switch_after_beacon = false;
	if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
		ah->imask |= ATH9K_INT_TSFOOR;
	else {
		ah->imask &= ~ATH9K_INT_TSFOOR;
		if (iter_data.naps == 1 && iter_data.beacons)
			ctx->switch_after_beacon = true;
	}

	ah->imask &= ~ATH9K_INT_SWBA;
	if (ah->opmode == NL80211_IFTYPE_STATION) {
		bool changed = (iter_data.primary_sta != ctx->primary_sta);

		if (iter_data.primary_sta) {
			iter_data.beacons = true;
			ath9k_set_assoc_state(sc, iter_data.primary_sta,
					      changed);
			ctx->primary_sta = iter_data.primary_sta;
		} else {
			ctx->primary_sta = NULL;
			memset(common->curbssid, 0, ETH_ALEN);
			common->curaid = 0;
			ath9k_hw_write_associd(sc->sc_ah);
			if (ath9k_hw_mci_is_enabled(sc->sc_ah))
				ath9k_mci_update_wlan_channels(sc, true);
		}
	} else if (iter_data.beacons) {
		ah->imask |= ATH9K_INT_SWBA;
	}
	ath9k_hw_set_interrupts(ah);

	if (iter_data.beacons)
		set_bit(ATH_OP_BEACONS, &common->op_flags);
	else
		clear_bit(ATH_OP_BEACONS, &common->op_flags);

	if (ah->slottime != iter_data.slottime) {
		ah->slottime = iter_data.slottime;
		ath9k_hw_init_global_settings(ah);
	}

	if (iter_data.primary_sta)
		set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
	else
		clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);

	ath_dbg(common, CONFIG,
		"macaddr: %pM, bssid: %pM, bssidmask: %pM\n",
		common->macaddr, common->curbssid, common->bssidmask);

	ath9k_ps_restore(sc);
}
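/*
 * Opmode selection above, in order of precedence: any AP VIF forces AP
 * mode; otherwise mesh point, then WDS (run as AP), then ad-hoc, and
 * finally plain station mode when no other interface type is present.
 */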

static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	int i;

	if (!ath9k_is_chanctx_enabled())
		return;

	for (i = 0; i < IEEE80211_NUM_ACS; i++)
		vif->hw_queue[i] = i;

	if (vif->type == NL80211_IFTYPE_AP)
		vif->cab_queue = hw->queues - 2;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
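/*
 * Each access category maps 1:1 onto a hardware queue (0..3). For AP
 * interfaces the content-after-beacon (CAB) queue is the second-to-last
 * hardware queue; e.g. with hw->queues == 10, CAB traffic would use
 * queue 8 (a worked example, not a fixed value).
 */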

static int ath9k_add_interface(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_node *an = &avp->mcast_node;

	mutex_lock(&sc->mutex);

	if (config_enabled(CONFIG_ATH9K_TX99)) {
		if (sc->cur_chan->nvifs >= 1) {
			mutex_unlock(&sc->mutex);
			return -EOPNOTSUPP;
		}
		sc->tx99_vif = vif;
	}

	ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
	sc->cur_chan->nvifs++;

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_assign_slot(sc, vif);

	avp->vif = vif;
	if (!ath9k_is_chanctx_enabled()) {
		avp->chanctx = sc->cur_chan;
		list_add_tail(&avp->list, &avp->chanctx->vifs);
	}

	ath9k_calculate_summary_state(sc, avp->chanctx);

	ath9k_assign_hw_queues(hw, vif);

	an->sc = sc;
	an->sta = NULL;
	an->vif = vif;
	an->no_ps_filter = true;
	ath_tx_node_init(sc, an);

	mutex_unlock(&sc->mutex);
	return 0;
}

static int ath9k_change_interface(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum nl80211_iftype new_type,
				  bool p2p)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;

	mutex_lock(&sc->mutex);

	if (config_enabled(CONFIG_ATH9K_TX99)) {
		mutex_unlock(&sc->mutex);
		return -EOPNOTSUPP;
	}

	ath_dbg(common, CONFIG, "Change Interface\n");

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_remove_slot(sc, vif);

	vif->type = new_type;
	vif->p2p = p2p;

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_assign_slot(sc, vif);

	ath9k_assign_hw_queues(hw, vif);
	ath9k_calculate_summary_state(sc, avp->chanctx);

	mutex_unlock(&sc->mutex);
	return 0;
}

static void ath9k_remove_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;

	ath_dbg(common, CONFIG, "Detach Interface\n");

	mutex_lock(&sc->mutex);

	ath9k_p2p_remove_vif(sc, vif);

	sc->cur_chan->nvifs--;
	sc->tx99_vif = NULL;
	if (!ath9k_is_chanctx_enabled())
		list_del(&avp->list);

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_remove_slot(sc, vif);

	ath_tx_node_cleanup(sc, &avp->mcast_node);

	ath9k_calculate_summary_state(sc, avp->chanctx);

	mutex_unlock(&sc->mutex);
}

static void ath9k_enable_ps(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	sc->ps_enabled = true;
	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
			ah->imask |= ATH9K_INT_TIM_TIMER;
			ath9k_hw_set_interrupts(ah);
		}
		ath9k_hw_setrxabort(ah, 1);
	}
	ath_dbg(common, PS, "PowerSave enabled\n");
}

static void ath9k_disable_ps(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	sc->ps_enabled = false;
	ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		ath9k_hw_setrxabort(ah, 0);
		sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
				  PS_WAIT_FOR_CAB |
				  PS_WAIT_FOR_PSPOLL_DATA |
				  PS_WAIT_FOR_TX_ACK);
		if (ah->imask & ATH9K_INT_TIM_TIMER) {
			ah->imask &= ~ATH9K_INT_TIM_TIMER;
			ath9k_hw_set_interrupts(ah);
		}
	}
	ath_dbg(common, PS, "PowerSave disabled\n");
}

void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rxfilter;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
		ath_err(common, "spectrum analyzer not implemented on this hardware\n");
		return;
	}

	ath9k_ps_wakeup(sc);
	rxfilter = ath9k_hw_getrxfilter(ah);
	ath9k_hw_setrxfilter(ah, rxfilter |
			     ATH9K_RX_FILTER_PHYRADAR |
			     ATH9K_RX_FILTER_PHYERR);

	/* TODO: usually this should not be necessary, but for some reason
	 * (or in some mode?) the trigger must be called after the
	 * configuration, otherwise the register will have its values reset
	 * (on my ar9220 to value 0x01002310)
	 */
	ath9k_spectral_scan_config(hw, sc->spectral_mode);
	ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
	ath9k_ps_restore(sc);
}

int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
			       enum spectral_mode spectral_mode)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
		ath_err(common, "spectrum analyzer not implemented on this hardware\n");
		return -1;
	}

	switch (spectral_mode) {
	case SPECTRAL_DISABLED:
		sc->spec_config.enabled = 0;
		break;
	case SPECTRAL_BACKGROUND:
		/* send endless samples.
		 * TODO: is this really useful for "background"?
		 */
		sc->spec_config.endless = 1;
		sc->spec_config.enabled = 1;
		break;
	case SPECTRAL_CHANSCAN:
	case SPECTRAL_MANUAL:
		sc->spec_config.endless = 0;
		sc->spec_config.enabled = 1;
		break;
	default:
		return -1;
	}

	ath9k_ps_wakeup(sc);
	ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
	ath9k_ps_restore(sc);

	sc->spectral_mode = spectral_mode;

	return 0;
}
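/*
 * Mode-to-configuration mapping, in short:
 *
 *	SPECTRAL_DISABLED		enabled = 0
 *	SPECTRAL_BACKGROUND		enabled = 1, endless = 1
 *	SPECTRAL_CHANSCAN / MANUAL	enabled = 1, endless = 0
 *
 * i.e. only background mode streams samples continuously; the other
 * active modes capture a bounded number of samples per scan.
 */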

static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &hw->conf;
	struct ath_chanctx *ctx = sc->cur_chan;

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
		if (sc->ps_idle) {
			ath_cancel_work(sc);
			ath9k_stop_btcoex(sc);
		} else {
			ath9k_start_btcoex(sc);
			/*
			 * The chip needs a reset to properly wake up from
			 * full sleep
			 */
			ath_chanctx_set_channel(sc, ctx, &ctx->chandef);
		}
	}

	/*
	 * We just prepare to enable PS. We have to wait until our AP has
	 * ACK'd our null data frame to disable RX, otherwise we'll ignore
	 * those ACKs and end up retransmitting the same null data frames.
	 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
	 */
	if (changed & IEEE80211_CONF_CHANGE_PS) {
		unsigned long flags;

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (conf->flags & IEEE80211_CONF_PS)
			ath9k_enable_ps(sc);
		else
			ath9k_disable_ps(sc);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		if (conf->flags & IEEE80211_CONF_MONITOR) {
			ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
			sc->sc_ah->is_monitoring = true;
		} else {
			ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
			sc->sc_ah->is_monitoring = false;
		}
	}

	if (!ath9k_is_chanctx_enabled() &&
	    (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
		ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL);
		ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
		sc->cur_chan->txpower = 2 * conf->power_level;
		ath9k_cmn_update_txpow(ah, sc->curtxpow,
				       sc->cur_chan->txpower, &sc->curtxpow);
	}

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

	return 0;
}
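/*
 * conf->power_level is in dBm while the driver's tx power bookkeeping
 * uses half-dBm steps, hence the factor of two above: a requested limit
 * of 20 dBm is stored as txpower = 40.
 */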

#define SUPPORTED_FILTERS			\
	(FIF_PROMISC_IN_BSS |			\
	 FIF_ALLMULTI |				\
	 FIF_CONTROL |				\
	 FIF_PSPOLL |				\
	 FIF_OTHER_BSS |			\
	 FIF_BCN_PRBRESP_PROMISC |		\
	 FIF_PROBE_REQ |			\
	 FIF_FCSFAIL)

/* FIXME: sc->sc_full_reset ? */
static void ath9k_configure_filter(struct ieee80211_hw *hw,
				   unsigned int changed_flags,
				   unsigned int *total_flags,
				   u64 multicast)
{
	struct ath_softc *sc = hw->priv;
	u32 rfilt;

	changed_flags &= SUPPORTED_FILTERS;
	*total_flags &= SUPPORTED_FILTERS;

	spin_lock_bh(&sc->chan_lock);
	sc->cur_chan->rxfilter = *total_flags;
	spin_unlock_bh(&sc->chan_lock);

	ath9k_ps_wakeup(sc);
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
	ath9k_ps_restore(sc);

	ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
		rfilt);
}

static int ath9k_sta_add(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_node *an = (struct ath_node *) sta->drv_priv;
	struct ieee80211_key_conf ps_key = { };
	int key;

	ath_node_attach(sc, sta, vif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_AP_VLAN)
		return 0;

	key = ath_key_config(common, vif, sta, &ps_key);
	if (key > 0) {
		an->ps_key = key;
		an->key_idx[0] = key;
	}

	return 0;
}
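/*
 * The "ps_key" allocated above is an empty key cache entry, not a
 * crypto key: in AP mode it reserves a per-station slot so that the
 * hardware TX filter (see ath9k_sta_set_tx_filter() below) can hold
 * back frames while the station is in power save, even before any
 * real key has been installed.
 */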

static void ath9k_del_ps_key(struct ath_softc *sc,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_node *an = (struct ath_node *) sta->drv_priv;
	struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };

	if (!an->ps_key)
		return;

	ath_key_delete(common, &ps_key);
	an->ps_key = 0;
	an->key_idx[0] = 0;
}

static int ath9k_sta_remove(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;

	ath9k_del_ps_key(sc, vif, sta);
	ath_node_detach(sc, sta);

	return 0;
}

static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
				    struct ath_node *an,
				    bool set)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
		if (!an->key_idx[i])
			continue;
		ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
	}
}

static void ath9k_sta_notify(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     enum sta_notify_cmd cmd,
			     struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *) sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		an->sleeping = true;
		ath_tx_aggr_sleep(sta, sc, an);
		ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
		break;
	case STA_NOTIFY_AWAKE:
		ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
		an->sleeping = false;
		ath_tx_aggr_wakeup(sc, an);
		break;
	}
}
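/*
 * Note the ordering: on sleep the software aggregation queues are
 * paused before the hardware TX filter is raised; on wake the filter
 * is dropped first, so queued frames can flow again as soon as the
 * aggregation state is resumed.
 */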

static int ath9k_conf_tx(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif, u16 queue,
			 const struct ieee80211_tx_queue_params *params)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	struct ath9k_tx_queue_info qi;
	int ret = 0;

	if (queue >= IEEE80211_NUM_ACS)
		return 0;

	txq = sc->tx.txq_map[queue];

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));

	qi.tqi_aifs = params->aifs;
	qi.tqi_cwmin = params->cw_min;
	qi.tqi_cwmax = params->cw_max;
	qi.tqi_burstTime = params->txop * 32;

	ath_dbg(common, CONFIG,
		"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
		queue, txq->axq_qnum, params->aifs, params->cw_min,
		params->cw_max, params->txop);

	ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
	ret = ath_txq_update(sc, txq->axq_qnum, &qi);
	if (ret)
		ath_err(common, "TXQ Update failed\n");

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

	return ret;
}
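/*
 * params->txop is expressed in units of 32 microseconds (as in the
 * 802.11 EDCA parameter set), while tqi_burstTime is in microseconds,
 * hence the multiplication. For example, a WMM TXOP limit of 94 units
 * becomes a burst time of 94 * 32 = 3008 us.
 */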

static int ath9k_set_key(struct ieee80211_hw *hw,
			 enum set_key_cmd cmd,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_node *an = NULL;
	int ret = 0, i;

	if (ath9k_modparam_nohwcrypt)
		return -ENOSPC;

	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		/*
		 * For now, disable hw crypto for the RSN IBSS group keys. This
		 * could be optimized in the future to use a modified key cache
		 * design to support per-STA RX GTK, but until that gets
		 * implemented, use of software crypto for group addressed
		 * frames is acceptable to allow RSN IBSS to be used.
		 */
		return -EOPNOTSUPP;
	}

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
	if (sta)
		an = (struct ath_node *)sta->drv_priv;

	switch (cmd) {
	case SET_KEY:
		if (sta)
			ath9k_del_ps_key(sc, vif, sta);

		key->hw_key_idx = 0;
		ret = ath_key_config(common, vif, sta, key);
		if (ret >= 0) {
			key->hw_key_idx = ret;
			/* push IV and Michael MIC generation to stack */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
			if (sc->sc_ah->sw_mgmt_crypto &&
			    key->cipher == WLAN_CIPHER_SUITE_CCMP)
				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
			ret = 0;
		}
		if (an && key->hw_key_idx) {
			for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
				if (an->key_idx[i])
					continue;
				an->key_idx[i] = key->hw_key_idx;
				break;
			}
			WARN_ON(i == ARRAY_SIZE(an->key_idx));
		}
		break;
	case DISABLE_KEY:
		ath_key_delete(common, key);
		if (an) {
			for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
				if (an->key_idx[i] != key->hw_key_idx)
					continue;
				an->key_idx[i] = 0;
				break;
			}
		}
		key->hw_key_idx = 0;
		break;
	default:
		ret = -EINVAL;
	}

	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return ret;
}

static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *bss_conf,
				   u32 changed)
{
#define CHECK_ANI \
	(BSS_CHANGED_ASSOC | \
	 BSS_CHANGED_IBSS | \
	 BSS_CHANGED_BEACON_ENABLED)

	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	int slottime;

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	if (changed & BSS_CHANGED_ASSOC) {
		ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
			bss_conf->bssid, bss_conf->assoc);

		ether_addr_copy(avp->bssid, bss_conf->bssid);
		avp->aid = bss_conf->aid;
		avp->assoc = bss_conf->assoc;

		ath9k_calculate_summary_state(sc, avp->chanctx);

		if (ath9k_is_chanctx_enabled()) {
			if (bss_conf->assoc)
				ath_chanctx_event(sc, vif,
						  ATH_CHANCTX_EVENT_ASSOC);
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		common->curaid = bss_conf->aid;
		ath9k_hw_write_associd(sc->sc_ah);
	}

	if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
	    (changed & BSS_CHANGED_BEACON_INT) ||
	    (changed & BSS_CHANGED_BEACON_INFO)) {
		ath9k_beacon_config(sc, vif, changed);
		if (changed & BSS_CHANGED_BEACON_ENABLED)
			ath9k_calculate_summary_state(sc, avp->chanctx);
	}

	if ((avp->chanctx == sc->cur_chan) &&
	    (changed & BSS_CHANGED_ERP_SLOT)) {
		if (bss_conf->use_short_slot)
			slottime = 9;
		else
			slottime = 20;
		if (vif->type == NL80211_IFTYPE_AP) {
			/*
			 * Defer update, so that connected stations can adjust
			 * their settings at the same time.
			 * See beacon.c for more details
			 */
			sc->beacon.slottime = slottime;
			sc->beacon.updateslot = UPDATE;
		} else {
			ah->slottime = slottime;
			ath9k_hw_init_global_settings(ah);
		}
	}

	if (changed & BSS_CHANGED_P2P_PS)
		ath9k_p2p_bss_info_changed(sc, vif);

	if (changed & CHECK_ANI)
		ath_check_ani(sc);

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

#undef CHECK_ANI
}

static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	u64 tsf;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return tsf;
}

static void ath9k_set_tsf(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u64 tsf)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath9k_hw_settsf64(sc->sc_ah, tsf);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);
}

static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);

	ath9k_ps_wakeup(sc);
	ath9k_hw_reset_tsf(sc->sc_ah);
	ath9k_ps_restore(sc);

	mutex_unlock(&sc->mutex);
}

static int ath9k_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum ieee80211_ampdu_mlme_action action,
			      struct ieee80211_sta *sta,
			      u16 tid, u16 *ssn, u8 buf_size)
{
	struct ath_softc *sc = hw->priv;
	bool flush = false;
	int ret = 0;

	mutex_lock(&sc->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		break;
	case IEEE80211_AMPDU_RX_STOP:
		break;
	case IEEE80211_AMPDU_TX_START:
		ath9k_ps_wakeup(sc);
		ret = ath_tx_aggr_start(sc, sta, tid, ssn);
		if (!ret)
			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ath9k_ps_restore(sc);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		flush = true;
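		/* fall through */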
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ath9k_ps_wakeup(sc);
		ath_tx_aggr_stop(sc, sta, tid);
		if (!flush)
			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ath9k_ps_restore(sc);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ath9k_ps_wakeup(sc);
		ath_tx_aggr_resume(sc, sta, tid);
		ath9k_ps_restore(sc);
		break;
	default:
		ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
	}

	mutex_unlock(&sc->mutex);

	return ret;
}

static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
			    struct survey_info *survey)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	int pos;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return -EOPNOTSUPP;

	spin_lock_bh(&common->cc_lock);
	if (idx == 0)
		ath_update_survey_stats(sc);

	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}

	if (!sband)
		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];

	if (!sband || idx >= sband->n_channels) {
		spin_unlock_bh(&common->cc_lock);
		return -ENOENT;
	}

	chan = &sband->channels[idx];
	pos = chan->hw_value;
	memcpy(survey, &sc->survey[pos], sizeof(*survey));
	survey->channel = chan;
	spin_unlock_bh(&common->cc_lock);

	return 0;
}

static void ath9k_enable_dynack(struct ath_softc *sc)
{
#ifdef CONFIG_ATH9K_DYNACK
	u32 rfilt;
	struct ath_hw *ah = sc->sc_ah;

	ath_dynack_reset(ah);

	ah->dynack.enabled = true;
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);
#endif
}

static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
				     s16 coverage_class)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	mutex_lock(&sc->mutex);

	if (coverage_class >= 0) {
		ah->coverage_class = coverage_class;
		if (ah->dynack.enabled) {
			u32 rfilt;

			ah->dynack.enabled = false;
			rfilt = ath_calcrxfilter(sc);
			ath9k_hw_setrxfilter(ah, rfilt);
		}
		ath9k_ps_wakeup(sc);
		ath9k_hw_init_global_settings(ah);
		ath9k_ps_restore(sc);
	} else if (!ah->dynack.enabled) {
		ath9k_enable_dynack(sc);
	}

	mutex_unlock(&sc->mutex);
}
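/*
 * A negative coverage class acts as the "automatic" request here: it
 * turns on dynamic ACK timeout estimation (dynack) instead of a fixed,
 * distance-derived ACK timeout, while any value >= 0 programs the
 * static timeout and switches dynack back off.
 */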

static bool ath9k_has_tx_pending(struct ath_softc *sc)
{
	int i, npend = 0;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
		if (npend)
			break;
	}

	return !!npend;
}

static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			u32 queues, bool drop)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	__ath9k_flush(hw, queues, drop);
	mutex_unlock(&sc->mutex);
}

void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int timeout = HZ / 5; /* 200 ms */
	bool drain_txq;

	cancel_delayed_work_sync(&sc->tx_complete_work);

	if (ah->ah_flags & AH_UNPLUGGED) {
		ath_dbg(common, ANY, "Device has been unplugged!\n");
		return;
	}

	if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		return;
	}

	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
			       timeout) > 0)
		drop = false;

	if (drop) {
		ath9k_ps_wakeup(sc);
		spin_lock_bh(&sc->sc_pcu_lock);
		drain_txq = ath_drain_all_txq(sc);
		spin_unlock_bh(&sc->sc_pcu_lock);

		if (!drain_txq)
			ath_reset(sc);

		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
}

static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;

	return ath9k_has_tx_pending(sc);
}

static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_vif *vif;
	struct ath_vif *avp;
	struct ath_buf *bf;
	struct ath_tx_status ts;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int status;

	vif = sc->beacon.bslot[0];
	if (!vif)
		return 0;

	if (!vif->bss_conf.enable_beacon)
		return 0;

	avp = (void *)vif->drv_priv;

	if (!sc->beacon.tx_processed && !edma) {
		tasklet_disable(&sc->bcon_tasklet);

		bf = avp->av_bcbuf;
		if (!bf || !bf->bf_mpdu)
			goto skip;

		status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
		if (status == -EINPROGRESS)
			goto skip;

		sc->beacon.tx_processed = true;
		sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

skip:
		tasklet_enable(&sc->bcon_tasklet);
	}

	return sc->beacon.tx_last;
}

static int ath9k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;

	stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
	stats->dot11RTSFailureCount = mib_stats->rts_bad;
	stats->dot11FCSErrorCount = mib_stats->fcs_bad;
	stats->dot11RTSSuccessCount = mib_stats->rts_good;
	return 0;
}
2093 2096
2094 static u32 fill_chainmask(u32 cap, u32 new) 2097 static u32 fill_chainmask(u32 cap, u32 new)
2095 { 2098 {
2096 u32 filled = 0; 2099 u32 filled = 0;
2097 int i; 2100 int i;
2098 2101
2099 for (i = 0; cap && new; i++, cap >>= 1) { 2102 for (i = 0; cap && new; i++, cap >>= 1) {
2100 if (!(cap & BIT(0))) 2103 if (!(cap & BIT(0)))
2101 continue; 2104 continue;
2102 2105
2103 if (new & BIT(0)) 2106 if (new & BIT(0))
2104 filled |= BIT(i); 2107 filled |= BIT(i);
2105 2108
2106 new >>= 1; 2109 new >>= 1;
2107 } 2110 }
2108 2111
2109 return filled; 2112 return filled;
2110 } 2113 }
2111 2114
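Editor's note: fill_chainmask() above maps the i-th requested antenna bit onto the i-th chain that actually exists in the capability mask; a cleared capability bit skips that chain without consuming a request bit (the "continue" path leaves "new" unshifted). A worked trace of the loop (editorial illustration, not driver code):

    /* cap = 0x5 (chains 0 and 2 exist), new = 0x3 (two antennas requested)
     * i = 0: cap bit 0 set, new bit 0 set -> filled |= BIT(0)
     * i = 1: cap bit 1 clear              -> chain 1 skipped, new unchanged
     * i = 2: cap bit 2 set, new bit 1 set -> filled |= BIT(2)
     * so fill_chainmask(0x5, 0x3) == 0x5
     */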
2112 static bool validate_antenna_mask(struct ath_hw *ah, u32 val) 2115 static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
2113 { 2116 {
2114 if (AR_SREV_9300_20_OR_LATER(ah)) 2117 if (AR_SREV_9300_20_OR_LATER(ah))
2115 return true; 2118 return true;
2116 2119
2117 switch (val & 0x7) { 2120 switch (val & 0x7) {
2118 case 0x1: 2121 case 0x1:
2119 case 0x3: 2122 case 0x3:
2120 case 0x7: 2123 case 0x7:
2121 return true; 2124 return true;
2122 case 0x2: 2125 case 0x2:
2123 return (ah->caps.rx_chainmask == 1); 2126 return (ah->caps.rx_chainmask == 1);
2124 default: 2127 default:
2125 return false; 2128 return false;
2126 } 2129 }
2127 } 2130 }
2128 2131
2129 static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 2132 static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
2130 { 2133 {
2131 struct ath_softc *sc = hw->priv; 2134 struct ath_softc *sc = hw->priv;
2132 struct ath_hw *ah = sc->sc_ah; 2135 struct ath_hw *ah = sc->sc_ah;
2133 2136
2134 if (ah->caps.rx_chainmask != 1) 2137 if (ah->caps.rx_chainmask != 1)
2135 rx_ant |= tx_ant; 2138 rx_ant |= tx_ant;
2136 2139
2137 if (!validate_antenna_mask(ah, rx_ant) || !tx_ant) 2140 if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
2138 return -EINVAL; 2141 return -EINVAL;
2139 2142
2140 sc->ant_rx = rx_ant; 2143 sc->ant_rx = rx_ant;
2141 sc->ant_tx = tx_ant; 2144 sc->ant_tx = tx_ant;
2142 2145
2143 if (ah->caps.rx_chainmask == 1) 2146 if (ah->caps.rx_chainmask == 1)
2144 return 0; 2147 return 0;
2145 2148
2146 /* AR9100 runs into calibration issues if not all rx chains are enabled */ 2149 /* AR9100 runs into calibration issues if not all rx chains are enabled */
2147 if (AR_SREV_9100(ah)) 2150 if (AR_SREV_9100(ah))
2148 ah->rxchainmask = 0x7; 2151 ah->rxchainmask = 0x7;
2149 else 2152 else
2150 ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant); 2153 ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
2151 2154
2152 ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant); 2155 ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
2153 ath9k_cmn_reload_chainmask(ah); 2156 ath9k_cmn_reload_chainmask(ah);
2154 2157
2155 return 0; 2158 return 0;
2156 } 2159 }
2157 2160
2158 static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 2161 static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2159 { 2162 {
2160 struct ath_softc *sc = hw->priv; 2163 struct ath_softc *sc = hw->priv;
2161 2164
2162 *tx_ant = sc->ant_tx; 2165 *tx_ant = sc->ant_tx;
2163 *rx_ant = sc->ant_rx; 2166 *rx_ant = sc->ant_rx;
2164 return 0; 2167 return 0;
2165 } 2168 }
2166 2169
2167 static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2170 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2168 { 2171 {
2169 struct ath_softc *sc = hw->priv; 2172 struct ath_softc *sc = hw->priv;
2170 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2173 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2171 set_bit(ATH_OP_SCANNING, &common->op_flags); 2174 set_bit(ATH_OP_SCANNING, &common->op_flags);
2172 } 2175 }
2173 2176
2174 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) 2177 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2175 { 2178 {
2176 struct ath_softc *sc = hw->priv; 2179 struct ath_softc *sc = hw->priv;
2177 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2180 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2178 clear_bit(ATH_OP_SCANNING, &common->op_flags); 2181 clear_bit(ATH_OP_SCANNING, &common->op_flags);
2179 } 2182 }
2180 2183
2181 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT 2184 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
2182 2185
2183 static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2186 static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2184 struct ieee80211_scan_request *hw_req) 2187 struct ieee80211_scan_request *hw_req)
2185 { 2188 {
2186 struct cfg80211_scan_request *req = &hw_req->req; 2189 struct cfg80211_scan_request *req = &hw_req->req;
2187 struct ath_softc *sc = hw->priv; 2190 struct ath_softc *sc = hw->priv;
2188 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2191 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2189 int ret = 0; 2192 int ret = 0;
2190 2193
2191 mutex_lock(&sc->mutex); 2194 mutex_lock(&sc->mutex);
2192 2195
2193 if (WARN_ON(sc->offchannel.scan_req)) { 2196 if (WARN_ON(sc->offchannel.scan_req)) {
2194 ret = -EBUSY; 2197 ret = -EBUSY;
2195 goto out; 2198 goto out;
2196 } 2199 }
2197 2200
2198 ath9k_ps_wakeup(sc); 2201 ath9k_ps_wakeup(sc);
2199 set_bit(ATH_OP_SCANNING, &common->op_flags); 2202 set_bit(ATH_OP_SCANNING, &common->op_flags);
2200 sc->offchannel.scan_vif = vif; 2203 sc->offchannel.scan_vif = vif;
2201 sc->offchannel.scan_req = req; 2204 sc->offchannel.scan_req = req;
2202 sc->offchannel.scan_idx = 0; 2205 sc->offchannel.scan_idx = 0;
2203 2206
2204 ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n", 2207 ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n",
2205 vif->addr); 2208 vif->addr);
2206 2209
2207 if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) { 2210 if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
2208 ath_dbg(common, CHAN_CTX, "Starting HW scan\n"); 2211 ath_dbg(common, CHAN_CTX, "Starting HW scan\n");
2209 ath_offchannel_next(sc); 2212 ath_offchannel_next(sc);
2210 } 2213 }
2211 2214
2212 out: 2215 out:
2213 mutex_unlock(&sc->mutex); 2216 mutex_unlock(&sc->mutex);
2214 2217
2215 return ret; 2218 return ret;
2216 } 2219 }
2217 2220
2218 static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw, 2221 static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw,
2219 struct ieee80211_vif *vif) 2222 struct ieee80211_vif *vif)
2220 { 2223 {
2221 struct ath_softc *sc = hw->priv; 2224 struct ath_softc *sc = hw->priv;
2222 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2225 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2223 2226
2224 ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr); 2227 ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr);
2225 2228
2226 mutex_lock(&sc->mutex); 2229 mutex_lock(&sc->mutex);
2227 del_timer_sync(&sc->offchannel.timer); 2230 del_timer_sync(&sc->offchannel.timer);
2228 ath_scan_complete(sc, true); 2231 ath_scan_complete(sc, true);
2229 mutex_unlock(&sc->mutex); 2232 mutex_unlock(&sc->mutex);
2230 } 2233 }
2231 2234
2232 static int ath9k_remain_on_channel(struct ieee80211_hw *hw, 2235 static int ath9k_remain_on_channel(struct ieee80211_hw *hw,
2233 struct ieee80211_vif *vif, 2236 struct ieee80211_vif *vif,
2234 struct ieee80211_channel *chan, int duration, 2237 struct ieee80211_channel *chan, int duration,
2235 enum ieee80211_roc_type type) 2238 enum ieee80211_roc_type type)
2236 { 2239 {
2237 struct ath_softc *sc = hw->priv; 2240 struct ath_softc *sc = hw->priv;
2238 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2241 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2239 int ret = 0; 2242 int ret = 0;
2240 2243
2241 mutex_lock(&sc->mutex); 2244 mutex_lock(&sc->mutex);
2242 2245
2243 if (WARN_ON(sc->offchannel.roc_vif)) { 2246 if (WARN_ON(sc->offchannel.roc_vif)) {
2244 ret = -EBUSY; 2247 ret = -EBUSY;
2245 goto out; 2248 goto out;
2246 } 2249 }
2247 2250
2248 ath9k_ps_wakeup(sc); 2251 ath9k_ps_wakeup(sc);
2249 sc->offchannel.roc_vif = vif; 2252 sc->offchannel.roc_vif = vif;
2250 sc->offchannel.roc_chan = chan; 2253 sc->offchannel.roc_chan = chan;
2251 sc->offchannel.roc_duration = duration; 2254 sc->offchannel.roc_duration = duration;
2252 2255
2253 ath_dbg(common, CHAN_CTX, 2256 ath_dbg(common, CHAN_CTX,
2254 "RoC request on vif: %pM, type: %d duration: %d\n", 2257 "RoC request on vif: %pM, type: %d duration: %d\n",
2255 vif->addr, type, duration); 2258 vif->addr, type, duration);
2256 2259
2257 if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) { 2260 if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
2258 ath_dbg(common, CHAN_CTX, "Starting RoC period\n"); 2261 ath_dbg(common, CHAN_CTX, "Starting RoC period\n");
2259 ath_offchannel_next(sc); 2262 ath_offchannel_next(sc);
2260 } 2263 }
2261 2264
2262 out: 2265 out:
2263 mutex_unlock(&sc->mutex); 2266 mutex_unlock(&sc->mutex);
2264 2267
2265 return ret; 2268 return ret;
2266 } 2269 }
2267 2270
2268 static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) 2271 static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
2269 { 2272 {
2270 struct ath_softc *sc = hw->priv; 2273 struct ath_softc *sc = hw->priv;
2271 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2274 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2272 2275
2273 mutex_lock(&sc->mutex); 2276 mutex_lock(&sc->mutex);
2274 2277
2275 ath_dbg(common, CHAN_CTX, "Cancel RoC\n"); 2278 ath_dbg(common, CHAN_CTX, "Cancel RoC\n");
2276 del_timer_sync(&sc->offchannel.timer); 2279 del_timer_sync(&sc->offchannel.timer);
2277 2280
2278 if (sc->offchannel.roc_vif) { 2281 if (sc->offchannel.roc_vif) {
2279 if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) 2282 if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
2280 ath_roc_complete(sc, true); 2283 ath_roc_complete(sc, true);
2281 } 2284 }
2282 2285
2283 mutex_unlock(&sc->mutex); 2286 mutex_unlock(&sc->mutex);
2284 2287
2285 return 0; 2288 return 0;
2286 } 2289 }
2287 2290
2288 static int ath9k_add_chanctx(struct ieee80211_hw *hw, 2291 static int ath9k_add_chanctx(struct ieee80211_hw *hw,
2289 struct ieee80211_chanctx_conf *conf) 2292 struct ieee80211_chanctx_conf *conf)
2290 { 2293 {
2291 struct ath_softc *sc = hw->priv; 2294 struct ath_softc *sc = hw->priv;
2292 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2295 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2293 struct ath_chanctx *ctx, **ptr; 2296 struct ath_chanctx *ctx, **ptr;
2294 int pos; 2297 int pos;
2295 2298
2296 mutex_lock(&sc->mutex); 2299 mutex_lock(&sc->mutex);
2297 2300
2298 ath_for_each_chanctx(sc, ctx) { 2301 ath_for_each_chanctx(sc, ctx) {
2299 if (ctx->assigned) 2302 if (ctx->assigned)
2300 continue; 2303 continue;
2301 2304
2302 ptr = (void *) conf->drv_priv; 2305 ptr = (void *) conf->drv_priv;
2303 *ptr = ctx; 2306 *ptr = ctx;
2304 ctx->assigned = true; 2307 ctx->assigned = true;
2305 pos = ctx - &sc->chanctx[0]; 2308 pos = ctx - &sc->chanctx[0];
2306 ctx->hw_queue_base = pos * IEEE80211_NUM_ACS; 2309 ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;
2307 2310
2308 ath_dbg(common, CHAN_CTX, 2311 ath_dbg(common, CHAN_CTX,
2309 "Add channel context: %d MHz\n", 2312 "Add channel context: %d MHz\n",
2310 conf->def.chan->center_freq); 2313 conf->def.chan->center_freq);
2311 2314
2312 ath_chanctx_set_channel(sc, ctx, &conf->def); 2315 ath_chanctx_set_channel(sc, ctx, &conf->def);
2313 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN); 2316 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN);
2314 2317
2315 mutex_unlock(&sc->mutex); 2318 mutex_unlock(&sc->mutex);
2316 return 0; 2319 return 0;
2317 } 2320 }
2318 2321
2319 mutex_unlock(&sc->mutex); 2322 mutex_unlock(&sc->mutex);
2320 return -ENOSPC; 2323 return -ENOSPC;
2321 } 2324 }
2322 2325
2323 2326
2324 static void ath9k_remove_chanctx(struct ieee80211_hw *hw, 2327 static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
2325 struct ieee80211_chanctx_conf *conf) 2328 struct ieee80211_chanctx_conf *conf)
2326 { 2329 {
2327 struct ath_softc *sc = hw->priv; 2330 struct ath_softc *sc = hw->priv;
2328 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2331 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2329 struct ath_chanctx *ctx = ath_chanctx_get(conf); 2332 struct ath_chanctx *ctx = ath_chanctx_get(conf);
2330 2333
2331 mutex_lock(&sc->mutex); 2334 mutex_lock(&sc->mutex);
2332 2335
2333 ath_dbg(common, CHAN_CTX, 2336 ath_dbg(common, CHAN_CTX,
2334 "Remove channel context: %d MHz\n", 2337 "Remove channel context: %d MHz\n",
2335 conf->def.chan->center_freq); 2338 conf->def.chan->center_freq);
2336 2339
2337 ctx->assigned = false; 2340 ctx->assigned = false;
2338 ctx->hw_queue_base = 0; 2341 ctx->hw_queue_base = 0;
2339 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN); 2342 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);
2340 2343
2341 mutex_unlock(&sc->mutex); 2344 mutex_unlock(&sc->mutex);
2342 } 2345 }
2343 2346
2344 static void ath9k_change_chanctx(struct ieee80211_hw *hw, 2347 static void ath9k_change_chanctx(struct ieee80211_hw *hw,
2345 struct ieee80211_chanctx_conf *conf, 2348 struct ieee80211_chanctx_conf *conf,
2346 u32 changed) 2349 u32 changed)
2347 { 2350 {
2348 struct ath_softc *sc = hw->priv; 2351 struct ath_softc *sc = hw->priv;
2349 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2352 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2350 struct ath_chanctx *ctx = ath_chanctx_get(conf); 2353 struct ath_chanctx *ctx = ath_chanctx_get(conf);
2351 2354
2352 mutex_lock(&sc->mutex); 2355 mutex_lock(&sc->mutex);
2353 ath_dbg(common, CHAN_CTX, 2356 ath_dbg(common, CHAN_CTX,
2354 "Change channel context: %d MHz\n", 2357 "Change channel context: %d MHz\n",
2355 conf->def.chan->center_freq); 2358 conf->def.chan->center_freq);
2356 ath_chanctx_set_channel(sc, ctx, &conf->def); 2359 ath_chanctx_set_channel(sc, ctx, &conf->def);
2357 mutex_unlock(&sc->mutex); 2360 mutex_unlock(&sc->mutex);
2358 } 2361 }
2359 2362
2360 static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw, 2363 static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
2361 struct ieee80211_vif *vif, 2364 struct ieee80211_vif *vif,
2362 struct ieee80211_chanctx_conf *conf) 2365 struct ieee80211_chanctx_conf *conf)
2363 { 2366 {
2364 struct ath_softc *sc = hw->priv; 2367 struct ath_softc *sc = hw->priv;
2365 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2368 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2366 struct ath_vif *avp = (void *)vif->drv_priv; 2369 struct ath_vif *avp = (void *)vif->drv_priv;
2367 struct ath_chanctx *ctx = ath_chanctx_get(conf); 2370 struct ath_chanctx *ctx = ath_chanctx_get(conf);
2368 int i; 2371 int i;
2369 2372
2370 mutex_lock(&sc->mutex); 2373 mutex_lock(&sc->mutex);
2371 2374
2372 ath_dbg(common, CHAN_CTX, 2375 ath_dbg(common, CHAN_CTX,
2373 "Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n", 2376 "Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n",
2374 vif->addr, vif->type, vif->p2p, 2377 vif->addr, vif->type, vif->p2p,
2375 conf->def.chan->center_freq); 2378 conf->def.chan->center_freq);
2376 2379
2377 avp->chanctx = ctx; 2380 avp->chanctx = ctx;
2378 ctx->nvifs_assigned++; 2381 ctx->nvifs_assigned++;
2379 list_add_tail(&avp->list, &ctx->vifs); 2382 list_add_tail(&avp->list, &ctx->vifs);
2380 ath9k_calculate_summary_state(sc, ctx); 2383 ath9k_calculate_summary_state(sc, ctx);
2381 for (i = 0; i < IEEE80211_NUM_ACS; i++) 2384 for (i = 0; i < IEEE80211_NUM_ACS; i++)
2382 vif->hw_queue[i] = ctx->hw_queue_base + i; 2385 vif->hw_queue[i] = ctx->hw_queue_base + i;
2383 2386
2384 mutex_unlock(&sc->mutex); 2387 mutex_unlock(&sc->mutex);
2385 2388
2386 return 0; 2389 return 0;
2387 } 2390 }
2388 2391
2389 static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw, 2392 static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
2390 struct ieee80211_vif *vif, 2393 struct ieee80211_vif *vif,
2391 struct ieee80211_chanctx_conf *conf) 2394 struct ieee80211_chanctx_conf *conf)
2392 { 2395 {
2393 struct ath_softc *sc = hw->priv; 2396 struct ath_softc *sc = hw->priv;
2394 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2397 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2395 struct ath_vif *avp = (void *)vif->drv_priv; 2398 struct ath_vif *avp = (void *)vif->drv_priv;
2396 struct ath_chanctx *ctx = ath_chanctx_get(conf); 2399 struct ath_chanctx *ctx = ath_chanctx_get(conf);
2397 int ac; 2400 int ac;
2398 2401
2399 mutex_lock(&sc->mutex); 2402 mutex_lock(&sc->mutex);
2400 2403
2401 ath_dbg(common, CHAN_CTX, 2404 ath_dbg(common, CHAN_CTX,
2402 "Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n", 2405 "Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n",
2403 vif->addr, vif->type, vif->p2p, 2406 vif->addr, vif->type, vif->p2p,
2404 conf->def.chan->center_freq); 2407 conf->def.chan->center_freq);
2405 2408
2406 avp->chanctx = NULL; 2409 avp->chanctx = NULL;
2407 ctx->nvifs_assigned--; 2410 ctx->nvifs_assigned--;
2408 list_del(&avp->list); 2411 list_del(&avp->list);
2409 ath9k_calculate_summary_state(sc, ctx); 2412 ath9k_calculate_summary_state(sc, ctx);
2410 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2413 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
2411 vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; 2414 vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
2412 2415
2413 mutex_unlock(&sc->mutex); 2416 mutex_unlock(&sc->mutex);
2414 } 2417 }
2415 2418
2416 static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, 2419 static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
2417 struct ieee80211_vif *vif) 2420 struct ieee80211_vif *vif)
2418 { 2421 {
2419 struct ath_softc *sc = hw->priv; 2422 struct ath_softc *sc = hw->priv;
2420 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2423 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2421 struct ath_vif *avp = (struct ath_vif *) vif->drv_priv; 2424 struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
2422 bool changed = false; 2425 bool changed = false;
2423 2426
2424 if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) 2427 if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
2425 return; 2428 return;
2426 2429
2427 if (!avp->chanctx) 2430 if (!avp->chanctx)
2428 return; 2431 return;
2429 2432
2430 mutex_lock(&sc->mutex); 2433 mutex_lock(&sc->mutex);
2431 2434
2432 spin_lock_bh(&sc->chan_lock); 2435 spin_lock_bh(&sc->chan_lock);
2433 if (sc->next_chan || (sc->cur_chan != avp->chanctx)) { 2436 if (sc->next_chan || (sc->cur_chan != avp->chanctx)) {
2434 sc->next_chan = avp->chanctx; 2437 sc->next_chan = avp->chanctx;
2435 changed = true; 2438 changed = true;
2436 } 2439 }
2437 ath_dbg(common, CHAN_CTX, 2440 ath_dbg(common, CHAN_CTX,
2438 "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n", 2441 "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n",
2439 __func__, changed); 2442 __func__, changed);
2440 sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; 2443 sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
2441 spin_unlock_bh(&sc->chan_lock); 2444 spin_unlock_bh(&sc->chan_lock);
2442 2445
2443 if (changed) 2446 if (changed)
2444 ath_chanctx_set_next(sc, true); 2447 ath_chanctx_set_next(sc, true);
2445 2448
2446 mutex_unlock(&sc->mutex); 2449 mutex_unlock(&sc->mutex);
2447 } 2450 }
2448 2451
2449 void ath9k_fill_chanctx_ops(void) 2452 void ath9k_fill_chanctx_ops(void)
2450 { 2453 {
2451 if (!ath9k_is_chanctx_enabled()) 2454 if (!ath9k_is_chanctx_enabled())
2452 return; 2455 return;
2453 2456
2454 ath9k_ops.hw_scan = ath9k_hw_scan; 2457 ath9k_ops.hw_scan = ath9k_hw_scan;
2455 ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan; 2458 ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
2456 ath9k_ops.remain_on_channel = ath9k_remain_on_channel; 2459 ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
2457 ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel; 2460 ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
2458 ath9k_ops.add_chanctx = ath9k_add_chanctx; 2461 ath9k_ops.add_chanctx = ath9k_add_chanctx;
2459 ath9k_ops.remove_chanctx = ath9k_remove_chanctx; 2462 ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
2460 ath9k_ops.change_chanctx = ath9k_change_chanctx; 2463 ath9k_ops.change_chanctx = ath9k_change_chanctx;
2461 ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx; 2464 ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
2462 ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx; 2465 ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
2463 ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx; 2466 ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
2464 } 2467 }
2465 2468
2466 #endif 2469 #endif
2467 2470
2468 struct ieee80211_ops ath9k_ops = { 2471 struct ieee80211_ops ath9k_ops = {
2469 .tx = ath9k_tx, 2472 .tx = ath9k_tx,
2470 .start = ath9k_start, 2473 .start = ath9k_start,
2471 .stop = ath9k_stop, 2474 .stop = ath9k_stop,
2472 .add_interface = ath9k_add_interface, 2475 .add_interface = ath9k_add_interface,
2473 .change_interface = ath9k_change_interface, 2476 .change_interface = ath9k_change_interface,
2474 .remove_interface = ath9k_remove_interface, 2477 .remove_interface = ath9k_remove_interface,
2475 .config = ath9k_config, 2478 .config = ath9k_config,
2476 .configure_filter = ath9k_configure_filter, 2479 .configure_filter = ath9k_configure_filter,
2477 .sta_add = ath9k_sta_add, 2480 .sta_add = ath9k_sta_add,
2478 .sta_remove = ath9k_sta_remove, 2481 .sta_remove = ath9k_sta_remove,
2479 .sta_notify = ath9k_sta_notify, 2482 .sta_notify = ath9k_sta_notify,
2480 .conf_tx = ath9k_conf_tx, 2483 .conf_tx = ath9k_conf_tx,
2481 .bss_info_changed = ath9k_bss_info_changed, 2484 .bss_info_changed = ath9k_bss_info_changed,
2482 .set_key = ath9k_set_key, 2485 .set_key = ath9k_set_key,
2483 .get_tsf = ath9k_get_tsf, 2486 .get_tsf = ath9k_get_tsf,
2484 .set_tsf = ath9k_set_tsf, 2487 .set_tsf = ath9k_set_tsf,
2485 .reset_tsf = ath9k_reset_tsf, 2488 .reset_tsf = ath9k_reset_tsf,
2486 .ampdu_action = ath9k_ampdu_action, 2489 .ampdu_action = ath9k_ampdu_action,
2487 .get_survey = ath9k_get_survey, 2490 .get_survey = ath9k_get_survey,
2488 .rfkill_poll = ath9k_rfkill_poll_state, 2491 .rfkill_poll = ath9k_rfkill_poll_state,
2489 .set_coverage_class = ath9k_set_coverage_class, 2492 .set_coverage_class = ath9k_set_coverage_class,
2490 .flush = ath9k_flush, 2493 .flush = ath9k_flush,
2491 .tx_frames_pending = ath9k_tx_frames_pending, 2494 .tx_frames_pending = ath9k_tx_frames_pending,
2492 .tx_last_beacon = ath9k_tx_last_beacon, 2495 .tx_last_beacon = ath9k_tx_last_beacon,
2493 .release_buffered_frames = ath9k_release_buffered_frames, 2496 .release_buffered_frames = ath9k_release_buffered_frames,
2494 .get_stats = ath9k_get_stats, 2497 .get_stats = ath9k_get_stats,
2495 .set_antenna = ath9k_set_antenna, 2498 .set_antenna = ath9k_set_antenna,
2496 .get_antenna = ath9k_get_antenna, 2499 .get_antenna = ath9k_get_antenna,
2497 2500
2498 #ifdef CONFIG_ATH9K_WOW 2501 #ifdef CONFIG_ATH9K_WOW
2499 .suspend = ath9k_suspend, 2502 .suspend = ath9k_suspend,
2500 .resume = ath9k_resume, 2503 .resume = ath9k_resume,
2501 .set_wakeup = ath9k_set_wakeup, 2504 .set_wakeup = ath9k_set_wakeup,
2502 #endif 2505 #endif
2503 2506
2504 #ifdef CONFIG_ATH9K_DEBUGFS 2507 #ifdef CONFIG_ATH9K_DEBUGFS
2505 .get_et_sset_count = ath9k_get_et_sset_count, 2508 .get_et_sset_count = ath9k_get_et_sset_count,
2506 .get_et_stats = ath9k_get_et_stats, 2509 .get_et_stats = ath9k_get_et_stats,
2507 .get_et_strings = ath9k_get_et_strings, 2510 .get_et_strings = ath9k_get_et_strings,
2508 #endif 2511 #endif
2509 2512
2510 #if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS) 2513 #if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
2511 .sta_add_debugfs = ath9k_sta_add_debugfs, 2514 .sta_add_debugfs = ath9k_sta_add_debugfs,
2512 #endif 2515 #endif
2513 .sw_scan_start = ath9k_sw_scan_start, 2516 .sw_scan_start = ath9k_sw_scan_start,
2514 .sw_scan_complete = ath9k_sw_scan_complete, 2517 .sw_scan_complete = ath9k_sw_scan_complete,
2515 }; 2518 };
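Editor's note: ath9k_fill_chanctx_ops() above shows a common mac80211 driver pattern: keep one statically initialized struct ieee80211_ops and patch in the optional channel-context callbacks once at init, only when the feature is compiled in and enabled. A minimal sketch of the same pattern for a hypothetical driver (all mydrv_* names and CONFIG_MYDRV_CHANCTX are illustrative, not from this tree):

    #include <linux/errno.h>
    #include <linux/kconfig.h>
    #include <net/mac80211.h>

    /* Hypothetical optional callback; placeholder body. */
    static int mydrv_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             struct ieee80211_scan_request *req)
    {
            return -EOPNOTSUPP;
    }

    struct ieee80211_ops mydrv_ops = {
            /* mandatory callbacks would be filled statically here */
    };

    /* Called once before ieee80211_register_hw(); mirrors the
     * ath9k_fill_chanctx_ops() approach above. */
    void mydrv_fill_optional_ops(void)
    {
            if (!IS_ENABLED(CONFIG_MYDRV_CHANCTX))
                    return;
            mydrv_ops.hw_scan = mydrv_hw_scan;
    }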
drivers/net/wireless/brcm80211/brcmfmac/of.c
1 /* 1 /*
2 * Copyright (c) 2014 Broadcom Corporation 2 * Copyright (c) 2014 Broadcom Corporation
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies. 6 * copyright notice and this permission notice appear in all copies.
7 * 7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/of.h> 17 #include <linux/of.h>
18 #include <linux/of_irq.h> 18 #include <linux/of_irq.h>
19 #include <linux/mmc/card.h> 19 #include <linux/mmc/card.h>
20 #include <linux/platform_data/brcmfmac-sdio.h> 20 #include <linux/platform_data/brcmfmac-sdio.h>
21 #include <linux/mmc/sdio_func.h> 21 #include <linux/mmc/sdio_func.h>
22 22
23 #include <defs.h> 23 #include <defs.h>
24 #include "dhd_dbg.h" 24 #include "dhd_dbg.h"
25 #include "sdio_host.h" 25 #include "sdio_host.h"
26 26
27 void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) 27 void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
28 { 28 {
29 struct device *dev = sdiodev->dev; 29 struct device *dev = sdiodev->dev;
30 struct device_node *np = dev->of_node; 30 struct device_node *np = dev->of_node;
31 int irq; 31 int irq;
32 u32 irqf; 32 u32 irqf;
33 u32 val; 33 u32 val;
34 34
35 if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) 35 if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
36 return; 36 return;
37 37
38 sdiodev->pdata = devm_kzalloc(dev, sizeof(*sdiodev->pdata), GFP_KERNEL); 38 sdiodev->pdata = devm_kzalloc(dev, sizeof(*sdiodev->pdata), GFP_KERNEL);
39 if (!sdiodev->pdata) 39 if (!sdiodev->pdata)
40 return; 40 return;
41 41
42 irq = irq_of_parse_and_map(np, 0); 42 irq = irq_of_parse_and_map(np, 0);
43 if (irq < 0) { 43 if (!irq) {
44 brcmf_err("interrupt could not be mapped: err=%d\n", irq); 44 brcmf_err("interrupt could not be mapped\n");
45 devm_kfree(dev, sdiodev->pdata); 45 devm_kfree(dev, sdiodev->pdata);
46 return; 46 return;
47 } 47 }
48 irqf = irqd_get_trigger_type(irq_get_irq_data(irq)); 48 irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
49 49
50 sdiodev->pdata->oob_irq_supported = true; 50 sdiodev->pdata->oob_irq_supported = true;
51 sdiodev->pdata->oob_irq_nr = irq; 51 sdiodev->pdata->oob_irq_nr = irq;
52 sdiodev->pdata->oob_irq_flags = irqf; 52 sdiodev->pdata->oob_irq_flags = irqf;
53 53
54 if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) 54 if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
55 sdiodev->pdata->drive_strength = val; 55 sdiodev->pdata->drive_strength = val;
56 } 56 }
57 57
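Editor's note: the of.c hunk above works because irq_of_parse_and_map() returns an unsigned int that is 0 when no mapping exists, never a negative errno, so the old "irq < 0" test could not fire. A minimal sketch of the correct pattern (mydev_* name hypothetical):

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/of_irq.h>

    /* Returns a usable virq number, or a negative errno. */
    static int mydev_get_irq(struct device_node *np)
    {
            unsigned int irq = irq_of_parse_and_map(np, 0);

            if (!irq)       /* 0 means "no mapping", not a negative code */
                    return -EINVAL;
            return irq;
    }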
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
1 /* Copyright (c) 2014 Broadcom Corporation 1 /* Copyright (c) 2014 Broadcom Corporation
2 * 2 *
3 * Permission to use, copy, modify, and/or distribute this software for any 3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above 4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies. 5 * copyright notice and this permission notice appear in all copies.
6 * 6 *
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */ 14 */
15 15
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/firmware.h> 18 #include <linux/firmware.h>
19 #include <linux/pci.h> 19 #include <linux/pci.h>
20 #include <linux/vmalloc.h> 20 #include <linux/vmalloc.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/unaligned/access_ok.h>
23 #include <linux/interrupt.h> 22 #include <linux/interrupt.h>
24 #include <linux/bcma/bcma.h> 23 #include <linux/bcma/bcma.h>
25 #include <linux/sched.h> 24 #include <linux/sched.h>
25 #include <asm/unaligned.h>
26 26
27 #include <soc.h> 27 #include <soc.h>
28 #include <chipcommon.h> 28 #include <chipcommon.h>
29 #include <brcmu_utils.h> 29 #include <brcmu_utils.h>
30 #include <brcmu_wifi.h> 30 #include <brcmu_wifi.h>
31 #include <brcm_hw_ids.h> 31 #include <brcm_hw_ids.h>
32 32
33 #include "dhd_dbg.h" 33 #include "dhd_dbg.h"
34 #include "dhd_bus.h" 34 #include "dhd_bus.h"
35 #include "commonring.h" 35 #include "commonring.h"
36 #include "msgbuf.h" 36 #include "msgbuf.h"
37 #include "pcie.h" 37 #include "pcie.h"
38 #include "firmware.h" 38 #include "firmware.h"
39 #include "chip.h" 39 #include "chip.h"
40 40
41 41
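Editor's note: the include hunk above replaces a direct include of the internal implementation header linux/unaligned/access_ok.h with asm/unaligned.h, the wrapper that selects the right unaligned-access implementation for each architecture; pulling in the access_ok variant directly assumes unaligned loads are safe and can clash with the arch's own selection. Illustrative use of the portable helpers (a sketch, not code from this file):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Read a little-endian u32 from a buffer at any byte offset. */
    static u32 read_le32_at(const u8 *buf, unsigned int offset)
    {
            return get_unaligned_le32(buf + offset);
    }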
42 enum brcmf_pcie_state { 42 enum brcmf_pcie_state {
43 BRCMFMAC_PCIE_STATE_DOWN, 43 BRCMFMAC_PCIE_STATE_DOWN,
44 BRCMFMAC_PCIE_STATE_UP 44 BRCMFMAC_PCIE_STATE_UP
45 }; 45 };
46 46
47 47
48 #define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin" 48 #define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
49 #define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt" 49 #define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
50 #define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin" 50 #define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin"
51 #define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt" 51 #define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt"
52 #define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin" 52 #define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
53 #define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt" 53 #define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
54 #define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin" 54 #define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
55 #define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt" 55 #define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
56 56
57 #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */ 57 #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
58 58
59 #define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024) 59 #define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024)
60 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024) 60 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
61 61
62 /* backplane address space accessed by BAR0 */ 62 /* backplane address space accessed by BAR0 */
63 #define BRCMF_PCIE_BAR0_WINDOW 0x80 63 #define BRCMF_PCIE_BAR0_WINDOW 0x80
64 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000 64 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
65 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70 65 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
66 66
67 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000 67 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
68 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000 68 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
69 69
70 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40 70 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
71 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C 71 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
72 72
73 #define BRCMF_PCIE_REG_INTSTATUS 0x90 73 #define BRCMF_PCIE_REG_INTSTATUS 0x90
74 #define BRCMF_PCIE_REG_INTMASK 0x94 74 #define BRCMF_PCIE_REG_INTMASK 0x94
75 #define BRCMF_PCIE_REG_SBMBX 0x98 75 #define BRCMF_PCIE_REG_SBMBX 0x98
76 76
77 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24 77 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
78 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48 78 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
79 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C 79 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
80 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120 80 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
81 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124 81 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
82 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140 82 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140
83 83
84 #define BRCMF_PCIE_GENREV1 1 84 #define BRCMF_PCIE_GENREV1 1
85 #define BRCMF_PCIE_GENREV2 2 85 #define BRCMF_PCIE_GENREV2 2
86 86
87 #define BRCMF_PCIE2_INTA 0x01 87 #define BRCMF_PCIE2_INTA 0x01
88 #define BRCMF_PCIE2_INTB 0x02 88 #define BRCMF_PCIE2_INTB 0x02
89 89
90 #define BRCMF_PCIE_INT_0 0x01 90 #define BRCMF_PCIE_INT_0 0x01
91 #define BRCMF_PCIE_INT_1 0x02 91 #define BRCMF_PCIE_INT_1 0x02
92 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \ 92 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
93 BRCMF_PCIE_INT_1) 93 BRCMF_PCIE_INT_1)
94 94
95 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100 95 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
96 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200 96 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
97 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000 97 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
98 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000 98 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
99 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000 99 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
100 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000 100 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
101 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000 101 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
102 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000 102 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
103 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000 103 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
104 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000 104 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
105 105
106 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \ 106 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
107 BRCMF_PCIE_MB_INT_D2H0_DB1 | \ 107 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
108 BRCMF_PCIE_MB_INT_D2H1_DB0 | \ 108 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
109 BRCMF_PCIE_MB_INT_D2H1_DB1 | \ 109 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
110 BRCMF_PCIE_MB_INT_D2H2_DB0 | \ 110 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
111 BRCMF_PCIE_MB_INT_D2H2_DB1 | \ 111 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
112 BRCMF_PCIE_MB_INT_D2H3_DB0 | \ 112 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
113 BRCMF_PCIE_MB_INT_D2H3_DB1) 113 BRCMF_PCIE_MB_INT_D2H3_DB1)
114 114
115 #define BRCMF_PCIE_MIN_SHARED_VERSION 4 115 #define BRCMF_PCIE_MIN_SHARED_VERSION 4
116 #define BRCMF_PCIE_MAX_SHARED_VERSION 5 116 #define BRCMF_PCIE_MAX_SHARED_VERSION 5
117 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF 117 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
118 #define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000 118 #define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
119 119
120 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000 120 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
121 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000 121 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
122 122
123 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34 123 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
124 #define BRCMF_SHARED_RING_BASE_OFFSET 52 124 #define BRCMF_SHARED_RING_BASE_OFFSET 52
125 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36 125 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
126 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20 126 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
127 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40 127 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
128 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44 128 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
129 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48 129 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
130 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52 130 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
131 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56 131 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
132 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64 132 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
133 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68 133 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
134 134
135 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0 135 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
136 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1 136 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
137 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4 137 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
138 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8 138 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
139 139
140 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8 140 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
141 #define BRCMF_RING_MAX_ITEM_OFFSET 4 141 #define BRCMF_RING_MAX_ITEM_OFFSET 4
142 #define BRCMF_RING_LEN_ITEMS_OFFSET 6 142 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
143 #define BRCMF_RING_MEM_SZ 16 143 #define BRCMF_RING_MEM_SZ 16
144 #define BRCMF_RING_STATE_SZ 8 144 #define BRCMF_RING_STATE_SZ 8
145 145
146 #define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4 146 #define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
147 #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8 147 #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
148 #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12 148 #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
149 #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16 149 #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
150 #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0 150 #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
151 #define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52 151 #define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
152 152
153 #define BRCMF_DEF_MAX_RXBUFPOST 255 153 #define BRCMF_DEF_MAX_RXBUFPOST 255
154 154
155 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8 155 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
156 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12 156 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
157 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16 157 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
158 158
159 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8 159 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
160 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024 160 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
161 161
162 #define BRCMF_D2H_DEV_D3_ACK 0x00000001 162 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
163 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002 163 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
164 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004 164 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
165 165
166 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001 166 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
167 #define BRCMF_H2D_HOST_DS_ACK 0x00000002 167 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
168 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008 168 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
169 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010 169 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
170 170
171 #define BRCMF_PCIE_MBDATA_TIMEOUT 2000 171 #define BRCMF_PCIE_MBDATA_TIMEOUT 2000
172 172
173 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4 173 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
174 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C 174 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
175 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58 175 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
176 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C 176 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
177 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60 177 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
178 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64 178 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
179 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC 179 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
180 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC 180 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
181 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228 181 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
182 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248 182 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
183 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0 183 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
184 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4 184 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
185 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3 185 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
186 186
187 187
188 MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME); 188 MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
189 MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME); 189 MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
190 MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME); 190 MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
191 MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME); 191 MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
192 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME); 192 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
193 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME); 193 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
194 194
195 195
196 struct brcmf_pcie_console { 196 struct brcmf_pcie_console {
197 u32 base_addr; 197 u32 base_addr;
198 u32 buf_addr; 198 u32 buf_addr;
199 u32 bufsize; 199 u32 bufsize;
200 u32 read_idx; 200 u32 read_idx;
201 u8 log_str[256]; 201 u8 log_str[256];
202 u8 log_idx; 202 u8 log_idx;
203 }; 203 };
204 204
205 struct brcmf_pcie_shared_info { 205 struct brcmf_pcie_shared_info {
206 u32 tcm_base_address; 206 u32 tcm_base_address;
207 u32 flags; 207 u32 flags;
208 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS]; 208 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
209 struct brcmf_pcie_ringbuf *flowrings; 209 struct brcmf_pcie_ringbuf *flowrings;
210 u16 max_rxbufpost; 210 u16 max_rxbufpost;
211 u32 nrof_flowrings; 211 u32 nrof_flowrings;
212 u32 rx_dataoffset; 212 u32 rx_dataoffset;
213 u32 htod_mb_data_addr; 213 u32 htod_mb_data_addr;
214 u32 dtoh_mb_data_addr; 214 u32 dtoh_mb_data_addr;
215 u32 ring_info_addr; 215 u32 ring_info_addr;
216 struct brcmf_pcie_console console; 216 struct brcmf_pcie_console console;
217 void *scratch; 217 void *scratch;
218 dma_addr_t scratch_dmahandle; 218 dma_addr_t scratch_dmahandle;
219 void *ringupd; 219 void *ringupd;
220 dma_addr_t ringupd_dmahandle; 220 dma_addr_t ringupd_dmahandle;
221 }; 221 };
222 222
223 struct brcmf_pcie_core_info { 223 struct brcmf_pcie_core_info {
224 u32 base; 224 u32 base;
225 u32 wrapbase; 225 u32 wrapbase;
226 }; 226 };
227 227
228 struct brcmf_pciedev_info { 228 struct brcmf_pciedev_info {
229 enum brcmf_pcie_state state; 229 enum brcmf_pcie_state state;
230 bool in_irq; 230 bool in_irq;
231 bool irq_requested; 231 bool irq_requested;
232 struct pci_dev *pdev; 232 struct pci_dev *pdev;
233 char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; 233 char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
234 char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; 234 char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
235 void __iomem *regs; 235 void __iomem *regs;
236 void __iomem *tcm; 236 void __iomem *tcm;
237 u32 tcm_size; 237 u32 tcm_size;
238 u32 ram_base; 238 u32 ram_base;
239 u32 ram_size; 239 u32 ram_size;
240 struct brcmf_chip *ci; 240 struct brcmf_chip *ci;
241 u32 coreid; 241 u32 coreid;
242 u32 generic_corerev; 242 u32 generic_corerev;
243 struct brcmf_pcie_shared_info shared; 243 struct brcmf_pcie_shared_info shared;
244 void (*ringbell)(struct brcmf_pciedev_info *devinfo); 244 void (*ringbell)(struct brcmf_pciedev_info *devinfo);
245 wait_queue_head_t mbdata_resp_wait; 245 wait_queue_head_t mbdata_resp_wait;
246 bool mbdata_completed; 246 bool mbdata_completed;
247 bool irq_allocated; 247 bool irq_allocated;
248 bool wowl_enabled; 248 bool wowl_enabled;
249 }; 249 };
250 250
251 struct brcmf_pcie_ringbuf { 251 struct brcmf_pcie_ringbuf {
252 struct brcmf_commonring commonring; 252 struct brcmf_commonring commonring;
253 dma_addr_t dma_handle; 253 dma_addr_t dma_handle;
254 u32 w_idx_addr; 254 u32 w_idx_addr;
255 u32 r_idx_addr; 255 u32 r_idx_addr;
256 struct brcmf_pciedev_info *devinfo; 256 struct brcmf_pciedev_info *devinfo;
257 u8 id; 257 u8 id;
258 }; 258 };
259 259
260 260
261 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = { 261 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
262 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM, 262 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
263 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM, 263 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
264 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM, 264 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
265 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM, 265 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
266 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 266 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
267 }; 267 };
268 268
269 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { 269 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
270 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, 270 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
271 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, 271 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
272 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, 272 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
273 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE, 273 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
274 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 274 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
275 }; 275 };
276 276
277 277
278 /* DMA flushing needs an implementation for MIPS and ARM platforms and 278 /* DMA flushing needs an implementation for MIPS and ARM platforms and
279 * should be put in util. Note: this is not real flushing, since the 279 * should be put in util. Note: this is not real flushing, since the
280 * memory is virtual and non-cached; only write buffers should have to 280 * memory is virtual and non-cached; only write buffers should have to
281 * be drained, though this may differ per platform. 281 * be drained, though this may differ per platform.
282 */ 282 */
283 #define brcmf_dma_flush(addr, len) 283 #define brcmf_dma_flush(addr, len)
284 #define brcmf_dma_invalidate_cache(addr, len) 284 #define brcmf_dma_invalidate_cache(addr, len)
285 285
286 286
287 static u32 287 static u32
288 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) 288 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
289 { 289 {
290 void __iomem *address = devinfo->regs + reg_offset; 290 void __iomem *address = devinfo->regs + reg_offset;
291 291
292 return (ioread32(address)); 292 return (ioread32(address));
293 } 293 }
294 294
295 295
296 static void 296 static void
297 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset, 297 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
298 u32 value) 298 u32 value)
299 { 299 {
300 void __iomem *address = devinfo->regs + reg_offset; 300 void __iomem *address = devinfo->regs + reg_offset;
301 301
302 iowrite32(value, address); 302 iowrite32(value, address);
303 } 303 }
304 304
305 305
306 static u8 306 static u8
307 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 307 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
308 { 308 {
309 void __iomem *address = devinfo->tcm + mem_offset; 309 void __iomem *address = devinfo->tcm + mem_offset;
310 310
311 return (ioread8(address)); 311 return (ioread8(address));
312 } 312 }
313 313
314 314
315 static u16 315 static u16
316 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 316 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
317 { 317 {
318 void __iomem *address = devinfo->tcm + mem_offset; 318 void __iomem *address = devinfo->tcm + mem_offset;
319 319
320 return (ioread16(address)); 320 return (ioread16(address));
321 } 321 }
322 322
323 323
324 static void 324 static void
325 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 325 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
326 u16 value) 326 u16 value)
327 { 327 {
328 void __iomem *address = devinfo->tcm + mem_offset; 328 void __iomem *address = devinfo->tcm + mem_offset;
329 329
330 iowrite16(value, address); 330 iowrite16(value, address);
331 } 331 }
332 332
333 333
334 static u32 334 static u32
335 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 335 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
336 { 336 {
337 void __iomem *address = devinfo->tcm + mem_offset; 337 void __iomem *address = devinfo->tcm + mem_offset;
338 338
339 return (ioread32(address)); 339 return (ioread32(address));
340 } 340 }
341 341
342 342
343 static void 343 static void
344 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 344 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
345 u32 value) 345 u32 value)
346 { 346 {
347 void __iomem *address = devinfo->tcm + mem_offset; 347 void __iomem *address = devinfo->tcm + mem_offset;
348 348
349 iowrite32(value, address); 349 iowrite32(value, address);
350 } 350 }
351 351
352 352
353 static u32 353 static u32
354 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) 354 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
355 { 355 {
356 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; 356 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
357 357
358 return (ioread32(addr)); 358 return (ioread32(addr));
359 } 359 }
360 360
361 361
362 static void 362 static void
363 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 363 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
364 u32 value) 364 u32 value)
365 { 365 {
366 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; 366 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
367 367
368 iowrite32(value, addr); 368 iowrite32(value, addr);
369 } 369 }
370 370
371 371
372 static void 372 static void
373 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, 373 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
374 void *srcaddr, u32 len) 374 void *srcaddr, u32 len)
375 { 375 {
376 void __iomem *address = devinfo->tcm + mem_offset; 376 void __iomem *address = devinfo->tcm + mem_offset;
377 __le32 *src32; 377 __le32 *src32;
378 __le16 *src16; 378 __le16 *src16;
379 u8 *src8; 379 u8 *src8;
380 380
381 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { 381 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
382 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { 382 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
383 src8 = (u8 *)srcaddr; 383 src8 = (u8 *)srcaddr;
384 while (len) { 384 while (len) {
385 iowrite8(*src8, address); 385 iowrite8(*src8, address);
386 address++; 386 address++;
387 src8++; 387 src8++;
388 len--; 388 len--;
389 } 389 }
390 } else { 390 } else {
391 len = len / 2; 391 len = len / 2;
392 src16 = (__le16 *)srcaddr; 392 src16 = (__le16 *)srcaddr;
393 while (len) { 393 while (len) {
394 iowrite16(le16_to_cpu(*src16), address); 394 iowrite16(le16_to_cpu(*src16), address);
395 address += 2; 395 address += 2;
396 src16++; 396 src16++;
397 len--; 397 len--;
398 } 398 }
399 } 399 }
400 } else { 400 } else {
401 len = len / 4; 401 len = len / 4;
402 src32 = (__le32 *)srcaddr; 402 src32 = (__le32 *)srcaddr;
403 while (len) { 403 while (len) {
404 iowrite32(le32_to_cpu(*src32), address); 404 iowrite32(le32_to_cpu(*src32), address);
405 address += 4; 405 address += 4;
406 src32++; 406 src32++;
407 len--; 407 len--;
408 } 408 }
409 } 409 }
410 } 410 }
411 411
412 412
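Editor's observation on brcmf_pcie_copy_mem_todev() above: its tests use "& 4", which checks bit 2 only, while the conventional alignment test for choosing a 32-bit versus 16-bit versus 8-bit MMIO copy masks the low bits. A sketch of the usual idiom (this states the common pattern, not a claim about the driver's intent):

    #include <linux/types.h>

    /* True when both addresses and the length permit 4-byte-wide copies. */
    static bool can_copy_32(unsigned long dst, unsigned long src, u32 len)
    {
            return !((dst | src | len) & 3);
    }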
413 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \ 413 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
414 CHIPCREGOFFS(reg), value) 414 CHIPCREGOFFS(reg), value)
415 415
416 416
417 static void 417 static void
418 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid) 418 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
419 { 419 {
420 const struct pci_dev *pdev = devinfo->pdev; 420 const struct pci_dev *pdev = devinfo->pdev;
421 struct brcmf_core *core; 421 struct brcmf_core *core;
422 u32 bar0_win; 422 u32 bar0_win;
423 423
424 core = brcmf_chip_get_core(devinfo->ci, coreid); 424 core = brcmf_chip_get_core(devinfo->ci, coreid);
425 if (core) { 425 if (core) {
426 bar0_win = core->base; 426 bar0_win = core->base;
427 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win); 427 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
428 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, 428 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
429 &bar0_win) == 0) { 429 &bar0_win) == 0) {
430 if (bar0_win != core->base) { 430 if (bar0_win != core->base) {
431 bar0_win = core->base; 431 bar0_win = core->base;
432 pci_write_config_dword(pdev, 432 pci_write_config_dword(pdev,
433 BRCMF_PCIE_BAR0_WINDOW, 433 BRCMF_PCIE_BAR0_WINDOW,
434 bar0_win); 434 bar0_win);
435 } 435 }
436 } 436 }
437 } else { 437 } else {
438 brcmf_err("Unsupported core selected %x\n", coreid); 438 brcmf_err("Unsupported core selected %x\n", coreid);
439 } 439 }
440 } 440 }
441 441
442 442
443 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo) 443 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
444 { 444 {
445 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD, 445 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
446 BRCMF_PCIE_CFGREG_PM_CSR, 446 BRCMF_PCIE_CFGREG_PM_CSR,
447 BRCMF_PCIE_CFGREG_MSI_CAP, 447 BRCMF_PCIE_CFGREG_MSI_CAP,
448 BRCMF_PCIE_CFGREG_MSI_ADDR_L, 448 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
449 BRCMF_PCIE_CFGREG_MSI_ADDR_H, 449 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
450 BRCMF_PCIE_CFGREG_MSI_DATA, 450 BRCMF_PCIE_CFGREG_MSI_DATA,
451 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2, 451 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
452 BRCMF_PCIE_CFGREG_RBAR_CTRL, 452 BRCMF_PCIE_CFGREG_RBAR_CTRL,
453 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1, 453 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
454 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG, 454 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
455 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG }; 455 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
456 u32 i; 456 u32 i;
457 u32 val; 457 u32 val;
458 u32 lsc; 458 u32 lsc;
459 459
460 if (!devinfo->ci) 460 if (!devinfo->ci)
461 return; 461 return;
462 462
463 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 463 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
464 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 464 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
465 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL); 465 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
466 lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA); 466 lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
467 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB); 467 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
468 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val); 468 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);
469 469
470 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON); 470 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
471 WRITECC32(devinfo, watchdog, 4); 471 WRITECC32(devinfo, watchdog, 4);
472 msleep(100); 472 msleep(100);
473 473
474 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 474 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
475 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 475 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
476 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL); 476 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
477 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc); 477 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);
478 478
479 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 479 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
480 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) { 480 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
481 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 481 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
482 cfg_offset[i]); 482 cfg_offset[i]);
483 val = brcmf_pcie_read_reg32(devinfo, 483 val = brcmf_pcie_read_reg32(devinfo,
484 BRCMF_PCIE_PCIE2REG_CONFIGDATA); 484 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
485 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n", 485 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
486 cfg_offset[i], val); 486 cfg_offset[i], val);
487 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, 487 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
488 val); 488 val);
489 } 489 }
490 } 490 }
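
brcmf_pcie_reset_device() leans on indirect config access: the host writes a register offset to CONFIGADDR and then reads or writes the value through CONFIGDATA. A sketch of that address/data pairing, with hypothetical accessor and register names (the driver uses brcmf_pcie_read_reg32/brcmf_pcie_write_reg32 against BRCMF_PCIE_PCIE2REG_CONFIGADDR/CONFIGDATA):

	/* Sketch: indirect register access via an address/data pair.
	 * regs, CFG_ADDR and CFG_DATA are illustrative names. */
	static u32 cfg_read32(void __iomem *regs, u32 offset)
	{
		iowrite32(offset, regs + CFG_ADDR);	/* select register */
		return ioread32(regs + CFG_DATA);	/* fetch its value */
	}

	static void cfg_write32(void __iomem *regs, u32 offset, u32 val)
	{
		iowrite32(offset, regs + CFG_ADDR);
		iowrite32(val, regs + CFG_DATA);
	}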
491 491
492 492
493 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo) 493 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
494 { 494 {
495 u32 config; 495 u32 config;
496 496
497 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 497 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
498 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) 498 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
499 brcmf_pcie_reset_device(devinfo); 499 brcmf_pcie_reset_device(devinfo);
500 /* BAR1 window may not be sized properly */ 500 /* BAR1 window may not be sized properly */
501 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 501 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
502 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); 502 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
503 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA); 503 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
504 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config); 504 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
505 505
506 device_wakeup_enable(&devinfo->pdev->dev); 506 device_wakeup_enable(&devinfo->pdev->dev);
507 } 507 }
508 508
509 509
510 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo) 510 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
511 { 511 {
512 brcmf_chip_enter_download(devinfo->ci); 512 brcmf_chip_enter_download(devinfo->ci);
513 513
514 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { 514 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
515 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4); 515 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
516 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, 516 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
517 5); 517 5);
518 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA, 518 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
519 0); 519 0);
520 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, 520 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
521 7); 521 7);
522 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA, 522 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
523 0); 523 0);
524 } 524 }
525 return 0; 525 return 0;
526 } 526 }
527 527
528 528
529 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, 529 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
530 u32 resetintr) 530 u32 resetintr)
531 { 531 {
532 struct brcmf_core *core; 532 struct brcmf_core *core;
533 533
534 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { 534 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
535 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM); 535 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
536 brcmf_chip_resetcore(core, 0, 0, 0); 536 brcmf_chip_resetcore(core, 0, 0, 0);
537 } 537 }
538 538
539 return !brcmf_chip_exit_download(devinfo->ci, resetintr); 539 return !brcmf_chip_exit_download(devinfo->ci, resetintr);
540 } 540 }
541 541
542 542
543 static int 543 static int
544 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data) 544 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
545 { 545 {
546 struct brcmf_pcie_shared_info *shared; 546 struct brcmf_pcie_shared_info *shared;
547 u32 addr; 547 u32 addr;
548 u32 cur_htod_mb_data; 548 u32 cur_htod_mb_data;
549 u32 i; 549 u32 i;
550 550
551 shared = &devinfo->shared; 551 shared = &devinfo->shared;
552 addr = shared->htod_mb_data_addr; 552 addr = shared->htod_mb_data_addr;
553 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); 553 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
554 554
555 if (cur_htod_mb_data != 0) 555 if (cur_htod_mb_data != 0)
556 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n", 556 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
557 cur_htod_mb_data); 557 cur_htod_mb_data);
558 558
559 i = 0; 559 i = 0;
560 while (cur_htod_mb_data != 0) { 560 while (cur_htod_mb_data != 0) {
561 msleep(10); 561 msleep(10);
562 i++; 562 i++;
563 if (i > 100) 563 if (i > 100)
564 return -EIO; 564 return -EIO;
565 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); 565 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
566 } 566 }
567 567
568 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data); 568 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
569 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); 569 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
570 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); 570 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
571 571
572 return 0; 572 return 0;
573 } 573 }
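
The send path above refuses to overwrite a mailbox value the firmware has not yet consumed: it polls the host-to-device word for up to roughly a second (100 iterations of 10 ms) before giving up with -EIO. A compact sketch of that bounded-poll handshake, with read_mb()/write_mb() as hypothetical stand-ins for the TCM accessors:

	/* Sketch of the bounded poll above; names are illustrative. */
	static int post_mb_value(u32 value)
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (read_mb() == 0)	/* previous value consumed */
				break;
			msleep(10);
		}
		if (read_mb() != 0)
			return -EIO;		/* device never drained it */

		write_mb(value);		/* post the new value */
		return 0;
	}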
574 574
575 575
576 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) 576 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
577 { 577 {
578 struct brcmf_pcie_shared_info *shared; 578 struct brcmf_pcie_shared_info *shared;
579 u32 addr; 579 u32 addr;
580 u32 dtoh_mb_data; 580 u32 dtoh_mb_data;
581 581
582 shared = &devinfo->shared; 582 shared = &devinfo->shared;
583 addr = shared->dtoh_mb_data_addr; 583 addr = shared->dtoh_mb_data_addr;
584 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); 584 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
585 585
586 if (!dtoh_mb_data) 586 if (!dtoh_mb_data)
587 return; 587 return;
588 588
589 brcmf_pcie_write_tcm32(devinfo, addr, 0); 589 brcmf_pcie_write_tcm32(devinfo, addr, 0);
590 590
591 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data); 591 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
592 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) { 592 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
593 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n"); 593 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
594 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK); 594 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
595 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n"); 595 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
596 } 596 }
597 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE) 597 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
598 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n"); 598 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
599 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) { 599 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
600 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n"); 600 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
601 if (waitqueue_active(&devinfo->mbdata_resp_wait)) { 601 if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
602 devinfo->mbdata_completed = true; 602 devinfo->mbdata_completed = true;
603 wake_up(&devinfo->mbdata_resp_wait); 603 wake_up(&devinfo->mbdata_resp_wait);
604 } 604 }
605 } 605 }
606 } 606 }
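
The receive side mirrors this: it reads the device-to-host word, clears it so the firmware can post again, and then dispatches on individual flag bits (deep-sleep request, deep-sleep exit, D3 ack). A sketch of that read-clear-dispatch shape, reusing the hypothetical mailbox helpers from the previous sketch and illustrative flag names:

	/* Sketch; DS_ENTER_REQ and HOST_DS_ACK are placeholder flags. */
	u32 mb = read_mb();

	if (!mb)
		return;
	write_mb(0);		/* clear so the device can post again */
	if (mb & DS_ENTER_REQ)
		post_mb_value(HOST_DS_ACK);	/* ack the deep-sleep request */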
607 607
608 608
609 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo) 609 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
610 { 610 {
611 struct brcmf_pcie_shared_info *shared; 611 struct brcmf_pcie_shared_info *shared;
612 struct brcmf_pcie_console *console; 612 struct brcmf_pcie_console *console;
613 u32 addr; 613 u32 addr;
614 614
615 shared = &devinfo->shared; 615 shared = &devinfo->shared;
616 console = &shared->console; 616 console = &shared->console;
617 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET; 617 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
618 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr); 618 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
619 619
620 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET; 620 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
621 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr); 621 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
622 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET; 622 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
623 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr); 623 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
624 624
625 brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n", 625 brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n",
626 console->base_addr, console->buf_addr, console->bufsize); 626 console->base_addr, console->buf_addr, console->bufsize);
627 } 627 }
628 628
629 629
630 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo) 630 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
631 { 631 {
632 struct brcmf_pcie_console *console; 632 struct brcmf_pcie_console *console;
633 u32 addr; 633 u32 addr;
634 u8 ch; 634 u8 ch;
635 u32 newidx; 635 u32 newidx;
636 636
637 console = &devinfo->shared.console; 637 console = &devinfo->shared.console;
638 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET; 638 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
639 newidx = brcmf_pcie_read_tcm32(devinfo, addr); 639 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
640 while (newidx != console->read_idx) { 640 while (newidx != console->read_idx) {
641 addr = console->buf_addr + console->read_idx; 641 addr = console->buf_addr + console->read_idx;
642 ch = brcmf_pcie_read_tcm8(devinfo, addr); 642 ch = brcmf_pcie_read_tcm8(devinfo, addr);
643 console->read_idx++; 643 console->read_idx++;
644 if (console->read_idx == console->bufsize) 644 if (console->read_idx == console->bufsize)
645 console->read_idx = 0; 645 console->read_idx = 0;
646 if (ch == '\r') 646 if (ch == '\r')
647 continue; 647 continue;
648 console->log_str[console->log_idx] = ch; 648 console->log_str[console->log_idx] = ch;
649 console->log_idx++; 649 console->log_idx++;
650 if ((ch != '\n') && 650 if ((ch != '\n') &&
651 (console->log_idx == (sizeof(console->log_str) - 2))) { 651 (console->log_idx == (sizeof(console->log_str) - 2))) {
652 ch = '\n'; 652 ch = '\n';
653 console->log_str[console->log_idx] = ch; 653 console->log_str[console->log_idx] = ch;
654 console->log_idx++; 654 console->log_idx++;
655 } 655 }
656 656
657 if (ch == '\n') { 657 if (ch == '\n') {
658 console->log_str[console->log_idx] = 0; 658 console->log_str[console->log_idx] = 0;
659 brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str); 659 brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
660 console->log_idx = 0; 660 console->log_idx = 0;
661 } 661 }
662 } 662 }
663 } 663 }
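
The console reader drains a circular buffer in device memory: it chases the firmware's write index, wraps its own read index at the buffer size, drops carriage returns, and flushes whenever it sees a newline or the line buffer is nearly full. A self-contained userspace sketch of the same wrap-and-assemble loop (buffer sizes and names are illustrative, not the driver's):

	#include <stdio.h>

	#define BUFSZ	16	/* device ring size (illustrative) */
	#define LINESZ	80	/* line buffer size (illustrative) */

	static void drain(const char *ring, unsigned int widx,
			  unsigned int *ridx)
	{
		static char line[LINESZ];
		static unsigned int len;

		while (*ridx != widx) {
			char ch = ring[*ridx];

			if (++(*ridx) == BUFSZ)	/* wrap the read index */
				*ridx = 0;
			if (ch == '\r')
				continue;
			line[len++] = ch;
			/* flush early when the line buffer is nearly full */
			if (ch != '\n' && len == LINESZ - 2) {
				ch = '\n';
				line[len++] = ch;
			}
			if (ch == '\n') {
				line[len] = '\0';
				fputs(line, stdout);
				len = 0;
			}
		}
	}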
664 664
665 665
666 static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo) 666 static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
667 { 667 {
668 u32 reg_value; 668 u32 reg_value;
669 669
670 brcmf_dbg(PCIE, "RING !\n"); 670 brcmf_dbg(PCIE, "RING !\n");
671 reg_value = brcmf_pcie_read_reg32(devinfo, 671 reg_value = brcmf_pcie_read_reg32(devinfo,
672 BRCMF_PCIE_PCIE2REG_MAILBOXINT); 672 BRCMF_PCIE_PCIE2REG_MAILBOXINT);
673 reg_value |= BRCMF_PCIE2_INTB; 673 reg_value |= BRCMF_PCIE2_INTB;
674 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, 674 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
675 reg_value); 675 reg_value);
676 } 676 }
677 677
678 678
679 static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo) 679 static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
680 { 680 {
681 brcmf_dbg(PCIE, "RING !\n"); 681 brcmf_dbg(PCIE, "RING !\n");
682 /* Any arbitrary value will do, let's use 1 */ 682 /* Any arbitrary value will do, let's use 1 */
683 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); 683 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
684 } 684 }
685 685
686 686
687 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo) 687 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
688 { 688 {
689 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) 689 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
690 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, 690 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
691 0); 691 0);
692 else 692 else
693 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 693 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
694 0); 694 0);
695 } 695 }
696 696
697 697
698 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) 698 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
699 { 699 {
700 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) 700 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
701 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, 701 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
702 BRCMF_PCIE_INT_DEF); 702 BRCMF_PCIE_INT_DEF);
703 else 703 else
704 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 704 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
705 BRCMF_PCIE_MB_INT_D2H_DB | 705 BRCMF_PCIE_MB_INT_D2H_DB |
706 BRCMF_PCIE_MB_INT_FN0_0 | 706 BRCMF_PCIE_MB_INT_FN0_0 |
707 BRCMF_PCIE_MB_INT_FN0_1); 707 BRCMF_PCIE_MB_INT_FN0_1);
708 } 708 }
709 709
710 710
711 static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg) 711 static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
712 { 712 {
713 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; 713 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
714 u32 status; 714 u32 status;
715 715
716 status = 0; 716 status = 0;
717 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status); 717 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
718 if (status) { 718 if (status) {
719 brcmf_pcie_intr_disable(devinfo); 719 brcmf_pcie_intr_disable(devinfo);
720 brcmf_dbg(PCIE, "Enter\n"); 720 brcmf_dbg(PCIE, "Enter\n");
721 return IRQ_WAKE_THREAD; 721 return IRQ_WAKE_THREAD;
722 } 722 }
723 return IRQ_NONE; 723 return IRQ_NONE;
724 } 724 }
725 725
726 726
727 static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg) 727 static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
728 { 728 {
729 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; 729 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
730 730
731 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) { 731 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
732 brcmf_pcie_intr_disable(devinfo); 732 brcmf_pcie_intr_disable(devinfo);
733 brcmf_dbg(PCIE, "Enter\n"); 733 brcmf_dbg(PCIE, "Enter\n");
734 return IRQ_WAKE_THREAD; 734 return IRQ_WAKE_THREAD;
735 } 735 }
736 return IRQ_NONE; 736 return IRQ_NONE;
737 } 737 }
738 738
739 739
740 static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg) 740 static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
741 { 741 {
742 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; 742 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
743 const struct pci_dev *pdev = devinfo->pdev; 743 const struct pci_dev *pdev = devinfo->pdev;
744 u32 status; 744 u32 status;
745 745
746 devinfo->in_irq = true; 746 devinfo->in_irq = true;
747 status = 0; 747 status = 0;
748 pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status); 748 pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
749 brcmf_dbg(PCIE, "Enter %x\n", status); 749 brcmf_dbg(PCIE, "Enter %x\n", status);
750 if (status) { 750 if (status) {
751 pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status); 751 pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
752 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) 752 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
753 brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev); 753 brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
754 } 754 }
755 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) 755 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
756 brcmf_pcie_intr_enable(devinfo); 756 brcmf_pcie_intr_enable(devinfo);
757 devinfo->in_irq = false; 757 devinfo->in_irq = false;
758 return IRQ_HANDLED; 758 return IRQ_HANDLED;
759 } 759 }
760 760
761 761
762 static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg) 762 static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
763 { 763 {
764 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; 764 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
765 u32 status; 765 u32 status;
766 766
767 devinfo->in_irq = true; 767 devinfo->in_irq = true;
768 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); 768 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
769 brcmf_dbg(PCIE, "Enter %x\n", status); 769 brcmf_dbg(PCIE, "Enter %x\n", status);
770 if (status) { 770 if (status) {
771 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, 771 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
772 status); 772 status);
773 if (status & (BRCMF_PCIE_MB_INT_FN0_0 | 773 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
774 BRCMF_PCIE_MB_INT_FN0_1)) 774 BRCMF_PCIE_MB_INT_FN0_1))
775 brcmf_pcie_handle_mb_data(devinfo); 775 brcmf_pcie_handle_mb_data(devinfo);
776 if (status & BRCMF_PCIE_MB_INT_D2H_DB) { 776 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
777 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) 777 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
778 brcmf_proto_msgbuf_rx_trigger( 778 brcmf_proto_msgbuf_rx_trigger(
779 &devinfo->pdev->dev); 779 &devinfo->pdev->dev);
780 } 780 }
781 } 781 }
782 brcmf_pcie_bus_console_read(devinfo); 782 brcmf_pcie_bus_console_read(devinfo);
783 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) 783 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
784 brcmf_pcie_intr_enable(devinfo); 784 brcmf_pcie_intr_enable(devinfo);
785 devinfo->in_irq = false; 785 devinfo->in_irq = false;
786 return IRQ_HANDLED; 786 return IRQ_HANDLED;
787 } 787 }
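
The interrupt path follows the standard two-stage threaded-IRQ pattern: a quick handler that only checks whether this device raised the (possibly shared) line and masks it, and a thread that acknowledges the status, does the real work, and unmasks. A generic sketch under that assumption, with hypothetical my_* helpers:

	/* Sketch of the two-stage pattern; my_* helpers are placeholders. */
	static irqreturn_t quick_check(int irq, void *arg)
	{
		struct my_dev *dev = arg;

		if (!my_irq_pending(dev))
			return IRQ_NONE;	/* not ours: line is shared */
		my_irq_mask(dev);		/* quiet until the thread runs */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t thread_fn(int irq, void *arg)
	{
		struct my_dev *dev = arg;

		my_irq_ack_and_handle(dev);	/* clear status, process work */
		my_irq_unmask(dev);
		return IRQ_HANDLED;
	}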
788 788
789 789
790 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) 790 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
791 { 791 {
792 struct pci_dev *pdev; 792 struct pci_dev *pdev;
793 793
794 pdev = devinfo->pdev; 794 pdev = devinfo->pdev;
795 795
796 brcmf_pcie_intr_disable(devinfo); 796 brcmf_pcie_intr_disable(devinfo);
797 797
798 brcmf_dbg(PCIE, "Enter\n"); 798 brcmf_dbg(PCIE, "Enter\n");
799 /* is it a v1 or v2 implementation? */ 799 /* is it a v1 or v2 implementation? */
800 devinfo->irq_requested = false; 800 devinfo->irq_requested = false;
801 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { 801 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
802 if (request_threaded_irq(pdev->irq, 802 if (request_threaded_irq(pdev->irq,
803 brcmf_pcie_quick_check_isr_v1, 803 brcmf_pcie_quick_check_isr_v1,
804 brcmf_pcie_isr_thread_v1, 804 brcmf_pcie_isr_thread_v1,
805 IRQF_SHARED, "brcmf_pcie_intr", 805 IRQF_SHARED, "brcmf_pcie_intr",
806 devinfo)) { 806 devinfo)) {
807 brcmf_err("Failed to request IRQ %d\n", pdev->irq); 807 brcmf_err("Failed to request IRQ %d\n", pdev->irq);
808 return -EIO; 808 return -EIO;
809 } 809 }
810 } else { 810 } else {
811 if (request_threaded_irq(pdev->irq, 811 if (request_threaded_irq(pdev->irq,
812 brcmf_pcie_quick_check_isr_v2, 812 brcmf_pcie_quick_check_isr_v2,
813 brcmf_pcie_isr_thread_v2, 813 brcmf_pcie_isr_thread_v2,
814 IRQF_SHARED, "brcmf_pcie_intr", 814 IRQF_SHARED, "brcmf_pcie_intr",
815 devinfo)) { 815 devinfo)) {
816 brcmf_err("Failed to request IRQ %d\n", pdev->irq); 816 brcmf_err("Failed to request IRQ %d\n", pdev->irq);
817 return -EIO; 817 return -EIO;
818 } 818 }
819 } 819 }
820 devinfo->irq_requested = true; 820 devinfo->irq_requested = true;
821 devinfo->irq_allocated = true; 821 devinfo->irq_allocated = true;
822 return 0; 822 return 0;
823 } 823 }
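
Registration then pairs the two handlers through request_threaded_irq(); IRQF_SHARED is required because legacy PCI INTx lines may be shared between devices. Continuing the sketch above (quick_check/thread_fn and the name string remain illustrative):

	/* Sketch, reusing quick_check/thread_fn from the previous sketch. */
	if (request_threaded_irq(pdev->irq, quick_check, thread_fn,
				 IRQF_SHARED, "my-pcie-intr", dev)) {
		pr_err("failed to request IRQ %d\n", pdev->irq);
		return -EIO;
	}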
824 824
825 825
826 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) 826 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
827 { 827 {
828 struct pci_dev *pdev; 828 struct pci_dev *pdev;
829 u32 status; 829 u32 status;
830 u32 count; 830 u32 count;
831 831
832 if (!devinfo->irq_allocated) 832 if (!devinfo->irq_allocated)
833 return; 833 return;
834 834
835 pdev = devinfo->pdev; 835 pdev = devinfo->pdev;
836 836
837 brcmf_pcie_intr_disable(devinfo); 837 brcmf_pcie_intr_disable(devinfo);
838 if (!devinfo->irq_requested) 838 if (!devinfo->irq_requested)
839 return; 839 return;
840 devinfo->irq_requested = false; 840 devinfo->irq_requested = false;
841 free_irq(pdev->irq, devinfo); 841 free_irq(pdev->irq, devinfo);
842 842
843 msleep(50); 843 msleep(50);
844 count = 0; 844 count = 0;
845 while ((devinfo->in_irq) && (count < 20)) { 845 while ((devinfo->in_irq) && (count < 20)) {
846 msleep(50); 846 msleep(50);
847 count++; 847 count++;
848 } 848 }
849 if (devinfo->in_irq) 849 if (devinfo->in_irq)
850 brcmf_err("Still in IRQ (processing) !!!\n"); 850 brcmf_err("Still in IRQ (processing) !!!\n");
851 851
852 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { 852 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
853 status = 0; 853 status = 0;
854 pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status); 854 pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
855 pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status); 855 pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
856 } else { 856 } else {
857 status = brcmf_pcie_read_reg32(devinfo, 857 status = brcmf_pcie_read_reg32(devinfo,
858 BRCMF_PCIE_PCIE2REG_MAILBOXINT); 858 BRCMF_PCIE_PCIE2REG_MAILBOXINT);
859 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, 859 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
860 status); 860 status);
861 } 861 }
862 devinfo->irq_allocated = false; 862 devinfo->irq_allocated = false;
863 } 863 }
864 864
865 865
866 static int brcmf_pcie_ring_mb_write_rptr(void *ctx) 866 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
867 { 867 {
868 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 868 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
869 struct brcmf_pciedev_info *devinfo = ring->devinfo; 869 struct brcmf_pciedev_info *devinfo = ring->devinfo;
870 struct brcmf_commonring *commonring = &ring->commonring; 870 struct brcmf_commonring *commonring = &ring->commonring;
871 871
872 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 872 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
873 return -EIO; 873 return -EIO;
874 874
875 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, 875 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
876 commonring->w_ptr, ring->id); 876 commonring->w_ptr, ring->id);
877 877
878 brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr); 878 brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
879 879
880 return 0; 880 return 0;
881 } 881 }
882 882
883 883
884 static int brcmf_pcie_ring_mb_write_wptr(void *ctx) 884 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
885 { 885 {
886 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 886 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
887 struct brcmf_pciedev_info *devinfo = ring->devinfo; 887 struct brcmf_pciedev_info *devinfo = ring->devinfo;
888 struct brcmf_commonring *commonring = &ring->commonring; 888 struct brcmf_commonring *commonring = &ring->commonring;
889 889
890 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 890 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
891 return -EIO; 891 return -EIO;
892 892
893 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, 893 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
894 commonring->r_ptr, ring->id); 894 commonring->r_ptr, ring->id);
895 895
896 brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr); 896 brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
897 897
898 return 0; 898 return 0;
899 } 899 }
900 900
901 901
902 static int brcmf_pcie_ring_mb_ring_bell(void *ctx) 902 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
903 { 903 {
904 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 904 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
905 struct brcmf_pciedev_info *devinfo = ring->devinfo; 905 struct brcmf_pciedev_info *devinfo = ring->devinfo;
906 906
907 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 907 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
908 return -EIO; 908 return -EIO;
909 909
910 devinfo->ringbell(devinfo); 910 devinfo->ringbell(devinfo);
911 911
912 return 0; 912 return 0;
913 } 913 }
914 914
915 915
916 static int brcmf_pcie_ring_mb_update_rptr(void *ctx) 916 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
917 { 917 {
918 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 918 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
919 struct brcmf_pciedev_info *devinfo = ring->devinfo; 919 struct brcmf_pciedev_info *devinfo = ring->devinfo;
920 struct brcmf_commonring *commonring = &ring->commonring; 920 struct brcmf_commonring *commonring = &ring->commonring;
921 921
922 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 922 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
923 return -EIO; 923 return -EIO;
924 924
925 commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr); 925 commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
926 926
927 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, 927 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
928 commonring->w_ptr, ring->id); 928 commonring->w_ptr, ring->id);
929 929
930 return 0; 930 return 0;
931 } 931 }
932 932
933 933
934 static int brcmf_pcie_ring_mb_update_wptr(void *ctx) 934 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
935 { 935 {
936 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; 936 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
937 struct brcmf_pciedev_info *devinfo = ring->devinfo; 937 struct brcmf_pciedev_info *devinfo = ring->devinfo;
938 struct brcmf_commonring *commonring = &ring->commonring; 938 struct brcmf_commonring *commonring = &ring->commonring;
939 939
940 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) 940 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
941 return -EIO; 941 return -EIO;
942 942
943 commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr); 943 commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
944 944
945 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, 945 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
946 commonring->r_ptr, ring->id); 946 commonring->r_ptr, ring->id);
947 947
948 return 0; 948 return 0;
949 } 949 }
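
These callbacks keep the host's cached ring indices coherent with the 16-bit copies the firmware sees in TCM: the write_* pair pushes host-side pointers out to the device, the update_* pair pulls device-side pointers in, and the bell callback notifies the firmware that something changed. A condensed sketch of the mirroring, with hypothetical tcm_read16/tcm_write16 accessors:

	/* Sketch of index mirroring; tcm_* helpers are placeholders. */
	static void push_wptr(struct ring *r)	/* host produced items */
	{
		tcm_write16(r->w_idx_addr, r->w_ptr);
	}

	static void pull_rptr(struct ring *r)	/* device consumed items */
	{
		r->r_ptr = tcm_read16(r->r_idx_addr);
	}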
950 950
951 951
952 static void * 952 static void *
953 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo, 953 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
954 u32 size, u32 tcm_dma_phys_addr, 954 u32 size, u32 tcm_dma_phys_addr,
955 dma_addr_t *dma_handle) 955 dma_addr_t *dma_handle)
956 { 956 {
957 void *ring; 957 void *ring;
958 long long address; 958 long long address;
959 959
960 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, 960 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
961 GFP_KERNEL); 961 GFP_KERNEL);
962 if (!ring) 962 if (!ring)
963 return NULL; 963 return NULL;
964 964
965 address = (long long)(long)*dma_handle; 965 address = (long long)(long)*dma_handle;
966 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, 966 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
967 address & 0xffffffff); 967 address & 0xffffffff);
968 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); 968 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
969 969
970 memset(ring, 0, size); 970 memset(ring, 0, size);
971 971
972 return (ring); 972 return (ring);
973 } 973 }
974 974
975 975
976 static struct brcmf_pcie_ringbuf * 976 static struct brcmf_pcie_ringbuf *
977 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, 977 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
978 u32 tcm_ring_phys_addr) 978 u32 tcm_ring_phys_addr)
979 { 979 {
980 void *dma_buf; 980 void *dma_buf;
981 dma_addr_t dma_handle; 981 dma_addr_t dma_handle;
982 struct brcmf_pcie_ringbuf *ring; 982 struct brcmf_pcie_ringbuf *ring;
983 u32 size; 983 u32 size;
984 u32 addr; 984 u32 addr;
985 985
986 size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id]; 986 size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
987 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size, 987 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
988 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET, 988 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
989 &dma_handle); 989 &dma_handle);
990 if (!dma_buf) 990 if (!dma_buf)
991 return NULL; 991 return NULL;
992 992
993 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET; 993 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
994 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]); 994 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
995 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET; 995 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
996 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]); 996 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
997 997
998 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 998 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
999 if (!ring) { 999 if (!ring) {
1000 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf, 1000 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1001 dma_handle); 1001 dma_handle);
1002 return NULL; 1002 return NULL;
1003 } 1003 }
1004 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id], 1004 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1005 brcmf_ring_itemsize[ring_id], dma_buf); 1005 brcmf_ring_itemsize[ring_id], dma_buf);
1006 ring->dma_handle = dma_handle; 1006 ring->dma_handle = dma_handle;
1007 ring->devinfo = devinfo; 1007 ring->devinfo = devinfo;
1008 brcmf_commonring_register_cb(&ring->commonring, 1008 brcmf_commonring_register_cb(&ring->commonring,
1009 brcmf_pcie_ring_mb_ring_bell, 1009 brcmf_pcie_ring_mb_ring_bell,
1010 brcmf_pcie_ring_mb_update_rptr, 1010 brcmf_pcie_ring_mb_update_rptr,
1011 brcmf_pcie_ring_mb_update_wptr, 1011 brcmf_pcie_ring_mb_update_wptr,
1012 brcmf_pcie_ring_mb_write_rptr, 1012 brcmf_pcie_ring_mb_write_rptr,
1013 brcmf_pcie_ring_mb_write_wptr, ring); 1013 brcmf_pcie_ring_mb_write_wptr, ring);
1014 1014
1015 return (ring); 1015 return (ring);
1016 } 1016 }
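
Ring allocation pairs dma_alloc_coherent() with publishing the returned bus address into device memory as two little-endian 32-bit words, low word first, since the TCM writes are 32 bits wide. A sketch of the address split (write_dev32() and the offsets are hypothetical):

	/* Sketch: publish a 64-bit DMA address as two 32-bit words. */
	u64 a = (u64)dma_handle;

	write_dev32(lo_off, lower_32_bits(a));	/* low word first */
	write_dev32(hi_off, upper_32_bits(a));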
1017 1017
1018 1018
1019 static void brcmf_pcie_release_ringbuffer(struct device *dev, 1019 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1020 struct brcmf_pcie_ringbuf *ring) 1020 struct brcmf_pcie_ringbuf *ring)
1021 { 1021 {
1022 void *dma_buf; 1022 void *dma_buf;
1023 u32 size; 1023 u32 size;
1024 1024
1025 if (!ring) 1025 if (!ring)
1026 return; 1026 return;
1027 1027
1028 dma_buf = ring->commonring.buf_addr; 1028 dma_buf = ring->commonring.buf_addr;
1029 if (dma_buf) { 1029 if (dma_buf) {
1030 size = ring->commonring.depth * ring->commonring.item_len; 1030 size = ring->commonring.depth * ring->commonring.item_len;
1031 dma_free_coherent(dev, size, dma_buf, ring->dma_handle); 1031 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1032 } 1032 }
1033 kfree(ring); 1033 kfree(ring);
1034 } 1034 }
1035 1035
1036 1036
1037 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) 1037 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1038 { 1038 {
1039 u32 i; 1039 u32 i;
1040 1040
1041 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) { 1041 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1042 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev, 1042 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1043 devinfo->shared.commonrings[i]); 1043 devinfo->shared.commonrings[i]);
1044 devinfo->shared.commonrings[i] = NULL; 1044 devinfo->shared.commonrings[i] = NULL;
1045 } 1045 }
1046 kfree(devinfo->shared.flowrings); 1046 kfree(devinfo->shared.flowrings);
1047 devinfo->shared.flowrings = NULL; 1047 devinfo->shared.flowrings = NULL;
1048 } 1048 }
1049 1049
1050 1050
1051 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) 1051 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1052 { 1052 {
1053 struct brcmf_pcie_ringbuf *ring; 1053 struct brcmf_pcie_ringbuf *ring;
1054 struct brcmf_pcie_ringbuf *rings; 1054 struct brcmf_pcie_ringbuf *rings;
1055 u32 ring_addr; 1055 u32 ring_addr;
1056 u32 d2h_w_idx_ptr; 1056 u32 d2h_w_idx_ptr;
1057 u32 d2h_r_idx_ptr; 1057 u32 d2h_r_idx_ptr;
1058 u32 h2d_w_idx_ptr; 1058 u32 h2d_w_idx_ptr;
1059 u32 h2d_r_idx_ptr; 1059 u32 h2d_r_idx_ptr;
1060 u32 addr; 1060 u32 addr;
1061 u32 ring_mem_ptr; 1061 u32 ring_mem_ptr;
1062 u32 i; 1062 u32 i;
1063 u16 max_sub_queues; 1063 u16 max_sub_queues;
1064 1064
1065 ring_addr = devinfo->shared.ring_info_addr; 1065 ring_addr = devinfo->shared.ring_info_addr;
1066 brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr); 1066 brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
1067 1067
1068 addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET; 1068 addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
1069 d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); 1069 d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1070 addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET; 1070 addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
1071 d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); 1071 d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1072 addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET; 1072 addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
1073 h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); 1073 h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1074 addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET; 1074 addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
1075 h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); 1075 h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1076 1076
1077 addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET; 1077 addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
1078 ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr); 1078 ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1079 1079
1080 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) { 1080 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1081 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); 1081 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1082 if (!ring) 1082 if (!ring)
1083 goto fail; 1083 goto fail;
1084 ring->w_idx_addr = h2d_w_idx_ptr; 1084 ring->w_idx_addr = h2d_w_idx_ptr;
1085 ring->r_idx_addr = h2d_r_idx_ptr; 1085 ring->r_idx_addr = h2d_r_idx_ptr;
1086 ring->id = i; 1086 ring->id = i;
1087 devinfo->shared.commonrings[i] = ring; 1087 devinfo->shared.commonrings[i] = ring;
1088 1088
1089 h2d_w_idx_ptr += sizeof(u32); 1089 h2d_w_idx_ptr += sizeof(u32);
1090 h2d_r_idx_ptr += sizeof(u32); 1090 h2d_r_idx_ptr += sizeof(u32);
1091 ring_mem_ptr += BRCMF_RING_MEM_SZ; 1091 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1092 } 1092 }
1093 1093
1094 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS; 1094 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1095 i < BRCMF_NROF_COMMON_MSGRINGS; i++) { 1095 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1096 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); 1096 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1097 if (!ring) 1097 if (!ring)
1098 goto fail; 1098 goto fail;
1099 ring->w_idx_addr = d2h_w_idx_ptr; 1099 ring->w_idx_addr = d2h_w_idx_ptr;
1100 ring->r_idx_addr = d2h_r_idx_ptr; 1100 ring->r_idx_addr = d2h_r_idx_ptr;
1101 ring->id = i; 1101 ring->id = i;
1102 devinfo->shared.commonrings[i] = ring; 1102 devinfo->shared.commonrings[i] = ring;
1103 1103
1104 d2h_w_idx_ptr += sizeof(u32); 1104 d2h_w_idx_ptr += sizeof(u32);
1105 d2h_r_idx_ptr += sizeof(u32); 1105 d2h_r_idx_ptr += sizeof(u32);
1106 ring_mem_ptr += BRCMF_RING_MEM_SZ; 1106 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1107 } 1107 }
1108 1108
1109 addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES; 1109 addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
1110 max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr); 1110 max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
1111 devinfo->shared.nrof_flowrings = 1111 devinfo->shared.nrof_flowrings =
1112 max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS; 1112 max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
1113 rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring), 1113 rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
1114 GFP_KERNEL); 1114 GFP_KERNEL);
1115 if (!rings) 1115 if (!rings)
1116 goto fail; 1116 goto fail;
1117 1117
1118 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", 1118 brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
1119 devinfo->shared.nrof_flowrings); 1119 devinfo->shared.nrof_flowrings);
1120 1120
1121 for (i = 0; i < devinfo->shared.nrof_flowrings; i++) { 1121 for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
1122 ring = &rings[i]; 1122 ring = &rings[i];
1123 ring->devinfo = devinfo; 1123 ring->devinfo = devinfo;
1124 ring->id = i + BRCMF_NROF_COMMON_MSGRINGS; 1124 ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
1125 brcmf_commonring_register_cb(&ring->commonring, 1125 brcmf_commonring_register_cb(&ring->commonring,
1126 brcmf_pcie_ring_mb_ring_bell, 1126 brcmf_pcie_ring_mb_ring_bell,
1127 brcmf_pcie_ring_mb_update_rptr, 1127 brcmf_pcie_ring_mb_update_rptr,
1128 brcmf_pcie_ring_mb_update_wptr, 1128 brcmf_pcie_ring_mb_update_wptr,
1129 brcmf_pcie_ring_mb_write_rptr, 1129 brcmf_pcie_ring_mb_write_rptr,
1130 brcmf_pcie_ring_mb_write_wptr, 1130 brcmf_pcie_ring_mb_write_wptr,
1131 ring); 1131 ring);
1132 ring->w_idx_addr = h2d_w_idx_ptr; 1132 ring->w_idx_addr = h2d_w_idx_ptr;
1133 ring->r_idx_addr = h2d_r_idx_ptr; 1133 ring->r_idx_addr = h2d_r_idx_ptr;
1134 h2d_w_idx_ptr += sizeof(u32); 1134 h2d_w_idx_ptr += sizeof(u32);
1135 h2d_r_idx_ptr += sizeof(u32); 1135 h2d_r_idx_ptr += sizeof(u32);
1136 } 1136 }
1137 devinfo->shared.flowrings = rings; 1137 devinfo->shared.flowrings = rings;
1138 1138
1139 return 0; 1139 return 0;
1140 1140
1141 fail: 1141 fail:
1142 brcmf_err("Allocating commonring buffers failed\n"); 1142 brcmf_err("Allocating commonring buffers failed\n");
1143 brcmf_pcie_release_ringbuffers(devinfo); 1143 brcmf_pcie_release_ringbuffers(devinfo);
1144 return -ENOMEM; 1144 return -ENOMEM;
1145 } 1145 }
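
The shared ring-info block lays out one u32 index slot per ring in each direction's read and write arrays, which is why the code strides the four *_idx_ptr cursors by sizeof(u32) as it assigns slots, and the flowring count falls out as max_sub_queues minus the fixed H2D common rings. A sketch of the slot walk (field and base names are illustrative):

	/* Sketch of striding through per-ring u32 index slots. */
	u32 w = h2d_w_idx_base;
	u32 r = h2d_r_idx_base;
	unsigned int i;

	for (i = 0; i < nrings; i++) {
		ring[i].w_idx_addr = w;
		ring[i].r_idx_addr = r;
		w += sizeof(u32);
		r += sizeof(u32);
	}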
1146 1146
1147 1147
1148 static void 1148 static void
1149 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo) 1149 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1150 { 1150 {
1151 if (devinfo->shared.scratch) 1151 if (devinfo->shared.scratch)
1152 dma_free_coherent(&devinfo->pdev->dev, 1152 dma_free_coherent(&devinfo->pdev->dev,
1153 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1153 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1154 devinfo->shared.scratch, 1154 devinfo->shared.scratch,
1155 devinfo->shared.scratch_dmahandle); 1155 devinfo->shared.scratch_dmahandle);
1156 if (devinfo->shared.ringupd) 1156 if (devinfo->shared.ringupd)
1157 dma_free_coherent(&devinfo->pdev->dev, 1157 dma_free_coherent(&devinfo->pdev->dev,
1158 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1158 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1159 devinfo->shared.ringupd, 1159 devinfo->shared.ringupd,
1160 devinfo->shared.ringupd_dmahandle); 1160 devinfo->shared.ringupd_dmahandle);
1161 } 1161 }
1162 1162
1163 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) 1163 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1164 { 1164 {
1165 long long address; 1165 long long address;
1166 u32 addr; 1166 u32 addr;
1167 1167
1168 devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev, 1168 devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
1169 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1169 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1170 &devinfo->shared.scratch_dmahandle, GFP_KERNEL); 1170 &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
1171 if (!devinfo->shared.scratch) 1171 if (!devinfo->shared.scratch)
1172 goto fail; 1172 goto fail;
1173 1173
1174 memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1174 memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1175 brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1175 brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1176 1176
1177 addr = devinfo->shared.tcm_base_address + 1177 addr = devinfo->shared.tcm_base_address +
1178 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; 1178 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1179 address = (long long)(long)devinfo->shared.scratch_dmahandle; 1179 address = (long long)(long)devinfo->shared.scratch_dmahandle;
1180 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); 1180 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1181 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); 1181 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1182 addr = devinfo->shared.tcm_base_address + 1182 addr = devinfo->shared.tcm_base_address +
1183 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET; 1183 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1184 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1184 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1185 1185
1186 devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev, 1186 devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
1187 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1187 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1188 &devinfo->shared.ringupd_dmahandle, GFP_KERNEL); 1188 &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
1189 if (!devinfo->shared.ringupd) 1189 if (!devinfo->shared.ringupd)
1190 goto fail; 1190 goto fail;
1191 1191
1192 memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); 1192 memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1193 brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); 1193 brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1194 1194
1195 addr = devinfo->shared.tcm_base_address + 1195 addr = devinfo->shared.tcm_base_address +
1196 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; 1196 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1197 address = (long long)(long)devinfo->shared.ringupd_dmahandle; 1197 address = (long long)(long)devinfo->shared.ringupd_dmahandle;
1198 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); 1198 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1199 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); 1199 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1200 addr = devinfo->shared.tcm_base_address + 1200 addr = devinfo->shared.tcm_base_address +
1201 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET; 1201 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1202 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); 1202 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1203 return 0; 1203 return 0;
1204 1204
1205 fail: 1205 fail:
1206 brcmf_err("Allocating scratch buffers failed\n"); 1206 brcmf_err("Allocating scratch buffers failed\n");
1207 brcmf_pcie_release_scratchbuffers(devinfo); 1207 brcmf_pcie_release_scratchbuffers(devinfo);
1208 return -ENOMEM; 1208 return -ENOMEM;
1209 } 1209 }
1210 1210
1211 1211
1212 static void brcmf_pcie_down(struct device *dev) 1212 static void brcmf_pcie_down(struct device *dev)
1213 { 1213 {
1214 } 1214 }
1215 1215
1216 1216
1217 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb) 1217 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1218 { 1218 {
1219 return 0; 1219 return 0;
1220 } 1220 }
1221 1221
1222 1222
1223 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg, 1223 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1224 uint len) 1224 uint len)
1225 { 1225 {
1226 return 0; 1226 return 0;
1227 } 1227 }
1228 1228
1229 1229
1230 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg, 1230 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1231 uint len) 1231 uint len)
1232 { 1232 {
1233 return 0; 1233 return 0;
1234 } 1234 }
1235 1235
1236 1236
1237 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled) 1237 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1238 { 1238 {
1239 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1239 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1240 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; 1240 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1241 struct brcmf_pciedev_info *devinfo = buspub->devinfo; 1241 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1242 1242
1243 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); 1243 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1244 devinfo->wowl_enabled = enabled; 1244 devinfo->wowl_enabled = enabled;
1245 if (enabled) 1245 if (enabled)
1246 device_set_wakeup_enable(&devinfo->pdev->dev, true); 1246 device_set_wakeup_enable(&devinfo->pdev->dev, true);
1247 else 1247 else
1248 device_set_wakeup_enable(&devinfo->pdev->dev, false); 1248 device_set_wakeup_enable(&devinfo->pdev->dev, false);
1249 } 1249 }
1250 1250
1251 1251
1252 static struct brcmf_bus_ops brcmf_pcie_bus_ops = { 1252 static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1253 .txdata = brcmf_pcie_tx, 1253 .txdata = brcmf_pcie_tx,
1254 .stop = brcmf_pcie_down, 1254 .stop = brcmf_pcie_down,
1255 .txctl = brcmf_pcie_tx_ctlpkt, 1255 .txctl = brcmf_pcie_tx_ctlpkt,
1256 .rxctl = brcmf_pcie_rx_ctlpkt, 1256 .rxctl = brcmf_pcie_rx_ctlpkt,
1257 .wowl_config = brcmf_pcie_wowl_config, 1257 .wowl_config = brcmf_pcie_wowl_config,
1258 }; 1258 };
1259 1259
1260 1260
1261 static int 1261 static int
1262 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, 1262 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1263 u32 sharedram_addr) 1263 u32 sharedram_addr)
1264 { 1264 {
1265 struct brcmf_pcie_shared_info *shared; 1265 struct brcmf_pcie_shared_info *shared;
1266 u32 addr; 1266 u32 addr;
1267 u32 version; 1267 u32 version;
1268 1268
1269 shared = &devinfo->shared; 1269 shared = &devinfo->shared;
1270 shared->tcm_base_address = sharedram_addr; 1270 shared->tcm_base_address = sharedram_addr;
1271 1271
1272 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr); 1272 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1273 version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK; 1273 version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
1274 brcmf_dbg(PCIE, "PCIe protocol version %d\n", version); 1274 brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
1275 if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) || 1275 if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1276 (version < BRCMF_PCIE_MIN_SHARED_VERSION)) { 1276 (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1277 brcmf_err("Unsupported PCIE version %d\n", version); 1277 brcmf_err("Unsupported PCIE version %d\n", version);
1278 return -EINVAL; 1278 return -EINVAL;
1279 } 1279 }
1280 if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) { 1280 if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
1281 brcmf_err("Unsupported legacy TX mode 0x%x\n", 1281 brcmf_err("Unsupported legacy TX mode 0x%x\n",
1282 shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT); 1282 shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
1283 return -EINVAL; 1283 return -EINVAL;
1284 } 1284 }
1285 1285
1286 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET; 1286 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1287 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr); 1287 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1288 if (shared->max_rxbufpost == 0) 1288 if (shared->max_rxbufpost == 0)
1289 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST; 1289 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1290 1290
1291 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET; 1291 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1292 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr); 1292 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1293 1293
1294 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET; 1294 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1295 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr); 1295 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1296 1296
1297 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET; 1297 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1298 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr); 1298 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1299 1299
1300 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET; 1300 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1301 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr); 1301 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1302 1302
1303 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n", 1303 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1304 shared->max_rxbufpost, shared->rx_dataoffset); 1304 shared->max_rxbufpost, shared->rx_dataoffset);
1305 1305
1306 brcmf_pcie_bus_console_init(devinfo); 1306 brcmf_pcie_bus_console_init(devinfo);
1307 1307
1308 return 0; 1308 return 0;
1309 } 1309 }
1310 1310
1311 1311
1312 static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo) 1312 static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
1313 { 1313 {
1314 char *fw_name; 1314 char *fw_name;
1315 char *nvram_name; 1315 char *nvram_name;
1316 uint fw_len, nv_len; 1316 uint fw_len, nv_len;
1317 char end; 1317 char end;
1318 1318
1319 brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip, 1319 brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
1320 devinfo->ci->chiprev); 1320 devinfo->ci->chiprev);
1321 1321
1322 switch (devinfo->ci->chip) { 1322 switch (devinfo->ci->chip) {
1323 case BRCM_CC_43602_CHIP_ID: 1323 case BRCM_CC_43602_CHIP_ID:
1324 fw_name = BRCMF_PCIE_43602_FW_NAME; 1324 fw_name = BRCMF_PCIE_43602_FW_NAME;
1325 nvram_name = BRCMF_PCIE_43602_NVRAM_NAME; 1325 nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
1326 break; 1326 break;
1327 case BRCM_CC_4354_CHIP_ID: 1327 case BRCM_CC_4354_CHIP_ID:
1328 fw_name = BRCMF_PCIE_4354_FW_NAME; 1328 fw_name = BRCMF_PCIE_4354_FW_NAME;
1329 nvram_name = BRCMF_PCIE_4354_NVRAM_NAME; 1329 nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
1330 break; 1330 break;
1331 case BRCM_CC_4356_CHIP_ID: 1331 case BRCM_CC_4356_CHIP_ID:
1332 fw_name = BRCMF_PCIE_4356_FW_NAME; 1332 fw_name = BRCMF_PCIE_4356_FW_NAME;
1333 nvram_name = BRCMF_PCIE_4356_NVRAM_NAME; 1333 nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
1334 break; 1334 break;
1335 case BRCM_CC_43567_CHIP_ID: 1335 case BRCM_CC_43567_CHIP_ID:
1336 case BRCM_CC_43569_CHIP_ID: 1336 case BRCM_CC_43569_CHIP_ID:
1337 case BRCM_CC_43570_CHIP_ID: 1337 case BRCM_CC_43570_CHIP_ID:
1338 fw_name = BRCMF_PCIE_43570_FW_NAME; 1338 fw_name = BRCMF_PCIE_43570_FW_NAME;
1339 nvram_name = BRCMF_PCIE_43570_NVRAM_NAME; 1339 nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
1340 break; 1340 break;
1341 default: 1341 default:
1342 brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip); 1342 brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
1343 return -ENODEV; 1343 return -ENODEV;
1344 } 1344 }
1345 1345
1346 fw_len = sizeof(devinfo->fw_name) - 1; 1346 fw_len = sizeof(devinfo->fw_name) - 1;
1347 nv_len = sizeof(devinfo->nvram_name) - 1; 1347 nv_len = sizeof(devinfo->nvram_name) - 1;
1348 /* check if firmware path is provided by module parameter */ 1348 /* check if firmware path is provided by module parameter */
1349 if (brcmf_firmware_path[0] != '\0') { 1349 if (brcmf_firmware_path[0] != '\0') {
1350 strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len); 1350 strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
1351 strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len); 1351 strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
1352 fw_len -= strlen(devinfo->fw_name); 1352 fw_len -= strlen(devinfo->fw_name);
1353 nv_len -= strlen(devinfo->nvram_name); 1353 nv_len -= strlen(devinfo->nvram_name);
1354 1354
1355 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; 1355 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
1356 if (end != '/') { 1356 if (end != '/') {
1357 strncat(devinfo->fw_name, "/", fw_len); 1357 strncat(devinfo->fw_name, "/", fw_len);
1358 strncat(devinfo->nvram_name, "/", nv_len); 1358 strncat(devinfo->nvram_name, "/", nv_len);
1359 fw_len--; 1359 fw_len--;
1360 nv_len--; 1360 nv_len--;
1361 } 1361 }
1362 } 1362 }
1363 strncat(devinfo->fw_name, fw_name, fw_len); 1363 strncat(devinfo->fw_name, fw_name, fw_len);
1364 strncat(devinfo->nvram_name, nvram_name, nv_len); 1364 strncat(devinfo->nvram_name, nvram_name, nv_len);
1365 1365
1366 return 0; 1366 return 0;
1367 } 1367 }
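
The name assembly above tracks the remaining buffer space by hand across strncpy()/strncat() calls and appends a '/' only when the module-parameter path lacks one. A self-contained sketch that gets the same result with a single bounded format (snprintf truncates safely); this is an illustration, not the driver's code:

	#include <stdio.h>
	#include <string.h>

	/* Sketch: join an optional directory and a file name. */
	static void build_path(char *buf, size_t len,
			       const char *dir, const char *name)
	{
		const char *sep = "";

		/* add a separator only when dir is non-empty and lacks one */
		if (dir[0] != '\0' && dir[strlen(dir) - 1] != '/')
			sep = "/";
		snprintf(buf, len, "%s%s%s", dir, sep, name);
	}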
1368 1368
1369 1369
1370 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, 1370 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1371 const struct firmware *fw, void *nvram, 1371 const struct firmware *fw, void *nvram,
1372 u32 nvram_len) 1372 u32 nvram_len)
1373 { 1373 {
1374 u32 sharedram_addr; 1374 u32 sharedram_addr;
1375 u32 sharedram_addr_written; 1375 u32 sharedram_addr_written;
1376 u32 loop_counter; 1376 u32 loop_counter;
1377 int err; 1377 int err;
1378 u32 address; 1378 u32 address;
1379 u32 resetintr; 1379 u32 resetintr;
1380 1380
1381 devinfo->ringbell = brcmf_pcie_ringbell_v2; 1381 devinfo->ringbell = brcmf_pcie_ringbell_v2;
1382 devinfo->generic_corerev = BRCMF_PCIE_GENREV2; 1382 devinfo->generic_corerev = BRCMF_PCIE_GENREV2;
1383 1383
1384 brcmf_dbg(PCIE, "Halt ARM.\n"); 1384 brcmf_dbg(PCIE, "Halt ARM.\n");
1385 err = brcmf_pcie_enter_download_state(devinfo); 1385 err = brcmf_pcie_enter_download_state(devinfo);
1386 if (err) 1386 if (err)
1387 return err; 1387 return err;
1388 1388
1389 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name); 1389 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1390 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase, 1390 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1391 (void *)fw->data, fw->size); 1391 (void *)fw->data, fw->size);
1392 1392
1393 resetintr = get_unaligned_le32(fw->data); 1393 resetintr = get_unaligned_le32(fw->data);
1394 release_firmware(fw); 1394 release_firmware(fw);
1395 1395
1396 /* Clear the last 4 bytes of RAM; the firmware writes the shared 1396 /* Clear the last 4 bytes of RAM; the firmware writes the shared
1397 * area address there once it is up and running. 1397 * area address there once it is up and running.
1398 */ 1398 */
1399 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0); 1399 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1400 1400
1401 if (nvram) { 1401 if (nvram) {
1402 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name); 1402 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1403 address = devinfo->ci->rambase + devinfo->ci->ramsize - 1403 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1404 nvram_len; 1404 nvram_len;
1405 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); 1405 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1406 brcmf_fw_nvram_free(nvram); 1406 brcmf_fw_nvram_free(nvram);
1407 } else { 1407 } else {
1408 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", 1408 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1409 devinfo->nvram_name); 1409 devinfo->nvram_name);
1410 } 1410 }
1411 1411
1412 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo, 1412 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1413 devinfo->ci->ramsize - 1413 devinfo->ci->ramsize -
1414 4); 1414 4);
1415 brcmf_dbg(PCIE, "Bring ARM in running state\n"); 1415 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1416 err = brcmf_pcie_exit_download_state(devinfo, resetintr); 1416 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1417 if (err) 1417 if (err)
1418 return err; 1418 return err;
1419 1419
1420 brcmf_dbg(PCIE, "Wait for FW init\n"); 1420 brcmf_dbg(PCIE, "Wait for FW init\n");
1421 sharedram_addr = sharedram_addr_written; 1421 sharedram_addr = sharedram_addr_written;
1422 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50; 1422 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1423 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) { 1423 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1424 msleep(50); 1424 msleep(50);
1425 sharedram_addr = brcmf_pcie_read_ram32(devinfo, 1425 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1426 devinfo->ci->ramsize - 1426 devinfo->ci->ramsize -
1427 4); 1427 4);
1428 loop_counter--; 1428 loop_counter--;
1429 } 1429 }
1430 if (sharedram_addr == sharedram_addr_written) { 1430 if (sharedram_addr == sharedram_addr_written) {
1431 brcmf_err("FW failed to initialize\n"); 1431 brcmf_err("FW failed to initialize\n");
1432 return -ENODEV; 1432 return -ENODEV;
1433 } 1433 }
1434 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr); 1434 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1435 1435
1436 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr)); 1436 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1437 } 1437 }
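
The handshake above works because the host zeroes the last word of device RAM before releasing the ARM; the firmware's first visible act is to overwrite that word with the address of the shared structure. A self-contained sketch of the polling loop (the stubs simulate the register accessor, and the timeout constant is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_UP_TIMEOUT_MS 2000	/* stand-in for BRCMF_PCIE_FW_UP_TIMEOUT */
#define POLL_MS 50

/* Stub: pretend the firmware publishes the shared-area address on the
 * third poll. In the driver this is brcmf_pcie_read_ram32(). */
static uint32_t read_ram32(uint32_t offset)
{
	static int calls;

	(void)offset;
	return ++calls >= 3 ? 0x001e0000 : 0;
}

static void msleep(unsigned int ms) { (void)ms; }	/* stub */

static bool wait_fw_ready(uint32_t ramsize, uint32_t *sharedram_addr)
{
	uint32_t initial = read_ram32(ramsize - 4);	/* host wrote 0 here */
	int loops = FW_UP_TIMEOUT_MS / POLL_MS;

	while (loops--) {
		msleep(POLL_MS);
		*sharedram_addr = read_ram32(ramsize - 4);
		if (*sharedram_addr != initial)
			return true;	/* firmware wrote the shared address */
	}
	return false;	/* timed out: firmware never initialized */
}

int main(void)
{
	uint32_t addr;

	if (wait_fw_ready(0x200000, &addr))
		printf("shared RAM addr: 0x%08x\n", addr);
	return 0;
}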
1438 1438
1439 1439
1440 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) 1440 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1441 { 1441 {
1442 struct pci_dev *pdev; 1442 struct pci_dev *pdev;
1443 int err; 1443 int err;
1444 phys_addr_t bar0_addr, bar1_addr; 1444 phys_addr_t bar0_addr, bar1_addr;
1445 ulong bar1_size; 1445 ulong bar1_size;
1446 1446
1447 pdev = devinfo->pdev; 1447 pdev = devinfo->pdev;
1448 1448
1449 err = pci_enable_device(pdev); 1449 err = pci_enable_device(pdev);
1450 if (err) { 1450 if (err) {
1451 brcmf_err("pci_enable_device failed err=%d\n", err); 1451 brcmf_err("pci_enable_device failed err=%d\n", err);
1452 return err; 1452 return err;
1453 } 1453 }
1454 1454
1455 pci_set_master(pdev); 1455 pci_set_master(pdev);
1456 1456
1457 /* Bar-0 mapped address */ 1457 /* Bar-0 mapped address */
1458 bar0_addr = pci_resource_start(pdev, 0); 1458 bar0_addr = pci_resource_start(pdev, 0);
1459 /* Bar-1 mapped address */ 1459 /* Bar-1 mapped address */
1460 bar1_addr = pci_resource_start(pdev, 2); 1460 bar1_addr = pci_resource_start(pdev, 2);
1461 /* read Bar-1 mapped memory range */ 1461 /* read Bar-1 mapped memory range */
1462 bar1_size = pci_resource_len(pdev, 2); 1462 bar1_size = pci_resource_len(pdev, 2);
1463 if ((bar1_size == 0) || (bar1_addr == 0)) { 1463 if ((bar1_size == 0) || (bar1_addr == 0)) {
1464 brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n", 1464 brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1465 bar1_size, (unsigned long long)bar1_addr); 1465 bar1_size, (unsigned long long)bar1_addr);
1466 return -EINVAL; 1466 return -EINVAL;
1467 } 1467 }
1468 1468
1469 devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); 1469 devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1470 devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE); 1470 devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
1471 devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE; 1471 devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;
1472 1472
1473 if (!devinfo->regs || !devinfo->tcm) { 1473 if (!devinfo->regs || !devinfo->tcm) {
1474 brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs, 1474 brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
1475 devinfo->tcm); 1475 devinfo->tcm);
1476 return -EINVAL; 1476 return -EINVAL;
1477 } 1477 }
1478 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n", 1478 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1479 devinfo->regs, (unsigned long long)bar0_addr); 1479 devinfo->regs, (unsigned long long)bar0_addr);
1480 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n", 1480 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
1481 devinfo->tcm, (unsigned long long)bar1_addr); 1481 devinfo->tcm, (unsigned long long)bar1_addr);
1482 1482
1483 return 0; 1483 return 0;
1484 } 1484 }
1485 1485
1486 1486
1487 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo) 1487 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1488 { 1488 {
1489 if (devinfo->tcm) 1489 if (devinfo->tcm)
1490 iounmap(devinfo->tcm); 1490 iounmap(devinfo->tcm);
1491 if (devinfo->regs) 1491 if (devinfo->regs)
1492 iounmap(devinfo->regs); 1492 iounmap(devinfo->regs);
1493 1493
1494 pci_disable_device(devinfo->pdev); 1494 pci_disable_device(devinfo->pdev);
1495 } 1495 }
1496 1496
1497 1497
1498 static int brcmf_pcie_attach_bus(struct device *dev) 1498 static int brcmf_pcie_attach_bus(struct device *dev)
1499 { 1499 {
1500 int ret; 1500 int ret;
1501 1501
1502 /* Attach to the common driver interface */ 1502 /* Attach to the common driver interface */
1503 ret = brcmf_attach(dev); 1503 ret = brcmf_attach(dev);
1504 if (ret) { 1504 if (ret) {
1505 brcmf_err("brcmf_attach failed\n"); 1505 brcmf_err("brcmf_attach failed\n");
1506 } else { 1506 } else {
1507 ret = brcmf_bus_start(dev); 1507 ret = brcmf_bus_start(dev);
1508 if (ret) 1508 if (ret)
1509 brcmf_err("dongle is not responding\n"); 1509 brcmf_err("dongle is not responding\n");
1510 } 1510 }
1511 1511
1512 return ret; 1512 return ret;
1513 } 1513 }
1514 1514
1515 1515
1516 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) 1516 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1517 { 1517 {
1518 u32 ret_addr; 1518 u32 ret_addr;
1519 1519
1520 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1); 1520 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1521 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1); 1521 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1522 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr); 1522 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1523 1523
1524 return ret_addr; 1524 return ret_addr;
1525 } 1525 }
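
brcmf_pcie_buscore_prep_addr() splits a 32-bit backplane address into a window base, written to the BAR0 window register, and an offset inside that window, returned to the caller. A worked example, assuming a 4 KiB window purely for illustration (the real size comes from BRCMF_PCIE_BAR0_REG_SIZE):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 0x1000u	/* assumed stand-in for BRCMF_PCIE_BAR0_REG_SIZE */

int main(void)
{
	uint32_t addr = 0x18003abc;
	uint32_t offset = addr & (WINDOW_SIZE - 1);	/* 0x0abc, used for the access */
	uint32_t base = addr & ~(WINDOW_SIZE - 1);	/* 0x18003000, goes in the window reg */

	printf("window base %#x, offset %#x\n", base, offset);
	return 0;
}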
1526 1526
1527 1527
1528 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr) 1528 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1529 { 1529 {
1530 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; 1530 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1531 1531
1532 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr); 1532 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1533 return brcmf_pcie_read_reg32(devinfo, addr); 1533 return brcmf_pcie_read_reg32(devinfo, addr);
1534 } 1534 }
1535 1535
1536 1536
1537 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value) 1537 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1538 { 1538 {
1539 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; 1539 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1540 1540
1541 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr); 1541 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1542 brcmf_pcie_write_reg32(devinfo, addr, value); 1542 brcmf_pcie_write_reg32(devinfo, addr, value);
1543 } 1543 }
1544 1544
1545 1545
1546 static int brcmf_pcie_buscoreprep(void *ctx) 1546 static int brcmf_pcie_buscoreprep(void *ctx)
1547 { 1547 {
1548 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; 1548 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1549 int err; 1549 int err;
1550 1550
1551 err = brcmf_pcie_get_resource(devinfo); 1551 err = brcmf_pcie_get_resource(devinfo);
1552 if (err == 0) { 1552 if (err == 0) {
1553 /* Set CC watchdog to reset all the cores on the chip to bring 1553 /* Set CC watchdog to reset all the cores on the chip to bring
1554 * back dongle to a sane state. 1554 * back dongle to a sane state.
1555 */ 1555 */
1556 brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE, 1556 brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE,
1557 watchdog), 4); 1557 watchdog), 4);
1558 msleep(100); 1558 msleep(100);
1559 } 1559 }
1560 1560
1561 return err; 1561 return err;
1562 } 1562 }
1563 1563
1564 1564
1565 static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip, 1565 static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
1566 u32 rstvec) 1566 u32 rstvec)
1567 { 1567 {
1568 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; 1568 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1569 1569
1570 brcmf_pcie_write_tcm32(devinfo, 0, rstvec); 1570 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1571 } 1571 }
1572 1572
1573 1573
1574 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { 1574 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1575 .prepare = brcmf_pcie_buscoreprep, 1575 .prepare = brcmf_pcie_buscoreprep,
1576 .exit_dl = brcmf_pcie_buscore_exitdl, 1576 .exit_dl = brcmf_pcie_buscore_exitdl,
1577 .read32 = brcmf_pcie_buscore_read32, 1577 .read32 = brcmf_pcie_buscore_read32,
1578 .write32 = brcmf_pcie_buscore_write32, 1578 .write32 = brcmf_pcie_buscore_write32,
1579 }; 1579 };
1580 1580
1581 static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, 1581 static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
1582 void *nvram, u32 nvram_len) 1582 void *nvram, u32 nvram_len)
1583 { 1583 {
1584 struct brcmf_bus *bus = dev_get_drvdata(dev); 1584 struct brcmf_bus *bus = dev_get_drvdata(dev);
1585 struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; 1585 struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
1586 struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; 1586 struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
1587 struct brcmf_commonring **flowrings; 1587 struct brcmf_commonring **flowrings;
1588 int ret; 1588 int ret;
1589 u32 i; 1589 u32 i;
1590 1590
1591 brcmf_pcie_attach(devinfo); 1591 brcmf_pcie_attach(devinfo);
1592 1592
1593 ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len); 1593 ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1594 if (ret) 1594 if (ret)
1595 goto fail; 1595 goto fail;
1596 1596
1597 devinfo->state = BRCMFMAC_PCIE_STATE_UP; 1597 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1598 1598
1599 ret = brcmf_pcie_init_ringbuffers(devinfo); 1599 ret = brcmf_pcie_init_ringbuffers(devinfo);
1600 if (ret) 1600 if (ret)
1601 goto fail; 1601 goto fail;
1602 1602
1603 ret = brcmf_pcie_init_scratchbuffers(devinfo); 1603 ret = brcmf_pcie_init_scratchbuffers(devinfo);
1604 if (ret) 1604 if (ret)
1605 goto fail; 1605 goto fail;
1606 1606
1607 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 1607 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1608 ret = brcmf_pcie_request_irq(devinfo); 1608 ret = brcmf_pcie_request_irq(devinfo);
1609 if (ret) 1609 if (ret)
1610 goto fail; 1610 goto fail;
1611 1611
1612 /* hook the commonrings into the bus structure. */ 1612 /* hook the commonrings into the bus structure. */
1613 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) 1613 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1614 bus->msgbuf->commonrings[i] = 1614 bus->msgbuf->commonrings[i] =
1615 &devinfo->shared.commonrings[i]->commonring; 1615 &devinfo->shared.commonrings[i]->commonring;
1616 1616
1617 flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings), 1617 flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
1618 GFP_KERNEL); 1618 GFP_KERNEL);
1619 if (!flowrings) 1619 if (!flowrings)
1620 goto fail; 1620 goto fail;
1621 1621
1622 for (i = 0; i < devinfo->shared.nrof_flowrings; i++) 1622 for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
1623 flowrings[i] = &devinfo->shared.flowrings[i].commonring; 1623 flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1624 bus->msgbuf->flowrings = flowrings; 1624 bus->msgbuf->flowrings = flowrings;
1625 1625
1626 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset; 1626 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1627 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost; 1627 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1628 bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings; 1628 bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
1629 1629
1630 init_waitqueue_head(&devinfo->mbdata_resp_wait); 1630 init_waitqueue_head(&devinfo->mbdata_resp_wait);
1631 1631
1632 brcmf_pcie_intr_enable(devinfo); 1632 brcmf_pcie_intr_enable(devinfo);
1633 if (brcmf_pcie_attach_bus(bus->dev) == 0) 1633 if (brcmf_pcie_attach_bus(bus->dev) == 0)
1634 return; 1634 return;
1635 1635
1636 brcmf_pcie_bus_console_read(devinfo); 1636 brcmf_pcie_bus_console_read(devinfo);
1637 1637
1638 fail: 1638 fail:
1639 device_release_driver(dev); 1639 device_release_driver(dev);
1640 } 1640 }
1641 1641
1642 static int 1642 static int
1643 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1643 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1644 { 1644 {
1645 int ret; 1645 int ret;
1646 struct brcmf_pciedev_info *devinfo; 1646 struct brcmf_pciedev_info *devinfo;
1647 struct brcmf_pciedev *pcie_bus_dev; 1647 struct brcmf_pciedev *pcie_bus_dev;
1648 struct brcmf_bus *bus; 1648 struct brcmf_bus *bus;
1649 1649
1650 brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); 1650 brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
1651 1651
1652 ret = -ENOMEM; 1652 ret = -ENOMEM;
1653 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); 1653 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1654 if (devinfo == NULL) 1654 if (devinfo == NULL)
1655 return ret; 1655 return ret;
1656 1656
1657 devinfo->pdev = pdev; 1657 devinfo->pdev = pdev;
1658 pcie_bus_dev = NULL; 1658 pcie_bus_dev = NULL;
1659 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops); 1659 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1660 if (IS_ERR(devinfo->ci)) { 1660 if (IS_ERR(devinfo->ci)) {
1661 ret = PTR_ERR(devinfo->ci); 1661 ret = PTR_ERR(devinfo->ci);
1662 devinfo->ci = NULL; 1662 devinfo->ci = NULL;
1663 goto fail; 1663 goto fail;
1664 } 1664 }
1665 1665
1666 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL); 1666 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1667 if (pcie_bus_dev == NULL) { 1667 if (pcie_bus_dev == NULL) {
1668 ret = -ENOMEM; 1668 ret = -ENOMEM;
1669 goto fail; 1669 goto fail;
1670 } 1670 }
1671 1671
1672 bus = kzalloc(sizeof(*bus), GFP_KERNEL); 1672 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1673 if (!bus) { 1673 if (!bus) {
1674 ret = -ENOMEM; 1674 ret = -ENOMEM;
1675 goto fail; 1675 goto fail;
1676 } 1676 }
1677 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL); 1677 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1678 if (!bus->msgbuf) { 1678 if (!bus->msgbuf) {
1679 ret = -ENOMEM; 1679 ret = -ENOMEM;
1680 kfree(bus); 1680 kfree(bus);
1681 goto fail; 1681 goto fail;
1682 } 1682 }
1683 1683
1684 /* hook it all together. */ 1684 /* hook it all together. */
1685 pcie_bus_dev->devinfo = devinfo; 1685 pcie_bus_dev->devinfo = devinfo;
1686 pcie_bus_dev->bus = bus; 1686 pcie_bus_dev->bus = bus;
1687 bus->dev = &pdev->dev; 1687 bus->dev = &pdev->dev;
1688 bus->bus_priv.pcie = pcie_bus_dev; 1688 bus->bus_priv.pcie = pcie_bus_dev;
1689 bus->ops = &brcmf_pcie_bus_ops; 1689 bus->ops = &brcmf_pcie_bus_ops;
1690 bus->proto_type = BRCMF_PROTO_MSGBUF; 1690 bus->proto_type = BRCMF_PROTO_MSGBUF;
1691 bus->chip = devinfo->coreid; 1691 bus->chip = devinfo->coreid;
1692 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); 1692 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1693 dev_set_drvdata(&pdev->dev, bus); 1693 dev_set_drvdata(&pdev->dev, bus);
1694 1694
1695 ret = brcmf_pcie_get_fwnames(devinfo); 1695 ret = brcmf_pcie_get_fwnames(devinfo);
1696 if (ret) 1696 if (ret)
1697 goto fail_bus; 1697 goto fail_bus;
1698 1698
1699 ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM | 1699 ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
1700 BRCMF_FW_REQ_NV_OPTIONAL, 1700 BRCMF_FW_REQ_NV_OPTIONAL,
1701 devinfo->fw_name, devinfo->nvram_name, 1701 devinfo->fw_name, devinfo->nvram_name,
1702 brcmf_pcie_setup); 1702 brcmf_pcie_setup);
1703 if (ret == 0) 1703 if (ret == 0)
1704 return 0; 1704 return 0;
1705 fail_bus: 1705 fail_bus:
1706 kfree(bus->msgbuf); 1706 kfree(bus->msgbuf);
1707 kfree(bus); 1707 kfree(bus);
1708 fail: 1708 fail:
1709 brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device); 1709 brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
1710 brcmf_pcie_release_resource(devinfo); 1710 brcmf_pcie_release_resource(devinfo);
1711 if (devinfo->ci) 1711 if (devinfo->ci)
1712 brcmf_chip_detach(devinfo->ci); 1712 brcmf_chip_detach(devinfo->ci);
1713 kfree(pcie_bus_dev); 1713 kfree(pcie_bus_dev);
1714 kfree(devinfo); 1714 kfree(devinfo);
1715 return ret; 1715 return ret;
1716 } 1716 }
1717 1717
1718 1718
1719 static void 1719 static void
1720 brcmf_pcie_remove(struct pci_dev *pdev) 1720 brcmf_pcie_remove(struct pci_dev *pdev)
1721 { 1721 {
1722 struct brcmf_pciedev_info *devinfo; 1722 struct brcmf_pciedev_info *devinfo;
1723 struct brcmf_bus *bus; 1723 struct brcmf_bus *bus;
1724 1724
1725 brcmf_dbg(PCIE, "Enter\n"); 1725 brcmf_dbg(PCIE, "Enter\n");
1726 1726
1727 bus = dev_get_drvdata(&pdev->dev); 1727 bus = dev_get_drvdata(&pdev->dev);
1728 if (bus == NULL) 1728 if (bus == NULL)
1729 return; 1729 return;
1730 1730
1731 devinfo = bus->bus_priv.pcie->devinfo; 1731 devinfo = bus->bus_priv.pcie->devinfo;
1732 1732
1733 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN; 1733 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1734 if (devinfo->ci) 1734 if (devinfo->ci)
1735 brcmf_pcie_intr_disable(devinfo); 1735 brcmf_pcie_intr_disable(devinfo);
1736 1736
1737 brcmf_detach(&pdev->dev); 1737 brcmf_detach(&pdev->dev);
1738 1738
1739 kfree(bus->bus_priv.pcie); 1739 kfree(bus->bus_priv.pcie);
1740 kfree(bus->msgbuf->flowrings); 1740 kfree(bus->msgbuf->flowrings);
1741 kfree(bus->msgbuf); 1741 kfree(bus->msgbuf);
1742 kfree(bus); 1742 kfree(bus);
1743 1743
1744 brcmf_pcie_release_irq(devinfo); 1744 brcmf_pcie_release_irq(devinfo);
1745 brcmf_pcie_release_scratchbuffers(devinfo); 1745 brcmf_pcie_release_scratchbuffers(devinfo);
1746 brcmf_pcie_release_ringbuffers(devinfo); 1746 brcmf_pcie_release_ringbuffers(devinfo);
1747 brcmf_pcie_reset_device(devinfo); 1747 brcmf_pcie_reset_device(devinfo);
1748 brcmf_pcie_release_resource(devinfo); 1748 brcmf_pcie_release_resource(devinfo);
1749 1749
1750 if (devinfo->ci) 1750 if (devinfo->ci)
1751 brcmf_chip_detach(devinfo->ci); 1751 brcmf_chip_detach(devinfo->ci);
1752 1752
1753 kfree(devinfo); 1753 kfree(devinfo);
1754 dev_set_drvdata(&pdev->dev, NULL); 1754 dev_set_drvdata(&pdev->dev, NULL);
1755 } 1755 }
1756 1756
1757 1757
1758 #ifdef CONFIG_PM 1758 #ifdef CONFIG_PM
1759 1759
1760 1760
1761 static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state) 1761 static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
1762 { 1762 {
1763 struct brcmf_pciedev_info *devinfo; 1763 struct brcmf_pciedev_info *devinfo;
1764 struct brcmf_bus *bus; 1764 struct brcmf_bus *bus;
1765 int err; 1765 int err;
1766 1766
1767 brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev); 1767 brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);
1768 1768
1769 bus = dev_get_drvdata(&pdev->dev); 1769 bus = dev_get_drvdata(&pdev->dev);
1770 devinfo = bus->bus_priv.pcie->devinfo; 1770 devinfo = bus->bus_priv.pcie->devinfo;
1771 1771
1772 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN); 1772 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
1773 1773
1774 devinfo->mbdata_completed = false; 1774 devinfo->mbdata_completed = false;
1775 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM); 1775 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
1776 1776
1777 wait_event_timeout(devinfo->mbdata_resp_wait, 1777 wait_event_timeout(devinfo->mbdata_resp_wait,
1778 devinfo->mbdata_completed, 1778 devinfo->mbdata_completed,
1779 msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT)); 1779 msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
1780 if (!devinfo->mbdata_completed) { 1780 if (!devinfo->mbdata_completed) {
1781 brcmf_err("Timeout on response for entering D3 substate\n"); 1781 brcmf_err("Timeout on response for entering D3 substate\n");
1782 return -EIO; 1782 return -EIO;
1783 } 1783 }
1784 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE); 1784 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);
1785 1785
1786 err = pci_save_state(pdev); 1786 err = pci_save_state(pdev);
1787 if (err) 1787 if (err)
1788 brcmf_err("pci_save_state failed, err=%d\n", err); 1788 brcmf_err("pci_save_state failed, err=%d\n", err);
1789 if ((err) || (!devinfo->wowl_enabled)) { 1789 if ((err) || (!devinfo->wowl_enabled)) {
1790 brcmf_chip_detach(devinfo->ci); 1790 brcmf_chip_detach(devinfo->ci);
1791 devinfo->ci = NULL; 1791 devinfo->ci = NULL;
1792 brcmf_pcie_remove(pdev); 1792 brcmf_pcie_remove(pdev);
1793 return 0; 1793 return 0;
1794 } 1794 }
1795 1795
1796 return pci_prepare_to_sleep(pdev); 1796 return pci_prepare_to_sleep(pdev);
1797 } 1797 }
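
The suspend path is a mailbox handshake: clear the completion flag, post BRCMF_H2D_HOST_D3_INFORM, then sleep until the interrupt handler flips the flag or the timeout expires; only a confirmed D3 entry goes on to pci_prepare_to_sleep(). A minimal model of that ordering (the stubs and the timeout value stand in for the driver's wait_event_timeout() machinery):

#include <stdbool.h>
#include <stdio.h>

#define D3_INFORM 0x01	/* stand-in for BRCMF_H2D_HOST_D3_INFORM */

static bool mbdata_completed;

/* Stub: in the driver this rings the doorbell and the IRQ handler sets
 * mbdata_completed before waking the waiter; here we ack immediately. */
static void send_mb_data(unsigned int data)
{
	(void)data;
	mbdata_completed = true;
}

/* Stub for wait_event_timeout(): true if the flag was set in time. */
static bool wait_flag_timeout(bool *flag, unsigned int ms)
{
	(void)ms;
	return *flag;
}

static int enter_d3(void)
{
	mbdata_completed = false;	/* arm before sending the request */
	send_mb_data(D3_INFORM);
	if (!wait_flag_timeout(&mbdata_completed, 2000))
		return -1;	/* device never acknowledged D3 entry */
	return 0;
}

int main(void)
{
	printf("enter_d3 -> %d\n", enter_d3());
	return 0;
}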
1798 1798
1799 static int brcmf_pcie_resume(struct pci_dev *pdev) 1799 static int brcmf_pcie_resume(struct pci_dev *pdev)
1800 { 1800 {
1801 struct brcmf_pciedev_info *devinfo; 1801 struct brcmf_pciedev_info *devinfo;
1802 struct brcmf_bus *bus; 1802 struct brcmf_bus *bus;
1803 int err; 1803 int err;
1804 1804
1805 bus = dev_get_drvdata(&pdev->dev); 1805 bus = dev_get_drvdata(&pdev->dev);
1806 brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus); 1806 brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);
1807 1807
1808 err = pci_set_power_state(pdev, PCI_D0); 1808 err = pci_set_power_state(pdev, PCI_D0);
1809 if (err) { 1809 if (err) {
1810 brcmf_err("pci_set_power_state failed, err=%d\n", err); 1810 brcmf_err("pci_set_power_state failed, err=%d\n", err);
1811 goto cleanup; 1811 goto cleanup;
1812 } 1812 }
1813 pci_restore_state(pdev); 1813 pci_restore_state(pdev);
1814 pci_enable_wake(pdev, PCI_D3hot, false); 1814 pci_enable_wake(pdev, PCI_D3hot, false);
1815 pci_enable_wake(pdev, PCI_D3cold, false); 1815 pci_enable_wake(pdev, PCI_D3cold, false);
1816 1816
1817 /* Check if device is still up and running; if so, we are ready */ 1817 /* Check if device is still up and running; if so, we are ready */
1818 if (bus) { 1818 if (bus) {
1819 devinfo = bus->bus_priv.pcie->devinfo; 1819 devinfo = bus->bus_priv.pcie->devinfo;
1820 if (brcmf_pcie_read_reg32(devinfo, 1820 if (brcmf_pcie_read_reg32(devinfo,
1821 BRCMF_PCIE_PCIE2REG_INTMASK) != 0) { 1821 BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
1822 if (brcmf_pcie_send_mb_data(devinfo, 1822 if (brcmf_pcie_send_mb_data(devinfo,
1823 BRCMF_H2D_HOST_D0_INFORM)) 1823 BRCMF_H2D_HOST_D0_INFORM))
1824 goto cleanup; 1824 goto cleanup;
1825 brcmf_dbg(PCIE, "Hot resume, continue....\n"); 1825 brcmf_dbg(PCIE, "Hot resume, continue....\n");
1826 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 1826 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1827 brcmf_bus_change_state(bus, BRCMF_BUS_DATA); 1827 brcmf_bus_change_state(bus, BRCMF_BUS_DATA);
1828 brcmf_pcie_intr_enable(devinfo); 1828 brcmf_pcie_intr_enable(devinfo);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 } 1831 }
1832 1832
1833 cleanup: 1833 cleanup:
1834 if (bus) { 1834 if (bus) {
1835 devinfo = bus->bus_priv.pcie->devinfo; 1835 devinfo = bus->bus_priv.pcie->devinfo;
1836 brcmf_chip_detach(devinfo->ci); 1836 brcmf_chip_detach(devinfo->ci);
1837 devinfo->ci = NULL; 1837 devinfo->ci = NULL;
1838 brcmf_pcie_remove(pdev); 1838 brcmf_pcie_remove(pdev);
1839 } 1839 }
1840 err = brcmf_pcie_probe(pdev, NULL); 1840 err = brcmf_pcie_probe(pdev, NULL);
1841 if (err) 1841 if (err)
1842 brcmf_err("probe after resume failed, err=%d\n", err); 1842 brcmf_err("probe after resume failed, err=%d\n", err);
1843 1843
1844 return err; 1844 return err;
1845 } 1845 }
1846 1846
1847 1847
1848 #endif /* CONFIG_PM */ 1848 #endif /* CONFIG_PM */
1849 1849
1850 1850
1851 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ 1851 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
1852 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 1852 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
1853 1853
1854 static struct pci_device_id brcmf_pcie_devid_table[] = { 1854 static struct pci_device_id brcmf_pcie_devid_table[] = {
1855 BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID), 1855 BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
1856 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), 1856 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
1857 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), 1857 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
1858 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), 1858 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
1859 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID), 1859 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
1860 { /* end: all zeroes */ } 1860 { /* end: all zeroes */ }
1861 }; 1861 };
1862 1862
1863 1863
1864 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table); 1864 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
1865 1865
1866 1866
1867 static struct pci_driver brcmf_pciedrvr = { 1867 static struct pci_driver brcmf_pciedrvr = {
1868 .node = {}, 1868 .node = {},
1869 .name = KBUILD_MODNAME, 1869 .name = KBUILD_MODNAME,
1870 .id_table = brcmf_pcie_devid_table, 1870 .id_table = brcmf_pcie_devid_table,
1871 .probe = brcmf_pcie_probe, 1871 .probe = brcmf_pcie_probe,
1872 .remove = brcmf_pcie_remove, 1872 .remove = brcmf_pcie_remove,
1873 #ifdef CONFIG_PM 1873 #ifdef CONFIG_PM
1874 .suspend = brcmf_pcie_suspend, 1874 .suspend = brcmf_pcie_suspend,
1875 .resume = brcmf_pcie_resume 1875 .resume = brcmf_pcie_resume
1876 #endif /* CONFIG_PM */ 1876 #endif /* CONFIG_PM */
1877 }; 1877 };
1878 1878
1879 1879
1880 void brcmf_pcie_register(void) 1880 void brcmf_pcie_register(void)
1881 { 1881 {
1882 int err; 1882 int err;
1883 1883
1884 brcmf_dbg(PCIE, "Enter\n"); 1884 brcmf_dbg(PCIE, "Enter\n");
1885 err = pci_register_driver(&brcmf_pciedrvr); 1885 err = pci_register_driver(&brcmf_pciedrvr);
1886 if (err) 1886 if (err)
1887 brcmf_err("PCIE driver registration failed, err=%d\n", err); 1887 brcmf_err("PCIE driver registration failed, err=%d\n", err);
1888 } 1888 }
1889 1889
1890 1890
1891 void brcmf_pcie_exit(void) 1891 void brcmf_pcie_exit(void)
1892 { 1892 {
1893 brcmf_dbg(PCIE, "Enter\n"); 1893 brcmf_dbg(PCIE, "Enter\n");
1894 pci_unregister_driver(&brcmf_pciedrvr); 1894 pci_unregister_driver(&brcmf_pciedrvr);
1895 } 1895 }
drivers/net/wireless/brcm80211/brcmfmac/usb.c
1 /* 1 /*
2 * Copyright (c) 2011 Broadcom Corporation 2 * Copyright (c) 2011 Broadcom Corporation
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies. 6 * copyright notice and this permission notice appear in all copies.
7 * 7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/firmware.h> 19 #include <linux/firmware.h>
20 #include <linux/usb.h> 20 #include <linux/usb.h>
21 #include <linux/vmalloc.h> 21 #include <linux/vmalloc.h>
22 22
23 #include <brcmu_utils.h> 23 #include <brcmu_utils.h>
24 #include <brcm_hw_ids.h> 24 #include <brcm_hw_ids.h>
25 #include <brcmu_wifi.h> 25 #include <brcmu_wifi.h>
26 #include <dhd_bus.h> 26 #include <dhd_bus.h>
27 #include <dhd_dbg.h> 27 #include <dhd_dbg.h>
28 28
29 #include "firmware.h" 29 #include "firmware.h"
30 #include "usb_rdl.h" 30 #include "usb_rdl.h"
31 #include "usb.h" 31 #include "usb.h"
32 32
33 #define IOCTL_RESP_TIMEOUT 2000 33 #define IOCTL_RESP_TIMEOUT 2000
34 34
35 #define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */ 35 #define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
36 #define BRCMF_USB_RESET_GETVER_LOOP_CNT 10 36 #define BRCMF_USB_RESET_GETVER_LOOP_CNT 10
37 37
38 #define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle 38 #define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle
39 has booted up */ 39 has booted up */
40 #define BRCMF_USB_NRXQ 50 40 #define BRCMF_USB_NRXQ 50
41 #define BRCMF_USB_NTXQ 50 41 #define BRCMF_USB_NTXQ 50
42 42
43 #define BRCMF_USB_CBCTL_WRITE 0 43 #define BRCMF_USB_CBCTL_WRITE 0
44 #define BRCMF_USB_CBCTL_READ 1 44 #define BRCMF_USB_CBCTL_READ 1
45 #define BRCMF_USB_MAX_PKT_SIZE 1600 45 #define BRCMF_USB_MAX_PKT_SIZE 1600
46 46
47 #define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin" 47 #define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin"
48 #define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin" 48 #define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
49 #define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin" 49 #define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
50 #define BRCMF_USB_43569_FW_NAME "brcm/brcmfmac43569.bin" 50 #define BRCMF_USB_43569_FW_NAME "brcm/brcmfmac43569.bin"
51 51
52 struct brcmf_usb_image { 52 struct brcmf_usb_image {
53 struct list_head list; 53 struct list_head list;
54 s8 *fwname; 54 s8 *fwname;
55 u8 *image; 55 u8 *image;
56 int image_len; 56 int image_len;
57 }; 57 };
58 58
59 struct brcmf_usbdev_info { 59 struct brcmf_usbdev_info {
60 struct brcmf_usbdev bus_pub; /* MUST BE FIRST */ 60 struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
61 spinlock_t qlock; 61 spinlock_t qlock;
62 struct list_head rx_freeq; 62 struct list_head rx_freeq;
63 struct list_head rx_postq; 63 struct list_head rx_postq;
64 struct list_head tx_freeq; 64 struct list_head tx_freeq;
65 struct list_head tx_postq; 65 struct list_head tx_postq;
66 uint rx_pipe, tx_pipe; 66 uint rx_pipe, tx_pipe;
67 67
68 int rx_low_watermark; 68 int rx_low_watermark;
69 int tx_low_watermark; 69 int tx_low_watermark;
70 int tx_high_watermark; 70 int tx_high_watermark;
71 int tx_freecount; 71 int tx_freecount;
72 bool tx_flowblock; 72 bool tx_flowblock;
73 spinlock_t tx_flowblock_lock; 73 spinlock_t tx_flowblock_lock;
74 74
75 struct brcmf_usbreq *tx_reqs; 75 struct brcmf_usbreq *tx_reqs;
76 struct brcmf_usbreq *rx_reqs; 76 struct brcmf_usbreq *rx_reqs;
77 77
78 const u8 *image; /* buffer for combined fw and nvram */ 78 const u8 *image; /* buffer for combined fw and nvram */
79 int image_len; 79 int image_len;
80 80
81 struct usb_device *usbdev; 81 struct usb_device *usbdev;
82 struct device *dev; 82 struct device *dev;
83 83
84 int ctl_in_pipe, ctl_out_pipe; 84 int ctl_in_pipe, ctl_out_pipe;
85 struct urb *ctl_urb; /* URB for control endpoint */ 85 struct urb *ctl_urb; /* URB for control endpoint */
86 struct usb_ctrlrequest ctl_write; 86 struct usb_ctrlrequest ctl_write;
87 struct usb_ctrlrequest ctl_read; 87 struct usb_ctrlrequest ctl_read;
88 u32 ctl_urb_actual_length; 88 u32 ctl_urb_actual_length;
89 int ctl_urb_status; 89 int ctl_urb_status;
90 int ctl_completed; 90 int ctl_completed;
91 wait_queue_head_t ioctl_resp_wait; 91 wait_queue_head_t ioctl_resp_wait;
92 ulong ctl_op; 92 ulong ctl_op;
93 u8 ifnum; 93 u8 ifnum;
94 94
95 struct urb *bulk_urb; /* used for FW download */ 95 struct urb *bulk_urb; /* used for FW download */
96 }; 96 };
97 97
98 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 98 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
99 struct brcmf_usbreq *req); 99 struct brcmf_usbreq *req);
100 100
101 static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev) 101 static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
102 { 102 {
103 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 103 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
104 return bus_if->bus_priv.usb; 104 return bus_if->bus_priv.usb;
105 } 105 }
106 106
107 static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev) 107 static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
108 { 108 {
109 return brcmf_usb_get_buspub(dev)->devinfo; 109 return brcmf_usb_get_buspub(dev)->devinfo;
110 } 110 }
111 111
112 static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo) 112 static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
113 { 113 {
114 return wait_event_timeout(devinfo->ioctl_resp_wait, 114 return wait_event_timeout(devinfo->ioctl_resp_wait,
115 devinfo->ctl_completed, 115 devinfo->ctl_completed,
116 msecs_to_jiffies(IOCTL_RESP_TIMEOUT)); 116 msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
117 } 117 }
118 118
119 static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo) 119 static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
120 { 120 {
121 if (waitqueue_active(&devinfo->ioctl_resp_wait)) 121 if (waitqueue_active(&devinfo->ioctl_resp_wait))
122 wake_up(&devinfo->ioctl_resp_wait); 122 wake_up(&devinfo->ioctl_resp_wait);
123 } 123 }
124 124
125 static void 125 static void
126 brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status) 126 brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
127 { 127 {
128 brcmf_dbg(USB, "Enter, status=%d\n", status); 128 brcmf_dbg(USB, "Enter, status=%d\n", status);
129 129
130 if (unlikely(devinfo == NULL)) 130 if (unlikely(devinfo == NULL))
131 return; 131 return;
132 132
133 if (type == BRCMF_USB_CBCTL_READ) { 133 if (type == BRCMF_USB_CBCTL_READ) {
134 if (status == 0) 134 if (status == 0)
135 devinfo->bus_pub.stats.rx_ctlpkts++; 135 devinfo->bus_pub.stats.rx_ctlpkts++;
136 else 136 else
137 devinfo->bus_pub.stats.rx_ctlerrs++; 137 devinfo->bus_pub.stats.rx_ctlerrs++;
138 } else if (type == BRCMF_USB_CBCTL_WRITE) { 138 } else if (type == BRCMF_USB_CBCTL_WRITE) {
139 if (status == 0) 139 if (status == 0)
140 devinfo->bus_pub.stats.tx_ctlpkts++; 140 devinfo->bus_pub.stats.tx_ctlpkts++;
141 else 141 else
142 devinfo->bus_pub.stats.tx_ctlerrs++; 142 devinfo->bus_pub.stats.tx_ctlerrs++;
143 } 143 }
144 144
145 devinfo->ctl_urb_status = status; 145 devinfo->ctl_urb_status = status;
146 devinfo->ctl_completed = true; 146 devinfo->ctl_completed = true;
147 brcmf_usb_ioctl_resp_wake(devinfo); 147 brcmf_usb_ioctl_resp_wake(devinfo);
148 } 148 }
149 149
150 static void 150 static void
151 brcmf_usb_ctlread_complete(struct urb *urb) 151 brcmf_usb_ctlread_complete(struct urb *urb)
152 { 152 {
153 struct brcmf_usbdev_info *devinfo = 153 struct brcmf_usbdev_info *devinfo =
154 (struct brcmf_usbdev_info *)urb->context; 154 (struct brcmf_usbdev_info *)urb->context;
155 155
156 brcmf_dbg(USB, "Enter\n"); 156 brcmf_dbg(USB, "Enter\n");
157 devinfo->ctl_urb_actual_length = urb->actual_length; 157 devinfo->ctl_urb_actual_length = urb->actual_length;
158 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ, 158 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
159 urb->status); 159 urb->status);
160 } 160 }
161 161
162 static void 162 static void
163 brcmf_usb_ctlwrite_complete(struct urb *urb) 163 brcmf_usb_ctlwrite_complete(struct urb *urb)
164 { 164 {
165 struct brcmf_usbdev_info *devinfo = 165 struct brcmf_usbdev_info *devinfo =
166 (struct brcmf_usbdev_info *)urb->context; 166 (struct brcmf_usbdev_info *)urb->context;
167 167
168 brcmf_dbg(USB, "Enter\n"); 168 brcmf_dbg(USB, "Enter\n");
169 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE, 169 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
170 urb->status); 170 urb->status);
171 } 171 }
172 172
173 static int 173 static int
174 brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len) 174 brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
175 { 175 {
176 int ret; 176 int ret;
177 u16 size; 177 u16 size;
178 178
179 brcmf_dbg(USB, "Enter\n"); 179 brcmf_dbg(USB, "Enter\n");
180 if (devinfo == NULL || buf == NULL || 180 if (devinfo == NULL || buf == NULL ||
181 len == 0 || devinfo->ctl_urb == NULL) 181 len == 0 || devinfo->ctl_urb == NULL)
182 return -EINVAL; 182 return -EINVAL;
183 183
184 size = len; 184 size = len;
185 devinfo->ctl_write.wLength = cpu_to_le16p(&size); 185 devinfo->ctl_write.wLength = cpu_to_le16p(&size);
186 devinfo->ctl_urb->transfer_buffer_length = size; 186 devinfo->ctl_urb->transfer_buffer_length = size;
187 devinfo->ctl_urb_status = 0; 187 devinfo->ctl_urb_status = 0;
188 devinfo->ctl_urb_actual_length = 0; 188 devinfo->ctl_urb_actual_length = 0;
189 189
190 usb_fill_control_urb(devinfo->ctl_urb, 190 usb_fill_control_urb(devinfo->ctl_urb,
191 devinfo->usbdev, 191 devinfo->usbdev,
192 devinfo->ctl_out_pipe, 192 devinfo->ctl_out_pipe,
193 (unsigned char *) &devinfo->ctl_write, 193 (unsigned char *) &devinfo->ctl_write,
194 buf, size, 194 buf, size,
195 (usb_complete_t)brcmf_usb_ctlwrite_complete, 195 (usb_complete_t)brcmf_usb_ctlwrite_complete,
196 devinfo); 196 devinfo);
197 197
198 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC); 198 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
199 if (ret < 0) 199 if (ret < 0)
200 brcmf_err("usb_submit_urb failed %d\n", ret); 200 brcmf_err("usb_submit_urb failed %d\n", ret);
201 201
202 return ret; 202 return ret;
203 } 203 }
204 204
205 static int 205 static int
206 brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len) 206 brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
207 { 207 {
208 int ret; 208 int ret;
209 u16 size; 209 u16 size;
210 210
211 brcmf_dbg(USB, "Enter\n"); 211 brcmf_dbg(USB, "Enter\n");
212 if ((devinfo == NULL) || (buf == NULL) || (len == 0) 212 if ((devinfo == NULL) || (buf == NULL) || (len == 0)
213 || (devinfo->ctl_urb == NULL)) 213 || (devinfo->ctl_urb == NULL))
214 return -EINVAL; 214 return -EINVAL;
215 215
216 size = len; 216 size = len;
217 devinfo->ctl_read.wLength = cpu_to_le16p(&size); 217 devinfo->ctl_read.wLength = cpu_to_le16p(&size);
218 devinfo->ctl_urb->transfer_buffer_length = size; 218 devinfo->ctl_urb->transfer_buffer_length = size;
219 219
220 devinfo->ctl_read.bRequestType = USB_DIR_IN 220 devinfo->ctl_read.bRequestType = USB_DIR_IN
221 | USB_TYPE_CLASS | USB_RECIP_INTERFACE; 221 | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
222 devinfo->ctl_read.bRequest = 1; 222 devinfo->ctl_read.bRequest = 1;
223 223
224 usb_fill_control_urb(devinfo->ctl_urb, 224 usb_fill_control_urb(devinfo->ctl_urb,
225 devinfo->usbdev, 225 devinfo->usbdev,
226 devinfo->ctl_in_pipe, 226 devinfo->ctl_in_pipe,
227 (unsigned char *) &devinfo->ctl_read, 227 (unsigned char *) &devinfo->ctl_read,
228 buf, size, 228 buf, size,
229 (usb_complete_t)brcmf_usb_ctlread_complete, 229 (usb_complete_t)brcmf_usb_ctlread_complete,
230 devinfo); 230 devinfo);
231 231
232 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC); 232 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
233 if (ret < 0) 233 if (ret < 0)
234 brcmf_err("usb_submit_urb failed %d\n", ret); 234 brcmf_err("usb_submit_urb failed %d\n", ret);
235 235
236 return ret; 236 return ret;
237 } 237 }
238 238
239 static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len) 239 static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
240 { 240 {
241 int err = 0; 241 int err = 0;
242 int timeout = 0; 242 int timeout = 0;
243 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 243 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
244 244
245 brcmf_dbg(USB, "Enter\n"); 245 brcmf_dbg(USB, "Enter\n");
246 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) 246 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
247 return -EIO; 247 return -EIO;
248 248
249 if (test_and_set_bit(0, &devinfo->ctl_op)) 249 if (test_and_set_bit(0, &devinfo->ctl_op))
250 return -EIO; 250 return -EIO;
251 251
252 devinfo->ctl_completed = false; 252 devinfo->ctl_completed = false;
253 err = brcmf_usb_send_ctl(devinfo, buf, len); 253 err = brcmf_usb_send_ctl(devinfo, buf, len);
254 if (err) { 254 if (err) {
255 brcmf_err("fail %d bytes: %d\n", err, len); 255 brcmf_err("fail %d bytes: %d\n", err, len);
256 clear_bit(0, &devinfo->ctl_op); 256 clear_bit(0, &devinfo->ctl_op);
257 return err; 257 return err;
258 } 258 }
259 timeout = brcmf_usb_ioctl_resp_wait(devinfo); 259 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
260 clear_bit(0, &devinfo->ctl_op); 260 clear_bit(0, &devinfo->ctl_op);
261 if (!timeout) { 261 if (!timeout) {
262 brcmf_err("Txctl wait timed out\n"); 262 brcmf_err("Txctl wait timed out\n");
263 err = -EIO; 263 err = -EIO;
264 } 264 }
265 return err; 265 return err;
266 } 266 }
267 267
268 static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len) 268 static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
269 { 269 {
270 int err = 0; 270 int err = 0;
271 int timeout = 0; 271 int timeout = 0;
272 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 272 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
273 273
274 brcmf_dbg(USB, "Enter\n"); 274 brcmf_dbg(USB, "Enter\n");
275 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) 275 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
276 return -EIO; 276 return -EIO;
277 277
278 if (test_and_set_bit(0, &devinfo->ctl_op)) 278 if (test_and_set_bit(0, &devinfo->ctl_op))
279 return -EIO; 279 return -EIO;
280 280
281 devinfo->ctl_completed = false; 281 devinfo->ctl_completed = false;
282 err = brcmf_usb_recv_ctl(devinfo, buf, len); 282 err = brcmf_usb_recv_ctl(devinfo, buf, len);
283 if (err) { 283 if (err) {
284 brcmf_err("fail %d bytes: %d\n", err, len); 284 brcmf_err("fail %d bytes: %d\n", err, len);
285 clear_bit(0, &devinfo->ctl_op); 285 clear_bit(0, &devinfo->ctl_op);
286 return err; 286 return err;
287 } 287 }
288 timeout = brcmf_usb_ioctl_resp_wait(devinfo); 288 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
289 err = devinfo->ctl_urb_status; 289 err = devinfo->ctl_urb_status;
290 clear_bit(0, &devinfo->ctl_op); 290 clear_bit(0, &devinfo->ctl_op);
291 if (!timeout) { 291 if (!timeout) {
292 brcmf_err("rxctl wait timed out\n"); 292 brcmf_err("rxctl wait timed out\n");
293 err = -EIO; 293 err = -EIO;
294 } 294 }
295 if (!err) 295 if (!err)
296 return devinfo->ctl_urb_actual_length; 296 return devinfo->ctl_urb_actual_length;
297 else 297 else
298 return err; 298 return err;
299 } 299 }
300 300
301 static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo, 301 static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
302 struct list_head *q, int *counter) 302 struct list_head *q, int *counter)
303 { 303 {
304 unsigned long flags; 304 unsigned long flags;
305 struct brcmf_usbreq *req; 305 struct brcmf_usbreq *req;
306 spin_lock_irqsave(&devinfo->qlock, flags); 306 spin_lock_irqsave(&devinfo->qlock, flags);
307 if (list_empty(q)) { 307 if (list_empty(q)) {
308 spin_unlock_irqrestore(&devinfo->qlock, flags); 308 spin_unlock_irqrestore(&devinfo->qlock, flags);
309 return NULL; 309 return NULL;
310 } 310 }
311 req = list_entry(q->next, struct brcmf_usbreq, list); 311 req = list_entry(q->next, struct brcmf_usbreq, list);
312 list_del_init(q->next); 312 list_del_init(q->next);
313 if (counter) 313 if (counter)
314 (*counter)--; 314 (*counter)--;
315 spin_unlock_irqrestore(&devinfo->qlock, flags); 315 spin_unlock_irqrestore(&devinfo->qlock, flags);
316 return req; 316 return req;
317 317
318 } 318 }
319 319
320 static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo, 320 static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
321 struct list_head *q, struct brcmf_usbreq *req, 321 struct list_head *q, struct brcmf_usbreq *req,
322 int *counter) 322 int *counter)
323 { 323 {
324 unsigned long flags; 324 unsigned long flags;
325 spin_lock_irqsave(&devinfo->qlock, flags); 325 spin_lock_irqsave(&devinfo->qlock, flags);
326 list_add_tail(&req->list, q); 326 list_add_tail(&req->list, q);
327 if (counter) 327 if (counter)
328 (*counter)++; 328 (*counter)++;
329 spin_unlock_irqrestore(&devinfo->qlock, flags); 329 spin_unlock_irqrestore(&devinfo->qlock, flags);
330 } 330 }
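
All four request queues share one spinlock, and every producer and consumer goes through the same two helpers, with an optional counter maintained for the tx free list. A userspace model of the same FIFO discipline (a mutex stands in for the irqsave spinlock, struct node for brcmf_usbreq, and the counter is kept unconditionally for brevity):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int id; };

struct queue {
	struct node *head, *tail;
	int count;
	pthread_mutex_t lock;
};

static void enq(struct queue *q, struct node *n)	/* list_add_tail() */
{
	pthread_mutex_lock(&q->lock);
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
	q->count++;
	pthread_mutex_unlock(&q->lock);
}

static struct node *deq(struct queue *q)	/* take q->next, i.e. the head */
{
	struct node *n;

	pthread_mutex_lock(&q->lock);
	n = q->head;
	if (n) {
		q->head = n->next;
		if (!q->head)
			q->tail = NULL;
		q->count--;
	}
	pthread_mutex_unlock(&q->lock);
	return n;	/* NULL when the queue is empty */
}

int main(void)
{
	struct queue q = { NULL, NULL, 0, PTHREAD_MUTEX_INITIALIZER };
	struct node a = { NULL, 1 }, b = { NULL, 2 };
	struct node *first, *second;

	enq(&q, &a);
	enq(&q, &b);
	first = deq(&q);
	second = deq(&q);
	printf("%d %d\n", first->id, second->id);	/* 1 2 */
	return 0;
}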
331 331
332 static struct brcmf_usbreq * 332 static struct brcmf_usbreq *
333 brcmf_usbdev_qinit(struct list_head *q, int qsize) 333 brcmf_usbdev_qinit(struct list_head *q, int qsize)
334 { 334 {
335 int i; 335 int i;
336 struct brcmf_usbreq *req, *reqs; 336 struct brcmf_usbreq *req, *reqs;
337 337
338 reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC); 338 reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
339 if (reqs == NULL) 339 if (reqs == NULL)
340 return NULL; 340 return NULL;
341 341
342 req = reqs; 342 req = reqs;
343 343
344 for (i = 0; i < qsize; i++) { 344 for (i = 0; i < qsize; i++) {
345 req->urb = usb_alloc_urb(0, GFP_ATOMIC); 345 req->urb = usb_alloc_urb(0, GFP_ATOMIC);
346 if (!req->urb) 346 if (!req->urb)
347 goto fail; 347 goto fail;
348 348
349 INIT_LIST_HEAD(&req->list); 349 INIT_LIST_HEAD(&req->list);
350 list_add_tail(&req->list, q); 350 list_add_tail(&req->list, q);
351 req++; 351 req++;
352 } 352 }
353 return reqs; 353 return reqs;
354 fail: 354 fail:
355 brcmf_err("fail!\n"); 355 brcmf_err("fail!\n");
356 while (!list_empty(q)) { 356 while (!list_empty(q)) {
357 req = list_entry(q->next, struct brcmf_usbreq, list); 357 req = list_entry(q->next, struct brcmf_usbreq, list);
358 if (req && req->urb) 358 if (req && req->urb)
359 usb_free_urb(req->urb); 359 usb_free_urb(req->urb);
360 list_del(q->next); 360 list_del(q->next);
361 } 361 }
362 return NULL; 362 return NULL;
363 363
364 } 364 }
365 365
366 static void brcmf_usb_free_q(struct list_head *q, bool pending) 366 static void brcmf_usb_free_q(struct list_head *q, bool pending)
367 { 367 {
368 struct brcmf_usbreq *req, *next; 368 struct brcmf_usbreq *req, *next;
369 int i = 0; 369 int i = 0;
370 list_for_each_entry_safe(req, next, q, list) { 370 list_for_each_entry_safe(req, next, q, list) {
371 if (!req->urb) { 371 if (!req->urb) {
372 brcmf_err("bad req\n"); 372 brcmf_err("bad req\n");
373 break; 373 break;
374 } 374 }
375 i++; 375 i++;
376 if (pending) { 376 if (pending) {
377 usb_kill_urb(req->urb); 377 usb_kill_urb(req->urb);
378 } else { 378 } else {
379 usb_free_urb(req->urb); 379 usb_free_urb(req->urb);
380 list_del_init(&req->list); 380 list_del_init(&req->list);
381 } 381 }
382 } 382 }
383 } 383 }
384 384
385 static void brcmf_usb_del_fromq(struct brcmf_usbdev_info *devinfo, 385 static void brcmf_usb_del_fromq(struct brcmf_usbdev_info *devinfo,
386 struct brcmf_usbreq *req) 386 struct brcmf_usbreq *req)
387 { 387 {
388 unsigned long flags; 388 unsigned long flags;
389 389
390 spin_lock_irqsave(&devinfo->qlock, flags); 390 spin_lock_irqsave(&devinfo->qlock, flags);
391 list_del_init(&req->list); 391 list_del_init(&req->list);
392 spin_unlock_irqrestore(&devinfo->qlock, flags); 392 spin_unlock_irqrestore(&devinfo->qlock, flags);
393 } 393 }
394 394
395 395
396 static void brcmf_usb_tx_complete(struct urb *urb) 396 static void brcmf_usb_tx_complete(struct urb *urb)
397 { 397 {
398 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; 398 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
399 struct brcmf_usbdev_info *devinfo = req->devinfo; 399 struct brcmf_usbdev_info *devinfo = req->devinfo;
400 unsigned long flags; 400 unsigned long flags;
401 401
402 brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status, 402 brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
403 req->skb); 403 req->skb);
404 brcmf_usb_del_fromq(devinfo, req); 404 brcmf_usb_del_fromq(devinfo, req);
405 405
406 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); 406 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
407 req->skb = NULL; 407 req->skb = NULL;
408 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); 408 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
409 spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); 409 spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
410 if (devinfo->tx_freecount > devinfo->tx_high_watermark && 410 if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
411 devinfo->tx_flowblock) { 411 devinfo->tx_flowblock) {
412 brcmf_txflowblock(devinfo->dev, false); 412 brcmf_txflowblock(devinfo->dev, false);
413 devinfo->tx_flowblock = false; 413 devinfo->tx_flowblock = false;
414 } 414 }
415 spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); 415 spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
416 } 416 }
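
Completion is also where tx flow control is released: blocking happens in the submit path (shown further below) when the free count falls under the low watermark, and unblocking happens here once completions push it back over the high watermark, so the two thresholds form a hysteresis band that avoids toggling on every packet. A small model of that logic (threshold values are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define TX_LOW_WATERMARK 13	/* stand-ins for tx_low_watermark / */
#define TX_HIGH_WATERMARK 39	/* tx_high_watermark */

static bool flowblocked;

static void after_submit(int tx_freecount)	/* end of brcmf_usb_tx() */
{
	if (tx_freecount < TX_LOW_WATERMARK && !flowblocked) {
		flowblocked = true;	/* brcmf_txflowblock(dev, true) */
		printf("block at %d free\n", tx_freecount);
	}
}

static void after_complete(int tx_freecount)	/* brcmf_usb_tx_complete() */
{
	if (tx_freecount > TX_HIGH_WATERMARK && flowblocked) {
		flowblocked = false;	/* brcmf_txflowblock(dev, false) */
		printf("unblock at %d free\n", tx_freecount);
	}
}

int main(void)
{
	after_submit(12);	/* dips below the low mark -> block */
	after_complete(20);	/* still inside the hysteresis band */
	after_complete(40);	/* above the high mark -> unblock */
	return 0;
}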
417 417
418 static void brcmf_usb_rx_complete(struct urb *urb) 418 static void brcmf_usb_rx_complete(struct urb *urb)
419 { 419 {
420 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; 420 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
421 struct brcmf_usbdev_info *devinfo = req->devinfo; 421 struct brcmf_usbdev_info *devinfo = req->devinfo;
422 struct sk_buff *skb; 422 struct sk_buff *skb;
423 423
424 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status); 424 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
425 brcmf_usb_del_fromq(devinfo, req); 425 brcmf_usb_del_fromq(devinfo, req);
426 skb = req->skb; 426 skb = req->skb;
427 req->skb = NULL; 427 req->skb = NULL;
428 428
429 /* zero length packets indicate USB "failure". Do not refill */ 429 /* zero length packets indicate USB "failure". Do not refill */
430 if (urb->status != 0 || !urb->actual_length) { 430 if (urb->status != 0 || !urb->actual_length) {
431 brcmu_pkt_buf_free_skb(skb); 431 brcmu_pkt_buf_free_skb(skb);
432 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL); 432 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
433 return; 433 return;
434 } 434 }
435 435
436 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) { 436 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
437 skb_put(skb, urb->actual_length); 437 skb_put(skb, urb->actual_length);
438 brcmf_rx_frame(devinfo->dev, skb); 438 brcmf_rx_frame(devinfo->dev, skb);
439 brcmf_usb_rx_refill(devinfo, req); 439 brcmf_usb_rx_refill(devinfo, req);
440 } else { 440 } else {
441 brcmu_pkt_buf_free_skb(skb); 441 brcmu_pkt_buf_free_skb(skb);
442 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL); 442 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
443 } 443 }
444 return; 444 return;
445 445
446 } 446 }
447 447
448 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 448 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
449 struct brcmf_usbreq *req) 449 struct brcmf_usbreq *req)
450 { 450 {
451 struct sk_buff *skb; 451 struct sk_buff *skb;
452 int ret; 452 int ret;
453 453
454 if (!req || !devinfo) 454 if (!req || !devinfo)
455 return; 455 return;
456 456
457 skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu); 457 skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
458 if (!skb) { 458 if (!skb) {
459 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL); 459 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
460 return; 460 return;
461 } 461 }
462 req->skb = skb; 462 req->skb = skb;
463 463
464 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe, 464 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe,
465 skb->data, skb_tailroom(skb), brcmf_usb_rx_complete, 465 skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
466 req); 466 req);
467 req->devinfo = devinfo; 467 req->devinfo = devinfo;
468 brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL); 468 brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
469 469
470 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 470 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
471 if (ret) { 471 if (ret) {
472 brcmf_usb_del_fromq(devinfo, req); 472 brcmf_usb_del_fromq(devinfo, req);
473 brcmu_pkt_buf_free_skb(req->skb); 473 brcmu_pkt_buf_free_skb(req->skb);
474 req->skb = NULL; 474 req->skb = NULL;
475 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL); 475 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
476 } 476 }
477 return; 477 return;
478 } 478 }
479 479
480 static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo) 480 static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
481 { 481 {
482 struct brcmf_usbreq *req; 482 struct brcmf_usbreq *req;
483 483
484 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { 484 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
485 brcmf_err("bus is not up=%d\n", devinfo->bus_pub.state); 485 brcmf_err("bus is not up=%d\n", devinfo->bus_pub.state);
486 return; 486 return;
487 } 487 }
488 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL) 488 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
489 brcmf_usb_rx_refill(devinfo, req); 489 brcmf_usb_rx_refill(devinfo, req);
490 } 490 }
491 491
492 static void 492 static void
493 brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state) 493 brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
494 { 494 {
495 struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus; 495 struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
496 int old_state; 496 int old_state;
497 497
498 brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n", 498 brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
499 devinfo->bus_pub.state, state); 499 devinfo->bus_pub.state, state);
500 500
501 if (devinfo->bus_pub.state == state) 501 if (devinfo->bus_pub.state == state)
502 return; 502 return;
503 503
504 old_state = devinfo->bus_pub.state; 504 old_state = devinfo->bus_pub.state;
505 devinfo->bus_pub.state = state; 505 devinfo->bus_pub.state = state;
506 506
507 /* update state of upper layer */ 507 /* update state of upper layer */
508 if (state == BRCMFMAC_USB_STATE_DOWN) { 508 if (state == BRCMFMAC_USB_STATE_DOWN) {
509 brcmf_dbg(USB, "DBUS is down\n"); 509 brcmf_dbg(USB, "DBUS is down\n");
510 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN); 510 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
511 } else if (state == BRCMFMAC_USB_STATE_UP) { 511 } else if (state == BRCMFMAC_USB_STATE_UP) {
512 brcmf_dbg(USB, "DBUS is up\n"); 512 brcmf_dbg(USB, "DBUS is up\n");
513 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA); 513 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA);
514 } else { 514 } else {
515 brcmf_dbg(USB, "DBUS current state=%d\n", state); 515 brcmf_dbg(USB, "DBUS current state=%d\n", state);
516 } 516 }
517 } 517 }
518 518
519 static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) 519 static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
520 { 520 {
521 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 521 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
522 struct brcmf_usbreq *req; 522 struct brcmf_usbreq *req;
523 int ret; 523 int ret;
524 unsigned long flags; 524 unsigned long flags;
525 525
526 brcmf_dbg(USB, "Enter, skb=%p\n", skb); 526 brcmf_dbg(USB, "Enter, skb=%p\n", skb);
527 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { 527 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
528 ret = -EIO; 528 ret = -EIO;
529 goto fail; 529 goto fail;
530 } 530 }
531 531
532 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq, 532 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
533 &devinfo->tx_freecount); 533 &devinfo->tx_freecount);
534 if (!req) { 534 if (!req) {
535 brcmf_err("no req to send\n"); 535 brcmf_err("no req to send\n");
536 ret = -ENOMEM; 536 ret = -ENOMEM;
537 goto fail; 537 goto fail;
538 } 538 }
539 539
540 req->skb = skb; 540 req->skb = skb;
541 req->devinfo = devinfo; 541 req->devinfo = devinfo;
542 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe, 542 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
543 skb->data, skb->len, brcmf_usb_tx_complete, req); 543 skb->data, skb->len, brcmf_usb_tx_complete, req);
544 req->urb->transfer_flags |= URB_ZERO_PACKET; 544 req->urb->transfer_flags |= URB_ZERO_PACKET;
545 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL); 545 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
546 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 546 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
547 if (ret) { 547 if (ret) {
548 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n"); 548 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
549 brcmf_usb_del_fromq(devinfo, req); 549 brcmf_usb_del_fromq(devinfo, req);
550 req->skb = NULL; 550 req->skb = NULL;
551 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, 551 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
552 &devinfo->tx_freecount); 552 &devinfo->tx_freecount);
553 goto fail; 553 goto fail;
554 } 554 }
555 555
556 spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); 556 spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
557 if (devinfo->tx_freecount < devinfo->tx_low_watermark && 557 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
558 !devinfo->tx_flowblock) { 558 !devinfo->tx_flowblock) {
559 brcmf_txflowblock(dev, true); 559 brcmf_txflowblock(dev, true);
560 devinfo->tx_flowblock = true; 560 devinfo->tx_flowblock = true;
561 } 561 }
562 spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); 562 spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
563 return 0; 563 return 0;
564 564
565 fail: 565 fail:
566 return ret; 566 return ret;
567 } 567 }
568 568
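/*
 * A hedged aside, not part of the driver source: the tx_low_watermark /
 * tx_high_watermark pair used in brcmf_usb_tx() implements hysteresis.
 * Flow is blocked once the free tx request count drops below the low
 * watermark and (in the tx completion path, which is not part of this
 * hunk) unblocked only after it climbs back above the high watermark,
 * so the network stack is not toggled on every packet. The helper below
 * restates the rule with hypothetical names.
 */
static void demo_flow_update(unsigned int freecount, unsigned int low,
			     unsigned int high, bool *blocked)
{
	if (freecount < low && !*blocked)
		*blocked = true;	/* ask the stack to stop sending */
	else if (freecount > high && *blocked)
		*blocked = false;	/* safe to resume transmission */
}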
569 569
570 static int brcmf_usb_up(struct device *dev) 570 static int brcmf_usb_up(struct device *dev)
571 { 571 {
572 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 572 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
573 573
574 brcmf_dbg(USB, "Enter\n"); 574 brcmf_dbg(USB, "Enter\n");
575 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) 575 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
576 return 0; 576 return 0;
577 577
578 /* Success, indicate devinfo is fully up */ 578 /* Success, indicate devinfo is fully up */
579 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP); 579 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
580 580
581 if (devinfo->ctl_urb) { 581 if (devinfo->ctl_urb) {
582 devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0); 582 devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
583 devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0); 583 devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
584 584
585 /* CTL Write */ 585 /* CTL Write */
586 devinfo->ctl_write.bRequestType = 586 devinfo->ctl_write.bRequestType =
587 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; 587 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
588 devinfo->ctl_write.bRequest = 0; 588 devinfo->ctl_write.bRequest = 0;
589 devinfo->ctl_write.wValue = cpu_to_le16(0); 589 devinfo->ctl_write.wValue = cpu_to_le16(0);
590 devinfo->ctl_write.wIndex = cpu_to_le16(devinfo->ifnum); 590 devinfo->ctl_write.wIndex = cpu_to_le16(devinfo->ifnum);
591 591
592 /* CTL Read */ 592 /* CTL Read */
593 devinfo->ctl_read.bRequestType = 593 devinfo->ctl_read.bRequestType =
594 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; 594 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
595 devinfo->ctl_read.bRequest = 1; 595 devinfo->ctl_read.bRequest = 1;
596 devinfo->ctl_read.wValue = cpu_to_le16(0); 596 devinfo->ctl_read.wValue = cpu_to_le16(0);
597 devinfo->ctl_read.wIndex = cpu_to_le16(devinfo->ifnum); 597 devinfo->ctl_read.wIndex = cpu_to_le16(devinfo->ifnum);
598 } 598 }
599 brcmf_usb_rx_fill_all(devinfo); 599 brcmf_usb_rx_fill_all(devinfo);
600 return 0; 600 return 0;
601 } 601 }
602 602
603 static void brcmf_usb_down(struct device *dev) 603 static void brcmf_usb_down(struct device *dev)
604 { 604 {
605 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 605 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
606 606
607 brcmf_dbg(USB, "Enter\n"); 607 brcmf_dbg(USB, "Enter\n");
608 if (devinfo == NULL) 608 if (devinfo == NULL)
609 return; 609 return;
610 610
611 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) 611 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
612 return; 612 return;
613 613
614 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN); 614 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
615 615
616 if (devinfo->ctl_urb) 616 if (devinfo->ctl_urb)
617 usb_kill_urb(devinfo->ctl_urb); 617 usb_kill_urb(devinfo->ctl_urb);
618 618
619 if (devinfo->bulk_urb) 619 if (devinfo->bulk_urb)
620 usb_kill_urb(devinfo->bulk_urb); 620 usb_kill_urb(devinfo->bulk_urb);
621 brcmf_usb_free_q(&devinfo->tx_postq, true); 621 brcmf_usb_free_q(&devinfo->tx_postq, true);
622 622
623 brcmf_usb_free_q(&devinfo->rx_postq, true); 623 brcmf_usb_free_q(&devinfo->rx_postq, true);
624 } 624 }
625 625
626 static void 626 static void
627 brcmf_usb_sync_complete(struct urb *urb) 627 brcmf_usb_sync_complete(struct urb *urb)
628 { 628 {
629 struct brcmf_usbdev_info *devinfo = 629 struct brcmf_usbdev_info *devinfo =
630 (struct brcmf_usbdev_info *)urb->context; 630 (struct brcmf_usbdev_info *)urb->context;
631 631
632 devinfo->ctl_completed = true; 632 devinfo->ctl_completed = true;
633 brcmf_usb_ioctl_resp_wake(devinfo); 633 brcmf_usb_ioctl_resp_wake(devinfo);
634 } 634 }
635 635
636 static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd, 636 static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
637 void *buffer, int buflen) 637 void *buffer, int buflen)
638 { 638 {
639 int ret; 639 int ret;
640 char *tmpbuf; 640 char *tmpbuf;
641 u16 size; 641 u16 size;
642 642
643 if ((!devinfo) || (devinfo->ctl_urb == NULL)) 643 if ((!devinfo) || (devinfo->ctl_urb == NULL))
644 return -EINVAL; 644 return -EINVAL;
645 645
646 tmpbuf = kmalloc(buflen, GFP_ATOMIC); 646 tmpbuf = kmalloc(buflen, GFP_ATOMIC);
647 if (!tmpbuf) 647 if (!tmpbuf)
648 return -ENOMEM; 648 return -ENOMEM;
649 649
650 size = buflen; 650 size = buflen;
651 devinfo->ctl_urb->transfer_buffer_length = size; 651 devinfo->ctl_urb->transfer_buffer_length = size;
652 652
653 devinfo->ctl_read.wLength = cpu_to_le16p(&size); 653 devinfo->ctl_read.wLength = cpu_to_le16p(&size);
654 devinfo->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | 654 devinfo->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR |
655 USB_RECIP_INTERFACE; 655 USB_RECIP_INTERFACE;
656 devinfo->ctl_read.bRequest = cmd; 656 devinfo->ctl_read.bRequest = cmd;
657 657
658 usb_fill_control_urb(devinfo->ctl_urb, 658 usb_fill_control_urb(devinfo->ctl_urb,
659 devinfo->usbdev, 659 devinfo->usbdev,
660 usb_rcvctrlpipe(devinfo->usbdev, 0), 660 usb_rcvctrlpipe(devinfo->usbdev, 0),
661 (unsigned char *) &devinfo->ctl_read, 661 (unsigned char *) &devinfo->ctl_read,
662 (void *) tmpbuf, size, 662 (void *) tmpbuf, size,
663 (usb_complete_t)brcmf_usb_sync_complete, devinfo); 663 (usb_complete_t)brcmf_usb_sync_complete, devinfo);
664 664
665 devinfo->ctl_completed = false; 665 devinfo->ctl_completed = false;
666 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC); 666 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
667 if (ret < 0) { 667 if (ret < 0) {
668 brcmf_err("usb_submit_urb failed %d\n", ret); 668 brcmf_err("usb_submit_urb failed %d\n", ret);
669 goto finalize; 669 goto finalize;
670 } 670 }
671 671
672 if (!brcmf_usb_ioctl_resp_wait(devinfo)) 672 if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
673 usb_kill_urb(devinfo->ctl_urb);
673 ret = -ETIMEDOUT; 674 ret = -ETIMEDOUT;
674 else 675 } else {
675 memcpy(buffer, tmpbuf, buflen); 676 memcpy(buffer, tmpbuf, buflen);
677 }
676 678
677 finalize: 679 finalize:
678 kfree(tmpbuf); 680 kfree(tmpbuf);
679 return ret; 681 return ret;
680 } 682 }
681 683
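/*
 * A minimal sketch (hypothetical names, not driver code) of the
 * cancel-before-free rule the hunk above enforces: when the synchronous
 * wait for a control URB times out, usb_kill_urb() must return before
 * the transfer buffer may be freed. usb_kill_urb() guarantees the URB is
 * no longer active and its completion handler has finished, so the host
 * controller cannot keep writing into memory that kfree() has released,
 * which is the use-after-free this patch closes.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/usb.h>

static void demo_sync_complete(struct urb *urb)
{
	complete(urb->context);		/* wake the waiting submitter */
}

static int demo_ctl_read(struct usb_device *udev, struct urb *urb,
			 struct usb_ctrlrequest *req, void *buf, u16 len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	usb_fill_control_urb(urb, udev, usb_rcvctrlpipe(udev, 0),
			     (unsigned char *)req, buf, len,
			     demo_sync_complete, &done);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
		usb_kill_urb(urb);	/* URB is idle after this returns */
		return -ETIMEDOUT;	/* caller may free buf only now */
	}
	return 0;
}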
682 static bool 684 static bool
683 brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo) 685 brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo)
684 { 686 {
685 struct bootrom_id_le id; 687 struct bootrom_id_le id;
686 u32 chipid, chiprev; 688 u32 chipid, chiprev;
687 689
688 brcmf_dbg(USB, "Enter\n"); 690 brcmf_dbg(USB, "Enter\n");
689 691
690 if (devinfo == NULL) 692 if (devinfo == NULL)
691 return false; 693 return false;
692 694
 693 /* Check if the firmware was already downloaded by querying the runtime ID */ 695 /* Check if the firmware was already downloaded by querying the runtime ID */
694 id.chip = cpu_to_le32(0xDEAD); 696 id.chip = cpu_to_le32(0xDEAD);
695 brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id)); 697 brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
696 698
697 chipid = le32_to_cpu(id.chip); 699 chipid = le32_to_cpu(id.chip);
698 chiprev = le32_to_cpu(id.chiprev); 700 chiprev = le32_to_cpu(id.chiprev);
699 701
700 if ((chipid & 0x4300) == 0x4300) 702 if ((chipid & 0x4300) == 0x4300)
701 brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev); 703 brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev);
702 else 704 else
703 brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev); 705 brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev);
704 if (chipid == BRCMF_POSTBOOT_ID) { 706 if (chipid == BRCMF_POSTBOOT_ID) {
705 brcmf_dbg(USB, "firmware already downloaded\n"); 707 brcmf_dbg(USB, "firmware already downloaded\n");
706 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id)); 708 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
707 return false; 709 return false;
708 } else { 710 } else {
709 devinfo->bus_pub.devid = chipid; 711 devinfo->bus_pub.devid = chipid;
710 devinfo->bus_pub.chiprev = chiprev; 712 devinfo->bus_pub.chiprev = chiprev;
711 } 713 }
712 return true; 714 return true;
713 } 715 }
714 716
715 static int 717 static int
716 brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo) 718 brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
717 { 719 {
718 struct bootrom_id_le id; 720 struct bootrom_id_le id;
719 u32 loop_cnt; 721 u32 loop_cnt;
720 int err; 722 int err;
721 723
722 brcmf_dbg(USB, "Enter\n"); 724 brcmf_dbg(USB, "Enter\n");
723 725
724 loop_cnt = 0; 726 loop_cnt = 0;
725 do { 727 do {
726 mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT); 728 mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
727 loop_cnt++; 729 loop_cnt++;
728 id.chip = cpu_to_le32(0xDEAD); /* Get the ID */ 730 id.chip = cpu_to_le32(0xDEAD); /* Get the ID */
729 err = brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id)); 731 err = brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
730 if ((err) && (err != -ETIMEDOUT)) 732 if ((err) && (err != -ETIMEDOUT))
731 return err; 733 return err;
732 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) 734 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
733 break; 735 break;
734 } while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT); 736 } while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
735 737
736 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) { 738 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
737 brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n", 739 brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n",
738 le32_to_cpu(id.chip), le32_to_cpu(id.chiprev)); 740 le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
739 741
740 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id)); 742 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
741 return 0; 743 return 0;
742 } else { 744 } else {
743 brcmf_err("Cannot talk to Dongle. Firmware is not UP, %d ms\n", 745 brcmf_err("Cannot talk to Dongle. Firmware is not UP, %d ms\n",
744 BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt); 746 BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt);
745 return -EINVAL; 747 return -EINVAL;
746 } 748 }
747 } 749 }
748 750
749 751
750 static int 752 static int
751 brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len) 753 brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
752 { 754 {
753 int ret; 755 int ret;
754 756
755 if ((devinfo == NULL) || (devinfo->bulk_urb == NULL)) 757 if ((devinfo == NULL) || (devinfo->bulk_urb == NULL))
756 return -EINVAL; 758 return -EINVAL;
757 759
758 /* Prepare the URB */ 760 /* Prepare the URB */
759 usb_fill_bulk_urb(devinfo->bulk_urb, devinfo->usbdev, 761 usb_fill_bulk_urb(devinfo->bulk_urb, devinfo->usbdev,
760 devinfo->tx_pipe, buffer, len, 762 devinfo->tx_pipe, buffer, len,
761 (usb_complete_t)brcmf_usb_sync_complete, devinfo); 763 (usb_complete_t)brcmf_usb_sync_complete, devinfo);
762 764
763 devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET; 765 devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
764 766
765 devinfo->ctl_completed = false; 767 devinfo->ctl_completed = false;
766 ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC); 768 ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
767 if (ret) { 769 if (ret) {
768 brcmf_err("usb_submit_urb failed %d\n", ret); 770 brcmf_err("usb_submit_urb failed %d\n", ret);
769 return ret; 771 return ret;
770 } 772 }
771 ret = brcmf_usb_ioctl_resp_wait(devinfo); 773 ret = brcmf_usb_ioctl_resp_wait(devinfo);
772 return (ret == 0); 774 return (ret == 0);
773 } 775 }
774 776
775 static int 777 static int
776 brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) 778 brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
777 { 779 {
778 unsigned int sendlen, sent, dllen; 780 unsigned int sendlen, sent, dllen;
779 char *bulkchunk = NULL, *dlpos; 781 char *bulkchunk = NULL, *dlpos;
780 struct rdl_state_le state; 782 struct rdl_state_le state;
781 u32 rdlstate, rdlbytes; 783 u32 rdlstate, rdlbytes;
782 int err = 0; 784 int err = 0;
783 785
784 brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen); 786 brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
785 787
786 bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC); 788 bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
787 if (bulkchunk == NULL) { 789 if (bulkchunk == NULL) {
788 err = -ENOMEM; 790 err = -ENOMEM;
789 goto fail; 791 goto fail;
790 } 792 }
791 793
792 /* 1) Prepare USB boot loader for runtime image */ 794 /* 1) Prepare USB boot loader for runtime image */
793 brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); 795 brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
794 796
795 rdlstate = le32_to_cpu(state.state); 797 rdlstate = le32_to_cpu(state.state);
796 rdlbytes = le32_to_cpu(state.bytes); 798 rdlbytes = le32_to_cpu(state.bytes);
797 799
798 /* 2) Check we are in the Waiting state */ 800 /* 2) Check we are in the Waiting state */
799 if (rdlstate != DL_WAITING) { 801 if (rdlstate != DL_WAITING) {
800 brcmf_err("Failed to DL_START\n"); 802 brcmf_err("Failed to DL_START\n");
801 err = -EINVAL; 803 err = -EINVAL;
802 goto fail; 804 goto fail;
803 } 805 }
804 sent = 0; 806 sent = 0;
805 dlpos = fw; 807 dlpos = fw;
806 dllen = fwlen; 808 dllen = fwlen;
807 809
 808 /* Send the image in chunks, tracking bootloader state */ 810 /* Send the image in chunks, tracking bootloader state */
809 while (rdlbytes != dllen) { 811 while (rdlbytes != dllen) {
810 /* Wait until the usb device reports it received all 812 /* Wait until the usb device reports it received all
811 * the bytes we sent */ 813 * the bytes we sent */
812 if ((rdlbytes == sent) && (rdlbytes != dllen)) { 814 if ((rdlbytes == sent) && (rdlbytes != dllen)) {
813 if ((dllen-sent) < RDL_CHUNK) 815 if ((dllen-sent) < RDL_CHUNK)
814 sendlen = dllen-sent; 816 sendlen = dllen-sent;
815 else 817 else
816 sendlen = RDL_CHUNK; 818 sendlen = RDL_CHUNK;
817 819
818 /* simply avoid having to send a ZLP by ensuring we 820 /* simply avoid having to send a ZLP by ensuring we
 819 * never send a chunk that is an exact 821 * never send a chunk that is an exact
 820 * multiple of 64 bytes 822 * multiple of 64 bytes
821 */ 823 */
822 if (!(sendlen % 64)) 824 if (!(sendlen % 64))
823 sendlen -= 4; 825 sendlen -= 4;
824 826
825 /* send data */ 827 /* send data */
826 memcpy(bulkchunk, dlpos, sendlen); 828 memcpy(bulkchunk, dlpos, sendlen);
827 if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk, 829 if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk,
828 sendlen)) { 830 sendlen)) {
829 brcmf_err("send_bulk failed\n"); 831 brcmf_err("send_bulk failed\n");
830 err = -EINVAL; 832 err = -EINVAL;
831 goto fail; 833 goto fail;
832 } 834 }
833 835
834 dlpos += sendlen; 836 dlpos += sendlen;
835 sent += sendlen; 837 sent += sendlen;
836 } 838 }
837 err = brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state, 839 err = brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
838 sizeof(state)); 840 sizeof(state));
839 if (err) { 841 if (err) {
840 brcmf_err("DL_GETSTATE Failed\n"); 842 brcmf_err("DL_GETSTATE Failed\n");
841 goto fail; 843 goto fail;
842 } 844 }
843 845
844 rdlstate = le32_to_cpu(state.state); 846 rdlstate = le32_to_cpu(state.state);
845 rdlbytes = le32_to_cpu(state.bytes); 847 rdlbytes = le32_to_cpu(state.bytes);
846 848
847 /* restart if an error is reported */ 849 /* restart if an error is reported */
848 if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) { 850 if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) {
849 brcmf_err("Bad Hdr or Bad CRC state %d\n", 851 brcmf_err("Bad Hdr or Bad CRC state %d\n",
850 rdlstate); 852 rdlstate);
851 err = -EINVAL; 853 err = -EINVAL;
852 goto fail; 854 goto fail;
853 } 855 }
854 } 856 }
855 857
856 fail: 858 fail:
857 kfree(bulkchunk); 859 kfree(bulkchunk);
858 brcmf_dbg(USB, "Exit, err=%d\n", err); 860 brcmf_dbg(USB, "Exit, err=%d\n", err);
859 return err; 861 return err;
860 } 862 }
861 863
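/*
 * Illustrative restatement, not driver code: the download loop above
 * trims any chunk that is an exact multiple of 64 bytes. A bulk OUT
 * transfer whose length is a multiple of the endpoint's max packet size
 * would otherwise have to be terminated with a zero-length packet (ZLP);
 * shaving off 4 bytes (they are simply carried into the next chunk)
 * sidesteps that. demo_pick_sendlen() is a hypothetical name.
 */
static unsigned int demo_pick_sendlen(unsigned int remaining,
				      unsigned int chunk)
{
	unsigned int sendlen = remaining < chunk ? remaining : chunk;

	if (!(sendlen % 64))	/* exact multiple would need a ZLP */
		sendlen -= 4;
	return sendlen;
}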
862 static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len) 864 static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
863 { 865 {
864 int err; 866 int err;
865 867
866 brcmf_dbg(USB, "Enter\n"); 868 brcmf_dbg(USB, "Enter\n");
867 869
868 if (devinfo == NULL) 870 if (devinfo == NULL)
869 return -EINVAL; 871 return -EINVAL;
870 872
871 if (devinfo->bus_pub.devid == 0xDEAD) 873 if (devinfo->bus_pub.devid == 0xDEAD)
872 return -EINVAL; 874 return -EINVAL;
873 875
874 err = brcmf_usb_dl_writeimage(devinfo, fw, len); 876 err = brcmf_usb_dl_writeimage(devinfo, fw, len);
875 if (err == 0) 877 if (err == 0)
876 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE; 878 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE;
877 else 879 else
878 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL; 880 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL;
879 brcmf_dbg(USB, "Exit, err=%d\n", err); 881 brcmf_dbg(USB, "Exit, err=%d\n", err);
880 882
881 return err; 883 return err;
882 } 884 }
883 885
884 static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo) 886 static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
885 { 887 {
886 struct rdl_state_le state; 888 struct rdl_state_le state;
887 889
888 brcmf_dbg(USB, "Enter\n"); 890 brcmf_dbg(USB, "Enter\n");
889 if (!devinfo) 891 if (!devinfo)
890 return -EINVAL; 892 return -EINVAL;
891 893
892 if (devinfo->bus_pub.devid == 0xDEAD) 894 if (devinfo->bus_pub.devid == 0xDEAD)
893 return -EINVAL; 895 return -EINVAL;
894 896
895 /* Check we are runnable */ 897 /* Check we are runnable */
896 state.state = 0; 898 state.state = 0;
897 brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state, sizeof(state)); 899 brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state, sizeof(state));
898 900
899 /* Start the image */ 901 /* Start the image */
900 if (state.state == cpu_to_le32(DL_RUNNABLE)) { 902 if (state.state == cpu_to_le32(DL_RUNNABLE)) {
901 if (brcmf_usb_dl_cmd(devinfo, DL_GO, &state, sizeof(state))) 903 if (brcmf_usb_dl_cmd(devinfo, DL_GO, &state, sizeof(state)))
902 return -ENODEV; 904 return -ENODEV;
903 if (brcmf_usb_resetcfg(devinfo)) 905 if (brcmf_usb_resetcfg(devinfo))
904 return -ENODEV; 906 return -ENODEV;
 905 /* The dongle may re-enumerate at this point. */ 907 /* The dongle may re-enumerate at this point. */
906 } else { 908 } else {
907 brcmf_err("Dongle not runnable\n"); 909 brcmf_err("Dongle not runnable\n");
908 return -EINVAL; 910 return -EINVAL;
909 } 911 }
910 brcmf_dbg(USB, "Exit\n"); 912 brcmf_dbg(USB, "Exit\n");
911 return 0; 913 return 0;
912 } 914 }
913 915
914 static bool brcmf_usb_chip_support(int chipid, int chiprev) 916 static bool brcmf_usb_chip_support(int chipid, int chiprev)
915 { 917 {
 916 switch (chipid) { 918 switch (chipid) {
917 case BRCM_CC_43143_CHIP_ID: 919 case BRCM_CC_43143_CHIP_ID:
918 return true; 920 return true;
919 case BRCM_CC_43235_CHIP_ID: 921 case BRCM_CC_43235_CHIP_ID:
920 case BRCM_CC_43236_CHIP_ID: 922 case BRCM_CC_43236_CHIP_ID:
921 case BRCM_CC_43238_CHIP_ID: 923 case BRCM_CC_43238_CHIP_ID:
922 return (chiprev == 3); 924 return (chiprev == 3);
923 case BRCM_CC_43242_CHIP_ID: 925 case BRCM_CC_43242_CHIP_ID:
924 return true; 926 return true;
925 case BRCM_CC_43566_CHIP_ID: 927 case BRCM_CC_43566_CHIP_ID:
926 case BRCM_CC_43569_CHIP_ID: 928 case BRCM_CC_43569_CHIP_ID:
927 return true; 929 return true;
928 default: 930 default:
929 break; 931 break;
930 } 932 }
931 return false; 933 return false;
932 } 934 }
933 935
934 static int 936 static int
935 brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo) 937 brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
936 { 938 {
937 int devid, chiprev; 939 int devid, chiprev;
938 int err; 940 int err;
939 941
940 brcmf_dbg(USB, "Enter\n"); 942 brcmf_dbg(USB, "Enter\n");
941 if (devinfo == NULL) 943 if (devinfo == NULL)
942 return -ENODEV; 944 return -ENODEV;
943 945
944 devid = devinfo->bus_pub.devid; 946 devid = devinfo->bus_pub.devid;
945 chiprev = devinfo->bus_pub.chiprev; 947 chiprev = devinfo->bus_pub.chiprev;
946 948
947 if (!brcmf_usb_chip_support(devid, chiprev)) { 949 if (!brcmf_usb_chip_support(devid, chiprev)) {
948 brcmf_err("unsupported chip %d rev %d\n", 950 brcmf_err("unsupported chip %d rev %d\n",
949 devid, chiprev); 951 devid, chiprev);
950 return -EINVAL; 952 return -EINVAL;
951 } 953 }
952 954
953 if (!devinfo->image) { 955 if (!devinfo->image) {
954 brcmf_err("No firmware!\n"); 956 brcmf_err("No firmware!\n");
955 return -ENOENT; 957 return -ENOENT;
956 } 958 }
957 959
958 err = brcmf_usb_dlstart(devinfo, 960 err = brcmf_usb_dlstart(devinfo,
959 (u8 *)devinfo->image, devinfo->image_len); 961 (u8 *)devinfo->image, devinfo->image_len);
960 if (err == 0) 962 if (err == 0)
961 err = brcmf_usb_dlrun(devinfo); 963 err = brcmf_usb_dlrun(devinfo);
962 return err; 964 return err;
963 } 965 }
964 966
965 967
966 static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo) 968 static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
967 { 969 {
968 brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo); 970 brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
969 971
970 /* free the URBS */ 972 /* free the URBS */
971 brcmf_usb_free_q(&devinfo->rx_freeq, false); 973 brcmf_usb_free_q(&devinfo->rx_freeq, false);
972 brcmf_usb_free_q(&devinfo->tx_freeq, false); 974 brcmf_usb_free_q(&devinfo->tx_freeq, false);
973 975
974 usb_free_urb(devinfo->ctl_urb); 976 usb_free_urb(devinfo->ctl_urb);
975 usb_free_urb(devinfo->bulk_urb); 977 usb_free_urb(devinfo->bulk_urb);
976 978
977 kfree(devinfo->tx_reqs); 979 kfree(devinfo->tx_reqs);
978 kfree(devinfo->rx_reqs); 980 kfree(devinfo->rx_reqs);
979 } 981 }
980 982
981 #define TRX_MAGIC 0x30524448 /* "HDR0" */ 983 #define TRX_MAGIC 0x30524448 /* "HDR0" */
982 #define TRX_VERSION 1 /* Version 1 */ 984 #define TRX_VERSION 1 /* Version 1 */
983 #define TRX_MAX_LEN 0x3B0000 /* Max length */ 985 #define TRX_MAX_LEN 0x3B0000 /* Max length */
984 #define TRX_NO_HEADER 1 /* Do not write TRX header */ 986 #define TRX_NO_HEADER 1 /* Do not write TRX header */
985 #define TRX_MAX_OFFSET 3 /* Max number of individual files */ 987 #define TRX_MAX_OFFSET 3 /* Max number of individual files */
986 #define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */ 988 #define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */
987 989
988 struct trx_header_le { 990 struct trx_header_le {
989 __le32 magic; /* "HDR0" */ 991 __le32 magic; /* "HDR0" */
990 __le32 len; /* Length of file including header */ 992 __le32 len; /* Length of file including header */
991 __le32 crc32; /* CRC from flag_version to end of file */ 993 __le32 crc32; /* CRC from flag_version to end of file */
992 __le32 flag_version; /* 0:15 flags, 16:31 version */ 994 __le32 flag_version; /* 0:15 flags, 16:31 version */
993 __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of 995 __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
994 * header */ 996 * header */
995 }; 997 };
996 998
997 static int check_file(const u8 *headers) 999 static int check_file(const u8 *headers)
998 { 1000 {
999 struct trx_header_le *trx; 1001 struct trx_header_le *trx;
1000 int actual_len = -1; 1002 int actual_len = -1;
1001 1003
1002 brcmf_dbg(USB, "Enter\n"); 1004 brcmf_dbg(USB, "Enter\n");
1003 /* Extract trx header */ 1005 /* Extract trx header */
1004 trx = (struct trx_header_le *) headers; 1006 trx = (struct trx_header_le *) headers;
1005 if (trx->magic != cpu_to_le32(TRX_MAGIC)) 1007 if (trx->magic != cpu_to_le32(TRX_MAGIC))
1006 return -1; 1008 return -1;
1007 1009
1008 headers += sizeof(struct trx_header_le); 1010 headers += sizeof(struct trx_header_le);
1009 1011
1010 if (le32_to_cpu(trx->flag_version) & TRX_UNCOMP_IMAGE) { 1012 if (le32_to_cpu(trx->flag_version) & TRX_UNCOMP_IMAGE) {
1011 actual_len = le32_to_cpu(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]); 1013 actual_len = le32_to_cpu(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]);
1012 return actual_len + sizeof(struct trx_header_le); 1014 return actual_len + sizeof(struct trx_header_le);
1013 } 1015 }
1014 return -1; 1016 return -1;
1015 } 1017 }
1016 1018
1017 static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo) 1019 static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
1018 { 1020 {
1019 switch (devinfo->bus_pub.devid) { 1021 switch (devinfo->bus_pub.devid) {
1020 case BRCM_CC_43143_CHIP_ID: 1022 case BRCM_CC_43143_CHIP_ID:
1021 return BRCMF_USB_43143_FW_NAME; 1023 return BRCMF_USB_43143_FW_NAME;
1022 case BRCM_CC_43235_CHIP_ID: 1024 case BRCM_CC_43235_CHIP_ID:
1023 case BRCM_CC_43236_CHIP_ID: 1025 case BRCM_CC_43236_CHIP_ID:
1024 case BRCM_CC_43238_CHIP_ID: 1026 case BRCM_CC_43238_CHIP_ID:
1025 return BRCMF_USB_43236_FW_NAME; 1027 return BRCMF_USB_43236_FW_NAME;
1026 case BRCM_CC_43242_CHIP_ID: 1028 case BRCM_CC_43242_CHIP_ID:
1027 return BRCMF_USB_43242_FW_NAME; 1029 return BRCMF_USB_43242_FW_NAME;
1028 case BRCM_CC_43566_CHIP_ID: 1030 case BRCM_CC_43566_CHIP_ID:
1029 case BRCM_CC_43569_CHIP_ID: 1031 case BRCM_CC_43569_CHIP_ID:
1030 return BRCMF_USB_43569_FW_NAME; 1032 return BRCMF_USB_43569_FW_NAME;
1031 default: 1033 default:
1032 return NULL; 1034 return NULL;
1033 } 1035 }
1034 } 1036 }
1035 1037
1036 1038
1037 static 1039 static
1038 struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo, 1040 struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1039 int nrxq, int ntxq) 1041 int nrxq, int ntxq)
1040 { 1042 {
1041 brcmf_dbg(USB, "Enter\n"); 1043 brcmf_dbg(USB, "Enter\n");
1042 1044
1043 devinfo->bus_pub.nrxq = nrxq; 1045 devinfo->bus_pub.nrxq = nrxq;
1044 devinfo->rx_low_watermark = nrxq / 2; 1046 devinfo->rx_low_watermark = nrxq / 2;
1045 devinfo->bus_pub.devinfo = devinfo; 1047 devinfo->bus_pub.devinfo = devinfo;
1046 devinfo->bus_pub.ntxq = ntxq; 1048 devinfo->bus_pub.ntxq = ntxq;
1047 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN; 1049 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN;
1048 1050
1049 /* flow control when too many tx urbs posted */ 1051 /* flow control when too many tx urbs posted */
1050 devinfo->tx_low_watermark = ntxq / 4; 1052 devinfo->tx_low_watermark = ntxq / 4;
1051 devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3; 1053 devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
1052 devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE; 1054 devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
1053 1055
1054 /* Initialize other structure content */ 1056 /* Initialize other structure content */
1055 init_waitqueue_head(&devinfo->ioctl_resp_wait); 1057 init_waitqueue_head(&devinfo->ioctl_resp_wait);
1056 1058
1057 /* Initialize the spinlocks */ 1059 /* Initialize the spinlocks */
1058 spin_lock_init(&devinfo->qlock); 1060 spin_lock_init(&devinfo->qlock);
1059 spin_lock_init(&devinfo->tx_flowblock_lock); 1061 spin_lock_init(&devinfo->tx_flowblock_lock);
1060 1062
1061 INIT_LIST_HEAD(&devinfo->rx_freeq); 1063 INIT_LIST_HEAD(&devinfo->rx_freeq);
1062 INIT_LIST_HEAD(&devinfo->rx_postq); 1064 INIT_LIST_HEAD(&devinfo->rx_postq);
1063 1065
1064 INIT_LIST_HEAD(&devinfo->tx_freeq); 1066 INIT_LIST_HEAD(&devinfo->tx_freeq);
1065 INIT_LIST_HEAD(&devinfo->tx_postq); 1067 INIT_LIST_HEAD(&devinfo->tx_postq);
1066 1068
1067 devinfo->tx_flowblock = false; 1069 devinfo->tx_flowblock = false;
1068 1070
1069 devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq); 1071 devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
1070 if (!devinfo->rx_reqs) 1072 if (!devinfo->rx_reqs)
1071 goto error; 1073 goto error;
1072 1074
1073 devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq); 1075 devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
1074 if (!devinfo->tx_reqs) 1076 if (!devinfo->tx_reqs)
1075 goto error; 1077 goto error;
1076 devinfo->tx_freecount = ntxq; 1078 devinfo->tx_freecount = ntxq;
1077 1079
1078 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC); 1080 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
1079 if (!devinfo->ctl_urb) { 1081 if (!devinfo->ctl_urb) {
1080 brcmf_err("usb_alloc_urb (ctl) failed\n"); 1082 brcmf_err("usb_alloc_urb (ctl) failed\n");
1081 goto error; 1083 goto error;
1082 } 1084 }
1083 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC); 1085 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
1084 if (!devinfo->bulk_urb) { 1086 if (!devinfo->bulk_urb) {
1085 brcmf_err("usb_alloc_urb (bulk) failed\n"); 1087 brcmf_err("usb_alloc_urb (bulk) failed\n");
1086 goto error; 1088 goto error;
1087 } 1089 }
1088 1090
1089 return &devinfo->bus_pub; 1091 return &devinfo->bus_pub;
1090 1092
1091 error: 1093 error:
1092 brcmf_err("failed!\n"); 1094 brcmf_err("failed!\n");
1093 brcmf_usb_detach(devinfo); 1095 brcmf_usb_detach(devinfo);
1094 return NULL; 1096 return NULL;
1095 } 1097 }
1096 1098
1097 static struct brcmf_bus_ops brcmf_usb_bus_ops = { 1099 static struct brcmf_bus_ops brcmf_usb_bus_ops = {
1098 .txdata = brcmf_usb_tx, 1100 .txdata = brcmf_usb_tx,
1099 .stop = brcmf_usb_down, 1101 .stop = brcmf_usb_down,
1100 .txctl = brcmf_usb_tx_ctlpkt, 1102 .txctl = brcmf_usb_tx_ctlpkt,
1101 .rxctl = brcmf_usb_rx_ctlpkt, 1103 .rxctl = brcmf_usb_rx_ctlpkt,
1102 }; 1104 };
1103 1105
1104 static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) 1106 static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
1105 { 1107 {
1106 int ret; 1108 int ret;
1107 1109
1108 /* Attach to the common driver interface */ 1110 /* Attach to the common driver interface */
1109 ret = brcmf_attach(devinfo->dev); 1111 ret = brcmf_attach(devinfo->dev);
1110 if (ret) { 1112 if (ret) {
1111 brcmf_err("brcmf_attach failed\n"); 1113 brcmf_err("brcmf_attach failed\n");
1112 return ret; 1114 return ret;
1113 } 1115 }
1114 1116
1115 ret = brcmf_usb_up(devinfo->dev); 1117 ret = brcmf_usb_up(devinfo->dev);
1116 if (ret) 1118 if (ret)
1117 goto fail; 1119 goto fail;
1118 1120
1119 ret = brcmf_bus_start(devinfo->dev); 1121 ret = brcmf_bus_start(devinfo->dev);
1120 if (ret) 1122 if (ret)
1121 goto fail; 1123 goto fail;
1122 1124
1123 return 0; 1125 return 0;
1124 fail: 1126 fail:
1125 brcmf_detach(devinfo->dev); 1127 brcmf_detach(devinfo->dev);
1126 return ret; 1128 return ret;
1127 } 1129 }
1128 1130
1129 static void brcmf_usb_probe_phase2(struct device *dev, 1131 static void brcmf_usb_probe_phase2(struct device *dev,
1130 const struct firmware *fw, 1132 const struct firmware *fw,
1131 void *nvram, u32 nvlen) 1133 void *nvram, u32 nvlen)
1132 { 1134 {
1133 struct brcmf_bus *bus = dev_get_drvdata(dev); 1135 struct brcmf_bus *bus = dev_get_drvdata(dev);
1134 struct brcmf_usbdev_info *devinfo; 1136 struct brcmf_usbdev_info *devinfo;
1135 int ret; 1137 int ret;
1136 1138
1137 brcmf_dbg(USB, "Start fw downloading\n"); 1139 brcmf_dbg(USB, "Start fw downloading\n");
1138 ret = check_file(fw->data); 1140 ret = check_file(fw->data);
1139 if (ret < 0) { 1141 if (ret < 0) {
1140 brcmf_err("invalid firmware\n"); 1142 brcmf_err("invalid firmware\n");
1141 release_firmware(fw); 1143 release_firmware(fw);
1142 goto error; 1144 goto error;
1143 } 1145 }
1144 1146
1145 devinfo = bus->bus_priv.usb->devinfo; 1147 devinfo = bus->bus_priv.usb->devinfo;
1146 devinfo->image = fw->data; 1148 devinfo->image = fw->data;
1147 devinfo->image_len = fw->size; 1149 devinfo->image_len = fw->size;
1148 1150
1149 ret = brcmf_usb_fw_download(devinfo); 1151 ret = brcmf_usb_fw_download(devinfo);
1150 release_firmware(fw); 1152 release_firmware(fw);
1151 if (ret) 1153 if (ret)
1152 goto error; 1154 goto error;
1153 1155
1154 ret = brcmf_usb_bus_setup(devinfo); 1156 ret = brcmf_usb_bus_setup(devinfo);
1155 if (ret) 1157 if (ret)
1156 goto error; 1158 goto error;
1157 1159
1158 return; 1160 return;
1159 error: 1161 error:
1160 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret); 1162 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
1161 device_release_driver(dev); 1163 device_release_driver(dev);
1162 } 1164 }
1163 1165
1164 static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) 1166 static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1165 { 1167 {
1166 struct brcmf_bus *bus = NULL; 1168 struct brcmf_bus *bus = NULL;
1167 struct brcmf_usbdev *bus_pub = NULL; 1169 struct brcmf_usbdev *bus_pub = NULL;
1168 struct device *dev = devinfo->dev; 1170 struct device *dev = devinfo->dev;
1169 int ret; 1171 int ret;
1170 1172
1171 brcmf_dbg(USB, "Enter\n"); 1173 brcmf_dbg(USB, "Enter\n");
1172 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ); 1174 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
1173 if (!bus_pub) 1175 if (!bus_pub)
1174 return -ENODEV; 1176 return -ENODEV;
1175 1177
1176 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); 1178 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
1177 if (!bus) { 1179 if (!bus) {
1178 ret = -ENOMEM; 1180 ret = -ENOMEM;
1179 goto fail; 1181 goto fail;
1180 } 1182 }
1181 1183
1182 bus->dev = dev; 1184 bus->dev = dev;
1183 bus_pub->bus = bus; 1185 bus_pub->bus = bus;
1184 bus->bus_priv.usb = bus_pub; 1186 bus->bus_priv.usb = bus_pub;
1185 dev_set_drvdata(dev, bus); 1187 dev_set_drvdata(dev, bus);
1186 bus->ops = &brcmf_usb_bus_ops; 1188 bus->ops = &brcmf_usb_bus_ops;
1187 bus->proto_type = BRCMF_PROTO_BCDC; 1189 bus->proto_type = BRCMF_PROTO_BCDC;
1188 bus->always_use_fws_queue = true; 1190 bus->always_use_fws_queue = true;
1189 1191
1190 if (!brcmf_usb_dlneeded(devinfo)) { 1192 if (!brcmf_usb_dlneeded(devinfo)) {
1191 ret = brcmf_usb_bus_setup(devinfo); 1193 ret = brcmf_usb_bus_setup(devinfo);
1192 if (ret) 1194 if (ret)
1193 goto fail; 1195 goto fail;
1194 } 1196 }
1195 bus->chip = bus_pub->devid; 1197 bus->chip = bus_pub->devid;
1196 bus->chiprev = bus_pub->chiprev; 1198 bus->chiprev = bus_pub->chiprev;
1197 1199
1198 /* request firmware here */ 1200 /* request firmware here */
1199 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL, 1201 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
1200 brcmf_usb_probe_phase2); 1202 brcmf_usb_probe_phase2);
1201 return 0; 1203 return 0;
1202 1204
1203 fail: 1205 fail:
1204 /* Release resources in reverse order */ 1206 /* Release resources in reverse order */
1205 kfree(bus); 1207 kfree(bus);
1206 brcmf_usb_detach(devinfo); 1208 brcmf_usb_detach(devinfo);
1207 return ret; 1209 return ret;
1208 } 1210 }
1209 1211
1210 static void 1212 static void
1211 brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo) 1213 brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
1212 { 1214 {
1213 if (!devinfo) 1215 if (!devinfo)
1214 return; 1216 return;
1215 brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo); 1217 brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo);
1216 1218
1217 brcmf_detach(devinfo->dev); 1219 brcmf_detach(devinfo->dev);
1218 kfree(devinfo->bus_pub.bus); 1220 kfree(devinfo->bus_pub.bus);
1219 brcmf_usb_detach(devinfo); 1221 brcmf_usb_detach(devinfo);
1220 } 1222 }
1221 1223
1222 static int 1224 static int
1223 brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 1225 brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1224 { 1226 {
1225 struct usb_device *usb = interface_to_usbdev(intf); 1227 struct usb_device *usb = interface_to_usbdev(intf);
1226 struct brcmf_usbdev_info *devinfo; 1228 struct brcmf_usbdev_info *devinfo;
1227 struct usb_interface_descriptor *desc; 1229 struct usb_interface_descriptor *desc;
1228 struct usb_endpoint_descriptor *endpoint; 1230 struct usb_endpoint_descriptor *endpoint;
1229 int ret = 0; 1231 int ret = 0;
1230 u32 num_of_eps; 1232 u32 num_of_eps;
1231 u8 endpoint_num, ep; 1233 u8 endpoint_num, ep;
1232 1234
1233 brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct); 1235 brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
1234 1236
1235 devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC); 1237 devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
1236 if (devinfo == NULL) 1238 if (devinfo == NULL)
1237 return -ENOMEM; 1239 return -ENOMEM;
1238 1240
1239 devinfo->usbdev = usb; 1241 devinfo->usbdev = usb;
1240 devinfo->dev = &usb->dev; 1242 devinfo->dev = &usb->dev;
1241 usb_set_intfdata(intf, devinfo); 1243 usb_set_intfdata(intf, devinfo);
1242 1244
1243 /* Check that the device supports only one configuration */ 1245 /* Check that the device supports only one configuration */
1244 if (usb->descriptor.bNumConfigurations != 1) { 1246 if (usb->descriptor.bNumConfigurations != 1) {
1245 brcmf_err("Number of configurations: %d not supported\n", 1247 brcmf_err("Number of configurations: %d not supported\n",
1246 usb->descriptor.bNumConfigurations); 1248 usb->descriptor.bNumConfigurations);
1247 ret = -ENODEV; 1249 ret = -ENODEV;
1248 goto fail; 1250 goto fail;
1249 } 1251 }
1250 1252
1251 if ((usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) && 1253 if ((usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) &&
1252 (usb->descriptor.bDeviceClass != USB_CLASS_MISC) && 1254 (usb->descriptor.bDeviceClass != USB_CLASS_MISC) &&
1253 (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS_CONTROLLER)) { 1255 (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS_CONTROLLER)) {
1254 brcmf_err("Device class: 0x%x not supported\n", 1256 brcmf_err("Device class: 0x%x not supported\n",
1255 usb->descriptor.bDeviceClass); 1257 usb->descriptor.bDeviceClass);
1256 ret = -ENODEV; 1258 ret = -ENODEV;
1257 goto fail; 1259 goto fail;
1258 } 1260 }
1259 1261
1260 desc = &intf->altsetting[0].desc; 1262 desc = &intf->altsetting[0].desc;
1261 if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || 1263 if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
1262 (desc->bInterfaceSubClass != 2) || 1264 (desc->bInterfaceSubClass != 2) ||
1263 (desc->bInterfaceProtocol != 0xff)) { 1265 (desc->bInterfaceProtocol != 0xff)) {
1264 brcmf_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n", 1266 brcmf_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n",
1265 desc->bInterfaceNumber, desc->bInterfaceClass, 1267 desc->bInterfaceNumber, desc->bInterfaceClass,
1266 desc->bInterfaceSubClass, desc->bInterfaceProtocol); 1268 desc->bInterfaceSubClass, desc->bInterfaceProtocol);
1267 ret = -ENODEV; 1269 ret = -ENODEV;
1268 goto fail; 1270 goto fail;
1269 } 1271 }
1270 1272
1271 num_of_eps = desc->bNumEndpoints; 1273 num_of_eps = desc->bNumEndpoints;
1272 for (ep = 0; ep < num_of_eps; ep++) { 1274 for (ep = 0; ep < num_of_eps; ep++) {
1273 endpoint = &intf->altsetting[0].endpoint[ep].desc; 1275 endpoint = &intf->altsetting[0].endpoint[ep].desc;
1274 endpoint_num = usb_endpoint_num(endpoint); 1276 endpoint_num = usb_endpoint_num(endpoint);
1275 if (!usb_endpoint_xfer_bulk(endpoint)) 1277 if (!usb_endpoint_xfer_bulk(endpoint))
1276 continue; 1278 continue;
1277 if (usb_endpoint_dir_in(endpoint)) { 1279 if (usb_endpoint_dir_in(endpoint)) {
1278 if (!devinfo->rx_pipe) 1280 if (!devinfo->rx_pipe)
1279 devinfo->rx_pipe = 1281 devinfo->rx_pipe =
1280 usb_rcvbulkpipe(usb, endpoint_num); 1282 usb_rcvbulkpipe(usb, endpoint_num);
1281 } else { 1283 } else {
1282 if (!devinfo->tx_pipe) 1284 if (!devinfo->tx_pipe)
1283 devinfo->tx_pipe = 1285 devinfo->tx_pipe =
1284 usb_sndbulkpipe(usb, endpoint_num); 1286 usb_sndbulkpipe(usb, endpoint_num);
1285 } 1287 }
1286 } 1288 }
1287 if (devinfo->rx_pipe == 0) { 1289 if (devinfo->rx_pipe == 0) {
1288 brcmf_err("No RX (in) Bulk EP found\n"); 1290 brcmf_err("No RX (in) Bulk EP found\n");
1289 ret = -ENODEV; 1291 ret = -ENODEV;
1290 goto fail; 1292 goto fail;
1291 } 1293 }
1292 if (devinfo->tx_pipe == 0) { 1294 if (devinfo->tx_pipe == 0) {
1293 brcmf_err("No TX (out) Bulk EP found\n"); 1295 brcmf_err("No TX (out) Bulk EP found\n");
1294 ret = -ENODEV; 1296 ret = -ENODEV;
1295 goto fail; 1297 goto fail;
1296 } 1298 }
1297 1299
1298 devinfo->ifnum = desc->bInterfaceNumber; 1300 devinfo->ifnum = desc->bInterfaceNumber;
1299 1301
1300 if (usb->speed == USB_SPEED_SUPER) 1302 if (usb->speed == USB_SPEED_SUPER)
1301 brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n"); 1303 brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
1302 else if (usb->speed == USB_SPEED_HIGH) 1304 else if (usb->speed == USB_SPEED_HIGH)
1303 brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n"); 1305 brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
1304 else 1306 else
1305 brcmf_dbg(USB, "Broadcom full speed USB WLAN interface detected\n"); 1307 brcmf_dbg(USB, "Broadcom full speed USB WLAN interface detected\n");
1306 1308
1307 ret = brcmf_usb_probe_cb(devinfo); 1309 ret = brcmf_usb_probe_cb(devinfo);
1308 if (ret) 1310 if (ret)
1309 goto fail; 1311 goto fail;
1310 1312
1311 /* Success */ 1313 /* Success */
1312 return 0; 1314 return 0;
1313 1315
1314 fail: 1316 fail:
1315 kfree(devinfo); 1317 kfree(devinfo);
1316 usb_set_intfdata(intf, NULL); 1318 usb_set_intfdata(intf, NULL);
1317 return ret; 1319 return ret;
1318 } 1320 }
1319 1321
1320 static void 1322 static void
1321 brcmf_usb_disconnect(struct usb_interface *intf) 1323 brcmf_usb_disconnect(struct usb_interface *intf)
1322 { 1324 {
1323 struct brcmf_usbdev_info *devinfo; 1325 struct brcmf_usbdev_info *devinfo;
1324 1326
1325 brcmf_dbg(USB, "Enter\n"); 1327 brcmf_dbg(USB, "Enter\n");
1326 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf); 1328 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
1327 brcmf_usb_disconnect_cb(devinfo); 1329 brcmf_usb_disconnect_cb(devinfo);
1328 kfree(devinfo); 1330 kfree(devinfo);
1329 brcmf_dbg(USB, "Exit\n"); 1331 brcmf_dbg(USB, "Exit\n");
1330 } 1332 }
1331 1333
1332 /* 1334 /*
 1333 * Suspend only needs to signal that the bus is down and update the state. 1335 * Suspend only needs to signal that the bus is down and update the state.
1334 */ 1336 */
1335 static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state) 1337 static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
1336 { 1338 {
1337 struct usb_device *usb = interface_to_usbdev(intf); 1339 struct usb_device *usb = interface_to_usbdev(intf);
1338 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1340 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1339 1341
1340 brcmf_dbg(USB, "Enter\n"); 1342 brcmf_dbg(USB, "Enter\n");
1341 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP; 1343 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
1342 brcmf_detach(&usb->dev); 1344 brcmf_detach(&usb->dev);
1343 return 0; 1345 return 0;
1344 } 1346 }
1345 1347
1346 /* 1348 /*
1347 * (re-) start the bus. 1349 * (re-) start the bus.
1348 */ 1350 */
1349 static int brcmf_usb_resume(struct usb_interface *intf) 1351 static int brcmf_usb_resume(struct usb_interface *intf)
1350 { 1352 {
1351 struct usb_device *usb = interface_to_usbdev(intf); 1353 struct usb_device *usb = interface_to_usbdev(intf);
1352 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1354 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1353 1355
1354 brcmf_dbg(USB, "Enter\n"); 1356 brcmf_dbg(USB, "Enter\n");
1355 return brcmf_usb_bus_setup(devinfo); 1357 return brcmf_usb_bus_setup(devinfo);
1356 } 1358 }
1357 1359
1358 static int brcmf_usb_reset_resume(struct usb_interface *intf) 1360 static int brcmf_usb_reset_resume(struct usb_interface *intf)
1359 { 1361 {
1360 struct usb_device *usb = interface_to_usbdev(intf); 1362 struct usb_device *usb = interface_to_usbdev(intf);
1361 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1363 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1362 1364
1363 brcmf_dbg(USB, "Enter\n"); 1365 brcmf_dbg(USB, "Enter\n");
1364 1366
1365 return brcmf_fw_get_firmwares(&usb->dev, 0, 1367 return brcmf_fw_get_firmwares(&usb->dev, 0,
1366 brcmf_usb_get_fwname(devinfo), NULL, 1368 brcmf_usb_get_fwname(devinfo), NULL,
1367 brcmf_usb_probe_phase2); 1369 brcmf_usb_probe_phase2);
1368 } 1370 }
1369 1371
1370 #define BRCMF_USB_DEVICE(dev_id) \ 1372 #define BRCMF_USB_DEVICE(dev_id) \
1371 { USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) } 1373 { USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
1372 1374
1373 static struct usb_device_id brcmf_usb_devid_table[] = { 1375 static struct usb_device_id brcmf_usb_devid_table[] = {
1374 BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID), 1376 BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
1375 BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID), 1377 BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
1376 BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID), 1378 BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
1377 BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID), 1379 BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
1378 /* special entry for device with firmware loaded and running */ 1380 /* special entry for device with firmware loaded and running */
1379 BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID), 1381 BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
1380 { /* end: all zeroes */ } 1382 { /* end: all zeroes */ }
1381 }; 1383 };
1382 1384
1383 MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table); 1385 MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
1384 MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME); 1386 MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
1385 MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); 1387 MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
1386 MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME); 1388 MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
1387 MODULE_FIRMWARE(BRCMF_USB_43569_FW_NAME); 1389 MODULE_FIRMWARE(BRCMF_USB_43569_FW_NAME);
1388 1390
1389 static struct usb_driver brcmf_usbdrvr = { 1391 static struct usb_driver brcmf_usbdrvr = {
1390 .name = KBUILD_MODNAME, 1392 .name = KBUILD_MODNAME,
1391 .probe = brcmf_usb_probe, 1393 .probe = brcmf_usb_probe,
1392 .disconnect = brcmf_usb_disconnect, 1394 .disconnect = brcmf_usb_disconnect,
1393 .id_table = brcmf_usb_devid_table, 1395 .id_table = brcmf_usb_devid_table,
1394 .suspend = brcmf_usb_suspend, 1396 .suspend = brcmf_usb_suspend,
1395 .resume = brcmf_usb_resume, 1397 .resume = brcmf_usb_resume,
1396 .reset_resume = brcmf_usb_reset_resume, 1398 .reset_resume = brcmf_usb_reset_resume,
1397 .supports_autosuspend = 1, 1399 .supports_autosuspend = 1,
1398 .disable_hub_initiated_lpm = 1, 1400 .disable_hub_initiated_lpm = 1,
1399 }; 1401 };
1400 1402
1401 static int brcmf_usb_reset_device(struct device *dev, void *notused) 1403 static int brcmf_usb_reset_device(struct device *dev, void *notused)
1402 { 1404 {
 1403 /* The device passed in is the usb interface, so we 1405 /* The device passed in is the usb interface, so we
1404 * need to use parent here. 1406 * need to use parent here.
1405 */ 1407 */
1406 brcmf_dev_reset(dev->parent); 1408 brcmf_dev_reset(dev->parent);
1407 return 0; 1409 return 0;
1408 } 1410 }
1409 1411
1410 void brcmf_usb_exit(void) 1412 void brcmf_usb_exit(void)
1411 { 1413 {
1412 struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver; 1414 struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
1413 int ret; 1415 int ret;
1414 1416
1415 brcmf_dbg(USB, "Enter\n"); 1417 brcmf_dbg(USB, "Enter\n");
1416 ret = driver_for_each_device(drv, NULL, NULL, 1418 ret = driver_for_each_device(drv, NULL, NULL,
1417 brcmf_usb_reset_device); 1419 brcmf_usb_reset_device);
1418 usb_deregister(&brcmf_usbdrvr); 1420 usb_deregister(&brcmf_usbdrvr);
1419 } 1421 }
1420 1422
1421 void brcmf_usb_register(void) 1423 void brcmf_usb_register(void)
1422 { 1424 {
1423 brcmf_dbg(USB, "Enter\n"); 1425 brcmf_dbg(USB, "Enter\n");
1424 usb_register(&brcmf_usbdrvr); 1426 usb_register(&brcmf_usbdrvr);
1425 } 1427 }
1426 1428
net/mac80211/rc80211_minstrel_ht.c
1 /* 1 /*
2 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org> 2 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 #include <linux/netdevice.h> 8 #include <linux/netdevice.h>
9 #include <linux/types.h> 9 #include <linux/types.h>
10 #include <linux/skbuff.h> 10 #include <linux/skbuff.h>
11 #include <linux/debugfs.h> 11 #include <linux/debugfs.h>
12 #include <linux/random.h> 12 #include <linux/random.h>
13 #include <linux/ieee80211.h> 13 #include <linux/ieee80211.h>
14 #include <net/mac80211.h> 14 #include <net/mac80211.h>
15 #include "rate.h" 15 #include "rate.h"
16 #include "rc80211_minstrel.h" 16 #include "rc80211_minstrel.h"
17 #include "rc80211_minstrel_ht.h" 17 #include "rc80211_minstrel_ht.h"
18 18
19 #define AVG_PKT_SIZE 1200 19 #define AVG_PKT_SIZE 1200
20 20
21 /* Number of bits for an average sized packet */ 21 /* Number of bits for an average sized packet */
22 #define MCS_NBITS (AVG_PKT_SIZE << 3) 22 #define MCS_NBITS (AVG_PKT_SIZE << 3)
23 23
24 /* Number of symbols for a packet with (bps) bits per symbol */ 24 /* Number of symbols for a packet with (bps) bits per symbol */
25 #define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps)) 25 #define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
26 26
27 /* Transmission time (nanoseconds) for a packet containing (syms) symbols */ 27 /* Transmission time (nanoseconds) for a packet containing (syms) symbols */
28 #define MCS_SYMBOL_TIME(sgi, syms) \ 28 #define MCS_SYMBOL_TIME(sgi, syms) \
29 (sgi ? \ 29 (sgi ? \
30 ((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \ 30 ((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \
31 ((syms) * 1000) << 2 /* syms * 4 us */ \ 31 ((syms) * 1000) << 2 /* syms * 4 us */ \
32 ) 32 )
33 33
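/*
 * Worked example, not in the source: MCS0 with one stream at 20 MHz
 * carries 26 bits per symbol, so an average 1200-byte (9600-bit) packet
 * needs MCS_NSYMS(26) = DIV_ROUND_UP(9600, 26) = 370 symbols.
 * Long GI: 370 * 4 us = 1480000 ns.
 * Short GI: (370 * 18000 + 4000) / 5 = 1332800 ns, roughly 370 * 3.6 us.
 */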
34 /* Transmit duration for the raw data part of an average sized packet */ 34 /* Transmit duration for the raw data part of an average sized packet */
35 #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) 35 #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
36 36
37 /* 37 /*
38 * Define group sort order: HT40 -> SGI -> #streams 38 * Define group sort order: HT40 -> SGI -> #streams
39 */ 39 */
40 #define GROUP_IDX(_streams, _sgi, _ht40) \ 40 #define GROUP_IDX(_streams, _sgi, _ht40) \
41 MINSTREL_MAX_STREAMS * 2 * _ht40 + \ 41 MINSTREL_MAX_STREAMS * 2 * _ht40 + \
42 MINSTREL_MAX_STREAMS * _sgi + \ 42 MINSTREL_MAX_STREAMS * _sgi + \
43 _streams - 1 43 _streams - 1
44 44
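/*
 * Worked example, not in the source: assuming MINSTREL_MAX_STREAMS == 3,
 * GROUP_IDX(2, 1, 0) = 3 * 2 * 0 + 3 * 1 + (2 - 1) = 4, which matches
 * the position of MCS_GROUP(2, 1, 0) in minstrel_mcs_groups[] below:
 * it follows the three non-SGI groups and MCS_GROUP(1, 1, 0).
 */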
45 /* MCS rate information for an MCS group */ 45 /* MCS rate information for an MCS group */
46 #define MCS_GROUP(_streams, _sgi, _ht40) \ 46 #define MCS_GROUP(_streams, _sgi, _ht40) \
47 [GROUP_IDX(_streams, _sgi, _ht40)] = { \ 47 [GROUP_IDX(_streams, _sgi, _ht40)] = { \
48 .streams = _streams, \ 48 .streams = _streams, \
49 .flags = \ 49 .flags = \
50 (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ 50 (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
51 (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ 51 (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
52 .duration = { \ 52 .duration = { \
53 MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \ 53 MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \
54 MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \ 54 MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \
55 MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \ 55 MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \
56 MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \ 56 MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \
57 MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \ 57 MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \
58 MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \ 58 MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \
59 MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \ 59 MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \
60 MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \ 60 MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \
61 } \ 61 } \
62 } 62 }
63 63
64 #define CCK_DURATION(_bitrate, _short, _len) \ 64 #define CCK_DURATION(_bitrate, _short, _len) \
65 (1000 * (10 /* SIFS */ + \ 65 (1000 * (10 /* SIFS */ + \
66 (_short ? 72 + 24 : 144 + 48) + \ 66 (_short ? 72 + 24 : 144 + 48) + \
67 (8 * (_len + 4) * 10) / (_bitrate))) 67 (8 * (_len + 4) * 10) / (_bitrate)))
68 68
69 #define CCK_ACK_DURATION(_bitrate, _short) \ 69 #define CCK_ACK_DURATION(_bitrate, _short) \
70 (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \ 70 (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \
71 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE)) 71 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
72 72
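/*
 * Worked example, not in the source (durations in nanoseconds, _bitrate
 * in units of 100 kbps): CCK_ACK_DURATION(10, false), i.e. 1 Mbps with a
 * long preamble, expands to CCK_DURATION(10, false, 60) +
 * CCK_DURATION(10, false, 1200) = 1000 * (10 + 192 + 512) +
 * 1000 * (10 + 192 + 9632) = 10548000 ns, about 10.5 ms for an average
 * packet plus its ACK exchange.
 */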
73 #define CCK_DURATION_LIST(_short) \ 73 #define CCK_DURATION_LIST(_short) \
74 CCK_ACK_DURATION(10, _short), \ 74 CCK_ACK_DURATION(10, _short), \
75 CCK_ACK_DURATION(20, _short), \ 75 CCK_ACK_DURATION(20, _short), \
76 CCK_ACK_DURATION(55, _short), \ 76 CCK_ACK_DURATION(55, _short), \
77 CCK_ACK_DURATION(110, _short) 77 CCK_ACK_DURATION(110, _short)
78 78
79 #define CCK_GROUP \ 79 #define CCK_GROUP \
80 [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = { \ 80 [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = { \
81 .streams = 0, \ 81 .streams = 0, \
82 .duration = { \ 82 .duration = { \
83 CCK_DURATION_LIST(false), \ 83 CCK_DURATION_LIST(false), \
84 CCK_DURATION_LIST(true) \ 84 CCK_DURATION_LIST(true) \
85 } \ 85 } \
86 } 86 }
87 87
88 /* 88 /*
89 * To enable sufficiently targeted rate sampling, MCS rates are divided into 89 * To enable sufficiently targeted rate sampling, MCS rates are divided into
90 * groups, based on the number of streams and flags (HT40, SGI) that they 90 * groups, based on the number of streams and flags (HT40, SGI) that they
91 * use. 91 * use.
92 * 92 *
93 * The sort order must stay fixed for the GROUP_IDX macro to be applicable: 93 * The sort order must stay fixed for the GROUP_IDX macro to be applicable:
94 * HT40 -> SGI -> #streams 94 * HT40 -> SGI -> #streams
95 */ 95 */
96 const struct mcs_group minstrel_mcs_groups[] = { 96 const struct mcs_group minstrel_mcs_groups[] = {
97 MCS_GROUP(1, 0, 0), 97 MCS_GROUP(1, 0, 0),
98 MCS_GROUP(2, 0, 0), 98 MCS_GROUP(2, 0, 0),
99 #if MINSTREL_MAX_STREAMS >= 3 99 #if MINSTREL_MAX_STREAMS >= 3
100 MCS_GROUP(3, 0, 0), 100 MCS_GROUP(3, 0, 0),
101 #endif 101 #endif
102 102
103 MCS_GROUP(1, 1, 0), 103 MCS_GROUP(1, 1, 0),
104 MCS_GROUP(2, 1, 0), 104 MCS_GROUP(2, 1, 0),
105 #if MINSTREL_MAX_STREAMS >= 3 105 #if MINSTREL_MAX_STREAMS >= 3
106 MCS_GROUP(3, 1, 0), 106 MCS_GROUP(3, 1, 0),
107 #endif 107 #endif
108 108
109 MCS_GROUP(1, 0, 1), 109 MCS_GROUP(1, 0, 1),
110 MCS_GROUP(2, 0, 1), 110 MCS_GROUP(2, 0, 1),
111 #if MINSTREL_MAX_STREAMS >= 3 111 #if MINSTREL_MAX_STREAMS >= 3
112 MCS_GROUP(3, 0, 1), 112 MCS_GROUP(3, 0, 1),
113 #endif 113 #endif
114 114
115 MCS_GROUP(1, 1, 1), 115 MCS_GROUP(1, 1, 1),
116 MCS_GROUP(2, 1, 1), 116 MCS_GROUP(2, 1, 1),
117 #if MINSTREL_MAX_STREAMS >= 3 117 #if MINSTREL_MAX_STREAMS >= 3
118 MCS_GROUP(3, 1, 1), 118 MCS_GROUP(3, 1, 1),
119 #endif 119 #endif
120 120
121 /* must be last */ 121 /* must be last */
122 CCK_GROUP 122 CCK_GROUP
123 }; 123 };
124 124
125 #define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1) 125 #define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1)
126 126
127 static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; 127 static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
128 128
129 static void 129 static void
130 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); 130 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
131 131
132 /* 132 /*
133 * Look up an MCS group index based on mac80211 rate information 133 * Look up an MCS group index based on mac80211 rate information
134 */ 134 */
135 static int 135 static int
136 minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) 136 minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
137 { 137 {
138 return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, 138 return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
139 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), 139 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
140 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); 140 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
141 } 141 }
142 142
143 static struct minstrel_rate_stats * 143 static struct minstrel_rate_stats *
144 minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, 144 minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
145 struct ieee80211_tx_rate *rate) 145 struct ieee80211_tx_rate *rate)
146 { 146 {
147 int group, idx; 147 int group, idx;
148 148
149 if (rate->flags & IEEE80211_TX_RC_MCS) { 149 if (rate->flags & IEEE80211_TX_RC_MCS) {
150 group = minstrel_ht_get_group_idx(rate); 150 group = minstrel_ht_get_group_idx(rate);
151 idx = rate->idx % 8; 151 idx = rate->idx % 8;
152 } else { 152 } else {
153 group = MINSTREL_CCK_GROUP; 153 group = MINSTREL_CCK_GROUP;
154 154
155 for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) 155 for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
156 if (rate->idx == mp->cck_rates[idx]) 156 if (rate->idx == mp->cck_rates[idx])
157 break; 157 break;
158 158
159 /* short preamble */ 159 /* short preamble */
160 if (!(mi->groups[group].supported & BIT(idx))) 160 if (!(mi->groups[group].supported & BIT(idx)))
161 idx += 4; 161 idx += 4;
162 } 162 }
163 return &mi->groups[group].rates[idx]; 163 return &mi->groups[group].rates[idx];
164 } 164 }
165 165
166 static inline struct minstrel_rate_stats * 166 static inline struct minstrel_rate_stats *
167 minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) 167 minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
168 { 168 {
169 return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; 169 return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
170 } 170 }
171 171
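Throughout this file a single index encodes both coordinates of a rate: index = group * MCS_GROUP_RATES + rate-within-group, with MCS_GROUP_RATES evidently 8 (the "rate->idx % 8" above and the eight duration slots per group both point that way). Division and modulo recover the two parts, so, for example, index 25 names rate 1 of group 3.
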
172 172
173 /* 173 /*
174 * Recalculate success probabilities and counters for a rate using EWMA 174 * Recalculate success probabilities and counters for a rate using EWMA
175 */ 175 */
176 static void 176 static void
177 minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr) 177 minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
178 { 178 {
179 if (unlikely(mr->attempts > 0)) { 179 if (unlikely(mr->attempts > 0)) {
180 mr->sample_skipped = 0; 180 mr->sample_skipped = 0;
181 mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts); 181 mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
182 if (!mr->att_hist) 182 if (!mr->att_hist)
183 mr->probability = mr->cur_prob; 183 mr->probability = mr->cur_prob;
184 else 184 else
185 mr->probability = minstrel_ewma(mr->probability, 185 mr->probability = minstrel_ewma(mr->probability,
186 mr->cur_prob, EWMA_LEVEL); 186 mr->cur_prob, EWMA_LEVEL);
187 mr->att_hist += mr->attempts; 187 mr->att_hist += mr->attempts;
188 mr->succ_hist += mr->success; 188 mr->succ_hist += mr->success;
189 } else { 189 } else {
190 mr->sample_skipped++; 190 mr->sample_skipped++;
191 } 191 }
192 mr->last_success = mr->success; 192 mr->last_success = mr->success;
193 mr->last_attempts = mr->attempts; 193 mr->last_attempts = mr->attempts;
194 mr->success = 0; 194 mr->success = 0;
195 mr->attempts = 0; 195 mr->attempts = 0;
196 } 196 }
197 197
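Assuming minstrel_ewma() follows the usual Minstrel fixed-point definition, roughly new = (cur * (DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / DIV, the running probability tracks each update interval while damping short-lived bursts; the att_hist test above seeds it from the first real measurement instead of averaging the first sample against zero.
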
198 /* 198 /*
199 * Calculate throughput based on the average A-MPDU length, taking into account 199 * Calculate throughput based on the average A-MPDU length, taking into account
200 * the expected number of retransmissions and their expected length 200 * the expected number of retransmissions and their expected length
201 */ 201 */
202 static void 202 static void
203 minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) 203 minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
204 { 204 {
205 struct minstrel_rate_stats *mr; 205 struct minstrel_rate_stats *mr;
206 unsigned int nsecs = 0; 206 unsigned int nsecs = 0;
207 unsigned int tp; 207 unsigned int tp;
208 unsigned int prob; 208 unsigned int prob;
209 209
210 mr = &mi->groups[group].rates[rate]; 210 mr = &mi->groups[group].rates[rate];
211 prob = mr->probability; 211 prob = mr->probability;
212 212
213 if (prob < MINSTREL_FRAC(1, 10)) { 213 if (prob < MINSTREL_FRAC(1, 10)) {
214 mr->cur_tp = 0; 214 mr->cur_tp = 0;
215 return; 215 return;
216 } 216 }
217 217
218 /* 218 /*
219 * For the throughput calculation, limit the probability value to 90% to 219 * For the throughput calculation, limit the probability value to 90% to
220 * account for collision-related packet error rate fluctuation 220 * account for collision-related packet error rate fluctuation
221 */ 221 */
222 if (prob > MINSTREL_FRAC(9, 10)) 222 if (prob > MINSTREL_FRAC(9, 10))
223 prob = MINSTREL_FRAC(9, 10); 223 prob = MINSTREL_FRAC(9, 10);
224 224
225 if (group != MINSTREL_CCK_GROUP) 225 if (group != MINSTREL_CCK_GROUP)
226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
227 227
228 nsecs += minstrel_mcs_groups[group].duration[rate]; 228 nsecs += minstrel_mcs_groups[group].duration[rate];
229 229
230 /* prob is scaled - see MINSTREL_FRAC above */ 230 /* prob is scaled - see MINSTREL_FRAC above */
231 tp = 1000000 * ((prob * 1000) / nsecs); 231 tp = 1000000 * ((prob * 1000) / nsecs);
232 mr->cur_tp = MINSTREL_TRUNC(tp); 232 mr->cur_tp = MINSTREL_TRUNC(tp);
233 } 233 }
234 234
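Assuming MINSTREL_FRAC() scales by a fixed-point factor that MINSTREL_TRUNC() later strips, the final expression reduces to expected successful transmissions per second: 1000000 * ((prob * 1000) / nsecs) is prob * 10^9 / nsecs up to integer truncation, so cur_tp is approximately the success probability times the number of frames of this airtime that fit into one second.
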
235 /* 235 /*
236 * Find & sort topmost throughput rates 236 * Find & sort topmost throughput rates
237 * 237 *
238 * If multiple rates provide equal throughput, the sorting is based on their 238 * If multiple rates provide equal throughput, the sorting is based on their
239 * current success probability. Higher success probability is preferred among 239 * current success probability. Higher success probability is preferred among
240 * MCS groups; CCK rates do not support aggregation and therefore sort last. 240 * MCS groups; CCK rates do not support aggregation and therefore sort last.
241 */ 241 */
242 static void 242 static void
243 minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, 243 minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
244 u8 *tp_list) 244 u8 *tp_list)
245 { 245 {
246 int cur_group, cur_idx, cur_thr, cur_prob; 246 int cur_group, cur_idx, cur_thr, cur_prob;
247 int tmp_group, tmp_idx, tmp_thr, tmp_prob; 247 int tmp_group, tmp_idx, tmp_thr, tmp_prob;
248 int j = MAX_THR_RATES; 248 int j = MAX_THR_RATES;
249 249
250 cur_group = index / MCS_GROUP_RATES; 250 cur_group = index / MCS_GROUP_RATES;
251 cur_idx = index % MCS_GROUP_RATES; 251 cur_idx = index % MCS_GROUP_RATES;
252 cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; 252 cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
253 cur_prob = mi->groups[cur_group].rates[cur_idx].probability; 253 cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
254 254
255 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; 255 do {
256 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
257 tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
258 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
259
260 while (j > 0 && (cur_thr > tmp_thr ||
261 (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
262 j--;
263 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; 256 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
264 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; 257 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
265 tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; 258 tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
266 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; 259 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
267 } 260 if (cur_thr < tmp_thr ||
261 (cur_thr == tmp_thr && cur_prob <= tmp_prob))
262 break;
263 j--;
264 } while (j > 0);
268 265
269 if (j < MAX_THR_RATES - 1) { 266 if (j < MAX_THR_RATES - 1) {
270 memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * 267 memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
271 (MAX_THR_RATES - (j + 1)))); 268 (MAX_THR_RATES - (j + 1))));
272 } 269 }
273 if (j < MAX_THR_RATES) 270 if (j < MAX_THR_RATES)
274 tp_list[j] = index; 271 tp_list[j] = index;
275 } 272 }
276 273
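The restructured loop above is the substance of the fix: the old version decremented j and then read tp_list[j - 1], so once j hit 0 it fetched tp_list[-1] from whatever happened to be on the stack; the do/while probes tp_list[j - 1] first and breaks before decrementing, so the index never drops below 0. A minimal standalone sketch of the same insertion pattern (plain C with made-up values and a hypothetical insert_sorted() helper, not the kernel code):

	#include <stdio.h>
	#include <string.h>

	#define MAX_THR_RATES 4

	/* Insert val into a descending list; entries pushed past the end drop off. */
	static void insert_sorted(int *list, int val)
	{
		int j = MAX_THR_RATES;

		do {
			if (val <= list[j - 1]) /* read only while j > 0 */
				break;
			j--;
		} while (j > 0);

		if (j < MAX_THR_RATES - 1)
			memmove(&list[j + 1], &list[j],
				sizeof(*list) * (MAX_THR_RATES - (j + 1)));
		if (j < MAX_THR_RATES)
			list[j] = val;
	}

	int main(void)
	{
		int list[MAX_THR_RATES] = { 90, 70, 50, 30 };

		insert_sorted(list, 60);
		for (int j = 0; j < MAX_THR_RATES; j++)
			printf("%d ", list[j]); /* prints: 90 70 60 50 */
		printf("\n");
		return 0;
	}
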
277 /* 274 /*
278 * Find and set the topmost probability rate per sta and per group 275 * Find and set the topmost probability rate per sta and per group
279 */ 276 */
280 static void 277 static void
281 minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index) 278 minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index)
282 { 279 {
283 struct minstrel_mcs_group_data *mg; 280 struct minstrel_mcs_group_data *mg;
284 struct minstrel_rate_stats *mr; 281 struct minstrel_rate_stats *mr;
285 int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group; 282 int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;
286 283
287 mg = &mi->groups[index / MCS_GROUP_RATES]; 284 mg = &mi->groups[index / MCS_GROUP_RATES];
288 mr = &mg->rates[index % MCS_GROUP_RATES]; 285 mr = &mg->rates[index % MCS_GROUP_RATES];
289 286
290 tmp_group = mi->max_prob_rate / MCS_GROUP_RATES; 287 tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
291 tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES; 288 tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
292 tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; 289 tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
293 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; 290 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
294 291
295 /* If max_tp_rate[0] is from an MCS group, max_prob_rate must be selected 292 /* If max_tp_rate[0] is from an MCS group, max_prob_rate must be selected
296 * from an MCS group as well, since CCK rates do not allow aggregation */ 293 * from an MCS group as well, since CCK rates do not allow aggregation */
297 max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES; 294 max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
298 if ((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) && 295 if ((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
299 (max_tp_group != MINSTREL_CCK_GROUP)) 296 (max_tp_group != MINSTREL_CCK_GROUP))
300 return; 297 return;
301 298
302 if (mr->probability > MINSTREL_FRAC(75, 100)) { 299 if (mr->probability > MINSTREL_FRAC(75, 100)) {
303 if (mr->cur_tp > tmp_tp) 300 if (mr->cur_tp > tmp_tp)
304 mi->max_prob_rate = index; 301 mi->max_prob_rate = index;
305 if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp) 302 if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
306 mg->max_group_prob_rate = index; 303 mg->max_group_prob_rate = index;
307 } else { 304 } else {
308 if (mr->probability > tmp_prob) 305 if (mr->probability > tmp_prob)
309 mi->max_prob_rate = index; 306 mi->max_prob_rate = index;
310 if (mr->probability > mg->rates[mg->max_group_prob_rate].probability) 307 if (mr->probability > mg->rates[mg->max_group_prob_rate].probability)
311 mg->max_group_prob_rate = index; 308 mg->max_group_prob_rate = index;
312 } 309 }
313 } 310 }
314 311
315 312
316 /* 313 /*
317 * Assign a new rate set per sta, using CCK rates only if the fastest 314 * Assign a new rate set per sta, using CCK rates only if the fastest
318 * rate (max_tp_rate[0]) is from the CCK group. This prevents sorted 315 * rate (max_tp_rate[0]) is from the CCK group. This prevents sorted
319 * rate sets in which MCS and CCK rates are mixed, because CCK rates 316 * rate sets in which MCS and CCK rates are mixed, because CCK rates
320 * cannot use aggregation. 317 * cannot use aggregation.
321 */ 318 */
322 static void 319 static void
323 minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, 320 minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
324 u8 tmp_mcs_tp_rate[MAX_THR_RATES], 321 u8 tmp_mcs_tp_rate[MAX_THR_RATES],
325 u8 tmp_cck_tp_rate[MAX_THR_RATES]) 322 u8 tmp_cck_tp_rate[MAX_THR_RATES])
326 { 323 {
327 unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp; 324 unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
328 int i; 325 int i;
329 326
330 tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES; 327 tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
331 tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES; 328 tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
332 tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; 329 tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
333 330
334 tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES; 331 tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
335 tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES; 332 tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
336 tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; 333 tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
337 334
338 if (tmp_cck_tp > tmp_mcs_tp) { 335 if (tmp_cck_tp > tmp_mcs_tp) {
339 for (i = 0; i < MAX_THR_RATES; i++) { 336 for (i = 0; i < MAX_THR_RATES; i++) {
340 minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i], 337 minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
341 tmp_mcs_tp_rate); 338 tmp_mcs_tp_rate);
342 } 339 }
343 } 340 }
344 341
345 } 342 }
346 343
347 /* 344 /*
348 * Try to increase the robustness of the max_prob rate by decreasing the 345 * Try to increase the robustness of the max_prob rate by decreasing the
349 * number of streams if possible. 346 * number of streams if possible.
350 */ 347 */
351 static inline void 348 static inline void
352 minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) 349 minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
353 { 350 {
354 struct minstrel_mcs_group_data *mg; 351 struct minstrel_mcs_group_data *mg;
355 struct minstrel_rate_stats *mr; 352 struct minstrel_rate_stats *mr;
356 int tmp_max_streams, group; 353 int tmp_max_streams, group;
357 int tmp_tp = 0; 354 int tmp_tp = 0;
358 355
359 tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / 356 tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
360 MCS_GROUP_RATES].streams; 357 MCS_GROUP_RATES].streams;
361 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { 358 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
362 mg = &mi->groups[group]; 359 mg = &mi->groups[group];
363 if (!mg->supported || group == MINSTREL_CCK_GROUP) 360 if (!mg->supported || group == MINSTREL_CCK_GROUP)
364 continue; 361 continue;
365 mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate); 362 mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
366 if (tmp_tp < mr->cur_tp && 363 if (tmp_tp < mr->cur_tp &&
367 (minstrel_mcs_groups[group].streams < tmp_max_streams)) { 364 (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
368 mi->max_prob_rate = mg->max_group_prob_rate; 365 mi->max_prob_rate = mg->max_group_prob_rate;
369 tmp_tp = mr->cur_tp; 366 tmp_tp = mr->cur_tp;
370 } 367 }
371 } 368 }
372 } 369 }
373 370
374 /* 371 /*
375 * Update rate statistics and select new primary rates 372 * Update rate statistics and select new primary rates
376 * 373 *
377 * Rules for rate selection: 374 * Rules for rate selection:
378 * - max_prob_rate must use only one stream, as a tradeoff between delivery 375 * - max_prob_rate must use only one stream, as a tradeoff between delivery
379 * probability and throughput during strong fluctuations 376 * probability and throughput during strong fluctuations
380 * - as long as the max prob rate has a probability of more than 75%, pick 377 * - as long as the max prob rate has a probability of more than 75%, pick
381 * higher throughput rates, even if the probability is a bit lower 378 * higher throughput rates, even if the probability is a bit lower
382 */ 379 */
383 static void 380 static void
384 minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) 381 minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
385 { 382 {
386 struct minstrel_mcs_group_data *mg; 383 struct minstrel_mcs_group_data *mg;
387 struct minstrel_rate_stats *mr; 384 struct minstrel_rate_stats *mr;
388 int group, i, j; 385 int group, i, j;
389 u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; 386 u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
390 u8 tmp_cck_tp_rate[MAX_THR_RATES], index; 387 u8 tmp_cck_tp_rate[MAX_THR_RATES], index;
391 388
392 if (mi->ampdu_packets > 0) { 389 if (mi->ampdu_packets > 0) {
393 mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, 390 mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
394 MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL); 391 MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
395 mi->ampdu_len = 0; 392 mi->ampdu_len = 0;
396 mi->ampdu_packets = 0; 393 mi->ampdu_packets = 0;
397 } 394 }
398 395
399 mi->sample_slow = 0; 396 mi->sample_slow = 0;
400 mi->sample_count = 0; 397 mi->sample_count = 0;
401 398
402 /* Initialize global rate indexes */ 399 /* Initialize global rate indexes */
403 for (j = 0; j < MAX_THR_RATES; j++) { 400 for (j = 0; j < MAX_THR_RATES; j++) {
404 tmp_mcs_tp_rate[j] = 0; 401 tmp_mcs_tp_rate[j] = 0;
405 tmp_cck_tp_rate[j] = 0; 402 tmp_cck_tp_rate[j] = 0;
406 } 403 }
407 404
408 /* Find best rate sets within all MCS groups */ 405 /* Find best rate sets within all MCS groups */
409 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { 406 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
410 407
411 mg = &mi->groups[group]; 408 mg = &mi->groups[group];
412 if (!mg->supported) 409 if (!mg->supported)
413 continue; 410 continue;
414 411
415 mi->sample_count++; 412 mi->sample_count++;
416 413
417 /* (re)Initialize group rate indexes */ 414 /* (re)Initialize group rate indexes */
418 for (j = 0; j < MAX_THR_RATES; j++) 415 for (j = 0; j < MAX_THR_RATES; j++)
419 tmp_group_tp_rate[j] = group; 416 tmp_group_tp_rate[j] = group;
420 417
421 for (i = 0; i < MCS_GROUP_RATES; i++) { 418 for (i = 0; i < MCS_GROUP_RATES; i++) {
422 if (!(mg->supported & BIT(i))) 419 if (!(mg->supported & BIT(i)))
423 continue; 420 continue;
424 421
425 index = MCS_GROUP_RATES * group + i; 422 index = MCS_GROUP_RATES * group + i;
426 423
427 mr = &mg->rates[i]; 424 mr = &mg->rates[i];
428 mr->retry_updated = false; 425 mr->retry_updated = false;
429 minstrel_calc_rate_ewma(mr); 426 minstrel_calc_rate_ewma(mr);
430 minstrel_ht_calc_tp(mi, group, i); 427 minstrel_ht_calc_tp(mi, group, i);
431 428
432 if (!mr->cur_tp) 429 if (!mr->cur_tp)
433 continue; 430 continue;
434 431
435 /* Find max throughput rate set */ 432 /* Find max throughput rate set */
436 if (group != MINSTREL_CCK_GROUP) { 433 if (group != MINSTREL_CCK_GROUP) {
437 minstrel_ht_sort_best_tp_rates(mi, index, 434 minstrel_ht_sort_best_tp_rates(mi, index,
438 tmp_mcs_tp_rate); 435 tmp_mcs_tp_rate);
439 } else if (group == MINSTREL_CCK_GROUP) { 436 } else if (group == MINSTREL_CCK_GROUP) {
440 minstrel_ht_sort_best_tp_rates(mi, index, 437 minstrel_ht_sort_best_tp_rates(mi, index,
441 tmp_cck_tp_rate); 438 tmp_cck_tp_rate);
442 } 439 }
443 440
444 /* Find max throughput rate set within a group */ 441 /* Find max throughput rate set within a group */
445 minstrel_ht_sort_best_tp_rates(mi, index, 442 minstrel_ht_sort_best_tp_rates(mi, index,
446 tmp_group_tp_rate); 443 tmp_group_tp_rate);
447 444
448 /* Find max probability rate per group and global */ 445 /* Find max probability rate per group and global */
449 minstrel_ht_set_best_prob_rate(mi, index); 446 minstrel_ht_set_best_prob_rate(mi, index);
450 } 447 }
451 448
452 memcpy(mg->max_group_tp_rate, tmp_group_tp_rate, 449 memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
453 sizeof(mg->max_group_tp_rate)); 450 sizeof(mg->max_group_tp_rate));
454 } 451 }
455 452
456 /* Assign new rate set per sta */ 453 /* Assign new rate set per sta */
457 minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate); 454 minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
458 memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate)); 455 memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));
459 456
460 /* Try to increase robustness of max_prob_rate */ 457 /* Try to increase robustness of max_prob_rate */
461 minstrel_ht_prob_rate_reduce_streams(mi); 458 minstrel_ht_prob_rate_reduce_streams(mi);
462 459
463 /* try to sample all available rates during each interval */ 460 /* try to sample all available rates during each interval */
464 mi->sample_count *= 8; 461 mi->sample_count *= 8;
465 462
466 #ifdef CONFIG_MAC80211_DEBUGFS 463 #ifdef CONFIG_MAC80211_DEBUGFS
467 /* use fixed index if set */ 464 /* use fixed index if set */
468 if (mp->fixed_rate_idx != -1) { 465 if (mp->fixed_rate_idx != -1) {
469 for (i = 0; i < 4; i++) 466 for (i = 0; i < 4; i++)
470 mi->max_tp_rate[i] = mp->fixed_rate_idx; 467 mi->max_tp_rate[i] = mp->fixed_rate_idx;
471 mi->max_prob_rate = mp->fixed_rate_idx; 468 mi->max_prob_rate = mp->fixed_rate_idx;
472 } 469 }
473 #endif 470 #endif
474 471
475 /* Reset update timer */ 472 /* Reset update timer */
476 mi->stats_update = jiffies; 473 mi->stats_update = jiffies;
477 } 474 }
478 475
479 static bool 476 static bool
480 minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate) 477 minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
481 { 478 {
482 if (rate->idx < 0) 479 if (rate->idx < 0)
483 return false; 480 return false;
484 481
485 if (!rate->count) 482 if (!rate->count)
486 return false; 483 return false;
487 484
488 if (rate->flags & IEEE80211_TX_RC_MCS) 485 if (rate->flags & IEEE80211_TX_RC_MCS)
489 return true; 486 return true;
490 487
491 return rate->idx == mp->cck_rates[0] || 488 return rate->idx == mp->cck_rates[0] ||
492 rate->idx == mp->cck_rates[1] || 489 rate->idx == mp->cck_rates[1] ||
493 rate->idx == mp->cck_rates[2] || 490 rate->idx == mp->cck_rates[2] ||
494 rate->idx == mp->cck_rates[3]; 491 rate->idx == mp->cck_rates[3];
495 } 492 }
496 493
497 static void 494 static void
498 minstrel_next_sample_idx(struct minstrel_ht_sta *mi) 495 minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
499 { 496 {
500 struct minstrel_mcs_group_data *mg; 497 struct minstrel_mcs_group_data *mg;
501 498
502 for (;;) { 499 for (;;) {
503 mi->sample_group++; 500 mi->sample_group++;
504 mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); 501 mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
505 mg = &mi->groups[mi->sample_group]; 502 mg = &mi->groups[mi->sample_group];
506 503
507 if (!mg->supported) 504 if (!mg->supported)
508 continue; 505 continue;
509 506
510 if (++mg->index >= MCS_GROUP_RATES) { 507 if (++mg->index >= MCS_GROUP_RATES) {
511 mg->index = 0; 508 mg->index = 0;
512 if (++mg->column >= ARRAY_SIZE(sample_table)) 509 if (++mg->column >= ARRAY_SIZE(sample_table))
513 mg->column = 0; 510 mg->column = 0;
514 } 511 }
515 break; 512 break;
516 } 513 }
517 } 514 }
518 515
519 static void 516 static void
520 minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary) 517 minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary)
521 { 518 {
522 int group, orig_group; 519 int group, orig_group;
523 520
524 orig_group = group = *idx / MCS_GROUP_RATES; 521 orig_group = group = *idx / MCS_GROUP_RATES;
525 while (group > 0) { 522 while (group > 0) {
526 group--; 523 group--;
527 524
528 if (!mi->groups[group].supported) 525 if (!mi->groups[group].supported)
529 continue; 526 continue;
530 527
531 if (minstrel_mcs_groups[group].streams > 528 if (minstrel_mcs_groups[group].streams >
532 minstrel_mcs_groups[orig_group].streams) 529 minstrel_mcs_groups[orig_group].streams)
533 continue; 530 continue;
534 531
535 if (primary) 532 if (primary)
536 *idx = mi->groups[group].max_group_tp_rate[0]; 533 *idx = mi->groups[group].max_group_tp_rate[0];
537 else 534 else
538 *idx = mi->groups[group].max_group_tp_rate[1]; 535 *idx = mi->groups[group].max_group_tp_rate[1];
539 break; 536 break;
540 } 537 }
541 } 538 }
542 539
543 static void 540 static void
544 minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) 541 minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
545 { 542 {
546 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 543 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
547 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 544 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
548 u16 tid; 545 u16 tid;
549 546
550 if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) 547 if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
551 return; 548 return;
552 549
553 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) 550 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
554 return; 551 return;
555 552
556 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 553 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
557 if (likely(sta->ampdu_mlme.tid_tx[tid])) 554 if (likely(sta->ampdu_mlme.tid_tx[tid]))
558 return; 555 return;
559 556
560 if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) 557 if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
561 return; 558 return;
562 559
563 ieee80211_start_tx_ba_session(pubsta, tid, 5000); 560 ieee80211_start_tx_ba_session(pubsta, tid, 5000);
564 } 561 }
565 562
566 static void 563 static void
567 minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, 564 minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
568 struct ieee80211_sta *sta, void *priv_sta, 565 struct ieee80211_sta *sta, void *priv_sta,
569 struct sk_buff *skb) 566 struct sk_buff *skb)
570 { 567 {
571 struct minstrel_ht_sta_priv *msp = priv_sta; 568 struct minstrel_ht_sta_priv *msp = priv_sta;
572 struct minstrel_ht_sta *mi = &msp->ht; 569 struct minstrel_ht_sta *mi = &msp->ht;
573 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 570 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
574 struct ieee80211_tx_rate *ar = info->status.rates; 571 struct ieee80211_tx_rate *ar = info->status.rates;
575 struct minstrel_rate_stats *rate, *rate2; 572 struct minstrel_rate_stats *rate, *rate2;
576 struct minstrel_priv *mp = priv; 573 struct minstrel_priv *mp = priv;
577 bool last, update = false; 574 bool last, update = false;
578 int i; 575 int i;
579 576
580 if (!msp->is_ht) 577 if (!msp->is_ht)
581 return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); 578 return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);
582 579
583 /* This packet was aggregated but doesn't carry status info */ 580 /* This packet was aggregated but doesn't carry status info */
584 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && 581 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
585 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 582 !(info->flags & IEEE80211_TX_STAT_AMPDU))
586 return; 583 return;
587 584
588 if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) { 585 if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
589 info->status.ampdu_ack_len = 586 info->status.ampdu_ack_len =
590 (info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0); 587 (info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
591 info->status.ampdu_len = 1; 588 info->status.ampdu_len = 1;
592 } 589 }
593 590
594 mi->ampdu_packets++; 591 mi->ampdu_packets++;
595 mi->ampdu_len += info->status.ampdu_len; 592 mi->ampdu_len += info->status.ampdu_len;
596 593
597 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { 594 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
598 mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); 595 mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
599 mi->sample_tries = 1; 596 mi->sample_tries = 1;
600 mi->sample_count--; 597 mi->sample_count--;
601 } 598 }
602 599
603 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 600 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
604 mi->sample_packets += info->status.ampdu_len; 601 mi->sample_packets += info->status.ampdu_len;
605 602
606 last = !minstrel_ht_txstat_valid(mp, &ar[0]); 603 last = !minstrel_ht_txstat_valid(mp, &ar[0]);
607 for (i = 0; !last; i++) { 604 for (i = 0; !last; i++) {
608 last = (i == IEEE80211_TX_MAX_RATES - 1) || 605 last = (i == IEEE80211_TX_MAX_RATES - 1) ||
609 !minstrel_ht_txstat_valid(mp, &ar[i + 1]); 606 !minstrel_ht_txstat_valid(mp, &ar[i + 1]);
610 607
611 rate = minstrel_ht_get_stats(mp, mi, &ar[i]); 608 rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
612 609
613 if (last) 610 if (last)
614 rate->success += info->status.ampdu_ack_len; 611 rate->success += info->status.ampdu_ack_len;
615 612
616 rate->attempts += ar[i].count * info->status.ampdu_len; 613 rate->attempts += ar[i].count * info->status.ampdu_len;
617 } 614 }
618 615
619 /* 616 /*
620 * Check for sudden death of spatial multiplexing; 617 * Check for sudden death of spatial multiplexing;
621 * downgrade to a lower number of streams if necessary. 618 * downgrade to a lower number of streams if necessary.
622 */ 619 */
623 rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); 620 rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
624 if (rate->attempts > 30 && 621 if (rate->attempts > 30 &&
625 MINSTREL_FRAC(rate->success, rate->attempts) < 622 MINSTREL_FRAC(rate->success, rate->attempts) <
626 MINSTREL_FRAC(20, 100)) { 623 MINSTREL_FRAC(20, 100)) {
627 minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); 624 minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
628 update = true; 625 update = true;
629 } 626 }
630 627
631 rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); 628 rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
632 if (rate2->attempts > 30 && 629 if (rate2->attempts > 30 &&
633 MINSTREL_FRAC(rate2->success, rate2->attempts) < 630 MINSTREL_FRAC(rate2->success, rate2->attempts) <
634 MINSTREL_FRAC(20, 100)) { 631 MINSTREL_FRAC(20, 100)) {
635 minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); 632 minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
636 update = true; 633 update = true;
637 } 634 }
638 635
639 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { 636 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
640 update = true; 637 update = true;
641 minstrel_ht_update_stats(mp, mi); 638 minstrel_ht_update_stats(mp, mi);
642 if (!(info->flags & IEEE80211_TX_CTL_AMPDU) && 639 if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
643 mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) 640 mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
644 minstrel_aggr_check(sta, skb); 641 minstrel_aggr_check(sta, skb);
645 } 642 }
646 643
647 if (update) 644 if (update)
648 minstrel_ht_update_rates(mp, mi); 645 minstrel_ht_update_rates(mp, mi);
649 } 646 }
650 647
651 static void 648 static void
652 minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, 649 minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
653 int index) 650 int index)
654 { 651 {
655 struct minstrel_rate_stats *mr; 652 struct minstrel_rate_stats *mr;
656 const struct mcs_group *group; 653 const struct mcs_group *group;
657 unsigned int tx_time, tx_time_rtscts, tx_time_data; 654 unsigned int tx_time, tx_time_rtscts, tx_time_data;
658 unsigned int cw = mp->cw_min; 655 unsigned int cw = mp->cw_min;
659 unsigned int ctime = 0; 656 unsigned int ctime = 0;
660 unsigned int t_slot = 9; /* FIXME */ 657 unsigned int t_slot = 9; /* FIXME */
661 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); 658 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
662 unsigned int overhead = 0, overhead_rtscts = 0; 659 unsigned int overhead = 0, overhead_rtscts = 0;
663 660
664 mr = minstrel_get_ratestats(mi, index); 661 mr = minstrel_get_ratestats(mi, index);
665 if (mr->probability < MINSTREL_FRAC(1, 10)) { 662 if (mr->probability < MINSTREL_FRAC(1, 10)) {
666 mr->retry_count = 1; 663 mr->retry_count = 1;
667 mr->retry_count_rtscts = 1; 664 mr->retry_count_rtscts = 1;
668 return; 665 return;
669 } 666 }
670 667
671 mr->retry_count = 2; 668 mr->retry_count = 2;
672 mr->retry_count_rtscts = 2; 669 mr->retry_count_rtscts = 2;
673 mr->retry_updated = true; 670 mr->retry_updated = true;
674 671
675 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 672 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
676 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000; 673 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;
677 674
678 /* Contention time for first 2 tries */ 675 /* Contention time for first 2 tries */
679 ctime = (t_slot * cw) >> 1; 676 ctime = (t_slot * cw) >> 1;
680 cw = min((cw << 1) | 1, mp->cw_max); 677 cw = min((cw << 1) | 1, mp->cw_max);
681 ctime += (t_slot * cw) >> 1; 678 ctime += (t_slot * cw) >> 1;
682 cw = min((cw << 1) | 1, mp->cw_max); 679 cw = min((cw << 1) | 1, mp->cw_max);
683 680
684 if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) { 681 if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
685 overhead = mi->overhead; 682 overhead = mi->overhead;
686 overhead_rtscts = mi->overhead_rtscts; 683 overhead_rtscts = mi->overhead_rtscts;
687 } 684 }
688 685
689 /* Total TX time for data and Contention after first 2 tries */ 686 /* Total TX time for data and Contention after first 2 tries */
690 tx_time = ctime + 2 * (overhead + tx_time_data); 687 tx_time = ctime + 2 * (overhead + tx_time_data);
691 tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data); 688 tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);
692 689
693 /* See how many more tries we can fit inside segment size */ 690 /* See how many more tries we can fit inside segment size */
694 do { 691 do {
695 /* Contention time for this try */ 692 /* Contention time for this try */
696 ctime = (t_slot * cw) >> 1; 693 ctime = (t_slot * cw) >> 1;
697 cw = min((cw << 1) | 1, mp->cw_max); 694 cw = min((cw << 1) | 1, mp->cw_max);
698 695
699 /* Total TX time after this try */ 696 /* Total TX time after this try */
700 tx_time += ctime + overhead + tx_time_data; 697 tx_time += ctime + overhead + tx_time_data;
701 tx_time_rtscts += ctime + overhead_rtscts + tx_time_data; 698 tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
702 699
703 if (tx_time_rtscts < mp->segment_size) 700 if (tx_time_rtscts < mp->segment_size)
704 mr->retry_count_rtscts++; 701 mr->retry_count_rtscts++;
705 } while ((tx_time < mp->segment_size) && 702 } while ((tx_time < mp->segment_size) &&
706 (++mr->retry_count < mp->max_retry)); 703 (++mr->retry_count < mp->max_retry));
707 } 704 }
708 705
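The contention model here is plain binary exponential backoff: (t_slot * cw) >> 1 is the average backoff (half the window) in microseconds, and cw = min((cw << 1) | 1, mp->cw_max) doubles the window after each try. As a worked example, with the conventional cw_min of 15 and the 9 us slot hard-coded above, the first three tries cost on average 67, 139 and 283 us of backoff, which is what gets weighed against mp->segment_size.
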
709 706
710 static void 707 static void
711 minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, 708 minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
712 struct ieee80211_sta_rates *ratetbl, int offset, int index) 709 struct ieee80211_sta_rates *ratetbl, int offset, int index)
713 { 710 {
714 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 711 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
715 struct minstrel_rate_stats *mr; 712 struct minstrel_rate_stats *mr;
716 u8 idx; 713 u8 idx;
717 u16 flags; 714 u16 flags;
718 715
719 mr = minstrel_get_ratestats(mi, index); 716 mr = minstrel_get_ratestats(mi, index);
720 if (!mr->retry_updated) 717 if (!mr->retry_updated)
721 minstrel_calc_retransmit(mp, mi, index); 718 minstrel_calc_retransmit(mp, mi, index);
722 719
723 if (mr->probability < MINSTREL_FRAC(20, 100) || !mr->retry_count) { 720 if (mr->probability < MINSTREL_FRAC(20, 100) || !mr->retry_count) {
724 ratetbl->rate[offset].count = 2; 721 ratetbl->rate[offset].count = 2;
725 ratetbl->rate[offset].count_rts = 2; 722 ratetbl->rate[offset].count_rts = 2;
726 ratetbl->rate[offset].count_cts = 2; 723 ratetbl->rate[offset].count_cts = 2;
727 } else { 724 } else {
728 ratetbl->rate[offset].count = mr->retry_count; 725 ratetbl->rate[offset].count = mr->retry_count;
729 ratetbl->rate[offset].count_cts = mr->retry_count; 726 ratetbl->rate[offset].count_cts = mr->retry_count;
730 ratetbl->rate[offset].count_rts = mr->retry_count_rtscts; 727 ratetbl->rate[offset].count_rts = mr->retry_count_rtscts;
731 } 728 }
732 729
733 if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) { 730 if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
734 idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)]; 731 idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
735 flags = 0; 732 flags = 0;
736 } else { 733 } else {
737 idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8; 734 idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
738 flags = IEEE80211_TX_RC_MCS | group->flags; 735 flags = IEEE80211_TX_RC_MCS | group->flags;
739 } 736 }
740 737
741 if (offset > 0) { 738 if (offset > 0) {
742 ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; 739 ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
743 flags |= IEEE80211_TX_RC_USE_RTS_CTS; 740 flags |= IEEE80211_TX_RC_USE_RTS_CTS;
744 } 741 }
745 742
746 ratetbl->rate[offset].idx = idx; 743 ratetbl->rate[offset].idx = idx;
747 ratetbl->rate[offset].flags = flags; 744 ratetbl->rate[offset].flags = flags;
748 } 745 }
749 746
750 static void 747 static void
751 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) 748 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
752 { 749 {
753 struct ieee80211_sta_rates *rates; 750 struct ieee80211_sta_rates *rates;
754 int i = 0; 751 int i = 0;
755 752
756 rates = kzalloc(sizeof(*rates), GFP_ATOMIC); 753 rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
757 if (!rates) 754 if (!rates)
758 return; 755 return;
759 756
760 /* Start with max_tp_rate[0] */ 757 /* Start with max_tp_rate[0] */
761 minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]); 758 minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
762 759
763 if (mp->hw->max_rates >= 3) { 760 if (mp->hw->max_rates >= 3) {
764 /* At least 3 tx rates supported, use max_tp_rate[1] next */ 761 /* At least 3 tx rates supported, use max_tp_rate[1] next */
765 minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]); 762 minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
766 } 763 }
767 764
768 if (mp->hw->max_rates >= 2) { 765 if (mp->hw->max_rates >= 2) {
769 /* At least 2 tx rates supported, use max_prob_rate next */ 766 /* At least 2 tx rates supported, use max_prob_rate next */
771 minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate); 768 minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
772 } 769 }
773 770
774 rates->rate[i].idx = -1; 771 rates->rate[i].idx = -1;
775 rate_control_set_rates(mp->hw, mi->sta, rates); 772 rate_control_set_rates(mp->hw, mi->sta, rates);
776 } 773 }
777 774
778 static inline int 775 static inline int
779 minstrel_get_duration(int index) 776 minstrel_get_duration(int index)
780 { 777 {
781 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 778 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
782 return group->duration[index % MCS_GROUP_RATES]; 779 return group->duration[index % MCS_GROUP_RATES];
783 } 780 }
784 781
785 static int 782 static int
786 minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) 783 minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
787 { 784 {
788 struct minstrel_rate_stats *mr; 785 struct minstrel_rate_stats *mr;
789 struct minstrel_mcs_group_data *mg; 786 struct minstrel_mcs_group_data *mg;
790 unsigned int sample_dur, sample_group, cur_max_tp_streams; 787 unsigned int sample_dur, sample_group, cur_max_tp_streams;
791 int sample_idx = 0; 788 int sample_idx = 0;
792 789
793 if (mi->sample_wait > 0) { 790 if (mi->sample_wait > 0) {
794 mi->sample_wait--; 791 mi->sample_wait--;
795 return -1; 792 return -1;
796 } 793 }
797 794
798 if (!mi->sample_tries) 795 if (!mi->sample_tries)
799 return -1; 796 return -1;
800 797
801 sample_group = mi->sample_group; 798 sample_group = mi->sample_group;
802 mg = &mi->groups[sample_group]; 799 mg = &mi->groups[sample_group];
803 sample_idx = sample_table[mg->column][mg->index]; 800 sample_idx = sample_table[mg->column][mg->index];
804 minstrel_next_sample_idx(mi); 801 minstrel_next_sample_idx(mi);
805 802
806 if (!(mg->supported & BIT(sample_idx))) 803 if (!(mg->supported & BIT(sample_idx)))
807 return -1; 804 return -1;
808 805
809 mr = &mg->rates[sample_idx]; 806 mr = &mg->rates[sample_idx];
810 sample_idx += sample_group * MCS_GROUP_RATES; 807 sample_idx += sample_group * MCS_GROUP_RATES;
811 808
812 /* 809 /*
813 * Sampling might add some overhead (RTS, no aggregation) 810 * Sampling might add some overhead (RTS, no aggregation)
814 * to the frame. Hence, don't use sampling for the currently 811 * to the frame. Hence, don't use sampling for the currently
815 * used rates. 812 * used rates.
816 */ 813 */
817 if (sample_idx == mi->max_tp_rate[0] || 814 if (sample_idx == mi->max_tp_rate[0] ||
818 sample_idx == mi->max_tp_rate[1] || 815 sample_idx == mi->max_tp_rate[1] ||
819 sample_idx == mi->max_prob_rate) 816 sample_idx == mi->max_prob_rate)
820 return -1; 817 return -1;
821 818
822 /* 819 /*
823 * Do not sample if the probability is already higher than 95% 820 * Do not sample if the probability is already higher than 95%
824 * to avoid wasting airtime. 821 * to avoid wasting airtime.
825 */ 822 */
826 if (mr->probability > MINSTREL_FRAC(95, 100)) 823 if (mr->probability > MINSTREL_FRAC(95, 100))
827 return -1; 824 return -1;
828 825
829 /* 826 /*
830 * Make sure that lower rates get sampled only occasionally 827 * Make sure that lower rates get sampled only occasionally
831 * if the link is working perfectly. 828 * if the link is working perfectly.
832 */ 829 */
833 830
834 cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / 831 cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
835 MCS_GROUP_RATES].streams; 832 MCS_GROUP_RATES].streams;
836 sample_dur = minstrel_get_duration(sample_idx); 833 sample_dur = minstrel_get_duration(sample_idx);
837 if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) && 834 if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
838 (cur_max_tp_streams - 1 < 835 (cur_max_tp_streams - 1 <
839 minstrel_mcs_groups[sample_group].streams || 836 minstrel_mcs_groups[sample_group].streams ||
840 sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { 837 sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
841 if (mr->sample_skipped < 20) 838 if (mr->sample_skipped < 20)
842 return -1; 839 return -1;
843 840
844 if (mi->sample_slow++ > 2) 841 if (mi->sample_slow++ > 2)
845 return -1; 842 return -1;
846 } 843 }
847 mi->sample_tries--; 844 mi->sample_tries--;
848 845
849 return sample_idx; 846 return sample_idx;
850 } 847 }
851 848
852 static void 849 static void
853 minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp, 850 minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
854 struct minstrel_ht_sta *mi, bool val) 851 struct minstrel_ht_sta *mi, bool val)
855 { 852 {
856 u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported; 853 u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;
857 854
858 if (!supported || !mi->cck_supported_short) 855 if (!supported || !mi->cck_supported_short)
859 return; 856 return;
860 857
861 if (supported & (mi->cck_supported_short << (val * 4))) 858 if (supported & (mi->cck_supported_short << (val * 4)))
862 return; 859 return;
863 860
864 supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4); 861 supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
865 mi->groups[MINSTREL_CCK_GROUP].supported = supported; 862 mi->groups[MINSTREL_CCK_GROUP].supported = supported;
866 } 863 }
867 864
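The XOR above leans on the bitmap layout noted at the CCK group definition: bits 0-3 are the long-preamble CCK rates and bits 4-7 their short-preamble twins. If cck_supported_short were 0x7, the combined mask 0x7 | 0x70 would flip whichever half is currently enabled over to the other, so the group follows the peer's short-preamble setting without rebuilding any tables.
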
868 static void 865 static void
869 minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, 866 minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
870 struct ieee80211_tx_rate_control *txrc) 867 struct ieee80211_tx_rate_control *txrc)
871 { 868 {
872 const struct mcs_group *sample_group; 869 const struct mcs_group *sample_group;
873 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 870 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
874 struct ieee80211_tx_rate *rate = &info->status.rates[0]; 871 struct ieee80211_tx_rate *rate = &info->status.rates[0];
875 struct minstrel_ht_sta_priv *msp = priv_sta; 872 struct minstrel_ht_sta_priv *msp = priv_sta;
876 struct minstrel_ht_sta *mi = &msp->ht; 873 struct minstrel_ht_sta *mi = &msp->ht;
877 struct minstrel_priv *mp = priv; 874 struct minstrel_priv *mp = priv;
878 int sample_idx; 875 int sample_idx;
879 876
880 if (rate_control_send_low(sta, priv_sta, txrc)) 877 if (rate_control_send_low(sta, priv_sta, txrc))
881 return; 878 return;
882 879
883 if (!msp->is_ht) 880 if (!msp->is_ht)
884 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); 881 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
885 882
886 info->flags |= mi->tx_flags; 883 info->flags |= mi->tx_flags;
887 minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble); 884 minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
888 885
889 #ifdef CONFIG_MAC80211_DEBUGFS 886 #ifdef CONFIG_MAC80211_DEBUGFS
890 if (mp->fixed_rate_idx != -1) 887 if (mp->fixed_rate_idx != -1)
891 return; 888 return;
892 #endif 889 #endif
893 890
894 /* Don't use EAPOL frames for sampling on non-mrr hw */ 891 /* Don't use EAPOL frames for sampling on non-mrr hw */
895 if (mp->hw->max_rates == 1 && 892 if (mp->hw->max_rates == 1 &&
896 (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) 893 (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
897 sample_idx = -1; 894 sample_idx = -1;
898 else 895 else
899 sample_idx = minstrel_get_sample_rate(mp, mi); 896 sample_idx = minstrel_get_sample_rate(mp, mi);
900 897
901 mi->total_packets++; 898 mi->total_packets++;
902 899
903 /* wraparound */ 900 /* wraparound */
904 if (mi->total_packets == ~0) { 901 if (mi->total_packets == ~0) {
905 mi->total_packets = 0; 902 mi->total_packets = 0;
906 mi->sample_packets = 0; 903 mi->sample_packets = 0;
907 } 904 }
908 905
909 if (sample_idx < 0) 906 if (sample_idx < 0)
910 return; 907 return;
911 908
912 sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES]; 909 sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
913 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 910 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
914 rate->count = 1; 911 rate->count = 1;
915 912
916 if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) { 913 if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
917 int idx = sample_idx % ARRAY_SIZE(mp->cck_rates); 914 int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
918 rate->idx = mp->cck_rates[idx]; 915 rate->idx = mp->cck_rates[idx];
919 rate->flags = 0; 916 rate->flags = 0;
920 return; 917 return;
921 } 918 }
922 919
923 rate->idx = sample_idx % MCS_GROUP_RATES + 920 rate->idx = sample_idx % MCS_GROUP_RATES +
924 (sample_group->streams - 1) * 8; 921 (sample_group->streams - 1) * 8;
925 rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags; 922 rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
926 } 923 }
927 924
928 static void 925 static void
929 minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, 926 minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
930 struct ieee80211_supported_band *sband, 927 struct ieee80211_supported_band *sband,
931 struct ieee80211_sta *sta) 928 struct ieee80211_sta *sta)
932 { 929 {
933 int i; 930 int i;
934 931
935 if (sband->band != IEEE80211_BAND_2GHZ) 932 if (sband->band != IEEE80211_BAND_2GHZ)
936 return; 933 return;
937 934
938 if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES)) 935 if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
939 return; 936 return;
940 937
941 mi->cck_supported = 0; 938 mi->cck_supported = 0;
942 mi->cck_supported_short = 0; 939 mi->cck_supported_short = 0;
943 for (i = 0; i < 4; i++) { 940 for (i = 0; i < 4; i++) {
944 if (!rate_supported(sta, sband->band, mp->cck_rates[i])) 941 if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
945 continue; 942 continue;
946 943
947 mi->cck_supported |= BIT(i); 944 mi->cck_supported |= BIT(i);
948 if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE) 945 if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
949 mi->cck_supported_short |= BIT(i); 946 mi->cck_supported_short |= BIT(i);
950 } 947 }
951 948
952 mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported; 949 mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
953 } 950 }
954 951
955 static void 952 static void
956 minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, 953 minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
957 struct cfg80211_chan_def *chandef, 954 struct cfg80211_chan_def *chandef,
958 struct ieee80211_sta *sta, void *priv_sta) 955 struct ieee80211_sta *sta, void *priv_sta)
959 { 956 {
960 struct minstrel_priv *mp = priv; 957 struct minstrel_priv *mp = priv;
961 struct minstrel_ht_sta_priv *msp = priv_sta; 958 struct minstrel_ht_sta_priv *msp = priv_sta;
962 struct minstrel_ht_sta *mi = &msp->ht; 959 struct minstrel_ht_sta *mi = &msp->ht;
963 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; 960 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
964 u16 sta_cap = sta->ht_cap.cap; 961 u16 sta_cap = sta->ht_cap.cap;
965 int n_supported = 0; 962 int n_supported = 0;
966 int ack_dur; 963 int ack_dur;
967 int stbc; 964 int stbc;
968 int i; 965 int i;
969 966
970 /* fall back to the old minstrel for legacy stations */ 967 /* fall back to the old minstrel for legacy stations */
971 if (!sta->ht_cap.ht_supported) 968 if (!sta->ht_cap.ht_supported)
972 goto use_legacy; 969 goto use_legacy;
973 970
974 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != 971 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
975 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1); 972 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1);
976 973
977 msp->is_ht = true; 974 msp->is_ht = true;
978 memset(mi, 0, sizeof(*mi)); 975 memset(mi, 0, sizeof(*mi));
979 976
980 mi->sta = sta; 977 mi->sta = sta;
981 mi->stats_update = jiffies; 978 mi->stats_update = jiffies;
982 979
983 ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0); 980 ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
984 mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0); 981 mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
985 mi->overhead += ack_dur; 982 mi->overhead += ack_dur;
986 mi->overhead_rtscts = mi->overhead + 2 * ack_dur; 983 mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
987 984
988 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); 985 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
989 986
990 /* When using MRR, sample more on the first attempt, without delay */ 987 /* When using MRR, sample more on the first attempt, without delay */
991 if (mp->has_mrr) { 988 if (mp->has_mrr) {
992 mi->sample_count = 16; 989 mi->sample_count = 16;
993 mi->sample_wait = 0; 990 mi->sample_wait = 0;
994 } else { 991 } else {
995 mi->sample_count = 8; 992 mi->sample_count = 8;
996 mi->sample_wait = 8; 993 mi->sample_wait = 8;
997 } 994 }
998 mi->sample_tries = 4; 995 mi->sample_tries = 4;
999 996
1000 stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> 997 stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
1001 IEEE80211_HT_CAP_RX_STBC_SHIFT; 998 IEEE80211_HT_CAP_RX_STBC_SHIFT;
1002 mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; 999 mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
1003 1000
1004 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) 1001 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
1005 mi->tx_flags |= IEEE80211_TX_CTL_LDPC; 1002 mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
1006 1003
1007 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { 1004 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
1008 mi->groups[i].supported = 0; 1005 mi->groups[i].supported = 0;
1009 if (i == MINSTREL_CCK_GROUP) { 1006 if (i == MINSTREL_CCK_GROUP) {
1010 minstrel_ht_update_cck(mp, mi, sband, sta); 1007 minstrel_ht_update_cck(mp, mi, sband, sta);
1011 continue; 1008 continue;
1012 } 1009 }
1013 1010
1014 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { 1011 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
1015 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { 1012 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
1016 if (!(sta_cap & IEEE80211_HT_CAP_SGI_40)) 1013 if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
1017 continue; 1014 continue;
1018 } else { 1015 } else {
1019 if (!(sta_cap & IEEE80211_HT_CAP_SGI_20)) 1016 if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
1020 continue; 1017 continue;
1021 } 1018 }
1022 } 1019 }
1023 1020
1024 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH && 1021 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
1025 sta->bandwidth < IEEE80211_STA_RX_BW_40) 1022 sta->bandwidth < IEEE80211_STA_RX_BW_40)
1026 continue; 1023 continue;
1027 1024
1028 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ 1025 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
1029 if (sta->smps_mode == IEEE80211_SMPS_STATIC && 1026 if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
1030 minstrel_mcs_groups[i].streams > 1) 1027 minstrel_mcs_groups[i].streams > 1)
1031 continue; 1028 continue;
1032 1029
1033 mi->groups[i].supported = 1030 mi->groups[i].supported =
1034 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; 1031 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
1035 1032
1036 if (mi->groups[i].supported) 1033 if (mi->groups[i].supported)
1037 n_supported++; 1034 n_supported++;
1038 } 1035 }
1039 1036
1040 if (!n_supported) 1037 if (!n_supported)
1041 goto use_legacy; 1038 goto use_legacy;
1042 1039
1043 /* create an initial rate table with the lowest supported rates */ 1040 /* create an initial rate table with the lowest supported rates */
1044 minstrel_ht_update_stats(mp, mi); 1041 minstrel_ht_update_stats(mp, mi);
1045 minstrel_ht_update_rates(mp, mi); 1042 minstrel_ht_update_rates(mp, mi);
1046 1043
1047 return; 1044 return;
1048 1045
use_legacy:
        /*
         * No usable HT rates: hand the station over to legacy minstrel,
         * reusing the ratelist and sample_table buffers allocated in
         * minstrel_ht_alloc_sta() so no extra allocation is needed.
         */
        msp->is_ht = false;
        memset(&msp->legacy, 0, sizeof(msp->legacy));
        msp->legacy.r = msp->ratelist;
        msp->legacy.sample_table = msp->sample_table;
        return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
                                           &msp->legacy);
}

static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
                      struct cfg80211_chan_def *chandef,
                      struct ieee80211_sta *sta, void *priv_sta)
{
        minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
                        struct cfg80211_chan_def *chandef,
                        struct ieee80211_sta *sta, void *priv_sta,
                        u32 changed)
{
        minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
        struct ieee80211_supported_band *sband;
        struct minstrel_ht_sta_priv *msp;
        struct minstrel_priv *mp = priv;
        struct ieee80211_hw *hw = mp->hw;
        int max_rates = 0;
        int i;

        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                sband = hw->wiphy->bands[i];
                if (sband && sband->n_bitrates > max_rates)
                        max_rates = sband->n_bitrates;
        }

        msp = kzalloc(sizeof(*msp), gfp);
        if (!msp)
                return NULL;

        msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
        if (!msp->ratelist)
                goto error;

        msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
        if (!msp->sample_table)
                goto error1;

        return msp;

error1:
        kfree(msp->ratelist);
error:
        kfree(msp);
        return NULL;
}
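
/*
 * Sizing note for the allocations above, with illustrative (assumed)
 * numbers: both buffers are sized for the legacy fallback path, using
 * the largest bitrate count across all bands. If the 2.4 GHz band has
 * 12 legacy rates and SAMPLE_COLUMNS is 10, the sample table is a
 * 120-byte array of rate indices.
 */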

static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
        struct minstrel_ht_sta_priv *msp = priv_sta;

        kfree(msp->sample_table);
        kfree(msp->ratelist);
        kfree(msp);
}

static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
        return mac80211_minstrel.alloc(hw, debugfsdir);
}

static void
minstrel_ht_free(void *priv)
{
        mac80211_minstrel.free(priv);
}

static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
        struct minstrel_ht_sta_priv *msp = priv_sta;
        struct minstrel_ht_sta *mi = &msp->ht;
        int i, j;

        if (!msp->is_ht)
                return mac80211_minstrel.get_expected_throughput(priv_sta);

        i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
        j = mi->max_tp_rate[0] % MCS_GROUP_RATES;

        /* convert cur_tp from packets per second to kbps */
        return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
}
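
/*
 * Worked example of the conversion above, with assumed values: if the
 * best-throughput rate sustains cur_tp = 1000 packets/s and
 * AVG_PKT_SIZE is 1200 bytes, the estimate is
 * 1000 * 1200 * 8 / 1024 = 9375 kbps. The index split is visible in
 * the code: max_tp_rate[0] is a flat rate index, so dividing by
 * MCS_GROUP_RATES selects the MCS group and the remainder selects the
 * rate within that group.
 */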

static const struct rate_control_ops mac80211_minstrel_ht = {
        .name = "minstrel_ht",
        .tx_status = minstrel_ht_tx_status,
        .get_rate = minstrel_ht_get_rate,
        .rate_init = minstrel_ht_rate_init,
        .rate_update = minstrel_ht_rate_update,
        .alloc_sta = minstrel_ht_alloc_sta,
        .free_sta = minstrel_ht_free_sta,
        .alloc = minstrel_ht_alloc,
        .free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
        .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
        .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
        .get_expected_throughput = minstrel_ht_get_expected_throughput,
};


static void __init init_sample_table(void)
{
        int col, i, new_idx;
        u8 rnd[MCS_GROUP_RATES];

        memset(sample_table, 0xff, sizeof(sample_table));
        for (col = 0; col < SAMPLE_COLUMNS; col++) {
                prandom_bytes(rnd, sizeof(rnd));
                for (i = 0; i < MCS_GROUP_RATES; i++) {
                        new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
                        while (sample_table[col][new_idx] != 0xff)
                                new_idx = (new_idx + 1) % MCS_GROUP_RATES;

                        sample_table[col][new_idx] = i;
                }
        }
}
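
/*
 * Each column built above is a random permutation of the rate indices
 * 0..MCS_GROUP_RATES-1: rate i lands at a random offset, and linear
 * probing past already-filled slots (0xff marks "empty") resolves
 * collisions. Illustrative example, assuming MCS_GROUP_RATES == 8: if
 * rate 0 draws slot 3 and rate 1 also targets slot 3, rate 1 probes
 * forward into slot 4, giving a column such as { -, -, -, 0, 1, ... }
 * that eventually holds every index exactly once.
 */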

int __init
rc80211_minstrel_ht_init(void)
{
        init_sample_table();
        return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}

void
rc80211_minstrel_ht_exit(void)