Commit ce7c9111a97492d04c504f40736a669c235d664a
Committed by
John W. Linville
1 parent
e0cb686ff8
Exists in
master
and in
7 other branches
mac80211: track master queue status
This is preparation for the dynamic power save support. In the future there will be two paths to stop the master queues, and we need to track this properly to avoid starting queues incorrectly. Implement this by adding a status array for each queue. The original idea and design are from Johannes Berg; I just did the implementation based on his notes. All the bugs are mine, of course. Signed-off-by: Kalle Valo <kalle.valo@nokia.com> Acked-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Showing 3 changed files with 93 additions and 7 deletions Side-by-side Diff
net/mac80211/ieee80211_i.h
... | ... | @@ -538,6 +538,10 @@ |
538 | 538 | IEEE80211_ADDBA_MSG = 4, |
539 | 539 | }; |
540 | 540 | |
541 | +enum queue_stop_reason { | |
542 | + IEEE80211_QUEUE_STOP_REASON_DRIVER, | |
543 | +}; | |
544 | + | |
541 | 545 | /* maximum number of hardware queues we support. */ |
542 | 546 | #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES) |
543 | 547 | |
... | ... | @@ -554,7 +558,8 @@ |
554 | 558 | const struct ieee80211_ops *ops; |
555 | 559 | |
556 | 560 | unsigned long queue_pool[BITS_TO_LONGS(QD_MAX_QUEUES)]; |
557 | - | |
561 | + unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; | |
562 | + spinlock_t queue_stop_reason_lock; | |
558 | 563 | struct net_device *mdev; /* wmaster# - "master" 802.11 device */ |
559 | 564 | int open_count; |
560 | 565 | int monitors, cooked_mntrs; |
... | ... | @@ -971,6 +976,11 @@ |
971 | 976 | int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq); |
972 | 977 | u64 ieee80211_mandatory_rates(struct ieee80211_local *local, |
973 | 978 | enum ieee80211_band band); |
979 | + | |
980 | +void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | |
981 | + enum queue_stop_reason reason); | |
982 | +void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | |
983 | + enum queue_stop_reason reason); | |
974 | 984 | |
975 | 985 | #ifdef CONFIG_MAC80211_NOINLINE |
976 | 986 | #define debug_noinline noinline |
net/mac80211/main.c
net/mac80211/util.c
... | ... | @@ -330,10 +330,20 @@ |
330 | 330 | } |
331 | 331 | EXPORT_SYMBOL(ieee80211_ctstoself_duration); |
332 | 332 | |
333 | -void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) | |
333 | +static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | |
334 | + enum queue_stop_reason reason) | |
334 | 335 | { |
335 | 336 | struct ieee80211_local *local = hw_to_local(hw); |
336 | 337 | |
338 | + /* we don't need to track ampdu queues */ | |
339 | + if (queue < ieee80211_num_regular_queues(hw)) { | |
340 | + __clear_bit(reason, &local->queue_stop_reasons[queue]); | |
341 | + | |
342 | + if (local->queue_stop_reasons[queue] != 0) | |
343 | + /* someone still has this queue stopped */ | |
344 | + return; | |
345 | + } | |
346 | + | |
337 | 347 | if (test_bit(queue, local->queues_pending)) { |
338 | 348 | set_bit(queue, local->queues_pending_run); |
339 | 349 | tasklet_schedule(&local->tx_pending_tasklet); |
340 | 350 | |
341 | 351 | |
342 | 352 | |
343 | 353 | |
344 | 354 | |
345 | 355 | |
346 | 356 | |
347 | 357 | |
... | ... | @@ -341,23 +351,75 @@ |
341 | 351 | netif_wake_subqueue(local->mdev, queue); |
342 | 352 | } |
343 | 353 | } |
354 | + | |
355 | +void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | |
356 | + enum queue_stop_reason reason) | |
357 | +{ | |
358 | + struct ieee80211_local *local = hw_to_local(hw); | |
359 | + unsigned long flags; | |
360 | + | |
361 | + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | |
362 | + __ieee80211_wake_queue(hw, queue, reason); | |
363 | + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | |
364 | +} | |
365 | + | |
366 | +void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) | |
367 | +{ | |
368 | + ieee80211_wake_queue_by_reason(hw, queue, | |
369 | + IEEE80211_QUEUE_STOP_REASON_DRIVER); | |
370 | +} | |
344 | 371 | EXPORT_SYMBOL(ieee80211_wake_queue); |
345 | 372 | |
346 | -void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) | |
373 | +static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, | |
374 | + enum queue_stop_reason reason) | |
347 | 375 | { |
348 | 376 | struct ieee80211_local *local = hw_to_local(hw); |
349 | 377 | |
378 | + /* we don't need to track ampdu queues */ | |
379 | + if (queue < ieee80211_num_regular_queues(hw)) | |
380 | + __set_bit(reason, &local->queue_stop_reasons[queue]); | |
381 | + | |
350 | 382 | netif_stop_subqueue(local->mdev, queue); |
351 | 383 | } |
384 | + | |
385 | +void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, | |
386 | + enum queue_stop_reason reason) | |
387 | +{ | |
388 | + struct ieee80211_local *local = hw_to_local(hw); | |
389 | + unsigned long flags; | |
390 | + | |
391 | + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | |
392 | + __ieee80211_stop_queue(hw, queue, reason); | |
393 | + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | |
394 | +} | |
395 | + | |
396 | +void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) | |
397 | +{ | |
398 | + ieee80211_stop_queue_by_reason(hw, queue, | |
399 | + IEEE80211_QUEUE_STOP_REASON_DRIVER); | |
400 | +} | |
352 | 401 | EXPORT_SYMBOL(ieee80211_stop_queue); |
353 | 402 | |
354 | -void ieee80211_stop_queues(struct ieee80211_hw *hw) | |
403 | +void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | |
404 | + enum queue_stop_reason reason) | |
355 | 405 | { |
406 | + struct ieee80211_local *local = hw_to_local(hw); | |
407 | + unsigned long flags; | |
356 | 408 | int i; |
357 | 409 | |
410 | + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | |
411 | + | |
358 | 412 | for (i = 0; i < ieee80211_num_queues(hw); i++) |
359 | - ieee80211_stop_queue(hw, i); | |
413 | + __ieee80211_stop_queue(hw, i, reason); | |
414 | + | |
415 | + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | |
360 | 416 | } |
417 | + | |
418 | +void ieee80211_stop_queues(struct ieee80211_hw *hw) | |
419 | +{ | |
420 | + ieee80211_stop_queues_by_reason(hw, | |
421 | + IEEE80211_QUEUE_STOP_REASON_DRIVER); | |
422 | +} | |
361 | 423 | EXPORT_SYMBOL(ieee80211_stop_queues); |
362 | 424 | |
363 | 425 | int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) |
364 | 426 | |
365 | 427 | |
366 | 428 | |
... | ... | @@ -367,12 +429,24 @@ |
367 | 429 | } |
368 | 430 | EXPORT_SYMBOL(ieee80211_queue_stopped); |
369 | 431 | |
370 | -void ieee80211_wake_queues(struct ieee80211_hw *hw) | |
432 | +void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | |
433 | + enum queue_stop_reason reason) | |
371 | 434 | { |
435 | + struct ieee80211_local *local = hw_to_local(hw); | |
436 | + unsigned long flags; | |
372 | 437 | int i; |
373 | 438 | |
439 | + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | |
440 | + | |
374 | 441 | for (i = 0; i < hw->queues + hw->ampdu_queues; i++) |
375 | - ieee80211_wake_queue(hw, i); | |
442 | + __ieee80211_wake_queue(hw, i, reason); | |
443 | + | |
444 | + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | |
445 | +} | |
446 | + | |
447 | +void ieee80211_wake_queues(struct ieee80211_hw *hw) | |
448 | +{ | |
449 | + ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_DRIVER); | |
376 | 450 | } |
377 | 451 | EXPORT_SYMBOL(ieee80211_wake_queues); |
378 | 452 |