Commit c04bfc6b223662c42a77727342c1df7d39e686a2

Authored by Ben Hutchings
1 parent 6ecfd0c70c

sfc: Remove ancient support for nesting of TX stop

Long before this driver went into mainline, it had support for
multiple TX queues per port, with lockless TX enabled.  Since Linux
did not know anything of this, filling up any hardware TX queue would
stop the core TX queue and multiple hardware TX queues could fill up
before the scheduler reacted.  Thus it was necessary to keep a count
of how many TX queues were stopped and to wake the core TX queue only
when all had free space again.
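
For illustration, a minimal sketch of that counting scheme — the removed efx_stop_queue()/efx_wake_queue() in the tx.c diff below are the real implementation; the helper names here are hypothetical:

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>
	#include <linux/atomic.h>

	/* Each hardware queue that fills up takes a "stop" on the shared
	 * core queue; the core queue is woken only when the last stop is
	 * released. */
	static void example_stop_nested(atomic_t *stop_count,
					spinlock_t *stop_lock,
					struct netdev_queue *core_txq)
	{
		spin_lock_bh(stop_lock);
		atomic_inc(stop_count);
		netif_tx_stop_queue(core_txq);
		spin_unlock_bh(stop_lock);
	}

	static void example_wake_nested(atomic_t *stop_count,
					spinlock_t *stop_lock,
					struct netdev_queue *core_txq)
	{
		local_bh_disable();
		/* atomic_dec_and_lock() takes the lock only on the
		 * 1 -> 0 transition, so exactly one caller wakes. */
		if (atomic_dec_and_lock(stop_count, stop_lock)) {
			netif_tx_wake_queue(core_txq);
			spin_unlock(stop_lock);
		}
		local_bh_enable();
	}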

The driver also previously (ab)used the per-hardware-queue stopped
flag as a counter to deal with various things that can inhibit TX, but
it no longer does that.

Remove the per-channel tx_stop_count, tx_stop_lock and
per-hardware-queue stopped count and just use the networking core
queue state directly.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
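
For comparison, the shape of the replacement — a condensed sketch of the new code in the diff below, not a drop-in unit; struct efx_nic, struct efx_tx_queue and EFX_TXQ_TYPES are the driver's own definitions:

	#include <linux/netdevice.h>

	static void example_map_core_txq(struct efx_nic *efx,
					 struct efx_tx_queue *tx_queue)
	{
		/* Registration time: the hardware queues of a channel
		 * share one core queue, hence the division. */
		tx_queue->core_txq =
			netdev_get_tx_queue(efx->net_dev,
					    tx_queue->queue / EFX_TXQ_TYPES);
	}

	static void example_on_ring_full(struct efx_tx_queue *tx_queue)
	{
		/* Xmit path: the hardware ring is out of descriptors. */
		netif_tx_stop_queue(tx_queue->core_txq);
	}

	static void example_on_completion(struct efx_tx_queue *tx_queue,
					  bool below_threshold)
	{
		/* Completion path: wake once enough space has freed. */
		if (netif_tx_queue_stopped(tx_queue->core_txq) &&
		    below_threshold)
			netif_tx_wake_queue(tx_queue->core_txq);
	}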

Showing 4 changed files with 35 additions and 113 deletions

drivers/net/sfc/efx.c
... ... @@ -461,9 +461,6 @@
461 461 }
462 462 }
463 463  
464   - spin_lock_init(&channel->tx_stop_lock);
465   - atomic_set(&channel->tx_stop_count, 1);
466   -
467 464 rx_queue = &channel->rx_queue;
468 465 rx_queue->efx = efx;
469 466 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
470 467  
... ... @@ -1406,11 +1403,11 @@
1406 1403 * restart the transmit interface early so the watchdog timer stops */
1407 1404 efx_start_port(efx);
1408 1405  
1409   - efx_for_each_channel(channel, efx) {
1410   - if (efx_dev_registered(efx))
1411   - efx_wake_queue(channel);
  1406 + if (efx_dev_registered(efx))
  1407 + netif_tx_wake_all_queues(efx->net_dev);
  1408 +
  1409 + efx_for_each_channel(channel, efx)
1412 1410 efx_start_channel(channel);
1413   - }
1414 1411  
1415 1412 if (efx->legacy_irq)
1416 1413 efx->legacy_irq_enabled = true;
... ... @@ -1498,9 +1495,7 @@
1498 1495 /* Stop the kernel transmit interface late, so the watchdog
1499 1496 * timer isn't ticking over the flush */
1500 1497 if (efx_dev_registered(efx)) {
1501   - struct efx_channel *channel;
1502   - efx_for_each_channel(channel, efx)
1503   - efx_stop_queue(channel);
  1498 + netif_tx_stop_all_queues(efx->net_dev);
1504 1499 netif_tx_lock_bh(efx->net_dev);
1505 1500 netif_tx_unlock_bh(efx->net_dev);
1506 1501 }
... ... @@ -1896,6 +1891,7 @@
1896 1891 static int efx_register_netdev(struct efx_nic *efx)
1897 1892 {
1898 1893 struct net_device *net_dev = efx->net_dev;
  1894 + struct efx_channel *channel;
1899 1895 int rc;
1900 1896  
1901 1897 net_dev->watchdog_timeo = 5 * HZ;
... ... @@ -1917,6 +1913,14 @@
1917 1913 rc = register_netdevice(net_dev);
1918 1914 if (rc)
1919 1915 goto fail_locked;
  1916 +
  1917 + efx_for_each_channel(channel, efx) {
  1918 + struct efx_tx_queue *tx_queue;
  1919 + efx_for_each_channel_tx_queue(tx_queue, channel) {
  1920 + tx_queue->core_txq = netdev_get_tx_queue(
  1921 + efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
  1922 + }
  1923 + }
1920 1924  
1921 1925 /* Always start with carrier off; PHY events will detect the link */
1922 1926 netif_carrier_off(efx->net_dev);
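
One detail worth noting in the registration hunk above is the queue-number arithmetic. Assuming EFX_TXQ_TYPES == 2 — implied by the comment removed from tx.c below about "the 2 hardware queues associated with each core queue" — hardware queues 2n and 2n+1 resolve to the same core queue n:

	/* Sketch of the mapping, under the EFX_TXQ_TYPES == 2 assumption. */
	static unsigned int example_core_index(unsigned int hw_queue)
	{
		return hw_queue / 2;	/* hw queues 4 and 5 -> core queue 2 */
	}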
drivers/net/sfc/efx.h
... ... @@ -36,8 +36,6 @@
36 36 extern netdev_tx_t
37 37 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
38 38 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
39   -extern void efx_stop_queue(struct efx_channel *channel);
40   -extern void efx_wake_queue(struct efx_channel *channel);
41 39  
42 40 /* RX */
43 41 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
drivers/net/sfc/net_driver.h
... ... @@ -136,6 +136,7 @@
136 136 * @efx: The associated Efx NIC
137 137 * @queue: DMA queue number
138 138 * @channel: The associated channel
  139 + * @core_txq: The networking core TX queue structure
139 140 * @buffer: The software buffer ring
140 141 * @txd: The hardware descriptor ring
141 142 * @ptr_mask: The size of the ring minus 1.
... ... @@ -148,8 +149,6 @@
148 149 * variable indicates that the queue is empty. This is to
149 150 * avoid cache-line ping-pong between the xmit path and the
150 151 * completion path.
151   - * @stopped: Stopped count.
152   - * Set if this TX queue is currently stopping its port.
153 152 * @insert_count: Current insert pointer
154 153 * This is the number of buffers that have been added to the
155 154 * software ring.
... ... @@ -179,6 +178,7 @@
179 178 struct efx_nic *efx ____cacheline_aligned_in_smp;
180 179 unsigned queue;
181 180 struct efx_channel *channel;
  181 + struct netdev_queue *core_txq;
182 182 struct efx_tx_buffer *buffer;
183 183 struct efx_special_buffer txd;
184 184 unsigned int ptr_mask;
... ... @@ -187,7 +187,6 @@
187 187 /* Members used mainly on the completion path */
188 188 unsigned int read_count ____cacheline_aligned_in_smp;
189 189 unsigned int old_write_count;
190   - int stopped;
191 190  
192 191 /* Members used only on the xmit path */
193 192 unsigned int insert_count ____cacheline_aligned_in_smp;
... ... @@ -340,8 +339,6 @@
340 339 * @n_rx_overlength: Count of RX_OVERLENGTH errors
341 340 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
342 341 * @rx_queue: RX queue for this channel
343   - * @tx_stop_count: Core TX queue stop count
344   - * @tx_stop_lock: Core TX queue stop lock
345 342 * @tx_queue: TX queues for this channel
346 343 */
347 344 struct efx_channel {
... ... @@ -380,10 +377,6 @@
380 377 bool rx_pkt_csummed;
381 378  
382 379 struct efx_rx_queue rx_queue;
383   -
384   - atomic_t tx_stop_count;
385   - spinlock_t tx_stop_lock;
386   -
387 380 struct efx_tx_queue tx_queue[2];
388 381 };
389 382  
drivers/net/sfc/tx.c
... ... @@ -30,50 +30,6 @@
30 30 */
31 31 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32 32  
33   -/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34   - * because of the 2 hardware queues associated with each core queue,
35   - * but also so that we can inhibit TX for reasons other than a full
36   - * hardware queue. */
37   -void efx_stop_queue(struct efx_channel *channel)
38   -{
39   - struct efx_nic *efx = channel->efx;
40   - struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
41   -
42   - if (!tx_queue)
43   - return;
44   -
45   - spin_lock_bh(&channel->tx_stop_lock);
46   - netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
47   -
48   - atomic_inc(&channel->tx_stop_count);
49   - netif_tx_stop_queue(
50   - netdev_get_tx_queue(efx->net_dev,
51   - tx_queue->queue / EFX_TXQ_TYPES));
52   -
53   - spin_unlock_bh(&channel->tx_stop_lock);
54   -}
55   -
56   -/* Decrement core TX queue stop count and wake it if the count is 0 */
57   -void efx_wake_queue(struct efx_channel *channel)
58   -{
59   - struct efx_nic *efx = channel->efx;
60   - struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
61   -
62   - if (!tx_queue)
63   - return;
64   -
65   - local_bh_disable();
66   - if (atomic_dec_and_lock(&channel->tx_stop_count,
67   - &channel->tx_stop_lock)) {
68   - netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
69   - netif_tx_wake_queue(
70   - netdev_get_tx_queue(efx->net_dev,
71   - tx_queue->queue / EFX_TXQ_TYPES));
72   - spin_unlock(&channel->tx_stop_lock);
73   - }
74   - local_bh_enable();
75   -}
76   -
77 33 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
78 34 struct efx_tx_buffer *buffer)
79 35 {
80 36  
... ... @@ -234,9 +190,9 @@
234 190 * checked. Update the xmit path's
235 191 * copy of read_count.
236 192 */
237   - ++tx_queue->stopped;
  193 + netif_tx_stop_queue(tx_queue->core_txq);
238 194 /* This memory barrier protects the
239   - * change of stopped from the access
  195 + * change of queue state from the access
240 196 * of read_count. */
241 197 smp_mb();
242 198 tx_queue->old_read_count =
243 199  
... ... @@ -244,10 +200,12 @@
244 200 fill_level = (tx_queue->insert_count
245 201 - tx_queue->old_read_count);
246 202 q_space = efx->txq_entries - 1 - fill_level;
247   - if (unlikely(q_space-- <= 0))
248   - goto stop;
  203 + if (unlikely(q_space-- <= 0)) {
  204 + rc = NETDEV_TX_BUSY;
  205 + goto unwind;
  206 + }
249 207 smp_mb();
250   - --tx_queue->stopped;
  208 + netif_tx_start_queue(tx_queue->core_txq);
251 209 }
252 210  
253 211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
254 212  
... ... @@ -307,14 +265,7 @@
307 265  
308 266 /* Mark the packet as transmitted, and free the SKB ourselves */
309 267 dev_kfree_skb_any(skb);
310   - goto unwind;
311 268  
312   - stop:
313   - rc = NETDEV_TX_BUSY;
314   -
315   - if (tx_queue->stopped == 1)
316   - efx_stop_queue(tx_queue->channel);
317   -
318 269 unwind:
319 270 /* Work backwards until we hit the original insert pointer value */
320 271 while (tx_queue->insert_count != tx_queue->write_count) {
321 272  
322 273  
323 274  
... ... @@ -400,32 +351,21 @@
400 351 {
401 352 unsigned fill_level;
402 353 struct efx_nic *efx = tx_queue->efx;
403   - struct netdev_queue *queue;
404 354  
405 355 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
406 356  
407 357 efx_dequeue_buffers(tx_queue, index);
408 358  
409 359 /* See if we need to restart the netif queue. This barrier
410   - * separates the update of read_count from the test of
411   - * stopped. */
  360 + * separates the update of read_count from the test of the
  361 + * queue state. */
412 362 smp_mb();
413   - if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
  363 + if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
  364 + likely(efx->port_enabled)) {
414 365 fill_level = tx_queue->insert_count - tx_queue->read_count;
415 366 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
416 367 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
417   -
418   - /* Do this under netif_tx_lock(), to avoid racing
419   - * with efx_xmit(). */
420   - queue = netdev_get_tx_queue(
421   - efx->net_dev,
422   - tx_queue->queue / EFX_TXQ_TYPES);
423   - __netif_tx_lock(queue, smp_processor_id());
424   - if (tx_queue->stopped) {
425   - tx_queue->stopped = 0;
426   - efx_wake_queue(tx_queue->channel);
427   - }
428   - __netif_tx_unlock(queue);
  368 + netif_tx_wake_queue(tx_queue->core_txq);
429 369 }
430 370 }
431 371  
... ... @@ -487,7 +427,6 @@
487 427 tx_queue->read_count = 0;
488 428 tx_queue->old_read_count = 0;
489 429 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
490   - BUG_ON(tx_queue->stopped);
491 430  
492 431 /* Set up TX descriptor ring */
493 432 efx_nic_init_tx(tx_queue);
... ... @@ -523,12 +462,6 @@
523 462  
524 463 /* Free up TSO header cache */
525 464 efx_fini_tso(tx_queue);
526   -
527   - /* Release queue's stop on port, if any */
528   - if (tx_queue->stopped) {
529   - tx_queue->stopped = 0;
530   - efx_wake_queue(tx_queue->channel);
531   - }
532 465 }
533 466  
534 467 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
535 468  
... ... @@ -770,9 +703,9 @@
770 703 * since the xmit path last checked. Update
771 704 * the xmit path's copy of read_count.
772 705 */
773   - ++tx_queue->stopped;
  706 + netif_tx_stop_queue(tx_queue->core_txq);
774 707 /* This memory barrier protects the change of
775   - * stopped from the access of read_count. */
  708 + * queue state from the access of read_count. */
776 709 smp_mb();
777 710 tx_queue->old_read_count =
778 711 ACCESS_ONCE(tx_queue->read_count);
... ... @@ -784,7 +717,7 @@
784 717 return 1;
785 718 }
786 719 smp_mb();
787   - --tx_queue->stopped;
  720 + netif_tx_start_queue(tx_queue->core_txq);
788 721 }
789 722  
790 723 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
... ... @@ -1124,8 +1057,10 @@
1124 1057  
1125 1058 while (1) {
1126 1059 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1127   - if (unlikely(rc))
1128   - goto stop;
  1060 + if (unlikely(rc)) {
  1061 + rc2 = NETDEV_TX_BUSY;
  1062 + goto unwind;
  1063 + }
1129 1064  
1130 1065 /* Move onto the next fragment? */
1131 1066 if (state.in_len == 0) {
... ... @@ -1154,14 +1089,6 @@
1154 1089 netif_err(efx, tx_err, efx->net_dev,
1155 1090 "Out of memory for TSO headers, or PCI mapping error\n");
1156 1091 dev_kfree_skb_any(skb);
1157   - goto unwind;
1158   -
1159   - stop:
1160   - rc2 = NETDEV_TX_BUSY;
1161   -
1162   - /* Stop the queue if it wasn't stopped before. */
1163   - if (tx_queue->stopped == 1)
1164   - efx_stop_queue(tx_queue->channel);
1165 1092  
1166 1093 unwind:
1167 1094 /* Free the DMA mapping we were in the process of writing out */
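
A closing note on the fast-path hunks in tx.c above: the lockless stop/recheck handshake survives the conversion; only the mechanism changes from the private stopped counter to the core queue state. Condensed below, with field names as in the driver and the control flow simplified from the retry loop in efx_enqueue_skb():

	/* If the ring looks full: publish the stopped state, then re-read
	 * read_count. The smp_mb() pairs with the barrier in
	 * efx_xmit_done(), so either this path sees the new read_count or
	 * the completion path sees the stopped queue -- never neither. */
	if (unlikely(q_space-- <= 0)) {
		netif_tx_stop_queue(tx_queue->core_txq);
		smp_mb();
		tx_queue->old_read_count =
			ACCESS_ONCE(tx_queue->read_count);
		fill_level = tx_queue->insert_count -
			     tx_queue->old_read_count;
		q_space = efx->txq_entries - 1 - fill_level;
		if (unlikely(q_space-- <= 0))
			return NETDEV_TX_BUSY;	/* still full: back-pressure */
		smp_mb();
		netif_tx_start_queue(tx_queue->core_txq);
	}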