Commit 6568a234363978e1aebb5b7c9840ed87eed20362
Committed by
Ralf Baechle
1 parent
dbb103b243
Exists in
master
and in
7 other branches
Staging: Octeon Ethernet: Remove unused code.
Remove unused code, reindent, and join some split strings. Signed-off-by: David Daney <ddaney@caviumnetworks.com> To: linux-mips@linux-mips.org To: gregkh@suse.de Patchwork: http://patchwork.linux-mips.org/patch/842/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Showing 6 changed files with 55 additions and 284 deletions Side-by-side Diff
drivers/staging/octeon/ethernet-defines.h
... | ... | @@ -41,9 +41,6 @@ |
41 | 41 | * Tells the driver to populate the packet buffers with kernel skbuffs. |
42 | 42 | * This allows the driver to receive packets without copying them. It also |
43 | 43 | * means that 32bit userspace can't access the packet buffers. |
44 | - * USE_32BIT_SHARED | |
45 | - * This define tells the driver to allocate memory for buffers from the | |
46 | - * 32bit sahred region instead of the kernel memory space. | |
47 | 44 | * USE_HW_TCPUDP_CHECKSUM |
48 | 45 | * Controls if the Octeon TCP/UDP checksum engine is used for packet |
49 | 46 | * output. If this is zero, the kernel will perform the checksum in |
50 | 47 | |
... | ... | @@ -75,18 +72,11 @@ |
75 | 72 | #define CONFIG_CAVIUM_RESERVE32 0 |
76 | 73 | #endif |
77 | 74 | |
78 | -#if CONFIG_CAVIUM_RESERVE32 | |
79 | -#define USE_32BIT_SHARED 1 | |
80 | -#define USE_SKBUFFS_IN_HW 0 | |
81 | -#define REUSE_SKBUFFS_WITHOUT_FREE 0 | |
82 | -#else | |
83 | -#define USE_32BIT_SHARED 0 | |
84 | 75 | #define USE_SKBUFFS_IN_HW 1 |
85 | 76 | #ifdef CONFIG_NETFILTER |
86 | 77 | #define REUSE_SKBUFFS_WITHOUT_FREE 0 |
87 | 78 | #else |
88 | 79 | #define REUSE_SKBUFFS_WITHOUT_FREE 1 |
89 | -#endif | |
90 | 80 | #endif |
91 | 81 | |
92 | 82 | /* Max interrupts per second per core */ |
drivers/staging/octeon/ethernet-mem.c
... | ... | @@ -26,8 +26,6 @@ |
26 | 26 | **********************************************************************/ |
27 | 27 | #include <linux/kernel.h> |
28 | 28 | #include <linux/netdevice.h> |
29 | -#include <linux/mii.h> | |
30 | -#include <net/dst.h> | |
31 | 29 | |
32 | 30 | #include <asm/octeon/octeon.h> |
33 | 31 | |
34 | 32 | |
... | ... | @@ -107,42 +105,17 @@ |
107 | 105 | char *memory; |
108 | 106 | int freed = elements; |
109 | 107 | |
110 | - if (USE_32BIT_SHARED) { | |
111 | - extern uint64_t octeon_reserve32_memory; | |
112 | - | |
113 | - memory = | |
114 | - cvmx_bootmem_alloc_range(elements * size, 128, | |
115 | - octeon_reserve32_memory, | |
116 | - octeon_reserve32_memory + | |
117 | - (CONFIG_CAVIUM_RESERVE32 << 20) - | |
118 | - 1); | |
119 | - if (memory == NULL) | |
120 | - panic("Unable to allocate %u bytes for FPA pool %d\n", | |
121 | - elements * size, pool); | |
122 | - | |
123 | - pr_notice("Memory range %p - %p reserved for " | |
124 | - "hardware\n", memory, | |
125 | - memory + elements * size - 1); | |
126 | - | |
127 | - while (freed) { | |
128 | - cvmx_fpa_free(memory, pool, 0); | |
129 | - memory += size; | |
130 | - freed--; | |
108 | + while (freed) { | |
109 | + /* We need to force alignment to 128 bytes here */ | |
110 | + memory = kmalloc(size + 127, GFP_ATOMIC); | |
111 | + if (unlikely(memory == NULL)) { | |
112 | + pr_warning("Unable to allocate %u bytes for FPA pool %d\n", | |
113 | + elements * size, pool); | |
114 | + break; | |
131 | 115 | } |
132 | - } else { | |
133 | - while (freed) { | |
134 | - /* We need to force alignment to 128 bytes here */ | |
135 | - memory = kmalloc(size + 127, GFP_ATOMIC); | |
136 | - if (unlikely(memory == NULL)) { | |
137 | - pr_warning("Unable to allocate %u bytes for " | |
138 | - "FPA pool %d\n", | |
139 | - elements * size, pool); | |
140 | - break; | |
141 | - } | |
142 | - memory = (char *)(((unsigned long)memory + 127) & -128); | |
143 | - cvmx_fpa_free(memory, pool, 0); | |
144 | - freed--; | |
145 | - } | |
116 | + memory = (char *)(((unsigned long)memory + 127) & -128); | |
117 | + cvmx_fpa_free(memory, pool, 0); | |
118 | + freed--; | |
146 | 119 | } |
147 | 120 | return elements - freed; |
148 | 121 | } |
149 | 122 | |
... | ... | @@ -156,27 +129,21 @@ |
156 | 129 | */ |
157 | 130 | static void cvm_oct_free_hw_memory(int pool, int size, int elements) |
158 | 131 | { |
159 | - if (USE_32BIT_SHARED) { | |
160 | - pr_warning("Warning: 32 shared memory is not freeable\n"); | |
161 | - } else { | |
162 | - char *memory; | |
163 | - do { | |
164 | - memory = cvmx_fpa_alloc(pool); | |
165 | - if (memory) { | |
166 | - elements--; | |
167 | - kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); | |
168 | - } | |
169 | - } while (memory); | |
132 | + char *memory; | |
133 | + do { | |
134 | + memory = cvmx_fpa_alloc(pool); | |
135 | + if (memory) { | |
136 | + elements--; | |
137 | + kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); | |
138 | + } | |
139 | + } while (memory); | |
170 | 140 | |
171 | - if (elements < 0) | |
172 | - pr_warning("Freeing of pool %u had too many " | |
173 | - "buffers (%d)\n", | |
174 | - pool, elements); | |
175 | - else if (elements > 0) | |
176 | - pr_warning("Warning: Freeing of pool %u is " | |
177 | - "missing %d buffers\n", | |
178 | - pool, elements); | |
179 | - } | |
141 | + if (elements < 0) | |
142 | + pr_warning("Freeing of pool %u had too many buffers (%d)\n", | |
143 | + pool, elements); | |
144 | + else if (elements > 0) | |
145 | + pr_warning("Warning: Freeing of pool %u is missing %d buffers\n", | |
146 | + pool, elements); | |
180 | 147 | } |
181 | 148 | |
182 | 149 | int cvm_oct_mem_fill_fpa(int pool, int size, int elements) |
drivers/staging/octeon/ethernet-rx.c
... | ... | @@ -33,10 +33,6 @@ |
33 | 33 | #include <linux/ip.h> |
34 | 34 | #include <linux/string.h> |
35 | 35 | #include <linux/prefetch.h> |
36 | -#include <linux/ethtool.h> | |
37 | -#include <linux/mii.h> | |
38 | -#include <linux/seq_file.h> | |
39 | -#include <linux/proc_fs.h> | |
40 | 36 | #include <net/dst.h> |
41 | 37 | #ifdef CONFIG_XFRM |
42 | 38 | #include <linux/xfrm.h> |
43 | 39 | |
44 | 40 | |
45 | 41 | |
... | ... | @@ -292,39 +288,27 @@ |
292 | 288 | * buffer. |
293 | 289 | */ |
294 | 290 | if (likely(skb_in_hw)) { |
295 | - /* | |
296 | - * This calculation was changed in case the | |
297 | - * skb header is using a different address | |
298 | - * aliasing type than the buffer. It doesn't | |
299 | - * make any differnece now, but the new one is | |
300 | - * more correct. | |
301 | - */ | |
302 | - skb->data = | |
303 | - skb->head + work->packet_ptr.s.addr - | |
304 | - cvmx_ptr_to_phys(skb->head); | |
291 | + skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); | |
305 | 292 | prefetch(skb->data); |
306 | 293 | skb->len = work->len; |
307 | 294 | skb_set_tail_pointer(skb, skb->len); |
308 | 295 | packet_not_copied = 1; |
309 | 296 | } else { |
310 | - | |
311 | 297 | /* |
312 | 298 | * We have to copy the packet. First allocate |
313 | 299 | * an skbuff for it. |
314 | 300 | */ |
315 | 301 | skb = dev_alloc_skb(work->len); |
316 | 302 | if (!skb) { |
317 | - DEBUGPRINT("Port %d failed to allocate " | |
318 | - "skbuff, packet dropped\n", | |
319 | - work->ipprt); | |
303 | + DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n", | |
304 | + work->ipprt); | |
320 | 305 | cvm_oct_free_work(work); |
321 | 306 | continue; |
322 | 307 | } |
323 | 308 | |
324 | 309 | /* |
325 | 310 | * Check if we've received a packet that was |
326 | - * entirely stored in the work entry. This is | |
327 | - * untested. | |
311 | + * entirely stored in the work entry. | |
328 | 312 | */ |
329 | 313 | if (unlikely(work->word2.s.bufs == 0)) { |
330 | 314 | uint8_t *ptr = work->packet_data; |
331 | 315 | |
... | ... | @@ -343,15 +327,13 @@ |
343 | 327 | /* No packet buffers to free */ |
344 | 328 | } else { |
345 | 329 | int segments = work->word2.s.bufs; |
346 | - union cvmx_buf_ptr segment_ptr = | |
347 | - work->packet_ptr; | |
330 | + union cvmx_buf_ptr segment_ptr = work->packet_ptr; | |
348 | 331 | int len = work->len; |
349 | 332 | |
350 | 333 | while (segments--) { |
351 | 334 | union cvmx_buf_ptr next_ptr = |
352 | - *(union cvmx_buf_ptr *) | |
353 | - cvmx_phys_to_ptr(segment_ptr.s. | |
354 | - addr - 8); | |
335 | + *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); | |
336 | + | |
355 | 337 | /* |
356 | 338 | * Octeon Errata PKI-100: The segment size is |
357 | 339 | * wrong. Until it is fixed, calculate the |
358 | 340 | |
359 | 341 | |
... | ... | @@ -361,22 +343,18 @@ |
361 | 343 | * one: int segment_size = |
362 | 344 | * segment_ptr.s.size; |
363 | 345 | */ |
364 | - int segment_size = | |
365 | - CVMX_FPA_PACKET_POOL_SIZE - | |
366 | - (segment_ptr.s.addr - | |
367 | - (((segment_ptr.s.addr >> 7) - | |
368 | - segment_ptr.s.back) << 7)); | |
369 | - /* Don't copy more than what is left | |
370 | - in the packet */ | |
346 | + int segment_size = CVMX_FPA_PACKET_POOL_SIZE - | |
347 | + (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); | |
348 | + /* | |
349 | + * Don't copy more than what | |
350 | + * is left in the packet. | |
351 | + */ | |
371 | 352 | if (segment_size > len) |
372 | 353 | segment_size = len; |
373 | 354 | /* Copy the data into the packet */ |
374 | 355 | memcpy(skb_put(skb, segment_size), |
375 | - cvmx_phys_to_ptr(segment_ptr.s. | |
376 | - addr), | |
356 | + cvmx_phys_to_ptr(segment_ptr.s.addr), | |
377 | 357 | segment_size); |
378 | - /* Reduce the amount of bytes left | |
379 | - to copy */ | |
380 | 358 | len -= segment_size; |
381 | 359 | segment_ptr = next_ptr; |
382 | 360 | } |
383 | 361 | |
... | ... | @@ -389,16 +367,15 @@ |
389 | 367 | struct net_device *dev = cvm_oct_device[work->ipprt]; |
390 | 368 | struct octeon_ethernet *priv = netdev_priv(dev); |
391 | 369 | |
392 | - /* Only accept packets for devices | |
393 | - that are currently up */ | |
370 | + /* | |
371 | + * Only accept packets for devices that are | |
372 | + * currently up. | |
373 | + */ | |
394 | 374 | if (likely(dev->flags & IFF_UP)) { |
395 | 375 | skb->protocol = eth_type_trans(skb, dev); |
396 | 376 | skb->dev = dev; |
397 | 377 | |
398 | - if (unlikely | |
399 | - (work->word2.s.not_IP | |
400 | - || work->word2.s.IP_exc | |
401 | - || work->word2.s.L4_error)) | |
378 | + if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) | |
402 | 379 | skb->ip_summed = CHECKSUM_NONE; |
403 | 380 | else |
404 | 381 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
405 | 382 | |
... | ... | @@ -415,14 +392,11 @@ |
415 | 392 | } |
416 | 393 | netif_receive_skb(skb); |
417 | 394 | } else { |
395 | + /* Drop any packet received for a device that isn't up */ | |
418 | 396 | /* |
419 | - * Drop any packet received for a | |
420 | - * device that isn't up. | |
421 | - */ | |
422 | - /* | |
423 | - DEBUGPRINT("%s: Device not up, packet dropped\n", | |
424 | - dev->name); | |
425 | - */ | |
397 | + DEBUGPRINT("%s: Device not up, packet dropped\n", | |
398 | + dev->name); | |
399 | + */ | |
426 | 400 | #ifdef CONFIG_64BIT |
427 | 401 | atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); |
428 | 402 | #else |
... | ... | @@ -435,9 +409,8 @@ |
435 | 409 | * Drop any packet received for a device that |
436 | 410 | * doesn't exist. |
437 | 411 | */ |
438 | - DEBUGPRINT("Port %d not controlled by Linux, packet " | |
439 | - "dropped\n", | |
440 | - work->ipprt); | |
412 | + DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n", | |
413 | + work->ipprt); | |
441 | 414 | dev_kfree_skb_irq(skb); |
442 | 415 | } |
443 | 416 | /* |
drivers/staging/octeon/ethernet-tx.c
... | ... | @@ -31,10 +31,6 @@ |
31 | 31 | #include <linux/etherdevice.h> |
32 | 32 | #include <linux/ip.h> |
33 | 33 | #include <linux/string.h> |
34 | -#include <linux/ethtool.h> | |
35 | -#include <linux/mii.h> | |
36 | -#include <linux/seq_file.h> | |
37 | -#include <linux/proc_fs.h> | |
38 | 34 | #include <net/dst.h> |
39 | 35 | #ifdef CONFIG_XFRM |
40 | 36 | #include <linux/xfrm.h> |
... | ... | @@ -527,101 +523,6 @@ |
527 | 523 | dev_kfree_skb(skb); |
528 | 524 | return 0; |
529 | 525 | } |
530 | - | |
531 | -/** | |
532 | - * Transmit a work queue entry out of the ethernet port. Both | |
533 | - * the work queue entry and the packet data can optionally be | |
534 | - * freed. The work will be freed on error as well. | |
535 | - * | |
536 | - * @dev: Device to transmit out. | |
537 | - * @work_queue_entry: | |
538 | - * Work queue entry to send | |
539 | - * @do_free: True if the work queue entry and packet data should be | |
540 | - * freed. If false, neither will be freed. | |
541 | - * @qos: Index into the queues for this port to transmit on. This | |
542 | - * is used to implement QoS if their are multiple queues per | |
543 | - * port. This parameter must be between 0 and the number of | |
544 | - * queues per port minus 1. Values outside of this range will | |
545 | - * be change to zero. | |
546 | - * | |
547 | - * Returns Zero on success, negative on failure. | |
548 | - */ | |
549 | -int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, | |
550 | - int do_free, int qos) | |
551 | -{ | |
552 | - unsigned long flags; | |
553 | - union cvmx_buf_ptr hw_buffer; | |
554 | - cvmx_pko_command_word0_t pko_command; | |
555 | - int dropped; | |
556 | - struct octeon_ethernet *priv = netdev_priv(dev); | |
557 | - cvmx_wqe_t *work = work_queue_entry; | |
558 | - | |
559 | - if (!(dev->flags & IFF_UP)) { | |
560 | - DEBUGPRINT("%s: Device not up\n", dev->name); | |
561 | - if (do_free) | |
562 | - cvm_oct_free_work(work); | |
563 | - return -1; | |
564 | - } | |
565 | - | |
566 | - /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely | |
567 | - remove "qos" in the event neither interface supports | |
568 | - multiple queues per port */ | |
569 | - if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || | |
570 | - (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { | |
571 | - if (qos <= 0) | |
572 | - qos = 0; | |
573 | - else if (qos >= cvmx_pko_get_num_queues(priv->port)) | |
574 | - qos = 0; | |
575 | - } else | |
576 | - qos = 0; | |
577 | - | |
578 | - /* Start off assuming no drop */ | |
579 | - dropped = 0; | |
580 | - | |
581 | - local_irq_save(flags); | |
582 | - cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, | |
583 | - CVMX_PKO_LOCK_CMD_QUEUE); | |
584 | - | |
585 | - /* Build the PKO buffer pointer */ | |
586 | - hw_buffer.u64 = 0; | |
587 | - hw_buffer.s.addr = work->packet_ptr.s.addr; | |
588 | - hw_buffer.s.pool = CVMX_FPA_PACKET_POOL; | |
589 | - hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE; | |
590 | - hw_buffer.s.back = work->packet_ptr.s.back; | |
591 | - | |
592 | - /* Build the PKO command */ | |
593 | - pko_command.u64 = 0; | |
594 | - pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ | |
595 | - pko_command.s.dontfree = !do_free; | |
596 | - pko_command.s.segs = work->word2.s.bufs; | |
597 | - pko_command.s.total_bytes = work->len; | |
598 | - | |
599 | - /* Check if we can use the hardware checksumming */ | |
600 | - if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc)) | |
601 | - pko_command.s.ipoffp1 = 0; | |
602 | - else | |
603 | - pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; | |
604 | - | |
605 | - /* Send the packet to the output queue */ | |
606 | - if (unlikely | |
607 | - (cvmx_pko_send_packet_finish | |
608 | - (priv->port, priv->queue + qos, pko_command, hw_buffer, | |
609 | - CVMX_PKO_LOCK_CMD_QUEUE))) { | |
610 | - DEBUGPRINT("%s: Failed to send the packet\n", dev->name); | |
611 | - dropped = -1; | |
612 | - } | |
613 | - local_irq_restore(flags); | |
614 | - | |
615 | - if (unlikely(dropped)) { | |
616 | - if (do_free) | |
617 | - cvm_oct_free_work(work); | |
618 | - priv->stats.tx_dropped++; | |
619 | - } else if (do_free) | |
620 | - cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); | |
621 | - | |
622 | - return dropped; | |
623 | -} | |
624 | -EXPORT_SYMBOL(cvm_oct_transmit_qos); | |
625 | 526 | |
626 | 527 | /** |
627 | 528 | * This function frees all skb that are currently queued for TX. |
drivers/staging/octeon/ethernet.c
... | ... | @@ -104,14 +104,6 @@ |
104 | 104 | "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" |
105 | 105 | "\tusing the pow_send_group."); |
106 | 106 | |
107 | -static int disable_core_queueing = 1; | |
108 | -module_param(disable_core_queueing, int, 0444); | |
109 | -MODULE_PARM_DESC(disable_core_queueing, "\n" | |
110 | - "\tWhen set the networking core's tx_queue_len is set to zero. This\n" | |
111 | - "\tallows packets to be sent without lock contention in the packet\n" | |
112 | - "\tscheduler resulting in some cases in improved throughput.\n"); | |
113 | - | |
114 | - | |
115 | 107 | /* |
116 | 108 | * The offset from mac_addr_base that should be used for the next port |
117 | 109 | * that is configured. By convention, if any mgmt ports exist on the |
... | ... | @@ -205,10 +197,6 @@ |
205 | 197 | cvmx_helper_setup_red(num_packet_buffers / 4, |
206 | 198 | num_packet_buffers / 8); |
207 | 199 | |
208 | - /* Enable the MII interface */ | |
209 | - if (!octeon_is_simulation()) | |
210 | - cvmx_write_csr(CVMX_SMIX_EN(0), 1); | |
211 | - | |
212 | 200 | /* Register an IRQ hander for to receive POW interrupts */ |
213 | 201 | r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, |
214 | 202 | cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet", |
... | ... | @@ -689,7 +677,6 @@ |
689 | 677 | if (dev) { |
690 | 678 | /* Initialize the device private structure. */ |
691 | 679 | struct octeon_ethernet *priv = netdev_priv(dev); |
692 | - memset(priv, 0, sizeof(struct octeon_ethernet)); | |
693 | 680 | |
694 | 681 | dev->netdev_ops = &cvm_oct_pow_netdev_ops; |
695 | 682 | priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; |
696 | 683 | |
697 | 684 | |
... | ... | @@ -700,19 +687,16 @@ |
700 | 687 | skb_queue_head_init(&priv->tx_free_list[qos]); |
701 | 688 | |
702 | 689 | if (register_netdev(dev) < 0) { |
703 | - pr_err("Failed to register ethernet " | |
704 | - "device for POW\n"); | |
690 | + pr_err("Failed to register ethernet device for POW\n"); | |
705 | 691 | kfree(dev); |
706 | 692 | } else { |
707 | 693 | cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; |
708 | - pr_info("%s: POW send group %d, receive " | |
709 | - "group %d\n", | |
710 | - dev->name, pow_send_group, | |
711 | - pow_receive_group); | |
694 | + pr_info("%s: POW send group %d, receive group %d\n", | |
695 | + dev->name, pow_send_group, | |
696 | + pow_receive_group); | |
712 | 697 | } |
713 | 698 | } else { |
714 | - pr_err("Failed to allocate ethernet device " | |
715 | - "for POW\n"); | |
699 | + pr_err("Failed to allocate ethernet device for POW\n"); | |
716 | 700 | } |
717 | 701 | } |
718 | 702 | |
719 | 703 | |
... | ... | @@ -730,12 +714,9 @@ |
730 | 714 | struct net_device *dev = |
731 | 715 | alloc_etherdev(sizeof(struct octeon_ethernet)); |
732 | 716 | if (!dev) { |
733 | - pr_err("Failed to allocate ethernet device " | |
734 | - "for port %d\n", port); | |
717 | + pr_err("Failed to allocate ethernet device for port %d\n", port); | |
735 | 718 | continue; |
736 | 719 | } |
737 | - if (disable_core_queueing) | |
738 | - dev->tx_queue_len = 0; | |
739 | 720 | |
740 | 721 | /* Initialize the device private structure. */ |
741 | 722 | priv = netdev_priv(dev); |
drivers/staging/octeon/octeon-ethernet.h
... | ... | @@ -68,47 +68,6 @@ |
68 | 68 | */ |
69 | 69 | int cvm_oct_free_work(void *work_queue_entry); |
70 | 70 | |
71 | -/** | |
72 | - * Transmit a work queue entry out of the ethernet port. Both | |
73 | - * the work queue entry and the packet data can optionally be | |
74 | - * freed. The work will be freed on error as well. | |
75 | - * | |
76 | - * @dev: Device to transmit out. | |
77 | - * @work_queue_entry: | |
78 | - * Work queue entry to send | |
79 | - * @do_free: True if the work queue entry and packet data should be | |
80 | - * freed. If false, neither will be freed. | |
81 | - * @qos: Index into the queues for this port to transmit on. This | |
82 | - * is used to implement QoS if their are multiple queues per | |
83 | - * port. This parameter must be between 0 and the number of | |
84 | - * queues per port minus 1. Values outside of this range will | |
85 | - * be change to zero. | |
86 | - * | |
87 | - * Returns Zero on success, negative on failure. | |
88 | - */ | |
89 | -int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, | |
90 | - int do_free, int qos); | |
91 | - | |
92 | -/** | |
93 | - * Transmit a work queue entry out of the ethernet port. Both | |
94 | - * the work queue entry and the packet data can optionally be | |
95 | - * freed. The work will be freed on error as well. This simply | |
96 | - * wraps cvmx_oct_transmit_qos() for backwards compatability. | |
97 | - * | |
98 | - * @dev: Device to transmit out. | |
99 | - * @work_queue_entry: | |
100 | - * Work queue entry to send | |
101 | - * @do_free: True if the work queue entry and packet data should be | |
102 | - * freed. If false, neither will be freed. | |
103 | - * | |
104 | - * Returns Zero on success, negative on failure. | |
105 | - */ | |
106 | -static inline int cvm_oct_transmit(struct net_device *dev, | |
107 | - void *work_queue_entry, int do_free) | |
108 | -{ | |
109 | - return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0); | |
110 | -} | |
111 | - | |
112 | 71 | extern int cvm_oct_rgmii_init(struct net_device *dev); |
113 | 72 | extern void cvm_oct_rgmii_uninit(struct net_device *dev); |
114 | 73 | extern int cvm_oct_rgmii_open(struct net_device *dev); |