Commit a4be637b34a543af5b5421a8ee0ee04d30f5f58e
Committed by: Ralf Baechle
1 parent: 2f19d080fb
Exists in: master and 6 other branches
staging/octeon: Software should check the checksum of non-TCP/UDP packets

ICMP packets with a wrong checksum are never dropped, because skb->ip_summed is set to CHECKSUM_UNNECESSARY. When an ICMP packet with a bad checksum passes through the octeon net driver, the not_IP, IP_exc and L4_error hardware indicators show no error, so the driver sets CHECKSUM_UNNECESSARY on skb->ip_summed. L4_error only works for TCP/UDP, not for ICMP, so checksum verification of non-TCP/UDP packets must be left to software.

Signed-off-by: Roy.Li <rongqing.li@windriver.com>
To: linux-mips@linux-mips.org
Cc: netdev@vger.kernel.org
Cc: ralf@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2798/
Acked-by: David Daney <david.daney@cavium.com>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
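The reasoning maps directly onto the receive path: the hardware L4 result is only meaningful when the packet actually is TCP or UDP, so the corrected condition also consults the tcp_or_udp flag of the work-queue entry. A minimal sketch of the decision, mirroring the hunk below (the work->word2.s.* fields are those used by the driver):

	/*
	 * Trust the hardware checksum result only for IP packets it could
	 * actually verify; everything else (e.g. ICMP) is handed to the
	 * stack as CHECKSUM_NONE so the checksum is re-checked in software.
	 */
	if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
		     work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
		skb->ip_summed = CHECKSUM_NONE;		/* software verifies */
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* TCP/UDP verified by hardware */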
Showing 1 changed file with 2 additions and 1 deletion
drivers/staging/octeon/ethernet-rx.c
1 | /********************************************************************** | 1 | /********************************************************************** |
2 | * Author: Cavium Networks | 2 | * Author: Cavium Networks |
3 | * | 3 | * |
4 | * Contact: support@caviumnetworks.com | 4 | * Contact: support@caviumnetworks.com |
5 | * This file is part of the OCTEON SDK | 5 | * This file is part of the OCTEON SDK |
6 | * | 6 | * |
7 | * Copyright (c) 2003-2010 Cavium Networks | 7 | * Copyright (c) 2003-2010 Cavium Networks |
8 | * | 8 | * |
9 | * This file is free software; you can redistribute it and/or modify | 9 | * This file is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License, Version 2, as | 10 | * it under the terms of the GNU General Public License, Version 2, as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This file is distributed in the hope that it will be useful, but | 13 | * This file is distributed in the hope that it will be useful, but |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | 14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | 15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or |
16 | * NONINFRINGEMENT. See the GNU General Public License for more | 16 | * NONINFRINGEMENT. See the GNU General Public License for more |
17 | * details. | 17 | * details. |
18 | * | 18 | * |
19 | * You should have received a copy of the GNU General Public License | 19 | * You should have received a copy of the GNU General Public License |
20 | * along with this file; if not, write to the Free Software | 20 | * along with this file; if not, write to the Free Software |
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
22 | * or visit http://www.gnu.org/licenses/. | 22 | * or visit http://www.gnu.org/licenses/. |
23 | * | 23 | * |
24 | * This file may also be available under a different license from Cavium. | 24 | * This file may also be available under a different license from Cavium. |
25 | * Contact Cavium Networks for more information | 25 | * Contact Cavium Networks for more information |
26 | **********************************************************************/ | 26 | **********************************************************************/ |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/cache.h> | 29 | #include <linux/cache.h> |
30 | #include <linux/cpumask.h> | 30 | #include <linux/cpumask.h> |
31 | #include <linux/netdevice.h> | 31 | #include <linux/netdevice.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/etherdevice.h> | 33 | #include <linux/etherdevice.h> |
34 | #include <linux/ip.h> | 34 | #include <linux/ip.h> |
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
36 | #include <linux/prefetch.h> | 36 | #include <linux/prefetch.h> |
37 | #include <linux/ratelimit.h> | 37 | #include <linux/ratelimit.h> |
38 | #include <linux/smp.h> | 38 | #include <linux/smp.h> |
39 | #include <net/dst.h> | 39 | #include <net/dst.h> |
40 | #ifdef CONFIG_XFRM | 40 | #ifdef CONFIG_XFRM |
41 | #include <linux/xfrm.h> | 41 | #include <linux/xfrm.h> |
42 | #include <net/xfrm.h> | 42 | #include <net/xfrm.h> |
43 | #endif /* CONFIG_XFRM */ | 43 | #endif /* CONFIG_XFRM */ |
44 | 44 | ||
45 | #include <linux/atomic.h> | 45 | #include <linux/atomic.h> |
46 | 46 | ||
47 | #include <asm/octeon/octeon.h> | 47 | #include <asm/octeon/octeon.h> |
48 | 48 | ||
49 | #include "ethernet-defines.h" | 49 | #include "ethernet-defines.h" |
50 | #include "ethernet-mem.h" | 50 | #include "ethernet-mem.h" |
51 | #include "ethernet-rx.h" | 51 | #include "ethernet-rx.h" |
52 | #include "octeon-ethernet.h" | 52 | #include "octeon-ethernet.h" |
53 | #include "ethernet-util.h" | 53 | #include "ethernet-util.h" |
54 | 54 | ||
55 | #include "cvmx-helper.h" | 55 | #include "cvmx-helper.h" |
56 | #include "cvmx-wqe.h" | 56 | #include "cvmx-wqe.h" |
57 | #include "cvmx-fau.h" | 57 | #include "cvmx-fau.h" |
58 | #include "cvmx-pow.h" | 58 | #include "cvmx-pow.h" |
59 | #include "cvmx-pip.h" | 59 | #include "cvmx-pip.h" |
60 | #include "cvmx-scratch.h" | 60 | #include "cvmx-scratch.h" |
61 | 61 | ||
62 | #include "cvmx-gmxx-defs.h" | 62 | #include "cvmx-gmxx-defs.h" |
63 | 63 | ||
64 | struct cvm_napi_wrapper { | 64 | struct cvm_napi_wrapper { |
65 | struct napi_struct napi; | 65 | struct napi_struct napi; |
66 | } ____cacheline_aligned_in_smp; | 66 | } ____cacheline_aligned_in_smp; |
67 | 67 | ||
68 | static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; | 68 | static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; |
69 | 69 | ||
70 | struct cvm_oct_core_state { | 70 | struct cvm_oct_core_state { |
71 | int baseline_cores; | 71 | int baseline_cores; |
72 | /* | 72 | /* |
73 | * The number of additional cores that could be processing | 73 | * The number of additional cores that could be processing |
74 | * input packtes. | 74 | * input packtes. |
75 | */ | 75 | */ |
76 | atomic_t available_cores; | 76 | atomic_t available_cores; |
77 | cpumask_t cpu_state; | 77 | cpumask_t cpu_state; |
78 | } ____cacheline_aligned_in_smp; | 78 | } ____cacheline_aligned_in_smp; |
79 | 79 | ||
80 | static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; | 80 | static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; |
81 | 81 | ||
82 | static void cvm_oct_enable_napi(void *_) | 82 | static void cvm_oct_enable_napi(void *_) |
83 | { | 83 | { |
84 | int cpu = smp_processor_id(); | 84 | int cpu = smp_processor_id(); |
85 | napi_schedule(&cvm_oct_napi[cpu].napi); | 85 | napi_schedule(&cvm_oct_napi[cpu].napi); |
86 | } | 86 | } |
87 | 87 | ||
88 | static void cvm_oct_enable_one_cpu(void) | 88 | static void cvm_oct_enable_one_cpu(void) |
89 | { | 89 | { |
90 | int v; | 90 | int v; |
91 | int cpu; | 91 | int cpu; |
92 | 92 | ||
93 | /* Check to see if more CPUs are available for receive processing... */ | 93 | /* Check to see if more CPUs are available for receive processing... */ |
94 | v = atomic_sub_if_positive(1, &core_state.available_cores); | 94 | v = atomic_sub_if_positive(1, &core_state.available_cores); |
95 | if (v < 0) | 95 | if (v < 0) |
96 | return; | 96 | return; |
97 | 97 | ||
98 | /* ... if a CPU is available, Turn on NAPI polling for that CPU. */ | 98 | /* ... if a CPU is available, Turn on NAPI polling for that CPU. */ |
99 | for_each_online_cpu(cpu) { | 99 | for_each_online_cpu(cpu) { |
100 | if (!cpu_test_and_set(cpu, core_state.cpu_state)) { | 100 | if (!cpu_test_and_set(cpu, core_state.cpu_state)) { |
101 | v = smp_call_function_single(cpu, cvm_oct_enable_napi, | 101 | v = smp_call_function_single(cpu, cvm_oct_enable_napi, |
102 | NULL, 0); | 102 | NULL, 0); |
103 | if (v) | 103 | if (v) |
104 | panic("Can't enable NAPI."); | 104 | panic("Can't enable NAPI."); |
105 | break; | 105 | break; |
106 | } | 106 | } |
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
110 | static void cvm_oct_no_more_work(void) | 110 | static void cvm_oct_no_more_work(void) |
111 | { | 111 | { |
112 | int cpu = smp_processor_id(); | 112 | int cpu = smp_processor_id(); |
113 | 113 | ||
114 | /* | 114 | /* |
115 | * CPU zero is special. It always has the irq enabled when | 115 | * CPU zero is special. It always has the irq enabled when |
116 | * waiting for incoming packets. | 116 | * waiting for incoming packets. |
117 | */ | 117 | */ |
118 | if (cpu == 0) { | 118 | if (cpu == 0) { |
119 | enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); | 119 | enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); |
120 | return; | 120 | return; |
121 | } | 121 | } |
122 | 122 | ||
123 | cpu_clear(cpu, core_state.cpu_state); | 123 | cpu_clear(cpu, core_state.cpu_state); |
124 | atomic_add(1, &core_state.available_cores); | 124 | atomic_add(1, &core_state.available_cores); |
125 | } | 125 | } |
126 | 126 | ||
127 | /** | 127 | /** |
128 | * cvm_oct_do_interrupt - interrupt handler. | 128 | * cvm_oct_do_interrupt - interrupt handler. |
129 | * | 129 | * |
130 | * The interrupt occurs whenever the POW has packets in our group. | 130 | * The interrupt occurs whenever the POW has packets in our group. |
131 | * | 131 | * |
132 | */ | 132 | */ |
133 | static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) | 133 | static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) |
134 | { | 134 | { |
135 | /* Disable the IRQ and start napi_poll. */ | 135 | /* Disable the IRQ and start napi_poll. */ |
136 | disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); | 136 | disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); |
137 | cvm_oct_enable_napi(NULL); | 137 | cvm_oct_enable_napi(NULL); |
138 | 138 | ||
139 | return IRQ_HANDLED; | 139 | return IRQ_HANDLED; |
140 | } | 140 | } |
141 | 141 | ||
142 | /** | 142 | /** |
143 | * cvm_oct_check_rcv_error - process receive errors | 143 | * cvm_oct_check_rcv_error - process receive errors |
144 | * @work: Work queue entry pointing to the packet. | 144 | * @work: Work queue entry pointing to the packet. |
145 | * | 145 | * |
146 | * Returns Non-zero if the packet can be dropped, zero otherwise. | 146 | * Returns Non-zero if the packet can be dropped, zero otherwise. |
147 | */ | 147 | */ |
148 | static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) | 148 | static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) |
149 | { | 149 | { |
150 | if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) { | 150 | if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) { |
151 | /* | 151 | /* |
152 | * Ignore length errors on min size packets. Some | 152 | * Ignore length errors on min size packets. Some |
153 | * equipment incorrectly pads packets to 64+4FCS | 153 | * equipment incorrectly pads packets to 64+4FCS |
154 | * instead of 60+4FCS. Note these packets still get | 154 | * instead of 60+4FCS. Note these packets still get |
155 | * counted as frame errors. | 155 | * counted as frame errors. |
156 | */ | 156 | */ |
157 | } else | 157 | } else |
158 | if (USE_10MBPS_PREAMBLE_WORKAROUND | 158 | if (USE_10MBPS_PREAMBLE_WORKAROUND |
159 | && ((work->word2.snoip.err_code == 5) | 159 | && ((work->word2.snoip.err_code == 5) |
160 | || (work->word2.snoip.err_code == 7))) { | 160 | || (work->word2.snoip.err_code == 7))) { |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * We received a packet with either an alignment error | 163 | * We received a packet with either an alignment error |
164 | * or a FCS error. This may be signalling that we are | 164 | * or a FCS error. This may be signalling that we are |
165 | * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK} | 165 | * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK} |
166 | * off. If this is the case we need to parse the | 166 | * off. If this is the case we need to parse the |
167 | * packet to determine if we can remove a non spec | 167 | * packet to determine if we can remove a non spec |
168 | * preamble and generate a correct packet. | 168 | * preamble and generate a correct packet. |
169 | */ | 169 | */ |
170 | int interface = cvmx_helper_get_interface_num(work->ipprt); | 170 | int interface = cvmx_helper_get_interface_num(work->ipprt); |
171 | int index = cvmx_helper_get_interface_index_num(work->ipprt); | 171 | int index = cvmx_helper_get_interface_index_num(work->ipprt); |
172 | union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; | 172 | union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; |
173 | gmxx_rxx_frm_ctl.u64 = | 173 | gmxx_rxx_frm_ctl.u64 = |
174 | cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); | 174 | cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); |
175 | if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { | 175 | if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { |
176 | 176 | ||
177 | uint8_t *ptr = | 177 | uint8_t *ptr = |
178 | cvmx_phys_to_ptr(work->packet_ptr.s.addr); | 178 | cvmx_phys_to_ptr(work->packet_ptr.s.addr); |
179 | int i = 0; | 179 | int i = 0; |
180 | 180 | ||
181 | while (i < work->len - 1) { | 181 | while (i < work->len - 1) { |
182 | if (*ptr != 0x55) | 182 | if (*ptr != 0x55) |
183 | break; | 183 | break; |
184 | ptr++; | 184 | ptr++; |
185 | i++; | 185 | i++; |
186 | } | 186 | } |
187 | 187 | ||
188 | if (*ptr == 0xd5) { | 188 | if (*ptr == 0xd5) { |
189 | /* | 189 | /* |
190 | printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt); | 190 | printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt); |
191 | */ | 191 | */ |
192 | work->packet_ptr.s.addr += i + 1; | 192 | work->packet_ptr.s.addr += i + 1; |
193 | work->len -= i + 5; | 193 | work->len -= i + 5; |
194 | } else if ((*ptr & 0xf) == 0xd) { | 194 | } else if ((*ptr & 0xf) == 0xd) { |
195 | /* | 195 | /* |
196 | printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt); | 196 | printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt); |
197 | */ | 197 | */ |
198 | work->packet_ptr.s.addr += i; | 198 | work->packet_ptr.s.addr += i; |
199 | work->len -= i + 4; | 199 | work->len -= i + 4; |
200 | for (i = 0; i < work->len; i++) { | 200 | for (i = 0; i < work->len; i++) { |
201 | *ptr = | 201 | *ptr = |
202 | ((*ptr & 0xf0) >> 4) | | 202 | ((*ptr & 0xf0) >> 4) | |
203 | ((*(ptr + 1) & 0xf) << 4); | 203 | ((*(ptr + 1) & 0xf) << 4); |
204 | ptr++; | 204 | ptr++; |
205 | } | 205 | } |
206 | } else { | 206 | } else { |
207 | printk_ratelimited("Port %d unknown preamble, packet " | 207 | printk_ratelimited("Port %d unknown preamble, packet " |
208 | "dropped\n", | 208 | "dropped\n", |
209 | work->ipprt); | 209 | work->ipprt); |
210 | /* | 210 | /* |
211 | cvmx_helper_dump_packet(work); | 211 | cvmx_helper_dump_packet(work); |
212 | */ | 212 | */ |
213 | cvm_oct_free_work(work); | 213 | cvm_oct_free_work(work); |
214 | return 1; | 214 | return 1; |
215 | } | 215 | } |
216 | } | 216 | } |
217 | } else { | 217 | } else { |
218 | printk_ratelimited("Port %d receive error code %d, packet dropped\n", | 218 | printk_ratelimited("Port %d receive error code %d, packet dropped\n", |
219 | work->ipprt, work->word2.snoip.err_code); | 219 | work->ipprt, work->word2.snoip.err_code); |
220 | cvm_oct_free_work(work); | 220 | cvm_oct_free_work(work); |
221 | return 1; | 221 | return 1; |
222 | } | 222 | } |
223 | 223 | ||
224 | return 0; | 224 | return 0; |
225 | } | 225 | } |
226 | 226 | ||
227 | /** | 227 | /** |
228 | * cvm_oct_napi_poll - the NAPI poll function. | 228 | * cvm_oct_napi_poll - the NAPI poll function. |
229 | * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller | 229 | * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller |
230 | * @budget: Maximum number of packets to receive. | 230 | * @budget: Maximum number of packets to receive. |
231 | * | 231 | * |
232 | * Returns the number of packets processed. | 232 | * Returns the number of packets processed. |
233 | */ | 233 | */ |
234 | static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) | 234 | static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) |
235 | { | 235 | { |
236 | const int coreid = cvmx_get_core_num(); | 236 | const int coreid = cvmx_get_core_num(); |
237 | uint64_t old_group_mask; | 237 | uint64_t old_group_mask; |
238 | uint64_t old_scratch; | 238 | uint64_t old_scratch; |
239 | int rx_count = 0; | 239 | int rx_count = 0; |
240 | int did_work_request = 0; | 240 | int did_work_request = 0; |
241 | int packet_not_copied; | 241 | int packet_not_copied; |
242 | 242 | ||
243 | /* Prefetch cvm_oct_device since we know we need it soon */ | 243 | /* Prefetch cvm_oct_device since we know we need it soon */ |
244 | prefetch(cvm_oct_device); | 244 | prefetch(cvm_oct_device); |
245 | 245 | ||
246 | if (USE_ASYNC_IOBDMA) { | 246 | if (USE_ASYNC_IOBDMA) { |
247 | /* Save scratch in case userspace is using it */ | 247 | /* Save scratch in case userspace is using it */ |
248 | CVMX_SYNCIOBDMA; | 248 | CVMX_SYNCIOBDMA; |
249 | old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); | 249 | old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); |
250 | } | 250 | } |
251 | 251 | ||
252 | /* Only allow work for our group (and preserve priorities) */ | 252 | /* Only allow work for our group (and preserve priorities) */ |
253 | old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); | 253 | old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); |
254 | cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), | 254 | cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), |
255 | (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); | 255 | (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); |
256 | 256 | ||
257 | if (USE_ASYNC_IOBDMA) { | 257 | if (USE_ASYNC_IOBDMA) { |
258 | cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); | 258 | cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); |
259 | did_work_request = 1; | 259 | did_work_request = 1; |
260 | } | 260 | } |
261 | 261 | ||
262 | while (rx_count < budget) { | 262 | while (rx_count < budget) { |
263 | struct sk_buff *skb = NULL; | 263 | struct sk_buff *skb = NULL; |
264 | struct sk_buff **pskb = NULL; | 264 | struct sk_buff **pskb = NULL; |
265 | int skb_in_hw; | 265 | int skb_in_hw; |
266 | cvmx_wqe_t *work; | 266 | cvmx_wqe_t *work; |
267 | 267 | ||
268 | if (USE_ASYNC_IOBDMA && did_work_request) | 268 | if (USE_ASYNC_IOBDMA && did_work_request) |
269 | work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); | 269 | work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); |
270 | else | 270 | else |
271 | work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); | 271 | work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); |
272 | 272 | ||
273 | prefetch(work); | 273 | prefetch(work); |
274 | did_work_request = 0; | 274 | did_work_request = 0; |
275 | if (work == NULL) { | 275 | if (work == NULL) { |
276 | union cvmx_pow_wq_int wq_int; | 276 | union cvmx_pow_wq_int wq_int; |
277 | wq_int.u64 = 0; | 277 | wq_int.u64 = 0; |
278 | wq_int.s.iq_dis = 1 << pow_receive_group; | 278 | wq_int.s.iq_dis = 1 << pow_receive_group; |
279 | wq_int.s.wq_int = 1 << pow_receive_group; | 279 | wq_int.s.wq_int = 1 << pow_receive_group; |
280 | cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); | 280 | cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); |
281 | break; | 281 | break; |
282 | } | 282 | } |
283 | pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); | 283 | pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); |
284 | prefetch(pskb); | 284 | prefetch(pskb); |
285 | 285 | ||
286 | if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { | 286 | if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { |
287 | cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); | 287 | cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); |
288 | did_work_request = 1; | 288 | did_work_request = 1; |
289 | } | 289 | } |
290 | 290 | ||
291 | if (rx_count == 0) { | 291 | if (rx_count == 0) { |
292 | /* | 292 | /* |
293 | * First time through, see if there is enough | 293 | * First time through, see if there is enough |
294 | * work waiting to merit waking another | 294 | * work waiting to merit waking another |
295 | * CPU. | 295 | * CPU. |
296 | */ | 296 | */ |
297 | union cvmx_pow_wq_int_cntx counts; | 297 | union cvmx_pow_wq_int_cntx counts; |
298 | int backlog; | 298 | int backlog; |
299 | int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); | 299 | int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); |
300 | counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); | 300 | counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); |
301 | backlog = counts.s.iq_cnt + counts.s.ds_cnt; | 301 | backlog = counts.s.iq_cnt + counts.s.ds_cnt; |
302 | if (backlog > budget * cores_in_use && napi != NULL) | 302 | if (backlog > budget * cores_in_use && napi != NULL) |
303 | cvm_oct_enable_one_cpu(); | 303 | cvm_oct_enable_one_cpu(); |
304 | } | 304 | } |
305 | 305 | ||
306 | skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; | 306 | skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; |
307 | if (likely(skb_in_hw)) { | 307 | if (likely(skb_in_hw)) { |
308 | skb = *pskb; | 308 | skb = *pskb; |
309 | prefetch(&skb->head); | 309 | prefetch(&skb->head); |
310 | prefetch(&skb->len); | 310 | prefetch(&skb->len); |
311 | } | 311 | } |
312 | prefetch(cvm_oct_device[work->ipprt]); | 312 | prefetch(cvm_oct_device[work->ipprt]); |
313 | 313 | ||
314 | /* Immediately throw away all packets with receive errors */ | 314 | /* Immediately throw away all packets with receive errors */ |
315 | if (unlikely(work->word2.snoip.rcv_error)) { | 315 | if (unlikely(work->word2.snoip.rcv_error)) { |
316 | if (cvm_oct_check_rcv_error(work)) | 316 | if (cvm_oct_check_rcv_error(work)) |
317 | continue; | 317 | continue; |
318 | } | 318 | } |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * We can only use the zero copy path if skbuffs are | 321 | * We can only use the zero copy path if skbuffs are |
322 | * in the FPA pool and the packet fits in a single | 322 | * in the FPA pool and the packet fits in a single |
323 | * buffer. | 323 | * buffer. |
324 | */ | 324 | */ |
325 | if (likely(skb_in_hw)) { | 325 | if (likely(skb_in_hw)) { |
326 | skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); | 326 | skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); |
327 | prefetch(skb->data); | 327 | prefetch(skb->data); |
328 | skb->len = work->len; | 328 | skb->len = work->len; |
329 | skb_set_tail_pointer(skb, skb->len); | 329 | skb_set_tail_pointer(skb, skb->len); |
330 | packet_not_copied = 1; | 330 | packet_not_copied = 1; |
331 | } else { | 331 | } else { |
332 | /* | 332 | /* |
333 | * We have to copy the packet. First allocate | 333 | * We have to copy the packet. First allocate |
334 | * an skbuff for it. | 334 | * an skbuff for it. |
335 | */ | 335 | */ |
336 | skb = dev_alloc_skb(work->len); | 336 | skb = dev_alloc_skb(work->len); |
337 | if (!skb) { | 337 | if (!skb) { |
338 | printk_ratelimited("Port %d failed to allocate " | 338 | printk_ratelimited("Port %d failed to allocate " |
339 | "skbuff, packet dropped\n", | 339 | "skbuff, packet dropped\n", |
340 | work->ipprt); | 340 | work->ipprt); |
341 | cvm_oct_free_work(work); | 341 | cvm_oct_free_work(work); |
342 | continue; | 342 | continue; |
343 | } | 343 | } |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * Check if we've received a packet that was | 346 | * Check if we've received a packet that was |
347 | * entirely stored in the work entry. | 347 | * entirely stored in the work entry. |
348 | */ | 348 | */ |
349 | if (unlikely(work->word2.s.bufs == 0)) { | 349 | if (unlikely(work->word2.s.bufs == 0)) { |
350 | uint8_t *ptr = work->packet_data; | 350 | uint8_t *ptr = work->packet_data; |
351 | 351 | ||
352 | if (likely(!work->word2.s.not_IP)) { | 352 | if (likely(!work->word2.s.not_IP)) { |
353 | /* | 353 | /* |
354 | * The beginning of the packet | 354 | * The beginning of the packet |
355 | * moves for IP packets. | 355 | * moves for IP packets. |
356 | */ | 356 | */ |
357 | if (work->word2.s.is_v6) | 357 | if (work->word2.s.is_v6) |
358 | ptr += 2; | 358 | ptr += 2; |
359 | else | 359 | else |
360 | ptr += 6; | 360 | ptr += 6; |
361 | } | 361 | } |
362 | memcpy(skb_put(skb, work->len), ptr, work->len); | 362 | memcpy(skb_put(skb, work->len), ptr, work->len); |
363 | /* No packet buffers to free */ | 363 | /* No packet buffers to free */ |
364 | } else { | 364 | } else { |
365 | int segments = work->word2.s.bufs; | 365 | int segments = work->word2.s.bufs; |
366 | union cvmx_buf_ptr segment_ptr = work->packet_ptr; | 366 | union cvmx_buf_ptr segment_ptr = work->packet_ptr; |
367 | int len = work->len; | 367 | int len = work->len; |
368 | 368 | ||
369 | while (segments--) { | 369 | while (segments--) { |
370 | union cvmx_buf_ptr next_ptr = | 370 | union cvmx_buf_ptr next_ptr = |
371 | *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); | 371 | *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); |
372 | 372 | ||
373 | /* | 373 | /* |
374 | * Octeon Errata PKI-100: The segment size is | 374 | * Octeon Errata PKI-100: The segment size is |
375 | * wrong. Until it is fixed, calculate the | 375 | * wrong. Until it is fixed, calculate the |
376 | * segment size based on the packet pool | 376 | * segment size based on the packet pool |
377 | * buffer size. When it is fixed, the | 377 | * buffer size. When it is fixed, the |
378 | * following line should be replaced with this | 378 | * following line should be replaced with this |
379 | * one: int segment_size = | 379 | * one: int segment_size = |
380 | * segment_ptr.s.size; | 380 | * segment_ptr.s.size; |
381 | */ | 381 | */ |
382 | int segment_size = CVMX_FPA_PACKET_POOL_SIZE - | 382 | int segment_size = CVMX_FPA_PACKET_POOL_SIZE - |
383 | (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); | 383 | (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); |
384 | /* | 384 | /* |
385 | * Don't copy more than what | 385 | * Don't copy more than what |
386 | * is left in the packet. | 386 | * is left in the packet. |
387 | */ | 387 | */ |
388 | if (segment_size > len) | 388 | if (segment_size > len) |
389 | segment_size = len; | 389 | segment_size = len; |
390 | /* Copy the data into the packet */ | 390 | /* Copy the data into the packet */ |
391 | memcpy(skb_put(skb, segment_size), | 391 | memcpy(skb_put(skb, segment_size), |
392 | cvmx_phys_to_ptr(segment_ptr.s.addr), | 392 | cvmx_phys_to_ptr(segment_ptr.s.addr), |
393 | segment_size); | 393 | segment_size); |
394 | len -= segment_size; | 394 | len -= segment_size; |
395 | segment_ptr = next_ptr; | 395 | segment_ptr = next_ptr; |
396 | } | 396 | } |
397 | } | 397 | } |
398 | packet_not_copied = 0; | 398 | packet_not_copied = 0; |
399 | } | 399 | } |
400 | 400 | ||
401 | if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && | 401 | if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && |
402 | cvm_oct_device[work->ipprt])) { | 402 | cvm_oct_device[work->ipprt])) { |
403 | struct net_device *dev = cvm_oct_device[work->ipprt]; | 403 | struct net_device *dev = cvm_oct_device[work->ipprt]; |
404 | struct octeon_ethernet *priv = netdev_priv(dev); | 404 | struct octeon_ethernet *priv = netdev_priv(dev); |
405 | 405 | ||
406 | /* | 406 | /* |
407 | * Only accept packets for devices that are | 407 | * Only accept packets for devices that are |
408 | * currently up. | 408 | * currently up. |
409 | */ | 409 | */ |
410 | if (likely(dev->flags & IFF_UP)) { | 410 | if (likely(dev->flags & IFF_UP)) { |
411 | skb->protocol = eth_type_trans(skb, dev); | 411 | skb->protocol = eth_type_trans(skb, dev); |
412 | skb->dev = dev; | 412 | skb->dev = dev; |
413 | 413 | ||
414 | if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) | 414 | if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || |
415 | work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) | ||
415 | skb->ip_summed = CHECKSUM_NONE; | 416 | skb->ip_summed = CHECKSUM_NONE; |
416 | else | 417 | else |
417 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 418 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
418 | 419 | ||
419 | /* Increment RX stats for virtual ports */ | 420 | /* Increment RX stats for virtual ports */ |
420 | if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { | 421 | if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { |
421 | #ifdef CONFIG_64BIT | 422 | #ifdef CONFIG_64BIT |
422 | atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); | 423 | atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); |
423 | atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); | 424 | atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); |
424 | #else | 425 | #else |
425 | atomic_add(1, (atomic_t *)&priv->stats.rx_packets); | 426 | atomic_add(1, (atomic_t *)&priv->stats.rx_packets); |
426 | atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); | 427 | atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); |
427 | #endif | 428 | #endif |
428 | } | 429 | } |
429 | netif_receive_skb(skb); | 430 | netif_receive_skb(skb); |
430 | rx_count++; | 431 | rx_count++; |
431 | } else { | 432 | } else { |
432 | /* Drop any packet received for a device that isn't up */ | 433 | /* Drop any packet received for a device that isn't up */ |
433 | /* | 434 | /* |
434 | printk_ratelimited("%s: Device not up, packet dropped\n", | 435 | printk_ratelimited("%s: Device not up, packet dropped\n", |
435 | dev->name); | 436 | dev->name); |
436 | */ | 437 | */ |
437 | #ifdef CONFIG_64BIT | 438 | #ifdef CONFIG_64BIT |
438 | atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); | 439 | atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); |
439 | #else | 440 | #else |
440 | atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); | 441 | atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); |
441 | #endif | 442 | #endif |
442 | dev_kfree_skb_irq(skb); | 443 | dev_kfree_skb_irq(skb); |
443 | } | 444 | } |
444 | } else { | 445 | } else { |
445 | /* | 446 | /* |
446 | * Drop any packet received for a device that | 447 | * Drop any packet received for a device that |
447 | * doesn't exist. | 448 | * doesn't exist. |
448 | */ | 449 | */ |
449 | printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", | 450 | printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", |
450 | work->ipprt); | 451 | work->ipprt); |
451 | dev_kfree_skb_irq(skb); | 452 | dev_kfree_skb_irq(skb); |
452 | } | 453 | } |
453 | /* | 454 | /* |
454 | * Check to see if the skbuff and work share the same | 455 | * Check to see if the skbuff and work share the same |
455 | * packet buffer. | 456 | * packet buffer. |
456 | */ | 457 | */ |
457 | if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { | 458 | if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { |
458 | /* | 459 | /* |
459 | * This buffer needs to be replaced, increment | 460 | * This buffer needs to be replaced, increment |
460 | * the number of buffers we need to free by | 461 | * the number of buffers we need to free by |
461 | * one. | 462 | * one. |
462 | */ | 463 | */ |
463 | cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, | 464 | cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, |
464 | 1); | 465 | 1); |
465 | 466 | ||
466 | cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, | 467 | cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, |
467 | DONT_WRITEBACK(1)); | 468 | DONT_WRITEBACK(1)); |
468 | } else { | 469 | } else { |
469 | cvm_oct_free_work(work); | 470 | cvm_oct_free_work(work); |
470 | } | 471 | } |
471 | } | 472 | } |
472 | /* Restore the original POW group mask */ | 473 | /* Restore the original POW group mask */ |
473 | cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); | 474 | cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); |
474 | if (USE_ASYNC_IOBDMA) { | 475 | if (USE_ASYNC_IOBDMA) { |
475 | /* Restore the scratch area */ | 476 | /* Restore the scratch area */ |
476 | cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); | 477 | cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); |
477 | } | 478 | } |
478 | cvm_oct_rx_refill_pool(0); | 479 | cvm_oct_rx_refill_pool(0); |
479 | 480 | ||
480 | if (rx_count < budget && napi != NULL) { | 481 | if (rx_count < budget && napi != NULL) { |
481 | /* No more work */ | 482 | /* No more work */ |
482 | napi_complete(napi); | 483 | napi_complete(napi); |
483 | cvm_oct_no_more_work(); | 484 | cvm_oct_no_more_work(); |
484 | } | 485 | } |
485 | return rx_count; | 486 | return rx_count; |
486 | } | 487 | } |
487 | 488 | ||
488 | #ifdef CONFIG_NET_POLL_CONTROLLER | 489 | #ifdef CONFIG_NET_POLL_CONTROLLER |
489 | /** | 490 | /** |
490 | * cvm_oct_poll_controller - poll for receive packets | 491 | * cvm_oct_poll_controller - poll for receive packets |
491 | * device. | 492 | * device. |
492 | * | 493 | * |
493 | * @dev: Device to poll. Unused | 494 | * @dev: Device to poll. Unused |
494 | */ | 495 | */ |
495 | void cvm_oct_poll_controller(struct net_device *dev) | 496 | void cvm_oct_poll_controller(struct net_device *dev) |
496 | { | 497 | { |
497 | cvm_oct_napi_poll(NULL, 16); | 498 | cvm_oct_napi_poll(NULL, 16); |
498 | } | 499 | } |
499 | #endif | 500 | #endif |
500 | 501 | ||
501 | void cvm_oct_rx_initialize(void) | 502 | void cvm_oct_rx_initialize(void) |
502 | { | 503 | { |
503 | int i; | 504 | int i; |
504 | struct net_device *dev_for_napi = NULL; | 505 | struct net_device *dev_for_napi = NULL; |
505 | union cvmx_pow_wq_int_thrx int_thr; | 506 | union cvmx_pow_wq_int_thrx int_thr; |
506 | union cvmx_pow_wq_int_pc int_pc; | 507 | union cvmx_pow_wq_int_pc int_pc; |
507 | 508 | ||
508 | for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { | 509 | for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { |
509 | if (cvm_oct_device[i]) { | 510 | if (cvm_oct_device[i]) { |
510 | dev_for_napi = cvm_oct_device[i]; | 511 | dev_for_napi = cvm_oct_device[i]; |
511 | break; | 512 | break; |
512 | } | 513 | } |
513 | } | 514 | } |
514 | 515 | ||
515 | if (NULL == dev_for_napi) | 516 | if (NULL == dev_for_napi) |
516 | panic("No net_devices were allocated."); | 517 | panic("No net_devices were allocated."); |
517 | 518 | ||
518 | if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus()) | 519 | if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus()) |
519 | atomic_set(&core_state.available_cores, max_rx_cpus); | 520 | atomic_set(&core_state.available_cores, max_rx_cpus); |
520 | else | 521 | else |
521 | atomic_set(&core_state.available_cores, num_online_cpus()); | 522 | atomic_set(&core_state.available_cores, num_online_cpus()); |
522 | core_state.baseline_cores = atomic_read(&core_state.available_cores); | 523 | core_state.baseline_cores = atomic_read(&core_state.available_cores); |
523 | 524 | ||
524 | core_state.cpu_state = CPU_MASK_NONE; | 525 | core_state.cpu_state = CPU_MASK_NONE; |
525 | for_each_possible_cpu(i) { | 526 | for_each_possible_cpu(i) { |
526 | netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, | 527 | netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, |
527 | cvm_oct_napi_poll, rx_napi_weight); | 528 | cvm_oct_napi_poll, rx_napi_weight); |
528 | napi_enable(&cvm_oct_napi[i].napi); | 529 | napi_enable(&cvm_oct_napi[i].napi); |
529 | } | 530 | } |
530 | /* Register an IRQ hander for to receive POW interrupts */ | 531 | /* Register an IRQ hander for to receive POW interrupts */ |
531 | i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, | 532 | i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, |
532 | cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); | 533 | cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); |
533 | 534 | ||
534 | if (i) | 535 | if (i) |
535 | panic("Could not acquire Ethernet IRQ %d\n", | 536 | panic("Could not acquire Ethernet IRQ %d\n", |
536 | OCTEON_IRQ_WORKQ0 + pow_receive_group); | 537 | OCTEON_IRQ_WORKQ0 + pow_receive_group); |
537 | 538 | ||
538 | disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); | 539 | disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); |
539 | 540 | ||
540 | int_thr.u64 = 0; | 541 | int_thr.u64 = 0; |
541 | int_thr.s.tc_en = 1; | 542 | int_thr.s.tc_en = 1; |
542 | int_thr.s.tc_thr = 1; | 543 | int_thr.s.tc_thr = 1; |
543 | /* Enable POW interrupt when our port has at least one packet */ | 544 | /* Enable POW interrupt when our port has at least one packet */ |
544 | cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); | 545 | cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); |
545 | 546 | ||
546 | int_pc.u64 = 0; | 547 | int_pc.u64 = 0; |
547 | int_pc.s.pc_thr = 5; | 548 | int_pc.s.pc_thr = 5; |
548 | cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); | 549 | cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); |
549 | 550 | ||
550 | 551 | ||
551 | /* Scheduld NAPI now. This will indirectly enable interrupts. */ | 552 | /* Scheduld NAPI now. This will indirectly enable interrupts. */ |
552 | cvm_oct_enable_one_cpu(); | 553 | cvm_oct_enable_one_cpu(); |
553 | } | 554 | } |
554 | 555 | ||
555 | void cvm_oct_rx_shutdown(void) | 556 | void cvm_oct_rx_shutdown(void) |
556 | { | 557 | { |
557 | int i; | 558 | int i; |
558 | /* Shutdown all of the NAPIs */ | 559 | /* Shutdown all of the NAPIs */ |
559 | for_each_possible_cpu(i) | 560 | for_each_possible_cpu(i) |
560 | netif_napi_del(&cvm_oct_napi[i].napi); | 561 | netif_napi_del(&cvm_oct_napi[i].napi); |
561 | } | 562 | } |
562 | 563 |