Commit e3d62d7e8e05a6a4b08f4672385ae58fc0f132c4
Committed by: David S. Miller
Parent: b7abee6ef8
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
tilegx network driver: initial support
This change adds support for the tilegx network driver based on the GXIO
IORPC support in the tilegx software stack, using the on-chip mPIPE
packet processing engine.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
3 changed files with 1902 additions and 2 deletions:
drivers/net/ethernet/tile/Kconfig
drivers/net/ethernet/tile/Makefile
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/tile/tilegx.c (new file):
1 | +/* | |
2 | + * Copyright 2012 Tilera Corporation. All Rights Reserved. | |
3 | + * | |
4 | + * This program is free software; you can redistribute it and/or | |
5 | + * modify it under the terms of the GNU General Public License | |
6 | + * as published by the Free Software Foundation, version 2. | |
7 | + * | |
8 | + * This program is distributed in the hope that it will be useful, but | |
9 | + * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | + * NON INFRINGEMENT. See the GNU General Public License for | |
12 | + * more details. | |
13 | + */ | |
14 | + | |
15 | +#include <linux/module.h> | |
16 | +#include <linux/init.h> | |
17 | +#include <linux/moduleparam.h> | |
18 | +#include <linux/sched.h> | |
19 | +#include <linux/kernel.h> /* printk() */ | |
20 | +#include <linux/slab.h> /* kmalloc() */ | |
21 | +#include <linux/errno.h> /* error codes */ | |
22 | +#include <linux/types.h> /* size_t */ | |
23 | +#include <linux/interrupt.h> | |
24 | +#include <linux/in.h> | |
25 | +#include <linux/irq.h> | |
26 | +#include <linux/netdevice.h> /* struct device, and other headers */ | |
27 | +#include <linux/etherdevice.h> /* eth_type_trans */ | |
28 | +#include <linux/skbuff.h> | |
29 | +#include <linux/ioctl.h> | |
30 | +#include <linux/cdev.h> | |
31 | +#include <linux/hugetlb.h> | |
32 | +#include <linux/in6.h> | |
33 | +#include <linux/timer.h> | |
34 | +#include <linux/hrtimer.h> | |
35 | +#include <linux/ktime.h> | |
36 | +#include <linux/io.h> | |
37 | +#include <linux/ctype.h> | |
38 | +#include <linux/ip.h> | |
39 | +#include <linux/tcp.h> | |
40 | + | |
41 | +#include <asm/checksum.h> | |
42 | +#include <asm/homecache.h> | |
43 | +#include <gxio/mpipe.h> | |
44 | +#include <arch/sim.h> | |
45 | + | |
46 | +/* Default transmit lockup timeout period, in jiffies. */ | |
47 | +#define TILE_NET_TIMEOUT (5 * HZ) | |
48 | + | |
49 | +/* The maximum number of distinct channels (idesc.channel is 5 bits). */ | |
50 | +#define TILE_NET_CHANNELS 32 | |
51 | + | |
52 | +/* Maximum number of idescs to handle per "poll". */ | |
53 | +#define TILE_NET_BATCH 128 | |
54 | + | |
55 | +/* Maximum number of packets to handle per "poll". */ | |
56 | +#define TILE_NET_WEIGHT 64 | |
57 | + | |
58 | +/* Number of entries in each iqueue. */ | |
59 | +#define IQUEUE_ENTRIES 512 | |
60 | + | |
61 | +/* Number of entries in each equeue. */ | |
62 | +#define EQUEUE_ENTRIES 2048 | |
63 | + | |
64 | +/* Total header bytes per equeue slot. Must be big enough for 2 bytes | |
65 | + * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to | |
66 | + * 60 bytes of actual TCP header. We round up to align to cache lines. | |
67 | + */ | |
68 | +#define HEADER_BYTES 128 | |
69 | + | |
70 | +/* Maximum completions per cpu per device (must be a power of two). | |
71 | + * ISSUE: What is the right number here? If this is too small, then | |
72 | + * egress might block waiting for free space in a completions array. | |
73 | + * ISSUE: At the least, allocate these only for initialized echannels. | |
74 | + */ | |
75 | +#define TILE_NET_MAX_COMPS 64 | |
76 | + | |
77 | +#define MAX_FRAGS (MAX_SKB_FRAGS + 1) | |
78 | + | |
79 | +/* Size of completions data to allocate. | |
80 | + * ISSUE: Probably more than needed since we don't use all the channels. | |
81 | + */ | |
82 | +#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) | |
83 | + | |
84 | +/* Size of NotifRing data to allocate. */ | |
85 | +#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) | |
86 | + | |
87 | +/* Timeout to wake the per-device TX timer after we stop the queue. | |
88 | + * We don't want the timeout too short (adds overhead, and might end | |
89 | + * up causing stop/wake/stop/wake cycles) or too long (affects performance). | |
90 | + * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. | |
91 | + */ | |
92 | +#define TX_TIMER_DELAY_USEC 30 | |
93 | + | |
94 | +/* Timeout to wake the per-cpu egress timer to free completions. */ | |
95 | +#define EGRESS_TIMER_DELAY_USEC 1000 | |
96 | + | |
97 | +MODULE_AUTHOR("Tilera Corporation"); | |
98 | +MODULE_LICENSE("GPL"); | |
99 | + | |
100 | +/* A "packet fragment" (a chunk of memory). */ | |
101 | +struct frag { | |
102 | + void *buf; | |
103 | + size_t length; | |
104 | +}; | |
105 | + | |
106 | +/* A single completion. */ | |
107 | +struct tile_net_comp { | |
108 | + /* The "complete_count" when the completion will be complete. */ | |
109 | + s64 when; | |
110 | + /* The buffer to be freed when the completion is complete. */ | |
111 | + struct sk_buff *skb; | |
112 | +}; | |
113 | + | |
114 | +/* The completions for a given cpu and echannel. */ | |
115 | +struct tile_net_comps { | |
116 | + /* The completions. */ | |
117 | + struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; | |
118 | + /* The number of completions used. */ | |
119 | + unsigned long comp_next; | |
120 | + /* The number of completions freed. */ | |
121 | + unsigned long comp_last; | |
122 | +}; | |
123 | + | |
124 | +/* The transmit wake timer for a given cpu and echannel. */ | |
125 | +struct tile_net_tx_wake { | |
126 | + struct hrtimer timer; | |
127 | + struct net_device *dev; | |
128 | +}; | |
129 | + | |
130 | +/* Info for a specific cpu. */ | |
131 | +struct tile_net_info { | |
132 | + /* The NAPI struct. */ | |
133 | + struct napi_struct napi; | |
134 | + /* Packet queue. */ | |
135 | + gxio_mpipe_iqueue_t iqueue; | |
136 | + /* Our cpu. */ | |
137 | + int my_cpu; | |
138 | + /* True if iqueue is valid. */ | |
139 | + bool has_iqueue; | |
140 | + /* NAPI flags. */ | |
141 | + bool napi_added; | |
142 | + bool napi_enabled; | |
143 | + /* Number of small sk_buffs which must still be provided. */ | |
144 | + unsigned int num_needed_small_buffers; | |
145 | + /* Number of large sk_buffs which must still be provided. */ | |
146 | + unsigned int num_needed_large_buffers; | |
147 | + /* A timer for handling egress completions. */ | |
148 | + struct hrtimer egress_timer; | |
149 | + /* True if "egress_timer" is scheduled. */ | |
150 | + bool egress_timer_scheduled; | |
151 | + /* Comps for each egress channel. */ | |
152 | + struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; | |
153 | + /* Transmit wake timer for each egress channel. */ | |
154 | + struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; | |
155 | +}; | |
156 | + | |
157 | +/* Info for egress on a particular egress channel. */ | |
158 | +struct tile_net_egress { | |
159 | + /* The "equeue". */ | |
160 | + gxio_mpipe_equeue_t *equeue; | |
161 | + /* The headers for TSO. */ | |
162 | + unsigned char *headers; | |
163 | +}; | |
164 | + | |
165 | +/* Info for a specific device. */ | |
166 | +struct tile_net_priv { | |
167 | + /* Our network device. */ | |
168 | + struct net_device *dev; | |
169 | + /* The primary link. */ | |
170 | + gxio_mpipe_link_t link; | |
171 | + /* The primary channel, if open, else -1. */ | |
172 | + int channel; | |
173 | + /* The "loopify" egress link, if needed. */ | |
174 | + gxio_mpipe_link_t loopify_link; | |
175 | + /* The "loopify" egress channel, if open, else -1. */ | |
176 | + int loopify_channel; | |
177 | + /* The egress channel (channel or loopify_channel). */ | |
178 | + int echannel; | |
179 | + /* Total stats. */ | |
180 | + struct net_device_stats stats; | |
181 | +}; | |
182 | + | |
183 | +/* Egress info, indexed by "priv->echannel" (lazily created as needed). */ | |
184 | +static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; | |
185 | + | |
186 | +/* Devices currently associated with each channel. | |
187 | + * NOTE: The array entry can become NULL after ifconfig down, but | |
188 | + * we do not free the underlying net_device structures, so it is | |
189 | + * safe to use a pointer after reading it from this array. | |
190 | + */ | |
191 | +static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; | |
192 | + | |
193 | +/* A mutex for "tile_net_devs_for_channel". */ | |
194 | +static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); | |
195 | + | |
196 | +/* The per-cpu info. */ | |
197 | +static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); | |
198 | + | |
199 | +/* The "context" for all devices. */ | |
200 | +static gxio_mpipe_context_t context; | |
201 | + | |
202 | +/* Buffer sizes and mpipe enum codes for buffer stacks. | |
203 | + * See arch/tile/include/gxio/mpipe.h for the set of possible values. | |
204 | + */ | |
205 | +#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 | |
206 | +#define BUFFER_SIZE_SMALL 128 | |
207 | +#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 | |
208 | +#define BUFFER_SIZE_LARGE 1664 | |
209 | + | |
210 | +/* The small/large "buffer stacks". */ | |
211 | +static int small_buffer_stack = -1; | |
212 | +static int large_buffer_stack = -1; | |
213 | + | |
214 | +/* Amount of memory allocated for each buffer stack. */ | |
215 | +static size_t buffer_stack_size; | |
216 | + | |
217 | +/* The actual memory allocated for the buffer stacks. */ | |
218 | +static void *small_buffer_stack_va; | |
219 | +static void *large_buffer_stack_va; | |
220 | + | |
221 | +/* The buckets. */ | |
222 | +static int first_bucket = -1; | |
223 | +static int num_buckets = 1; | |
224 | + | |
225 | +/* The ingress irq. */ | |
226 | +static int ingress_irq = -1; | |
227 | + | |
228 | +/* Text value of tile_net.cpus if passed as a module parameter. */ | |
229 | +static char *network_cpus_string; | |
230 | + | |
231 | +/* The actual cpus in "network_cpus". */ | |
232 | +static struct cpumask network_cpus_map; | |
233 | + | |
234 | +/* If "loopify=LINK" was specified, this is "LINK". */ | |
235 | +static char *loopify_link_name; | |
236 | + | |
237 | +/* If "tile_net.custom" was specified, this is non-NULL. */ | |
238 | +static char *custom_str; | |
239 | + | |
240 | +/* The "tile_net.cpus" argument specifies the cpus that are dedicated | |
241 | + * to handle ingress packets. | |
242 | + * | |
243 | + * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where | |
244 | + * m, n, x, y are integer numbers that represent the cpus that can be | |
245 | + * neither a dedicated cpu nor a dataplane cpu. | |
246 | + */ | |
247 | +static bool network_cpus_init(void) | |
248 | +{ | |
249 | + char buf[1024]; | |
250 | + int rc; | |
251 | + | |
252 | + if (network_cpus_string == NULL) | |
253 | + return false; | |
254 | + | |
255 | + rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); | |
256 | + if (rc != 0) { | |
257 | + pr_warn("tile_net.cpus=%s: malformed cpu list\n", | |
258 | + network_cpus_string); | |
259 | + return false; | |
260 | + } | |
261 | + | |
262 | + /* Remove dedicated cpus. */ | |
263 | + cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); | |
264 | + | |
265 | + if (cpumask_empty(&network_cpus_map)) { | |
266 | + pr_warn("Ignoring empty tile_net.cpus='%s'.\n", | |
267 | + network_cpus_string); | |
268 | + return false; | |
269 | + } | |
270 | + | |
271 | + cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | |
272 | + pr_info("Linux network CPUs: %s\n", buf); | |
273 | + return true; | |
274 | +} | |
275 | + | |
276 | +module_param_named(cpus, network_cpus_string, charp, 0444); | |
277 | +MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); | |
278 | + | |
279 | +/* The "tile_net.loopify=LINK" argument causes the named device to | |
280 | + * actually use "loop0" for ingress, and "loop1" for egress. This | |
281 | + * allows an app to sit between the actual link and linux, passing | |
282 | + * (some) packets along to linux, and forwarding (some) packets sent | |
283 | + * out by linux. | |
284 | + */ | |
285 | +module_param_named(loopify, loopify_link_name, charp, 0444); | |
286 | +MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); | |
287 | + | |
288 | +/* The "tile_net.custom" argument causes us to ignore the "conventional" | |
289 | + * classifier metadata, in particular, the "l2_offset". | |
290 | + */ | |
291 | +module_param_named(custom, custom_str, charp, 0444); | |
292 | +MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); | |
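/* For example (values purely illustrative), booting with
 *
 *	tile_net.cpus=1-3 tile_net.loopify=xgbe0 tile_net.custom=1
 *
 * would dedicate cpus 1-3 to ingress processing, make "xgbe0" use
 * loop0/loop1 for ingress/egress as described above, and tell the
 * driver to ignore the conventional classifier metadata.
 */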
293 | + | |
294 | +/* Atomically update a statistics field. | |
295 | + * Note that on TILE-Gx, this operation is fire-and-forget on the | |
296 | + * issuing core (single-cycle dispatch) and takes only a few cycles | |
297 | + * longer than a regular store when the request reaches the home cache. | |
298 | + * No expensive bus management overhead is required. | |
299 | + */ | |
300 | +static void tile_net_stats_add(unsigned long value, unsigned long *field) | |
301 | +{ | |
302 | + BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); | |
303 | + atomic_long_add(value, (atomic_long_t *)field); | |
304 | +} | |
305 | + | |
306 | +/* Allocate and push a buffer. */ | |
307 | +static bool tile_net_provide_buffer(bool small) | |
308 | +{ | |
309 | + int stack = small ? small_buffer_stack : large_buffer_stack; | |
310 | + const unsigned long buffer_alignment = 128; | |
311 | + struct sk_buff *skb; | |
312 | + int len; | |
313 | + | |
314 | + len = sizeof(struct sk_buff **) + buffer_alignment; | |
315 | + len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); | |
316 | + skb = dev_alloc_skb(len); | |
317 | + if (skb == NULL) | |
318 | + return false; | |
319 | + | |
320 | + /* Make room for a back-pointer to 'skb' and guarantee alignment. */ | |
321 | + skb_reserve(skb, sizeof(struct sk_buff **)); | |
322 | + skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); | |
323 | + | |
324 | + /* Save a back-pointer to 'skb'. */ | |
325 | + *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; | |
326 | + | |
327 | + /* Make sure "skb" and the back-pointer have been flushed. */ | |
328 | + wmb(); | |
329 | + | |
330 | + gxio_mpipe_push_buffer(&context, stack, | |
331 | + (void *)va_to_tile_io_addr(skb->data)); | |
332 | + | |
333 | + return true; | |
334 | +} | |
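/* A sketch of the resulting buffer layout (sizes not to scale):
 *
 *	skb->head ... [ pad ][ struct sk_buff *back-pointer ][ skb->data ... ]
 *
 * skb->data is 128-byte aligned and is the address pushed to mPIPE; the
 * pointer-sized word just before it holds the back-pointer, which is how
 * mpipe_buf_to_skb() below recovers the skb when the hardware hands the
 * buffer back.
 */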
335 | + | |
336 | +/* Convert a raw mpipe buffer to its matching skb pointer. */ | |
337 | +static struct sk_buff *mpipe_buf_to_skb(void *va) | |
338 | +{ | |
339 | + /* Acquire the associated "skb". */ | |
340 | + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | |
341 | + struct sk_buff *skb = *skb_ptr; | |
342 | + | |
343 | + /* Paranoia. */ | |
344 | + if (skb->data != va) { | |
345 | + /* Panic here since there's a reasonable chance | |
346 | + * that corrupt buffers means generic memory | |
347 | + * corruption, with unpredictable system effects. | |
348 | + */ | |
349 | + panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", | |
350 | + va, skb, skb->data); | |
351 | + } | |
352 | + | |
353 | + return skb; | |
354 | +} | |
355 | + | |
356 | +static void tile_net_pop_all_buffers(int stack) | |
357 | +{ | |
358 | + for (;;) { | |
359 | + tile_io_addr_t addr = | |
360 | + (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); | |
361 | + if (addr == 0) | |
362 | + break; | |
363 | + dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); | |
364 | + } | |
365 | +} | |
366 | + | |
367 | +/* Provide linux buffers to mPIPE. */ | |
368 | +static void tile_net_provide_needed_buffers(void) | |
369 | +{ | |
370 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
371 | + | |
372 | + while (info->num_needed_small_buffers != 0) { | |
373 | + if (!tile_net_provide_buffer(true)) | |
374 | + goto oops; | |
375 | + info->num_needed_small_buffers--; | |
376 | + } | |
377 | + | |
378 | + while (info->num_needed_large_buffers != 0) { | |
379 | + if (!tile_net_provide_buffer(false)) | |
380 | + goto oops; | |
381 | + info->num_needed_large_buffers--; | |
382 | + } | |
383 | + | |
384 | + return; | |
385 | + | |
386 | +oops: | |
387 | + /* Add a description to the page allocation failure dump. */ | |
388 | + pr_notice("Tile %d still needs some buffers\n", info->my_cpu); | |
389 | +} | |
390 | + | |
391 | +static inline bool filter_packet(struct net_device *dev, void *buf) | |
392 | +{ | |
393 | + /* Filter packets received before we're up. */ | |
394 | + if (dev == NULL || !(dev->flags & IFF_UP)) | |
395 | + return true; | |
396 | + | |
397 | + /* Filter out packets that aren't for us. */ | |
398 | + if (!(dev->flags & IFF_PROMISC) && | |
399 | + !is_multicast_ether_addr(buf) && | |
400 | + compare_ether_addr(dev->dev_addr, buf) != 0) | |
401 | + return true; | |
402 | + | |
403 | + return false; | |
404 | +} | |
405 | + | |
406 | +static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | |
407 | + gxio_mpipe_idesc_t *idesc, unsigned long len) | |
408 | +{ | |
409 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
410 | + struct tile_net_priv *priv = netdev_priv(dev); | |
411 | + | |
412 | + /* Encode the actual packet length. */ | |
413 | + skb_put(skb, len); | |
414 | + | |
415 | + skb->protocol = eth_type_trans(skb, dev); | |
416 | + | |
417 | + /* Acknowledge "good" hardware checksums. */ | |
418 | + if (idesc->cs && idesc->csum_seed_val == 0xFFFF) | |
419 | + skb->ip_summed = CHECKSUM_UNNECESSARY; | |
420 | + | |
421 | + netif_receive_skb(skb); | |
422 | + | |
423 | + /* Update stats. */ | |
424 | + tile_net_stats_add(1, &priv->stats.rx_packets); | |
425 | + tile_net_stats_add(len, &priv->stats.rx_bytes); | |
426 | + | |
427 | + /* Need a new buffer. */ | |
428 | + if (idesc->size == BUFFER_SIZE_SMALL_ENUM) | |
429 | + info->num_needed_small_buffers++; | |
430 | + else | |
431 | + info->num_needed_large_buffers++; | |
432 | +} | |
433 | + | |
434 | +/* Handle a packet. Return true if "processed", false if "filtered". */ | |
435 | +static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) | |
436 | +{ | |
437 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
438 | + struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; | |
439 | + uint8_t l2_offset; | |
440 | + void *va; | |
441 | + void *buf; | |
442 | + unsigned long len; | |
443 | + bool filter; | |
444 | + | |
445 | + /* Drop packets for which no buffer was available. | |
446 | + * NOTE: This happens under heavy load. | |
447 | + */ | |
448 | + if (idesc->be) { | |
449 | + struct tile_net_priv *priv = netdev_priv(dev); | |
450 | + tile_net_stats_add(1, &priv->stats.rx_dropped); | |
451 | + gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | |
452 | + if (net_ratelimit()) | |
453 | + pr_info("Dropping packet (insufficient buffers).\n"); | |
454 | + return false; | |
455 | + } | |
456 | + | |
457 | + /* Get the "l2_offset", if allowed. */ | |
458 | + l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); | |
459 | + | |
460 | + /* Get the raw buffer VA (includes "headroom"). */ | |
461 | + va = tile_io_addr_to_va((unsigned long)(long)idesc->va); | |
462 | + | |
463 | + /* Get the actual packet start/length. */ | |
464 | + buf = va + l2_offset; | |
465 | + len = idesc->l2_size - l2_offset; | |
466 | + | |
467 | + /* Point "va" at the raw buffer. */ | |
468 | + va -= NET_IP_ALIGN; | |
469 | + | |
470 | + filter = filter_packet(dev, buf); | |
471 | + if (filter) { | |
472 | + gxio_mpipe_iqueue_drop(&info->iqueue, idesc); | |
473 | + } else { | |
474 | + struct sk_buff *skb = mpipe_buf_to_skb(va); | |
475 | + | |
476 | + /* Skip headroom, and any custom header. */ | |
477 | + skb_reserve(skb, NET_IP_ALIGN + l2_offset); | |
478 | + | |
479 | + tile_net_receive_skb(dev, skb, idesc, len); | |
480 | + } | |
481 | + | |
482 | + gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | |
483 | + return !filter; | |
484 | +} | |
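/* A sketch of the ingress buffer geometry assumed above:
 *
 *	raw buffer start = skb->data (as pushed by tile_net_provide_buffer())
 *	  + NET_IP_ALIGN headroom       -> address reported in idesc->va
 *	  + l2_offset                   -> "buf", start of the L2 header
 *	  + len (= l2_size - l2_offset) -> end of the received frame
 *
 * The headroom is requested via gxio_mpipe_rules_set_headroom() in
 * tile_net_update() below, and skb_reserve(NET_IP_ALIGN + l2_offset)
 * skips both the headroom and the classifier-reported l2_offset before
 * the skb is passed up the stack.
 */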
485 | + | |
486 | +/* Handle some packets for the current CPU. | |
487 | + * | |
488 | + * This function handles up to TILE_NET_BATCH idescs per call. | |
489 | + * | |
490 | + * ISSUE: Since we do not provide new buffers until this function is | |
491 | + * complete, we must initially provide enough buffers for each network | |
492 | + * cpu to fill its iqueue and also its batched idescs. | |
493 | + * | |
494 | + * ISSUE: The "rotting packet" race condition occurs if a packet | |
495 | + * arrives after the queue appears to be empty, and before the | |
496 | + * hypervisor interrupt is re-enabled. | |
497 | + */ | |
498 | +static int tile_net_poll(struct napi_struct *napi, int budget) | |
499 | +{ | |
500 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
501 | + unsigned int work = 0; | |
502 | + gxio_mpipe_idesc_t *idesc; | |
503 | + int i, n; | |
504 | + | |
505 | + /* Process packets. */ | |
506 | + while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { | |
507 | + for (i = 0; i < n; i++) { | |
508 | + if (i == TILE_NET_BATCH) | |
509 | + goto done; | |
510 | + if (tile_net_handle_packet(idesc + i)) { | |
511 | + if (++work >= budget) | |
512 | + goto done; | |
513 | + } | |
514 | + } | |
515 | + } | |
516 | + | |
517 | + /* There are no packets left. */ | |
518 | + napi_complete(&info->napi); | |
519 | + | |
520 | + /* Re-enable hypervisor interrupts. */ | |
521 | + gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); | |
522 | + | |
523 | + /* HACK: Avoid the "rotting packet" problem. */ | |
524 | + if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) | |
525 | + napi_schedule(&info->napi); | |
526 | + | |
527 | + /* ISSUE: Handle completions? */ | |
528 | + | |
529 | +done: | |
530 | + tile_net_provide_needed_buffers(); | |
531 | + | |
532 | + return work; | |
533 | +} | |
534 | + | |
535 | +/* Handle an ingress interrupt on the current cpu. */ | |
536 | +static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) | |
537 | +{ | |
538 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
539 | + napi_schedule(&info->napi); | |
540 | + return IRQ_HANDLED; | |
541 | +} | |
542 | + | |
543 | +/* Free some completions. This must be called with interrupts blocked. */ | |
544 | +static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, | |
545 | + struct tile_net_comps *comps, | |
546 | + int limit, bool force_update) | |
547 | +{ | |
548 | + int n = 0; | |
549 | + while (comps->comp_last < comps->comp_next) { | |
550 | + unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; | |
551 | + struct tile_net_comp *comp = &comps->comp_queue[cid]; | |
552 | + if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, | |
553 | + force_update || n == 0)) | |
554 | + break; | |
555 | + dev_kfree_skb_irq(comp->skb); | |
556 | + comps->comp_last++; | |
557 | + if (++n == limit) | |
558 | + break; | |
559 | + } | |
560 | + return n; | |
561 | +} | |
562 | + | |
563 | +/* Add a completion. This must be called with interrupts blocked. | |
564 | + * tile_net_equeue_try_reserve() will have ensured a free completion entry. | |
565 | + */ | |
566 | +static void add_comp(gxio_mpipe_equeue_t *equeue, | |
567 | + struct tile_net_comps *comps, | |
568 | + uint64_t when, struct sk_buff *skb) | |
569 | +{ | |
570 | + int cid = comps->comp_next % TILE_NET_MAX_COMPS; | |
571 | + comps->comp_queue[cid].when = when; | |
572 | + comps->comp_queue[cid].skb = skb; | |
573 | + comps->comp_next++; | |
574 | +} | |
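/* Together, the two routines above form a simple per-(cpu, echannel)
 * ring: add_comp() records an entry at comp_next % TILE_NET_MAX_COMPS
 * and advances comp_next, while tile_net_free_comps() advances
 * comp_last, freeing each skb once the equeue reports its "when" slot
 * complete.  Both counters only ever increase; callers are expected to
 * keep (comp_next - comp_last) below TILE_NET_MAX_COMPS (see
 * tile_net_equeue_try_reserve() below), so live entries are never
 * overwritten.
 */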
575 | + | |
576 | +static void tile_net_schedule_tx_wake_timer(struct net_device *dev) | |
577 | +{ | |
578 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
579 | + struct tile_net_priv *priv = netdev_priv(dev); | |
580 | + | |
581 | + hrtimer_start(&info->tx_wake[priv->echannel].timer, | |
582 | + ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), | |
583 | + HRTIMER_MODE_REL_PINNED); | |
584 | +} | |
585 | + | |
586 | +static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | |
587 | +{ | |
588 | + struct tile_net_tx_wake *tx_wake = | |
589 | + container_of(t, struct tile_net_tx_wake, timer); | |
590 | + netif_wake_subqueue(tx_wake->dev, smp_processor_id()); | |
591 | + return HRTIMER_NORESTART; | |
592 | +} | |
593 | + | |
594 | +/* Make sure the egress timer is scheduled. */ | |
595 | +static void tile_net_schedule_egress_timer(void) | |
596 | +{ | |
597 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
598 | + | |
599 | + if (!info->egress_timer_scheduled) { | |
600 | + hrtimer_start(&info->egress_timer, | |
601 | + ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), | |
602 | + HRTIMER_MODE_REL_PINNED); | |
603 | + info->egress_timer_scheduled = true; | |
604 | + } | |
605 | +} | |
606 | + | |
607 | +/* The "function" for "info->egress_timer". | |
608 | + * | |
609 | + * This timer will reschedule itself as long as there are any pending | |
610 | + * completions expected for this tile. | |
611 | + */ | |
612 | +static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) | |
613 | +{ | |
614 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
615 | + unsigned long irqflags; | |
616 | + bool pending = false; | |
617 | + int i; | |
618 | + | |
619 | + local_irq_save(irqflags); | |
620 | + | |
621 | + /* The timer is no longer scheduled. */ | |
622 | + info->egress_timer_scheduled = false; | |
623 | + | |
624 | + /* Free all possible comps for this tile. */ | |
625 | + for (i = 0; i < TILE_NET_CHANNELS; i++) { | |
626 | + struct tile_net_egress *egress = &egress_for_echannel[i]; | |
627 | + struct tile_net_comps *comps = info->comps_for_echannel[i]; | |
628 | + if (comps->comp_last >= comps->comp_next) | |
629 | + continue; | |
630 | + tile_net_free_comps(egress->equeue, comps, -1, true); | |
631 | + pending = pending || (comps->comp_last < comps->comp_next); | |
632 | + } | |
633 | + | |
634 | + /* Reschedule timer if needed. */ | |
635 | + if (pending) | |
636 | + tile_net_schedule_egress_timer(); | |
637 | + | |
638 | + local_irq_restore(irqflags); | |
639 | + | |
640 | + return HRTIMER_NORESTART; | |
641 | +} | |
642 | + | |
643 | +/* Helper function for "tile_net_update()". | |
644 | + * "dev" (i.e. arg) is the device being brought up or down, | |
645 | + * or NULL if all devices are now down. | |
646 | + */ | |
647 | +static void tile_net_update_cpu(void *arg) | |
648 | +{ | |
649 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
650 | + struct net_device *dev = arg; | |
651 | + | |
652 | + if (!info->has_iqueue) | |
653 | + return; | |
654 | + | |
655 | + if (dev != NULL) { | |
656 | + if (!info->napi_added) { | |
657 | + netif_napi_add(dev, &info->napi, | |
658 | + tile_net_poll, TILE_NET_WEIGHT); | |
659 | + info->napi_added = true; | |
660 | + } | |
661 | + if (!info->napi_enabled) { | |
662 | + napi_enable(&info->napi); | |
663 | + info->napi_enabled = true; | |
664 | + } | |
665 | + enable_percpu_irq(ingress_irq, 0); | |
666 | + } else { | |
667 | + disable_percpu_irq(ingress_irq); | |
668 | + if (info->napi_enabled) { | |
669 | + napi_disable(&info->napi); | |
670 | + info->napi_enabled = false; | |
671 | + } | |
672 | + /* FIXME: Drain the iqueue. */ | |
673 | + } | |
674 | +} | |
675 | + | |
676 | +/* Helper function for tile_net_open() and tile_net_stop(). | |
677 | + * Always called under tile_net_devs_for_channel_mutex. | |
678 | + */ | |
679 | +static int tile_net_update(struct net_device *dev) | |
680 | +{ | |
681 | + static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ | |
682 | + bool saw_channel = false; | |
683 | + int channel; | |
684 | + int rc; | |
685 | + int cpu; | |
686 | + | |
687 | + gxio_mpipe_rules_init(&rules, &context); | |
688 | + | |
689 | + for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { | |
690 | + if (tile_net_devs_for_channel[channel] == NULL) | |
691 | + continue; | |
692 | + if (!saw_channel) { | |
693 | + saw_channel = true; | |
694 | + gxio_mpipe_rules_begin(&rules, first_bucket, | |
695 | + num_buckets, NULL); | |
696 | + gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); | |
697 | + } | |
698 | + gxio_mpipe_rules_add_channel(&rules, channel); | |
699 | + } | |
700 | + | |
701 | + /* NOTE: This can fail if there is no classifier. | |
702 | + * ISSUE: Can anything else cause it to fail? | |
703 | + */ | |
704 | + rc = gxio_mpipe_rules_commit(&rules); | |
705 | + if (rc != 0) { | |
706 | + netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); | |
707 | + return -EIO; | |
708 | + } | |
709 | + | |
710 | + /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ | |
711 | + for_each_online_cpu(cpu) | |
712 | + smp_call_function_single(cpu, tile_net_update_cpu, | |
713 | + (saw_channel ? dev : NULL), 1); | |
714 | + | |
715 | + /* HACK: Allow packets to flow in the simulator. */ | |
716 | + if (saw_channel) | |
717 | + sim_enable_mpipe_links(0, -1); | |
718 | + | |
719 | + return 0; | |
720 | +} | |
721 | + | |
722 | +/* Allocate and initialize mpipe buffer stacks, and register them in | |
723 | + * the mPIPE TLBs, for both small and large packet sizes. | |
724 | + * This routine supports tile_net_init_mpipe(), below. | |
725 | + */ | |
726 | +static int init_buffer_stacks(struct net_device *dev, int num_buffers) | |
727 | +{ | |
728 | + pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); | |
729 | + int rc; | |
730 | + | |
731 | + /* Compute stack bytes; we round up to 64KB and then use | |
732 | + * alloc_pages() so we get the required 64KB alignment as well. | |
733 | + */ | |
734 | + buffer_stack_size = | |
735 | + ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), | |
736 | + 64 * 1024); | |
737 | + | |
738 | + /* Allocate two buffer stack indices. */ | |
739 | + rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); | |
740 | + if (rc < 0) { | |
741 | + netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", | |
742 | + rc); | |
743 | + return rc; | |
744 | + } | |
745 | + small_buffer_stack = rc; | |
746 | + large_buffer_stack = rc + 1; | |
747 | + | |
748 | + /* Allocate the small memory stack. */ | |
749 | + small_buffer_stack_va = | |
750 | + alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | |
751 | + if (small_buffer_stack_va == NULL) { | |
752 | + netdev_err(dev, | |
753 | + "Could not alloc %zd bytes for buffer stacks\n", | |
754 | + buffer_stack_size); | |
755 | + return -ENOMEM; | |
756 | + } | |
757 | + rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, | |
758 | + BUFFER_SIZE_SMALL_ENUM, | |
759 | + small_buffer_stack_va, | |
760 | + buffer_stack_size, 0); | |
761 | + if (rc != 0) { | |
762 | + netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); | |
763 | + return rc; | |
764 | + } | |
765 | + rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, | |
766 | + hash_pte, 0); | |
767 | + if (rc != 0) { | |
768 | + netdev_err(dev, | |
769 | + "gxio_mpipe_register_buffer_memory failed: %d\n", | |
770 | + rc); | |
771 | + return rc; | |
772 | + } | |
773 | + | |
774 | + /* Allocate the large buffer stack. */ | |
775 | + large_buffer_stack_va = | |
776 | + alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | |
777 | + if (large_buffer_stack_va == NULL) { | |
778 | + netdev_err(dev, | |
779 | + "Could not alloc %zd bytes for buffer stacks\n", | |
780 | + buffer_stack_size); | |
781 | + return -ENOMEM; | |
782 | + } | |
783 | + rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, | |
784 | + BUFFER_SIZE_LARGE_ENUM, | |
785 | + large_buffer_stack_va, | |
786 | + buffer_stack_size, 0); | |
787 | + if (rc != 0) { | |
788 | + netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", | |
789 | + rc); | |
790 | + return rc; | |
791 | + } | |
792 | + rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, | |
793 | + hash_pte, 0); | |
794 | + if (rc != 0) { | |
795 | + netdev_err(dev, | |
796 | + "gxio_mpipe_register_buffer_memory failed: %d\n", | |
797 | + rc); | |
798 | + return rc; | |
799 | + } | |
800 | + | |
801 | + return 0; | |
802 | +} | |
803 | + | |
804 | +/* Allocate per-cpu resources (memory for completions and idescs). | |
805 | + * This routine supports tile_net_init_mpipe(), below. | |
806 | + */ | |
807 | +static int alloc_percpu_mpipe_resources(struct net_device *dev, | |
808 | + int cpu, int ring) | |
809 | +{ | |
810 | + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | |
811 | + int order, i, rc; | |
812 | + struct page *page; | |
813 | + void *addr; | |
814 | + | |
815 | + /* Allocate the "comps". */ | |
816 | + order = get_order(COMPS_SIZE); | |
817 | + page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | |
818 | + if (page == NULL) { | |
819 | + netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", | |
820 | + COMPS_SIZE); | |
821 | + return -ENOMEM; | |
822 | + } | |
823 | + addr = pfn_to_kaddr(page_to_pfn(page)); | |
824 | + memset(addr, 0, COMPS_SIZE); | |
825 | + for (i = 0; i < TILE_NET_CHANNELS; i++) | |
826 | + info->comps_for_echannel[i] = | |
827 | + addr + i * sizeof(struct tile_net_comps); | |
828 | + | |
829 | + /* If this is a network cpu, create an iqueue. */ | |
830 | + if (cpu_isset(cpu, network_cpus_map)) { | |
831 | + order = get_order(NOTIF_RING_SIZE); | |
832 | + page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | |
833 | + if (page == NULL) { | |
834 | + netdev_err(dev, | |
835 | + "Failed to alloc %zd bytes iqueue memory\n", | |
836 | + NOTIF_RING_SIZE); | |
837 | + return -ENOMEM; | |
838 | + } | |
839 | + addr = pfn_to_kaddr(page_to_pfn(page)); | |
840 | + rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, | |
841 | + addr, NOTIF_RING_SIZE, 0); | |
842 | + if (rc < 0) { | |
843 | + netdev_err(dev, | |
844 | + "gxio_mpipe_iqueue_init failed: %d\n", rc); | |
845 | + return rc; | |
846 | + } | |
847 | + info->has_iqueue = true; | |
848 | + } | |
849 | + | |
850 | + return ring; | |
851 | +} | |
852 | + | |
853 | +/* Initialize NotifGroup and buckets. | |
854 | + * This routine supports tile_net_init_mpipe(), below. | |
855 | + */ | |
856 | +static int init_notif_group_and_buckets(struct net_device *dev, | |
857 | + int ring, int network_cpus_count) | |
858 | +{ | |
859 | + int group, rc; | |
860 | + | |
861 | + /* Allocate one NotifGroup. */ | |
862 | + rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); | |
863 | + if (rc < 0) { | |
864 | + netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", | |
865 | + rc); | |
866 | + return rc; | |
867 | + } | |
868 | + group = rc; | |
869 | + | |
870 | + /* Initialize global num_buckets value. */ | |
871 | + if (network_cpus_count > 4) | |
872 | + num_buckets = 256; | |
873 | + else if (network_cpus_count > 1) | |
874 | + num_buckets = 16; | |
875 | + | |
876 | + /* Allocate some buckets, and set global first_bucket value. */ | |
877 | + rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); | |
878 | + if (rc < 0) { | |
879 | + netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); | |
880 | + return rc; | |
881 | + } | |
882 | + first_bucket = rc; | |
883 | + | |
884 | + /* Init group and buckets. */ | |
885 | + rc = gxio_mpipe_init_notif_group_and_buckets( | |
886 | + &context, group, ring, network_cpus_count, | |
887 | + first_bucket, num_buckets, | |
888 | + GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); | |
889 | + if (rc != 0) { | |
890 | + netdev_err( | |
891 | + dev, | |
892 | + "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", | |
893 | + rc); | |
894 | + return rc; | |
895 | + } | |
896 | + | |
897 | + return 0; | |
898 | +} | |
899 | + | |
900 | +/* Create an irq and register it, then activate the irq and request | |
901 | + * interrupts on all cores. Note that "ingress_irq" being initialized | |
902 | + * is how we know not to call tile_net_init_mpipe() again. | |
903 | + * This routine supports tile_net_init_mpipe(), below. | |
904 | + */ | |
905 | +static int tile_net_setup_interrupts(struct net_device *dev) | |
906 | +{ | |
907 | + int cpu, rc; | |
908 | + | |
909 | + rc = create_irq(); | |
910 | + if (rc < 0) { | |
911 | + netdev_err(dev, "create_irq failed: %d\n", rc); | |
912 | + return rc; | |
913 | + } | |
914 | + ingress_irq = rc; | |
915 | + tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); | |
916 | + rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, | |
917 | + 0, NULL, NULL); | |
918 | + if (rc != 0) { | |
919 | + netdev_err(dev, "request_irq failed: %d\n", rc); | |
920 | + destroy_irq(ingress_irq); | |
921 | + ingress_irq = -1; | |
922 | + return rc; | |
923 | + } | |
924 | + | |
925 | + for_each_online_cpu(cpu) { | |
926 | + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | |
927 | + if (info->has_iqueue) { | |
928 | + gxio_mpipe_request_notif_ring_interrupt( | |
929 | + &context, cpu_x(cpu), cpu_y(cpu), | |
930 | + 1, ingress_irq, info->iqueue.ring); | |
931 | + } | |
932 | + } | |
933 | + | |
934 | + return 0; | |
935 | +} | |
936 | + | |
937 | +/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ | |
938 | +static void tile_net_init_mpipe_fail(void) | |
939 | +{ | |
940 | + int cpu; | |
941 | + | |
942 | + /* Do cleanups that require the mpipe context first. */ | |
943 | + if (small_buffer_stack >= 0) | |
944 | + tile_net_pop_all_buffers(small_buffer_stack); | |
945 | + if (large_buffer_stack >= 0) | |
946 | + tile_net_pop_all_buffers(large_buffer_stack); | |
947 | + | |
948 | + /* Destroy mpipe context so the hardware no longer owns any memory. */ | |
949 | + gxio_mpipe_destroy(&context); | |
950 | + | |
951 | + for_each_online_cpu(cpu) { | |
952 | + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | |
953 | + free_pages((unsigned long)(info->comps_for_echannel[0]), | |
954 | + get_order(COMPS_SIZE)); | |
955 | + info->comps_for_echannel[0] = NULL; | |
956 | + free_pages((unsigned long)(info->iqueue.idescs), | |
957 | + get_order(NOTIF_RING_SIZE)); | |
958 | + info->iqueue.idescs = NULL; | |
959 | + } | |
960 | + | |
961 | + if (small_buffer_stack_va) | |
962 | + free_pages_exact(small_buffer_stack_va, buffer_stack_size); | |
963 | + if (large_buffer_stack_va) | |
964 | + free_pages_exact(large_buffer_stack_va, buffer_stack_size); | |
965 | + | |
966 | + small_buffer_stack_va = NULL; | |
967 | + large_buffer_stack_va = NULL; | |
968 | + large_buffer_stack = -1; | |
969 | + small_buffer_stack = -1; | |
970 | + first_bucket = -1; | |
971 | +} | |
972 | + | |
973 | +/* The first time any tilegx network device is opened, we initialize | |
974 | + * the global mpipe state. If this step fails, we fail to open the | |
975 | + * device, but if it succeeds, we never need to do it again, and since | |
976 | + * tile_net can't be unloaded, we never undo it. | |
977 | + * | |
978 | + * Note that some resources in this path (buffer stack indices, | |
979 | + * bindings from init_buffer_stack, etc.) are hypervisor resources | |
980 | + * that are freed implicitly by gxio_mpipe_destroy(). | |
981 | + */ | |
982 | +static int tile_net_init_mpipe(struct net_device *dev) | |
983 | +{ | |
984 | + int i, num_buffers, rc; | |
985 | + int cpu; | |
986 | + int first_ring, ring; | |
987 | + int network_cpus_count = cpus_weight(network_cpus_map); | |
988 | + | |
989 | + if (!hash_default) { | |
990 | + netdev_err(dev, "Networking requires hash_default!\n"); | |
991 | + return -EIO; | |
992 | + } | |
993 | + | |
994 | + rc = gxio_mpipe_init(&context, 0); | |
995 | + if (rc != 0) { | |
996 | + netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); | |
997 | + return -EIO; | |
998 | + } | |
999 | + | |
1000 | + /* Set up the buffer stacks. */ | |
1001 | + num_buffers = | |
1002 | + network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); | |
1003 | + rc = init_buffer_stacks(dev, num_buffers); | |
1004 | + if (rc != 0) | |
1005 | + goto fail; | |
1006 | + | |
1007 | + /* Provide initial buffers. */ | |
1008 | + rc = -ENOMEM; | |
1009 | + for (i = 0; i < num_buffers; i++) { | |
1010 | + if (!tile_net_provide_buffer(true)) { | |
1011 | + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); | |
1012 | + goto fail; | |
1013 | + } | |
1014 | + } | |
1015 | + for (i = 0; i < num_buffers; i++) { | |
1016 | + if (!tile_net_provide_buffer(false)) { | |
1017 | + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); | |
1018 | + goto fail; | |
1019 | + } | |
1020 | + } | |
1021 | + | |
1022 | + /* Allocate one NotifRing for each network cpu. */ | |
1023 | + rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); | |
1024 | + if (rc < 0) { | |
1025 | + netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", | |
1026 | + rc); | |
1027 | + goto fail; | |
1028 | + } | |
1029 | + | |
1030 | + /* Init NotifRings per-cpu. */ | |
1031 | + first_ring = rc; | |
1032 | + ring = first_ring; | |
1033 | + for_each_online_cpu(cpu) { | |
1034 | + rc = alloc_percpu_mpipe_resources(dev, cpu, ring); | |
1035 | + if (rc < 0) | |
1036 | + goto fail; | |
1037 | + ring = rc; | |
1038 | + } | |
1039 | + | |
1040 | + /* Initialize NotifGroup and buckets. */ | |
1041 | + rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); | |
1042 | + if (rc != 0) | |
1043 | + goto fail; | |
1044 | + | |
1045 | + /* Create and enable interrupts. */ | |
1046 | + rc = tile_net_setup_interrupts(dev); | |
1047 | + if (rc != 0) | |
1048 | + goto fail; | |
1049 | + | |
1050 | + return 0; | |
1051 | + | |
1052 | +fail: | |
1053 | + tile_net_init_mpipe_fail(); | |
1054 | + return rc; | |
1055 | +} | |
1056 | + | |
1057 | +/* Create persistent egress info for a given egress channel. | |
1058 | + * Note that this may be shared between, say, "gbe0" and "xgbe0". | |
1059 | + * ISSUE: Defer header allocation until TSO is actually needed? | |
1060 | + */ | |
1061 | +static int tile_net_init_egress(struct net_device *dev, int echannel) | |
1062 | +{ | |
1063 | + struct page *headers_page, *edescs_page, *equeue_page; | |
1064 | + gxio_mpipe_edesc_t *edescs; | |
1065 | + gxio_mpipe_equeue_t *equeue; | |
1066 | + unsigned char *headers; | |
1067 | + int headers_order, edescs_order, equeue_order; | |
1068 | + size_t edescs_size; | |
1069 | + int edma; | |
1070 | + int rc = -ENOMEM; | |
1071 | + | |
1072 | + /* Only initialize once. */ | |
1073 | + if (egress_for_echannel[echannel].equeue != NULL) | |
1074 | + return 0; | |
1075 | + | |
1076 | + /* Allocate memory for the "headers". */ | |
1077 | + headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); | |
1078 | + headers_page = alloc_pages(GFP_KERNEL, headers_order); | |
1079 | + if (headers_page == NULL) { | |
1080 | + netdev_warn(dev, | |
1081 | + "Could not alloc %zd bytes for TSO headers.\n", | |
1082 | + PAGE_SIZE << headers_order); | |
1083 | + goto fail; | |
1084 | + } | |
1085 | + headers = pfn_to_kaddr(page_to_pfn(headers_page)); | |
1086 | + | |
1087 | + /* Allocate memory for the "edescs". */ | |
1088 | + edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); | |
1089 | + edescs_order = get_order(edescs_size); | |
1090 | + edescs_page = alloc_pages(GFP_KERNEL, edescs_order); | |
1091 | + if (edescs_page == NULL) { | |
1092 | + netdev_warn(dev, | |
1093 | + "Could not alloc %zd bytes for eDMA ring.\n", | |
1094 | + edescs_size); | |
1095 | + goto fail_headers; | |
1096 | + } | |
1097 | + edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); | |
1098 | + | |
1099 | + /* Allocate memory for the "equeue". */ | |
1100 | + equeue_order = get_order(sizeof(*equeue)); | |
1101 | + equeue_page = alloc_pages(GFP_KERNEL, equeue_order); | |
1102 | + if (equeue_page == NULL) { | |
1103 | + netdev_warn(dev, | |
1104 | + "Could not alloc %zd bytes for equeue info.\n", | |
1105 | + PAGE_SIZE << equeue_order); | |
1106 | + goto fail_edescs; | |
1107 | + } | |
1108 | + equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); | |
1109 | + | |
1110 | + /* Allocate an edma ring. Note that in practice this can't | |
1111 | + * fail, which is good, because we will leak an edma ring if so. | |
1112 | + */ | |
1113 | + rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); | |
1114 | + if (rc < 0) { | |
1115 | + netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", | |
1116 | + rc); | |
1117 | + goto fail_equeue; | |
1118 | + } | |
1119 | + edma = rc; | |
1120 | + | |
1121 | + /* Initialize the equeue. */ | |
1122 | + rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, | |
1123 | + edescs, edescs_size, 0); | |
1124 | + if (rc != 0) { | |
1125 | + netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); | |
1126 | + goto fail_equeue; | |
1127 | + } | |
1128 | + | |
1129 | + /* Done. */ | |
1130 | + egress_for_echannel[echannel].equeue = equeue; | |
1131 | + egress_for_echannel[echannel].headers = headers; | |
1132 | + return 0; | |
1133 | + | |
1134 | +fail_equeue: | |
1135 | + __free_pages(equeue_page, equeue_order); | |
1136 | + | |
1137 | +fail_edescs: | |
1138 | + __free_pages(edescs_page, edescs_order); | |
1139 | + | |
1140 | +fail_headers: | |
1141 | + __free_pages(headers_page, headers_order); | |
1142 | + | |
1143 | +fail: | |
1144 | + return rc; | |
1145 | +} | |
1146 | + | |
1147 | +/* Return channel number for a newly-opened link. */ | |
1148 | +static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, | |
1149 | + const char *link_name) | |
1150 | +{ | |
1151 | + int rc = gxio_mpipe_link_open(link, &context, link_name, 0); | |
1152 | + if (rc < 0) { | |
1153 | + netdev_err(dev, "Failed to open '%s'\n", link_name); | |
1154 | + return rc; | |
1155 | + } | |
1156 | + rc = gxio_mpipe_link_channel(link); | |
1157 | + if (rc < 0 || rc >= TILE_NET_CHANNELS) { | |
1158 | + netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); | |
1159 | + gxio_mpipe_link_close(link); | |
1160 | + return -EINVAL; | |
1161 | + } | |
1162 | + return rc; | |
1163 | +} | |
1164 | + | |
1165 | +/* Help the kernel activate the given network interface. */ | |
1166 | +static int tile_net_open(struct net_device *dev) | |
1167 | +{ | |
1168 | + struct tile_net_priv *priv = netdev_priv(dev); | |
1169 | + int cpu, rc; | |
1170 | + | |
1171 | + mutex_lock(&tile_net_devs_for_channel_mutex); | |
1172 | + | |
1173 | + /* Do one-time initialization the first time any device is opened. */ | |
1174 | + if (ingress_irq < 0) { | |
1175 | + rc = tile_net_init_mpipe(dev); | |
1176 | + if (rc != 0) | |
1177 | + goto fail; | |
1178 | + } | |
1179 | + | |
1180 | + /* Determine if this is the "loopify" device. */ | |
1181 | + if (unlikely((loopify_link_name != NULL) && | |
1182 | + !strcmp(dev->name, loopify_link_name))) { | |
1183 | + rc = tile_net_link_open(dev, &priv->link, "loop0"); | |
1184 | + if (rc < 0) | |
1185 | + goto fail; | |
1186 | + priv->channel = rc; | |
1187 | + rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); | |
1188 | + if (rc < 0) | |
1189 | + goto fail; | |
1190 | + priv->loopify_channel = rc; | |
1191 | + priv->echannel = rc; | |
1192 | + } else { | |
1193 | + rc = tile_net_link_open(dev, &priv->link, dev->name); | |
1194 | + if (rc < 0) | |
1195 | + goto fail; | |
1196 | + priv->channel = rc; | |
1197 | + priv->echannel = rc; | |
1198 | + } | |
1199 | + | |
1200 | + /* Initialize egress info (if needed). Once ever, per echannel. */ | |
1201 | + rc = tile_net_init_egress(dev, priv->echannel); | |
1202 | + if (rc != 0) | |
1203 | + goto fail; | |
1204 | + | |
1205 | + tile_net_devs_for_channel[priv->channel] = dev; | |
1206 | + | |
1207 | + rc = tile_net_update(dev); | |
1208 | + if (rc != 0) | |
1209 | + goto fail; | |
1210 | + | |
1211 | + mutex_unlock(&tile_net_devs_for_channel_mutex); | |
1212 | + | |
1213 | + /* Initialize the transmit wake timer for this device for each cpu. */ | |
1214 | + for_each_online_cpu(cpu) { | |
1215 | + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | |
1216 | + struct tile_net_tx_wake *tx_wake = | |
1217 | + &info->tx_wake[priv->echannel]; | |
1218 | + | |
1219 | + hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, | |
1220 | + HRTIMER_MODE_REL); | |
1221 | + tx_wake->timer.function = tile_net_handle_tx_wake_timer; | |
1222 | + tx_wake->dev = dev; | |
1223 | + } | |
1224 | + | |
1225 | + for_each_online_cpu(cpu) | |
1226 | + netif_start_subqueue(dev, cpu); | |
1227 | + netif_carrier_on(dev); | |
1228 | + return 0; | |
1229 | + | |
1230 | +fail: | |
1231 | + if (priv->loopify_channel >= 0) { | |
1232 | + if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | |
1233 | + netdev_warn(dev, "Failed to close loopify link!\n"); | |
1234 | + priv->loopify_channel = -1; | |
1235 | + } | |
1236 | + if (priv->channel >= 0) { | |
1237 | + if (gxio_mpipe_link_close(&priv->link) != 0) | |
1238 | + netdev_warn(dev, "Failed to close link!\n"); | |
1239 | + priv->channel = -1; | |
1240 | + } | |
1241 | + priv->echannel = -1; | |
1242 | + tile_net_devs_for_channel[priv->channel] = NULL; | |
1243 | + mutex_unlock(&tile_net_devs_for_channel_mutex); | |
1244 | + | |
1245 | + /* Don't return raw gxio error codes to generic Linux. */ | |
1246 | + return (rc > -512) ? rc : -EIO; | |
1247 | +} | |
1248 | + | |
1249 | +/* Help the kernel deactivate the given network interface. */ | |
1250 | +static int tile_net_stop(struct net_device *dev) | |
1251 | +{ | |
1252 | + struct tile_net_priv *priv = netdev_priv(dev); | |
1253 | + int cpu; | |
1254 | + | |
1255 | + for_each_online_cpu(cpu) { | |
1256 | + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | |
1257 | + struct tile_net_tx_wake *tx_wake = | |
1258 | + &info->tx_wake[priv->echannel]; | |
1259 | + | |
1260 | + hrtimer_cancel(&tx_wake->timer); | |
1261 | + netif_stop_subqueue(dev, cpu); | |
1262 | + } | |
1263 | + | |
1264 | + mutex_lock(&tile_net_devs_for_channel_mutex); | |
1265 | + tile_net_devs_for_channel[priv->channel] = NULL; | |
1266 | + (void)tile_net_update(dev); | |
1267 | + if (priv->loopify_channel >= 0) { | |
1268 | + if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | |
1269 | + netdev_warn(dev, "Failed to close loopify link!\n"); | |
1270 | + priv->loopify_channel = -1; | |
1271 | + } | |
1272 | + if (priv->channel >= 0) { | |
1273 | + if (gxio_mpipe_link_close(&priv->link) != 0) | |
1274 | + netdev_warn(dev, "Failed to close link!\n"); | |
1275 | + priv->channel = -1; | |
1276 | + } | |
1277 | + priv->echannel = -1; | |
1278 | + mutex_unlock(&tile_net_devs_for_channel_mutex); | |
1279 | + | |
1280 | + return 0; | |
1281 | +} | |
1282 | + | |
1283 | +/* Determine the VA for a fragment. */ | |
1284 | +static inline void *tile_net_frag_buf(skb_frag_t *f) | |
1285 | +{ | |
1286 | + unsigned long pfn = page_to_pfn(skb_frag_page(f)); | |
1287 | + return pfn_to_kaddr(pfn) + f->page_offset; | |
1288 | +} | |
1289 | + | |
1290 | +/* Acquire a completion entry and an egress slot, or if we can't, | |
1291 | + * stop the queue and schedule the tx_wake timer. | |
1292 | + */ | |
1293 | +static s64 tile_net_equeue_try_reserve(struct net_device *dev, | |
1294 | + struct tile_net_comps *comps, | |
1295 | + gxio_mpipe_equeue_t *equeue, | |
1296 | + int num_edescs) | |
1297 | +{ | |
1298 | + /* Try to acquire a completion entry. */ | |
1299 | + if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || | |
1300 | + tile_net_free_comps(equeue, comps, 32, false) != 0) { | |
1301 | + | |
1302 | + /* Try to acquire an egress slot. */ | |
1303 | + s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | |
1304 | + if (slot >= 0) | |
1305 | + return slot; | |
1306 | + | |
1307 | + /* Freeing some completions gives the equeue time to drain. */ | |
1308 | + tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); | |
1309 | + | |
1310 | + slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | |
1311 | + if (slot >= 0) | |
1312 | + return slot; | |
1313 | + } | |
1314 | + | |
1315 | + /* Still nothing; give up and stop the queue for a short while. */ | |
1316 | + netif_stop_subqueue(dev, smp_processor_id()); | |
1317 | + tile_net_schedule_tx_wake_timer(dev); | |
1318 | + return -1; | |
1319 | +} | |
1320 | + | |
1321 | +/* Determine how many edesc's are needed for TSO. | |
1322 | + * | |
1323 | + * Sometimes, if "sendfile()" requires copying, we will be called with | |
1324 | + * "data" containing the header and payload, with "frags" being empty. | |
1325 | + * Sometimes, for example when using NFS over TCP, a single segment can | |
1326 | + * span 3 fragments. This requires special care. | |
1327 | + */ | |
1328 | +static int tso_count_edescs(struct sk_buff *skb) | |
1329 | +{ | |
1330 | + struct skb_shared_info *sh = skb_shinfo(skb); | |
1331 | + unsigned int data_len = skb->data_len; | |
1332 | + unsigned int p_len = sh->gso_size; | |
1333 | + long f_id = -1; /* id of the current fragment */ | |
1334 | + long f_size = -1; /* size of the current fragment */ | |
1335 | + long f_used = -1; /* bytes used from the current fragment */ | |
1336 | + long n; /* size of the current piece of payload */ | |
1337 | + int num_edescs = 0; | |
1338 | + int segment; | |
1339 | + | |
1340 | + for (segment = 0; segment < sh->gso_segs; segment++) { | |
1341 | + | |
1342 | + unsigned int p_used = 0; | |
1343 | + | |
1344 | + /* One edesc for header and for each piece of the payload. */ | |
1345 | + for (num_edescs++; p_used < p_len; num_edescs++) { | |
1346 | + | |
1347 | + /* Advance as needed. */ | |
1348 | + while (f_used >= f_size) { | |
1349 | + f_id++; | |
1350 | + f_size = sh->frags[f_id].size; | |
1351 | + f_used = 0; | |
1352 | + } | |
1353 | + | |
1354 | + /* Use bytes from the current fragment. */ | |
1355 | + n = p_len - p_used; | |
1356 | + if (n > f_size - f_used) | |
1357 | + n = f_size - f_used; | |
1358 | + f_used += n; | |
1359 | + p_used += n; | |
1360 | + } | |
1361 | + | |
1362 | + /* The last segment may be less than gso_size. */ | |
1363 | + data_len -= p_len; | |
1364 | + if (data_len < p_len) | |
1365 | + p_len = data_len; | |
1366 | + } | |
1367 | + | |
1368 | + return num_edescs; | |
1369 | +} | |
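/* A worked example with illustrative numbers: an skb with
 * gso_size = 1400 and 2800 bytes of paged payload split across frags of
 * 2000 and 800 bytes has gso_segs = 2.  Segment one needs a header
 * edesc plus one payload edesc (1400 bytes from frag 0); segment two
 * needs a header edesc plus two payload edescs (the remaining 600 bytes
 * of frag 0, then 800 bytes from frag 1), so the function returns 5.
 */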
1370 | + | |
1371 | +/* Prepare modified copies of the skbuff headers. | |
1372 | + * FIXME: add support for IPv6. | |
1373 | + */ | |
1374 | +static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | |
1375 | + s64 slot) | |
1376 | +{ | |
1377 | + struct skb_shared_info *sh = skb_shinfo(skb); | |
1378 | + struct iphdr *ih; | |
1379 | + struct tcphdr *th; | |
1380 | + unsigned int data_len = skb->data_len; | |
1381 | + unsigned char *data = skb->data; | |
1382 | + unsigned int ih_off, th_off, sh_len, p_len; | |
1383 | + unsigned int isum_seed, tsum_seed, id, seq; | |
1384 | + long f_id = -1; /* id of the current fragment */ | |
1385 | + long f_size = -1; /* size of the current fragment */ | |
1386 | + long f_used = -1; /* bytes used from the current fragment */ | |
1387 | + long n; /* size of the current piece of payload */ | |
1388 | + int segment; | |
1389 | + | |
1390 | + /* Locate original headers and compute various lengths. */ | |
1391 | + ih = ip_hdr(skb); | |
1392 | + th = tcp_hdr(skb); | |
1393 | + ih_off = skb_network_offset(skb); | |
1394 | + th_off = skb_transport_offset(skb); | |
1395 | + sh_len = th_off + tcp_hdrlen(skb); | |
1396 | + p_len = sh->gso_size; | |
1397 | + | |
1398 | + /* Set up seed values for IP and TCP csum and initialize id and seq. */ | |
1399 | + isum_seed = ((0xFFFF - ih->check) + | |
1400 | + (0xFFFF - ih->tot_len) + | |
1401 | + (0xFFFF - ih->id)); | |
1402 | + tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); | |
1403 | + id = ntohs(ih->id); | |
1404 | + seq = ntohl(th->seq); | |
1405 | + | |
1406 | + /* Prepare all the headers. */ | |
1407 | + for (segment = 0; segment < sh->gso_segs; segment++) { | |
1408 | + unsigned char *buf; | |
1409 | + unsigned int p_used = 0; | |
1410 | + | |
1411 | + /* Copy to the header memory for this segment. */ | |
1412 | + buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | |
1413 | + NET_IP_ALIGN; | |
1414 | + memcpy(buf, data, sh_len); | |
1415 | + | |
1416 | + /* Update copied ip header. */ | |
1417 | + ih = (struct iphdr *)(buf + ih_off); | |
1418 | + ih->tot_len = htons(sh_len + p_len - ih_off); | |
1419 | + ih->id = htons(id); | |
1420 | + ih->check = csum_long(isum_seed + ih->tot_len + | |
1421 | + ih->id) ^ 0xffff; | |
1422 | + | |
1423 | + /* Update copied tcp header. */ | |
1424 | + th = (struct tcphdr *)(buf + th_off); | |
1425 | + th->seq = htonl(seq); | |
1426 | + th->check = csum_long(tsum_seed + htons(sh_len + p_len)); | |
1427 | + if (segment != sh->gso_segs - 1) { | |
1428 | + th->fin = 0; | |
1429 | + th->psh = 0; | |
1430 | + } | |
1431 | + | |
1432 | + /* Skip past the header. */ | |
1433 | + slot++; | |
1434 | + | |
1435 | + /* Skip past the payload. */ | |
1436 | + while (p_used < p_len) { | |
1437 | + | |
1438 | + /* Advance as needed. */ | |
1439 | + while (f_used >= f_size) { | |
1440 | + f_id++; | |
1441 | + f_size = sh->frags[f_id].size; | |
1442 | + f_used = 0; | |
1443 | + } | |
1444 | + | |
1445 | + /* Use bytes from the current fragment. */ | |
1446 | + n = p_len - p_used; | |
1447 | + if (n > f_size - f_used) | |
1448 | + n = f_size - f_used; | |
1449 | + f_used += n; | |
1450 | + p_used += n; | |
1451 | + | |
1452 | + slot++; | |
1453 | + } | |
1454 | + | |
1455 | + id++; | |
1456 | + seq += p_len; | |
1457 | + | |
1458 | + /* The last segment may be less than gso_size. */ | |
1459 | + data_len -= p_len; | |
1460 | + if (data_len < p_len) | |
1461 | + p_len = data_len; | |
1462 | + } | |
1463 | + | |
1464 | + /* Flush the headers so they are ready for hardware DMA. */ | |
1465 | + wmb(); | |
1466 | +} | |
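The header updates above lean on one's-complement checksum arithmetic: "subtracting" the old checksum, tot_len and id once into isum_seed (by adding their complements) lets each segment's checksum be produced by folding in only the new values. Below is a minimal standalone sketch (not part of this patch) over a made-up header; csum_fold(), the word values and the field positions are all illustrative, but it checks that the seeded update agrees with a full recomputation.

/* csum_seed_demo.c: the incremental checksum trick on a fake header. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}

/* Internet checksum over n 16-bit words (checksum word must be zero). */
static uint16_t csum(const uint16_t *w, int n)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += w[i];
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	/* Ten made-up header words; w[1] plays tot_len, w[2] plays id,
	 * w[5] is the checksum field.
	 */
	uint16_t w[10] = { 0x4500, 0x05dc, 0x1234, 0x4000, 0x4006,
			   0x0000, 0x0a00, 0x0001, 0x0a00, 0x0002 };
	uint16_t incremental;
	uint32_t seed;

	w[5] = csum(w, 10);

	/* Seed: old checksum complemented, minus the fields that will
	 * change, in the same spirit as isum_seed in the driver.
	 */
	seed = (0xFFFFu - w[5]) + (0xFFFFu - w[1]) + (0xFFFFu - w[2]);

	/* Per-segment update: new tot_len and id. */
	w[1] = 0x0240;
	w[2] = 0x1235;

	incremental = csum_fold(seed + w[1] + w[2]) ^ 0xFFFF;

	/* A full recomputation over the updated header must agree. */
	w[5] = 0;
	assert(incremental == csum(w, 10));
	printf("0x%04x\n", incremental);
	return 0;
}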
1467 | + | |
1468 | +/* Pass all the data to mpipe for egress. */ | |
1469 | +static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | |
1470 | + struct sk_buff *skb, unsigned char *headers, s64 slot) | |
1471 | +{ | |
1472 | + struct tile_net_priv *priv = netdev_priv(dev); | |
1473 | + struct skb_shared_info *sh = skb_shinfo(skb); | |
1474 | + unsigned int data_len = skb->data_len; | |
1475 | + unsigned int p_len = sh->gso_size; | |
1476 | + gxio_mpipe_edesc_t edesc_head = { { 0 } }; | |
1477 | + gxio_mpipe_edesc_t edesc_body = { { 0 } }; | |
1478 | + long f_id = -1; /* id of the current fragment */ | |
1479 | + long f_size = -1; /* size of the current fragment */ | |
1480 | + long f_used = -1; /* bytes used from the current fragment */ | |
1481 | + long n; /* size of the current piece of payload */ | |
1482 | + unsigned long tx_packets = 0, tx_bytes = 0; | |
1483 | + unsigned int csum_start, sh_len; | |
1484 | + int segment; | |
1485 | + | |
1486 | + /* Prepare to egress the headers: set up header edesc. */ | |
1487 | + csum_start = skb_checksum_start_offset(skb); | |
1488 | + sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
1489 | + edesc_head.csum = 1; | |
1490 | + edesc_head.csum_start = csum_start; | |
1491 | + edesc_head.csum_dest = csum_start + skb->csum_offset; | |
1492 | + edesc_head.xfer_size = sh_len; | |
1493 | + | |
1494 | + /* This is only used to specify the TLB. */ | |
1495 | + edesc_head.stack_idx = large_buffer_stack; | |
1496 | + edesc_body.stack_idx = large_buffer_stack; | |
1497 | + | |
1498 | + /* Egress all the edescs. */ | |
1499 | + for (segment = 0; segment < sh->gso_segs; segment++) { | |
1500 | + void *va; | |
1501 | + unsigned char *buf; | |
1502 | + unsigned int p_used = 0; | |
1503 | + | |
1504 | + /* Egress the header. */ | |
1505 | + buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | |
1506 | + NET_IP_ALIGN; | |
1507 | + edesc_head.va = va_to_tile_io_addr(buf); | |
1508 | + gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); | |
1509 | + slot++; | |
1510 | + | |
1511 | + /* Egress the payload. */ | |
1512 | + while (p_used < p_len) { | |
1513 | + | |
1514 | + /* Advance as needed. */ | |
1515 | + while (f_used >= f_size) { | |
1516 | + f_id++; | |
1517 | + f_size = sh->frags[f_id].size; | |
1518 | + f_used = 0; | |
1519 | + } | |
1520 | + | |
1521 | + va = tile_net_frag_buf(&sh->frags[f_id]) + f_used; | |
1522 | + | |
1523 | + /* Use bytes from the current fragment. */ | |
1524 | + n = p_len - p_used; | |
1525 | + if (n > f_size - f_used) | |
1526 | + n = f_size - f_used; | |
1527 | + f_used += n; | |
1528 | + p_used += n; | |
1529 | + | |
1530 | + /* Egress a piece of the payload. */ | |
1531 | + edesc_body.va = va_to_tile_io_addr(va); | |
1532 | + edesc_body.xfer_size = n; | |
1533 | + edesc_body.bound = !(p_used < p_len); | |
1534 | + gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); | |
1535 | + slot++; | |
1536 | + } | |
1537 | + | |
1538 | + tx_packets++; | |
1539 | + tx_bytes += sh_len + p_len; | |
1540 | + | |
1541 | + /* The last segment may be less than gso_size. */ | |
1542 | + data_len -= p_len; | |
1543 | + if (data_len < p_len) | |
1544 | + p_len = data_len; | |
1545 | + } | |
1546 | + | |
1547 | + /* Update stats. */ | |
1548 | + tile_net_stats_add(tx_packets, &priv->stats.tx_packets); | |
1549 | + tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); | |
1550 | +} | |
1551 | + | |
1552 | +/* Do "TSO" handling for egress. | |
1553 | + * | |
1554 | + * Normally drivers set NETIF_F_TSO only to support hardware TSO; | |
1555 | + * otherwise the stack uses scatter-gather to implement GSO in software. | |
1556 | + * In our testing, enabling software GSO (via NETIF_F_SG) drops network | |
1557 | + * performance to around 7.5 Gbps on the 10G interfaces, although it | |
1558 | + * also cuts cpu utilization sharply, to under 8%. Implementing | |
1559 | + * "TSO" in the driver instead brings performance back up to line | |
1560 | + * rate, while reducing cpu usage even further, to less than 4%. In | |
1561 | + * practice, profiling shows that skb_segment() accounts for most of | |
1562 | + * the GSO overhead; the driver avoids it by copying the TCP/IP | |
1563 | + * headers into preallocated memory. | |
1564 | + */ | |
1565 | +static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | |
1566 | +{ | |
1567 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
1568 | + struct tile_net_priv *priv = netdev_priv(dev); | |
1569 | + int channel = priv->echannel; | |
1570 | + struct tile_net_egress *egress = &egress_for_echannel[channel]; | |
1571 | + struct tile_net_comps *comps = info->comps_for_echannel[channel]; | |
1572 | + gxio_mpipe_equeue_t *equeue = egress->equeue; | |
1573 | + unsigned long irqflags; | |
1574 | + int num_edescs; | |
1575 | + s64 slot; | |
1576 | + | |
1577 | + /* Determine how many mpipe edesc's are needed. */ | |
1578 | + num_edescs = tso_count_edescs(skb); | |
1579 | + | |
1580 | + local_irq_save(irqflags); | |
1581 | + | |
1582 | + /* Try to acquire a completion entry and an egress slot. */ | |
1583 | + slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | |
1584 | + if (slot < 0) { | |
1585 | + local_irq_restore(irqflags); | |
1586 | + return NETDEV_TX_BUSY; | |
1587 | + } | |
1588 | + | |
1589 | + /* Set up copies of header data properly. */ | |
1590 | + tso_headers_prepare(skb, egress->headers, slot); | |
1591 | + | |
1592 | + /* Actually pass the data to the network hardware. */ | |
1593 | + tso_egress(dev, equeue, skb, egress->headers, slot); | |
1594 | + | |
1595 | + /* Add a completion record. */ | |
1596 | + add_comp(equeue, comps, slot + num_edescs - 1, skb); | |
1597 | + | |
1598 | + local_irq_restore(irqflags); | |
1599 | + | |
1600 | + /* Make sure the egress timer is scheduled. */ | |
1601 | + tile_net_schedule_egress_timer(); | |
1602 | + | |
1603 | + return NETDEV_TX_OK; | |
1604 | +} | |
1605 | + | |
1606 | +/* Analyze the body and frags for a transmit request. */ | |
1607 | +static unsigned int tile_net_tx_frags(struct frag *frags, | |
1608 | + struct sk_buff *skb, | |
1609 | + void *b_data, unsigned int b_len) | |
1610 | +{ | |
1611 | + unsigned int i, n = 0; | |
1612 | + | |
1613 | + struct skb_shared_info *sh = skb_shinfo(skb); | |
1614 | + | |
1615 | + if (b_len != 0) { | |
1616 | + frags[n].buf = b_data; | |
1617 | + frags[n++].length = b_len; | |
1618 | + } | |
1619 | + | |
1620 | + for (i = 0; i < sh->nr_frags; i++) { | |
1621 | + skb_frag_t *f = &sh->frags[i]; | |
1622 | + frags[n].buf = tile_net_frag_buf(f); | |
1623 | + frags[n++].length = skb_frag_size(f); | |
1624 | + } | |
1625 | + | |
1626 | + return n; | |
1627 | +} | |
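For reference, a small standalone sketch (not part of this patch) of the flattening above, for a hypothetical skb with 66 bytes of linear data and two page fragments; struct frag here is a stand-in for the driver's own type, and the sizes are made up.

/* tx_frags_demo.c: how the non-TSO path flattens an skb into frags[]. */
#include <stdio.h>

struct frag {			/* stand-in for the driver's struct frag */
	void *buf;
	unsigned int length;
};

/* Mirror of tile_net_tx_frags(): one entry for the linear data (if any),
 * then one entry per page fragment.
 */
static unsigned int flatten(struct frag *frags, void *b_data,
			    unsigned int b_len, void **frag_buf,
			    const unsigned int *frag_len, unsigned int nr_frags)
{
	unsigned int i, n = 0;

	if (b_len != 0) {
		frags[n].buf = b_data;
		frags[n++].length = b_len;
	}
	for (i = 0; i < nr_frags; i++) {
		frags[n].buf = frag_buf[i];
		frags[n++].length = frag_len[i];
	}
	return n;
}

int main(void)
{
	static char hdr[66], page0[1000], page1[2000];
	void *bufs[] = { page0, page1 };
	unsigned int lens[] = { sizeof(page0), sizeof(page1) };
	struct frag frags[3];
	unsigned int i, n;

	/* Three entries result; the driver marks .bound on the last edesc. */
	n = flatten(frags, hdr, sizeof(hdr), bufs, lens, 2);
	for (i = 0; i < n; i++)
		printf("edesc %u: %u bytes\n", i, frags[i].length);
	return 0;
}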
1628 | + | |
1629 | +/* Help the kernel transmit a packet. */ | |
1630 | +static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |
1631 | +{ | |
1632 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
1633 | + struct tile_net_priv *priv = netdev_priv(dev); | |
1634 | + struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; | |
1635 | + gxio_mpipe_equeue_t *equeue = egress->equeue; | |
1636 | + struct tile_net_comps *comps = | |
1637 | + info->comps_for_echannel[priv->echannel]; | |
1638 | + unsigned int len = skb->len; | |
1639 | + unsigned char *data = skb->data; | |
1640 | + unsigned int num_edescs; | |
1641 | + struct frag frags[MAX_FRAGS]; | |
1642 | + gxio_mpipe_edesc_t edescs[MAX_FRAGS]; | |
1643 | + unsigned long irqflags; | |
1644 | + gxio_mpipe_edesc_t edesc = { { 0 } }; | |
1645 | + unsigned int i; | |
1646 | + s64 slot; | |
1647 | + | |
1648 | + if (skb_is_gso(skb)) | |
1649 | + return tile_net_tx_tso(skb, dev); | |
1650 | + | |
1651 | + num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | |
1652 | + | |
1653 | + /* This is only used to specify the TLB. */ | |
1654 | + edesc.stack_idx = large_buffer_stack; | |
1655 | + | |
1656 | + /* Prepare the edescs. */ | |
1657 | + for (i = 0; i < num_edescs; i++) { | |
1658 | + edesc.xfer_size = frags[i].length; | |
1659 | + edesc.va = va_to_tile_io_addr(frags[i].buf); | |
1660 | + edescs[i] = edesc; | |
1661 | + } | |
1662 | + | |
1663 | + /* Mark the final edesc. */ | |
1664 | + edescs[num_edescs - 1].bound = 1; | |
1665 | + | |
1666 | + /* Add checksum info to the initial edesc, if needed. */ | |
1667 | + if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1668 | + unsigned int csum_start = skb_checksum_start_offset(skb); | |
1669 | + edescs[0].csum = 1; | |
1670 | + edescs[0].csum_start = csum_start; | |
1671 | + edescs[0].csum_dest = csum_start + skb->csum_offset; | |
1672 | + } | |
1673 | + | |
1674 | + local_irq_save(irqflags); | |
1675 | + | |
1676 | + /* Try to acquire a completion entry and an egress slot. */ | |
1677 | + slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | |
1678 | + if (slot < 0) { | |
1679 | + local_irq_restore(irqflags); | |
1680 | + return NETDEV_TX_BUSY; | |
1681 | + } | |
1682 | + | |
1683 | + for (i = 0; i < num_edescs; i++) | |
1684 | + gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); | |
1685 | + | |
1686 | + /* Add a completion record. */ | |
1687 | + add_comp(equeue, comps, slot - 1, skb); | |
1688 | + | |
1689 | + /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ | |
1690 | + tile_net_stats_add(1, &priv->stats.tx_packets); | |
1691 | + tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), | |
1692 | + &priv->stats.tx_bytes); | |
1693 | + | |
1694 | + local_irq_restore(irqflags); | |
1695 | + | |
1696 | + /* Make sure the egress timer is scheduled. */ | |
1697 | + tile_net_schedule_egress_timer(); | |
1698 | + | |
1699 | + return NETDEV_TX_OK; | |
1700 | +} | |
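The CHECKSUM_PARTIAL case above hands the hardware two offsets: csum_start, where summing begins, and csum_dest = csum_start + skb->csum_offset, where the result is written. A minimal standalone sketch (not part of this patch) of that arithmetic, assuming a plain Ethernet + IPv4 + TCP frame with no options; the struct below is a trimmed stand-in for the real TCP header layout.

/* csum_offsets_demo.c: csum_start/csum_dest for a TCP-over-IPv4 frame. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tcp_hdr_min {		/* leading fields of a TCP header */
	uint16_t source, dest;
	uint32_t seq, ack_seq;
	uint16_t flags_doff;
	uint16_t window;
	uint16_t check;
	uint16_t urg_ptr;
};

int main(void)
{
	unsigned int eth_hlen = 14, ip_hlen = 20;

	/* Checksumming starts at the TCP header... */
	unsigned int csum_start = eth_hlen + ip_hlen;

	/* ...and the result lands in the TCP checksum field. */
	unsigned int csum_dest = csum_start +
		(unsigned int)offsetof(struct tcp_hdr_min, check);

	printf("csum_start=%u csum_dest=%u\n", csum_start, csum_dest);
	return 0;
}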
1701 | + | |
1702 | +/* Return the subqueue id to use on this core (one TX queue per cpu). */ | |
1703 | +static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | |
1704 | +{ | |
1705 | + return smp_processor_id(); | |
1706 | +} | |
1707 | + | |
1708 | +/* Deal with a transmit timeout. */ | |
1709 | +static void tile_net_tx_timeout(struct net_device *dev) | |
1710 | +{ | |
1711 | + int cpu; | |
1712 | + | |
1713 | + for_each_online_cpu(cpu) | |
1714 | + netif_wake_subqueue(dev, cpu); | |
1715 | +} | |
1716 | + | |
1717 | +/* Ioctl commands. */ | |
1718 | +static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
1719 | +{ | |
1720 | + return -EOPNOTSUPP; | |
1721 | +} | |
1722 | + | |
1723 | +/* Get system network statistics for device. */ | |
1724 | +static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | |
1725 | +{ | |
1726 | + struct tile_net_priv *priv = netdev_priv(dev); | |
1727 | + return &priv->stats; | |
1728 | +} | |
1729 | + | |
1730 | +/* Change the MTU. */ | |
1731 | +static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | |
1732 | +{ | |
1733 | + if ((new_mtu < 68) || (new_mtu > 1500)) | |
1734 | + return -EINVAL; | |
1735 | + dev->mtu = new_mtu; | |
1736 | + return 0; | |
1737 | +} | |
1738 | + | |
1739 | +/* Change the Ethernet address of the NIC. | |
1740 | + * | |
1741 | + * The hypervisor driver does not support changing the MAC address. However, | |
1742 | + * the hardware does not do anything with the MAC address, so the address | |
1743 | + * which gets used on outgoing packets, and which is accepted on incoming | |
1744 | + * packets, is completely up to us. | |
1745 | + * | |
1746 | + * Returns 0 on success, negative on failure. | |
1747 | + */ | |
1748 | +static int tile_net_set_mac_address(struct net_device *dev, void *p) | |
1749 | +{ | |
1750 | + struct sockaddr *addr = p; | |
1751 | + | |
1752 | + if (!is_valid_ether_addr(addr->sa_data)) | |
1753 | + return -EINVAL; | |
1754 | + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
1755 | + return 0; | |
1756 | +} | |
1757 | + | |
1758 | +#ifdef CONFIG_NET_POLL_CONTROLLER | |
1759 | +/* Polling 'interrupt' - used by things like netconsole to send skbs | |
1760 | + * without having to re-enable interrupts. It's not called while | |
1761 | + * the interrupt routine is executing. | |
1762 | + */ | |
1763 | +static void tile_net_netpoll(struct net_device *dev) | |
1764 | +{ | |
1765 | + disable_percpu_irq(ingress_irq); | |
1766 | + tile_net_handle_ingress_irq(ingress_irq, NULL); | |
1767 | + enable_percpu_irq(ingress_irq, 0); | |
1768 | +} | |
1769 | +#endif | |
1770 | + | |
1771 | +static const struct net_device_ops tile_net_ops = { | |
1772 | + .ndo_open = tile_net_open, | |
1773 | + .ndo_stop = tile_net_stop, | |
1774 | + .ndo_start_xmit = tile_net_tx, | |
1775 | + .ndo_select_queue = tile_net_select_queue, | |
1776 | + .ndo_do_ioctl = tile_net_ioctl, | |
1777 | + .ndo_get_stats = tile_net_get_stats, | |
1778 | + .ndo_change_mtu = tile_net_change_mtu, | |
1779 | + .ndo_tx_timeout = tile_net_tx_timeout, | |
1780 | + .ndo_set_mac_address = tile_net_set_mac_address, | |
1781 | +#ifdef CONFIG_NET_POLL_CONTROLLER | |
1782 | + .ndo_poll_controller = tile_net_netpoll, | |
1783 | +#endif | |
1784 | +}; | |
1785 | + | |
1786 | +/* The setup function. | |
1787 | + * | |
1788 | + * This uses ether_setup() to assign various fields in dev, including | |
1789 | + * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | |
1790 | + */ | |
1791 | +static void tile_net_setup(struct net_device *dev) | |
1792 | +{ | |
1793 | + ether_setup(dev); | |
1794 | + dev->netdev_ops = &tile_net_ops; | |
1795 | + dev->watchdog_timeo = TILE_NET_TIMEOUT; | |
1796 | + dev->features |= NETIF_F_LLTX; | |
1797 | + dev->features |= NETIF_F_HW_CSUM; | |
1798 | + dev->features |= NETIF_F_SG; | |
1799 | + dev->features |= NETIF_F_TSO; | |
1800 | + dev->mtu = 1500; | |
1801 | +} | |
1802 | + | |
1803 | +/* Allocate and register the device structure, using the MAC address | |
1804 | + * provided by the hypervisor (or a random one if it is all zeroes). | |
1805 | + */ | |
1806 | +static void tile_net_dev_init(const char *name, const uint8_t *mac) | |
1807 | +{ | |
1808 | + int ret; | |
1809 | + int i; | |
1810 | + int nz_addr = 0; | |
1811 | + struct net_device *dev; | |
1812 | + struct tile_net_priv *priv; | |
1813 | + | |
1814 | + /* HACK: Ignore "loop" links. */ | |
1815 | + if (strncmp(name, "loop", 4) == 0) | |
1816 | + return; | |
1817 | + | |
1818 | + /* Allocate the device structure. Normally, "name" is a | |
1819 | + * template, instantiated by register_netdev(), but not for us. | |
1820 | + */ | |
1821 | + dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup, | |
1822 | + NR_CPUS, 1); | |
1823 | + if (!dev) { | |
1824 | + pr_err("alloc_netdev_mqs(%s) failed\n", name); | |
1825 | + return; | |
1826 | + } | |
1827 | + | |
1828 | + /* Initialize "priv". */ | |
1829 | + priv = netdev_priv(dev); | |
1830 | + memset(priv, 0, sizeof(*priv)); | |
1831 | + priv->dev = dev; | |
1832 | + priv->channel = -1; | |
1833 | + priv->loopify_channel = -1; | |
1834 | + priv->echannel = -1; | |
1835 | + | |
1836 | + /* Get the MAC address and set it in the device struct; this must | |
1837 | + * be done before the device is opened. If the MAC is all zeroes, | |
1838 | + * we use a random address, since we're probably on the simulator. | |
1839 | + */ | |
1840 | + for (i = 0; i < 6; i++) | |
1841 | + nz_addr |= mac[i]; | |
1842 | + | |
1843 | + if (nz_addr) { | |
1844 | + memcpy(dev->dev_addr, mac, 6); | |
1845 | + dev->addr_len = 6; | |
1846 | + } else { | |
1847 | + random_ether_addr(dev->dev_addr); | |
1848 | + } | |
1849 | + | |
1850 | + /* Register the network device. */ | |
1851 | + ret = register_netdev(dev); | |
1852 | + if (ret) { | |
1853 | + netdev_err(dev, "register_netdev failed %d\n", ret); | |
1854 | + free_netdev(dev); | |
1855 | + return; | |
1856 | + } | |
1857 | +} | |
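The nz_addr loop above is simply an all-zeroes test on the MAC. An equivalent formulation (illustrative only, not what the patch does) could use the is_zero_ether_addr() helper from <linux/etherdevice.h>, which this file already includes:

	/* Hypothetical alternative to the nz_addr loop above. */
	if (!is_zero_ether_addr(mac)) {
		memcpy(dev->dev_addr, mac, ETH_ALEN);
		dev->addr_len = ETH_ALEN;
	} else {
		random_ether_addr(dev->dev_addr);
	}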
1858 | + | |
1859 | +/* Per-cpu module initialization. */ | |
1860 | +static void tile_net_init_module_percpu(void *unused) | |
1861 | +{ | |
1862 | + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | |
1863 | + int my_cpu = smp_processor_id(); | |
1864 | + | |
1865 | + info->has_iqueue = false; | |
1866 | + | |
1867 | + info->my_cpu = my_cpu; | |
1868 | + | |
1869 | + /* Initialize the egress timer. */ | |
1870 | + hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
1871 | + info->egress_timer.function = tile_net_handle_egress_timer; | |
1872 | +} | |
1873 | + | |
1874 | +/* Module initialization. */ | |
1875 | +static int __init tile_net_init_module(void) | |
1876 | +{ | |
1877 | + int i; | |
1878 | + char name[GXIO_MPIPE_LINK_NAME_LEN]; | |
1879 | + uint8_t mac[6]; | |
1880 | + | |
1881 | + pr_info("Tilera Network Driver\n"); | |
1882 | + | |
1883 | + mutex_init(&tile_net_devs_for_channel_mutex); | |
1884 | + | |
1885 | + /* Initialize each CPU. */ | |
1886 | + on_each_cpu(tile_net_init_module_percpu, NULL, 1); | |
1887 | + | |
1888 | + /* Find out what devices we have, and initialize them. */ | |
1889 | + for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) | |
1890 | + tile_net_dev_init(name, mac); | |
1891 | + | |
1892 | + if (!network_cpus_init()) | |
1893 | + network_cpus_map = *cpu_online_mask; | |
1894 | + | |
1895 | + return 0; | |
1896 | +} | |
1897 | + | |
1898 | +module_init(tile_net_init_module); |