Commit db21733488f84a596faaad0d05430b3f51804692

Authored by Chris Leech
Committed by David S. Miller
1 parent 57c651f74c

[I/OAT]: Setup the networking subsystem as a DMA client

Sets up the networking subsystem as a client of the DMA engine layer and
attempts to allocate one DMA channel per online CPU.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 4 changed files with 158 additions and 0 deletions

drivers/dma/Kconfig
@@ -10,6 +10,18 @@
 	  DMA engines offload copy operations from the CPU to dedicated
 	  hardware, allowing the copies to happen asynchronously.
 
+comment "DMA Clients"
+
+config NET_DMA
+	bool "Network: TCP receive copy offload"
+	depends on DMA_ENGINE && NET
+	default y
+	---help---
+	  This enables the use of DMA engines in the network stack to
+	  offload receive copy-to-user operations, freeing CPU cycles.
+	  Since this is the main user of the DMA engine, it should be enabled;
+	  say Y here.
+
 comment "DMA Devices"
 
 config INTEL_IOATDMA
include/linux/netdevice.h
@@ -37,6 +37,7 @@
 #include <linux/config.h>
 #include <linux/device.h>
 #include <linux/percpu.h>
+#include <linux/dmaengine.h>
 
 struct divert_blk;
 struct vlan_group;
@@ -593,6 +594,9 @@
 	struct sk_buff		*completion_queue;
 
 	struct net_device	backlog_dev;	/* Sorry. 8) */
+#ifdef CONFIG_NET_DMA
+	struct dma_chan		*net_dma;
+#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
include/net/netdma.h
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef NETDMA_H
+#define NETDMA_H
+#include <linux/config.h>
+#ifdef CONFIG_NET_DMA
+#include <linux/dmaengine.h>
+
+/*
+ * Grab the DMA channel assigned to this CPU, taking a reference that
+ * the caller must drop with dma_chan_put().  Returns NULL if no
+ * channel has been assigned to this CPU.
+ */
+static inline struct dma_chan *get_softnet_dma(void)
+{
+	struct dma_chan *chan;
+
+	rcu_read_lock();
+	chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma));
+	if (chan)
+		dma_chan_get(chan);
+	rcu_read_unlock();
+
+	return chan;
+}
+#endif /* CONFIG_NET_DMA */
+#endif /* NETDMA_H */
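For orientation, a receive-path consumer of this interface would look up the
per-CPU channel, queue an asynchronous copy, kick the hardware, and poll for
completion before dropping its channel reference. Below is a minimal sketch of
that pattern against the dmaengine client API this commit targets; the helper
name example_dma_copy and the flat source/destination buffers are illustrative
only, not part of this commit (the real consumer, TCP receive copy offload,
lands in a later patch of this series).

#include <linux/dmaengine.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/netdma.h>

/* Illustrative helper, not from this commit: copy len bytes through
 * this CPU's DMA channel, falling back to memcpy() when no channel
 * has been assigned to this CPU. */
static void example_dma_copy(void *dst, void *src, size_t len)
{
	struct dma_chan *chan = get_softnet_dma();
	dma_cookie_t cookie, done, used;

	if (!chan) {
		memcpy(dst, src, len);
		return;
	}

	/* Queue the copy; nothing reaches the engine until
	 * dma_async_memcpy_issue_pending() is called. */
	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	dma_async_memcpy_issue_pending(chan);

	/* Busy-wait for completion.  A real user would overlap the copy
	 * with other work, which is the point of the offload. */
	while (dma_async_memcpy_complete(chan, cookie, &done, &used)
			== DMA_IN_PROGRESS)
		cpu_relax();

	dma_chan_put(chan);	/* drop the ref taken by get_softnet_dma() */
}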
net/core/dev.c
@@ -115,6 +115,7 @@
 #include <net/iw_handler.h>
 #include <asm/current.h>
 #include <linux/audit.h>
+#include <linux/dmaengine.h>
 
 /*
  * The list of packet types we will receive (as opposed to discard)
... ... @@ -148,6 +149,12 @@
148 149 static struct list_head ptype_base[16]; /* 16 way hashed list */
149 150 static struct list_head ptype_all; /* Taps */
150 151  
  152 +#ifdef CONFIG_NET_DMA
  153 +static struct dma_client *net_dma_client;
  154 +static unsigned int net_dma_count;
  155 +static spinlock_t net_dma_event_lock;
  156 +#endif
  157 +
151 158 /*
152 159 * The @dev_base list is protected by @dev_base_lock and the rtnl
153 160 * semaphore.
... ... @@ -1846,6 +1853,19 @@
1846 1853 }
1847 1854 }
1848 1855 out:
  1856 +#ifdef CONFIG_NET_DMA
  1857 + /*
  1858 + * There may not be any more sk_buffs coming right now, so push
  1859 + * any pending DMA copies to hardware
  1860 + */
  1861 + if (net_dma_client) {
  1862 + struct dma_chan *chan;
  1863 + rcu_read_lock();
  1864 + list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
  1865 + dma_async_memcpy_issue_pending(chan);
  1866 + rcu_read_unlock();
  1867 + }
  1868 +#endif
1849 1869 local_irq_enable();
1850 1870 return;
1851 1871  
1852 1872  
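Note the placement: the issue_pending kick is deferred to the end of
net_rx_action(), so copies queued while the softirq drains its backlog are
batched and handed to the engine in one go rather than submitted one sk_buff
at a time.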
... ... @@ -3300,7 +3320,89 @@
3300 3320 }
3301 3321 #endif /* CONFIG_HOTPLUG_CPU */
3302 3322  
+#ifdef CONFIG_NET_DMA
+/**
+ * net_dma_rebalance - redistribute the allocated channels over the online CPUs
+ * This is called when the number of channels allocated to the net_dma_client
+ * changes.  The net_dma_client tries to have one DMA channel per CPU.
+ */
+static void net_dma_rebalance(void)
+{
+	unsigned int cpu, i, n;
+	struct dma_chan *chan;
 
+	lock_cpu_hotplug();
+
+	if (net_dma_count == 0) {
+		for_each_online_cpu(cpu)
+			rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
+		unlock_cpu_hotplug();
+		return;
+	}
+
+	i = 0;
+	cpu = first_cpu(cpu_online_map);
+
+	rcu_read_lock();
+	list_for_each_entry(chan, &net_dma_client->channels, client_node) {
+		n = ((num_online_cpus() / net_dma_count)
+			+ (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+
+		while (n) {
+			per_cpu(softnet_data.net_dma, cpu) = chan;
+			cpu = next_cpu(cpu, cpu_online_map);
+			n--;
+		}
+		i++;
+	}
+	rcu_read_unlock();
+
+	unlock_cpu_hotplug();
+}
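To make the arithmetic concrete: with 8 online CPUs and 3 channels,
num_online_cpus() / net_dma_count is 2 with a remainder of 2, so the first two
channels each serve 3 CPUs and the third serves the remaining 2. Once there
are at least as many channels as CPUs, every CPU gets a dedicated channel and
any surplus channels are left unassigned.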
+
+/**
+ * netdev_dma_event - event callback for the net_dma_client
+ * @client: should always be net_dma_client
+ * @chan: DMA channel the event refers to
+ * @event: channel added or removed
+ */
+static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+	enum dma_event event)
+{
+	spin_lock(&net_dma_event_lock);
+	switch (event) {
+	case DMA_RESOURCE_ADDED:
+		net_dma_count++;
+		net_dma_rebalance();
+		break;
+	case DMA_RESOURCE_REMOVED:
+		net_dma_count--;
+		net_dma_rebalance();
+		break;
+	default:
+		break;
+	}
+	spin_unlock(&net_dma_event_lock);
+}
+
+/**
+ * netdev_dma_register - register the networking subsystem as a DMA client
+ */
+static int __init netdev_dma_register(void)
+{
+	spin_lock_init(&net_dma_event_lock);
+	net_dma_client = dma_async_client_register(netdev_dma_event);
+	if (net_dma_client == NULL)
+		return -ENOMEM;
+
+	dma_async_client_chan_request(net_dma_client, num_online_cpus());
+	return 0;
+}
+
+#else
+static int __init netdev_dma_register(void) { return -ENODEV; }
+#endif /* CONFIG_NET_DMA */
+
 /*
  * Initialize the DEV module. At boot time this walks the device list and
  * unhooks any devices that fail to initialise (normally hardware not
@@ -3352,6 +3454,8 @@
 		queue->backlog_dev.poll = process_backlog;
 		atomic_set(&queue->backlog_dev.refcnt, 1);
 	}
+
+	netdev_dma_register();
 
 	dev_boot_phase = 0;
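One note on lifecycle: netdev_dma_register() is called once from
net_dev_init() and the client is never unregistered, which is reasonable for a
boot-time subsystem. A user of the same client API that needed teardown would
mirror the registration; a sketch, using only the dma_async_client_* calls
from this API (the function name netdev_dma_unregister is hypothetical, not
part of this commit):

/* Hypothetical teardown, not part of this commit: release the DMA
 * client so the engine layer can hand its channels to other users. */
static void netdev_dma_unregister(void)
{
	if (net_dma_client) {
		dma_async_client_unregister(net_dma_client);
		net_dma_client = NULL;
	}
}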