Blame view

drivers/dma/dmaengine.c 32.7 KB
  /*
   * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option)
   * any later version.
   *
   * This program is distributed in the hope that it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program; if not, write to the Free Software Foundation, Inc., 59
   * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
   *
   * The full GNU General Public License is included in this distribution in the
   * file called COPYING.
   */
  
  /*
   * This code implements the DMA subsystem. It provides a HW-neutral interface
   * for other kernel code to use asynchronous memory copy capabilities,
   * if present, and allows different HW DMA drivers to register as providing
   * this capability.
   *
   * Due to the fact we are accelerating what is already a relatively fast
   * operation, the code goes to great lengths to avoid additional overhead,
   * such as locking.
   *
   * LOCKING:
   *
   * The subsystem keeps a global list of dma_device structs; it is protected by
   * a mutex, dma_list_mutex.
   *
   * A subsystem can get access to a channel by calling dmaengine_get() followed
   * by dma_find_channel(), or if it has need for an exclusive channel it can call
   * dma_request_channel().  Once a channel is allocated a reference is taken
   * against its corresponding driver to disable removal.
   *
   * Each device has a channels list, which runs unlocked but is never modified
   * once the device is registered; it is just set up by the driver.
   *
   * See Documentation/dmaengine.txt for more details
   */
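
  /*
   * Example: a minimal sketch of the opportunistic client flow described
   * above (illustrative only; "dst", "src" and "len" are assumed to be
   * DMA-mappable kernel buffers, they are not defined in this file):
   *
   *	dmaengine_get();
   *	chan = dma_find_channel(DMA_MEMCPY);
   *	if (chan)
   *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
   *	dma_issue_pending_all();
   *	...
   *	dmaengine_put();
   */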
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/dma-mapping.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/device.h>
  #include <linux/dmaengine.h>
  #include <linux/hardirq.h>
  #include <linux/spinlock.h>
  #include <linux/percpu.h>
  #include <linux/rcupdate.h>
  #include <linux/mutex.h>
  #include <linux/jiffies.h>
  #include <linux/rculist.h>
  #include <linux/idr.h>
  #include <linux/slab.h>
  #include <linux/acpi.h>
  #include <linux/acpi_dma.h>
  #include <linux/of_dma.h>
  #include <linux/mempool.h>
  
  static DEFINE_MUTEX(dma_list_mutex);
  static DEFINE_IDR(dma_idr);
  static LIST_HEAD(dma_device_list);
  static long dmaengine_ref_count;
  
  /* --- sysfs implementation --- */
  /**
   * dev_to_dma_chan - convert a device pointer to its sysfs container object
   * @dev - device node
   *
   * Must be called under dma_list_mutex
   */
  static struct dma_chan *dev_to_dma_chan(struct device *dev)
  {
  	struct dma_chan_dev *chan_dev;
  
  	chan_dev = container_of(dev, typeof(*chan_dev), device);
  	return chan_dev->chan;
  }
  static ssize_t memcpy_count_show(struct device *dev,
  				 struct device_attribute *attr, char *buf)
  {
  	struct dma_chan *chan;
  	unsigned long count = 0;
  	int i;
  	int err;
  
  	mutex_lock(&dma_list_mutex);
  	chan = dev_to_dma_chan(dev);
  	if (chan) {
  		for_each_possible_cpu(i)
  			count += per_cpu_ptr(chan->local, i)->memcpy_count;
  		err = sprintf(buf, "%lu\n", count);
  	} else
  		err = -ENODEV;
  	mutex_unlock(&dma_list_mutex);
  
  	return err;
  }
  static DEVICE_ATTR_RO(memcpy_count);

  static ssize_t bytes_transferred_show(struct device *dev,
  				      struct device_attribute *attr, char *buf)
  {
  	struct dma_chan *chan;
  	unsigned long count = 0;
  	int i;
  	int err;
  
  	mutex_lock(&dma_list_mutex);
  	chan = dev_to_dma_chan(dev);
  	if (chan) {
  		for_each_possible_cpu(i)
  			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
  		err = sprintf(buf, "%lu\n", count);
  	} else
  		err = -ENODEV;
  	mutex_unlock(&dma_list_mutex);
  
  	return err;
  }
  static DEVICE_ATTR_RO(bytes_transferred);

  static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
  			   char *buf)
  {
  	struct dma_chan *chan;
  	int err;
  
  	mutex_lock(&dma_list_mutex);
  	chan = dev_to_dma_chan(dev);
  	if (chan)
  		err = sprintf(buf, "%d\n", chan->client_count);
  	else
  		err = -ENODEV;
  	mutex_unlock(&dma_list_mutex);
  
  	return err;
  }
  static DEVICE_ATTR_RO(in_use);

  static struct attribute *dma_dev_attrs[] = {
  	&dev_attr_memcpy_count.attr,
  	&dev_attr_bytes_transferred.attr,
  	&dev_attr_in_use.attr,
  	NULL,
  };
  ATTRIBUTE_GROUPS(dma_dev);

  static void chan_dev_release(struct device *dev)
  {
  	struct dma_chan_dev *chan_dev;
  
  	chan_dev = container_of(dev, typeof(*chan_dev), device);
  	if (atomic_dec_and_test(chan_dev->idr_ref)) {
  		mutex_lock(&dma_list_mutex);
  		idr_remove(&dma_idr, chan_dev->dev_id);
  		mutex_unlock(&dma_list_mutex);
  		kfree(chan_dev->idr_ref);
  	}
  	kfree(chan_dev);
  }
  
  static struct class dma_devclass = {
  	.name		= "dma",
  	.dev_groups	= dma_dev_groups,
  	.dev_release	= chan_dev_release,
  };
  
  /* --- client and device registration --- */
  #define dma_device_satisfies_mask(device, mask) \
  	__dma_device_satisfies_mask((device), &(mask))
  static int
  __dma_device_satisfies_mask(struct dma_device *device,
  			    const dma_cap_mask_t *want)
  {
  	dma_cap_mask_t has;
  
  	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
  		DMA_TX_TYPE_END);
  	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
  }
  static struct module *dma_chan_to_owner(struct dma_chan *chan)
  {
  	return chan->device->dev->driver->owner;
  }
  
  /**
   * balance_ref_count - catch up the channel reference count
   * @chan - channel to balance ->client_count versus dmaengine_ref_count
   *
   * balance_ref_count must be called under dma_list_mutex
   */
  static void balance_ref_count(struct dma_chan *chan)
  {
  	struct module *owner = dma_chan_to_owner(chan);
  
  	while (chan->client_count < dmaengine_ref_count) {
  		__module_get(owner);
  		chan->client_count++;
  	}
  }
  
  /**
   * dma_chan_get - try to grab a dma channel's parent driver module
   * @chan - channel to grab
   *
   * Must be called under dma_list_mutex
   */
  static int dma_chan_get(struct dma_chan *chan)
  {
  	int err = -ENODEV;
  	struct module *owner = dma_chan_to_owner(chan);
  
  	if (chan->client_count) {
  		__module_get(owner);
  		err = 0;
  	} else if (try_module_get(owner))
  		err = 0;
  
  	if (err == 0)
  		chan->client_count++;
  
  	/* allocate upon first client reference */
  	if (chan->client_count == 1 && err == 0) {
  		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
  
  		if (desc_cnt < 0) {
  			err = desc_cnt;
  			chan->client_count = 0;
  			module_put(owner);
  		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
  			balance_ref_count(chan);
  	}
  
  	return err;
  }
  
  /**
   * dma_chan_put - drop a reference to a dma channel's parent driver module
   * @chan - channel to release
   *
   * Must be called under dma_list_mutex
   */
  static void dma_chan_put(struct dma_chan *chan)
  {
  	if (!chan->client_count)
  		return; /* this channel failed alloc_chan_resources */
  	chan->client_count--;
  	module_put(dma_chan_to_owner(chan));
  	if (chan->client_count == 0)
  		chan->device->device_free_chan_resources(chan);
  }
  enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
  {
  	enum dma_status status;
  	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
  
  	dma_async_issue_pending(chan);
  	do {
  		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
  		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
  			pr_err("%s: timeout!\n", __func__);
  			return DMA_ERROR;
  		}
  		if (status != DMA_IN_PROGRESS)
  			break;
  		cpu_relax();
  	} while (1);
  
  	return status;
  }
  EXPORT_SYMBOL(dma_sync_wait);
  /**
   * dma_cap_mask_all - enable iteration over all operation types
   */
  static dma_cap_mask_t dma_cap_mask_all;
  
  /**
   * dma_chan_tbl_ent - tracks channel allocations per core/operation
   * @chan - associated channel for this entry
   */
  struct dma_chan_tbl_ent {
  	struct dma_chan *chan;
  };
  
  /**
   * channel_table - percpu lookup table for memory-to-memory offload providers
   */
  static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
  
  static int __init dma_channel_table_init(void)
  {
  	enum dma_transaction_type cap;
  	int err = 0;
  
  	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
  	/* 'interrupt', 'private', and 'slave' are channel capabilities,
  	 * but are not associated with an operation so they do not need
  	 * an entry in the channel_table
  	 */
  	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
  	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
  	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
  
  	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
  		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
  		if (!channel_table[cap]) {
  			err = -ENOMEM;
  			break;
  		}
  	}
  
  	if (err) {
  		pr_err("initialization failure\n");
  		for_each_dma_cap_mask(cap, dma_cap_mask_all)
  			if (channel_table[cap])
  				free_percpu(channel_table[cap]);
  	}
  
  	return err;
  }
  arch_initcall(dma_channel_table_init);
  
  /**
   * dma_find_channel - find a channel to carry out the operation
   * @tx_type: transaction type
   */
  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
  {
  	return this_cpu_read(channel_table[tx_type]->chan);
  }
  EXPORT_SYMBOL(dma_find_channel);
  /*
   * net_dma_find_channel - find a channel for net_dma
   * net_dma has alignment requirements
   */
  struct dma_chan *net_dma_find_channel(void)
  {
  	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
  	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
  		return NULL;
  
  	return chan;
  }
  EXPORT_SYMBOL(net_dma_find_channel);
  /**
   * dma_issue_pending_all - flush all pending operations across all channels
   */
  void dma_issue_pending_all(void)
  {
  	struct dma_device *device;
  	struct dma_chan *chan;
  	rcu_read_lock();
  	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  			continue;
  		list_for_each_entry(chan, &device->channels, device_node)
  			if (chan->client_count)
  				device->device_issue_pending(chan);
  	}
  	rcu_read_unlock();
  }
  EXPORT_SYMBOL(dma_issue_pending_all);
  
  /**
   * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
   */
  static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
  {
  	int node = dev_to_node(chan->device->dev);
  	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
  }
  
  /**
   * min_chan - returns the channel with min count and in the same numa-node as the cpu
   * @cap: capability to match
   * @cpu: cpu index which the channel should be close to
   *
   * If some channels are close to the given cpu, the one with the lowest
   * reference count is returned. Otherwise, cpu is ignored and only the
   * reference count is taken into account.
   * Must be called under dma_list_mutex.
   */
  static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
  {
  	struct dma_device *device;
  	struct dma_chan *chan;
  	struct dma_chan *min = NULL;
  	struct dma_chan *localmin = NULL;
  
  	list_for_each_entry(device, &dma_device_list, global_node) {
  		if (!dma_has_cap(cap, device->cap_mask) ||
  		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
  			continue;
  		list_for_each_entry(chan, &device->channels, device_node) {
  			if (!chan->client_count)
  				continue;
  			if (!min || chan->table_count < min->table_count)
  				min = chan;
  			if (dma_chan_is_local(chan, cpu))
  				if (!localmin ||
  				    chan->table_count < localmin->table_count)
  					localmin = chan;
  		}
  	}
  	chan = localmin ? localmin : min;
  
  	if (chan)
  		chan->table_count++;
  
  	return chan;
  }
  
  /**
   * dma_channel_rebalance - redistribute the available channels
   *
   * Optimize for cpu isolation (each cpu gets a dedicated channel for an
   * operation type) in the SMP case,  and operation isolation (avoid
   * multi-tasking channels) in the non-SMP case.  Must be called under
   * dma_list_mutex.
   */
  static void dma_channel_rebalance(void)
  {
  	struct dma_chan *chan;
  	struct dma_device *device;
  	int cpu;
  	int cap;
  
  	/* undo the last distribution */
  	for_each_dma_cap_mask(cap, dma_cap_mask_all)
  		for_each_possible_cpu(cpu)
  			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
  	list_for_each_entry(device, &dma_device_list, global_node) {
  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  			continue;
  		list_for_each_entry(chan, &device->channels, device_node)
  			chan->table_count = 0;
  	}
  
  	/* don't populate the channel_table if no clients are available */
  	if (!dmaengine_ref_count)
  		return;
  
  	/* redistribute available channels */
  	for_each_dma_cap_mask(cap, dma_cap_mask_all)
  		for_each_online_cpu(cpu) {
  			chan = min_chan(cap, cpu);
  			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
  		}
  }
  static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
  					  struct dma_device *dev,
  					  dma_filter_fn fn, void *fn_param)
  {
  	struct dma_chan *chan;
  
  	if (!__dma_device_satisfies_mask(dev, mask)) {
  		pr_debug("%s: wrong capabilities\n", __func__);
  		return NULL;
  	}
  	/* devices with multiple channels need special handling as we need to
  	 * ensure that all channels are either private or public.
  	 */
  	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
  		list_for_each_entry(chan, &dev->channels, device_node) {
  			/* some channels are already publicly allocated */
  			if (chan->client_count)
  				return NULL;
  		}
  
  	list_for_each_entry(chan, &dev->channels, device_node) {
  		if (chan->client_count) {
  			pr_debug("%s: %s busy\n",
  				 __func__, dma_chan_name(chan));
  			continue;
  		}
  		if (fn && !fn(chan, fn_param)) {
  			pr_debug("%s: %s filter said false\n",
  				 __func__, dma_chan_name(chan));
  			continue;
  		}
  		return chan;
  	}
  	return NULL;
  }
  
  /**
   * dma_get_slave_channel - try to get specific channel exclusively
   * @chan: target channel
   */
  struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
  {
  	int err = -EBUSY;
  
  	/* lock against __dma_request_channel */
  	mutex_lock(&dma_list_mutex);
  	if (chan->client_count == 0) {
  		err = dma_chan_get(chan);
  		if (err)
  			pr_debug("%s: failed to get %s: (%d)\n",
  				__func__, dma_chan_name(chan), err);
  	} else
  		chan = NULL;
  
  	mutex_unlock(&dma_list_mutex);
  
  	return chan;
  }
  EXPORT_SYMBOL_GPL(dma_get_slave_channel);
  struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
  {
  	dma_cap_mask_t mask;
  	struct dma_chan *chan;
  	int err;
  
  	dma_cap_zero(mask);
  	dma_cap_set(DMA_SLAVE, mask);
  
  	/* lock against __dma_request_channel */
  	mutex_lock(&dma_list_mutex);
  
  	chan = private_candidate(&mask, device, NULL, NULL);
  	if (chan) {
  		err = dma_chan_get(chan);
  		if (err) {
  			pr_debug("%s: failed to get %s: (%d)\n",
  				__func__, dma_chan_name(chan), err);
  			chan = NULL;
  		}
  	}
  
  	mutex_unlock(&dma_list_mutex);
  
  	return chan;
  }
  EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
  /**
   * __dma_request_channel - try to allocate an exclusive channel
   * @mask: capabilities that the channel must satisfy
   * @fn: optional callback to disposition available channels
   * @fn_param: opaque parameter to pass to dma_filter_fn
   *
   * Returns pointer to appropriate DMA channel on success or NULL.
   */
  struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
  				       dma_filter_fn fn, void *fn_param)
  {
  	struct dma_device *device, *_d;
  	struct dma_chan *chan = NULL;
  	int err;
  
  	/* Find a channel */
  	mutex_lock(&dma_list_mutex);
  	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
  		chan = private_candidate(mask, device, fn, fn_param);
  		if (chan) {
  			/* Found a suitable channel, try to grab, prep, and
  			 * return it.  We first set DMA_PRIVATE to disable
  			 * balance_ref_count as this channel will not be
  			 * published in the general-purpose allocator
  			 */
  			dma_cap_set(DMA_PRIVATE, device->cap_mask);
  			device->privatecnt++;
  			err = dma_chan_get(chan);
  
  			if (err == -ENODEV) {
  				pr_debug("%s: %s module removed\n",
  					 __func__, dma_chan_name(chan));
  				list_del_rcu(&device->global_node);
  			} else if (err)
  				pr_debug("%s: failed to get %s: (%d)\n",
  					 __func__, dma_chan_name(chan), err);
  			else
  				break;
  			if (--device->privatecnt == 0)
  				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
  			chan = NULL;
  		}
  	}
  	mutex_unlock(&dma_list_mutex);
  	pr_debug("%s: %s (%s)\n",
  		 __func__,
  		 chan ? "success" : "fail",
  		 chan ? dma_chan_name(chan) : NULL);
  
  	return chan;
  }
  EXPORT_SYMBOL_GPL(__dma_request_channel);
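
  /*
   * Example: a minimal sketch of exclusive channel allocation through the
   * dma_request_channel() wrapper around the function above (illustrative
   * only; my_filter() and its "match" cookie are assumptions, not part of
   * this file):
   *
   *	static bool my_filter(struct dma_chan *chan, void *param)
   *	{
   *		return chan->device->dev == param;
   *	}
   *
   *	dma_cap_mask_t mask;
   *
   *	dma_cap_zero(mask);
   *	dma_cap_set(DMA_MEMCPY, mask);
   *	chan = dma_request_channel(mask, my_filter, match);
   *	if (chan) {
   *		...
   *		dma_release_channel(chan);
   *	}
   */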
  /**
   * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
   * @dev:	pointer to client device structure
   * @name:	slave channel name
   *
   * Returns pointer to appropriate DMA channel on success or an error pointer.
   */
  struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
  						  const char *name)
  {
  	/* If device-tree is present get slave info from here */
  	if (dev->of_node)
  		return of_dma_request_slave_channel(dev->of_node, name);
  	/* If device was enumerated by ACPI get slave info from here */
  	if (ACPI_HANDLE(dev))
  		return acpi_dma_request_slave_chan_by_name(dev, name);

  	return ERR_PTR(-ENODEV);
  }
  EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
  
  /**
   * dma_request_slave_channel - try to allocate an exclusive slave channel
   * @dev:	pointer to client device structure
   * @name:	slave channel name
   *
   * Returns pointer to appropriate DMA channel on success or NULL.
   */
  struct dma_chan *dma_request_slave_channel(struct device *dev,
  					   const char *name)
  {
  	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
  	if (IS_ERR(ch))
  		return NULL;
  	return ch;
9a6cecc84   Jon Hunter   dmaengine: add he...
643
644
  }
  EXPORT_SYMBOL_GPL(dma_request_slave_channel);
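
  /*
   * Example: a minimal sketch of slave (peripheral) DMA usage built on the
   * helper above (illustrative only; "dev", the "tx" channel name, the
   * dma_slave_config values and the mapped buffer are assumptions, not part
   * of this file):
   *
   *	struct dma_slave_config cfg = {
   *		.direction = DMA_MEM_TO_DEV,
   *		.dst_addr = fifo_phys_addr,
   *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
   *	};
   *
   *	chan = dma_request_slave_channel(dev, "tx");
   *	if (!chan)
   *		return -ENODEV;
   *	dmaengine_slave_config(chan, &cfg);
   *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
   *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
   *	cookie = dmaengine_submit(desc);
   *	dma_async_issue_pending(chan);
   *	...
   *	dma_release_channel(chan);
   */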
  void dma_release_channel(struct dma_chan *chan)
  {
  	mutex_lock(&dma_list_mutex);
  	WARN_ONCE(chan->client_count != 1,
  		  "chan reference count %d != 1\n", chan->client_count);
  	dma_chan_put(chan);
  	/* drop PRIVATE cap enabled by __dma_request_channel() */
  	if (--chan->device->privatecnt == 0)
  		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
  	mutex_unlock(&dma_list_mutex);
  }
  EXPORT_SYMBOL_GPL(dma_release_channel);
  /**
   * dmaengine_get - register interest in dma_channels
   */
  void dmaengine_get(void)
  {
  	struct dma_device *device, *_d;
  	struct dma_chan *chan;
  	int err;
  
  	mutex_lock(&dma_list_mutex);
  	dmaengine_ref_count++;
  
  	/* try to grab channels */
  	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  			continue;
  		list_for_each_entry(chan, &device->channels, device_node) {
  			err = dma_chan_get(chan);
  			if (err == -ENODEV) {
  				/* module removed before we could use it */
  				list_del_rcu(&device->global_node);
  				break;
  			} else if (err)
  				pr_debug("%s: failed to get %s: (%d)\n",
  				       __func__, dma_chan_name(chan), err);
  		}
  	}
  
  	/* if this is the first reference and there were channels
  	 * waiting we need to rebalance to get those channels
  	 * incorporated into the channel table
  	 */
  	if (dmaengine_ref_count == 1)
  		dma_channel_rebalance();
  	mutex_unlock(&dma_list_mutex);
  }
  EXPORT_SYMBOL(dmaengine_get);
  
  /**
   * dmaengine_put - let dma drivers be removed when ref_count == 0
   */
  void dmaengine_put(void)
  {
  	struct dma_device *device;
  	struct dma_chan *chan;
  
  	mutex_lock(&dma_list_mutex);
  	dmaengine_ref_count--;
  	BUG_ON(dmaengine_ref_count < 0);
  	/* drop channel references */
  	list_for_each_entry(device, &dma_device_list, global_node) {
  		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  			continue;
  		list_for_each_entry(chan, &device->channels, device_node)
  			dma_chan_put(chan);
  	}
  	mutex_unlock(&dma_list_mutex);
  }
  EXPORT_SYMBOL(dmaengine_put);

  static bool device_has_all_tx_types(struct dma_device *device)
  {
  	/* A device that satisfies this test has channels that will never cause
  	 * an async_tx channel switch event as all possible operation types can
  	 * be handled.
  	 */
  	#ifdef CONFIG_ASYNC_TX_DMA
  	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
  		return false;
  	#endif
  
  	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
  	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
  		return false;
  	#endif
  	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
  	if (!dma_has_cap(DMA_XOR, device->cap_mask))
  		return false;
  
  	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
  	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
  		return false;
  	#endif
  	#endif
  
  	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
  	if (!dma_has_cap(DMA_PQ, device->cap_mask))
  		return false;
  
  	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
  	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
  		return false;
  	#endif
  	#endif
  
  	return true;
  }
  static int get_dma_id(struct dma_device *device)
  {
  	int rc;
  	mutex_lock(&dma_list_mutex);

  	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
  	if (rc >= 0)
  		device->dev_id = rc;
  
  	mutex_unlock(&dma_list_mutex);
  	return rc < 0 ? rc : 0;
  }
  /**
   * dma_async_device_register - registers DMA devices found
   * @device: &dma_device
   */
  int dma_async_device_register(struct dma_device *device)
  {
  	int chancnt = 0, rc;
  	struct dma_chan* chan;
  	atomic_t *idr_ref;
  
  	if (!device)
  		return -ENODEV;
  
  	/* validate device routines */
  	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
  		!device->device_prep_dma_memcpy);
  	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
  		!device->device_prep_dma_xor);
  	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
  		!device->device_prep_dma_xor_val);
  	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
  		!device->device_prep_dma_pq);
  	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
  		!device->device_prep_dma_pq_val);
  	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
  		!device->device_prep_dma_interrupt);
  	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
  		!device->device_prep_dma_sg);
  	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
  		!device->device_prep_dma_cyclic);
  	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
  		!device->device_control);
  	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
  		!device->device_prep_interleaved_dma);
  
  	BUG_ON(!device->device_alloc_chan_resources);
  	BUG_ON(!device->device_free_chan_resources);
  	BUG_ON(!device->device_tx_status);
  	BUG_ON(!device->device_issue_pending);
  	BUG_ON(!device->dev);
  
  	/* note: this only matters in the
  	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
  	 */
  	if (device_has_all_tx_types(device))
  		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
  
  	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
  	if (!idr_ref)
  		return -ENOMEM;
  	rc = get_dma_id(device);
  	if (rc != 0) {
  		kfree(idr_ref);
  		return rc;
  	}
  
  	atomic_set(idr_ref, 0);
  
  	/* represent channels in sysfs. Probably want devs too */
  	list_for_each_entry(chan, &device->channels, device_node) {
  		rc = -ENOMEM;
  		chan->local = alloc_percpu(typeof(*chan->local));
  		if (chan->local == NULL)
  			goto err_out;
  		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
  		if (chan->dev == NULL) {
  			free_percpu(chan->local);
  			chan->local = NULL;
  			goto err_out;
  		}
  
  		chan->chan_id = chancnt++;
  		chan->dev->device.class = &dma_devclass;
  		chan->dev->device.parent = device->dev;
  		chan->dev->chan = chan;
  		chan->dev->idr_ref = idr_ref;
  		chan->dev->dev_id = device->dev_id;
  		atomic_inc(idr_ref);
  		dev_set_name(&chan->dev->device, "dma%dchan%d",
  			     device->dev_id, chan->chan_id);
  
  		rc = device_register(&chan->dev->device);
  		if (rc) {
  			free_percpu(chan->local);
  			chan->local = NULL;
  			kfree(chan->dev);
  			atomic_dec(idr_ref);
  			goto err_out;
  		}
  		chan->client_count = 0;
  	}
  	device->chancnt = chancnt;
  
  	mutex_lock(&dma_list_mutex);
  	/* take references on public channels */
  	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
  		list_for_each_entry(chan, &device->channels, device_node) {
  			/* if clients are already waiting for channels we need
  			 * to take references on their behalf
  			 */
  			if (dma_chan_get(chan) == -ENODEV) {
  				/* note we can only get here for the first
  				 * channel as the remaining channels are
  				 * guaranteed to get a reference
  				 */
  				rc = -ENODEV;
  				mutex_unlock(&dma_list_mutex);
  				goto err_out;
  			}
  		}
  	list_add_tail_rcu(&device->global_node, &dma_device_list);
  	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  		device->privatecnt++;	/* Always private */
  	dma_channel_rebalance();
  	mutex_unlock(&dma_list_mutex);
  	return 0;
  
  err_out:
  	/* if we never registered a channel just release the idr */
  	if (atomic_read(idr_ref) == 0) {
  		mutex_lock(&dma_list_mutex);
  		idr_remove(&dma_idr, device->dev_id);
  		mutex_unlock(&dma_list_mutex);
  		kfree(idr_ref);
  		return rc;
  	}
  	list_for_each_entry(chan, &device->channels, device_node) {
  		if (chan->local == NULL)
  			continue;
  		mutex_lock(&dma_list_mutex);
  		chan->dev->chan = NULL;
  		mutex_unlock(&dma_list_mutex);
  		device_unregister(&chan->dev->device);
  		free_percpu(chan->local);
  	}
  	return rc;
  }
  EXPORT_SYMBOL(dma_async_device_register);
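
  /*
   * Example: a minimal sketch of the provider side expected by the checks
   * above (illustrative only; the "my_*" driver structure and callbacks are
   * hypothetical, only the dma_device fields and helpers are real):
   *
   *	dma_cap_set(DMA_MEMCPY, my_dev->ddev.cap_mask);
   *	my_dev->ddev.dev = &pdev->dev;
   *	my_dev->ddev.device_alloc_chan_resources = my_alloc_chan_resources;
   *	my_dev->ddev.device_free_chan_resources = my_free_chan_resources;
   *	my_dev->ddev.device_prep_dma_memcpy = my_prep_memcpy;
   *	my_dev->ddev.device_tx_status = my_tx_status;
   *	my_dev->ddev.device_issue_pending = my_issue_pending;
   *
   *	INIT_LIST_HEAD(&my_dev->ddev.channels);
   *	my_chan->chan.device = &my_dev->ddev;
   *	list_add_tail(&my_chan->chan.device_node, &my_dev->ddev.channels);
   *
   *	rc = dma_async_device_register(&my_dev->ddev);
   */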
  
  /**
   * dma_async_device_unregister - unregister a DMA device
   * @device: &dma_device
   *
   * This routine is called by dma driver exit routines, dmaengine holds module
   * references to prevent it being called while channels are in use.
   */
  void dma_async_device_unregister(struct dma_device *device)
  {
  	struct dma_chan *chan;
  
  	mutex_lock(&dma_list_mutex);
  	list_del_rcu(&device->global_node);
  	dma_channel_rebalance();
  	mutex_unlock(&dma_list_mutex);
  
  	list_for_each_entry(chan, &device->channels, device_node) {
  		WARN_ONCE(chan->client_count,
  			  "%s called while %d clients hold a reference\n",
  			  __func__, chan->client_count);
  		mutex_lock(&dma_list_mutex);
  		chan->dev->chan = NULL;
  		mutex_unlock(&dma_list_mutex);
  		device_unregister(&chan->dev->device);
  		free_percpu(chan->local);
  	}
  }
  EXPORT_SYMBOL(dma_async_device_unregister);

  struct dmaengine_unmap_pool {
  	struct kmem_cache *cache;
  	const char *name;
  	mempool_t *pool;
  	size_t size;
  };

  #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
  static struct dmaengine_unmap_pool unmap_pool[] = {
  	__UNMAP_POOL(2),
  	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
  	__UNMAP_POOL(16),
  	__UNMAP_POOL(128),
  	__UNMAP_POOL(256),
  	#endif
  };

  static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
  {
  	int order = get_count_order(nr);
  
  	switch (order) {
  	case 0 ... 1:
  		return &unmap_pool[0];
  	case 2 ... 4:
  		return &unmap_pool[1];
  	case 5 ... 7:
  		return &unmap_pool[2];
  	case 8:
  		return &unmap_pool[3];
  	default:
  		BUG();
  		return NULL;
  	}
  }

  static void dmaengine_unmap(struct kref *kref)
  {
  	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
  	struct device *dev = unmap->dev;
  	int cnt, i;
  
  	cnt = unmap->to_cnt;
  	for (i = 0; i < cnt; i++)
  		dma_unmap_page(dev, unmap->addr[i], unmap->len,
  			       DMA_TO_DEVICE);
  	cnt += unmap->from_cnt;
  	for (; i < cnt; i++)
  		dma_unmap_page(dev, unmap->addr[i], unmap->len,
  			       DMA_FROM_DEVICE);
  	cnt += unmap->bidi_cnt;
  	for (; i < cnt; i++) {
  		if (unmap->addr[i] == 0)
  			continue;
  		dma_unmap_page(dev, unmap->addr[i], unmap->len,
  			       DMA_BIDIRECTIONAL);
  	}
  	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
  }

  void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
  {
  	if (unmap)
  		kref_put(&unmap->kref, dmaengine_unmap);
  }
  EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

  static void dmaengine_destroy_unmap_pool(void)
  {
  	int i;
  
  	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
  		struct dmaengine_unmap_pool *p = &unmap_pool[i];
  
  		if (p->pool)
  			mempool_destroy(p->pool);
  		p->pool = NULL;
  		if (p->cache)
  			kmem_cache_destroy(p->cache);
  		p->cache = NULL;
  	}
  }

  static int __init dmaengine_init_unmap_pool(void)
  {
  	int i;

  	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
  		struct dmaengine_unmap_pool *p = &unmap_pool[i];
  		size_t size;

  		size = sizeof(struct dmaengine_unmap_data) +
  		       sizeof(dma_addr_t) * p->size;
  
  		p->cache = kmem_cache_create(p->name, size, 0,
  					     SLAB_HWCACHE_ALIGN, NULL);
  		if (!p->cache)
  			break;
  		p->pool = mempool_create_slab_pool(1, p->cache);
  		if (!p->pool)
  			break;
  	}

  	if (i == ARRAY_SIZE(unmap_pool))
  		return 0;

  	dmaengine_destroy_unmap_pool();
  	return -ENOMEM;
  }

  struct dmaengine_unmap_data *
  dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
  {
  	struct dmaengine_unmap_data *unmap;
  
  	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
  	if (!unmap)
  		return NULL;
  
  	memset(unmap, 0, sizeof(*unmap));
  	kref_init(&unmap->kref);
  	unmap->dev = dev;
  
  	return unmap;
  }
  EXPORT_SYMBOL(dmaengine_get_unmap_data);
  
  /**
   * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
   * @chan: DMA channel to offload copy to
   * @dest_pg: destination page
   * @dest_off: offset in page to copy to
   * @src_pg: source page
   * @src_off: offset in page to copy from
   * @len: length
   *
   * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
   * address according to the DMA mapping API rules for streaming mappings.
   * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
   * (kernel memory or locked user space pages).
   */
  dma_cookie_t
  dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
  	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
  	size_t len)
  {
  	struct dma_device *dev = chan->device;
  	struct dma_async_tx_descriptor *tx;
  	struct dmaengine_unmap_data *unmap;
  	dma_cookie_t cookie;
  	unsigned long flags;

  	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
  	if (!unmap)
  		return -ENOMEM;
  
  	unmap->to_cnt = 1;
  	unmap->from_cnt = 1;
  	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
  				      DMA_TO_DEVICE);
  	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
  				      DMA_FROM_DEVICE);
  	unmap->len = len;
  	flags = DMA_CTRL_ACK;
  	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
  					 len, flags);
  
  	if (!tx) {
  		dmaengine_unmap_put(unmap);
  		return -ENOMEM;
  	}

  	dma_set_unmap(tx, unmap);
  	cookie = tx->tx_submit(tx);
  	dmaengine_unmap_put(unmap);

  	preempt_disable();
  	__this_cpu_add(chan->local->bytes_transferred, len);
  	__this_cpu_inc(chan->local->memcpy_count);
  	preempt_enable();
  
  	return cookie;
  }
  EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
  /**
   * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
   * @chan: DMA channel to offload copy to
   * @dest: destination address (virtual)
   * @src: source address (virtual)
   * @len: length
   *
   * Both @dest and @src must be mappable to a bus address according to the
   * DMA mapping API rules for streaming mappings.
   * Both @dest and @src must stay memory resident (kernel memory or locked
   * user space pages).
   */
  dma_cookie_t
  dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
  			    void *src, size_t len)
  {
  	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
  					 (unsigned long) dest & ~PAGE_MASK,
  					 virt_to_page(src),
  					 (unsigned long) src & ~PAGE_MASK, len);
  }
  EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
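
  /*
   * Example: a minimal sketch of an opportunistic memcpy offload using the
   * helper above (illustrative only; it assumes the caller already holds a
   * dmaengine_get() reference and that "dst", "src" and "len" exist):
   *
   *	chan = dma_find_channel(DMA_MEMCPY);
   *	if (chan) {
   *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
   *		dma_async_issue_pending(chan);
   *		if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
   *			memcpy(dst, src, len);
   *	} else {
   *		memcpy(dst, src, len);
   *	}
   */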
  
  /**
   * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
   * @chan: DMA channel to offload copy to
   * @page: destination page
   * @offset: offset in page to copy to
   * @kdata: source address (virtual)
   * @len: length
   *
   * Both @page/@offset and @kdata must be mappable to a bus address according
   * to the DMA mapping API rules for streaming mappings.
   * Both @page/@offset and @kdata must stay memory resident (kernel memory or
   * locked user space pages)
   */
  dma_cookie_t
  dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
  			   unsigned int offset, void *kdata, size_t len)
  {
  	return dma_async_memcpy_pg_to_pg(chan, page, offset,
  					 virt_to_page(kdata),
  					 (unsigned long) kdata & ~PAGE_MASK, len);
  }
  EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
  void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
  	struct dma_chan *chan)
  {
  	tx->chan = chan;
  	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  	spin_lock_init(&tx->lock);
  	#endif
  }
  EXPORT_SYMBOL(dma_async_tx_descriptor_init);
  /* dma_wait_for_async_tx - spin wait for a transaction to complete
   * @tx: in-flight transaction to wait on
   */
  enum dma_status
  dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  {
  	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
  
  	if (!tx)
  		return DMA_COMPLETE;

  	while (tx->cookie == -EBUSY) {
  		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
  			pr_err("%s timeout waiting for descriptor submission\n",
  			       __func__);
  			return DMA_ERROR;
  		}
  		cpu_relax();
  	}
  	return dma_sync_wait(tx->chan, tx->cookie);
  }
  EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
  
  /* dma_run_dependencies - helper routine for dma drivers to process
   *	(start) dependent operations on their target channel
   * @tx: transaction with dependencies
   */
  void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
  {
  	struct dma_async_tx_descriptor *dep = txd_next(tx);
  	struct dma_async_tx_descriptor *dep_next;
  	struct dma_chan *chan;
  
  	if (!dep)
  		return;
  	/* we'll submit tx->next now, so clear the link */
  	txd_clear_next(tx);
  	chan = dep->chan;
  
  	/* keep submitting up until a channel switch is detected
  	 * in that case we will be called again as a result of
  	 * processing the interrupt from async_tx_channel_switch
  	 */
  	for (; dep; dep = dep_next) {
  		txd_lock(dep);
  		txd_clear_parent(dep);
  		dep_next = txd_next(dep);
  		if (dep_next && dep_next->chan == chan)
  			txd_clear_next(dep); /* ->next will be submitted */
  		else
  			dep_next = NULL; /* submit current dep and terminate */
  		txd_unlock(dep);
  
  		dep->tx_submit(dep);
  	}
  
  	chan->device->device_issue_pending(chan);
  }
  EXPORT_SYMBOL_GPL(dma_run_dependencies);
  static int __init dma_bus_init(void)
  {
  	int err = dmaengine_init_unmap_pool();
  
  	if (err)
  		return err;
  	return class_register(&dma_devclass);
  }
  arch_initcall(dma_bus_init);