Commit 0adff800662f52d0ffc3e420db231769cb3fff13
1 parent
872f05c6e9
Exists in
smarc-imx_3.14.28_1.0.0_ga
and in
1 other branch
dmatest: cleanup redundant "dmatest: " prefixes
...now that we have a common pr_fmt, the per-call "dmatest: " prefixes are redundant and can be dropped.

Acked-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Showing 1 changed file with 10 additions and 11 deletions Inline Diff
drivers/dma/dmatest.c
1 | /* | 1 | /* |
2 | * DMA Engine test module | 2 | * DMA Engine test module |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Atmel Corporation | 4 | * Copyright (C) 2007 Atmel Corporation |
5 | * Copyright (C) 2013 Intel Corporation | 5 | * Copyright (C) 2013 Intel Corporation |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | 12 | ||
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
16 | #include <linux/freezer.h> | 16 | #include <linux/freezer.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/kthread.h> | 18 | #include <linux/kthread.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
21 | #include <linux/random.h> | 21 | #include <linux/random.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/wait.h> | 23 | #include <linux/wait.h> |
24 | #include <linux/ctype.h> | 24 | #include <linux/ctype.h> |
25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
28 | 28 | ||
/*
 * Module parameters.  All are readable and writable through sysfs
 * (S_IRUGO | S_IWUSR), so the test can be reconfigured at runtime.
 */

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

/*
 * NOTE(review): timeout is declared as int (so -1 can request an
 * infinite timeout) but is registered with the "uint" param type --
 * confirm this variable/param-ops type mismatch is intentional.
 */
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");
72 | 72 | ||
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns.  All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine.  Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80	/* byte belongs to a source buffer */
#define PATTERN_DST		0x00	/* byte belongs to a destination buffer */
#define PATTERN_COPY		0x40	/* byte lies inside the DMA transfer window */
#define PATTERN_OVERWRITE	0x20	/* byte is expected to be overwritten */
#define PATTERN_COUNT_MASK	0x1f	/* low bits hold the inverted byte counter */
92 | 92 | ||
93 | struct dmatest_info; | 93 | struct dmatest_info; |
94 | 94 | ||
/*
 * Per-thread test state; one of these exists for each test kthread
 * spawned on a channel.
 */
struct dmatest_thread {
	struct list_head node;		/* entry in dmatest_chan::threads */
	struct dmatest_info *info;	/* back-pointer to the global test info */
	struct task_struct *task;	/* kthread executing dmatest_func() */
	struct dma_chan *chan;		/* channel this thread exercises */
	u8 **srcs;			/* NULL-terminated array of source buffers */
	u8 **dsts;			/* NULL-terminated array of destination buffers */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
	bool done;			/* completion flag -- presumably set when the
					 * thread finishes; confirm against dmatest_func */
};
105 | 105 | ||
/* A channel under test together with the list of threads exercising it. */
struct dmatest_chan {
	struct list_head node;		/* entry in dmatest_info::channels */
	struct dma_chan *chan;		/* the DMA channel itself */
	struct list_head threads;	/* list of struct dmatest_thread */
};
111 | 111 | ||
/**
 * struct dmatest_params - test parameters.
 * @buf_size: size of the memcpy test buffer
 * @channel: bus ID of the channel to test
 * @device: bus ID of the DMA Engine to test
 * @threads_per_chan: number of threads to start per channel
 * @max_channels: maximum number of channels to use
 * @iterations: iterations before stopping test
 * @xor_sources: number of xor source buffers
 * @pq_sources: number of p+q source buffers
 * @timeout: transfer timeout in msec, -1 for infinite timeout
 *
 * A snapshot of the module parameters above; @channel and @device are
 * sized to match the test_channel/test_device parameter strings.
 */
struct dmatest_params {
	unsigned int buf_size;
	char channel[20];
	char device[20];
	unsigned int threads_per_chan;
	unsigned int max_channels;
	unsigned int iterations;
	unsigned int xor_sources;
	unsigned int pq_sources;
	int timeout;
};
135 | 135 | ||
/**
 * struct dmatest_info - test information.
 * @params: test parameters
 * @channels: list of channels under test (struct dmatest_chan)
 * @nr_channels: number of entries on @channels
 * @lock: access protection to the fields of this structure
 * @root: debugfs root directory entry
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params params;

	/* Internal state */
	struct list_head channels;
	unsigned int nr_channels;
	struct mutex lock;

	/* debugfs related stuff */
	struct dentry *root;
};
153 | 153 | ||
/* The single, module-wide instance of the test state. */
static struct dmatest_info test_info;
155 | 155 | ||
156 | static bool dmatest_match_channel(struct dmatest_params *params, | 156 | static bool dmatest_match_channel(struct dmatest_params *params, |
157 | struct dma_chan *chan) | 157 | struct dma_chan *chan) |
158 | { | 158 | { |
159 | if (params->channel[0] == '\0') | 159 | if (params->channel[0] == '\0') |
160 | return true; | 160 | return true; |
161 | return strcmp(dma_chan_name(chan), params->channel) == 0; | 161 | return strcmp(dma_chan_name(chan), params->channel) == 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | static bool dmatest_match_device(struct dmatest_params *params, | 164 | static bool dmatest_match_device(struct dmatest_params *params, |
165 | struct dma_device *device) | 165 | struct dma_device *device) |
166 | { | 166 | { |
167 | if (params->device[0] == '\0') | 167 | if (params->device[0] == '\0') |
168 | return true; | 168 | return true; |
169 | return strcmp(dev_name(device->dev), params->device) == 0; | 169 | return strcmp(dev_name(device->dev), params->device) == 0; |
170 | } | 170 | } |
171 | 171 | ||
/* Fetch one unsigned long worth of random bits from the kernel RNG. */
static unsigned long dmatest_random(void)
{
	unsigned long val;

	get_random_bytes(&val, sizeof(val));

	return val;
}
179 | 179 | ||
180 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, | 180 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, |
181 | unsigned int buf_size) | 181 | unsigned int buf_size) |
182 | { | 182 | { |
183 | unsigned int i; | 183 | unsigned int i; |
184 | u8 *buf; | 184 | u8 *buf; |
185 | 185 | ||
186 | for (; (buf = *bufs); bufs++) { | 186 | for (; (buf = *bufs); bufs++) { |
187 | for (i = 0; i < start; i++) | 187 | for (i = 0; i < start; i++) |
188 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | 188 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); |
189 | for ( ; i < start + len; i++) | 189 | for ( ; i < start + len; i++) |
190 | buf[i] = PATTERN_SRC | PATTERN_COPY | 190 | buf[i] = PATTERN_SRC | PATTERN_COPY |
191 | | (~i & PATTERN_COUNT_MASK); | 191 | | (~i & PATTERN_COUNT_MASK); |
192 | for ( ; i < buf_size; i++) | 192 | for ( ; i < buf_size; i++) |
193 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | 193 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); |
194 | buf++; | 194 | buf++; |
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
198 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, | 198 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, |
199 | unsigned int buf_size) | 199 | unsigned int buf_size) |
200 | { | 200 | { |
201 | unsigned int i; | 201 | unsigned int i; |
202 | u8 *buf; | 202 | u8 *buf; |
203 | 203 | ||
204 | for (; (buf = *bufs); bufs++) { | 204 | for (; (buf = *bufs); bufs++) { |
205 | for (i = 0; i < start; i++) | 205 | for (i = 0; i < start; i++) |
206 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | 206 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); |
207 | for ( ; i < start + len; i++) | 207 | for ( ; i < start + len; i++) |
208 | buf[i] = PATTERN_DST | PATTERN_OVERWRITE | 208 | buf[i] = PATTERN_DST | PATTERN_OVERWRITE |
209 | | (~i & PATTERN_COUNT_MASK); | 209 | | (~i & PATTERN_COUNT_MASK); |
210 | for ( ; i < buf_size; i++) | 210 | for ( ; i < buf_size; i++) |
211 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | 211 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); |
212 | } | 212 | } |
213 | } | 213 | } |
214 | 214 | ||
/*
 * Report one mismatched byte.  The pattern bits are decoded to classify
 * the failure: a source buffer byte that changed, a window byte that was
 * never copied, data copied outside the window, or plain corruption.
 */
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8 diff = actual ^ pattern;	/* which pattern bits differ */
	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		/* source buffers must never be written by the DMA engine */
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		/* byte inside the window still carries the dst pattern */
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		/* byte outside the window carries the src pattern */
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}
236 | 236 | ||
237 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | 237 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, |
238 | unsigned int end, unsigned int counter, u8 pattern, | 238 | unsigned int end, unsigned int counter, u8 pattern, |
239 | bool is_srcbuf) | 239 | bool is_srcbuf) |
240 | { | 240 | { |
241 | unsigned int i; | 241 | unsigned int i; |
242 | unsigned int error_count = 0; | 242 | unsigned int error_count = 0; |
243 | u8 actual; | 243 | u8 actual; |
244 | u8 expected; | 244 | u8 expected; |
245 | u8 *buf; | 245 | u8 *buf; |
246 | unsigned int counter_orig = counter; | 246 | unsigned int counter_orig = counter; |
247 | 247 | ||
248 | for (; (buf = *bufs); bufs++) { | 248 | for (; (buf = *bufs); bufs++) { |
249 | counter = counter_orig; | 249 | counter = counter_orig; |
250 | for (i = start; i < end; i++) { | 250 | for (i = start; i < end; i++) { |
251 | actual = buf[i]; | 251 | actual = buf[i]; |
252 | expected = pattern | (~counter & PATTERN_COUNT_MASK); | 252 | expected = pattern | (~counter & PATTERN_COUNT_MASK); |
253 | if (actual != expected) { | 253 | if (actual != expected) { |
254 | if (error_count < MAX_ERROR_COUNT) | 254 | if (error_count < MAX_ERROR_COUNT) |
255 | dmatest_mismatch(actual, pattern, i, | 255 | dmatest_mismatch(actual, pattern, i, |
256 | counter, is_srcbuf); | 256 | counter, is_srcbuf); |
257 | error_count++; | 257 | error_count++; |
258 | } | 258 | } |
259 | counter++; | 259 | counter++; |
260 | } | 260 | } |
261 | } | 261 | } |
262 | 262 | ||
263 | if (error_count > MAX_ERROR_COUNT) | 263 | if (error_count > MAX_ERROR_COUNT) |
264 | pr_warn("%s: %u errors suppressed\n", | 264 | pr_warn("%s: %u errors suppressed\n", |
265 | current->comm, error_count - MAX_ERROR_COUNT); | 265 | current->comm, error_count - MAX_ERROR_COUNT); |
266 | 266 | ||
267 | return error_count; | 267 | return error_count; |
268 | } | 268 | } |
269 | 269 | ||
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool done;			/* set to true by dmatest_callback() */
	wait_queue_head_t *wait;	/* waiters woken when the transfer completes */
};
275 | 275 | ||
276 | static void dmatest_callback(void *arg) | 276 | static void dmatest_callback(void *arg) |
277 | { | 277 | { |
278 | struct dmatest_done *done = arg; | 278 | struct dmatest_done *done = arg; |
279 | 279 | ||
280 | done->done = true; | 280 | done->done = true; |
281 | wake_up_all(done->wait); | 281 | wake_up_all(done->wait); |
282 | } | 282 | } |
283 | 283 | ||
284 | static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, | 284 | static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, |
285 | unsigned int count) | 285 | unsigned int count) |
286 | { | 286 | { |
287 | while (count--) | 287 | while (count--) |
288 | dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); | 288 | dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); |
289 | } | 289 | } |
290 | 290 | ||
291 | static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, | 291 | static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, |
292 | unsigned int count) | 292 | unsigned int count) |
293 | { | 293 | { |
294 | while (count--) | 294 | while (count--) |
295 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); | 295 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); |
296 | } | 296 | } |
297 | 297 | ||
/*
 * Return the smaller of @x and @y, rounded down to the nearest odd
 * value.  An even minimum yields (minimum - 1); an odd minimum is
 * returned unchanged.
 */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int smaller = (x < y) ? x : y;

	return (smaller & 1) ? smaller : smaller - 1;
}
304 | 304 | ||
305 | static void result(const char *err, unsigned int n, unsigned int src_off, | 305 | static void result(const char *err, unsigned int n, unsigned int src_off, |
306 | unsigned int dst_off, unsigned int len, unsigned long data) | 306 | unsigned int dst_off, unsigned int len, unsigned long data) |
307 | { | 307 | { |
308 | pr_info("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", | 308 | pr_info("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", |
309 | current->comm, n, err, src_off, dst_off, len, data); | 309 | current->comm, n, err, src_off, dst_off, len, data); |
310 | } | 310 | } |
311 | 311 | ||
312 | static void dbg_result(const char *err, unsigned int n, unsigned int src_off, | 312 | static void dbg_result(const char *err, unsigned int n, unsigned int src_off, |
313 | unsigned int dst_off, unsigned int len, | 313 | unsigned int dst_off, unsigned int len, |
314 | unsigned long data) | 314 | unsigned long data) |
315 | { | 315 | { |
316 | pr_debug("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", | 316 | pr_debug("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", |
317 | current->comm, n, err, src_off, dst_off, len, data); | 317 | current->comm, n, err, src_off, dst_off, len, data); |
318 | } | 318 | } |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * This function repeatedly tests DMA transfers of various lengths and | 321 | * This function repeatedly tests DMA transfers of various lengths and |
322 | * offsets for a given operation type until it is told to exit by | 322 | * offsets for a given operation type until it is told to exit by |
323 | * kthread_stop(). There may be multiple threads running this function | 323 | * kthread_stop(). There may be multiple threads running this function |
324 | * in parallel for a single channel, and there may be multiple channels | 324 | * in parallel for a single channel, and there may be multiple channels |
325 | * being tested in parallel. | 325 | * being tested in parallel. |
326 | * | 326 | * |
327 | * Before each test, the source and destination buffer is initialized | 327 | * Before each test, the source and destination buffer is initialized |
328 | * with a known pattern. This pattern is different depending on | 328 | * with a known pattern. This pattern is different depending on |
329 | * whether it's in an area which is supposed to be copied or | 329 | * whether it's in an area which is supposed to be copied or |
330 | * overwritten, and different in the source and destination buffers. | 330 | * overwritten, and different in the source and destination buffers. |
331 | * So if the DMA engine doesn't copy exactly what we tell it to copy, | 331 | * So if the DMA engine doesn't copy exactly what we tell it to copy, |
332 | * we'll notice. | 332 | * we'll notice. |
333 | */ | 333 | */ |
334 | static int dmatest_func(void *data) | 334 | static int dmatest_func(void *data) |
335 | { | 335 | { |
336 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); | 336 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); |
337 | struct dmatest_thread *thread = data; | 337 | struct dmatest_thread *thread = data; |
338 | struct dmatest_done done = { .wait = &done_wait }; | 338 | struct dmatest_done done = { .wait = &done_wait }; |
339 | struct dmatest_info *info; | 339 | struct dmatest_info *info; |
340 | struct dmatest_params *params; | 340 | struct dmatest_params *params; |
341 | struct dma_chan *chan; | 341 | struct dma_chan *chan; |
342 | struct dma_device *dev; | 342 | struct dma_device *dev; |
343 | unsigned int src_off, dst_off, len; | 343 | unsigned int src_off, dst_off, len; |
344 | unsigned int error_count; | 344 | unsigned int error_count; |
345 | unsigned int failed_tests = 0; | 345 | unsigned int failed_tests = 0; |
346 | unsigned int total_tests = 0; | 346 | unsigned int total_tests = 0; |
347 | dma_cookie_t cookie; | 347 | dma_cookie_t cookie; |
348 | enum dma_status status; | 348 | enum dma_status status; |
349 | enum dma_ctrl_flags flags; | 349 | enum dma_ctrl_flags flags; |
350 | u8 *pq_coefs = NULL; | 350 | u8 *pq_coefs = NULL; |
351 | int ret; | 351 | int ret; |
352 | int src_cnt; | 352 | int src_cnt; |
353 | int dst_cnt; | 353 | int dst_cnt; |
354 | int i; | 354 | int i; |
355 | 355 | ||
356 | set_freezable(); | 356 | set_freezable(); |
357 | 357 | ||
358 | ret = -ENOMEM; | 358 | ret = -ENOMEM; |
359 | 359 | ||
360 | smp_rmb(); | 360 | smp_rmb(); |
361 | info = thread->info; | 361 | info = thread->info; |
362 | params = &info->params; | 362 | params = &info->params; |
363 | chan = thread->chan; | 363 | chan = thread->chan; |
364 | dev = chan->device; | 364 | dev = chan->device; |
365 | if (thread->type == DMA_MEMCPY) | 365 | if (thread->type == DMA_MEMCPY) |
366 | src_cnt = dst_cnt = 1; | 366 | src_cnt = dst_cnt = 1; |
367 | else if (thread->type == DMA_XOR) { | 367 | else if (thread->type == DMA_XOR) { |
368 | /* force odd to ensure dst = src */ | 368 | /* force odd to ensure dst = src */ |
369 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); | 369 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); |
370 | dst_cnt = 1; | 370 | dst_cnt = 1; |
371 | } else if (thread->type == DMA_PQ) { | 371 | } else if (thread->type == DMA_PQ) { |
372 | /* force odd to ensure dst = src */ | 372 | /* force odd to ensure dst = src */ |
373 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); | 373 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); |
374 | dst_cnt = 2; | 374 | dst_cnt = 2; |
375 | 375 | ||
376 | pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); | 376 | pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); |
377 | if (!pq_coefs) | 377 | if (!pq_coefs) |
378 | goto err_thread_type; | 378 | goto err_thread_type; |
379 | 379 | ||
380 | for (i = 0; i < src_cnt; i++) | 380 | for (i = 0; i < src_cnt; i++) |
381 | pq_coefs[i] = 1; | 381 | pq_coefs[i] = 1; |
382 | } else | 382 | } else |
383 | goto err_thread_type; | 383 | goto err_thread_type; |
384 | 384 | ||
385 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); | 385 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); |
386 | if (!thread->srcs) | 386 | if (!thread->srcs) |
387 | goto err_srcs; | 387 | goto err_srcs; |
388 | for (i = 0; i < src_cnt; i++) { | 388 | for (i = 0; i < src_cnt; i++) { |
389 | thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); | 389 | thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); |
390 | if (!thread->srcs[i]) | 390 | if (!thread->srcs[i]) |
391 | goto err_srcbuf; | 391 | goto err_srcbuf; |
392 | } | 392 | } |
393 | thread->srcs[i] = NULL; | 393 | thread->srcs[i] = NULL; |
394 | 394 | ||
395 | thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); | 395 | thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); |
396 | if (!thread->dsts) | 396 | if (!thread->dsts) |
397 | goto err_dsts; | 397 | goto err_dsts; |
398 | for (i = 0; i < dst_cnt; i++) { | 398 | for (i = 0; i < dst_cnt; i++) { |
399 | thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); | 399 | thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); |
400 | if (!thread->dsts[i]) | 400 | if (!thread->dsts[i]) |
401 | goto err_dstbuf; | 401 | goto err_dstbuf; |
402 | } | 402 | } |
403 | thread->dsts[i] = NULL; | 403 | thread->dsts[i] = NULL; |
404 | 404 | ||
405 | set_user_nice(current, 10); | 405 | set_user_nice(current, 10); |
406 | 406 | ||
407 | /* | 407 | /* |
408 | * src and dst buffers are freed by ourselves below | 408 | * src and dst buffers are freed by ourselves below |
409 | */ | 409 | */ |
410 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; | 410 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
411 | 411 | ||
412 | while (!kthread_should_stop() | 412 | while (!kthread_should_stop() |
413 | && !(params->iterations && total_tests >= params->iterations)) { | 413 | && !(params->iterations && total_tests >= params->iterations)) { |
414 | struct dma_async_tx_descriptor *tx = NULL; | 414 | struct dma_async_tx_descriptor *tx = NULL; |
415 | dma_addr_t dma_srcs[src_cnt]; | 415 | dma_addr_t dma_srcs[src_cnt]; |
416 | dma_addr_t dma_dsts[dst_cnt]; | 416 | dma_addr_t dma_dsts[dst_cnt]; |
417 | u8 align = 0; | 417 | u8 align = 0; |
418 | 418 | ||
419 | total_tests++; | 419 | total_tests++; |
420 | 420 | ||
421 | /* honor alignment restrictions */ | 421 | /* honor alignment restrictions */ |
422 | if (thread->type == DMA_MEMCPY) | 422 | if (thread->type == DMA_MEMCPY) |
423 | align = dev->copy_align; | 423 | align = dev->copy_align; |
424 | else if (thread->type == DMA_XOR) | 424 | else if (thread->type == DMA_XOR) |
425 | align = dev->xor_align; | 425 | align = dev->xor_align; |
426 | else if (thread->type == DMA_PQ) | 426 | else if (thread->type == DMA_PQ) |
427 | align = dev->pq_align; | 427 | align = dev->pq_align; |
428 | 428 | ||
429 | if (1 << align > params->buf_size) { | 429 | if (1 << align > params->buf_size) { |
430 | pr_err("%u-byte buffer too small for %d-byte alignment\n", | 430 | pr_err("%u-byte buffer too small for %d-byte alignment\n", |
431 | params->buf_size, 1 << align); | 431 | params->buf_size, 1 << align); |
432 | break; | 432 | break; |
433 | } | 433 | } |
434 | 434 | ||
435 | len = dmatest_random() % params->buf_size + 1; | 435 | len = dmatest_random() % params->buf_size + 1; |
436 | len = (len >> align) << align; | 436 | len = (len >> align) << align; |
437 | if (!len) | 437 | if (!len) |
438 | len = 1 << align; | 438 | len = 1 << align; |
439 | src_off = dmatest_random() % (params->buf_size - len + 1); | 439 | src_off = dmatest_random() % (params->buf_size - len + 1); |
440 | dst_off = dmatest_random() % (params->buf_size - len + 1); | 440 | dst_off = dmatest_random() % (params->buf_size - len + 1); |
441 | 441 | ||
442 | src_off = (src_off >> align) << align; | 442 | src_off = (src_off >> align) << align; |
443 | dst_off = (dst_off >> align) << align; | 443 | dst_off = (dst_off >> align) << align; |
444 | 444 | ||
445 | dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); | 445 | dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); |
446 | dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); | 446 | dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); |
447 | 447 | ||
448 | for (i = 0; i < src_cnt; i++) { | 448 | for (i = 0; i < src_cnt; i++) { |
449 | u8 *buf = thread->srcs[i] + src_off; | 449 | u8 *buf = thread->srcs[i] + src_off; |
450 | 450 | ||
451 | dma_srcs[i] = dma_map_single(dev->dev, buf, len, | 451 | dma_srcs[i] = dma_map_single(dev->dev, buf, len, |
452 | DMA_TO_DEVICE); | 452 | DMA_TO_DEVICE); |
453 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); | 453 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); |
454 | if (ret) { | 454 | if (ret) { |
455 | unmap_src(dev->dev, dma_srcs, len, i); | 455 | unmap_src(dev->dev, dma_srcs, len, i); |
456 | result("src mapping error", total_tests, | 456 | result("src mapping error", total_tests, |
457 | src_off, dst_off, len, ret); | 457 | src_off, dst_off, len, ret); |
458 | failed_tests++; | 458 | failed_tests++; |
459 | continue; | 459 | continue; |
460 | } | 460 | } |
461 | } | 461 | } |
462 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 462 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
463 | for (i = 0; i < dst_cnt; i++) { | 463 | for (i = 0; i < dst_cnt; i++) { |
464 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], | 464 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], |
465 | params->buf_size, | 465 | params->buf_size, |
466 | DMA_BIDIRECTIONAL); | 466 | DMA_BIDIRECTIONAL); |
467 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); | 467 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); |
468 | if (ret) { | 468 | if (ret) { |
469 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 469 | unmap_src(dev->dev, dma_srcs, len, src_cnt); |
470 | unmap_dst(dev->dev, dma_dsts, params->buf_size, | 470 | unmap_dst(dev->dev, dma_dsts, params->buf_size, |
471 | i); | 471 | i); |
472 | result("dst mapping error", total_tests, | 472 | result("dst mapping error", total_tests, |
473 | src_off, dst_off, len, ret); | 473 | src_off, dst_off, len, ret); |
474 | failed_tests++; | 474 | failed_tests++; |
475 | continue; | 475 | continue; |
476 | } | 476 | } |
477 | } | 477 | } |
478 | 478 | ||
479 | if (thread->type == DMA_MEMCPY) | 479 | if (thread->type == DMA_MEMCPY) |
480 | tx = dev->device_prep_dma_memcpy(chan, | 480 | tx = dev->device_prep_dma_memcpy(chan, |
481 | dma_dsts[0] + dst_off, | 481 | dma_dsts[0] + dst_off, |
482 | dma_srcs[0], len, | 482 | dma_srcs[0], len, |
483 | flags); | 483 | flags); |
484 | else if (thread->type == DMA_XOR) | 484 | else if (thread->type == DMA_XOR) |
485 | tx = dev->device_prep_dma_xor(chan, | 485 | tx = dev->device_prep_dma_xor(chan, |
486 | dma_dsts[0] + dst_off, | 486 | dma_dsts[0] + dst_off, |
487 | dma_srcs, src_cnt, | 487 | dma_srcs, src_cnt, |
488 | len, flags); | 488 | len, flags); |
489 | else if (thread->type == DMA_PQ) { | 489 | else if (thread->type == DMA_PQ) { |
490 | dma_addr_t dma_pq[dst_cnt]; | 490 | dma_addr_t dma_pq[dst_cnt]; |
491 | 491 | ||
492 | for (i = 0; i < dst_cnt; i++) | 492 | for (i = 0; i < dst_cnt; i++) |
493 | dma_pq[i] = dma_dsts[i] + dst_off; | 493 | dma_pq[i] = dma_dsts[i] + dst_off; |
494 | tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, | 494 | tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, |
495 | src_cnt, pq_coefs, | 495 | src_cnt, pq_coefs, |
496 | len, flags); | 496 | len, flags); |
497 | } | 497 | } |
498 | 498 | ||
499 | if (!tx) { | 499 | if (!tx) { |
500 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 500 | unmap_src(dev->dev, dma_srcs, len, src_cnt); |
501 | unmap_dst(dev->dev, dma_dsts, params->buf_size, | 501 | unmap_dst(dev->dev, dma_dsts, params->buf_size, |
502 | dst_cnt); | 502 | dst_cnt); |
503 | result("prep error", total_tests, src_off, | 503 | result("prep error", total_tests, src_off, |
504 | dst_off, len, ret); | 504 | dst_off, len, ret); |
505 | msleep(100); | 505 | msleep(100); |
506 | failed_tests++; | 506 | failed_tests++; |
507 | continue; | 507 | continue; |
508 | } | 508 | } |
509 | 509 | ||
510 | done.done = false; | 510 | done.done = false; |
511 | tx->callback = dmatest_callback; | 511 | tx->callback = dmatest_callback; |
512 | tx->callback_param = &done; | 512 | tx->callback_param = &done; |
513 | cookie = tx->tx_submit(tx); | 513 | cookie = tx->tx_submit(tx); |
514 | 514 | ||
515 | if (dma_submit_error(cookie)) { | 515 | if (dma_submit_error(cookie)) { |
516 | result("submit error", total_tests, src_off, | 516 | result("submit error", total_tests, src_off, |
517 | dst_off, len, ret); | 517 | dst_off, len, ret); |
518 | msleep(100); | 518 | msleep(100); |
519 | failed_tests++; | 519 | failed_tests++; |
520 | continue; | 520 | continue; |
521 | } | 521 | } |
522 | dma_async_issue_pending(chan); | 522 | dma_async_issue_pending(chan); |
523 | 523 | ||
524 | wait_event_freezable_timeout(done_wait, done.done, | 524 | wait_event_freezable_timeout(done_wait, done.done, |
525 | msecs_to_jiffies(params->timeout)); | 525 | msecs_to_jiffies(params->timeout)); |
526 | 526 | ||
527 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 527 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
528 | 528 | ||
529 | if (!done.done) { | 529 | if (!done.done) { |
530 | /* | 530 | /* |
531 | * We're leaving the timed out dma operation with | 531 | * We're leaving the timed out dma operation with |
532 | * dangling pointer to done_wait. To make this | 532 | * dangling pointer to done_wait. To make this |
533 | * correct, we'll need to allocate wait_done for | 533 | * correct, we'll need to allocate wait_done for |
534 | * each test iteration and perform "who's gonna | 534 | * each test iteration and perform "who's gonna |
535 | * free it this time?" dancing. For now, just | 535 | * free it this time?" dancing. For now, just |
536 | * leave it dangling. | 536 | * leave it dangling. |
537 | */ | 537 | */ |
538 | result("test timed out", total_tests, src_off, dst_off, | 538 | result("test timed out", total_tests, src_off, dst_off, |
539 | len, 0); | 539 | len, 0); |
540 | failed_tests++; | 540 | failed_tests++; |
541 | continue; | 541 | continue; |
542 | } else if (status != DMA_SUCCESS) { | 542 | } else if (status != DMA_SUCCESS) { |
543 | result(status == DMA_ERROR ? | 543 | result(status == DMA_ERROR ? |
544 | "completion error status" : | 544 | "completion error status" : |
545 | "completion busy status", total_tests, src_off, | 545 | "completion busy status", total_tests, src_off, |
546 | dst_off, len, ret); | 546 | dst_off, len, ret); |
547 | failed_tests++; | 547 | failed_tests++; |
548 | continue; | 548 | continue; |
549 | } | 549 | } |
550 | 550 | ||
551 | /* Unmap by myself */ | 551 | /* Unmap by myself */ |
552 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 552 | unmap_src(dev->dev, dma_srcs, len, src_cnt); |
553 | unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); | 553 | unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); |
554 | 554 | ||
555 | error_count = 0; | 555 | error_count = 0; |
556 | 556 | ||
557 | pr_debug("%s: verifying source buffer...\n", current->comm); | 557 | pr_debug("%s: verifying source buffer...\n", current->comm); |
558 | error_count += dmatest_verify(thread->srcs, 0, src_off, | 558 | error_count += dmatest_verify(thread->srcs, 0, src_off, |
559 | 0, PATTERN_SRC, true); | 559 | 0, PATTERN_SRC, true); |
560 | error_count += dmatest_verify(thread->srcs, src_off, | 560 | error_count += dmatest_verify(thread->srcs, src_off, |
561 | src_off + len, src_off, | 561 | src_off + len, src_off, |
562 | PATTERN_SRC | PATTERN_COPY, true); | 562 | PATTERN_SRC | PATTERN_COPY, true); |
563 | error_count += dmatest_verify(thread->srcs, src_off + len, | 563 | error_count += dmatest_verify(thread->srcs, src_off + len, |
564 | params->buf_size, src_off + len, | 564 | params->buf_size, src_off + len, |
565 | PATTERN_SRC, true); | 565 | PATTERN_SRC, true); |
566 | 566 | ||
567 | pr_debug("%s: verifying dest buffer...\n", current->comm); | 567 | pr_debug("%s: verifying dest buffer...\n", current->comm); |
568 | error_count += dmatest_verify(thread->dsts, 0, dst_off, | 568 | error_count += dmatest_verify(thread->dsts, 0, dst_off, |
569 | 0, PATTERN_DST, false); | 569 | 0, PATTERN_DST, false); |
570 | error_count += dmatest_verify(thread->dsts, dst_off, | 570 | error_count += dmatest_verify(thread->dsts, dst_off, |
571 | dst_off + len, src_off, | 571 | dst_off + len, src_off, |
572 | PATTERN_SRC | PATTERN_COPY, false); | 572 | PATTERN_SRC | PATTERN_COPY, false); |
573 | error_count += dmatest_verify(thread->dsts, dst_off + len, | 573 | error_count += dmatest_verify(thread->dsts, dst_off + len, |
574 | params->buf_size, dst_off + len, | 574 | params->buf_size, dst_off + len, |
575 | PATTERN_DST, false); | 575 | PATTERN_DST, false); |
576 | 576 | ||
577 | if (error_count) { | 577 | if (error_count) { |
578 | result("data error", total_tests, src_off, dst_off, | 578 | result("data error", total_tests, src_off, dst_off, |
579 | len, error_count); | 579 | len, error_count); |
580 | failed_tests++; | 580 | failed_tests++; |
581 | } else { | 581 | } else { |
582 | dbg_result("test passed", total_tests, src_off, dst_off, | 582 | dbg_result("test passed", total_tests, src_off, dst_off, |
583 | len, 0); | 583 | len, 0); |
584 | } | 584 | } |
585 | } | 585 | } |
586 | 586 | ||
587 | ret = 0; | 587 | ret = 0; |
588 | for (i = 0; thread->dsts[i]; i++) | 588 | for (i = 0; thread->dsts[i]; i++) |
589 | kfree(thread->dsts[i]); | 589 | kfree(thread->dsts[i]); |
590 | err_dstbuf: | 590 | err_dstbuf: |
591 | kfree(thread->dsts); | 591 | kfree(thread->dsts); |
592 | err_dsts: | 592 | err_dsts: |
593 | for (i = 0; thread->srcs[i]; i++) | 593 | for (i = 0; thread->srcs[i]; i++) |
594 | kfree(thread->srcs[i]); | 594 | kfree(thread->srcs[i]); |
595 | err_srcbuf: | 595 | err_srcbuf: |
596 | kfree(thread->srcs); | 596 | kfree(thread->srcs); |
597 | err_srcs: | 597 | err_srcs: |
598 | kfree(pq_coefs); | 598 | kfree(pq_coefs); |
599 | err_thread_type: | 599 | err_thread_type: |
600 | pr_info("%s: terminating after %u tests, %u failures (status %d)\n", | 600 | pr_info("%s: terminating after %u tests, %u failures (status %d)\n", |
601 | current->comm, total_tests, failed_tests, ret); | 601 | current->comm, total_tests, failed_tests, ret); |
602 | 602 | ||
603 | /* terminate all transfers on specified channels */ | 603 | /* terminate all transfers on specified channels */ |
604 | if (ret) | 604 | if (ret) |
605 | dmaengine_terminate_all(chan); | 605 | dmaengine_terminate_all(chan); |
606 | 606 | ||
607 | thread->done = true; | 607 | thread->done = true; |
608 | 608 | ||
609 | if (params->iterations > 0) | 609 | if (params->iterations > 0) |
610 | while (!kthread_should_stop()) { | 610 | while (!kthread_should_stop()) { |
611 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); | 611 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); |
612 | interruptible_sleep_on(&wait_dmatest_exit); | 612 | interruptible_sleep_on(&wait_dmatest_exit); |
613 | } | 613 | } |
614 | 614 | ||
615 | return ret; | 615 | return ret; |
616 | } | 616 | } |
617 | 617 | ||
618 | static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | 618 | static void dmatest_cleanup_channel(struct dmatest_chan *dtc) |
619 | { | 619 | { |
620 | struct dmatest_thread *thread; | 620 | struct dmatest_thread *thread; |
621 | struct dmatest_thread *_thread; | 621 | struct dmatest_thread *_thread; |
622 | int ret; | 622 | int ret; |
623 | 623 | ||
624 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { | 624 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { |
625 | ret = kthread_stop(thread->task); | 625 | ret = kthread_stop(thread->task); |
626 | pr_debug("dmatest: thread %s exited with status %d\n", | 626 | pr_debug("thread %s exited with status %d\n", |
627 | thread->task->comm, ret); | 627 | thread->task->comm, ret); |
628 | list_del(&thread->node); | 628 | list_del(&thread->node); |
629 | kfree(thread); | 629 | kfree(thread); |
630 | } | 630 | } |
631 | 631 | ||
632 | /* terminate all transfers on specified channels */ | 632 | /* terminate all transfers on specified channels */ |
633 | dmaengine_terminate_all(dtc->chan); | 633 | dmaengine_terminate_all(dtc->chan); |
634 | 634 | ||
635 | kfree(dtc); | 635 | kfree(dtc); |
636 | } | 636 | } |
637 | 637 | ||
638 | static int dmatest_add_threads(struct dmatest_info *info, | 638 | static int dmatest_add_threads(struct dmatest_info *info, |
639 | struct dmatest_chan *dtc, enum dma_transaction_type type) | 639 | struct dmatest_chan *dtc, enum dma_transaction_type type) |
640 | { | 640 | { |
641 | struct dmatest_params *params = &info->params; | 641 | struct dmatest_params *params = &info->params; |
642 | struct dmatest_thread *thread; | 642 | struct dmatest_thread *thread; |
643 | struct dma_chan *chan = dtc->chan; | 643 | struct dma_chan *chan = dtc->chan; |
644 | char *op; | 644 | char *op; |
645 | unsigned int i; | 645 | unsigned int i; |
646 | 646 | ||
647 | if (type == DMA_MEMCPY) | 647 | if (type == DMA_MEMCPY) |
648 | op = "copy"; | 648 | op = "copy"; |
649 | else if (type == DMA_XOR) | 649 | else if (type == DMA_XOR) |
650 | op = "xor"; | 650 | op = "xor"; |
651 | else if (type == DMA_PQ) | 651 | else if (type == DMA_PQ) |
652 | op = "pq"; | 652 | op = "pq"; |
653 | else | 653 | else |
654 | return -EINVAL; | 654 | return -EINVAL; |
655 | 655 | ||
656 | for (i = 0; i < params->threads_per_chan; i++) { | 656 | for (i = 0; i < params->threads_per_chan; i++) { |
657 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | 657 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); |
658 | if (!thread) { | 658 | if (!thread) { |
659 | pr_warning("dmatest: No memory for %s-%s%u\n", | 659 | pr_warn("No memory for %s-%s%u\n", |
660 | dma_chan_name(chan), op, i); | 660 | dma_chan_name(chan), op, i); |
661 | |||
662 | break; | 661 | break; |
663 | } | 662 | } |
664 | thread->info = info; | 663 | thread->info = info; |
665 | thread->chan = dtc->chan; | 664 | thread->chan = dtc->chan; |
666 | thread->type = type; | 665 | thread->type = type; |
667 | smp_wmb(); | 666 | smp_wmb(); |
668 | thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", | 667 | thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", |
669 | dma_chan_name(chan), op, i); | 668 | dma_chan_name(chan), op, i); |
670 | if (IS_ERR(thread->task)) { | 669 | if (IS_ERR(thread->task)) { |
671 | pr_warning("dmatest: Failed to run thread %s-%s%u\n", | 670 | pr_warn("Failed to run thread %s-%s%u\n", |
672 | dma_chan_name(chan), op, i); | 671 | dma_chan_name(chan), op, i); |
673 | kfree(thread); | 672 | kfree(thread); |
674 | break; | 673 | break; |
675 | } | 674 | } |
676 | 675 | ||
677 | /* srcbuf and dstbuf are allocated by the thread itself */ | 676 | /* srcbuf and dstbuf are allocated by the thread itself */ |
678 | 677 | ||
679 | list_add_tail(&thread->node, &dtc->threads); | 678 | list_add_tail(&thread->node, &dtc->threads); |
680 | } | 679 | } |
681 | 680 | ||
682 | return i; | 681 | return i; |
683 | } | 682 | } |
684 | 683 | ||
685 | static int dmatest_add_channel(struct dmatest_info *info, | 684 | static int dmatest_add_channel(struct dmatest_info *info, |
686 | struct dma_chan *chan) | 685 | struct dma_chan *chan) |
687 | { | 686 | { |
688 | struct dmatest_chan *dtc; | 687 | struct dmatest_chan *dtc; |
689 | struct dma_device *dma_dev = chan->device; | 688 | struct dma_device *dma_dev = chan->device; |
690 | unsigned int thread_count = 0; | 689 | unsigned int thread_count = 0; |
691 | int cnt; | 690 | int cnt; |
692 | 691 | ||
693 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); | 692 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); |
694 | if (!dtc) { | 693 | if (!dtc) { |
695 | pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); | 694 | pr_warn("No memory for %s\n", dma_chan_name(chan)); |
696 | return -ENOMEM; | 695 | return -ENOMEM; |
697 | } | 696 | } |
698 | 697 | ||
699 | dtc->chan = chan; | 698 | dtc->chan = chan; |
700 | INIT_LIST_HEAD(&dtc->threads); | 699 | INIT_LIST_HEAD(&dtc->threads); |
701 | 700 | ||
702 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | 701 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { |
703 | cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); | 702 | cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); |
704 | thread_count += cnt > 0 ? cnt : 0; | 703 | thread_count += cnt > 0 ? cnt : 0; |
705 | } | 704 | } |
706 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 705 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
707 | cnt = dmatest_add_threads(info, dtc, DMA_XOR); | 706 | cnt = dmatest_add_threads(info, dtc, DMA_XOR); |
708 | thread_count += cnt > 0 ? cnt : 0; | 707 | thread_count += cnt > 0 ? cnt : 0; |
709 | } | 708 | } |
710 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | 709 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { |
711 | cnt = dmatest_add_threads(info, dtc, DMA_PQ); | 710 | cnt = dmatest_add_threads(info, dtc, DMA_PQ); |
712 | thread_count += cnt > 0 ? cnt : 0; | 711 | thread_count += cnt > 0 ? cnt : 0; |
713 | } | 712 | } |
714 | 713 | ||
715 | pr_info("dmatest: Started %u threads using %s\n", | 714 | pr_info("Started %u threads using %s\n", |
716 | thread_count, dma_chan_name(chan)); | 715 | thread_count, dma_chan_name(chan)); |
717 | 716 | ||
718 | list_add_tail(&dtc->node, &info->channels); | 717 | list_add_tail(&dtc->node, &info->channels); |
719 | info->nr_channels++; | 718 | info->nr_channels++; |
720 | 719 | ||
721 | return 0; | 720 | return 0; |
722 | } | 721 | } |
723 | 722 | ||
724 | static bool filter(struct dma_chan *chan, void *param) | 723 | static bool filter(struct dma_chan *chan, void *param) |
725 | { | 724 | { |
726 | struct dmatest_params *params = param; | 725 | struct dmatest_params *params = param; |
727 | 726 | ||
728 | if (!dmatest_match_channel(params, chan) || | 727 | if (!dmatest_match_channel(params, chan) || |
729 | !dmatest_match_device(params, chan->device)) | 728 | !dmatest_match_device(params, chan->device)) |
730 | return false; | 729 | return false; |
731 | else | 730 | else |
732 | return true; | 731 | return true; |
733 | } | 732 | } |
734 | 733 | ||
735 | static int __run_threaded_test(struct dmatest_info *info) | 734 | static int __run_threaded_test(struct dmatest_info *info) |
736 | { | 735 | { |
737 | dma_cap_mask_t mask; | 736 | dma_cap_mask_t mask; |
738 | struct dma_chan *chan; | 737 | struct dma_chan *chan; |
739 | struct dmatest_params *params = &info->params; | 738 | struct dmatest_params *params = &info->params; |
740 | int err = 0; | 739 | int err = 0; |
741 | 740 | ||
742 | dma_cap_zero(mask); | 741 | dma_cap_zero(mask); |
743 | dma_cap_set(DMA_MEMCPY, mask); | 742 | dma_cap_set(DMA_MEMCPY, mask); |
744 | for (;;) { | 743 | for (;;) { |
745 | chan = dma_request_channel(mask, filter, params); | 744 | chan = dma_request_channel(mask, filter, params); |
746 | if (chan) { | 745 | if (chan) { |
747 | err = dmatest_add_channel(info, chan); | 746 | err = dmatest_add_channel(info, chan); |
748 | if (err) { | 747 | if (err) { |
749 | dma_release_channel(chan); | 748 | dma_release_channel(chan); |
750 | break; /* add_channel failed, punt */ | 749 | break; /* add_channel failed, punt */ |
751 | } | 750 | } |
752 | } else | 751 | } else |
753 | break; /* no more channels available */ | 752 | break; /* no more channels available */ |
754 | if (params->max_channels && | 753 | if (params->max_channels && |
755 | info->nr_channels >= params->max_channels) | 754 | info->nr_channels >= params->max_channels) |
756 | break; /* we have all we need */ | 755 | break; /* we have all we need */ |
757 | } | 756 | } |
758 | return err; | 757 | return err; |
759 | } | 758 | } |
760 | 759 | ||
761 | #ifndef MODULE | 760 | #ifndef MODULE |
762 | static int run_threaded_test(struct dmatest_info *info) | 761 | static int run_threaded_test(struct dmatest_info *info) |
763 | { | 762 | { |
764 | int ret; | 763 | int ret; |
765 | 764 | ||
766 | mutex_lock(&info->lock); | 765 | mutex_lock(&info->lock); |
767 | ret = __run_threaded_test(info); | 766 | ret = __run_threaded_test(info); |
768 | mutex_unlock(&info->lock); | 767 | mutex_unlock(&info->lock); |
769 | return ret; | 768 | return ret; |
770 | } | 769 | } |
771 | #endif | 770 | #endif |
772 | 771 | ||
773 | static void __stop_threaded_test(struct dmatest_info *info) | 772 | static void __stop_threaded_test(struct dmatest_info *info) |
774 | { | 773 | { |
775 | struct dmatest_chan *dtc, *_dtc; | 774 | struct dmatest_chan *dtc, *_dtc; |
776 | struct dma_chan *chan; | 775 | struct dma_chan *chan; |
777 | 776 | ||
778 | list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { | 777 | list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { |
779 | list_del(&dtc->node); | 778 | list_del(&dtc->node); |
780 | chan = dtc->chan; | 779 | chan = dtc->chan; |
781 | dmatest_cleanup_channel(dtc); | 780 | dmatest_cleanup_channel(dtc); |
782 | pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); | 781 | pr_debug("dropped channel %s\n", dma_chan_name(chan)); |
783 | dma_release_channel(chan); | 782 | dma_release_channel(chan); |
784 | } | 783 | } |
785 | 784 | ||
786 | info->nr_channels = 0; | 785 | info->nr_channels = 0; |
787 | } | 786 | } |
788 | 787 | ||
789 | static void stop_threaded_test(struct dmatest_info *info) | 788 | static void stop_threaded_test(struct dmatest_info *info) |
790 | { | 789 | { |
791 | mutex_lock(&info->lock); | 790 | mutex_lock(&info->lock); |
792 | __stop_threaded_test(info); | 791 | __stop_threaded_test(info); |
793 | mutex_unlock(&info->lock); | 792 | mutex_unlock(&info->lock); |
794 | } | 793 | } |
795 | 794 | ||
796 | static int __restart_threaded_test(struct dmatest_info *info, bool run) | 795 | static int __restart_threaded_test(struct dmatest_info *info, bool run) |
797 | { | 796 | { |
798 | struct dmatest_params *params = &info->params; | 797 | struct dmatest_params *params = &info->params; |
799 | 798 | ||
800 | /* Stop any running test first */ | 799 | /* Stop any running test first */ |
801 | __stop_threaded_test(info); | 800 | __stop_threaded_test(info); |
802 | 801 | ||
803 | if (run == false) | 802 | if (run == false) |
804 | return 0; | 803 | return 0; |
805 | 804 | ||
806 | /* Copy test parameters */ | 805 | /* Copy test parameters */ |
807 | params->buf_size = test_buf_size; | 806 | params->buf_size = test_buf_size; |
808 | strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); | 807 | strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); |
809 | strlcpy(params->device, strim(test_device), sizeof(params->device)); | 808 | strlcpy(params->device, strim(test_device), sizeof(params->device)); |
810 | params->threads_per_chan = threads_per_chan; | 809 | params->threads_per_chan = threads_per_chan; |
811 | params->max_channels = max_channels; | 810 | params->max_channels = max_channels; |
812 | params->iterations = iterations; | 811 | params->iterations = iterations; |
813 | params->xor_sources = xor_sources; | 812 | params->xor_sources = xor_sources; |
814 | params->pq_sources = pq_sources; | 813 | params->pq_sources = pq_sources; |
815 | params->timeout = timeout; | 814 | params->timeout = timeout; |
816 | 815 | ||
817 | /* Run test with new parameters */ | 816 | /* Run test with new parameters */ |
818 | return __run_threaded_test(info); | 817 | return __run_threaded_test(info); |
819 | } | 818 | } |
820 | 819 | ||
821 | static bool __is_threaded_test_run(struct dmatest_info *info) | 820 | static bool __is_threaded_test_run(struct dmatest_info *info) |
822 | { | 821 | { |
823 | struct dmatest_chan *dtc; | 822 | struct dmatest_chan *dtc; |
824 | 823 | ||
825 | list_for_each_entry(dtc, &info->channels, node) { | 824 | list_for_each_entry(dtc, &info->channels, node) { |
826 | struct dmatest_thread *thread; | 825 | struct dmatest_thread *thread; |
827 | 826 | ||
828 | list_for_each_entry(thread, &dtc->threads, node) { | 827 | list_for_each_entry(thread, &dtc->threads, node) { |
829 | if (!thread->done) | 828 | if (!thread->done) |
830 | return true; | 829 | return true; |
831 | } | 830 | } |
832 | } | 831 | } |
833 | 832 | ||
834 | return false; | 833 | return false; |
835 | } | 834 | } |
836 | 835 | ||
837 | static ssize_t dtf_read_run(struct file *file, char __user *user_buf, | 836 | static ssize_t dtf_read_run(struct file *file, char __user *user_buf, |
838 | size_t count, loff_t *ppos) | 837 | size_t count, loff_t *ppos) |
839 | { | 838 | { |
840 | struct dmatest_info *info = file->private_data; | 839 | struct dmatest_info *info = file->private_data; |
841 | char buf[3]; | 840 | char buf[3]; |
842 | 841 | ||
843 | mutex_lock(&info->lock); | 842 | mutex_lock(&info->lock); |
844 | 843 | ||
845 | if (__is_threaded_test_run(info)) { | 844 | if (__is_threaded_test_run(info)) { |
846 | buf[0] = 'Y'; | 845 | buf[0] = 'Y'; |
847 | } else { | 846 | } else { |
848 | __stop_threaded_test(info); | 847 | __stop_threaded_test(info); |
849 | buf[0] = 'N'; | 848 | buf[0] = 'N'; |
850 | } | 849 | } |
851 | 850 | ||
852 | mutex_unlock(&info->lock); | 851 | mutex_unlock(&info->lock); |
853 | buf[1] = '\n'; | 852 | buf[1] = '\n'; |
854 | buf[2] = 0x00; | 853 | buf[2] = 0x00; |
855 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); | 854 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); |
856 | } | 855 | } |
857 | 856 | ||
858 | static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, | 857 | static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, |
859 | size_t count, loff_t *ppos) | 858 | size_t count, loff_t *ppos) |
860 | { | 859 | { |
861 | struct dmatest_info *info = file->private_data; | 860 | struct dmatest_info *info = file->private_data; |
862 | char buf[16]; | 861 | char buf[16]; |
863 | bool bv; | 862 | bool bv; |
864 | int ret = 0; | 863 | int ret = 0; |
865 | 864 | ||
866 | if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) | 865 | if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) |
867 | return -EFAULT; | 866 | return -EFAULT; |
868 | 867 | ||
869 | if (strtobool(buf, &bv) == 0) { | 868 | if (strtobool(buf, &bv) == 0) { |
870 | mutex_lock(&info->lock); | 869 | mutex_lock(&info->lock); |
871 | 870 | ||
872 | if (__is_threaded_test_run(info)) | 871 | if (__is_threaded_test_run(info)) |
873 | ret = -EBUSY; | 872 | ret = -EBUSY; |
874 | else | 873 | else |
875 | ret = __restart_threaded_test(info, bv); | 874 | ret = __restart_threaded_test(info, bv); |
876 | 875 | ||
877 | mutex_unlock(&info->lock); | 876 | mutex_unlock(&info->lock); |
878 | } | 877 | } |
879 | 878 | ||
880 | return ret ? ret : count; | 879 | return ret ? ret : count; |
881 | } | 880 | } |
882 | 881 | ||
883 | static const struct file_operations dtf_run_fops = { | 882 | static const struct file_operations dtf_run_fops = { |
884 | .read = dtf_read_run, | 883 | .read = dtf_read_run, |
885 | .write = dtf_write_run, | 884 | .write = dtf_write_run, |
886 | .open = simple_open, | 885 | .open = simple_open, |
887 | .llseek = default_llseek, | 886 | .llseek = default_llseek, |
888 | }; | 887 | }; |
889 | 888 | ||
890 | static int dmatest_register_dbgfs(struct dmatest_info *info) | 889 | static int dmatest_register_dbgfs(struct dmatest_info *info) |
891 | { | 890 | { |
892 | struct dentry *d; | 891 | struct dentry *d; |
893 | 892 | ||
894 | d = debugfs_create_dir("dmatest", NULL); | 893 | d = debugfs_create_dir("dmatest", NULL); |
895 | if (IS_ERR(d)) | 894 | if (IS_ERR(d)) |
896 | return PTR_ERR(d); | 895 | return PTR_ERR(d); |
897 | if (!d) | 896 | if (!d) |
898 | goto err_root; | 897 | goto err_root; |
899 | 898 | ||
900 | info->root = d; | 899 | info->root = d; |
901 | 900 | ||
902 | /* Run or stop threaded test */ | 901 | /* Run or stop threaded test */ |
903 | debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, | 902 | debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, |
904 | &dtf_run_fops); | 903 | &dtf_run_fops); |
905 | 904 | ||
906 | return 0; | 905 | return 0; |
907 | 906 | ||
908 | err_root: | 907 | err_root: |
909 | pr_err("dmatest: Failed to initialize debugfs\n"); | 908 | pr_err("Failed to initialize debugfs\n"); |
910 | return -ENOMEM; | 909 | return -ENOMEM; |
911 | } | 910 | } |
912 | 911 | ||
913 | static int __init dmatest_init(void) | 912 | static int __init dmatest_init(void) |
914 | { | 913 | { |
915 | struct dmatest_info *info = &test_info; | 914 | struct dmatest_info *info = &test_info; |
916 | int ret; | 915 | int ret; |
917 | 916 | ||
918 | memset(info, 0, sizeof(*info)); | 917 | memset(info, 0, sizeof(*info)); |
919 | 918 | ||
920 | mutex_init(&info->lock); | 919 | mutex_init(&info->lock); |
921 | INIT_LIST_HEAD(&info->channels); | 920 | INIT_LIST_HEAD(&info->channels); |
922 | 921 | ||
923 | ret = dmatest_register_dbgfs(info); | 922 | ret = dmatest_register_dbgfs(info); |
924 | if (ret) | 923 | if (ret) |
925 | return ret; | 924 | return ret; |
926 | 925 | ||
927 | #ifdef MODULE | 926 | #ifdef MODULE |
928 | return 0; | 927 | return 0; |
929 | #else | 928 | #else |
930 | return run_threaded_test(info); | 929 | return run_threaded_test(info); |
931 | #endif | 930 | #endif |
932 | } | 931 | } |
933 | /* when compiled-in wait for drivers to load first */ | 932 | /* when compiled-in wait for drivers to load first */ |
934 | late_initcall(dmatest_init); | 933 | late_initcall(dmatest_init); |
935 | 934 | ||
936 | static void __exit dmatest_exit(void) | 935 | static void __exit dmatest_exit(void) |
937 | { | 936 | { |
938 | struct dmatest_info *info = &test_info; | 937 | struct dmatest_info *info = &test_info; |
939 | 938 | ||
940 | debugfs_remove_recursive(info->root); | 939 | debugfs_remove_recursive(info->root); |
941 | stop_threaded_test(info); | 940 | stop_threaded_test(info); |
942 | } | 941 | } |
943 | module_exit(dmatest_exit); | 942 | module_exit(dmatest_exit); |
944 | 943 | ||
945 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 944 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
946 | MODULE_LICENSE("GPL v2"); | 945 | MODULE_LICENSE("GPL v2"); |
947 | 946 |