Commit e3b9c347316fe243bea6abd08681050c43ca22ee

Authored by Dan Williams
1 parent be9fa5a436

dmatest: add support for skipping verification and random data setup

Towards enabling dmatest to be used for performance measurement, add a 'noverify' mode that skips random data setup and result verification.

Acked-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
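
For reference, the new mode is driven purely by module parameters; a rough usage sketch (assuming dmatest is built as a module, and using only the parameter names defined in this file) would be:

    # skip pattern initialization and verification; just move data
    modprobe dmatest noverify=1 iterations=10000
    echo 1 > /sys/module/dmatest/parameters/run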

Showing 1 changed file with 33 additions (+) and 11 deletions (-)

drivers/dma/dmatest.c
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		"Pass -1 for infinite timeout");

+static bool noverify;
+module_param(noverify, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
+
/**
 * struct dmatest_params - test parameters.
 * @buf_size: size of the memcpy test buffer
 * @channel: bus ID of the channel to test
 * @device: bus ID of the DMA Engine to test
 * @threads_per_chan: number of threads to start per channel
 * @max_channels: maximum number of channels to use
 * @iterations: iterations before stopping test
 * @xor_sources: number of xor source buffers
 * @pq_sources: number of p+q source buffers
 * @timeout: transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int buf_size;
	char channel[20];
	char device[20];
	unsigned int threads_per_chan;
	unsigned int max_channels;
	unsigned int iterations;
	unsigned int xor_sources;
	unsigned int pq_sources;
	int timeout;
+	bool noverify;
};

/**
 * struct dmatest_info - test information.
 * @params: test parameters
 * @lock: access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params params;

	/* Internal state */
	struct list_head channels;
	unsigned int nr_channels;
	struct mutex lock;
	bool did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT 32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f

struct dmatest_thread {
	struct list_head node;
	struct dmatest_info *info;
	struct task_struct *task;
	struct dma_chan *chan;
	u8 **srcs;
	u8 **dsts;
	enum dma_transaction_type type;
	bool done;
};

struct dmatest_chan {
	struct list_head node;
	struct dma_chan *chan;
	struct list_head threads;
};

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool done;
	wait_queue_head_t *wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
		unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
		unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		unsigned int dst_off, unsigned int len,
		unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
		current->comm, n, err, src_off, dst_off, len, data);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread *thread = data;
	struct dmatest_done done = { .wait = &done_wait };
	struct dmatest_info *info;
	struct dmatest_params *params;
	struct dma_chan *chan;
	struct dma_device *dev;
	unsigned int src_off, dst_off, len;
	unsigned int error_count;
	unsigned int failed_tests = 0;
	unsigned int total_tests = 0;
	dma_cookie_t cookie;
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *pq_coefs = NULL;
	int ret;
	int src_cnt;
	int dst_cnt;
	int i;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

-		len = dmatest_random() % params->buf_size + 1;
+		if (params->noverify) {
+			len = params->buf_size;
+			src_off = 0;
+			dst_off = 0;
+		} else {
+			len = dmatest_random() % params->buf_size + 1;
+			len = (len >> align) << align;
+			if (!len)
+				len = 1 << align;
+			src_off = dmatest_random() % (params->buf_size - len + 1);
+			dst_off = dmatest_random() % (params->buf_size - len + 1);
+
+			src_off = (src_off >> align) << align;
+			dst_off = (dst_off >> align) << align;
+
+			dmatest_init_srcs(thread->srcs, src_off, len,
+					  params->buf_size);
+			dmatest_init_dsts(thread->dsts, dst_off, len,
+					  params->buf_size);
+		}
+
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
-		src_off = dmatest_random() % (params->buf_size - len + 1);
-		dst_off = dmatest_random() % (params->buf_size - len + 1);

-		src_off = (src_off >> align) << align;
-		dst_off = (dst_off >> align) << align;
-
-		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
-		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
-
		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		/* Unmap by myself */
		unmap_src(dev->dev, dma_srcs, len, src_cnt);
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

-		error_count = 0;
+		if (params->noverify) {
+			dbg_result("test passed", total_tests, src_off, dst_off,
+				   len, 0);
+			continue;
+		}

		pr_debug("%s: verifying source buffer...\n", current->comm);
-		error_count += dmatest_verify(thread->srcs, 0, src_off,
+		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			dbg_result("test passed", total_tests, src_off, dst_off,
				   len, 0);
		}
	}

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: terminating after %u tests, %u failures (status %d)\n",
		current->comm, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to run thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan *dtc;
	struct dma_device *dma_dev = chan->device;
	unsigned int thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
		enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
+	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
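	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);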