crypto/async_tx/async_raid6_recov.c
  /*
   * Asynchronous RAID-6 recovery calculations using the ASYNC_TX API.
   * Copyright(c) 2009 Intel Corporation
   *
   * based on raid6recov.c:
   *   Copyright 2002 H. Peter Anvin
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option)
   * any later version.
   *
   * This program is distributed in the hope that it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program; if not, write to the Free Software Foundation, Inc., 51
   * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   *
   */
  #include <linux/kernel.h>
  #include <linux/interrupt.h>
  #include <linux/module.h>
  #include <linux/dma-mapping.h>
  #include <linux/raid/pq.h>
  #include <linux/async_tx.h>
  
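  /* async_sum_product - compute dest = coef[0]*srcs[0] + coef[1]*srcs[1]
   * over GF(2^8).  The operation is expressed as a two-source pq request
   * with P generation disabled, so only the Q slot of the destination
   * pair (dma_dest[1]) is used; when no PQ-capable channel is available
   * the sum of products is computed synchronously via lookup tables.
   */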
  static struct dma_async_tx_descriptor *
  async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
  		  size_t len, struct async_submit_ctl *submit)
  {
  	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
  						      &dest, 1, srcs, 2, len);
  	struct dma_device *dma = chan ? chan->device : NULL;
  	const u8 *amul, *bmul;
  	u8 ax, bx;
  	u8 *a, *b, *c;
  
  	if (dma) {
  		dma_addr_t dma_dest[2];
  		dma_addr_t dma_src[2];
  		struct device *dev = dma->dev;
  		struct dma_async_tx_descriptor *tx;
  		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
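
  		/* tell the driver that subsequent operations depend on
  		 * this result being complete (DMA_PREP_FENCE)
  		 */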
  		if (submit->flags & ASYNC_TX_FENCE)
  			dma_flags |= DMA_PREP_FENCE;
  		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
  		dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
  		dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
  		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
  					     len, dma_flags);
  		if (tx) {
  			async_tx_submit(chan, tx, submit);
  			return tx;
  		}
  
  		/* could not get a descriptor, unmap and fall through to
  		 * the synchronous path
  		 */
  		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
  		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
  		dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
  	}
  
  	/* run the operation synchronously */
  	async_tx_quiesce(&submit->depend_tx);
  	amul = raid6_gfmul[coef[0]];
  	bmul = raid6_gfmul[coef[1]];
  	a = page_address(srcs[0]);
  	b = page_address(srcs[1]);
  	c = page_address(dest);
  
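  	/* raid6_gfmul[x] is a 256-byte lookup table for multiplication by
  	 * the constant x in GF(2^8); addition in the field is xor
  	 */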
  	while (len--) {
  		ax    = amul[*a++];
  		bx    = bmul[*b++];
  		*c++ = ax ^ bx;
  	}
  
  	return NULL;
  }
  
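  /* async_mult - compute dest = coef * src over GF(2^8).  As above, this
   * maps onto a single-source pq operation with the P result disabled,
   * so the "Q" written to dma_dest[1] is simply the scaled source; the
   * synchronous fallback is one table lookup per byte.
   */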
  static struct dma_async_tx_descriptor *
  async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
  	   struct async_submit_ctl *submit)
  {
  	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
  						      &dest, 1, &src, 1, len);
  	struct dma_device *dma = chan ? chan->device : NULL;
  	const u8 *qmul; /* Q multiplier table */
  	u8 *d, *s;
  
  	if (dma) {
  		dma_addr_t dma_dest[2];
  		dma_addr_t dma_src[1];
  		struct device *dev = dma->dev;
  		struct dma_async_tx_descriptor *tx;
  		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
  		if (submit->flags & ASYNC_TX_FENCE)
  			dma_flags |= DMA_PREP_FENCE;
  		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
  		dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
  		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
  					     len, dma_flags);
  		if (tx) {
  			async_tx_submit(chan, tx, submit);
  			return tx;
  		}
  
  		/* could not get a descriptor, unmap and fall through to
  		 * the synchronous path
  		 */
  		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
  		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
  	}
  
  	/* no channel available, or failed to allocate a descriptor, so
  	 * perform the operation synchronously
  	 */
  	async_tx_quiesce(&submit->depend_tx);
  	qmul  = raid6_gfmul[coef];
  	d = page_address(dest);
  	s = page_address(src);
  
  	while (len--)
  		*d++ = qmul[*s++];
  
  	return NULL;
  }
  
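  /* __2data_recov_4 - recover two data blocks in a 4 disk (2 data + p + q)
   * array.  With no surviving data blocks the partial syndromes Pxy and
   * Qxy are zero, so p and q feed the recovery math directly.
   */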
  static struct dma_async_tx_descriptor *
  __2data_recov_4(int disks, size_t bytes, int faila, int failb,
  		struct page **blocks, struct async_submit_ctl *submit)
  {
  	struct dma_async_tx_descriptor *tx = NULL;
  	struct page *p, *q, *a, *b;
  	struct page *srcs[2];
  	unsigned char coef[2];
  	enum async_tx_flags flags = submit->flags;
  	dma_async_tx_callback cb_fn = submit->cb_fn;
  	void *cb_param = submit->cb_param;
  	void *scribble = submit->scribble;
  	p = blocks[disks-2];
  	q = blocks[disks-1];
  
  	a = blocks[faila];
  	b = blocks[failb];
  
  	/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
  	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
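  	/* A = (g^(failb-faila) + 1)^-1 comes from the exponent-xor-inverse
  	 * table raid6_gfexi; B = (g^faila + g^failb)^-1 comes from the
  	 * raid6_gfexp and raid6_gfinv tables
  	 */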
  	srcs[0] = p;
  	srcs[1] = q;
  	coef[0] = raid6_gfexi[failb-faila];
  	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_sum_product(b, srcs, coef, bytes, submit);
  
  	/* Dy = P+Pxy+Dx */
  	srcs[0] = p;
  	srcs[1] = b;
  	init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
  			  cb_param, scribble);
  	tx = async_xor(a, srcs, 0, 2, bytes, submit);
  
  	return tx;
  }
  
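  /* __2data_recov_5 - recover two data blocks when exactly one data block
   * survives.  The partial syndromes then reduce to a copy of the good
   * block (delta p) and one multiply by g^good (delta q), avoiding the
   * single-source gen_syndrome that dma engines do not uniformly support.
   */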
  static struct dma_async_tx_descriptor *
  __2data_recov_5(int disks, size_t bytes, int faila, int failb,
  		struct page **blocks, struct async_submit_ctl *submit)
  {
  	struct dma_async_tx_descriptor *tx = NULL;
  	struct page *p, *q, *g, *dp, *dq;
  	struct page *srcs[2];
  	unsigned char coef[2];
  	enum async_tx_flags flags = submit->flags;
  	dma_async_tx_callback cb_fn = submit->cb_fn;
  	void *cb_param = submit->cb_param;
  	void *scribble = submit->scribble;
  	int good_srcs, good, i;

  	good_srcs = 0;
  	good = -1;
  	for (i = 0; i < disks-2; i++) {
  		if (blocks[i] == NULL)
  			continue;
  		if (i == faila || i == failb)
  			continue;
  		good = i;
  		good_srcs++;
  	}
  	BUG_ON(good_srcs > 1);

  	p = blocks[disks-2];
  	q = blocks[disks-1];
  	g = blocks[good];
  
  	/* Compute syndrome with zero for the missing data pages
  	 * Use the dead data pages as temporary storage for delta p and
  	 * delta q
  	 */
  	dp = blocks[faila];
  	dq = blocks[failb];
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_memcpy(dp, g, 0, 0, bytes, submit);
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
  
  	/* compute P + Pxy */
  	srcs[0] = dp;
  	srcs[1] = p;
  	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
  			  NULL, NULL, scribble);
  	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
  
  	/* compute Q + Qxy */
  	srcs[0] = dq;
  	srcs[1] = q;
  	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
  			  NULL, NULL, scribble);
  	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
  
  	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
  	srcs[0] = dp;
  	srcs[1] = dq;
  	coef[0] = raid6_gfexi[failb-faila];
  	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_sum_product(dq, srcs, coef, bytes, submit);
  
  	/* Dy = P+Pxy+Dx */
  	srcs[0] = dp;
  	srcs[1] = dq;
  	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
  			  cb_param, scribble);
  	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
  
  	return tx;
  }
  
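  /* __2data_recov_n - the general case: generate the partial syndromes
   * with the failed blocks treated as zero pages, then solve for the
   * missing data with one sum-of-products and a final xor.
   */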
  static struct dma_async_tx_descriptor *
  __2data_recov_n(int disks, size_t bytes, int faila, int failb,
  	      struct page **blocks, struct async_submit_ctl *submit)
  {
  	struct dma_async_tx_descriptor *tx = NULL;
  	struct page *p, *q, *dp, *dq;
  	struct page *srcs[2];
  	unsigned char coef[2];
  	enum async_tx_flags flags = submit->flags;
  	dma_async_tx_callback cb_fn = submit->cb_fn;
  	void *cb_param = submit->cb_param;
  	void *scribble = submit->scribble;
  
  	p = blocks[disks-2];
  	q = blocks[disks-1];
  
  	/* Compute syndrome with zero for the missing data pages
  	 * Use the dead data pages as temporary storage for
  	 * delta p and delta q
  	 */
  	dp = blocks[faila];
  	blocks[faila] = NULL;
  	blocks[disks-2] = dp;
  	dq = blocks[failb];
  	blocks[failb] = NULL;
  	blocks[disks-1] = dq;
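  	/* NULL entries are treated as zero pages by async_gen_syndrome,
  	 * and pointing the p/q slots at dp/dq makes the partial syndromes
  	 * land in the dead data pages
  	 */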
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
  
  	/* Restore pointer table */
  	blocks[faila]   = dp;
  	blocks[failb]   = dq;
  	blocks[disks-2] = p;
  	blocks[disks-1] = q;
  
  	/* compute P + Pxy */
  	srcs[0] = dp;
  	srcs[1] = p;
  	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
  			  NULL, NULL, scribble);
  	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
  
  	/* compute Q + Qxy */
  	srcs[0] = dq;
  	srcs[1] = q;
  	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
  			  NULL, NULL, scribble);
  	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
  
  	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
  	srcs[0] = dp;
  	srcs[1] = dq;
  	coef[0] = raid6_gfexi[failb-faila];
  	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_sum_product(dq, srcs, coef, bytes, submit);
  
  	/* Dy = P+Pxy+Dx */
  	srcs[0] = dp;
  	srcs[1] = dq;
  	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
  			  cb_param, scribble);
  	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
  
  	return tx;
  }
  
  /**
   * async_raid6_2data_recov - asynchronously calculate two missing data blocks
   * @disks: number of disks in the RAID-6 array
   * @bytes: block size
   * @faila: first failed drive index
   * @failb: second failed drive index
   * @blocks: array of source pointers where the last two entries are p and q
   * @submit: submission/completion modifiers
   */
  struct dma_async_tx_descriptor *
  async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
  			struct page **blocks, struct async_submit_ctl *submit)
  {
  	void *scribble = submit->scribble;
  	int non_zero_srcs, i;
  	BUG_ON(faila == failb);
  	if (failb < faila)
  		swap(faila, failb);
  
  	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
  	/* if a dma resource is not available or a scribble buffer is not
  	 * available punt to the synchronous path.  In the 'dma not
  	 * available' case be sure to use the scribble buffer to
  	 * preserve the content of 'blocks' as the caller intended.
  	 */
  	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
  		void **ptrs = scribble ? scribble : (void **) blocks;
  
  		async_tx_quiesce(&submit->depend_tx);
  		for (i = 0; i < disks; i++)
  			if (blocks[i] == NULL)
  				ptrs[i] = (void *) raid6_empty_zero_page;
  			else
  				ptrs[i] = page_address(blocks[i]);
  
  		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
  
  		async_tx_sync_epilog(submit);
  
  		return NULL;
  	}
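
  	/* count the non-empty data blocks, capped at four; the failed
  	 * blocks are present as destinations while truly absent blocks
  	 * are NULL, and only the 2- and 3-block cases are special cased
  	 */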
  	non_zero_srcs = 0;
  	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
  		if (blocks[i])
  			non_zero_srcs++;
  	switch (non_zero_srcs) {
  	case 0:
  	case 1:
  		/* There must be at least 2 sources - the failed devices. */
  		BUG();
  
  	case 2:
  		/* dma devices do not uniformly understand a zero source pq
  		 * operation (in contrast to the synchronous case), so
  		 * explicitly handle the special case of a 4 disk array with
  		 * both data disks missing.
  		 */
  		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
  	case 3:
  		/* dma devices do not uniformly understand a single
  		 * source pq operation (in contrast to the synchronous
  		 * case), so explicitly handle the special case of a 5 disk
  		 * array with 2 of 3 data disks missing.
  		 */
  		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
  	default:
  		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
  	}
  }
  EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
  
  /**
   * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
   * @disks: number of disks in the RAID-6 array
   * @bytes: block size
   * @faila: failed drive index
   * @blocks: array of source pointers where the last two entries are p and q
   * @submit: submission/completion modifiers
   */
  struct dma_async_tx_descriptor *
  async_raid6_datap_recov(int disks, size_t bytes, int faila,
  			struct page **blocks, struct async_submit_ctl *submit)
  {
  	struct dma_async_tx_descriptor *tx = NULL;
  	struct page *p, *q, *dq;
  	u8 coef;
  	enum async_tx_flags flags = submit->flags;
  	dma_async_tx_callback cb_fn = submit->cb_fn;
  	void *cb_param = submit->cb_param;
  	void *scribble = submit->scribble;
  	int good_srcs, good, i;
  	struct page *srcs[2];
  
  	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
  	/* if a dma resource is not available or a scribble buffer is not
  	 * available punt to the synchronous path.  In the 'dma not
  	 * available' case be sure to use the scribble buffer to
  	 * preserve the content of 'blocks' as the caller intended.
  	 */
  	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
  		void **ptrs = scribble ? scribble : (void **) blocks;
  
  		async_tx_quiesce(&submit->depend_tx);
  		for (i = 0; i < disks; i++)
  			if (blocks[i] == NULL)
  				ptrs[i] = (void*)raid6_empty_zero_page;
  			else
  				ptrs[i] = page_address(blocks[i]);
  
  		raid6_datap_recov(disks, bytes, faila, ptrs);
  
  		async_tx_sync_epilog(submit);
  
  		return NULL;
  	}
  	good_srcs = 0;
  	good = -1;
  	for (i = 0; i < disks-2; i++) {
  		if (i == faila)
  			continue;
  		if (blocks[i]) {
  			good = i;
  			good_srcs++;
  			if (good_srcs > 1)
  				break;
  		}
  	}
  	BUG_ON(good_srcs == 0);
  	p = blocks[disks-2];
  	q = blocks[disks-1];
  
  	/* Compute syndrome with zero for the missing data page
  	 * Use the dead data page as temporary storage for delta q
  	 */
  	dq = blocks[faila];
  	blocks[faila] = NULL;
  	blocks[disks-1] = dq;
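
  	/* the missing data page is treated as zero, dq stands in for q,
  	 * and the dead p page receives its partial syndrome directly
  	 */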
  	/* in the 4-disk case we only need to perform a single source
  	 * multiplication with the one good data block.
  	 */
  	if (good_srcs == 1) {
  		struct page *g = blocks[good];
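
  		/* with one surviving source the partial p is a copy of it
  		 * and its q contribution is the source scaled by g^good
  		 */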
  		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
  				  scribble);
  		tx = async_memcpy(p, g, 0, 0, bytes, submit);
  		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
  				  scribble);
  		tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
  	} else {
  		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
  				  scribble);
  		tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
  	}
  
  	/* Restore pointer table */
  	blocks[faila]   = dq;
  	blocks[disks-1] = q;
  
  	/* calculate g^{-faila} */
  	coef = raid6_gfinv[raid6_gfexp[faila]];
  
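  	/* q + qxy equals g^faila * dx, so xor'ing the two q pages and
  	 * scaling by g^-faila (below) reconstructs the missing block
  	 */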
  	srcs[0] = dq;
  	srcs[1] = q;
  	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
  			  NULL, NULL, scribble);
  	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
  	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
  	tx = async_mult(dq, dq, coef, bytes, submit);
  
  	srcs[0] = p;
  	srcs[1] = dq;
  	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
  			  cb_param, scribble);
  	tx = async_xor(p, srcs, 0, 2, bytes, submit);
  
  	return tx;
  }
  EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
  
  MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
  MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
  MODULE_LICENSE("GPL");