Commit 7b3cc2b1fc2066391e498f3387204908c4eced21

Authored by Dan Williams
1 parent 4499a24dec

async_tx: build-time toggling of async_{syndrome,xor}_val dma support

ioat3.2 does not support asynchronous error notifications which makes
the driver experience latencies when non-zero pq validate results are
expected.  Provide a mechanism for turning off async_xor_val and
async_syndrome_val via Kconfig.  This approach is generally useful for
any driver that specifies ASYNC_TX_DISABLE_CHANNEL_SWITCH and would like
to force the async_tx api to fall back to the synchronous path for
certain operations.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 6 changed files with 46 additions and 6 deletions Side-by-side Diff

crypto/async_tx/Kconfig
... ... @@ -22,4 +22,10 @@
22 22 tristate
23 23 select ASYNC_CORE
24 24 select ASYNC_PQ
  25 +
  26 +config ASYNC_TX_DISABLE_PQ_VAL_DMA
  27 + bool
  28 +
  29 +config ASYNC_TX_DISABLE_XOR_VAL_DMA
  30 + bool
crypto/async_tx/async_pq.c
... ... @@ -240,6 +240,16 @@
240 240 }
241 241 EXPORT_SYMBOL_GPL(async_gen_syndrome);
242 242  
  243 +static inline struct dma_chan *
  244 +pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
  245 +{
  246 + #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
  247 + return NULL;
  248 + #endif
  249 + return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
  250 + disks, len);
  251 +}
  252 +
243 253 /**
244 254 * async_syndrome_val - asynchronously validate a raid6 syndrome
245 255 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
... ... @@ -260,9 +270,7 @@
260 270 size_t len, enum sum_check_flags *pqres, struct page *spare,
261 271 struct async_submit_ctl *submit)
262 272 {
263   - struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
264   - NULL, 0, blocks, disks,
265   - len);
  273 + struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
266 274 struct dma_device *device = chan ? chan->device : NULL;
267 275 struct dma_async_tx_descriptor *tx;
268 276 unsigned char coefs[disks-2];
crypto/async_tx/async_xor.c
... ... @@ -234,6 +234,17 @@
234 234 memcmp(a, a + 4, len - 4) == 0);
235 235 }
236 236  
  237 +static inline struct dma_chan *
  238 +xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
  239 + struct page **src_list, int src_cnt, size_t len)
  240 +{
  241 + #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
  242 + return NULL;
  243 + #endif
  244 + return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
  245 + src_cnt, len);
  246 +}
  247 +
237 248 /**
238 249 * async_xor_val - attempt a xor parity check with a dma engine.
239 250 * @dest: destination page used if the xor is performed synchronously
... ... @@ -255,9 +266,7 @@
255 266 int src_cnt, size_t len, enum sum_check_flags *result,
256 267 struct async_submit_ctl *submit)
257 268 {
258   - struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
259   - &dest, 1, src_list,
260   - src_cnt, len);
  269 + struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
261 270 struct dma_device *device = chan ? chan->device : NULL;
262 271 struct dma_async_tx_descriptor *tx = NULL;
263 272 dma_addr_t *dma_src = NULL;
drivers/dma/Kconfig
... ... @@ -26,6 +26,8 @@
26 26 select DMA_ENGINE
27 27 select DCA
28 28 select ASYNC_TX_DISABLE_CHANNEL_SWITCH
  29 + select ASYNC_TX_DISABLE_PQ_VAL_DMA
  30 + select ASYNC_TX_DISABLE_XOR_VAL_DMA
29 31 help
30 32 Enable support for the Intel(R) I/OAT DMA engine present
31 33 in recent Intel Xeon chipsets.
drivers/dma/dmaengine.c
... ... @@ -632,15 +632,21 @@
632 632 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
633 633 if (!dma_has_cap(DMA_XOR, device->cap_mask))
634 634 return false;
  635 +
  636 + #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
635 637 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
636 638 return false;
637 639 #endif
  640 + #endif
638 641  
639 642 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
640 643 if (!dma_has_cap(DMA_PQ, device->cap_mask))
641 644 return false;
  645 +
  646 + #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
642 647 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
643 648 return false;
  649 + #endif
644 650 #endif
645 651  
646 652 return true;
drivers/dma/ioat/dma_v3.c
... ... @@ -1206,6 +1206,16 @@
1206 1206 device->timer_fn = ioat2_timer_event;
1207 1207 }
1208 1208  
  1209 + #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
  1210 + dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
  1211 + dma->device_prep_dma_pq_val = NULL;
  1212 + #endif
  1213 +
  1214 + #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
  1215 + dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
  1216 + dma->device_prep_dma_xor_val = NULL;
  1217 + #endif
  1218 +
1209 1219 /* -= IOAT ver.3 workarounds =- */
1210 1220 /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1211 1221 * that can cause stability issues for IOAT ver.3