Commit b02bab6b0f928d49dbfb03e1e4e9dd43647623d7

Authored by NeilBrown
Committed by Vinod Koul
1 parent 16605e8d50

async_tx: use GFP_NOWAIT rather than GFP_NOIO

These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
Use GFP_NOWAIT rather than GFP_NOIO: a GFP_NOIO allocation can still
enter direct reclaim and sleep, while GFP_NOWAIT never blocks.
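
As context, a minimal sketch of the calling pattern described above
(this is not the actual md/raid5 source; run_async_tx_chain() is a
hypothetical stand-in for the real async_memcpy()/async_xor()/
async_gen_syndrome() calls):

#include <linux/gfp.h>	/* GFP_NOWAIT */
#include <linux/smp.h>	/* get_cpu()/put_cpu() */

static void run_async_tx_chain(int cpu);	/* hypothetical; would issue async_xor() etc. */

static void raid_ops_sketch(void)
{
	int cpu = get_cpu();	/* disables preemption: nothing below may sleep */

	/*
	 * The async_XX() helpers end up in dmaengine_get_unmap_data(),
	 * so the gfp mask passed down must be non-sleeping.  GFP_NOIO
	 * can still enter direct reclaim and block; GFP_NOWAIT fails
	 * fast instead.
	 */
	run_async_tx_chain(cpu);

	put_cpu();	/* re-enables preemption */
}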

Dan Williams writes: Longer term, async_tx needs to be merged into md
directly, as we can allocate this unmap data statically per-stripe
rather than per request.
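
A rough, hypothetical sketch of that direction
(alloc_stripe_unmap_sketch() and max_disks are invented names;
struct dmaengine_unmap_data is the real dmaengine type, which ends in
a flexible dma_addr_t addr[] array and carries its own kref, ignored
here):

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Size the unmap data for the worst case once, when the stripe is set
 * up (a context where GFP_KERNEL is fine), then reuse it for every
 * request so the hot path between get_cpu()/put_cpu() never allocates.
 */
static struct dmaengine_unmap_data *alloc_stripe_unmap_sketch(int max_disks)
{
	return kzalloc(sizeof(struct dmaengine_unmap_data) +
		       max_disks * sizeof(dma_addr_t), GFP_KERNEL);
}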

Fixes: 7476bd79fc01 ("async_pq: convert to dmaengine_unmap_data")
Cc: stable@vger.kernel.org (v3.13+)
Reported-and-tested-by: Stanislav Samsonov <slava@annapurnalabs.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Showing 4 changed files with 7 additions and 7 deletions

crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {