Commit a2c06ee2fe5b48a71e697bae00c6e7195fc016b6

Authored by Dave Airlie
1 parent 63871f89d1

Revert "ttm: Include the 'struct dev' when using the DMA API."

This reverts commit 5a893fc28f0393adb7c885a871b8c59e623fd528.

This causes a use-after-free in the ttm free-alloced-pages path: that
path looks up the struct device through the backend (be) after the
backend has been destroyed.

Signed-off-by: Dave Airlie <airlied@redhat.com>
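
For context, here is a minimal sketch of the failure mode this revert removes. It is hypothetical, simplified code, not the kernel source: the *_sketch names are invented for illustration. The reverted patch made the page-free path fetch the struct device through ttm->be, but that path can run after the backend has been destroyed.

#include <linux/device.h>
#include <linux/slab.h>

/* Illustrative, simplified stand-ins for the real TTM structures. */
struct ttm_bo_device_sketch {
	struct device *dev;		/* the field this revert removes */
};

struct ttm_backend_sketch {
	struct ttm_bo_device_sketch *bdev;
};

struct ttm_tt_sketch {
	struct ttm_backend_sketch *be;
};

static void ttm_tt_teardown_sketch(struct ttm_tt_sketch *ttm)
{
	struct device *dev;

	kfree(ttm->be);			/* 1. the backend is destroyed... */

	/*
	 * 2. ...but the page-free path of the reverted patch then did
	 * the equivalent of this lookup, dereferencing freed memory:
	 */
	dev = ttm->be->bdev->dev;	/* use-after-free */
	(void)dev;
}

Dropping the struct device argument (and the ttm_bo_device dev field) removes that lookup entirely, as the diff below shows.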

Showing 7 changed files with 10 additions and 18 deletions

drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -409,7 +409,6 @@
 	if (ret)
 		return ret;
 
-	dev_priv->ttm.bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -513,7 +513,6 @@
 	if (r) {
 		return r;
 	}
-	rdev->mman.bdev.dev = rdev->dev;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -664,7 +664,7 @@
  */
 int ttm_get_pages(struct list_head *pages, int flags,
		  enum ttm_caching_state cstate, unsigned count,
-		  dma_addr_t *dma_address, struct device *dev)
+		  dma_addr_t *dma_address)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
@@ -685,7 +685,7 @@
 	for (r = 0; r < count; ++r) {
 		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
 			void *addr;
-			addr = dma_alloc_coherent(dev, PAGE_SIZE,
+			addr = dma_alloc_coherent(NULL, PAGE_SIZE,
						  &dma_address[r],
						  gfp_flags);
 			if (addr == NULL)
@@ -730,7 +730,7 @@
 			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
+			ttm_put_pages(pages, 0, flags, cstate, NULL);
 			return r;
 		}
 	}
@@ -741,8 +741,7 @@
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-		   enum ttm_caching_state cstate, dma_addr_t *dma_address,
-		   struct device *dev)
+		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
@@ -758,7 +757,7 @@
 			void *addr = page_address(p);
 			WARN_ON(!addr || !dma_address[r]);
 			if (addr)
-				dma_free_coherent(dev, PAGE_SIZE,
+				dma_free_coherent(NULL, PAGE_SIZE,
						  addr,
						  dma_address[r]);
 			dma_address[r] = 0;
drivers/gpu/drm/ttm/ttm_tt.c
@@ -110,7 +110,7 @@
 	INIT_LIST_HEAD(&h);
 
 	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
-			    &ttm->dma_address[index], ttm->be->bdev->dev);
+			    &ttm->dma_address[index]);
 
 	if (ret != 0)
 		return NULL;
@@ -304,7 +304,7 @@
 		}
 	}
 	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-		      ttm->dma_address, ttm->be->bdev->dev);
+		      ttm->dma_address);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,7 +322,7 @@
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-	dev_priv->bdev.dev = dev->dev;
+
 	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
include/drm/ttm/ttm_bo_driver.h
@@ -551,7 +551,6 @@
 	struct list_head device_list;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
-	struct device *dev;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	spinlock_t fence_lock;
include/drm/ttm/ttm_page_alloc.h
@@ -37,14 +37,12 @@
  * @cstate: ttm caching state for the page.
  * @count: number of pages to allocate.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- * @dev: struct device for appropiate DMA accounting.
  */
 int ttm_get_pages(struct list_head *pages,
		   int flags,
		   enum ttm_caching_state cstate,
		   unsigned count,
-		   dma_addr_t *dma_address,
-		   struct device *dev);
+		   dma_addr_t *dma_address);
 /**
  * Put linked list of pages to pool.
  *
 
@@ -54,14 +52,12 @@
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- * @dev: struct device for appropiate DMA accounting.
  */
 void ttm_put_pages(struct list_head *pages,
		    unsigned page_count,
		    int flags,
		    enum ttm_caching_state cstate,
-		    dma_addr_t *dma_address,
-		    struct device *dev);
+		    dma_addr_t *dma_address);
 /**
  * Initialize pool allocator.
  */
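
For reference, a minimal caller sketch against the restored prototypes above. The helper name and its flow are illustrative only, not from the kernel tree; TTM_PAGE_FLAG_DMA32 and tt_cached are assumed to come from ttm_bo_driver.h.

#include <linux/list.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

/* Illustrative helper: allocate and release one DMA32 page through
 * the restored, device-free ttm_get_pages()/ttm_put_pages() pair. */
static int one_dma32_page_sketch(dma_addr_t *dma_address)
{
	struct list_head pages;
	int ret;

	INIT_LIST_HEAD(&pages);

	/* No struct device argument any more after the revert. */
	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_DMA32, tt_cached, 1,
			    dma_address);
	if (ret)
		return ret;

	/* ...use the page(s) on the list... */

	ttm_put_pages(&pages, 1, TTM_PAGE_FLAG_DMA32, tt_cached,
		      dma_address);
	return 0;
}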