Commit a7b9761d0a2ded58170ffb4d423ff3d7228103f4
Committed by
Daniel Vetter
1 parent
016fd0c1ae
Exists in
master
and in
20 other branches
drm/i915: Split i915_gem_flush_ring() into separate invalidate/flush funcs
By moving the function to intel_ringbuffer and currying the appropriate parameter, hopefully we make the callsites easier to read and understand. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Showing 5 changed files with 44 additions and 37 deletions Side-by-side Diff
drivers/gpu/drm/i915/i915_drv.h
... | ... | @@ -1256,9 +1256,6 @@ |
1256 | 1256 | struct drm_file *file_priv); |
1257 | 1257 | void i915_gem_load(struct drm_device *dev); |
1258 | 1258 | int i915_gem_init_object(struct drm_gem_object *obj); |
1259 | -int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, | |
1260 | - uint32_t invalidate_domains, | |
1261 | - uint32_t flush_domains); | |
1262 | 1259 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1263 | 1260 | size_t size); |
1264 | 1261 | void i915_gem_free_object(struct drm_gem_object *obj); |
drivers/gpu/drm/i915/i915_gem.c
... | ... | @@ -1549,14 +1549,10 @@ |
1549 | 1549 | * is that the flush _must_ happen before the next request, no matter |
1550 | 1550 | * what. |
1551 | 1551 | */ |
1552 | - if (ring->gpu_caches_dirty) { | |
1553 | - ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); | |
1554 | - if (ret) | |
1555 | - return ret; | |
1552 | + ret = intel_ring_flush_all_caches(ring); | |
1553 | + if (ret) | |
1554 | + return ret; | |
1556 | 1555 | |
1557 | - ring->gpu_caches_dirty = false; | |
1558 | - } | |
1559 | - | |
1560 | 1556 | if (request == NULL) { |
1561 | 1557 | request = kmalloc(sizeof(*request), GFP_KERNEL); |
1562 | 1558 | if (request == NULL) |
... | ... | @@ -2252,25 +2248,6 @@ |
2252 | 2248 | i915_gem_object_truncate(obj); |
2253 | 2249 | |
2254 | 2250 | return ret; |
2255 | -} | |
2256 | - | |
2257 | -int | |
2258 | -i915_gem_flush_ring(struct intel_ring_buffer *ring, | |
2259 | - uint32_t invalidate_domains, | |
2260 | - uint32_t flush_domains) | |
2261 | -{ | |
2262 | - int ret; | |
2263 | - | |
2264 | - if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) | |
2265 | - return 0; | |
2266 | - | |
2267 | - trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); | |
2268 | - | |
2269 | - ret = ring->flush(ring, invalidate_domains, flush_domains); | |
2270 | - if (ret) | |
2271 | - return ret; | |
2272 | - | |
2273 | - return 0; | |
2274 | 2251 | } |
2275 | 2252 | |
2276 | 2253 | static int i915_ring_idle(struct intel_ring_buffer *ring) |
drivers/gpu/drm/i915/i915_gem_execbuffer.c
... | ... | @@ -707,14 +707,7 @@ |
707 | 707 | /* Unconditionally invalidate gpu caches and ensure that we do flush |
708 | 708 | * any residual writes from the previous batch. |
709 | 709 | */ |
710 | - ret = i915_gem_flush_ring(ring, | |
711 | - I915_GEM_GPU_DOMAINS, | |
712 | - ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0); | |
713 | - if (ret) | |
714 | - return ret; | |
715 | - | |
716 | - ring->gpu_caches_dirty = false; | |
717 | - return 0; | |
710 | + return intel_ring_invalidate_all_caches(ring); | |
718 | 711 | } |
719 | 712 | |
720 | 713 | static bool |
drivers/gpu/drm/i915/intel_ringbuffer.c
... | ... | @@ -1564,4 +1564,42 @@ |
1564 | 1564 | |
1565 | 1565 | return intel_init_ring_buffer(dev, ring); |
1566 | 1566 | } |
1567 | + | |
1568 | +int | |
1569 | +intel_ring_flush_all_caches(struct intel_ring_buffer *ring) | |
1570 | +{ | |
1571 | + int ret; | |
1572 | + | |
1573 | + if (!ring->gpu_caches_dirty) | |
1574 | + return 0; | |
1575 | + | |
1576 | + ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); | |
1577 | + if (ret) | |
1578 | + return ret; | |
1579 | + | |
1580 | + trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); | |
1581 | + | |
1582 | + ring->gpu_caches_dirty = false; | |
1583 | + return 0; | |
1584 | +} | |
1585 | + | |
1586 | +int | |
1587 | +intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) | |
1588 | +{ | |
1589 | + uint32_t flush_domains; | |
1590 | + int ret; | |
1591 | + | |
1592 | + flush_domains = 0; | |
1593 | + if (ring->gpu_caches_dirty) | |
1594 | + flush_domains = I915_GEM_GPU_DOMAINS; | |
1595 | + | |
1596 | + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | |
1597 | + if (ret) | |
1598 | + return ret; | |
1599 | + | |
1600 | + trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | |
1601 | + | |
1602 | + ring->gpu_caches_dirty = false; | |
1603 | + return 0; | |
1604 | +} |
drivers/gpu/drm/i915/intel_ringbuffer.h
... | ... | @@ -195,6 +195,8 @@ |
195 | 195 | void intel_ring_advance(struct intel_ring_buffer *ring); |
196 | 196 | |
197 | 197 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
198 | +int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); | |
199 | +int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); | |
198 | 200 | |
199 | 201 | int intel_init_render_ring_buffer(struct drm_device *dev); |
200 | 202 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |