Commit 112522f6789581824903f6f72082b5b841a7f0f9

Authored by Chris Wilson
Committed by Daniel Vetter
1 parent 21a8e6a485

drm/i915: put context upon switching

In order to be notified of when the context and all of its associated
objects are idle (needed if the context maps to a ppgtt) we need a callback
from the retire handler. We can arrange this by using the kref_get/put
of the context for request tracking and by inserting a request to
demarcate the switch away from the old context.

[Ben: fixed minor error to patch compile, AND s/last_context/from/]
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Showing 2 changed files with 25 additions and 14 deletions Side-by-side Diff

drivers/gpu/drm/i915/i915_gem_context.c
... ... @@ -361,13 +361,13 @@
361 361 static int do_switch(struct i915_hw_context *to)
362 362 {
363 363 struct intel_ring_buffer *ring = to->ring;
364   - struct drm_i915_gem_object *from_obj = ring->last_context_obj;
  364 + struct i915_hw_context *from = ring->last_context;
365 365 u32 hw_flags = 0;
366 366 int ret;
367 367  
368   - BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
  368 + BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
369 369  
370   - if (from_obj == to->obj)
  370 + if (from == to)
371 371 return 0;
372 372  
373 373 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
... ... @@ -390,7 +390,7 @@
390 390  
391 391 if (!to->is_initialized || is_default_context(to))
392 392 hw_flags |= MI_RESTORE_INHIBIT;
393   - else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
  393 + else if (WARN_ON_ONCE(from == to)) /* not yet expected */
394 394 hw_flags |= MI_FORCE_RESTORE;
395 395  
396 396 ret = mi_set_context(ring, to, hw_flags);
... ... @@ -405,9 +405,9 @@
405 405 * is a bit suboptimal because the retiring can occur simply after the
406 406 * MI_SET_CONTEXT instead of when the next seqno has completed.
407 407 */
408   - if (from_obj != NULL) {
409   - from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
410   - i915_gem_object_move_to_active(from_obj, ring);
  408 + if (from != NULL) {
  409 + from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
  410 + i915_gem_object_move_to_active(from->obj, ring);
411 411 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
412 412 * whole damn pipeline, we don't need to explicitly mark the
413 413 * object dirty. The only exception is that the context must be
414 414  
415 415  
... ... @@ -415,15 +415,26 @@
415 415 * able to defer doing this until we know the object would be
416 416 * swapped, but there is no way to do that yet.
417 417 */
418   - from_obj->dirty = 1;
419   - BUG_ON(from_obj->ring != ring);
420   - i915_gem_object_unpin(from_obj);
  418 + from->obj->dirty = 1;
  419 + BUG_ON(from->obj->ring != ring);
421 420  
422   - drm_gem_object_unreference(&from_obj->base);
  421 + ret = i915_add_request(ring, NULL, NULL);
  422 + if (ret) {
  423 + /* Too late, we've already scheduled a context switch.
  424 + * Try to undo the change so that the hw state is
  425 + * consistent with out tracking. In case of emergency,
  426 + * scream.
  427 + */
  428 + WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
  429 + return ret;
  430 + }
  431 +
  432 + i915_gem_object_unpin(from->obj);
  433 + i915_gem_context_unreference(from);
423 434 }
424 435  
425   - drm_gem_object_reference(&to->obj->base);
426   - ring->last_context_obj = to->obj;
  436 + i915_gem_context_reference(to);
  437 + ring->last_context = to;
427 438 to->is_initialized = true;
428 439  
429 440 return 0;
drivers/gpu/drm/i915/intel_ringbuffer.h
... ... @@ -135,7 +135,7 @@
135 135 */
136 136 bool itlb_before_ctx_switch;
137 137 struct i915_hw_context *default_context;
138   - struct drm_i915_gem_object *last_context_obj;
  138 + struct i915_hw_context *last_context;
139 139  
140 140 void *private;
141 141 };