Commit 59cdb63d529c81fc8ac0620ad50f29d5fb4411c9

Authored by Daniel Vetter
1 parent 2adbee62e0

drm/i915: kill dev_priv->rps.lock

Now that the rps interrupt locking isn't clearly separated (at least
conceptually) from all the other interrupt locking having a different
lock stopped making sense: It protects much more than just the rps
workqueue it started out with. But with the addition of VECS the
separation started to blur and resulted in some more complex locking
for the ring interrupt refcount.

With this we can (again) unify the ringbuffer irq refcounts without
causing a massive confusion, but that's for the next patch.

v2: Explain better why the rps.lock once made sense and why no longer,
requested by Ben.

Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Showing 6 changed files with 23 additions and 24 deletions Side-by-side Diff

drivers/gpu/drm/i915/i915_dma.c
... ... @@ -1607,7 +1607,6 @@
1607 1607  
1608 1608 spin_lock_init(&dev_priv->irq_lock);
1609 1609 spin_lock_init(&dev_priv->gpu_error.lock);
1610   - spin_lock_init(&dev_priv->rps.lock);
1611 1610 spin_lock_init(&dev_priv->backlight.lock);
1612 1611 mutex_init(&dev_priv->dpio_lock);
1613 1612  
drivers/gpu/drm/i915/i915_drv.h
... ... @@ -743,12 +743,12 @@
743 743 };
744 744  
745 745 struct intel_gen6_power_mgmt {
  746 + /* work and pm_iir are protected by dev_priv->irq_lock */
746 747 struct work_struct work;
747   - struct delayed_work vlv_work;
748 748 u32 pm_iir;
749   - /* lock - irqsave spinlock that protectects the work_struct and
750   - * pm_iir. */
751   - spinlock_t lock;
  749 +
  750 + /* On vlv we need to manually drop to Vmin with a delayed work. */
  751 + struct delayed_work vlv_work;
752 752  
753 753 /* The below variables an all the rps hw state are protected by
754 754 * dev->struct mutext. */
drivers/gpu/drm/i915/i915_irq.c
... ... @@ -719,13 +719,13 @@
719 719 u32 pm_iir, pm_imr;
720 720 u8 new_delay;
721 721  
722   - spin_lock_irq(&dev_priv->rps.lock);
  722 + spin_lock_irq(&dev_priv->irq_lock);
723 723 pm_iir = dev_priv->rps.pm_iir;
724 724 dev_priv->rps.pm_iir = 0;
725 725 pm_imr = I915_READ(GEN6_PMIMR);
726 726 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
727 727 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
728   - spin_unlock_irq(&dev_priv->rps.lock);
  728 + spin_unlock_irq(&dev_priv->irq_lock);
729 729  
730 730 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
731 731 return;
732 732  
... ... @@ -887,11 +887,11 @@
887 887 * The mask bit in IMR is cleared by dev_priv->rps.work.
888 888 */
889 889  
890   - spin_lock(&dev_priv->rps.lock);
  890 + spin_lock(&dev_priv->irq_lock);
891 891 dev_priv->rps.pm_iir |= pm_iir;
892 892 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
893 893 POSTING_READ(GEN6_PMIMR);
894   - spin_unlock(&dev_priv->rps.lock);
  894 + spin_unlock(&dev_priv->irq_lock);
895 895  
896 896 queue_work(dev_priv->wq, &dev_priv->rps.work);
897 897 }
898 898  
... ... @@ -964,12 +964,12 @@
964 964 u32 pm_iir)
965 965 {
966 966 if (pm_iir & GEN6_PM_RPS_EVENTS) {
967   - spin_lock(&dev_priv->rps.lock);
  967 + spin_lock(&dev_priv->irq_lock);
968 968 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
969 969 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
970 970 /* never want to mask useful interrupts. (also posting read) */
971 971 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
972   - spin_unlock(&dev_priv->rps.lock);
  972 + spin_unlock(&dev_priv->irq_lock);
973 973  
974 974 queue_work(dev_priv->wq, &dev_priv->rps.work);
975 975 }
drivers/gpu/drm/i915/intel_pm.c
... ... @@ -3135,9 +3135,9 @@
3135 3135 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3136 3136 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3137 3137  
3138   - spin_lock_irq(&dev_priv->rps.lock);
  3138 + spin_lock_irq(&dev_priv->irq_lock);
3139 3139 dev_priv->rps.pm_iir = 0;
3140   - spin_unlock_irq(&dev_priv->rps.lock);
  3140 + spin_unlock_irq(&dev_priv->irq_lock);
3141 3141  
3142 3142 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3143 3143 }
3144 3144  
... ... @@ -3154,9 +3154,9 @@
3154 3154 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3155 3155 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3156 3156  
3157   - spin_lock_irq(&dev_priv->rps.lock);
  3157 + spin_lock_irq(&dev_priv->irq_lock);
3158 3158 dev_priv->rps.pm_iir = 0;
3159   - spin_unlock_irq(&dev_priv->rps.lock);
  3159 + spin_unlock_irq(&dev_priv->irq_lock);
3160 3160  
3161 3161 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
3162 3162  
3163 3163  
... ... @@ -3321,13 +3321,13 @@
3321 3321  
3322 3322 /* requires MSI enabled */
3323 3323 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
3324   - spin_lock_irq(&dev_priv->rps.lock);
  3324 + spin_lock_irq(&dev_priv->irq_lock);
3325 3325 /* FIXME: Our interrupt enabling sequence is bonghits.
3326 3326 * dev_priv->rps.pm_iir really should be 0 here. */
3327 3327 dev_priv->rps.pm_iir = 0;
3328 3328 I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3329 3329 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3330   - spin_unlock_irq(&dev_priv->rps.lock);
  3330 + spin_unlock_irq(&dev_priv->irq_lock);
3331 3331 /* unmask all PM interrupts */
3332 3332 I915_WRITE(GEN6_PMINTRMSK, 0);
3333 3333  
3334 3334  
... ... @@ -3601,10 +3601,10 @@
3601 3601  
3602 3602 /* requires MSI enabled */
3603 3603 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
3604   - spin_lock_irq(&dev_priv->rps.lock);
  3604 + spin_lock_irq(&dev_priv->irq_lock);
3605 3605 WARN_ON(dev_priv->rps.pm_iir != 0);
3606 3606 I915_WRITE(GEN6_PMIMR, 0);
3607   - spin_unlock_irq(&dev_priv->rps.lock);
  3607 + spin_unlock_irq(&dev_priv->irq_lock);
3608 3608 /* enable all PM interrupts */
3609 3609 I915_WRITE(GEN6_PMINTRMSK, 0);
3610 3610  
drivers/gpu/drm/i915/intel_ringbuffer.c
... ... @@ -1055,14 +1055,14 @@
1055 1055 if (!dev->irq_enabled)
1056 1056 return false;
1057 1057  
1058   - spin_lock_irqsave(&dev_priv->rps.lock, flags);
  1058 + spin_lock_irqsave(&dev_priv->irq_lock, flags);
1059 1059 if (ring->irq_refcount.pm++ == 0) {
1060 1060 u32 pm_imr = I915_READ(GEN6_PMIMR);
1061 1061 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1062 1062 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
1063 1063 POSTING_READ(GEN6_PMIMR);
1064 1064 }
1065   - spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
  1065 + spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1066 1066  
1067 1067 return true;
1068 1068 }
1069 1069  
... ... @@ -1077,14 +1077,14 @@
1077 1077 if (!dev->irq_enabled)
1078 1078 return;
1079 1079  
1080   - spin_lock_irqsave(&dev_priv->rps.lock, flags);
  1080 + spin_lock_irqsave(&dev_priv->irq_lock, flags);
1081 1081 if (--ring->irq_refcount.pm == 0) {
1082 1082 u32 pm_imr = I915_READ(GEN6_PMIMR);
1083 1083 I915_WRITE_IMR(ring, ~0);
1084 1084 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
1085 1085 POSTING_READ(GEN6_PMIMR);
1086 1086 }
1087   - spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
  1087 + spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1088 1088 }
1089 1089  
1090 1090 static int
drivers/gpu/drm/i915/intel_ringbuffer.h
... ... @@ -80,7 +80,7 @@
80 80  
81 81 struct {
82 82 u32 gt; /* protected by dev_priv->irq_lock */
83   - u32 pm; /* protected by dev_priv->rps.lock (sucks) */
  83 + u32 pm; /* protected by dev_priv->irq_lock */
84 84 } irq_refcount;
85 85 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
86 86 u32 trace_irq_seqno;