Commit b5eb78f76ddfa7caf4340cf6893b032f45d8114a

Authored by Yinghai Lu
Committed by H. Peter Anvin
1 parent 99558f0bbe

sparseirq: Use radix_tree instead of ptrs array

Use radix_tree irq_desc_tree instead of irq_desc_ptrs.

-v2: per suggestions from Eric and Cyrill, use radix_tree_lookup_slot and
     radix_tree_replace_slot

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1265793639-15071-32-git-send-email-yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

Showing 1 changed file with 25 additions and 24 deletions. Side-by-side Diff

... ... @@ -19,6 +19,7 @@
19 19 #include <linux/kernel_stat.h>
20 20 #include <linux/rculist.h>
21 21 #include <linux/hash.h>
  22 +#include <linux/radix-tree.h>
22 23 #include <trace/events/irq.h>
23 24  
24 25 #include "internals.h"
25 26  
... ... @@ -127,8 +128,27 @@
127 128 */
128 129 DEFINE_RAW_SPINLOCK(sparse_irq_lock);
129 130  
130   -static struct irq_desc **irq_desc_ptrs __read_mostly;
  131 +static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
131 132  
  133 +static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
  134 +{
  135 + radix_tree_insert(&irq_desc_tree, irq, desc);
  136 +}
  137 +
  138 +struct irq_desc *irq_to_desc(unsigned int irq)
  139 +{
  140 + return radix_tree_lookup(&irq_desc_tree, irq);
  141 +}
  142 +
  143 +void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
  144 +{
  145 + void **ptr;
  146 +
  147 + ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
  148 + if (ptr)
  149 + radix_tree_replace_slot(ptr, desc);
  150 +}
  151 +
132 152 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
133 153 [0 ... NR_IRQS_LEGACY-1] = {
134 154 .irq = -1,
... ... @@ -159,9 +179,6 @@
159 179 legacy_count = ARRAY_SIZE(irq_desc_legacy);
160 180 node = first_online_node;
161 181  
162   - /* allocate irq_desc_ptrs array based on nr_irqs */
163   - irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
164   -
165 182 /* allocate based on nr_cpu_ids */
166 183 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
167 184 sizeof(int), GFP_NOWAIT, node);
168 185  
169 186  
... ... @@ -175,28 +192,12 @@
175 192 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
176 193 alloc_desc_masks(&desc[i], node, true);
177 194 init_desc_masks(&desc[i]);
178   - irq_desc_ptrs[i] = desc + i;
  195 + set_irq_desc(i, &desc[i]);
179 196 }
180 197  
181   - for (i = legacy_count; i < nr_irqs; i++)
182   - irq_desc_ptrs[i] = NULL;
183   -
184 198 return arch_early_irq_init();
185 199 }
186 200  
187   -struct irq_desc *irq_to_desc(unsigned int irq)
188   -{
189   - if (irq_desc_ptrs && irq < nr_irqs)
190   - return irq_desc_ptrs[irq];
191   -
192   - return NULL;
193   -}
194   -
195   -void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
196   -{
197   - irq_desc_ptrs[irq] = desc;
198   -}
199   -
200 201 struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
201 202 {
202 203 struct irq_desc *desc;
203 204  
... ... @@ -208,14 +209,14 @@
208 209 return NULL;
209 210 }
210 211  
211   - desc = irq_desc_ptrs[irq];
  212 + desc = irq_to_desc(irq);
212 213 if (desc)
213 214 return desc;
214 215  
215 216 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
216 217  
217 218 /* We have to check it to avoid races with another CPU */
218   - desc = irq_desc_ptrs[irq];
  219 + desc = irq_to_desc(irq);
219 220 if (desc)
220 221 goto out_unlock;
221 222  
... ... @@ -228,7 +229,7 @@
228 229 }
229 230 init_one_irq_desc(irq, desc, node);
230 231  
231   - irq_desc_ptrs[irq] = desc;
  232 + set_irq_desc(irq, desc);
232 233  
233 234 out_unlock:
234 235 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);