Commit 2d534926205db9ffce4bbbde67cb9b2cee4b835c

Authored by Linus Torvalds

Merge tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux-2.6

Pull irqdomain changes from Grant Likely:
 "Round of refactoring and enhancements to irq_domain infrastructure.
  This series starts the process of simplifying irqdomain.  The ultimate
  goal is to merge LEGACY, LINEAR and TREE mappings into a single
  system, but I had to back off from that after some last-minute bugs.
  Instead, it mainly reorganizes the code and ensures that the reverse
  map gets populated when the irq is mapped instead of the first time it
  is looked up.

  Merging of the irq_domain types is deferred to v3.7.

  In other news, this series adds helpers for creating static mappings
  on a linear or tree mapping."

* tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux-2.6:
  irqdomain: Improve diagnostics when a domain mapping fails
  irqdomain: eliminate slow-path revmap lookups
  irqdomain: Fix irq_create_direct_mapping() to test irq_domain type.
  irqdomain: Eliminate dedicated radix lookup functions
  irqdomain: Support for static IRQ mapping and association.
  irqdomain: Always update revmap when setting up a virq
  irqdomain: Split disassociating code into separate function
  irq_domain: correct a minor wrong comment for linear revmap
  irq_domain: Standardise legacy/linear domain selection
  irqdomain: Make ops->map hook optional
  irqdomain: Remove unnecessary test for IRQ_DOMAIN_MAP_LEGACY
  irqdomain: Simple NUMA awareness.
  devicetree: add helper inline for retrieving a node's full name

Showing 7 changed files

Documentation/IRQ-domain.txt
... ... @@ -93,6 +93,7 @@
93 93 Most drivers cannot use this mapping.
94 94  
95 95 ==== Legacy ====
  96 +irq_domain_add_simple()
96 97 irq_domain_add_legacy()
97 98 irq_domain_add_legacy_isa()
98 99  
... ... @@ -115,4 +116,8 @@
115 116 supported. For example, ISA controllers would use the legacy map for
116 117 mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
117 118 numbers.
  119 +
  120 +Most users of legacy mappings should use irq_domain_add_simple() which
  121 +will use a legacy domain only if an IRQ range is supplied by the
  122 +system and will otherwise use a linear domain mapping.
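
For illustration, a hypothetical driver's probe path could use the new
helper roughly as follows. Only irq_domain_add_simple() and
irq_domain_simple_ops come from the kernel; the foo_ names and
FOO_NR_IRQS are invented for this sketch.

#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS	16	/* hypothetical number of hwirqs */

static struct irq_domain *foo_init_irq_domain(struct device_node *np,
					      unsigned int legacy_base)
{
	/*
	 * legacy_base > 0: the platform pre-allocated a block of irq
	 * descriptors, so a legacy domain is created over them.
	 * legacy_base == 0: a linear domain is used and Linux irq
	 * numbers are allocated on demand.
	 */
	return irq_domain_add_simple(np, FOO_NR_IRQS, legacy_base,
				     &irq_domain_simple_ops, NULL);
}
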
arch/powerpc/sysdev/xics/icp-hv.c
... ... @@ -111,7 +111,7 @@
111 111 if (vec == XICS_IRQ_SPURIOUS)
112 112 return NO_IRQ;
113 113  
114   - irq = irq_radix_revmap_lookup(xics_host, vec);
  114 + irq = irq_find_mapping(xics_host, vec);
115 115 if (likely(irq != NO_IRQ)) {
116 116 xics_push_cppr(vec);
117 117 return irq;
arch/powerpc/sysdev/xics/icp-native.c
... ... @@ -119,7 +119,7 @@
119 119 if (vec == XICS_IRQ_SPURIOUS)
120 120 return NO_IRQ;
121 121  
122   - irq = irq_radix_revmap_lookup(xics_host, vec);
  122 + irq = irq_find_mapping(xics_host, vec);
123 123 if (likely(irq != NO_IRQ)) {
124 124 xics_push_cppr(vec);
125 125 return irq;
arch/powerpc/sysdev/xics/xics-common.c
... ... @@ -329,9 +329,6 @@
329 329  
330 330 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
331 331  
332   - /* Insert the interrupt mapping into the radix tree for fast lookup */
333   - irq_radix_revmap_insert(xics_host, virq, hw);
334   -
335 332 /* They aren't all level sensitive but we just don't really know */
336 333 irq_set_status_flags(virq, IRQ_LEVEL);
337 334  
include/linux/irqdomain.h
... ... @@ -112,6 +112,11 @@
112 112 };
113 113  
114 114 #ifdef CONFIG_IRQ_DOMAIN
  115 +struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
  116 + unsigned int size,
  117 + unsigned int first_irq,
  118 + const struct irq_domain_ops *ops,
  119 + void *host_data);
115 120 struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
116 121 unsigned int size,
117 122 unsigned int first_irq,
... ... @@ -144,16 +149,31 @@
144 149  
145 150 extern void irq_domain_remove(struct irq_domain *host);
146 151  
  152 +extern int irq_domain_associate_many(struct irq_domain *domain,
  153 + unsigned int irq_base,
  154 + irq_hw_number_t hwirq_base, int count);
  155 +static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
  156 + irq_hw_number_t hwirq)
  157 +{
  158 + return irq_domain_associate_many(domain, irq, hwirq, 1);
  159 +}
  160 +
147 161 extern unsigned int irq_create_mapping(struct irq_domain *host,
148 162 irq_hw_number_t hwirq);
149 163 extern void irq_dispose_mapping(unsigned int virq);
150 164 extern unsigned int irq_find_mapping(struct irq_domain *host,
151 165 irq_hw_number_t hwirq);
152 166 extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
153   -extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
154   - irq_hw_number_t hwirq);
155   -extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
156   - irq_hw_number_t hwirq);
  167 +extern int irq_create_strict_mappings(struct irq_domain *domain,
  168 + unsigned int irq_base,
  169 + irq_hw_number_t hwirq_base, int count);
  170 +
  171 +static inline int irq_create_identity_mapping(struct irq_domain *host,
  172 + irq_hw_number_t hwirq)
  173 +{
  174 + return irq_create_strict_mappings(host, hwirq, hwirq, 1);
  175 +}
  176 +
157 177 extern unsigned int irq_linear_revmap(struct irq_domain *host,
158 178 irq_hw_number_t hwirq);
159 179  
include/linux/of.h
... ... @@ -21,6 +21,7 @@
21 21 #include <linux/kref.h>
22 22 #include <linux/mod_devicetable.h>
23 23 #include <linux/spinlock.h>
  24 +#include <linux/topology.h>
24 25  
25 26 #include <asm/byteorder.h>
26 27 #include <asm/errno.h>
... ... @@ -158,11 +159,6 @@
158 159  
159 160 #define OF_BAD_ADDR ((u64)-1)
160 161  
161   -#ifndef of_node_to_nid
162   -static inline int of_node_to_nid(struct device_node *np) { return -1; }
163   -#define of_node_to_nid of_node_to_nid
164   -#endif
165   -
166 162 static inline const char* of_node_full_name(struct device_node *np)
167 163 {
168 164 return np ? np->full_name : "<no-node>";
... ... @@ -426,6 +422,15 @@
426 422 #define of_property_for_each_string(np, propname, prop, s) \
427 423 while (0)
428 424 #endif /* CONFIG_OF */
  425 +
  426 +#ifndef of_node_to_nid
  427 +static inline int of_node_to_nid(struct device_node *np)
  428 +{
  429 + return numa_node_id();
  430 +}
  431 +
  432 +#define of_node_to_nid of_node_to_nid
  433 +#endif
429 434  
430 435 /**
431 436 * of_property_read_bool - Findfrom a property
kernel/irq/irqdomain.c
... ... @@ -10,6 +10,7 @@
10 10 #include <linux/mutex.h>
11 11 #include <linux/of.h>
12 12 #include <linux/of_address.h>
  13 +#include <linux/topology.h>
13 14 #include <linux/seq_file.h>
14 15 #include <linux/slab.h>
15 16 #include <linux/smp.h>
... ... @@ -45,7 +46,8 @@
45 46 {
46 47 struct irq_domain *domain;
47 48  
48   - domain = kzalloc(sizeof(*domain), GFP_KERNEL);
  49 + domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
  50 + of_node_to_nid(of_node));
49 51 if (WARN_ON(!domain))
50 52 return NULL;
51 53  
... ... @@ -138,6 +140,36 @@
138 140 }
139 141  
140 142 /**
  143 + * irq_domain_add_simple() - Allocate and register a simple irq_domain.
  144 + * @of_node: pointer to interrupt controller's device tree node.
  145 + * @size: total number of irqs in mapping
  146 + * @first_irq: first number of irq block assigned to the domain
  147 + * @ops: map/unmap domain callbacks
  148 + * @host_data: Controller private data pointer
  149 + *
  150 + * Allocates a legacy irq_domain if irq_base is positive or a linear
  151 + * domain otherwise.
  152 + *
  153 + * This is intended to implement the expected behaviour for most
  154 + * interrupt controllers which is that a linear mapping should
  155 + * normally be used unless the system requires a legacy mapping in
  156 + * order to support supplying interrupt numbers during non-DT
  157 + * registration of devices.
  158 + */
  159 +struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
  160 + unsigned int size,
  161 + unsigned int first_irq,
  162 + const struct irq_domain_ops *ops,
  163 + void *host_data)
  164 +{
  165 + if (first_irq > 0)
  166 + return irq_domain_add_legacy(of_node, size, first_irq, 0,
  167 + ops, host_data);
  168 + else
  169 + return irq_domain_add_linear(of_node, size, ops, host_data);
  170 +}
  171 +
  172 +/**
141 173 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
142 174 * @of_node: pointer to interrupt controller's device tree node.
143 175 * @size: total number of irqs in legacy mapping
... ... @@ -203,7 +235,8 @@
203 235 * one can then use irq_create_mapping() to
204 236 * explicitly change them
205 237 */
206   - ops->map(domain, irq, hwirq);
  238 + if (ops->map)
  239 + ops->map(domain, irq, hwirq);
207 240  
208 241 /* Clear norequest flags */
209 242 irq_clear_status_flags(irq, IRQ_NOREQUEST);
... ... @@ -215,7 +248,7 @@
215 248 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
216 249  
217 250 /**
218   - * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
  251 + * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
219 252 * @of_node: pointer to interrupt controller's device tree node.
220 253 * @size: Number of interrupts in the domain.
221 254 * @ops: map/unmap domain callbacks
... ... @@ -229,7 +262,8 @@
229 262 struct irq_domain *domain;
230 263 unsigned int *revmap;
231 264  
232   - revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
  265 + revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
  266 + of_node_to_nid(of_node));
233 267 if (WARN_ON(!revmap))
234 268 return NULL;
235 269  
... ... @@ -330,24 +364,112 @@
330 364 }
331 365 EXPORT_SYMBOL_GPL(irq_set_default_host);
332 366  
333   -static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
334   - irq_hw_number_t hwirq)
  367 +static void irq_domain_disassociate_many(struct irq_domain *domain,
  368 + unsigned int irq_base, int count)
335 369 {
336   - struct irq_data *irq_data = irq_get_irq_data(virq);
  370 + /*
  371 + * disassociate in reverse order;
  372 + * not strictly necessary, but nice for unwinding
  373 + */
  374 + while (count--) {
  375 + int irq = irq_base + count;
  376 + struct irq_data *irq_data = irq_get_irq_data(irq);
  377 + irq_hw_number_t hwirq = irq_data->hwirq;
337 378  
338   - irq_data->hwirq = hwirq;
339   - irq_data->domain = domain;
340   - if (domain->ops->map(domain, virq, hwirq)) {
341   - pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
  379 + if (WARN_ON(!irq_data || irq_data->domain != domain))
  380 + continue;
  381 +
  382 + irq_set_status_flags(irq, IRQ_NOREQUEST);
  383 +
  384 + /* remove chip and handler */
  385 + irq_set_chip_and_handler(irq, NULL, NULL);
  386 +
  387 + /* Make sure it's completed */
  388 + synchronize_irq(irq);
  389 +
  390 + /* Tell the PIC about it */
  391 + if (domain->ops->unmap)
  392 + domain->ops->unmap(domain, irq);
  393 + smp_mb();
  394 +
342 395 irq_data->domain = NULL;
343 396 irq_data->hwirq = 0;
344   - return -1;
  397 +
  398 + /* Clear reverse map */
  399 + switch(domain->revmap_type) {
  400 + case IRQ_DOMAIN_MAP_LINEAR:
  401 + if (hwirq < domain->revmap_data.linear.size)
  402 + domain->revmap_data.linear.revmap[hwirq] = 0;
  403 + break;
  404 + case IRQ_DOMAIN_MAP_TREE:
  405 + mutex_lock(&revmap_trees_mutex);
  406 + radix_tree_delete(&domain->revmap_data.tree, hwirq);
  407 + mutex_unlock(&revmap_trees_mutex);
  408 + break;
  409 + }
345 410 }
  411 +}
346 412  
347   - irq_clear_status_flags(virq, IRQ_NOREQUEST);
  413 +int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
  414 + irq_hw_number_t hwirq_base, int count)
  415 +{
  416 + unsigned int virq = irq_base;
  417 + irq_hw_number_t hwirq = hwirq_base;
  418 + int i, ret;
348 419  
  420 + pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
  421 + of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
  422 +
  423 + for (i = 0; i < count; i++) {
  424 + struct irq_data *irq_data = irq_get_irq_data(virq + i);
  425 +
  426 + if (WARN(!irq_data, "error: irq_desc not allocated; "
  427 + "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
  428 + return -EINVAL;
  429 + if (WARN(irq_data->domain, "error: irq_desc already associated; "
  430 + "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
  431 + return -EINVAL;
  432 + };
  433 +
  434 + for (i = 0; i < count; i++, virq++, hwirq++) {
  435 + struct irq_data *irq_data = irq_get_irq_data(virq);
  436 +
  437 + irq_data->hwirq = hwirq;
  438 + irq_data->domain = domain;
  439 + if (domain->ops->map) {
  440 + ret = domain->ops->map(domain, virq, hwirq);
  441 + if (ret != 0) {
  442 + pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
  443 + virq, hwirq, ret);
  444 + WARN_ON(1);
  445 + irq_data->domain = NULL;
  446 + irq_data->hwirq = 0;
  447 + goto err_unmap;
  448 + }
  449 + }
  450 +
  451 + switch (domain->revmap_type) {
  452 + case IRQ_DOMAIN_MAP_LINEAR:
  453 + if (hwirq < domain->revmap_data.linear.size)
  454 + domain->revmap_data.linear.revmap[hwirq] = virq;
  455 + break;
  456 + case IRQ_DOMAIN_MAP_TREE:
  457 + mutex_lock(&revmap_trees_mutex);
  458 + radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
  459 + mutex_unlock(&revmap_trees_mutex);
  460 + break;
  461 + }
  462 +
  463 + irq_clear_status_flags(virq, IRQ_NOREQUEST);
  464 + }
  465 +
349 466 return 0;
  467 +
  468 + err_unmap:
  469 + irq_domain_disassociate_many(domain, irq_base, i);
  470 + return -EINVAL;
350 471 }
  472 +EXPORT_SYMBOL_GPL(irq_domain_associate_many);
351 473  
352 474 /**
353 475 * irq_create_direct_mapping() - Allocate an irq for direct mapping
... ... @@ -364,10 +486,10 @@
364 486 if (domain == NULL)
365 487 domain = irq_default_domain;
366 488  
367   - BUG_ON(domain == NULL);
368   - WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
  489 + if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
  490 + return 0;
369 491  
370   - virq = irq_alloc_desc_from(1, 0);
  492 + virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
371 493 if (!virq) {
372 494 pr_debug("create_direct virq allocation failed\n");
373 495 return 0;
... ... @@ -380,7 +502,7 @@
380 502 }
381 503 pr_debug("create_direct obtained virq %d\n", virq);
382 504  
383   - if (irq_setup_virq(domain, virq, virq)) {
  505 + if (irq_domain_associate(domain, virq, virq)) {
384 506 irq_free_desc(virq);
385 507 return 0;
386 508 }
... ... @@ -433,17 +555,16 @@
433 555 hint = hwirq % nr_irqs;
434 556 if (hint == 0)
435 557 hint++;
436   - virq = irq_alloc_desc_from(hint, 0);
  558 + virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
437 559 if (virq <= 0)
438   - virq = irq_alloc_desc_from(1, 0);
  560 + virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
439 561 if (virq <= 0) {
440 562 pr_debug("-> virq allocation failed\n");
441 563 return 0;
442 564 }
443 565  
444   - if (irq_setup_virq(domain, virq, hwirq)) {
445   - if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
446   - irq_free_desc(virq);
  566 + if (irq_domain_associate(domain, virq, hwirq)) {
  567 + irq_free_desc(virq);
447 568 return 0;
448 569 }
449 570  
... ... @@ -454,6 +575,44 @@
454 575 }
455 576 EXPORT_SYMBOL_GPL(irq_create_mapping);
456 577  
  578 +/**
  579 + * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
  580 + * @domain: domain owning the interrupt range
  581 + * @irq_base: beginning of linux IRQ range
  582 + * @hwirq_base: beginning of hardware IRQ range
  583 + * @count: Number of interrupts to map
  584 + *
  585 + * This routine is used for allocating and mapping a range of hardware
  586 + * irqs to linux irqs where the linux irq numbers are at pre-defined
  587 + * locations. For use by controllers that already have static mappings
  588 + * to insert in to the domain.
  589 + *
  590 + * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
  591 + * domain insertion.
  592 + *
  593 + * 0 is returned upon success, while any failure to establish a static
  594 + * mapping is treated as an error.
  595 + */
  596 +int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
  597 + irq_hw_number_t hwirq_base, int count)
  598 +{
  599 + int ret;
  600 +
  601 + ret = irq_alloc_descs(irq_base, irq_base, count,
  602 + of_node_to_nid(domain->of_node));
  603 + if (unlikely(ret < 0))
  604 + return ret;
  605 +
  606 + ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
  607 + if (unlikely(ret < 0)) {
  608 + irq_free_descs(irq_base, count);
  609 + return ret;
  610 + }
  611 +
  612 + return 0;
  613 +}
  614 +EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
  615 +
457 616 unsigned int irq_create_of_mapping(struct device_node *controller,
458 617 const u32 *intspec, unsigned int intsize)
459 618 {
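
As a rough usage sketch of the helper added above: a controller whose
Linux irq numbers are fixed could pre-populate its domain as below.
irq_create_strict_mappings() and irq_create_identity_mapping() are the
interfaces from this series; the FOO_ constants, the hwirq number 63,
and the function itself are made up.

#include <linux/irqdomain.h>

#define FOO_IRQ_BASE	32	/* hypothetical fixed Linux irq range */
#define FOO_HWIRQ_BASE	0
#define FOO_IRQ_COUNT	16

static int foo_setup_static_irqs(struct irq_domain *domain)
{
	int ret;

	/* Map hwirqs 0..15 onto Linux irqs 32..47 in one call. */
	ret = irq_create_strict_mappings(domain, FOO_IRQ_BASE,
					 FOO_HWIRQ_BASE, FOO_IRQ_COUNT);
	if (ret)
		return ret;

	/* A single 1:1 entry can use the identity wrapper instead. */
	return irq_create_identity_mapping(domain, 63);
}
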
... ... @@ -511,7 +670,6 @@
511 670 {
512 671 struct irq_data *irq_data = irq_get_irq_data(virq);
513 672 struct irq_domain *domain;
514   - irq_hw_number_t hwirq;
515 673  
516 674 if (!virq || !irq_data)
517 675 return;
... ... @@ -524,33 +682,7 @@
524 682 if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
525 683 return;
526 684  
527   - irq_set_status_flags(virq, IRQ_NOREQUEST);
528   -
529   - /* remove chip and handler */
530   - irq_set_chip_and_handler(virq, NULL, NULL);
531   -
532   - /* Make sure it's completed */
533   - synchronize_irq(virq);
534   -
535   - /* Tell the PIC about it */
536   - if (domain->ops->unmap)
537   - domain->ops->unmap(domain, virq);
538   - smp_mb();
539   -
540   - /* Clear reverse map */
541   - hwirq = irq_data->hwirq;
542   - switch(domain->revmap_type) {
543   - case IRQ_DOMAIN_MAP_LINEAR:
544   - if (hwirq < domain->revmap_data.linear.size)
545   - domain->revmap_data.linear.revmap[hwirq] = 0;
546   - break;
547   - case IRQ_DOMAIN_MAP_TREE:
548   - mutex_lock(&revmap_trees_mutex);
549   - radix_tree_delete(&domain->revmap_data.tree, hwirq);
550   - mutex_unlock(&revmap_trees_mutex);
551   - break;
552   - }
553   -
  685 + irq_domain_disassociate_many(domain, virq, 1);
554 686 irq_free_desc(virq);
555 687 }
556 688 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
... ... @@ -559,16 +691,11 @@
559 691 * irq_find_mapping() - Find a linux irq from an hw irq number.
560 692 * @domain: domain owning this hardware interrupt
561 693 * @hwirq: hardware irq number in that domain space
562   - *
563   - * This is a slow path, for use by generic code. It's expected that an
564   - * irq controller implementation directly calls the appropriate low level
565   - * mapping function.
566 694 */
567 695 unsigned int irq_find_mapping(struct irq_domain *domain,
568 696 irq_hw_number_t hwirq)
569 697 {
570   - unsigned int i;
571   - unsigned int hint = hwirq % nr_irqs;
  698 + struct irq_data *data;
572 699  
573 700 /* Look for default domain if nececssary */
574 701 if (domain == NULL)
... ... @@ -576,115 +703,47 @@
576 703 if (domain == NULL)
577 704 return 0;
578 705  
579   - /* legacy -> bail early */
580   - if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
  706 + switch (domain->revmap_type) {
  707 + case IRQ_DOMAIN_MAP_LEGACY:
581 708 return irq_domain_legacy_revmap(domain, hwirq);
582   -
583   - /* Slow path does a linear search of the map */
584   - if (hint == 0)
585   - hint = 1;
586   - i = hint;
587   - do {
588   - struct irq_data *data = irq_get_irq_data(i);
  709 + case IRQ_DOMAIN_MAP_LINEAR:
  710 + return irq_linear_revmap(domain, hwirq);
  711 + case IRQ_DOMAIN_MAP_TREE:
  712 + rcu_read_lock();
  713 + data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
  714 + rcu_read_unlock();
  715 + if (data)
  716 + return data->irq;
  717 + break;
  718 + case IRQ_DOMAIN_MAP_NOMAP:
  719 + data = irq_get_irq_data(hwirq);
589 720 if (data && (data->domain == domain) && (data->hwirq == hwirq))
590   - return i;
591   - i++;
592   - if (i >= nr_irqs)
593   - i = 1;
594   - } while(i != hint);
  721 + return hwirq;
  722 + break;
  723 + }
  724 +
595 725 return 0;
596 726 }
597 727 EXPORT_SYMBOL_GPL(irq_find_mapping);
598 728  
599 729 /**
600   - * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
601   - * @domain: domain owning this hardware interrupt
602   - * @hwirq: hardware irq number in that domain space
603   - *
604   - * This is a fast path, for use by irq controller code that uses radix tree
605   - * revmaps
606   - */
607   -unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
608   - irq_hw_number_t hwirq)
609   -{
610   - struct irq_data *irq_data;
611   -
612   - if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
613   - return irq_find_mapping(domain, hwirq);
614   -
615   - /*
616   - * Freeing an irq can delete nodes along the path to
617   - * do the lookup via call_rcu.
618   - */
619   - rcu_read_lock();
620   - irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
621   - rcu_read_unlock();
622   -
623   - /*
624   - * If found in radix tree, then fine.
625   - * Else fallback to linear lookup - this should not happen in practice
626   - * as it means that we failed to insert the node in the radix tree.
627   - */
628   - return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
629   -}
630   -EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
631   -
632   -/**
633   - * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
634   - * @domain: domain owning this hardware interrupt
635   - * @virq: linux irq number
636   - * @hwirq: hardware irq number in that domain space
637   - *
638   - * This is for use by irq controllers that use a radix tree reverse
639   - * mapping for fast lookup.
640   - */
641   -void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
642   - irq_hw_number_t hwirq)
643   -{
644   - struct irq_data *irq_data = irq_get_irq_data(virq);
645   -
646   - if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
647   - return;
648   -
649   - if (virq) {
650   - mutex_lock(&revmap_trees_mutex);
651   - radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
652   - mutex_unlock(&revmap_trees_mutex);
653   - }
654   -}
655   -EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
656   -
657   -/**
658 730 * irq_linear_revmap() - Find a linux irq from a hw irq number.
659 731 * @domain: domain owning this hardware interrupt
660 732 * @hwirq: hardware irq number in that domain space
661 733 *
662   - * This is a fast path, for use by irq controller code that uses linear
663   - * revmaps. It does fallback to the slow path if the revmap doesn't exist
664   - * yet and will create the revmap entry with appropriate locking
  734 + * This is a fast path that can be called directly by irq controller code to
  735 + * save a handful of instructions.
665 736 */
666 737 unsigned int irq_linear_revmap(struct irq_domain *domain,
667 738 irq_hw_number_t hwirq)
668 739 {
669   - unsigned int *revmap;
  740 + BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
670 741  
671   - if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
672   - return irq_find_mapping(domain, hwirq);
  742 + /* Check revmap bounds; complain if exceeded */
  743 + if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
  744 + return 0;
673 745  
674   - /* Check revmap bounds */
675   - if (unlikely(hwirq >= domain->revmap_data.linear.size))
676   - return irq_find_mapping(domain, hwirq);
677   -
678   - /* Check if revmap was allocated */
679   - revmap = domain->revmap_data.linear.revmap;
680   - if (unlikely(revmap == NULL))
681   - return irq_find_mapping(domain, hwirq);
682   -
683   - /* Fill up revmap with slow path if no mapping found */
684   - if (unlikely(!revmap[hwirq]))
685   - revmap[hwirq] = irq_find_mapping(domain, hwirq);
686   -
687   - return revmap[hwirq];
  746 + return domain->revmap_data.linear.revmap[hwirq];
688 747 }
689 748 EXPORT_SYMBOL_GPL(irq_linear_revmap);
690 749  
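
For instance, interrupt entry code for a linear domain can now call the
fast path directly; out-of-range hwirqs return 0 instead of triggering
a slow lookup. The foo_ wrapper below is illustrative only, while
irq_linear_revmap() and generic_handle_irq() are existing kernel
interfaces.

#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

/*
 * Dispatch one decoded hardware irq number from a (hypothetical)
 * controller whose domain was created with irq_domain_add_linear().
 */
static void foo_handle_hwirq(struct irq_domain *domain,
			     irq_hw_number_t hwirq)
{
	unsigned int irq = irq_linear_revmap(domain, hwirq);

	if (irq)
		generic_handle_irq(irq);
}
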
... ... @@ -761,12 +820,6 @@
761 820 __initcall(irq_debugfs_init);
762 821 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
763 822  
764   -static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
765   - irq_hw_number_t hwirq)
766   -{
767   - return 0;
768   -}
769   -
770 823 /**
771 824 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
772 825 *
... ... @@ -829,7 +882,6 @@
829 882 EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
830 883  
831 884 const struct irq_domain_ops irq_domain_simple_ops = {
832   - .map = irq_domain_simple_map,
833 885 .xlate = irq_domain_xlate_onetwocell,
834 886 };
835 887 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
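
With the .map hook now optional, a controller that needs no
per-interrupt setup can supply an ops structure like the sketch below.
The foo_nomap_ops name is invented; irq_domain_xlate_onecell() is one
of the generic xlate helpers shown above.

#include <linux/irqdomain.h>

/*
 * No .map callback: irq_domain_associate_many() skips the hook when it
 * is NULL, so only the DT translation needs to be provided.
 */
static const struct irq_domain_ops foo_nomap_ops = {
	.xlate = irq_domain_xlate_onecell,
};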