Commit a4883ef6af5e513a1e8c2ab9aab721604aa3a4f5

Authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core irq changes from Ingo Molnar:
 "The main changes:

  - generic-irqchip driver additions, cleanups and fixes

  - 3 new irqchip drivers: ARMv7-M NVIC, TB10x and Marvell Orion SoCs

  - irq_get_trigger_type() simplification and cross-arch cleanup

  - various cleanups, simplifications

  - documentation updates"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  softirq: Use _RET_IP_
  genirq: Add the generic chip to the genirq docbook
  genirq: generic-chip: Export some irq_gc_ functions
  genirq: Fix can_request_irq() for IRQs without an action
  irqchip: exynos-combiner: Staticize combiner_init
  irqchip: Add support for ARMv7-M NVIC
  irqchip: Add TB10x interrupt controller driver
  irqdomain: Use irq_get_trigger_type() to get IRQ flags
  MIPS: octeon: Use irq_get_trigger_type() to get IRQ flags
  arm: orion: Use irq_get_trigger_type() to get IRQ flags
  mfd: stmpe: use irq_get_trigger_type() to get IRQ flags
  mfd: twl4030-irq: Use irq_get_trigger_type() to get IRQ flags
  gpio: mvebu: Use irq_get_trigger_type() to get IRQ flags
  genirq: Add irq_get_trigger_type() to get IRQ flags
  genirq: Irqchip: document gcflags arg of irq_alloc_domain_generic_chips
  genirq: Set irq thread to RT priority on creation
  irqchip: Add support for Marvell Orion SoCs
  genirq: Add kerneldoc for irq_disable.
  genirq: irqchip: Add mask to block out invalid irqs
  genirq: Generic chip: Add linear irq domain support
  ...

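The irq_get_trigger_type() conversions listed above (irqdomain, octeon, orion, stmpe, twl4030, mvebu) all follow the same pattern: the new helper added to include/linux/irq.h looks up the irq_data for an interrupt number and returns its trigger type, or 0 if no descriptor exists. A minimal sketch of the before/after shape, using a hypothetical foo_ driver name:

#include <linux/irq.h>

/* Before: fetch the irq_data by hand and read the trigger type from it. */
static u32 foo_get_type_old(unsigned int irq)
{
	return irqd_get_trigger_type(irq_get_irq_data(irq));
}

/* After: the helper does the lookup and is NULL-safe (returns 0). */
static u32 foo_get_type_new(unsigned int irq)
{
	return irq_get_trigger_type(irq);
}
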
Showing 21 changed files

Documentation/DocBook/genericirq.tmpl
... ... @@ -464,6 +464,19 @@
464 464 protected via desc->lock, by the generic layer.
465 465 </para>
466 466 </chapter>
  467 +
  468 + <chapter id="genericchip">
  469 + <title>Generic interrupt chip</title>
  470 + <para>
  471 + To avoid copies of identical implementations of irq chips the
  472 + core provides a configurable generic interrupt chip
  473 + implementation. Developers should check carefully whether the
  474 + generic chip fits their needs before implementing the same
  475 + functionality slightly differently themselves.
  476 + </para>
  477 +!Ekernel/irq/generic-chip.c
  478 + </chapter>
  479 +
467 480 <chapter id="structs">
468 481 <title>Structures</title>
469 482 <para>
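The chapter added above pulls in the kernel-doc from kernel/irq/generic-chip.c. For orientation, the NVIC, Orion and TB10x drivers further down use the new domain-based helpers in roughly the following way; this is only a sketch with hypothetical foo_ names and a made-up mask register offset, not code from this series:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS	32
#define FOO_MASK_REG	0x04	/* hypothetical mask register offset */

static int __init foo_intc_init(struct device_node *np, void __iomem *base)
{
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* Linear domain backed by the generic-chip map/xlate ops. */
	domain = irq_domain_add_linear(np, FOO_NR_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* One generic chip with a single irq_chip_type covering all irqs. */
	ret = irq_alloc_domain_generic_chips(domain, FOO_NR_IRQS, 1, "foo",
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		return ret;

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = base;
	gc->chip_types[0].regs.mask = FOO_MASK_REG;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	return 0;
}
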
Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
  1 +TB10x Top Level Interrupt Controller
  2 +====================================
  3 +
  4 +The Abilis TB10x SOC contains a custom interrupt controller. It performs
  5 +one-to-one mapping of external interrupt sources to CPU interrupts and
  6 +provides support for reconfigurable trigger modes.
  7 +
  8 +Required properties
  9 +-------------------
  10 +
  11 +- compatible: Should be "abilis,tb10x-ictl"
  12 +- reg: specifies physical base address and size of register range.
  13 +- interrupt-controller: Identifies the node as an interrupt controller.
  14 +- #interrupt-cells: Specifies the number of cells used to encode an interrupt
  15 + source connected to this controller. The value shall be 2.
  16 +- interrupt-parent: Specifies the parent interrupt controller.
  17 +- interrupts: Specifies the list of interrupt lines which are handled by
  18 + the interrupt controller in the parent controller's notation. Interrupts
  19 + are mapped one-to-one to parent interrupts.
  20 +
  21 +Example
  22 +-------
  23 +
  24 +intc: interrupt-controller { /* Parent interrupt controller */
  25 + interrupt-controller;
  26 + #interrupt-cells = <1>; /* For example below */
  27 + /* ... */
  28 +};
  29 +
  30 +tb10x_ictl: pic@2000 { /* TB10x interrupt controller */
  31 + compatible = "abilis,tb10x-ictl";
  32 + reg = <0x2000 0x20>;
  33 + interrupt-controller;
  34 + #interrupt-cells = <2>;
  35 + interrupt-parent = <&intc>;
  36 + interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
  37 + 20 21 22 23 24 25 26 27 28 29 30 31>;
  38 +};
Documentation/devicetree/bindings/interrupt-controller/marvell,orion-intc.txt
  1 +Marvell Orion SoC interrupt controllers
  2 +
  3 +* Main interrupt controller
  4 +
  5 +Required properties:
  6 +- compatible: shall be "marvell,orion-intc"
  7 +- reg: base address(es) of interrupt registers starting with CAUSE register
  8 +- interrupt-controller: identifies the node as an interrupt controller
  9 +- #interrupt-cells: number of cells to encode an interrupt source, shall be 1
  10 +
  11 +The interrupt sources map to the corresponding bits in the interrupt
  12 +registers, i.e.
  13 +- 0 maps to bit 0 of first base address,
  14 +- 1 maps to bit 1 of first base address,
  15 +- 32 maps to bit 0 of second base address, and so on.
  16 +
  17 +Example:
  18 + intc: interrupt-controller {
  19 + compatible = "marvell,orion-intc";
  20 + interrupt-controller;
  21 + #interrupt-cells = <1>;
  22 + /* Dove has 64 first level interrupts */
  23 + reg = <0x20200 0x10>, <0x20210 0x10>;
  24 + };
  25 +
  26 +* Bridge interrupt controller
  27 +
  28 +Required properties:
  29 +- compatible: shall be "marvell,orion-bridge-intc"
  30 +- reg: base address of bridge interrupt registers starting with CAUSE register
  31 +- interrupts: bridge interrupt of the main interrupt controller
  32 +- interrupt-controller: identifies the node as an interrupt controller
  33 +- #interrupt-cells: number of cells to encode an interrupt source, shall be 1
  34 +
  35 +Optional properties:
  36 +- marvell,#interrupts: number of interrupts provided by bridge interrupt
  37 + controller, defaults to 32 if not set
  38 +
  39 +Example:
  40 + bridge_intc: interrupt-controller {
  41 + compatible = "marvell,orion-bridge-intc";
  42 + interrupt-controller;
  43 + #interrupt-cells = <1>;
  44 + reg = <0x20110 0x8>;
  45 + interrupts = <0>;
  46 + /* Dove bridge provides 5 interrupts */
  47 + marvell,#interrupts = <5>;
  48 + };
arch/arm/plat-orion/gpio.c
... ... @@ -426,7 +426,7 @@
426 426 if (!(cause & (1 << i)))
427 427 continue;
428 428  
429   - type = irqd_get_trigger_type(irq_get_irq_data(irq));
  429 + type = irq_get_trigger_type(irq);
430 430 if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
431 431 /* Swap polarity (race with GPIO line) */
432 432 u32 polarity;
arch/mips/cavium-octeon/octeon-irq.c
... ... @@ -607,7 +607,7 @@
607 607  
608 608 static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
609 609 {
610   - if (irqd_get_trigger_type(irq_desc_get_irq_data(desc)) & IRQ_TYPE_EDGE_BOTH)
  610 + if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
611 611 handle_edge_irq(irq, desc);
612 612 else
613 613 handle_level_irq(irq, desc);
drivers/gpio/gpio-mvebu.c
... ... @@ -457,7 +457,7 @@
457 457 if (!(cause & (1 << i)))
458 458 continue;
459 459  
460   - type = irqd_get_trigger_type(irq_get_irq_data(irq));
  460 + type = irq_get_trigger_type(irq);
461 461 if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
462 462 /* Swap polarity (race with GPIO line) */
463 463 u32 polarity;
drivers/irqchip/Kconfig
... ... @@ -10,6 +10,11 @@
10 10 config GIC_NON_BANKED
11 11 bool
12 12  
  13 +config ARM_NVIC
  14 + bool
  15 + select IRQ_DOMAIN
  16 + select GENERIC_IRQ_CHIP
  17 +
13 18 config ARM_VIC
14 19 bool
15 20 select IRQ_DOMAIN
... ... @@ -25,6 +30,11 @@
25 30 The maximum number of VICs available in the system, for
26 31 power management.
27 32  
  33 +config ORION_IRQCHIP
  34 + bool
  35 + select IRQ_DOMAIN
  36 + select MULTI_IRQ_HANDLER
  37 +
28 38 config RENESAS_INTC_IRQPIN
29 39 bool
30 40 select IRQ_DOMAIN
... ... @@ -32,6 +42,11 @@
32 42 config RENESAS_IRQC
33 43 bool
34 44 select IRQ_DOMAIN
  45 +
  46 +config TB10X_IRQC
  47 + bool
  48 + select IRQ_DOMAIN
  49 + select GENERIC_IRQ_CHIP
35 50  
36 51 config VERSATILE_FPGA_IRQ
37 52 bool
drivers/irqchip/Makefile
... ... @@ -7,13 +7,16 @@
7 7 obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
8 8 obj-$(CONFIG_METAG) += irq-metag-ext.o
9 9 obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
  10 +obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
10 11 obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
11 12 obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
12 13 obj-$(CONFIG_ARM_GIC) += irq-gic.o
  14 +obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
13 15 obj-$(CONFIG_ARM_VIC) += irq-vic.o
14 16 obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
15 17 obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
16 18 obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
17 19 obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
18 20 obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
  21 +obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
drivers/irqchip/exynos-combiner.c
... ... @@ -204,10 +204,10 @@
204 204 return 0;
205 205 }
206 206  
207   -void __init combiner_init(void __iomem *combiner_base,
208   - struct device_node *np,
209   - unsigned int max_nr,
210   - int irq_base)
  207 +static void __init combiner_init(void __iomem *combiner_base,
  208 + struct device_node *np,
  209 + unsigned int max_nr,
  210 + int irq_base)
211 211 {
212 212 int i, irq;
213 213 unsigned int nr_irq;
drivers/irqchip/irq-nvic.c
  1 +/*
  2 + * drivers/irq/irq-nvic.c
  3 + *
  4 + * Copyright (C) 2008 ARM Limited, All Rights Reserved.
  5 + * Copyright (C) 2013 Pengutronix
  6 + *
  7 + * This program is free software; you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License version 2 as
  9 + * published by the Free Software Foundation.
  10 + *
  11 + * Support for the Nested Vectored Interrupt Controller found on the
  12 + * ARMv7-M CPUs (Cortex-M3/M4)
  13 + */
  14 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15 +
  16 +#include <linux/init.h>
  17 +#include <linux/kernel.h>
  18 +#include <linux/slab.h>
  19 +#include <linux/err.h>
  20 +#include <linux/io.h>
  21 +#include <linux/of.h>
  22 +#include <linux/of_address.h>
  23 +#include <linux/irq.h>
  24 +#include <linux/irqdomain.h>
  25 +
  26 +#include <asm/v7m.h>
  27 +#include <asm/exception.h>
  28 +
  29 +#include "irqchip.h"
  30 +
  31 +#define NVIC_ISER 0x000
  32 +#define NVIC_ICER 0x080
  33 +#define NVIC_IPR 0x300
  34 +
  35 +#define NVIC_MAX_BANKS 16
  36 +/*
  37 + * Each bank handles 32 irqs, except the 16th (= last) bank, which
  38 + * handles only 16 irqs.
  39 + */
  40 +#define NVIC_MAX_IRQ ((NVIC_MAX_BANKS - 1) * 32 + 16)
  41 +
  42 +static struct irq_domain *nvic_irq_domain;
  43 +
  44 +asmlinkage void __exception_irq_entry
  45 +nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
  46 +{
  47 + unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq);
  48 +
  49 + handle_IRQ(irq, regs);
  50 +}
  51 +
  52 +static void nvic_eoi(struct irq_data *d)
  53 +{
  54 + /*
  55 + * This is a no-op as end of interrupt is signaled by the exception
  56 + * return sequence.
  57 + */
  58 +}
  59 +
  60 +static int __init nvic_of_init(struct device_node *node,
  61 + struct device_node *parent)
  62 +{
  63 + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
  64 + unsigned int irqs, i, ret, numbanks;
  65 + void __iomem *nvic_base;
  66 +
  67 + numbanks = (readl_relaxed(V7M_SCS_ICTR) &
  68 + V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
  69 +
  70 + nvic_base = of_iomap(node, 0);
  71 + if (!nvic_base) {
  72 + pr_warn("unable to map nvic registers\n");
  73 + return -ENOMEM;
  74 + }
  75 +
  76 + irqs = numbanks * 32;
  77 + if (irqs > NVIC_MAX_IRQ)
  78 + irqs = NVIC_MAX_IRQ;
  79 +
  80 + nvic_irq_domain =
  81 + irq_domain_add_linear(node, irqs, &irq_generic_chip_ops, NULL);
  82 + if (!nvic_irq_domain) {
  83 + pr_warn("Failed to allocate irq domain\n");
  84 + return -ENOMEM;
  85 + }
  86 +
  87 + ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, numbanks,
  88 + "nvic_irq", handle_fasteoi_irq,
  89 + clr, 0, IRQ_GC_INIT_MASK_CACHE);
  90 + if (ret) {
  91 + pr_warn("Failed to allocate irq chips\n");
  92 + irq_domain_remove(nvic_irq_domain);
  93 + return ret;
  94 + }
  95 +
  96 + for (i = 0; i < numbanks; ++i) {
  97 + struct irq_chip_generic *gc;
  98 +
  99 + gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
  100 + gc->reg_base = nvic_base + 4 * i;
  101 + gc->chip_types[0].regs.enable = NVIC_ISER;
  102 + gc->chip_types[0].regs.disable = NVIC_ICER;
  103 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
  104 + gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
  105 + gc->chip_types[0].chip.irq_eoi = nvic_eoi;
  106 +
  107 + /* disable interrupts */
  108 + writel_relaxed(~0, gc->reg_base + NVIC_ICER);
  109 + }
  110 +
  111 + /* Set priority on all interrupts */
  112 + for (i = 0; i < irqs; i += 4)
  113 + writel_relaxed(0, nvic_base + NVIC_IPR + i);
  114 +
  115 + return 0;
  116 +}
  117 +IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
drivers/irqchip/irq-orion.c
  1 +/*
  2 + * Marvell Orion SoCs IRQ chip driver.
  3 + *
  4 + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  5 + *
  6 + * This file is licensed under the terms of the GNU General Public
  7 + * License version 2. This program is licensed "as is" without any
  8 + * warranty of any kind, whether express or implied.
  9 + */
  10 +
  11 +#include <linux/io.h>
  12 +#include <linux/irq.h>
  13 +#include <linux/of.h>
  14 +#include <linux/of_address.h>
  15 +#include <linux/of_irq.h>
  16 +#include <asm/exception.h>
  17 +#include <asm/mach/irq.h>
  18 +
  19 +#include "irqchip.h"
  20 +
  21 +/*
  22 + * Orion SoC main interrupt controller
  23 + */
  24 +#define ORION_IRQS_PER_CHIP 32
  25 +
  26 +#define ORION_IRQ_CAUSE 0x00
  27 +#define ORION_IRQ_MASK 0x04
  28 +#define ORION_IRQ_FIQ_MASK 0x08
  29 +#define ORION_IRQ_ENDP_MASK 0x0c
  30 +
  31 +static struct irq_domain *orion_irq_domain;
  32 +
  33 +static asmlinkage void
  34 +__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
  35 +{
  36 + struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
  37 + int n, base = 0;
  38 +
  39 + for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
  40 + struct irq_chip_generic *gc =
  41 + irq_get_domain_generic_chip(orion_irq_domain, base);
  42 + u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
  43 + gc->mask_cache;
  44 + while (stat) {
  45 + u32 hwirq = ffs(stat) - 1;
  46 + u32 irq = irq_find_mapping(orion_irq_domain,
  47 + gc->irq_base + hwirq);
  48 + handle_IRQ(irq, regs);
  49 + stat &= ~(1 << hwirq);
  50 + }
  51 + }
  52 +}
  53 +
  54 +static int __init orion_irq_init(struct device_node *np,
  55 + struct device_node *parent)
  56 +{
  57 + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
  58 + int n, ret, base, num_chips = 0;
  59 + struct resource r;
  60 +
  61 + /* count number of irq chips by valid reg addresses */
  62 + while (of_address_to_resource(np, num_chips, &r) == 0)
  63 + num_chips++;
  64 +
  65 + orion_irq_domain = irq_domain_add_linear(np,
  66 + num_chips * ORION_IRQS_PER_CHIP,
  67 + &irq_generic_chip_ops, NULL);
  68 + if (!orion_irq_domain)
  69 + panic("%s: unable to add irq domain\n", np->name);
  70 +
  71 + ret = irq_alloc_domain_generic_chips(orion_irq_domain,
  72 + ORION_IRQS_PER_CHIP, 1, np->name,
  73 + handle_level_irq, clr, 0,
  74 + IRQ_GC_INIT_MASK_CACHE);
  75 + if (ret)
  76 + panic("%s: unable to alloc irq domain gc\n", np->name);
  77 +
  78 + for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
  79 + struct irq_chip_generic *gc =
  80 + irq_get_domain_generic_chip(orion_irq_domain, base);
  81 +
  82 + of_address_to_resource(np, n, &r);
  83 +
  84 + if (!request_mem_region(r.start, resource_size(&r), np->name))
  85 + panic("%s: unable to request mem region %d",
  86 + np->name, n);
  87 +
  88 + gc->reg_base = ioremap(r.start, resource_size(&r));
  89 + if (!gc->reg_base)
  90 + panic("%s: unable to map resource %d", np->name, n);
  91 +
  92 + gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
  93 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
  94 + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
  95 +
  96 + /* mask all interrupts */
  97 + writel(0, gc->reg_base + ORION_IRQ_MASK);
  98 + }
  99 +
  100 + set_handle_irq(orion_handle_irq);
  101 + return 0;
  102 +}
  103 +IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
  104 +
  105 +/*
  106 + * Orion SoC bridge interrupt controller
  107 + */
  108 +#define ORION_BRIDGE_IRQ_CAUSE 0x00
  109 +#define ORION_BRIDGE_IRQ_MASK 0x04
  110 +
  111 +static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
  112 +{
  113 + struct irq_domain *d = irq_get_handler_data(irq);
  114 + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
  115 + u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
  116 + gc->mask_cache;
  117 +
  118 + while (stat) {
  119 + u32 hwirq = ffs(stat) - 1;
  120 +
  121 + generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
  122 + stat &= ~(1 << hwirq);
  123 + }
  124 +}
  125 +
  126 +static int __init orion_bridge_irq_init(struct device_node *np,
  127 + struct device_node *parent)
  128 +{
  129 + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
  130 + struct resource r;
  131 + struct irq_domain *domain;
  132 + struct irq_chip_generic *gc;
  133 + int ret, irq, nrirqs = 32;
  134 +
  135 + /* get optional number of interrupts provided */
  136 + of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
  137 +
  138 + domain = irq_domain_add_linear(np, nrirqs,
  139 + &irq_generic_chip_ops, NULL);
  140 + if (!domain) {
  141 + pr_err("%s: unable to add irq domain\n", np->name);
  142 + return -ENOMEM;
  143 + }
  144 +
  145 + ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
  146 + handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
  147 + if (ret) {
  148 + pr_err("%s: unable to alloc irq domain gc\n", np->name);
  149 + return ret;
  150 + }
  151 +
  152 + ret = of_address_to_resource(np, 0, &r);
  153 + if (ret) {
  154 + pr_err("%s: unable to get resource\n", np->name);
  155 + return ret;
  156 + }
  157 +
  158 + if (!request_mem_region(r.start, resource_size(&r), np->name)) {
  159 + pr_err("%s: unable to request mem region\n", np->name);
  160 + return -ENOMEM;
  161 + }
  162 +
  163 + /* Map the parent interrupt for the chained handler */
  164 + irq = irq_of_parse_and_map(np, 0);
  165 + if (irq <= 0) {
  166 + pr_err("%s: unable to parse irq\n", np->name);
  167 + return -EINVAL;
  168 + }
  169 +
  170 + gc = irq_get_domain_generic_chip(domain, 0);
  171 + gc->reg_base = ioremap(r.start, resource_size(&r));
  172 + if (!gc->reg_base) {
  173 + pr_err("%s: unable to map resource\n", np->name);
  174 + return -ENOMEM;
  175 + }
  176 +
  177 + gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
  178 + gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
  179 + gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
  180 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
  181 + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
  182 +
  183 + /* mask all interrupts */
  184 + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
  185 +
  186 + irq_set_handler_data(irq, domain);
  187 + irq_set_chained_handler(irq, orion_bridge_irq_handler);
  188 +
  189 + return 0;
  190 +}
  191 +IRQCHIP_DECLARE(orion_bridge_intc,
  192 + "marvell,orion-bridge-intc", orion_bridge_irq_init);
drivers/irqchip/irq-tb10x.c
  1 +/*
  2 + * Abilis Systems interrupt controller driver
  3 + *
  4 + * Copyright (C) Abilis Systems 2012
  5 + *
  6 + * Author: Christian Ruppert <christian.ruppert@abilis.com>
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + *
  12 + * This program is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20 + */
  21 +
  22 +#include <linux/interrupt.h>
  23 +#include <linux/irqdomain.h>
  24 +#include <linux/irq.h>
  25 +#include <linux/of_irq.h>
  26 +#include <linux/of_address.h>
  27 +#include <linux/of_platform.h>
  28 +#include <linux/io.h>
  29 +#include <linux/slab.h>
  30 +#include <linux/bitops.h>
  31 +#include "irqchip.h"
  32 +
  33 +#define AB_IRQCTL_INT_ENABLE 0x00
  34 +#define AB_IRQCTL_INT_STATUS 0x04
  35 +#define AB_IRQCTL_SRC_MODE 0x08
  36 +#define AB_IRQCTL_SRC_POLARITY 0x0C
  37 +#define AB_IRQCTL_INT_MODE 0x10
  38 +#define AB_IRQCTL_INT_POLARITY 0x14
  39 +#define AB_IRQCTL_INT_FORCE 0x18
  40 +
  41 +#define AB_IRQCTL_MAXIRQ 32
  42 +
  43 +static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
  44 + u32 val)
  45 +{
  46 + irq_reg_writel(val, gc->reg_base + reg);
  47 +}
  48 +
  49 +static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
  50 +{
  51 + return irq_reg_readl(gc->reg_base + reg);
  52 +}
  53 +
  54 +static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
  55 +{
  56 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
  57 + uint32_t im, mod, pol;
  58 +
  59 + im = data->mask;
  60 +
  61 + irq_gc_lock(gc);
  62 +
  63 + mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
  64 + pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
  65 +
  66 + switch (flow_type & IRQF_TRIGGER_MASK) {
  67 + case IRQ_TYPE_EDGE_FALLING:
  68 + pol ^= im;
  69 + break;
  70 + case IRQ_TYPE_LEVEL_HIGH:
  71 + mod ^= im;
  72 + break;
  73 + case IRQ_TYPE_NONE:
  74 + flow_type = IRQ_TYPE_LEVEL_LOW;
  75 + case IRQ_TYPE_LEVEL_LOW:
  76 + mod ^= im;
  77 + pol ^= im;
  78 + break;
  79 + case IRQ_TYPE_EDGE_RISING:
  80 + break;
  81 + default:
  82 + irq_gc_unlock(gc);
  83 + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
  84 + __func__, data->irq);
  85 + return -EBADR;
  86 + }
  87 +
  88 + irqd_set_trigger_type(data, flow_type);
  89 + irq_setup_alt_chip(data, flow_type);
  90 +
  91 + ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
  92 + ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
  93 + ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
  94 +
  95 + irq_gc_unlock(gc);
  96 +
  97 + return IRQ_SET_MASK_OK;
  98 +}
  99 +
  100 +static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc)
  101 +{
  102 + struct irq_domain *domain = irq_desc_get_handler_data(desc);
  103 +
  104 + generic_handle_irq(irq_find_mapping(domain, irq));
  105 +}
  106 +
  107 +static int __init of_tb10x_init_irq(struct device_node *ictl,
  108 + struct device_node *parent)
  109 +{
  110 + int i, ret, nrirqs = of_irq_count(ictl);
  111 + struct resource mem;
  112 + struct irq_chip_generic *gc;
  113 + struct irq_domain *domain;
  114 + void __iomem *reg_base;
  115 +
  116 + if (of_address_to_resource(ictl, 0, &mem)) {
  117 + pr_err("%s: No registers declared in DeviceTree.\n",
  118 + ictl->name);
  119 + return -EINVAL;
  120 + }
  121 +
  122 + if (!request_mem_region(mem.start, resource_size(&mem),
  123 + ictl->name)) {
  124 + pr_err("%s: Request mem region failed.\n", ictl->name);
  125 + return -EBUSY;
  126 + }
  127 +
  128 + reg_base = ioremap(mem.start, resource_size(&mem));
  129 + if (!reg_base) {
  130 + ret = -EBUSY;
  131 + pr_err("%s: ioremap failed.\n", ictl->name);
  132 + goto ioremap_fail;
  133 + }
  134 +
  135 + domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ,
  136 + &irq_generic_chip_ops, NULL);
  137 + if (!domain) {
  138 + ret = -ENOMEM;
  139 + pr_err("%s: Could not register interrupt domain.\n",
  140 + ictl->name);
  141 + goto irq_domain_add_fail;
  142 + }
  143 +
  144 + ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ,
  145 + 2, ictl->name, handle_level_irq,
  146 + IRQ_NOREQUEST, IRQ_NOPROBE,
  147 + IRQ_GC_INIT_MASK_CACHE);
  148 + if (ret) {
  149 + pr_err("%s: Could not allocate generic interrupt chip.\n",
  150 + ictl->name);
  151 + goto gc_alloc_fail;
  152 + }
  153 +
  154 + gc = domain->gc->gc[0];
  155 + gc->reg_base = reg_base;
  156 +
  157 + gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
  158 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
  159 + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
  160 + gc->chip_types[0].chip.irq_set_type = tb10x_irq_set_type;
  161 + gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE;
  162 +
  163 + gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
  164 + gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
  165 + gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
  166 + gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
  167 + gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
  168 + gc->chip_types[1].chip.irq_set_type = tb10x_irq_set_type;
  169 + gc->chip_types[1].regs.ack = AB_IRQCTL_INT_STATUS;
  170 + gc->chip_types[1].regs.mask = AB_IRQCTL_INT_ENABLE;
  171 + gc->chip_types[1].handler = handle_edge_irq;
  172 +
  173 + for (i = 0; i < nrirqs; i++) {
  174 + unsigned int irq = irq_of_parse_and_map(ictl, i);
  175 +
  176 + irq_set_handler_data(irq, domain);
  177 + irq_set_chained_handler(irq, tb10x_irq_cascade);
  178 + }
  179 +
  180 + ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
  181 + ab_irqctl_writereg(gc, AB_IRQCTL_INT_MODE, 0);
  182 + ab_irqctl_writereg(gc, AB_IRQCTL_INT_POLARITY, 0);
  183 + ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, ~0UL);
  184 +
  185 + return 0;
  186 +
  187 +gc_alloc_fail:
  188 + irq_domain_remove(domain);
  189 +irq_domain_add_fail:
  190 + iounmap(reg_base);
  191 +ioremap_fail:
  192 + release_mem_region(mem.start, resource_size(&mem));
  193 + return ret;
  194 +}
  195 +IRQCHIP_DECLARE(tb10x_intc, "abilis,tb10x-ictl", of_tb10x_init_irq);
drivers/mfd/stmpe.c
... ... @@ -1208,8 +1208,7 @@
1208 1208 }
1209 1209 stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum];
1210 1210 } else if (pdata->irq_trigger == IRQF_TRIGGER_NONE) {
1211   - pdata->irq_trigger =
1212   - irqd_get_trigger_type(irq_get_irq_data(stmpe->irq));
  1211 + pdata->irq_trigger = irq_get_trigger_type(stmpe->irq);
1213 1212 }
1214 1213  
1215 1214 ret = stmpe_chip_init(stmpe);
drivers/mfd/twl4030-irq.c
... ... @@ -537,16 +537,13 @@
537 537 /* Modify only the bits we know must change */
538 538 while (edge_change) {
539 539 int i = fls(edge_change) - 1;
540   - struct irq_data *idata;
541 540 int byte = i >> 2;
542 541 int off = (i & 0x3) * 2;
543 542 unsigned int type;
544 543  
545   - idata = irq_get_irq_data(i + agent->irq_base);
546   -
547 544 bytes[byte] &= ~(0x03 << off);
548 545  
549   - type = irqd_get_trigger_type(idata);
  546 + type = irq_get_trigger_type(i + agent->irq_base);
550 547 if (type & IRQ_TYPE_EDGE_RISING)
551 548 bytes[byte] |= BIT(off + 1);
552 549 if (type & IRQ_TYPE_EDGE_FALLING)
include/linux/irq.h
... ... @@ -119,6 +119,7 @@
119 119  
120 120 /**
121 121 * struct irq_data - per irq and irq chip data passed down to chip functions
  122 + * @mask: precomputed bitmask for accessing the chip registers
122 123 * @irq: interrupt number
123 124 * @hwirq: hardware interrupt number, local to the interrupt domain
124 125 * @node: node index useful for balancing
... ... @@ -138,6 +139,7 @@
138 139 * irq_data.
139 140 */
140 141 struct irq_data {
  142 + u32 mask;
141 143 unsigned int irq;
142 144 unsigned long hwirq;
143 145 unsigned int node;
... ... @@ -294,6 +296,7 @@
294 296 * @irq_suspend: function called from core code on suspend once per chip
295 297 * @irq_resume: function called from core code on resume once per chip
296 298 * @irq_pm_shutdown: function called from core code on shutdown once per chip
  299 + * @irq_calc_mask: Optional function to set irq_data.mask for special cases
297 300 * @irq_print_chip: optional to print special chip info in show_interrupts
298 301 * @flags: chip specific flags
299 302 */
... ... @@ -325,6 +328,8 @@
325 328 void (*irq_resume)(struct irq_data *data);
326 329 void (*irq_pm_shutdown)(struct irq_data *data);
327 330  
  331 + void (*irq_calc_mask)(struct irq_data *data);
  332 +
328 333 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
329 334  
330 335 unsigned long flags;
... ... @@ -579,6 +584,12 @@
579 584 return d->msi_desc;
580 585 }
581 586  
  587 +static inline u32 irq_get_trigger_type(unsigned int irq)
  588 +{
  589 + struct irq_data *d = irq_get_irq_data(irq);
  590 + return d ? irqd_get_trigger_type(d) : 0;
  591 +}
  592 +
582 593 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
583 594 struct module *owner);
584 595  
... ... @@ -644,6 +655,8 @@
644 655 * @regs: Register offsets for this chip
645 656 * @handler: Flow handler associated with this chip
646 657 * @type: Chip can handle these flow types
  658 + * @mask_cache_priv: Cached mask register private to the chip type
  659 + * @mask_cache: Pointer to cached mask register
647 660 *
648 661 * A irq_generic_chip can have several instances of irq_chip_type when
649 662 * it requires different functions and register offsets for different
... ... @@ -654,6 +667,8 @@
654 667 struct irq_chip_regs regs;
655 668 irq_flow_handler_t handler;
656 669 u32 type;
  670 + u32 mask_cache_priv;
  671 + u32 *mask_cache;
657 672 };
658 673  
659 674 /**
660 675  
... ... @@ -662,13 +677,16 @@
662 677 * @reg_base: Register base address (virtual)
663 678 * @irq_base: Interrupt base nr for this chip
664 679 * @irq_cnt: Number of interrupts handled by this chip
665   - * @mask_cache: Cached mask register
  680 + * @mask_cache: Cached mask register shared between all chip types
666 681 * @type_cache: Cached type register
667 682 * @polarity_cache: Cached polarity register
668 683 * @wake_enabled: Interrupt can wakeup from suspend
669 684 * @wake_active: Interrupt is marked as an wakeup from suspend source
670 685 * @num_ct: Number of available irq_chip_type instances (usually 1)
671 686 * @private: Private data for non generic chip callbacks
  687 + * @installed: bitfield to denote installed interrupts
  688 + * @unused: bitfield to denote unused interrupts
  689 + * @domain: irq domain pointer
672 690 * @list: List head for keeping track of instances
673 691 * @chip_types: Array of interrupt irq_chip_types
674 692 *
... ... @@ -690,6 +708,9 @@
690 708 u32 wake_active;
691 709 unsigned int num_ct;
692 710 void *private;
  711 + unsigned long installed;
  712 + unsigned long unused;
  713 + struct irq_domain *domain;
693 714 struct list_head list;
694 715 struct irq_chip_type chip_types[0];
695 716 };
696 717  
697 718  
... ... @@ -700,12 +721,34 @@
700 721 * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for
701 722 * irq chips which need to call irq_set_wake() on
702 723 * the parent irq. Usually GPIO implementations
  724 + * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
  725 + * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
703 726 */
704 727 enum irq_gc_flags {
705 728 IRQ_GC_INIT_MASK_CACHE = 1 << 0,
706 729 IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
  730 + IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
  731 + IRQ_GC_NO_MASK = 1 << 3,
707 732 };
708 733  
  734 +/*
  735 + * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
  736 + * @irqs_per_chip: Number of interrupts per chip
  737 + * @num_chips: Number of chips
  738 + * @irq_flags_to_set: IRQ* flags to set on irq setup
  739 + * @irq_flags_to_clear: IRQ* flags to clear on irq setup
  740 + * @gc_flags: Generic chip specific setup flags
  741 + * @gc: Array of pointers to generic interrupt chips
  742 + */
  743 +struct irq_domain_chip_generic {
  744 + unsigned int irqs_per_chip;
  745 + unsigned int num_chips;
  746 + unsigned int irq_flags_to_clear;
  747 + unsigned int irq_flags_to_set;
  748 + enum irq_gc_flags gc_flags;
  749 + struct irq_chip_generic *gc[0];
  750 +};
  751 +
709 752 /* Generic chip callback functions */
710 753 void irq_gc_noop(struct irq_data *d);
711 754 void irq_gc_mask_disable_reg(struct irq_data *d);
... ... @@ -728,6 +771,14 @@
728 771 int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
729 772 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
730 773 unsigned int clr, unsigned int set);
  774 +
  775 +struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
  776 +int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
  777 + int num_ct, const char *name,
  778 + irq_flow_handler_t handler,
  779 + unsigned int clr, unsigned int set,
  780 + enum irq_gc_flags flags);
  781 +
731 782  
732 783 static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
733 784 {
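The new irq_calc_mask callback and irq_data.mask field let the core precompute the register bit for each interrupt at setup time, instead of deriving it from d->irq - gc->irq_base in every generic-chip helper (see kernel/irq/generic-chip.c below). Chips whose register layout is not one bit per line can override the calculation; a hypothetical sketch, assuming a chip that uses two adjacent register bits per interrupt line:

#include <linux/irq.h>

/*
 * Hypothetical chip: each interrupt line occupies two adjacent bits in
 * its control register, so the default single-bit mask computed by the
 * core would be wrong.  Valid for hwirq 0..15 in this sketch.
 */
static void foo_irq_calc_mask(struct irq_data *d)
{
	d->mask = 0x3 << (d->hwirq * 2);
}

/* Hooked up on the generic chip before the irqs are mapped, e.g.:
 *	gc->chip_types[0].chip.irq_calc_mask = foo_irq_calc_mask;
 */
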
include/linux/irqdomain.h
... ... @@ -66,6 +66,10 @@
66 66 unsigned long *out_hwirq, unsigned int *out_type);
67 67 };
68 68  
  69 +extern struct irq_domain_ops irq_generic_chip_ops;
  70 +
  71 +struct irq_domain_chip_generic;
  72 +
69 73 /**
70 74 * struct irq_domain - Hardware interrupt number translation object
71 75 * @link: Element in global irq_domain list.
72 76  
... ... @@ -109,7 +113,15 @@
109 113  
110 114 /* Optional device node pointer */
111 115 struct device_node *of_node;
  116 + /* Optional pointer to generic interrupt chips */
  117 + struct irq_domain_chip_generic *gc;
112 118 };
  119 +
  120 +#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
  121 + * ie. legacy 8259, gets irqs 1..15 */
  122 +#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
  123 +#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
  124 +#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
113 125  
114 126 #ifdef CONFIG_IRQ_DOMAIN
115 127 struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
kernel/irq/chip.c
... ... @@ -213,6 +213,19 @@
213 213 irq_state_clr_masked(desc);
214 214 }
215 215  
  216 +/**
  217 + * irq_disable - Mark interupt disabled
  218 + * @desc: irq descriptor which should be disabled
  219 + *
  220 + * If the chip does not implement the irq_disable callback, we
  221 + * use a lazy disable approach. That means we mark the interrupt
  222 + * disabled, but leave the hardware unmasked. That's an
  223 + * optimization because we avoid the hardware access for the
  224 + * common case where no interrupt happens after we marked it
  225 + * disabled. If an interrupt happens, then the interrupt flow
  226 + * handler masks the line at the hardware level and marks it
  227 + * pending.
  228 + */
216 229 void irq_disable(struct irq_desc *desc)
217 230 {
218 231 irq_state_set_disabled(desc);
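The lazy-disable behaviour documented in the new kerneldoc means an irq_chip that provides no irq_disable callback gets the optimization for free; a chip that must mask the line in hardware immediately can simply wire irq_disable to its mask function. A hedged sketch with hypothetical foo_ names and register layout:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>

#define FOO_IRQ_MASK_SET	0x10	/* hypothetical mask-set register */

static void __iomem *foo_base;		/* mapped elsewhere in the driver */

static void foo_irq_mask(struct irq_data *d)
{
	writel_relaxed(BIT(d->hwirq), foo_base + FOO_IRQ_MASK_SET);
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	/*
	 * Without .irq_disable the core only marks the interrupt disabled
	 * and masks lazily from the flow handler; setting it forces an
	 * immediate hardware mask instead.
	 */
	.irq_disable	= foo_irq_mask,
};
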
kernel/irq/generic-chip.c
... ... @@ -7,6 +7,7 @@
7 7 #include <linux/irq.h>
8 8 #include <linux/slab.h>
9 9 #include <linux/export.h>
  10 +#include <linux/irqdomain.h>
10 11 #include <linux/interrupt.h>
11 12 #include <linux/kernel_stat.h>
12 13 #include <linux/syscore_ops.h>
... ... @@ -16,11 +17,6 @@
16 17 static LIST_HEAD(gc_list);
17 18 static DEFINE_RAW_SPINLOCK(gc_lock);
18 19  
19   -static inline struct irq_chip_regs *cur_regs(struct irq_data *d)
20   -{
21   - return &container_of(d->chip, struct irq_chip_type, chip)->regs;
22   -}
23   -
24 20 /**
25 21 * irq_gc_noop - NOOP function
26 22 * @d: irq_data
27 23  
28 24  
... ... @@ -39,16 +35,17 @@
39 35 void irq_gc_mask_disable_reg(struct irq_data *d)
40 36 {
41 37 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
42   - u32 mask = 1 << (d->irq - gc->irq_base);
  38 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  39 + u32 mask = d->mask;
43 40  
44 41 irq_gc_lock(gc);
45   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable);
46   - gc->mask_cache &= ~mask;
  42 + irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
  43 + *ct->mask_cache &= ~mask;
47 44 irq_gc_unlock(gc);
48 45 }
49 46  
50 47 /**
51   - * irq_gc_mask_set_mask_bit - Mask chip via setting bit in mask register
  48 + * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
52 49 * @d: irq_data
53 50 *
54 51 * Chip has a single mask register. Values of this register are cached
55 52  
56 53  
57 54  
... ... @@ -57,16 +54,18 @@
57 54 void irq_gc_mask_set_bit(struct irq_data *d)
58 55 {
59 56 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
60   - u32 mask = 1 << (d->irq - gc->irq_base);
  57 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  58 + u32 mask = d->mask;
61 59  
62 60 irq_gc_lock(gc);
63   - gc->mask_cache |= mask;
64   - irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
  61 + *ct->mask_cache |= mask;
  62 + irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
65 63 irq_gc_unlock(gc);
66 64 }
  65 +EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
67 66  
68 67 /**
69   - * irq_gc_mask_set_mask_bit - Mask chip via clearing bit in mask register
  68 + * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
70 69 * @d: irq_data
71 70 *
72 71 * Chip has a single mask register. Values of this register are cached
73 72  
74 73  
... ... @@ -75,13 +74,15 @@
75 74 void irq_gc_mask_clr_bit(struct irq_data *d)
76 75 {
77 76 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
78   - u32 mask = 1 << (d->irq - gc->irq_base);
  77 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  78 + u32 mask = d->mask;
79 79  
80 80 irq_gc_lock(gc);
81   - gc->mask_cache &= ~mask;
82   - irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
  81 + *ct->mask_cache &= ~mask;
  82 + irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
83 83 irq_gc_unlock(gc);
84 84 }
  85 +EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
85 86  
86 87 /**
87 88 * irq_gc_unmask_enable_reg - Unmask chip via enable register
88 89  
... ... @@ -93,11 +94,12 @@
93 94 void irq_gc_unmask_enable_reg(struct irq_data *d)
94 95 {
95 96 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
96   - u32 mask = 1 << (d->irq - gc->irq_base);
  97 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  98 + u32 mask = d->mask;
97 99  
98 100 irq_gc_lock(gc);
99   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable);
100   - gc->mask_cache |= mask;
  101 + irq_reg_writel(mask, gc->reg_base + ct->regs.enable);
  102 + *ct->mask_cache |= mask;
101 103 irq_gc_unlock(gc);
102 104 }
103 105  
104 106  
105 107  
... ... @@ -108,12 +110,14 @@
108 110 void irq_gc_ack_set_bit(struct irq_data *d)
109 111 {
110 112 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
111   - u32 mask = 1 << (d->irq - gc->irq_base);
  113 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  114 + u32 mask = d->mask;
112 115  
113 116 irq_gc_lock(gc);
114   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
  117 + irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
115 118 irq_gc_unlock(gc);
116 119 }
  120 +EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
117 121  
118 122 /**
119 123 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
120 124  
... ... @@ -122,10 +126,11 @@
122 126 void irq_gc_ack_clr_bit(struct irq_data *d)
123 127 {
124 128 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
125   - u32 mask = ~(1 << (d->irq - gc->irq_base));
  129 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  130 + u32 mask = ~d->mask;
126 131  
127 132 irq_gc_lock(gc);
128   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
  133 + irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
129 134 irq_gc_unlock(gc);
130 135 }
131 136  
132 137  
... ... @@ -136,11 +141,12 @@
136 141 void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
137 142 {
138 143 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
139   - u32 mask = 1 << (d->irq - gc->irq_base);
  144 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  145 + u32 mask = d->mask;
140 146  
141 147 irq_gc_lock(gc);
142   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask);
143   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
  148 + irq_reg_writel(mask, gc->reg_base + ct->regs.mask);
  149 + irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
144 150 irq_gc_unlock(gc);
145 151 }
146 152  
147 153  
148 154  
... ... @@ -151,16 +157,18 @@
151 157 void irq_gc_eoi(struct irq_data *d)
152 158 {
153 159 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
154   - u32 mask = 1 << (d->irq - gc->irq_base);
  160 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
  161 + u32 mask = d->mask;
155 162  
156 163 irq_gc_lock(gc);
157   - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi);
  164 + irq_reg_writel(mask, gc->reg_base + ct->regs.eoi);
158 165 irq_gc_unlock(gc);
159 166 }
160 167  
161 168 /**
162 169 * irq_gc_set_wake - Set/clr wake bit for an interrupt
163   - * @d: irq_data
  170 + * @d: irq_data
  171 + * @on: Indicates whether the wake bit should be set or cleared
164 172 *
165 173 * For chips where the wake from suspend functionality is not
166 174 * configured in a separate register and the wakeup active state is
... ... @@ -169,7 +177,7 @@
169 177 int irq_gc_set_wake(struct irq_data *d, unsigned int on)
170 178 {
171 179 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
172   - u32 mask = 1 << (d->irq - gc->irq_base);
  180 + u32 mask = d->mask;
173 181  
174 182 if (!(mask & gc->wake_enabled))
175 183 return -EINVAL;
... ... @@ -183,6 +191,19 @@
183 191 return 0;
184 192 }
185 193  
  194 +static void
  195 +irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
  196 + int num_ct, unsigned int irq_base,
  197 + void __iomem *reg_base, irq_flow_handler_t handler)
  198 +{
  199 + raw_spin_lock_init(&gc->lock);
  200 + gc->num_ct = num_ct;
  201 + gc->irq_base = irq_base;
  202 + gc->reg_base = reg_base;
  203 + gc->chip_types->chip.name = name;
  204 + gc->chip_types->handler = handler;
  205 +}
  206 +
186 207 /**
187 208 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
188 209 * @name: Name of the irq chip
189 210  
190 211  
... ... @@ -203,23 +224,185 @@
203 224  
204 225 gc = kzalloc(sz, GFP_KERNEL);
205 226 if (gc) {
206   - raw_spin_lock_init(&gc->lock);
207   - gc->num_ct = num_ct;
208   - gc->irq_base = irq_base;
209   - gc->reg_base = reg_base;
210   - gc->chip_types->chip.name = name;
211   - gc->chip_types->handler = handler;
  227 + irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
  228 + handler);
212 229 }
213 230 return gc;
214 231 }
215 232 EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
216 233  
  234 +static void
  235 +irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
  236 +{
  237 + struct irq_chip_type *ct = gc->chip_types;
  238 + u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
  239 + int i;
  240 +
  241 + for (i = 0; i < gc->num_ct; i++) {
  242 + if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
  243 + mskptr = &ct[i].mask_cache_priv;
  244 + mskreg = ct[i].regs.mask;
  245 + }
  246 + ct[i].mask_cache = mskptr;
  247 + if (flags & IRQ_GC_INIT_MASK_CACHE)
  248 + *mskptr = irq_reg_readl(gc->reg_base + mskreg);
  249 + }
  250 +}
  251 +
  252 +/**
  253 + * irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
  254 + * @d: irq domain for which to allocate chips
  255 + * @irqs_per_chip: Number of interrupts each chip handles
  256 + * @num_ct: Number of irq_chip_type instances associated with this
  257 + * @name: Name of the irq chip
  258 + * @handler: Default flow handler associated with these chips
  259 + * @clr: IRQ_* bits to clear in the mapping function
  260 + * @set: IRQ_* bits to set in the mapping function
  261 + * @gcflags: Generic chip specific setup flags
  262 + */
  263 +int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
  264 + int num_ct, const char *name,
  265 + irq_flow_handler_t handler,
  266 + unsigned int clr, unsigned int set,
  267 + enum irq_gc_flags gcflags)
  268 +{
  269 + struct irq_domain_chip_generic *dgc;
  270 + struct irq_chip_generic *gc;
  271 + int numchips, sz, i;
  272 + unsigned long flags;
  273 + void *tmp;
  274 +
  275 + if (d->gc)
  276 + return -EBUSY;
  277 +
  278 + if (d->revmap_type != IRQ_DOMAIN_MAP_LINEAR)
  279 + return -EINVAL;
  280 +
  281 + numchips = d->revmap_data.linear.size / irqs_per_chip;
  282 + if (!numchips)
  283 + return -EINVAL;
  284 +
  285 + /* Allocate a pointer, generic chip and chiptypes for each chip */
  286 + sz = sizeof(*dgc) + numchips * sizeof(gc);
  287 + sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
  288 +
  289 + tmp = dgc = kzalloc(sz, GFP_KERNEL);
  290 + if (!dgc)
  291 + return -ENOMEM;
  292 + dgc->irqs_per_chip = irqs_per_chip;
  293 + dgc->num_chips = numchips;
  294 + dgc->irq_flags_to_set = set;
  295 + dgc->irq_flags_to_clear = clr;
  296 + dgc->gc_flags = gcflags;
  297 + d->gc = dgc;
  298 +
  299 + /* Calc pointer to the first generic chip */
  300 + tmp += sizeof(*dgc) + numchips * sizeof(gc);
  301 + for (i = 0; i < numchips; i++) {
  302 + /* Store the pointer to the generic chip */
  303 + dgc->gc[i] = gc = tmp;
  304 + irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
  305 + NULL, handler);
  306 + gc->domain = d;
  307 + raw_spin_lock_irqsave(&gc_lock, flags);
  308 + list_add_tail(&gc->list, &gc_list);
  309 + raw_spin_unlock_irqrestore(&gc_lock, flags);
  310 + /* Calc pointer to the next generic chip */
  311 + tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
  312 + }
  313 + return 0;
  314 +}
  315 +EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
  316 +
  317 +/**
  318 + * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
  319 + * @d: irq domain pointer
  320 + * @hw_irq: Hardware interrupt number
  321 + */
  322 +struct irq_chip_generic *
  323 +irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
  324 +{
  325 + struct irq_domain_chip_generic *dgc = d->gc;
  326 + int idx;
  327 +
  328 + if (!dgc)
  329 + return NULL;
  330 + idx = hw_irq / dgc->irqs_per_chip;
  331 + if (idx >= dgc->num_chips)
  332 + return NULL;
  333 + return dgc->gc[idx];
  334 +}
  335 +EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
  336 +
217 337 /*
218 338 * Separate lockdep class for interrupt chip which can nest irq_desc
219 339 * lock.
220 340 */
221 341 static struct lock_class_key irq_nested_lock_class;
222 342  
  343 +/*
  344 + * irq_map_generic_chip - Map a generic chip for an irq domain
  345 + */
  346 +static int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
  347 + irq_hw_number_t hw_irq)
  348 +{
  349 + struct irq_data *data = irq_get_irq_data(virq);
  350 + struct irq_domain_chip_generic *dgc = d->gc;
  351 + struct irq_chip_generic *gc;
  352 + struct irq_chip_type *ct;
  353 + struct irq_chip *chip;
  354 + unsigned long flags;
  355 + int idx;
  356 +
  357 + if (!d->gc)
  358 + return -ENODEV;
  359 +
  360 + idx = hw_irq / dgc->irqs_per_chip;
  361 + if (idx >= dgc->num_chips)
  362 + return -EINVAL;
  363 + gc = dgc->gc[idx];
  364 +
  365 + idx = hw_irq % dgc->irqs_per_chip;
  366 +
  367 + if (test_bit(idx, &gc->unused))
  368 + return -ENOTSUPP;
  369 +
  370 + if (test_bit(idx, &gc->installed))
  371 + return -EBUSY;
  372 +
  373 + ct = gc->chip_types;
  374 + chip = &ct->chip;
  375 +
  376 + /* We only init the cache for the first mapping of a generic chip */
  377 + if (!gc->installed) {
  378 + raw_spin_lock_irqsave(&gc->lock, flags);
  379 + irq_gc_init_mask_cache(gc, dgc->gc_flags);
  380 + raw_spin_unlock_irqrestore(&gc->lock, flags);
  381 + }
  382 +
  383 + /* Mark the interrupt as installed */
  384 + set_bit(idx, &gc->installed);
  385 +
  386 + if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
  387 + irq_set_lockdep_class(virq, &irq_nested_lock_class);
  388 +
  389 + if (chip->irq_calc_mask)
  390 + chip->irq_calc_mask(data);
  391 + else
  392 + data->mask = 1 << idx;
  393 +
  394 + irq_set_chip_and_handler(virq, chip, ct->handler);
  395 + irq_set_chip_data(virq, gc);
  396 + irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
  397 + return 0;
  398 +}
  399 +
  400 +struct irq_domain_ops irq_generic_chip_ops = {
  401 + .map = irq_map_generic_chip,
  402 + .xlate = irq_domain_xlate_onetwocell,
  403 +};
  404 +EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
  405 +
223 406 /**
224 407 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
225 408 * @gc: Generic irq chip holding all data
226 409  
... ... @@ -237,15 +420,14 @@
237 420 unsigned int set)
238 421 {
239 422 struct irq_chip_type *ct = gc->chip_types;
  423 + struct irq_chip *chip = &ct->chip;
240 424 unsigned int i;
241 425  
242 426 raw_spin_lock(&gc_lock);
243 427 list_add_tail(&gc->list, &gc_list);
244 428 raw_spin_unlock(&gc_lock);
245 429  
246   - /* Init mask cache ? */
247   - if (flags & IRQ_GC_INIT_MASK_CACHE)
248   - gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
  430 + irq_gc_init_mask_cache(gc, flags);
249 431  
250 432 for (i = gc->irq_base; msk; msk >>= 1, i++) {
251 433 if (!(msk & 0x01))
... ... @@ -254,7 +436,15 @@
254 436 if (flags & IRQ_GC_INIT_NESTED_LOCK)
255 437 irq_set_lockdep_class(i, &irq_nested_lock_class);
256 438  
257   - irq_set_chip_and_handler(i, &ct->chip, ct->handler);
  439 + if (!(flags & IRQ_GC_NO_MASK)) {
  440 + struct irq_data *d = irq_get_irq_data(i);
  441 +
  442 + if (chip->irq_calc_mask)
  443 + chip->irq_calc_mask(d);
  444 + else
  445 + d->mask = 1 << (i - gc->irq_base);
  446 + }
  447 + irq_set_chip_and_handler(i, chip, ct->handler);
258 448 irq_set_chip_data(i, gc);
259 449 irq_modify_status(i, clr, set);
260 450 }
... ... @@ -265,7 +455,7 @@
265 455 /**
266 456 * irq_setup_alt_chip - Switch to alternative chip
267 457 * @d: irq_data for this interrupt
268   - * @type Flow type to be initialized
  458 + * @type: Flow type to be initialized
269 459 *
270 460 * Only to be called from chip->irq_set_type() callbacks.
271 461 */
... ... @@ -317,6 +507,24 @@
317 507 }
318 508 EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
319 509  
  510 +static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
  511 +{
  512 + unsigned int virq;
  513 +
  514 + if (!gc->domain)
  515 + return irq_get_irq_data(gc->irq_base);
  516 +
  517 + /*
  518 + * We don't know which of the irqs has been actually
  519 + * installed. Use the first one.
  520 + */
  521 + if (!gc->installed)
  522 + return NULL;
  523 +
  524 + virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
  525 + return virq ? irq_get_irq_data(virq) : NULL;
  526 +}
  527 +
320 528 #ifdef CONFIG_PM
321 529 static int irq_gc_suspend(void)
322 530 {
... ... @@ -325,8 +533,12 @@
325 533 list_for_each_entry(gc, &gc_list, list) {
326 534 struct irq_chip_type *ct = gc->chip_types;
327 535  
328   - if (ct->chip.irq_suspend)
329   - ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base));
  536 + if (ct->chip.irq_suspend) {
  537 + struct irq_data *data = irq_gc_get_irq_data(gc);
  538 +
  539 + if (data)
  540 + ct->chip.irq_suspend(data);
  541 + }
330 542 }
331 543 return 0;
332 544 }
... ... @@ -338,8 +550,12 @@
338 550 list_for_each_entry(gc, &gc_list, list) {
339 551 struct irq_chip_type *ct = gc->chip_types;
340 552  
341   - if (ct->chip.irq_resume)
342   - ct->chip.irq_resume(irq_get_irq_data(gc->irq_base));
  553 + if (ct->chip.irq_resume) {
  554 + struct irq_data *data = irq_gc_get_irq_data(gc);
  555 +
  556 + if (data)
  557 + ct->chip.irq_resume(data);
  558 + }
343 559 }
344 560 }
345 561 #else
... ... @@ -354,8 +570,12 @@
354 570 list_for_each_entry(gc, &gc_list, list) {
355 571 struct irq_chip_type *ct = gc->chip_types;
356 572  
357   - if (ct->chip.irq_pm_shutdown)
358   - ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base));
  573 + if (ct->chip.irq_pm_shutdown) {
  574 + struct irq_data *data = irq_gc_get_irq_data(gc);
  575 +
  576 + if (data)
  577 + ct->chip.irq_pm_shutdown(data);
  578 + }
359 579 }
360 580 }
361 581  
kernel/irq/irqdomain.c
... ... @@ -16,12 +16,6 @@
16 16 #include <linux/smp.h>
17 17 #include <linux/fs.h>
18 18  
19   -#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
20   - * ie. legacy 8259, gets irqs 1..15 */
21   -#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
22   -#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
23   -#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
24   -
25 19 static LIST_HEAD(irq_domain_list);
26 20 static DEFINE_MUTEX(irq_domain_mutex);
27 21  
... ... @@ -698,7 +692,7 @@
698 692  
699 693 /* Set type if specified and different than the current one */
700 694 if (type != IRQ_TYPE_NONE &&
701   - type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
  695 + type != irq_get_trigger_type(virq))
702 696 irq_set_irq_type(virq, type);
703 697 return virq;
704 698 }
kernel/irq/manage.c
... ... @@ -555,9 +555,9 @@
555 555 return 0;
556 556  
557 557 if (irq_settings_can_request(desc)) {
558   - if (desc->action)
559   - if (irqflags & desc->action->flags & IRQF_SHARED)
560   - canrequest =1;
  558 + if (!desc->action ||
  559 + irqflags & desc->action->flags & IRQF_SHARED)
  560 + canrequest = 1;
561 561 }
562 562 irq_put_desc_unlock(desc, flags);
563 563 return canrequest;
... ... @@ -840,9 +840,6 @@
840 840 static int irq_thread(void *data)
841 841 {
842 842 struct callback_head on_exit_work;
843   - static const struct sched_param param = {
844   - .sched_priority = MAX_USER_RT_PRIO/2,
845   - };
846 843 struct irqaction *action = data;
847 844 struct irq_desc *desc = irq_to_desc(action->irq);
848 845 irqreturn_t (*handler_fn)(struct irq_desc *desc,
... ... @@ -854,8 +851,6 @@
854 851 else
855 852 handler_fn = irq_thread_fn;
856 853  
857   - sched_setscheduler(current, SCHED_FIFO, &param);
858   -
859 854 init_task_work(&on_exit_work, irq_thread_dtor);
860 855 task_work_add(current, &on_exit_work, false);
861 856  
... ... @@ -950,6 +945,9 @@
950 945 */
951 946 if (new->thread_fn && !nested) {
952 947 struct task_struct *t;
  948 + static const struct sched_param param = {
  949 + .sched_priority = MAX_USER_RT_PRIO/2,
  950 + };
953 951  
954 952 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
955 953 new->name);
... ... @@ -957,6 +955,9 @@
957 955 ret = PTR_ERR(t);
958 956 goto out_mput;
959 957 }
  958 +
  959 + sched_setscheduler(t, SCHED_FIFO, &param);
  960 +
960 961 /*
961 962 * We keep the reference to the task struct even if
962 963 * the thread dies to avoid that the interrupt code
kernel/softirq.c
... ... @@ -127,8 +127,7 @@
127 127  
128 128 void local_bh_disable(void)
129 129 {
130   - __local_bh_disable((unsigned long)__builtin_return_address(0),
131   - SOFTIRQ_DISABLE_OFFSET);
  130 + __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
132 131 }
133 132  
134 133 EXPORT_SYMBOL(local_bh_disable);
... ... @@ -139,7 +138,7 @@
139 138 WARN_ON_ONCE(!irqs_disabled());
140 139  
141 140 if (softirq_count() == cnt)
142   - trace_softirqs_on((unsigned long)__builtin_return_address(0));
  141 + trace_softirqs_on(_RET_IP_);
143 142 sub_preempt_count(cnt);
144 143 }
145 144  
... ... @@ -184,7 +183,7 @@
184 183  
185 184 void local_bh_enable(void)
186 185 {
187   - _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
  186 + _local_bh_enable_ip(_RET_IP_);
188 187 }
189 188 EXPORT_SYMBOL(local_bh_enable);
190 189  
... ... @@ -229,8 +228,7 @@
229 228 pending = local_softirq_pending();
230 229 account_irq_enter_time(current);
231 230  
232   - __local_bh_disable((unsigned long)__builtin_return_address(0),
233   - SOFTIRQ_OFFSET);
  231 + __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
234 232 lockdep_softirq_enter();
235 233  
236 234 cpu = smp_processor_id();