Commit cec9694af7ada37611eb41733065427c0e72cd6c

Authored by Jason Cooper

Merge branch 'irqchip/hip04' into irqchip/core

Showing 2 changed files (side-by-side diff)

drivers/irqchip/Makefile
... ... @@ -2,6 +2,7 @@
2 2  
3 3 obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
4 4 obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
  5 +obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
5 6 obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
6 7 obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
7 8 obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
drivers/irqchip/irq-hip04.c
  1 +/*
  2 + * Hisilicon HiP04 INTC
  3 + *
  4 + * Copyright (C) 2002-2014 ARM Limited.
  5 + * Copyright (c) 2013-2014 Hisilicon Ltd.
  6 + * Copyright (c) 2013-2014 Linaro Ltd.
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + *
  12 + * Interrupt architecture for the HIP04 INTC:
  13 + *
  14 + * o There is one Interrupt Distributor, which receives interrupts
  15 + * from system devices and sends them to the Interrupt Controllers.
  16 + *
  17 + * o There is one CPU Interface per CPU, which sends interrupts sent
  18 + * by the Distributor, and interrupts generated locally, to the
  19 + * associated CPU. The base address of the CPU interface is usually
  20 + * aliased so that the same address points to different chips depending
  21 + * on the CPU it is accessed from.
  22 + *
  23 + * Note that IRQs 0-31 are special - they are local to each CPU.
  24 + * As such, the enable set/clear, pending set/clear and active bit
  25 + * registers are banked per-cpu for these sources.
  26 + */
  27 +
  28 +#include <linux/init.h>
  29 +#include <linux/kernel.h>
  30 +#include <linux/err.h>
  31 +#include <linux/module.h>
  32 +#include <linux/list.h>
  33 +#include <linux/smp.h>
  34 +#include <linux/cpu.h>
  35 +#include <linux/cpu_pm.h>
  36 +#include <linux/cpumask.h>
  37 +#include <linux/io.h>
  38 +#include <linux/of.h>
  39 +#include <linux/of_address.h>
  40 +#include <linux/of_irq.h>
  41 +#include <linux/irqdomain.h>
  42 +#include <linux/interrupt.h>
  43 +#include <linux/slab.h>
  44 +#include <linux/irqchip/arm-gic.h>
  45 +
  46 +#include <asm/irq.h>
  47 +#include <asm/exception.h>
  48 +#include <asm/smp_plat.h>
  49 +
  50 +#include "irq-gic-common.h"
  51 +#include "irqchip.h"
  52 +
  53 +#define HIP04_MAX_IRQS 510
  54 +
  55 +struct hip04_irq_data {
  56 + void __iomem *dist_base;
  57 + void __iomem *cpu_base;
  58 + struct irq_domain *domain;
  59 + unsigned int nr_irqs;
  60 +};
  61 +
  62 +static DEFINE_RAW_SPINLOCK(irq_controller_lock);
  63 +
  64 +/*
  65 + * The GIC mapping of CPU interfaces does not necessarily match
  66 + * the logical CPU numbering. Let's use a mapping as returned
  67 + * by the GIC itself.
  68 + */
  69 +#define NR_HIP04_CPU_IF 16
  70 +static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;
  71 +
  72 +static struct hip04_irq_data hip04_data __read_mostly;
  73 +
  74 +static inline void __iomem *hip04_dist_base(struct irq_data *d)
  75 +{
  76 + struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
  77 + return hip04_data->dist_base;
  78 +}
  79 +
  80 +static inline void __iomem *hip04_cpu_base(struct irq_data *d)
  81 +{
  82 + struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
  83 + return hip04_data->cpu_base;
  84 +}
  85 +
  86 +static inline unsigned int hip04_irq(struct irq_data *d)
  87 +{
  88 + return d->hwirq;
  89 +}
  90 +
  91 +/*
  92 + * Routines to acknowledge, disable and enable interrupts
  93 + */
  94 +static void hip04_mask_irq(struct irq_data *d)
  95 +{
  96 + u32 mask = 1 << (hip04_irq(d) % 32);
  97 +
  98 + raw_spin_lock(&irq_controller_lock);
  99 + writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR +
  100 + (hip04_irq(d) / 32) * 4);
  101 + raw_spin_unlock(&irq_controller_lock);
  102 +}
  103 +
  104 +static void hip04_unmask_irq(struct irq_data *d)
  105 +{
  106 + u32 mask = 1 << (hip04_irq(d) % 32);
  107 +
  108 + raw_spin_lock(&irq_controller_lock);
  109 + writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET +
  110 + (hip04_irq(d) / 32) * 4);
  111 + raw_spin_unlock(&irq_controller_lock);
  112 +}
  113 +
  114 +static void hip04_eoi_irq(struct irq_data *d)
  115 +{
  116 + writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
  117 +}
  118 +
  119 +static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
  120 +{
  121 + void __iomem *base = hip04_dist_base(d);
  122 + unsigned int irq = hip04_irq(d);
  123 +
  124 + /* Interrupt configuration for SGIs can't be changed */
  125 + if (irq < 16)
  126 + return -EINVAL;
  127 +
  128 + if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
  129 + return -EINVAL;
  130 +
  131 + raw_spin_lock(&irq_controller_lock);
  132 +
  133 + gic_configure_irq(irq, type, base, NULL);
  134 +
  135 + raw_spin_unlock(&irq_controller_lock);
  136 +
  137 + return 0;
  138 +}
  139 +
#ifdef CONFIG_SMP
/*
 * Route hwirq @d to a single CPU chosen from @mask_val.
 *
 * Target slots on this controller are 16 bits wide (two per 32-bit
 * GIC_DIST_TARGET register); the chosen CPU is written as the physical
 * interface mask recorded in hip04_cpu_map[].
 */
static int hip04_irq_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	unsigned int shift = (hip04_irq(d) % 2) * 16;
	u32 clear = (u32)0xffff << shift;
	unsigned int cpu;
	void __iomem *reg;
	u32 val;

	cpu = force ? cpumask_first(mask_val)
		    : cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
	val = readl_relaxed(reg) & ~clear;
	val |= (u32)hip04_cpu_map[cpu] << shift;
	writel_relaxed(val, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
  168 +
  169 +static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
  170 +{
  171 + u32 irqstat, irqnr;
  172 + void __iomem *cpu_base = hip04_data.cpu_base;
  173 +
  174 + do {
  175 + irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
  176 + irqnr = irqstat & GICC_IAR_INT_ID_MASK;
  177 +
  178 + if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
  179 + irqnr = irq_find_mapping(hip04_data.domain, irqnr);
  180 + handle_IRQ(irqnr, regs);
  181 + continue;
  182 + }
  183 + if (irqnr < 16) {
  184 + writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
  185 +#ifdef CONFIG_SMP
  186 + handle_IPI(irqnr, regs);
  187 +#endif
  188 + continue;
  189 + }
  190 + break;
  191 + } while (1);
  192 +}
  193 +
  194 +static struct irq_chip hip04_irq_chip = {
  195 + .name = "HIP04 INTC",
  196 + .irq_mask = hip04_mask_irq,
  197 + .irq_unmask = hip04_unmask_irq,
  198 + .irq_eoi = hip04_eoi_irq,
  199 + .irq_set_type = hip04_irq_set_type,
  200 +#ifdef CONFIG_SMP
  201 + .irq_set_affinity = hip04_irq_set_affinity,
  202 +#endif
  203 +};
  204 +
  205 +static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
  206 +{
  207 + void __iomem *base = intc->dist_base;
  208 + u32 mask, i;
  209 +
  210 + for (i = mask = 0; i < 32; i += 2) {
  211 + mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2);
  212 + mask |= mask >> 16;
  213 + if (mask)
  214 + break;
  215 + }
  216 +
  217 + if (!mask)
  218 + pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
  219 +
  220 + return mask;
  221 +}
  222 +
  223 +static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
  224 +{
  225 + unsigned int i;
  226 + u32 cpumask;
  227 + unsigned int nr_irqs = intc->nr_irqs;
  228 + void __iomem *base = intc->dist_base;
  229 +
  230 + writel_relaxed(0, base + GIC_DIST_CTRL);
  231 +
  232 + /*
  233 + * Set all global interrupts to this CPU only.
  234 + */
  235 + cpumask = hip04_get_cpumask(intc);
  236 + cpumask |= cpumask << 16;
  237 + for (i = 32; i < nr_irqs; i += 2)
  238 + writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));
  239 +
  240 + gic_dist_config(base, nr_irqs, NULL);
  241 +
  242 + writel_relaxed(1, base + GIC_DIST_CTRL);
  243 +}
  244 +
  245 +static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
  246 +{
  247 + void __iomem *dist_base = intc->dist_base;
  248 + void __iomem *base = intc->cpu_base;
  249 + unsigned int cpu_mask, cpu = smp_processor_id();
  250 + int i;
  251 +
  252 + /*
  253 + * Get what the GIC says our CPU mask is.
  254 + */
  255 + BUG_ON(cpu >= NR_HIP04_CPU_IF);
  256 + cpu_mask = hip04_get_cpumask(intc);
  257 + hip04_cpu_map[cpu] = cpu_mask;
  258 +
  259 + /*
  260 + * Clear our mask from the other map entries in case they're
  261 + * still undefined.
  262 + */
  263 + for (i = 0; i < NR_HIP04_CPU_IF; i++)
  264 + if (i != cpu)
  265 + hip04_cpu_map[i] &= ~cpu_mask;
  266 +
  267 + gic_cpu_config(dist_base, NULL);
  268 +
  269 + writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
  270 + writel_relaxed(1, base + GIC_CPU_CTRL);
  271 +}
  272 +
#ifdef CONFIG_SMP
/* Send software-generated interrupt @irq to every CPU in @mask. */
static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long flags;
	unsigned long map = 0;
	int cpu;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= hip04_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
  297 +
  298 +static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
  299 + irq_hw_number_t hw)
  300 +{
  301 + if (hw < 32) {
  302 + irq_set_percpu_devid(irq);
  303 + irq_set_chip_and_handler(irq, &hip04_irq_chip,
  304 + handle_percpu_devid_irq);
  305 + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
  306 + } else {
  307 + irq_set_chip_and_handler(irq, &hip04_irq_chip,
  308 + handle_fasteoi_irq);
  309 + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
  310 + }
  311 + irq_set_chip_data(irq, d->host_data);
  312 + return 0;
  313 +}
  314 +
  315 +static int hip04_irq_domain_xlate(struct irq_domain *d,
  316 + struct device_node *controller,
  317 + const u32 *intspec, unsigned int intsize,
  318 + unsigned long *out_hwirq,
  319 + unsigned int *out_type)
  320 +{
  321 + unsigned long ret = 0;
  322 +
  323 + if (d->of_node != controller)
  324 + return -EINVAL;
  325 + if (intsize < 3)
  326 + return -EINVAL;
  327 +
  328 + /* Get the interrupt number and add 16 to skip over SGIs */
  329 + *out_hwirq = intspec[1] + 16;
  330 +
  331 + /* For SPIs, we need to add 16 more to get the irq ID number */
  332 + if (!intspec[0])
  333 + *out_hwirq += 16;
  334 +
  335 + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
  336 +
  337 + return ret;
  338 +}
  339 +
#ifdef CONFIG_SMP
/* CPU hotplug callback: bring up the INTC interface of a starting CPU. */
static int hip04_irq_secondary_init(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		hip04_irq_cpu_init(&hip04_data);
		break;
	}
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block hip04_irq_cpu_notifier = {
	.notifier_call	= hip04_irq_secondary_init,
	.priority	= 100,
};
#endif
  359 +
  360 +static const struct irq_domain_ops hip04_irq_domain_ops = {
  361 + .map = hip04_irq_domain_map,
  362 + .xlate = hip04_irq_domain_xlate,
  363 +};
  364 +
  365 +static int __init
  366 +hip04_of_init(struct device_node *node, struct device_node *parent)
  367 +{
  368 + irq_hw_number_t hwirq_base = 16;
  369 + int nr_irqs, irq_base, i;
  370 +
  371 + if (WARN_ON(!node))
  372 + return -ENODEV;
  373 +
  374 + hip04_data.dist_base = of_iomap(node, 0);
  375 + WARN(!hip04_data.dist_base, "fail to map hip04 intc dist registers\n");
  376 +
  377 + hip04_data.cpu_base = of_iomap(node, 1);
  378 + WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");
  379 +
  380 + /*
  381 + * Initialize the CPU interface map to all CPUs.
  382 + * It will be refined as each CPU probes its ID.
  383 + */
  384 + for (i = 0; i < NR_HIP04_CPU_IF; i++)
  385 + hip04_cpu_map[i] = 0xff;
  386 +
  387 + /*
  388 + * Find out how many interrupts are supported.
  389 + * The HIP04 INTC only supports up to 510 interrupt sources.
  390 + */
  391 + nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
  392 + nr_irqs = (nr_irqs + 1) * 32;
  393 + if (nr_irqs > HIP04_MAX_IRQS)
  394 + nr_irqs = HIP04_MAX_IRQS;
  395 + hip04_data.nr_irqs = nr_irqs;
  396 +
  397 + nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
  398 +
  399 + irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
  400 + if (IS_ERR_VALUE(irq_base)) {
  401 + pr_err("failed to allocate IRQ numbers\n");
  402 + return -EINVAL;
  403 + }
  404 +
  405 + hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
  406 + hwirq_base,
  407 + &hip04_irq_domain_ops,
  408 + &hip04_data);
  409 +
  410 + if (WARN_ON(!hip04_data.domain))
  411 + return -EINVAL;
  412 +
  413 +#ifdef CONFIG_SMP
  414 + set_smp_cross_call(hip04_raise_softirq);
  415 + register_cpu_notifier(&hip04_irq_cpu_notifier);
  416 +#endif
  417 + set_handle_irq(hip04_handle_irq);
  418 +
  419 + hip04_irq_dist_init(&hip04_data);
  420 + hip04_irq_cpu_init(&hip04_data);
  421 +
  422 + return 0;
  423 +}
  424 +IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);