Commit 6e0b7b2c39b91b467270dd0bc383914f99e1fb28

Authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  genirq: Clear CPU mask in affinity_hint when none is provided
  genirq: Add CPU mask affinity hint
  genirq: Remove IRQF_DISABLED from core code
  genirq: Run irq handlers with interrupts disabled
  genirq: Introduce request_any_context_irq()
  genirq: Expose irq_desc->node in proc/irq

Fixed up trivial conflicts in Documentation/feature-removal-schedule.txt

Showing 7 changed files

Documentation/feature-removal-schedule.txt
... ... @@ -589,4 +589,13 @@
589 589 provided by the vtx API, then that functionality should be build
590 590 around the sliced VBI API instead.
591 591 Who: Hans Verkuil <hverkuil@xs4all.nl>
  592 +
  593 +----------------------------
  594 +
  595 +What: IRQF_DISABLED
  596 +When: 2.6.36
  597 +Why: The flag is a NOOP as we run interrupt handlers with interrupts disabled
  598 +Who: Thomas Gleixner <tglx@linutronix.de>
  599 +
  600 +----------------------------
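
For illustration only (not something this diff changes in any driver), a driver that currently passes the flag can simply drop it; the foo_* names below are hypothetical:

        #include <linux/interrupt.h>

        static irqreturn_t foo_handler(int irq, void *dev_id)
        {
                /* handlers already run with interrupts disabled */
                return IRQ_HANDLED;
        }

        static int foo_request(unsigned int irq, void *dev)
        {
                /* was: request_irq(irq, foo_handler, IRQF_DISABLED | IRQF_SHARED, "foo", dev); */
                return request_irq(irq, foo_handler, IRQF_SHARED, "foo", dev);
        }
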
Documentation/filesystems/proc.txt
... ... @@ -565,6 +565,10 @@
565 565 IRQs which have not yet been allocated/activated, and hence which lack a
566 566 /proc/irq/[0-9]* directory.
567 567  
  568 +The node file on an SMP system shows the node to which the device using the IRQ
  569 +reports itself as being attached. This hardware locality information does not
  570 +include information about any possible driver locality preference.
  571 +
568 572 prof_cpu_mask specifies which CPUs are to be profiled by the system wide
569 573 profiler. Default value is ffffffff (all cpus).
570 574  
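
As a hedged userspace sketch (not part of this patch), the new file can be read like any other proc file; IRQ 30 is an arbitrary example:

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/irq/30/node", "r");  /* any allocated IRQ */
                int node;

                if (!f)
                        return 1;
                if (fscanf(f, "%d", &node) == 1)
                        printf("IRQ 30 reports node %d\n", node);
                fclose(f);
                return 0;
        }
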
include/linux/interrupt.h
... ... @@ -39,7 +39,8 @@
39 39 * These flags used only by the kernel as part of the
40 40 * irq handling routines.
41 41 *
42   - * IRQF_DISABLED - keep irqs disabled when calling the action handler
  42 + * IRQF_DISABLED - keep irqs disabled when calling the action handler.
  43 + * DEPRECATED. This flag is a NOOP and scheduled to be removed
43 44 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
44 45 * IRQF_SHARED - allow sharing the irq among several devices
45 46 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
... ... @@ -77,6 +78,18 @@
77 78 IRQTF_AFFINITY,
78 79 };
79 80  
  81 +/**
  82 + * These values can be returned by request_any_context_irq() and
  83 + * describe the context the interrupt will be run in.
  84 + *
  85 + * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
  86 + * IRQC_IS_NESTED - interrupt runs in a nested threaded context
  87 + */
  88 +enum {
  89 + IRQC_IS_HARDIRQ = 0,
  90 + IRQC_IS_NESTED,
  91 +};
  92 +
80 93 typedef irqreturn_t (*irq_handler_t)(int, void *);
81 94  
82 95 /**
... ... @@ -120,6 +133,10 @@
120 133 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
121 134 }
122 135  
  136 +extern int __must_check
  137 +request_any_context_irq(unsigned int irq, irq_handler_t handler,
  138 + unsigned long flags, const char *name, void *dev_id);
  139 +
123 140 extern void exit_irq_thread(void);
124 141 #else
125 142  
... ... @@ -141,6 +158,13 @@
141 158 return request_irq(irq, handler, flags, name, dev);
142 159 }
143 160  
  161 +static inline int __must_check
  162 +request_any_context_irq(unsigned int irq, irq_handler_t handler,
  163 + unsigned long flags, const char *name, void *dev_id)
  164 +{
  165 + return request_irq(irq, handler, flags, name, dev_id);
  166 +}
  167 +
144 168 static inline void exit_irq_thread(void) { }
145 169 #endif
146 170  
... ... @@ -209,6 +233,7 @@
209 233 extern int irq_can_set_affinity(unsigned int irq);
210 234 extern int irq_select_affinity(unsigned int irq);
211 235  
  236 +extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
212 237 #else /* CONFIG_SMP */
213 238  
214 239 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
... ... @@ -223,6 +248,11 @@
223 248  
224 249 static inline int irq_select_affinity(unsigned int irq) { return 0; }
225 250  
  251 +static inline int irq_set_affinity_hint(unsigned int irq,
  252 + const struct cpumask *m)
  253 +{
  254 + return -EINVAL;
  255 +}
226 256 #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
227 257  
228 258 #ifdef CONFIG_GENERIC_HARDIRQS
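
A usage sketch for the irq_set_affinity_hint() declared above, not taken from this commit: a multi-queue driver could hint that each queue's IRQ is best serviced on a particular CPU, and must clear the hint again before freeing the IRQ (see the WARN_ON_ONCE added to the free path further down). The foo_* names are invented:

        #include <linux/interrupt.h>

        static irqreturn_t foo_queue_irq(int irq, void *queue)
        {
                return IRQ_HANDLED;
        }

        static int foo_setup_queue_irq(unsigned int irq, unsigned int cpu, void *queue)
        {
                int ret = request_irq(irq, foo_queue_irq, 0, "foo-queue", queue);

                if (ret)
                        return ret;

                /* advisory only; tools such as irqbalance can read it back
                 * from /proc/irq/<irq>/affinity_hint */
                irq_set_affinity_hint(irq, cpumask_of(cpu));
                return 0;
        }

        static void foo_free_queue_irq(unsigned int irq, void *queue)
        {
                irq_set_affinity_hint(irq, NULL);  /* clear before free_irq() */
                free_irq(irq, queue);
        }
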
include/linux/irq.h
... ... @@ -195,6 +195,7 @@
195 195 raw_spinlock_t lock;
196 196 #ifdef CONFIG_SMP
197 197 cpumask_var_t affinity;
  198 + const struct cpumask *affinity_hint;
198 199 unsigned int node;
199 200 #ifdef CONFIG_GENERIC_PENDING_IRQ
200 201 cpumask_var_t pending_mask;
kernel/irq/handle.c
... ... @@ -370,9 +370,6 @@
370 370 irqreturn_t ret, retval = IRQ_NONE;
371 371 unsigned int status = 0;
372 372  
373   - if (!(action->flags & IRQF_DISABLED))
374   - local_irq_enable_in_hardirq();
375   -
376 373 do {
377 374 trace_irq_handler_entry(irq, action);
378 375 ret = action->handler(irq, action->dev_id);
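
Since handlers now always run with interrupts disabled, a handler that relied on the old behaviour for lengthy work is better split into a short hardirq part and a threaded part. A hypothetical sketch (foo_* names invented), using the existing request_threaded_irq() API:

        #include <linux/interrupt.h>

        static irqreturn_t foo_quick_check(int irq, void *dev_id)
        {
                /* hardirq context, interrupts disabled: just ack and defer */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t foo_slow_work(int irq, void *dev_id)
        {
                /* runs in a kernel thread, interrupts enabled, may sleep */
                return IRQ_HANDLED;
        }

        /* ret = request_threaded_irq(irq, foo_quick_check, foo_slow_work, 0, "foo", dev); */
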
kernel/irq/manage.c
... ... @@ -138,6 +138,22 @@
138 138 return 0;
139 139 }
140 140  
  141 +int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
  142 +{
  143 + struct irq_desc *desc = irq_to_desc(irq);
  144 + unsigned long flags;
  145 +
  146 + if (!desc)
  147 + return -EINVAL;
  148 +
  149 + raw_spin_lock_irqsave(&desc->lock, flags);
  150 + desc->affinity_hint = m;
  151 + raw_spin_unlock_irqrestore(&desc->lock, flags);
  152 +
  153 + return 0;
  154 +}
  155 +EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
  156 +
141 157 #ifndef CONFIG_AUTO_IRQ_AFFINITY
142 158 /*
143 159 * Generic version of the affinity autoselector.
... ... @@ -757,16 +773,6 @@
757 773 if (new->flags & IRQF_ONESHOT)
758 774 desc->status |= IRQ_ONESHOT;
759 775  
760   - /*
761   - * Force MSI interrupts to run with interrupts
762   - * disabled. The multi vector cards can cause stack
763   - * overflows due to nested interrupts when enough of
764   - * them are directed to a core and fire at the same
765   - * time.
766   - */
767   - if (desc->msi_desc)
768   - new->flags |= IRQF_DISABLED;
769   -
770 776 if (!(desc->status & IRQ_NOAUTOEN)) {
771 777 desc->depth = 0;
772 778 desc->status &= ~IRQ_DISABLED;
... ... @@ -916,6 +922,12 @@
916 922 desc->chip->disable(irq);
917 923 }
918 924  
  925 +#ifdef CONFIG_SMP
  926 + /* make sure affinity_hint is cleaned up */
  927 + if (WARN_ON_ONCE(desc->affinity_hint))
  928 + desc->affinity_hint = NULL;
  929 +#endif
  930 +
919 931 raw_spin_unlock_irqrestore(&desc->lock, flags);
920 932  
921 933 unregister_handler_proc(irq, action);
... ... @@ -1027,7 +1039,6 @@
1027 1039 * Flags:
1028 1040 *
1029 1041 * IRQF_SHARED Interrupt is shared
1030   - * IRQF_DISABLED Disable local interrupts while processing
1031 1042 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
1032 1043 * IRQF_TRIGGER_* Specify active edge(s) or level
1033 1044 *
... ... @@ -1041,25 +1052,6 @@
1041 1052 int retval;
1042 1053  
1043 1054 /*
1044   - * handle_IRQ_event() always ignores IRQF_DISABLED except for
1045   - * the _first_ irqaction (sigh). That can cause oopsing, but
1046   - * the behavior is classified as "will not fix" so we need to
1047   - * start nudging drivers away from using that idiom.
1048   - */
1049   - if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
1050   - (IRQF_SHARED|IRQF_DISABLED)) {
1051   - pr_warning(
1052   - "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
1053   - irq, devname);
1054   - }
1055   -
1056   -#ifdef CONFIG_LOCKDEP
1057   - /*
1058   - * Lockdep wants atomic interrupt handlers:
1059   - */
1060   - irqflags |= IRQF_DISABLED;
1061   -#endif
1062   - /*
1063 1055 * Sanity-check: shared interrupts must pass in a real dev-ID,
1064 1056 * otherwise we'll have trouble later trying to figure out
1065 1057 * which interrupt is which (messes up the interrupt freeing
... ... @@ -1120,4 +1112,41 @@
1120 1112 return retval;
1121 1113 }
1122 1114 EXPORT_SYMBOL(request_threaded_irq);
  1115 +
  1116 +/**
  1117 + * request_any_context_irq - allocate an interrupt line
  1118 + * @irq: Interrupt line to allocate
  1119 + * @handler: Function to be called when the IRQ occurs.
  1120 + * Threaded handler for threaded interrupts.
  1121 + * @flags: Interrupt type flags
  1122 + * @name: An ascii name for the claiming device
  1123 + * @dev_id: A cookie passed back to the handler function
  1124 + *
  1125 + * This call allocates interrupt resources and enables the
  1126 + * interrupt line and IRQ handling. It selects either a
  1127 + * hardirq or threaded handling method depending on the
  1128 + * context.
  1129 + *
  1130 + * On failure, it returns a negative value. On success,
  1131 + * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
  1132 + */
  1133 +int request_any_context_irq(unsigned int irq, irq_handler_t handler,
  1134 + unsigned long flags, const char *name, void *dev_id)
  1135 +{
  1136 + struct irq_desc *desc = irq_to_desc(irq);
  1137 + int ret;
  1138 +
  1139 + if (!desc)
  1140 + return -EINVAL;
  1141 +
  1142 + if (desc->status & IRQ_NESTED_THREAD) {
  1143 + ret = request_threaded_irq(irq, NULL, handler,
  1144 + flags, name, dev_id);
  1145 + return !ret ? IRQC_IS_NESTED : ret;
  1146 + }
  1147 +
  1148 + ret = request_irq(irq, handler, flags, name, dev_id);
  1149 + return !ret ? IRQC_IS_HARDIRQ : ret;
  1150 +}
  1151 +EXPORT_SYMBOL_GPL(request_any_context_irq);
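
A hypothetical caller, e.g. a driver whose interrupt may sit behind a nested/threaded parent such as a GPIO expander on some boards and be a plain hardirq on others (foo_* names invented):

        #include <linux/interrupt.h>

        struct foo_device {
                unsigned int irq;
                bool irq_is_nested;
        };

        static irqreturn_t foo_irq(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        static int foo_setup_irq(struct foo_device *foo)
        {
                int ret = request_any_context_irq(foo->irq, foo_irq, 0, "foo", foo);

                if (ret < 0)
                        return ret;                               /* request failed */

                foo->irq_is_nested = (ret == IRQC_IS_NESTED);     /* else IRQC_IS_HARDIRQ */
                return 0;
        }
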
kernel/irq/proc.c
... ... @@ -32,6 +32,27 @@
32 32 return 0;
33 33 }
34 34  
  35 +static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
  36 +{
  37 + struct irq_desc *desc = irq_to_desc((long)m->private);
  38 + unsigned long flags;
  39 + cpumask_var_t mask;
  40 +
  41 + if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
  42 + return -ENOMEM;
  43 +
  44 + raw_spin_lock_irqsave(&desc->lock, flags);
  45 + if (desc->affinity_hint)
  46 + cpumask_copy(mask, desc->affinity_hint);
  47 + raw_spin_unlock_irqrestore(&desc->lock, flags);
  48 +
  49 + seq_cpumask(m, mask);
  50 + seq_putc(m, '\n');
  51 + free_cpumask_var(mask);
  52 +
  53 + return 0;
  54 +}
  55 +
35 56 #ifndef is_affinity_mask_valid
36 57 #define is_affinity_mask_valid(val) 1
37 58 #endif
... ... @@ -84,6 +105,11 @@
84 105 return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
85 106 }
86 107  
  108 +static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
  109 +{
  110 + return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
  111 +}
  112 +
87 113 static const struct file_operations irq_affinity_proc_fops = {
88 114 .open = irq_affinity_proc_open,
89 115 .read = seq_read,
... ... @@ -92,6 +118,13 @@
92 118 .write = irq_affinity_proc_write,
93 119 };
94 120  
  121 +static const struct file_operations irq_affinity_hint_proc_fops = {
  122 + .open = irq_affinity_hint_proc_open,
  123 + .read = seq_read,
  124 + .llseek = seq_lseek,
  125 + .release = single_release,
  126 +};
  127 +
95 128 static int default_affinity_show(struct seq_file *m, void *v)
96 129 {
97 130 seq_cpumask(m, irq_default_affinity);
... ... @@ -147,6 +180,26 @@
147 180 .release = single_release,
148 181 .write = default_affinity_write,
149 182 };
  183 +
  184 +static int irq_node_proc_show(struct seq_file *m, void *v)
  185 +{
  186 + struct irq_desc *desc = irq_to_desc((long) m->private);
  187 +
  188 + seq_printf(m, "%d\n", desc->node);
  189 + return 0;
  190 +}
  191 +
  192 +static int irq_node_proc_open(struct inode *inode, struct file *file)
  193 +{
  194 + return single_open(file, irq_node_proc_show, PDE(inode)->data);
  195 +}
  196 +
  197 +static const struct file_operations irq_node_proc_fops = {
  198 + .open = irq_node_proc_open,
  199 + .read = seq_read,
  200 + .llseek = seq_lseek,
  201 + .release = single_release,
  202 +};
150 203 #endif
151 204  
152 205 static int irq_spurious_proc_show(struct seq_file *m, void *v)
... ... @@ -231,6 +284,13 @@
231 284 /* create /proc/irq/<irq>/smp_affinity */
232 285 proc_create_data("smp_affinity", 0600, desc->dir,
233 286 &irq_affinity_proc_fops, (void *)(long)irq);
  287 +
  288 + /* create /proc/irq/<irq>/affinity_hint */
  289 + proc_create_data("affinity_hint", 0400, desc->dir,
  290 + &irq_affinity_hint_proc_fops, (void *)(long)irq);
  291 +
  292 + proc_create_data("node", 0444, desc->dir,
  293 + &irq_node_proc_fops, (void *)(long)irq);
234 294 #endif
235 295  
236 296 proc_create_data("spurious", 0444, desc->dir,