
/arch/x86/kernel/irq.c

https://bitbucket.org/thekraven/iscream_thunderc-2.6.35
C | 366 lines | 260 code | 57 blank | 49 comment
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

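/* Convenience accessor for the per-cpu irq_cpustat_t of a given CPU. */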
#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "PND");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
	seq_printf(p, "  Performance pending work\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
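	/* irq_mis_count is maintained by the IO-APIC code. */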
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}

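/*
 * seq_file show handler for /proc/interrupts: one row per irq, with the
 * architecture-specific rows printed by show_other_interrupts() above.
 */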
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

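	/* Pick a field width wide enough for the largest irq number. */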
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	/* print header */
	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
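	/* Nothing to show for an irq with no handler and no recorded counts. */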
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %8s", desc->chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
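	/* NMIs are counted unconditionally; the other counters are config-dependent. */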
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_pending_irqs;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}


/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * The entry stubs push the one's complement of the vector
	 * number into orig_ax (the high bit is used in the ret_from_
	 * code), so invert it here to recover the vector.
	 */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();

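	/* Map the hardware vector to its Linux irq number via the per-cpu vector_irq table. */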
	irq = __get_cpu_var(vector_irq)[vector];

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	exit_idle();

	irq_enter();

	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	irq_exit();

	set_irq_regs(old_regs);
}

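/* vector_used_by_percpu_irq() is defined elsewhere in the x86 irq code; only the export lives here. */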
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
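		/* irq 2 is the cascade to the legacy 8259 PIC; leave it alone. */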
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		affinity = desc->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * the non intr-remapping case, we can't wait till this
		 * interrupt arrives at this cpu before completing the
		 * irq move.
		 */
		irq_force_complete_move(irq);

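		/*
		 * No online CPU is left in this irq's affinity mask:
		 * fall back to all CPUs and note the broken affinity.
		 */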
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, affinity);
		else if (!(warned++))
			set_affinity = 0;

		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
			desc->chip->unmask(irq);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We could remove the mdelay() and simply send spurious interrupts
	 * to the new cpu targets for all the irqs that were handled
	 * previously by this cpu. While it works, I have seen spurious
	 * interrupt messages (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1): check the IRR and then send those
	 * interrupts to their new targets, as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__get_cpu_var(vector_irq)[vector] < 0)
			continue;

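		/*
		 * The IRR is 256 bits spread across eight 32-bit APIC
		 * registers spaced 0x10 apart; test this vector's bit.
		 */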
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __get_cpu_var(vector_irq)[vector];

			desc = irq_to_desc(irq);
			raw_spin_lock(&desc->lock);
			if (desc->chip->retrigger)
				desc->chip->retrigger(irq);
			raw_spin_unlock(&desc->lock);
		}
	}
}
#endif