/arch/arm/kernel/irq.c
/*
 * linux/arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/system.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <asm/perftypes.h>

/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif

void (*init_arch_irq)(void) __initdata = NULL;
unsigned long irq_err_count;

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, "    ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
		seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_FIQ
		show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
		show_ipi_list(p);
		show_local_irqs(p);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}

/*
 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
 * come via this function. Instead, they should provide their
 * own 'handler'
 */
asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	perf_mon_interrupt_in();
	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= NR_IRQS)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
	perf_mon_interrupt_out();
}

void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	if (iflags & IRQF_VALID)
		desc->status &= ~IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		desc->status &= ~IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		desc->status &= ~IRQ_NOAUTOEN;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
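
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * machine interrupt setup code of this kernel generation typically
 * paired set_irq_flags() with the genirq chip and flow-handler setup;
 * "my_irq_chip" below is a hypothetical struct irq_chip:
 *
 *	set_irq_chip(irq, &my_irq_chip);
 *	set_irq_handler(irq, handle_level_irq);
 *	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 *
 * Per the function above, IRQF_VALID clears IRQ_NOREQUEST so drivers
 * may request_irq() the line, and IRQF_PROBE clears IRQ_NOPROBE so the
 * line participates in interrupt autoprobing.
 */
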
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;

	init_arch_irq();
}

#ifdef CONFIG_HOTPLUG_CPU

static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);

	raw_spin_lock_irq(&desc->lock);
	desc->chip->set_affinity(irq, cpumask_of(cpu));
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline. Migrate IRQs off this CPU. If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();

	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_desc + i;

		if (desc->node == cpu) {
			unsigned int newcpu = cpumask_any_and(desc->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       i, cpu);

				cpumask_setall(desc->affinity);
				newcpu = cpumask_any_and(desc->affinity,
							 cpu_online_mask);
			}

			route_irq(desc, i, newcpu);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
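
/*
 * Editor's note (not part of the original file): in this kernel
 * generation, migrate_irqs() is invoked from __cpu_disable() in
 * arch/arm/kernel/smp.c, after the dying CPU has been cleared from
 * cpu_online_mask, so cpumask_any_and(desc->affinity, cpu_online_mask)
 * above can never select the CPU that is going down.
 */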