/arch/arm/kernel/irq.c


/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
/*
 * Maximum IRQ count. Currently, this is arbitrary. However, it should
 * not be set too low to prevent false triggering. Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT 100000

static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;

/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif
/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
        return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
        irq_err_count += 1;
        printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

static struct irqchip bad_chip = {
        .ack    = dummy_mask_unmask_irq,
        .mask   = dummy_mask_unmask_irq,
        .unmask = dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
        .chip          = &bad_chip,
        .handle        = do_bad_IRQ,
        .pend          = LIST_HEAD_INIT(bad_irq_desc.pend),
        .disable_depth = 1,
};
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;

        while (desc->running)
                barrier();
}
EXPORT_SYMBOL(synchronize_irq);

#define smp_set_running(desc)   do { desc->running = 1; } while (0)
#define smp_clear_running(desc) do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)   do { } while (0)
#define smp_clear_running(desc) do { } while (0)
#endif
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables
 * are nested. We do this lazily.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        desc->disable_depth++;
        list_del_init(&desc->pend);
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables
 * are nested. This function waits for any pending IRQ
 * handlers for this interrupt to complete before returning.
 * If you use this function while holding a resource the IRQ
 * handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/**
 * enable_irq - enable interrupt handling on an irq
 * @irq: Interrupt to enable
 *
 * Re-enables the processing of interrupts on this IRQ line.
 * Note that this may call the interrupt handler, so you may
 * get unexpected results if you hold IRQs disabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        if (unlikely(!desc->disable_depth)) {
                printk("enable_irq(%u) unbalanced from %p\n", irq,
                        __builtin_return_address(0));
        } else if (!--desc->disable_depth) {
                desc->probing = 0;
                desc->chip->unmask(irq);

                /*
                 * If the interrupt is waiting to be processed,
                 * try to re-run it. We can't directly run it
                 * from here since the caller might be in an
                 * interrupt-protected region.
                 */
                if (desc->pending && list_empty(&desc->pend)) {
                        desc->pending = 0;
                        if (!desc->chip->retrigger ||
                            desc->chip->retrigger(irq))
                                list_add(&desc->pend, &irq_pending);
                }
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
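
/*
 * Example: because enables and disables nest, a driver can bracket a
 * critical region against its own handler. An illustrative sketch
 * only, not part of this file; the function name is hypothetical.
 */
#if 0
static void example_quiesce(unsigned int irq)   /* hypothetical */
{
        disable_irq(irq);       /* waits for any running handler */
        disable_irq(irq);       /* nests: disable_depth is now 2 */

        /* ... safely touch state shared with the handler ... */

        enable_irq(irq);        /* still masked, depth drops to 1 */
        enable_irq(irq);        /* depth 0: line is unmasked again */
}
#endif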
/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        if (desc->chip->wake)
                desc->chip->wake(irq, 1);
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);

void disable_irq_wake(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        if (desc->chip->wake)
                desc->chip->wake(irq, 0);
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
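
/*
 * Example: a driver would typically mark its interrupt as a wakeup
 * source around suspend, provided the chip implements a wake hook.
 * A minimal sketch, not part of this file; the function names are
 * hypothetical.
 */
#if 0
static void example_suspend(unsigned int irq)   /* hypothetical */
{
        enable_irq_wake(irq);   /* ask the chip to wake on this line */
}

static void example_resume(unsigned int irq)    /* hypothetical */
{
        disable_irq_wake(irq);  /* normal masking rules apply again */
}
#endif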
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, cpu;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                char cpuname[12];

                seq_printf(p, "    ");
                for_each_present_cpu(cpu) {
                        sprintf(cpuname, "CPU%d", cpu);
                        seq_printf(p, " %10s", cpuname);
                }
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_controller_lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto unlock;

                seq_printf(p, "%3d: ", i);
                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
                seq_printf(p, "  %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
unlock:
                spin_unlock_irqrestore(&irq_controller_lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
                show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
                show_ipi_list(p);
#endif
                seq_printf(p, "Err: %10lu\n", irq_err_count);
        }
        return 0;
}
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
        unsigned long instr_ptr = instruction_pointer(regs);

        if (desc->lck_jif == jiffies &&
            desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
                desc->lck_cnt += 1;

                if (desc->lck_cnt > MAX_IRQ_CNT) {
                        printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
                        return 1;
                }
        } else {
                desc->lck_cnt = 0;
                desc->lck_pc  = instruction_pointer(regs);
                desc->lck_jif = jiffies;
        }
        return 0;
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
        static int count = 100;
        struct irqaction *action;

        if (!count || noirqdebug)
                return;

        count--;

        if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
                printk("irq%u: bogus retval mask %x\n", irq, ret);
        } else {
                printk("irq%u: nobody cared\n", irq);
        }
        show_regs(regs);
        dump_stack();
        printk(KERN_ERR "handlers:");
        action = desc->action;
        do {
                printk("\n" KERN_ERR "[<%p>]", action->handler);
                print_symbol(" (%s)", (unsigned long)action->handler);
                action = action->next;
        } while (action);
        printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
        unsigned int status;
        int ret, retval = 0;

        spin_unlock(&irq_controller_lock);

        if (!(action->flags & SA_INTERRUPT))
                local_irq_enable();

        status = 0;
        do {
                ret = action->handler(irq, action->dev_id, regs);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);

        spin_lock_irq(&irq_controller_lock);

        return retval;
}
/*
 * This is for software-decoded IRQs. The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
        struct irqaction *action;
        const unsigned int cpu = smp_processor_id();

        desc->triggered = 1;

        kstat_cpu(cpu).irqs[irq]++;

        smp_set_running(desc);

        action = desc->action;
        if (action) {
                int ret = __do_irq(irq, action, regs);
                if (ret != IRQ_HANDLED)
                        report_bad_irq(irq, regs, desc, ret);
        }

        smp_clear_running(desc);
}
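
/*
 * Example: a chained handler for a cascaded controller decodes a status
 * register itself and then dispatches each decoded line through its
 * per-irq descriptor, e.g. one registered with do_simple_IRQ. An
 * illustrative sketch, not part of this file: the status read helper
 * and the base irq of the cascaded lines are hypothetical.
 */
#if 0
extern unsigned int example_read_status(void);  /* hypothetical */
#define EXAMPLE_IRQ_BASE 64                     /* hypothetical */

static void
example_demux_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
        unsigned int pending = example_read_status();

        while (pending) {
                unsigned int bit = ffs(pending) - 1;
                unsigned int cascaded = EXAMPLE_IRQ_BASE + bit;
                struct irqdesc *d = irq_desc + cascaded;

                pending &= ~(1 << bit);
                d->handle(cascaded, d, regs);   /* e.g. do_simple_IRQ */
        }
}
#endif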
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this. Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
        const unsigned int cpu = smp_processor_id();

        desc->triggered = 1;

        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Instead, turn on the
         * hardware masks.
         */
        if (unlikely(desc->running || desc->disable_depth))
                goto running;

        /*
         * Acknowledge and clear the IRQ, but don't mask it.
         */
        desc->chip->ack(irq);

        /*
         * Mark the IRQ currently in progress.
         */
        desc->running = 1;

        kstat_cpu(cpu).irqs[irq]++;

        do {
                struct irqaction *action;

                action = desc->action;
                if (!action)
                        break;

                if (desc->pending && !desc->disable_depth) {
                        desc->pending = 0;
                        desc->chip->unmask(irq);
                }

                __do_irq(irq, action, regs);
        } while (desc->pending && !desc->disable_depth);

        desc->running = 0;

        /*
         * If we were disabled or freed, shut down the handler.
         */
        if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
                return;

running:
        /*
         * We got another IRQ while this one was masked or
         * currently running. Delay it.
         */
        desc->pending = 1;
        desc->chip->mask(irq);
        desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler. Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
        struct irqaction *action;
        const unsigned int cpu = smp_processor_id();

        desc->triggered = 1;

        /*
         * Acknowledge, clear _AND_ disable the interrupt.
         */
        desc->chip->ack(irq);

        if (likely(!desc->disable_depth)) {
                kstat_cpu(cpu).irqs[irq]++;

                smp_set_running(desc);

                /*
                 * Return with this interrupt masked if no action
                 */
                action = desc->action;
                if (action) {
                        int ret = __do_irq(irq, desc->action, regs);

                        if (ret != IRQ_HANDLED)
                                report_bad_irq(irq, regs, desc, ret);

                        if (likely(!desc->disable_depth &&
                                   !check_irq_lock(desc, irq, regs)))
                                desc->chip->unmask(irq);
                }

                smp_clear_running(desc);
        }
}
static void do_pending_irqs(struct pt_regs *regs)
{
        struct list_head head, *l, *n;

        do {
                struct irqdesc *desc;

                /*
                 * First, take the pending interrupts off the list.
                 * The act of calling the handlers may add some IRQs
                 * back onto the list.
                 */
                head = irq_pending;
                INIT_LIST_HEAD(&irq_pending);
                head.next->prev = &head;
                head.prev->next = &head;

                /*
                 * Now run each entry. We must delete it from our
                 * list before calling the handler.
                 */
                list_for_each_safe(l, n, &head) {
                        desc = list_entry(l, struct irqdesc, pend);
                        list_del_init(&desc->pend);
                        desc->handle(desc - irq_desc, desc, regs);
                }

                /*
                 * The list must be empty.
                 */
                BUG_ON(!list_empty(&head));
        } while (!list_empty(&irq_pending));
}
/*
 * do_IRQ handles all hardware IRQs. Decoded IRQs should not
 * come via this function. Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        struct irqdesc *desc = irq_desc + irq;

        /*
         * Some hardware gives randomly wrong interrupts. Rather
         * than crashing, do something sensible.
         */
        if (irq >= NR_IRQS)
                desc = &bad_irq_desc;

        irq_enter();
        spin_lock(&irq_controller_lock);
        desc->handle(irq, desc, regs);

        /*
         * Now re-run any pending interrupts.
         */
        if (!list_empty(&irq_pending))
                do_pending_irqs(regs);

        irq_finish(irq);

        spin_unlock(&irq_controller_lock);
        irq_exit();
}
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
        struct irqdesc *desc;
        unsigned long flags;

        if (irq >= NR_IRQS) {
                printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
                return;
        }

        if (handle == NULL)
                handle = do_bad_IRQ;

        desc = irq_desc + irq;

        if (is_chained && desc->chip == &bad_chip)
                printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

        spin_lock_irqsave(&irq_controller_lock, flags);
        if (handle == do_bad_IRQ) {
                desc->chip->mask(irq);
                desc->chip->ack(irq);
                desc->disable_depth = 1;
        }
        desc->handle = handle;
        if (handle != do_bad_IRQ && is_chained) {
                desc->valid = 0;
                desc->probe_ok = 0;
                desc->disable_depth = 0;
                desc->chip->unmask(irq);
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
        struct irqdesc *desc;
        unsigned long flags;

        if (irq >= NR_IRQS) {
                printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
                return;
        }

        if (chip == NULL)
                chip = &bad_chip;

        desc = irq_desc + irq;
        spin_lock_irqsave(&irq_controller_lock, flags);
        desc->chip = chip;
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int set_irq_type(unsigned int irq, unsigned int type)
{
        struct irqdesc *desc;
        unsigned long flags;
        int ret = -ENXIO;

        if (irq >= NR_IRQS) {
                printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
                return -ENODEV;
        }

        desc = irq_desc + irq;
        if (desc->chip->type) {
                spin_lock_irqsave(&irq_controller_lock, flags);
                ret = desc->chip->type(irq, type);
                spin_unlock_irqrestore(&irq_controller_lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(set_irq_type);
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
        struct irqdesc *desc;
        unsigned long flags;

        if (irq >= NR_IRQS) {
                printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
                return;
        }

        desc = irq_desc + irq;
        spin_lock_irqsave(&irq_controller_lock, flags);
        desc->valid = (iflags & IRQF_VALID) != 0;
        desc->probe_ok = (iflags & IRQF_PROBE) != 0;
        desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
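
/*
 * Example: machine support code typically wires each line up in its
 * init_arch_irq hook with set_irq_chip(), set_irq_handler() (the
 * asm/mach/irq.h wrapper around __set_irq_handler) and set_irq_flags().
 * A minimal sketch, assuming a hypothetical controller "example_chip"
 * whose lines are all level-triggered; not part of this file.
 */
#if 0
extern struct irqchip example_chip;             /* hypothetical */

static void __init example_init_irq(void)       /* hypothetical */
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                set_irq_chip(irq, &example_chip);
                set_irq_handler(irq, do_level_IRQ);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
}
#endif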
int setup_irq(unsigned int irq, struct irqaction *new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;
        struct irqdesc *desc;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it
                 * first, outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is loaded without actually installing a new
                 * handler, but is that really a problem? Only the
                 * sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        desc = irq_desc + irq;
        spin_lock_irqsave(&irq_controller_lock, flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&irq_controller_lock, flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->probing = 0;
                desc->running = 0;
                desc->pending = 0;
                desc->disable_depth = 1;
                if (!desc->noautoenable) {
                        desc->disable_depth = 0;
                        desc->chip->unmask(irq);
                }
        }

        spin_unlock_irqrestore(&irq_controller_lock, flags);
        return 0;
}
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ             Interrupt is shared
 *
 * SA_INTERRUPT         Disable local interrupts while processing
 *
 * SA_SAMPLE_RANDOM     The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irq_flags, const char * devname, void *dev_id)
{
        unsigned long retval;
        struct irqaction *action;

        if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
            (irq_flags & SA_SHIRQ && !dev_id))
                return -EINVAL;

        action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irq_flags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_irq(irq, action);

        if (retval)
                kfree(action);
        return retval;
}

EXPORT_SYMBOL(request_irq);
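
/*
 * Example: typical driver usage of request_irq()/free_irq(). A minimal
 * sketch, not part of this file; the device structure and its irq
 * number are hypothetical.
 */
#if 0
struct example_dev {                    /* hypothetical device state */
        unsigned int irq;
};

static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct example_dev *dev = dev_id;

        /* ... read and clear the device's interrupt status here ... */

        return IRQ_HANDLED;
}

static int example_probe(struct example_dev *dev)
{
        int ret;

        /* dev doubles as the dev_id cookie, as the kerneldoc above suggests */
        ret = request_irq(dev->irq, example_interrupt, SA_SHIRQ,
                          "example", dev);
        if (ret)
                return ret;

        /* ... */
        return 0;
}

static void example_remove(struct example_dev *dev)
{
        free_irq(dev->irq, dev);        /* must match the dev_id above */
}
#endif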
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction * action, **p;
        unsigned long flags;

        if (irq >= NR_IRQS || !irq_desc[irq].valid) {
                printk(KERN_ERR "Trying to free IRQ%d\n", irq);
                dump_stack();
                return;
        }

        spin_lock_irqsave(&irq_controller_lock, flags);
        for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
                if (action->dev_id != dev_id)
                        continue;

                /* Found it - now free it */
                *p = action->next;
                break;
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);

        if (!action) {
                printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
                dump_stack();
        } else {
                synchronize_irq(irq);
                kfree(action);
        }
}

EXPORT_SYMBOL(free_irq);
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing. Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
        unsigned int i, irqs = 0;
        unsigned long delay;

        down(&probe_sem);

        /*
         * first snaffle up any unassigned but
         * probe-able interrupts
         */
        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < NR_IRQS; i++) {
                if (!irq_desc[i].probe_ok || irq_desc[i].action)
                        continue;

                irq_desc[i].probing = 1;
                irq_desc[i].triggered = 0;
                if (irq_desc[i].chip->type)
                        irq_desc[i].chip->type(i, IRQT_PROBE);
                irq_desc[i].chip->unmask(i);
                irqs += 1;
        }
        spin_unlock_irq(&irq_controller_lock);

        /*
         * wait for spurious interrupts to mask themselves out again
         */
        for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
                /* min 100ms delay */;

        /*
         * now filter out any obviously spurious interrupts
         */
        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < NR_IRQS; i++) {
                if (irq_desc[i].probing && irq_desc[i].triggered) {
                        irq_desc[i].probing = 0;
                        irqs -= 1;
                }
        }
        spin_unlock_irq(&irq_controller_lock);

        return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
unsigned int probe_irq_mask(unsigned long irqs)
{
        unsigned int mask = 0, i;

        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < 16 && i < NR_IRQS; i++)
                if (irq_desc[i].probing && irq_desc[i].triggered)
                        mask |= 1 << i;
        spin_unlock_irq(&irq_controller_lock);

        up(&probe_sem);

        return mask;
}
EXPORT_SYMBOL(probe_irq_mask);
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
        unsigned int i;
        int irq_found = NO_IRQ;

        /*
         * look at the interrupts, and find exactly one
         * of the interrupts we were probing that triggered
         */
        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < NR_IRQS; i++) {
                if (irq_desc[i].probing &&
                    irq_desc[i].triggered) {
                        if (irq_found != NO_IRQ) {
                                irq_found = NO_IRQ;
                                goto out;
                        }
                        irq_found = i;
                }
        }

        if (irq_found == -1)
                irq_found = NO_IRQ;
out:
        spin_unlock_irq(&irq_controller_lock);

        up(&probe_sem);

        return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
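
/*
 * Example: the classic autoprobe sequence a legacy driver would run
 * with the calls above. A minimal sketch, not part of this file;
 * example_provoke_interrupt() stands in for whatever device poke
 * actually raises the line and is hypothetical.
 */
#if 0
extern void example_provoke_interrupt(void);    /* hypothetical */

static int example_autoprobe(void)
{
        unsigned long probe, delay;

        probe = probe_irq_on();         /* unmask probe-able lines */

        example_provoke_interrupt();    /* make the device fire */

        /* give the interrupt time to arrive, as probe_irq_on does */
        for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
                ;

        return probe_irq_off(probe);    /* NO_IRQ on none/many */
}
#endif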
#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
        pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

        spin_lock_irq(&irq_controller_lock);
        desc->cpu = cpu;
        desc->chip->set_cpu(desc, irq, cpu);
        spin_unlock_irq(&irq_controller_lock);
}

#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
                        int *eof, void *data)
{
        struct irqdesc *desc = irq_desc + ((int)data);
        int len = cpumask_scnprintf(page, count, desc->affinity);

        if (count - len < 2)
                return -EINVAL;
        page[len++] = '\n';
        page[len] = '\0';

        return len;
}

static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
                        unsigned long count, void *data)
{
        unsigned int irq = (unsigned int)data;
        struct irqdesc *desc = irq_desc + irq;
        cpumask_t affinity, tmp;
        int ret = -EIO;

        if (!desc->chip->set_cpu)
                goto out;

        ret = cpumask_parse(buffer, count, affinity);
        if (ret)
                goto out;

        cpus_and(tmp, affinity, cpu_online_map);
        if (cpus_empty(tmp)) {
                ret = -EINVAL;
                goto out;
        }

        desc->affinity = affinity;
        route_irq(desc, irq, first_cpu(tmp));
        ret = count;
out:
        return ret;
}
#endif
#endif
void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
        struct proc_dir_entry *dir;
        int irq;

        dir = proc_mkdir("irq", 0);
        if (!dir)
                return;

        for (irq = 0; irq < NR_IRQS; irq++) {
                struct proc_dir_entry *entry;
                struct irqdesc *desc;
                char name[16];

                desc = irq_desc + irq;
                memset(name, 0, sizeof(name));
                snprintf(name, sizeof(name) - 1, "%u", irq);

                desc->procdir = proc_mkdir(name, dir);
                if (!desc->procdir)
                        continue;

                entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
                if (entry) {
                        entry->nlink = 1;
                        entry->data = (void *)irq;
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }
        }
#endif
}
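
/*
 * Example: with the proc entries created above, userspace steers an IRQ
 * to a CPU by writing a hex cpumask (the irq number 19 here is
 * hypothetical):
 *
 *      echo 2 > /proc/irq/19/smp_affinity      # route IRQ19 to CPU1
 *      cat /proc/irq/19/smp_affinity           # read the current mask
 *
 * The write path parses the mask, intersects it with cpu_online_map and
 * routes the IRQ to the first CPU of the result via route_irq().
 */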
void __init init_IRQ(void)
{
        struct irqdesc *desc;
        extern void init_dma(void);
        int irq;

#ifdef CONFIG_SMP
        bad_irq_desc.affinity = CPU_MASK_ALL;
        bad_irq_desc.cpu = smp_processor_id();
#endif

        for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
                *desc = bad_irq_desc;
                INIT_LIST_HEAD(&desc->pend);
        }

        init_arch_irq();
        init_dma();
}

static int __init noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        return 1;
}

__setup("noirqdebug", noirqdebug_setup);