/arch/ia64/sn/kernel/irq.c

https://bitbucket.org/evzijst/gittest · C · 431 lines · 344 code · 59 blank · 28 comment · 60 complexity · 45c20dedec5d6426352e1df62d635e58 MD5 · raw file

  1. /*
  2. * Platform dependent support for SGI SN
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
  9. */
  10. #include <linux/irq.h>
  11. #include <asm/sn/intr.h>
  12. #include <asm/sn/addrs.h>
  13. #include <asm/sn/arch.h>
  14. #include "xtalk/xwidgetdev.h"
  15. #include "pci/pcibus_provider_defs.h"
  16. #include "pci/pcidev.h"
  17. #include "pci/pcibr_provider.h"
  18. #include <asm/sn/shub_mmr.h>
  19. #include <asm/sn/sn_sal.h>
/* Forward declarations for helpers defined later in this file. */
static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

/* Defined elsewhere in the SN platform code. */
extern int sn_force_interrupt_flag;
extern int sn_ioif_inited;

/*
 * Per-irq list heads: sn_irq[i] points at a chain of sn_irq_info
 * structs (linked through irq_next by sn_irq_fixup()) for irq i.
 */
struct sn_irq_info **sn_irq;
  26. static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
  27. u64 sn_irq_info,
  28. int req_irq, nasid_t req_nasid,
  29. int req_slice)
  30. {
  31. struct ia64_sal_retval ret_stuff;
  32. ret_stuff.status = 0;
  33. ret_stuff.v0 = 0;
  34. SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
  35. (u64) SAL_INTR_ALLOC, (u64) local_nasid,
  36. (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
  37. (u64) req_nasid, (u64) req_slice);
  38. return ret_stuff.status;
  39. }
  40. static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
  41. struct sn_irq_info *sn_irq_info)
  42. {
  43. struct ia64_sal_retval ret_stuff;
  44. ret_stuff.status = 0;
  45. ret_stuff.v0 = 0;
  46. SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
  47. (u64) SAL_INTR_FREE, (u64) local_nasid,
  48. (u64) local_widget, (u64) sn_irq_info->irq_irq,
  49. (u64) sn_irq_info->irq_cookie, 0, 0);
  50. }
/* Nothing to do to start an SN hub irq; always reports success (0). */
static unsigned int sn_startup_irq(unsigned int irq)
{
	return 0;
}
/* Nothing to do to shut down an SN hub irq. */
static void sn_shutdown_irq(unsigned int irq)
{
}
/* SN hub irqs cannot be individually disabled from here; no-op. */
static void sn_disable_irq(unsigned int irq)
{
}
/* Counterpart of sn_disable_irq(); also a no-op. */
static void sn_enable_irq(unsigned int irq)
{
}
  64. static void sn_ack_irq(unsigned int irq)
  65. {
  66. uint64_t event_occurred, mask = 0;
  67. int nasid;
  68. irq = irq & 0xff;
  69. nasid = get_nasid();
  70. event_occurred =
  71. HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
  72. if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
  73. mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
  74. }
  75. if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
  76. mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
  77. }
  78. if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
  79. mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
  80. }
  81. if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
  82. mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
  83. }
  84. HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
  85. mask);
  86. __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
  87. move_irq(irq);
  88. }
  89. static void sn_end_irq(unsigned int irq)
  90. {
  91. int nasid;
  92. int ivec;
  93. uint64_t event_occurred;
  94. ivec = irq & 0xff;
  95. if (ivec == SGI_UART_VECTOR) {
  96. nasid = get_nasid();
  97. event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
  98. (nasid, SH_EVENT_OCCURRED));
  99. /* If the UART bit is set here, we may have received an
  100. * interrupt from the UART that the driver missed. To
  101. * make sure, we IPI ourselves to force us to look again.
  102. */
  103. if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
  104. platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
  105. IA64_IPI_DM_INT, 0);
  106. }
  107. }
  108. __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
  109. if (sn_force_interrupt_flag)
  110. force_interrupt(irq);
  111. }
/*
 * Retarget "irq" at the first cpu in "mask".  For every sn_irq_info on
 * the sn_irq[irq] chain: free the old PROM interrupt resource, allocate
 * a new one aimed at the target nasid/slice, and fold the new cookie /
 * xtalk address back into the kernel's sn_irq_info.
 *
 * NOTE(review): if sn_intr_alloc() fails mid-chain we break out after
 * the old PROM resource was already freed, leaving the chain partially
 * retargeted — confirm this is acceptable for the PROM interface.
 */
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info = sn_irq[irq];
	struct sn_irq_info *tmp_sn_irq_info;
	int cpuid, cpuphys;
	nasid_t t_nasid;	/* nasid to target */
	int t_slice;		/* slice to target */

	/* allocate a temp sn_irq_info struct to get new target info */
	tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
	if (!tmp_sn_irq_info)
		return;

	cpuid = first_cpu(mask);
	cpuphys = cpu_physical_id(cpuid);
	t_nasid = cpuid_to_nasid(cpuid);
	t_slice = cpuid_to_slice(cpuid);

	while (sn_irq_info) {
		int status;
		int local_widget;
		uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
		nasid_t local_nasid = NASID_GET(bridge);

		if (!bridge)
			break;	/* irq is not a device interrupt */

		/* odd nasid => TIO widget window — TODO confirm layout */
		if (local_nasid & 1)
			local_widget = TIO_SWIN_WIDGETNUM(bridge);
		else
			local_widget = SWIN_WIDGETNUM(bridge);

		/* Free the old PROM sn_irq_info structure */
		sn_intr_free(local_nasid, local_widget, sn_irq_info);

		/* allocate a new PROM sn_irq_info struct */
		status = sn_intr_alloc(local_nasid, local_widget,
				       __pa(tmp_sn_irq_info), irq, t_nasid,
				       t_slice);

		if (status == 0) {
			/* Update kernels sn_irq_info with new target info */
			unregister_intr_pda(sn_irq_info);
			sn_irq_info->irq_cpuid = cpuid;
			sn_irq_info->irq_nasid = t_nasid;
			sn_irq_info->irq_slice = t_slice;
			sn_irq_info->irq_xtalkaddr =
			    tmp_sn_irq_info->irq_xtalkaddr;
			sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
			register_intr_pda(sn_irq_info);

			/* re-route the device-level interrupt on PCI bridges */
			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
				pcibr_change_devices_irq(sn_irq_info);
			}

			sn_irq_info = sn_irq_info->irq_next;

#ifdef CONFIG_SMP
			set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
		} else {
			break;	/* snp_affinity failed the intr_alloc */
		}
	}
	kfree(tmp_sn_irq_info);
}
  167. struct hw_interrupt_type irq_type_sn = {
  168. "SN hub",
  169. sn_startup_irq,
  170. sn_shutdown_irq,
  171. sn_enable_irq,
  172. sn_disable_irq,
  173. sn_ack_irq,
  174. sn_end_irq,
  175. sn_set_affinity_irq
  176. };
  177. unsigned int sn_local_vector_to_irq(u8 vector)
  178. {
  179. return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
  180. }
  181. void sn_irq_init(void)
  182. {
  183. int i;
  184. irq_desc_t *base_desc = irq_desc;
  185. for (i = 0; i < NR_IRQS; i++) {
  186. if (base_desc[i].handler == &no_irq_type) {
  187. base_desc[i].handler = &irq_type_sn;
  188. }
  189. }
  190. }
  191. static void register_intr_pda(struct sn_irq_info *sn_irq_info)
  192. {
  193. int irq = sn_irq_info->irq_irq;
  194. int cpu = sn_irq_info->irq_cpuid;
  195. if (pdacpu(cpu)->sn_last_irq < irq) {
  196. pdacpu(cpu)->sn_last_irq = irq;
  197. }
  198. if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
  199. pdacpu(cpu)->sn_first_irq = irq;
  200. }
  201. }
/*
 * Shrink the per-cpu [sn_first_irq, sn_last_irq] window after an irq is
 * removed from this cpu.  When the removed irq was a window endpoint,
 * rescan sn_irq[] toward the interior for the nearest remaining irq
 * that still targets this cpu and make that the new endpoint.
 */
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;

	/* Removed irq was the upper endpoint: scan downward. */
	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
			tmp_irq_info = sn_irq[i];
			while (tmp_irq_info) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch++;
					break;
				}
				tmp_irq_info = tmp_irq_info->irq_next;
			}
			if (foundmatch) {
				break;
			}
		}
		/* i == 0 (no match) empties the window's upper bound. */
		pdacpu(cpu)->sn_last_irq = i;
	}

	/* Removed irq was the lower endpoint: scan upward. */
	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
			tmp_irq_info = sn_irq[i];
			while (tmp_irq_info) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch++;
					break;
				}
				tmp_irq_info = tmp_irq_info->irq_next;
			}
			if (foundmatch) {
				break;
			}
		}
		/* i == NR_IRQS (no match) resets the window to "empty" (0). */
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
}
  243. struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
  244. nasid_t nasid, int slice)
  245. {
  246. struct sn_irq_info *sn_irq_info;
  247. int status;
  248. sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
  249. if (sn_irq_info == NULL)
  250. return NULL;
  251. memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
  252. status =
  253. sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
  254. nasid, slice);
  255. if (status) {
  256. kfree(sn_irq_info);
  257. return NULL;
  258. } else {
  259. return sn_irq_info;
  260. }
  261. }
  262. void sn_irq_free(struct sn_irq_info *sn_irq_info)
  263. {
  264. uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
  265. nasid_t local_nasid = NASID_GET(bridge);
  266. int local_widget;
  267. if (local_nasid & 1) /* tio check */
  268. local_widget = TIO_SWIN_WIDGETNUM(bridge);
  269. else
  270. local_widget = SWIN_WIDGETNUM(bridge);
  271. sn_intr_free(local_nasid, local_widget, sn_irq_info);
  272. kfree(sn_irq_info);
  273. }
  274. void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
  275. {
  276. nasid_t nasid = sn_irq_info->irq_nasid;
  277. int slice = sn_irq_info->irq_slice;
  278. int cpu = nasid_slice_to_cpuid(nasid, slice);
  279. sn_irq_info->irq_cpuid = cpu;
  280. sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
  281. /* link it into the sn_irq[irq] list */
  282. sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
  283. sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
  284. (void)register_intr_pda(sn_irq_info);
  285. }
  286. static void force_interrupt(int irq)
  287. {
  288. struct sn_irq_info *sn_irq_info;
  289. if (!sn_ioif_inited)
  290. return;
  291. sn_irq_info = sn_irq[irq];
  292. while (sn_irq_info) {
  293. if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
  294. (sn_irq_info->irq_bridge != NULL)) {
  295. pcibr_force_interrupt(sn_irq_info);
  296. }
  297. sn_irq_info = sn_irq_info->irq_next;
  298. }
  299. }
  300. /*
  301. * Check for lost interrupts. If the PIC int_status reg. says that
  302. * an interrupt has been sent, but not handled, and the interrupt
  303. * is not pending in either the cpu irr regs or in the soft irr regs,
  304. * and the interrupt is not in service, then the interrupt may have
  305. * been lost. Force an interrupt on that pin. It is possible that
  306. * the interrupt is in flight, so we may generate a spurious interrupt,
  307. * but we should never miss a real lost interrupt.
  308. */
  309. static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
  310. {
  311. uint64_t regval;
  312. int irr_reg_num;
  313. int irr_bit;
  314. uint64_t irr_reg;
  315. struct pcidev_info *pcidev_info;
  316. struct pcibus_info *pcibus_info;
  317. pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
  318. if (!pcidev_info)
  319. return;
  320. pcibus_info =
  321. (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
  322. pdi_pcibus_info;
  323. regval = pcireg_intr_status_get(pcibus_info);
  324. irr_reg_num = irq_to_vector(irq) / 64;
  325. irr_bit = irq_to_vector(irq) % 64;
  326. switch (irr_reg_num) {
  327. case 0:
  328. irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
  329. break;
  330. case 1:
  331. irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
  332. break;
  333. case 2:
  334. irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
  335. break;
  336. case 3:
  337. irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
  338. break;
  339. }
  340. if (!test_bit(irr_bit, &irr_reg)) {
  341. if (!test_bit(irq, pda->sn_soft_irr)) {
  342. if (!test_bit(irq, pda->sn_in_service_ivecs)) {
  343. regval &= 0xff;
  344. if (sn_irq_info->irq_int_bit & regval &
  345. sn_irq_info->irq_last_intr) {
  346. regval &=
  347. ~(sn_irq_info->
  348. irq_int_bit & regval);
  349. pcibr_force_interrupt(sn_irq_info);
  350. }
  351. }
  352. }
  353. }
  354. sn_irq_info->irq_last_intr = regval;
  355. }
  356. void sn_lb_int_war_check(void)
  357. {
  358. int i;
  359. if (!sn_ioif_inited || pda->sn_first_irq == 0)
  360. return;
  361. for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
  362. struct sn_irq_info *sn_irq_info = sn_irq[i];
  363. while (sn_irq_info) {
  364. /* Only call for PCI bridges that are fully initialized. */
  365. if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
  366. (sn_irq_info->irq_bridge != NULL)) {
  367. sn_check_intr(i, sn_irq_info);
  368. }
  369. sn_irq_info = sn_irq_info->irq_next;
  370. }
  371. }
  372. }