/* arch/powerpc/platforms/cell/axon_msi.c — MSI support for the IBM Axon bridge (Cell). */
  1. /*
  2. * Copyright 2007, Michael Ellerman, IBM Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>
/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG 0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG 0x3
#define MSIC_BASE_ADDR_LO_REG 0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG 0x5
#define MSIC_WRITE_OFFSET_REG 0x6

/* MSIC control register flags */
#define MSIC_CTRL_ENABLE 0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002
#define MSIC_CTRL_IRQ_ENABLE 0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT 16
#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE 0x10
/* Per-MSIC instance state, allocated in axon_msi_probe(). */
struct axon_msic {
	struct irq_host *irq_host;	/* NOMAP host whose virqs back the MSIs */
	__le32 *fifo_virt;		/* kernel mapping of the MSI entry FIFO */
	dma_addr_t fifo_phys;		/* bus address programmed into BASE_ADDR_{HI,LO} */
	dcr_host_t dcr_host;		/* mapped DCR window for the MSIC registers */
	u32 read_offset;		/* software consumer offset into the FIFO */
#ifdef DEBUG
	u32 __iomem *trigger;		/* MMIO register written by the debugfs hook */
#endif
};
#ifdef DEBUG
/* Defined at the bottom of this file: creates a debugfs file that writes
 * the MSIC trigger register. */
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
/* Non-DEBUG builds: debug setup compiles away entirely. */
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif
  69. static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
  70. {
  71. pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
  72. dcr_write(msic->dcr_host, dcr_n, val);
  73. }
/*
 * Chained handler for the MSIC's cascade interrupt.
 *
 * The hardware DMAs one 16-byte entry into the FIFO per MSI received;
 * the low 16 bits of the first word are the virq number that
 * axon_msi_setup_msi_irqs() stashed in the MSI data word. Drain the
 * FIFO from our read offset up to the hardware's write offset,
 * dispatching each entry with generic_handle_irq().
 */
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct axon_msic *msic = get_irq_data(irq);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		/* The FIFO is an array of __le32; the irq lives in the low
		 * half of the first word of each 16-byte entry. */
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
			generic_handle_irq(msi);
			/* Poison the consumed slot so stale data is
			 * recognisable next time around. */
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		/* Advance and wrap our consumer offset (16 bytes/entry). */
		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		/* Gave up after 100 retries: skip the stuck entry so we
		 * don't spin on it forever. */
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	desc->chip->eoi(irq);
}
/*
 * Walk up the device tree from a PCI device until a node carrying an
 * "msi-translator" phandle is found, then return the axon_msic hanging
 * off the irq_host of the node that phandle points at.
 *
 * Returns NULL (with a dev_dbg explanation) if any step fails.
 */
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_host *irq_host;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	/* of_get_next_parent() drops the ref on dn itself, so no put is
	 * needed inside the loop. If the walk runs off the root, dn ends
	 * NULL and ph is NULL from the final of_get_property() call. */
	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	/* Trade our ref on the PCI-side node for one on the MSIC node. */
	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_host = irq_find_host(dn);
	if (!irq_host) {
		dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_host->host_data;

out_error:
	/* of_node_put(NULL) is a no-op, so this is safe on every path. */
	of_node_put(dn);

	return msic;
}
  160. static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
  161. {
  162. if (!find_msi_translator(dev))
  163. return -ENODEV;
  164. return 0;
  165. }
/*
 * Fill in msg->address_{hi,lo} from the first "msi-address-64" (only
 * consulted for 64-bit capable MSIs) or "msi-address-32" property found
 * while walking from the device's node towards the root.
 *
 * Returns 0 on success or a negative errno.
 */
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	/* Only the first descriptor's is_64 flag is consulted when
	 * choosing which property to look for. */
	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	/* of_get_next_parent() consumes the ref on dn; if the walk falls
	 * off the root, prop is NULL from the last lookup. */
	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	/* len is the property size in bytes: 8 => hi+lo cells, 4 => lo only. */
	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}
  211. static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  212. {
  213. unsigned int virq, rc;
  214. struct msi_desc *entry;
  215. struct msi_msg msg;
  216. struct axon_msic *msic;
  217. msic = find_msi_translator(dev);
  218. if (!msic)
  219. return -ENODEV;
  220. rc = setup_msi_msg_address(dev, &msg);
  221. if (rc)
  222. return rc;
  223. /* We rely on being able to stash a virq in a u16 */
  224. BUILD_BUG_ON(NR_IRQS > 65536);
  225. list_for_each_entry(entry, &dev->msi_list, list) {
  226. virq = irq_create_direct_mapping(msic->irq_host);
  227. if (virq == NO_IRQ) {
  228. dev_warn(&dev->dev,
  229. "axon_msi: virq allocation failed!\n");
  230. return -1;
  231. }
  232. dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
  233. set_irq_msi(virq, entry);
  234. msg.data = virq;
  235. write_msi_msg(virq, &msg);
  236. }
  237. return 0;
  238. }
  239. static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
  240. {
  241. struct msi_desc *entry;
  242. dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
  243. list_for_each_entry(entry, &dev->msi_list, list) {
  244. if (entry->irq == NO_IRQ)
  245. continue;
  246. set_irq_msi(entry->irq, NULL);
  247. irq_dispose_mapping(entry->irq);
  248. }
  249. }
  250. static struct irq_chip msic_irq_chip = {
  251. .mask = mask_msi_irq,
  252. .unmask = unmask_msi_irq,
  253. .shutdown = unmask_msi_irq,
  254. .name = "AXON-MSI",
  255. };
/*
 * irq_host map callback: every virq handed out by
 * irq_create_direct_mapping() gets the MSI chip and the simple-irq flow
 * handler. Always succeeds.
 */
static int msic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}
/* Only a .map hook is supplied; the host is created with
 * IRQ_HOST_MAP_NOMAP in axon_msi_probe(). */
static struct irq_host_ops msic_host_ops = {
	.map	= msic_host_map,
};
  265. static int axon_msi_shutdown(struct of_device *device)
  266. {
  267. struct axon_msic *msic = dev_get_drvdata(&device->dev);
  268. u32 tmp;
  269. pr_devel("axon_msi: disabling %s\n",
  270. msic->irq_host->of_node->full_name);
  271. tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
  272. tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
  273. msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
  274. return 0;
  275. }
/*
 * Probe one MSIC: map its DCR window, allocate the DMA FIFO the
 * hardware writes MSI entries into, hook the cascade interrupt, enable
 * the hardware and install the platform-wide MSI operations.
 */
static int axon_msi_probe(struct of_device *device,
			  const struct of_device_id *device_id)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	/* Fill the FIFO with 0xff so a slot the hardware hasn't written
	 * yet reads as an invalid irq and takes the retry path in
	 * axon_msi_cascade(). */
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
					NR_IRQS, &msic_host_ops, 0);
	if (!msic->irq_host) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	msic->irq_host->host_data = msic;

	set_irq_data(virq, msic);
	set_irq_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
		       msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
		       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
		       MSIC_CTRL_FIFO_SIZE);

	/* Start consuming wherever the hardware's write pointer is now. */
	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
	ppc_md.msi_check_device = axon_msi_check_device;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

	/* NOTE(review): on these error paths the dcr mapping and the
	 * parsed virq are not released, and a bare -1 (-EPERM) is
	 * returned rather than a specific errno — TODO confirm
	 * intentional. */
out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:
	return -1;
}
/* Device-tree match table: bind to any "ibm,axon-msic" node. */
static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};
/* NOTE(review): no .remove hook is provided — the MSIC is only ever
 * quiesced via .shutdown, never torn down; presumably it lives for the
 * lifetime of the kernel. */
static struct of_platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.owner = THIS_MODULE,
		.of_match_table = axon_msi_device_id,
	},
};
/* Registered at subsys_initcall time so the ppc_md MSI hooks are in
 * place before ordinary device drivers probe — TODO confirm the exact
 * ordering requirement. */
static int __init axon_msi_init(void)
{
	return of_register_platform_driver(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);
  373. #ifdef DEBUG
/*
 * debugfs write hook: store the written value into the MSIC trigger
 * register mapped in axon_msi_debug_setup().
 */
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;

	out_le32(msic->trigger, val);

	return 0;
}
/* debugfs read hook: there is nothing meaningful to read back, so
 * always report zero. */
static int msic_get(void *data, u64 *val)
{
	*val = 0;

	return 0;
}

/* debugfs attribute wiring msic_get/msic_set with "%llu" formatting. */
DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
  386. void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
  387. {
  388. char name[8];
  389. u64 addr;
  390. addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
  391. if (addr == OF_BAD_ADDR) {
  392. pr_devel("axon_msi: couldn't translate reg property\n");
  393. return;
  394. }
  395. msic->trigger = ioremap(addr, 0x4);
  396. if (!msic->trigger) {
  397. pr_devel("axon_msi: ioremap failed\n");
  398. return;
  399. }
  400. snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
  401. if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
  402. msic, &fops_msic)) {
  403. pr_devel("axon_msi: debugfs_create_file failed!\n");
  404. return;
  405. }
  406. }
  407. #endif /* DEBUG */