/* arch/powerpc/platforms/cell/axon_msi.c (from the Linux kernel, mirrors/linux; 485 lines in the original file) */

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright 2007, Michael Ellerman, IBM Corporation.
  4. */
  5. #include <linux/interrupt.h>
  6. #include <linux/irq.h>
  7. #include <linux/kernel.h>
  8. #include <linux/pci.h>
  9. #include <linux/msi.h>
  10. #include <linux/export.h>
  11. #include <linux/of_platform.h>
  12. #include <linux/slab.h>
  13. #include <asm/debugfs.h>
  14. #include <asm/dcr.h>
  15. #include <asm/machdep.h>
  16. #include <asm/prom.h>
  17. #include "cell.h"
/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6

/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10
/* Per-MSIC state; one instance per MSIC device-tree node (see probe). */
struct axon_msic {
	struct irq_domain *irq_domain;	/* nomap domain the MSI virqs live in */
	__le32 *fifo_virt;		/* kernel mapping of the MSI FIFO */
	dma_addr_t fifo_phys;		/* bus address of the FIFO, programmed into the MSIC */
	dcr_host_t dcr_host;		/* DCR handle for the MSIC registers */
	u32 read_offset;		/* our consume cursor into the FIFO */
#ifdef DEBUG
	u32 __iomem *trigger;		/* mapped trigger register, debugfs test hook */
#endif
};
#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
/* No-op stub when the debugfs trigger hook is compiled out. */
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif
  67. static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
  68. {
  69. pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
  70. dcr_write(msic->dcr_host, dcr_n, val);
  71. }
/*
 * Chained handler for the MSIC's cascade interrupt.
 *
 * The MSIC DMAs one 16-byte entry per MSI into the in-memory FIFO; the
 * low 16 bits of an entry's first word hold the irq number.  Drain
 * everything between our read cursor and the hardware write offset,
 * dispatching each valid irq.
 */
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			/* Poison the consumed slot so a stale re-read is detectable */
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;	/* re-read the same slot, don't advance */
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		/* Gave up after 100 retries: skip the stuck slot so we make progress */
		printk(KERN_WARNING "axon_msi: irq timed out\n");
		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}
  119. static struct axon_msic *find_msi_translator(struct pci_dev *dev)
  120. {
  121. struct irq_domain *irq_domain;
  122. struct device_node *dn, *tmp;
  123. const phandle *ph;
  124. struct axon_msic *msic = NULL;
  125. dn = of_node_get(pci_device_to_OF_node(dev));
  126. if (!dn) {
  127. dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
  128. return NULL;
  129. }
  130. for (; dn; dn = of_get_next_parent(dn)) {
  131. ph = of_get_property(dn, "msi-translator", NULL);
  132. if (ph)
  133. break;
  134. }
  135. if (!ph) {
  136. dev_dbg(&dev->dev,
  137. "axon_msi: no msi-translator property found\n");
  138. goto out_error;
  139. }
  140. tmp = dn;
  141. dn = of_find_node_by_phandle(*ph);
  142. of_node_put(tmp);
  143. if (!dn) {
  144. dev_dbg(&dev->dev,
  145. "axon_msi: msi-translator doesn't point to a node\n");
  146. goto out_error;
  147. }
  148. irq_domain = irq_find_host(dn);
  149. if (!irq_domain) {
  150. dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
  151. dn);
  152. goto out_error;
  153. }
  154. msic = irq_domain->host_data;
  155. out_error:
  156. of_node_put(dn);
  157. return msic;
  158. }
/*
 * Fill in @msg's address from the "msi-address-64" (only tried when the
 * device's first MSI descriptor is 64-bit capable) or "msi-address-32"
 * property, searched on the device's OF node and its ancestors.
 *
 * Returns 0 on success, -ENODEV if the device has no OF node, -ENOENT if
 * no address property exists anywhere up the tree, -EINVAL if the
 * property length is neither 4 nor 8 bytes.
 */
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = first_pci_msi_entry(dev);

	/*
	 * of_get_next_parent() drops the reference on the current node,
	 * so after the loop we only hold a reference if we broke out on
	 * a node that has the property.
	 */
	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		/* dn is NULL here (loop ran off the root), nothing to put */
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}
  204. static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  205. {
  206. unsigned int virq, rc;
  207. struct msi_desc *entry;
  208. struct msi_msg msg;
  209. struct axon_msic *msic;
  210. msic = find_msi_translator(dev);
  211. if (!msic)
  212. return -ENODEV;
  213. rc = setup_msi_msg_address(dev, &msg);
  214. if (rc)
  215. return rc;
  216. for_each_pci_msi_entry(entry, dev) {
  217. virq = irq_create_direct_mapping(msic->irq_domain);
  218. if (!virq) {
  219. dev_warn(&dev->dev,
  220. "axon_msi: virq allocation failed!\n");
  221. return -1;
  222. }
  223. dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
  224. irq_set_msi_desc(virq, entry);
  225. msg.data = virq;
  226. pci_write_msi_msg(virq, &msg);
  227. }
  228. return 0;
  229. }
  230. static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
  231. {
  232. struct msi_desc *entry;
  233. dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
  234. for_each_pci_msi_entry(entry, dev) {
  235. if (!entry->irq)
  236. continue;
  237. irq_set_msi_desc(entry->irq, NULL);
  238. irq_dispose_mapping(entry->irq);
  239. }
  240. }
/* irq_chip for the per-device MSI virqs; masking goes through the PCI MSI core */
static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};
/*
 * irq_domain .map callback: attach the owning MSIC as chip data (the
 * cascade handler uses it to validate FIFO entries) and install the
 * MSI irq_chip with a simple-irq flow.
 */
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};
  257. static void axon_msi_shutdown(struct platform_device *device)
  258. {
  259. struct axon_msic *msic = dev_get_drvdata(&device->dev);
  260. u32 tmp;
  261. pr_devel("axon_msi: disabling %pOF\n",
  262. irq_domain_get_of_node(msic->irq_domain));
  263. tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
  264. tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
  265. msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
  266. }
  267. static int axon_msi_probe(struct platform_device *device)
  268. {
  269. struct device_node *dn = device->dev.of_node;
  270. struct axon_msic *msic;
  271. unsigned int virq;
  272. int dcr_base, dcr_len;
  273. pr_devel("axon_msi: setting up dn %pOF\n", dn);
  274. msic = kzalloc(sizeof(*msic), GFP_KERNEL);
  275. if (!msic) {
  276. printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
  277. dn);
  278. goto out;
  279. }
  280. dcr_base = dcr_resource_start(dn, 0);
  281. dcr_len = dcr_resource_len(dn, 0);
  282. if (dcr_base == 0 || dcr_len == 0) {
  283. printk(KERN_ERR
  284. "axon_msi: couldn't parse dcr properties on %pOF\n",
  285. dn);
  286. goto out_free_msic;
  287. }
  288. msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
  289. if (!DCR_MAP_OK(msic->dcr_host)) {
  290. printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
  291. dn);
  292. goto out_free_msic;
  293. }
  294. msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
  295. &msic->fifo_phys, GFP_KERNEL);
  296. if (!msic->fifo_virt) {
  297. printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
  298. dn);
  299. goto out_free_msic;
  300. }
  301. virq = irq_of_parse_and_map(dn, 0);
  302. if (!virq) {
  303. printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
  304. dn);
  305. goto out_free_fifo;
  306. }
  307. memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
  308. /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
  309. msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
  310. if (!msic->irq_domain) {
  311. printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
  312. dn);
  313. goto out_free_fifo;
  314. }
  315. irq_set_handler_data(virq, msic);
  316. irq_set_chained_handler(virq, axon_msi_cascade);
  317. pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
  318. /* Enable the MSIC hardware */
  319. msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
  320. msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
  321. msic->fifo_phys & 0xFFFFFFFF);
  322. msic_dcr_write(msic, MSIC_CTRL_REG,
  323. MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
  324. MSIC_CTRL_FIFO_SIZE);
  325. msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
  326. & MSIC_FIFO_SIZE_MASK;
  327. dev_set_drvdata(&device->dev, msic);
  328. cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
  329. cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
  330. axon_msi_debug_setup(dn, msic);
  331. printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);
  332. return 0;
  333. out_free_fifo:
  334. dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
  335. msic->fifo_phys);
  336. out_free_msic:
  337. kfree(msic);
  338. out:
  339. return -1;
  340. }
/* Device-tree match table: one MSIC per "ibm,axon-msic" node */
static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};
/* Platform driver; no .remove — the MSIC stays up until shutdown */
static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name		= "axon-msi",
		.of_match_table	= axon_msi_device_id,
	},
};
/* Registered at subsys_initcall time, ahead of normal device/PCI init */
static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);
  360. #ifdef DEBUG
/* debugfs write handler: poke @val at the MSIC's mapped trigger register */
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;

	out_le32(msic->trigger, val);

	return 0;
}
/* debugfs read handler: the trigger is write-only, always report 0 */
static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
  373. void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
  374. {
  375. char name[8];
  376. u64 addr;
  377. addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
  378. if (addr == OF_BAD_ADDR) {
  379. pr_devel("axon_msi: couldn't translate reg property\n");
  380. return;
  381. }
  382. msic->trigger = ioremap(addr, 0x4);
  383. if (!msic->trigger) {
  384. pr_devel("axon_msi: ioremap failed\n");
  385. return;
  386. }
  387. snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
  388. debugfs_create_file(name, 0600, powerpc_debugfs_root, msic, &fops_msic);
  389. }
  390. #endif /* DEBUG */