/drivers/staging/vme/bridges/vme_tsi148.c

https://bitbucket.org/wisechild/galaxy-nexus · C · 2640 lines · 1852 code · 429 blank · 359 comment · 260 complexity · f2771802109565078f29c72070a82e0f MD5 · raw file

Large files are truncated; click here to view the full file.

  1. /*
  2. * Support for the Tundra TSI148 VME-PCI Bridge Chip
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/errno.h>
  20. #include <linux/proc_fs.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/sched.h>
  27. #include <linux/slab.h>
  28. #include <linux/time.h>
  29. #include <linux/io.h>
  30. #include <linux/uaccess.h>
  31. #include "../vme.h"
  32. #include "../vme_bridge.h"
  33. #include "vme_tsi148.h"
/* Forward declarations for the driver entry points defined later in file. */
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);

/* Module parameters */
static int err_chk;	/* presumably enables VME bus error checking - TODO confirm */
static int geoid;	/* presumably overrides the geographic (slot) address - TODO confirm */

static char driver_name[] = "vme_tsi148";

/* PCI IDs this driver binds to: the Tundra TSI148 VME bridge only. */
static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
	{ },
};

static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};
  52. static void reg_join(unsigned int high, unsigned int low,
  53. unsigned long long *variable)
  54. {
  55. *variable = (unsigned long long)high << 32;
  56. *variable |= (unsigned long long)low;
  57. }
  58. static void reg_split(unsigned long long variable, unsigned int *high,
  59. unsigned int *low)
  60. {
  61. *low = (unsigned int)variable & 0xFFFFFFFF;
  62. *high = (unsigned int)(variable >> 32);
  63. }
/*
 * Wakes up DMA queue.
 *
 * channel_mask holds the latched interrupt status bits; for each DMA
 * channel that signalled, wake its wait queue and accumulate the matching
 * INTC clear bit. Returns the bits the caller must write to INTC.
 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
	int channel_mask)
{
	u32 serviced = 0;

	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
		/* DMA channel 0 done - release any waiter */
		wake_up(&bridge->dma_queue[0]);
		serviced |= TSI148_LCSR_INTC_DMA0C;
	}
	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
		/* DMA channel 1 done - release any waiter */
		wake_up(&bridge->dma_queue[1]);
		serviced |= TSI148_LCSR_INTC_DMA1C;
	}

	return serviced;
}
  81. /*
  82. * Wake up location monitor queue
  83. */
  84. static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
  85. {
  86. int i;
  87. u32 serviced = 0;
  88. for (i = 0; i < 4; i++) {
  89. if (stat & TSI148_LCSR_INTS_LMS[i]) {
  90. /* We only enable interrupts if the callback is set */
  91. bridge->lm_callback[i](i);
  92. serviced |= TSI148_LCSR_INTC_LMC[i];
  93. }
  94. }
  95. return serviced;
  96. }
  97. /*
  98. * Wake up mail box queue.
  99. *
  100. * XXX This functionality is not exposed up though API.
  101. */
  102. static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
  103. {
  104. int i;
  105. u32 val;
  106. u32 serviced = 0;
  107. struct tsi148_driver *bridge;
  108. bridge = tsi148_bridge->driver_priv;
  109. for (i = 0; i < 4; i++) {
  110. if (stat & TSI148_LCSR_INTS_MBS[i]) {
  111. val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
  112. dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
  113. ": 0x%x\n", i, val);
  114. serviced |= TSI148_LCSR_INTC_MBC[i];
  115. }
  116. }
  117. return serviced;
  118. }
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 *
 * Logs the latched error address/attributes, clears the exception latch,
 * and returns the INTC bit the caller must clear.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Faulting PCI address is held as two 32-bit halves (EDPAU:EDPAL) */
	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
		"attributes: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(bridge->base + TSI148_LCSR_EDPAT));

	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
		"completion reg: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(bridge->base + TSI148_LCSR_EDPXS));

	/* Clear the latch so the next PCI exception can be captured */
	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}
/*
 * Save address and status when VME error interrupt occurs.
 *
 * Latches the faulting VME address and attributes, queues them on the
 * bridge's error list for later matching by tsi148_find_error(), clears
 * the status latch and returns the INTC bit to clear.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	struct vme_bus_error *error;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Read the latched error address (two halves) and attributes */
	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	/* GFP_ATOMIC: called from interrupt context, must not sleep */
	error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
	if (error) {
		error->address = error_addr;
		error->attributes = error_attrib;
		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
	} else {
		/* Could not queue the error - at least log the details */
		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
			"VMEbus Error reporting\n");
		dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
			"0x%llx, attributes: %08x\n", error_addr, error_attrib);
	}

	/* Clear Status */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}
/*
 * Wake up IACK queue.
 *
 * Releases any thread sleeping in tsi148_irq_generate() waiting for the
 * software-generated interrupt to be acknowledged; returns the INTC bit
 * to clear.
 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return TSI148_LCSR_INTC_IACKC;
}
  181. /*
  182. * Calling VME bus interrupt callback if provided.
  183. */
  184. static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
  185. u32 stat)
  186. {
  187. int vec, i, serviced = 0;
  188. struct tsi148_driver *bridge;
  189. bridge = tsi148_bridge->driver_priv;
  190. for (i = 7; i > 0; i--) {
  191. if (stat & (1 << i)) {
  192. /*
  193. * Note: Even though the registers are defined as
  194. * 32-bits in the spec, we only want to issue 8-bit
  195. * IACK cycles on the bus, read from offset 3.
  196. */
  197. vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
  198. vme_irq_handler(tsi148_bridge, i, vec);
  199. serviced |= (1 << i);
  200. }
  201. }
  202. return serviced;
  203. }
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Each sub handler returns the INTC bits it serviced; they are accumulated
 * and written back to INTC in a single store at the end.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* IRQ line is shared: nothing pending means it wasn't ours */
	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
/*
 * Install the shared interrupt handler and enable the baseline set of
 * interrupt sources (DMA, mailboxes, bus errors, IACK).
 *
 * Returns 0 on success or the negative error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	/* The bridge's parent device embeds the PCI device we got probed on */
	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	bridge = tsi148_bridge->driver_priv;

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);

	mutex_init(&tsi148_bridge->irq_mtx);

	/* IRQF_SHARED: the PCI interrupt line may be shared with others */
	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	/* Same mask is written to both enable-out and enable registers */
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
/*
 * Mask and acknowledge all interrupt sources, then detach the handler.
 * Counterpart of tsi148_irq_init().
 */
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Turn off interrupts */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);

	/* Clear all interrupts */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

	/* Detach interrupt handler */
	free_irq(pdev->irq, tsi148_bridge);
}
  319. /*
  320. * Check to see if an IACk has been received, return true (1) or false (0).
  321. */
  322. static int tsi148_iack_received(struct tsi148_driver *bridge)
  323. {
  324. u32 tmp;
  325. tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
  326. if (tmp & TSI148_LCSR_VICR_IRQS)
  327. return 0;
  328. else
  329. return 1;
  330. }
/*
 * Configure VME interrupt
 *
 * Enables (state != 0) or disables (state == 0) generation of interrupts
 * for VME bus IRQ 'level' (1-7). When disabling with sync != 0, waits for
 * any in-flight handler on the PCI line to finish before returning.
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		/* Disable: clear INTEN first, then INTEO */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		/* Optionally wait for a running handler to complete */
		if (sync != 0) {
			pdev = container_of(tsi148_bridge->parent,
				struct pci_dev, dev);

			synchronize_irq(pdev->irq);
		}
	} else {
		/* Enable: set INTEO first, then INTEN (reverse order) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * Serialised by bridge->vme_int so only one software interrupt is in
 * flight at a time. Always returns 0.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	/* Woken by tsi148_IACK_irqhandler(); NOTE(review): the interruptible
	 * wait's return value is ignored, so a signal ends the wait early */
	wait_event_interruptible(bridge->iack_queue,
		tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}
  389. /*
  390. * Find the first error in this address range
  391. */
  392. static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
  393. vme_address_t aspace, unsigned long long address, size_t count)
  394. {
  395. struct list_head *err_pos;
  396. struct vme_bus_error *vme_err, *valid = NULL;
  397. unsigned long long bound;
  398. bound = address + count;
  399. /*
  400. * XXX We are currently not looking at the address space when parsing
  401. * for errors. This is because parsing the Address Modifier Codes
  402. * is going to be quite resource intensive to do properly. We
  403. * should be OK just looking at the addresses and this is certainly
  404. * much better than what we had before.
  405. */
  406. err_pos = NULL;
  407. /* Iterate through errors */
  408. list_for_each(err_pos, &tsi148_bridge->vme_errors) {
  409. vme_err = list_entry(err_pos, struct vme_bus_error, list);
  410. if ((vme_err->address >= address) &&
  411. (vme_err->address < bound)) {
  412. valid = vme_err;
  413. break;
  414. }
  415. }
  416. return valid;
  417. }
  418. /*
  419. * Clear errors in the provided address range.
  420. */
  421. static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
  422. vme_address_t aspace, unsigned long long address, size_t count)
  423. {
  424. struct list_head *err_pos, *temp;
  425. struct vme_bus_error *vme_err;
  426. unsigned long long bound;
  427. bound = address + count;
  428. /*
  429. * XXX We are currently not looking at the address space when parsing
  430. * for errors. This is because parsing the Address Modifier Codes
  431. * is going to be quite resource intensive to do properly. We
  432. * should be OK just looking at the addresses and this is certainly
  433. * much better than what we had before.
  434. */
  435. err_pos = NULL;
  436. /* Iterate through errors */
  437. list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
  438. vme_err = list_entry(err_pos, struct vme_bus_error, list);
  439. if ((vme_err->address >= address) &&
  440. (vme_err->address < bound)) {
  441. list_del(err_pos);
  442. kfree(vme_err);
  443. }
  444. }
  445. }
/*
 * Initialize a slave window with the requested attributes.
 *
 * Programs inbound translation window image->number so that VME accesses
 * in [vme_base, vme_base + size) are mapped to PCI starting at pci_base.
 * Returns 0 on success, -EINVAL on an unsupported address space or a
 * misaligned base/bound/offset.
 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;

	bridge = tsi148_bridge->driver_priv;

	i = image->number;

	/* Window granularity and ITAT address-space bits per VME space */
	switch (aspace) {
	case VME_A16:
		granularity = 0x10;
		addr |= TSI148_LCSR_ITAT_AS_A16;
		break;
	case VME_A24:
		granularity = 0x1000;
		addr |= TSI148_LCSR_ITAT_AS_A24;
		break;
	case VME_A32:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A32;
		break;
	case VME_A64:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A64;
		break;
	case VME_CRCSR:
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
	default:
		/* Slave windows do not support these address spaces */
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(vme_base, &vme_base_high, &vme_base_low);

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
	pci_offset = (unsigned long long)pci_base - vme_base;
	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);

	/* All three quantities must be aligned to the window granularity */
	if (vme_base_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);
	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	/* Setup mapping */
	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	temp_ctl &= ~(0x1F << 7);	/* clear the 5 cycle-type bits */
	if (cycle & VME_BLT)
		temp_ctl |= TSI148_LCSR_ITAT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
	if (cycle & VME_2eVME)
		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
	if (cycle & VME_2eSST)
		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
	if (cycle & VME_2eSSTB)
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
	temp_ctl |= addr;

	/* Setup privilege / data-program responses (low 4 bits) */
	temp_ctl &= ~0xF;
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_ITAT_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= TSI148_LCSR_ITAT_DATA;

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_ITAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	return 0;
}
/*
 * Get slave window configuration.
 *
 * Reads back inbound window image->number from the hardware registers and
 * decodes it into the caller's enabled/base/size/pci_base/aspace/cycle
 * out-parameters. Always returns 0.
 */
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Read registers */
	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_join(vme_base_high, vme_base_low, vme_base);
	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
	reg_join(pci_offset_high, pci_offset_low, &pci_offset);

	/* PCI base is the VME base shifted by the programmed offset */
	*pci_base = (dma_addr_t)vme_base + pci_offset;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & TSI148_LCSR_ITAT_EN)
		*enabled = 1;

	/* Decode address space; granularity is needed for the size below */
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
		granularity = 0x10;
		*aspace |= VME_A16;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
		granularity = 0x1000;
		*aspace |= VME_A24;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
		granularity = 0x10000;
		*aspace |= VME_A32;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
		granularity = 0x10000;
		*aspace |= VME_A64;
	}

	/* Need granularity before we set the size */
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	/* Decode 2eSST speed and supported cycle types */
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	if (ctl & TSI148_LCSR_ITAT_BLT)
		*cycle |= VME_BLT;
	if (ctl & TSI148_LCSR_ITAT_MBLT)
		*cycle |= VME_MBLT;
	if (ctl & TSI148_LCSR_ITAT_2eVME)
		*cycle |= VME_2eVME;
	if (ctl & TSI148_LCSR_ITAT_2eSST)
		*cycle |= VME_2eSST;
	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
		*cycle |= VME_2eSSTB;

	if (ctl & TSI148_LCSR_ITAT_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & TSI148_LCSR_ITAT_NPRIV)
		*cycle |= VME_USER;
	if (ctl & TSI148_LCSR_ITAT_PGM)
		*cycle |= VME_PROG;
	if (ctl & TSI148_LCSR_ITAT_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 *
 * Ensures image->bus_resource describes a PCI memory window of 'size'
 * bytes mapped at image->kern_base. A size of zero frees any existing
 * mapping. Returns 0 on success or a negative errno.
 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	/* Allocate the window from the bus of our parent PCI device */
	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if ((size != 0) && (existing_size == (size - 1)))
		return 0;

	/* Tear down any previous, differently-sized mapping first */
	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	/* Exit here if size is zero */
	if (size == 0)
		return 0;

	if (image->bus_resource.name == NULL) {
		/* VMENAMSIZ+3 leaves room for the ".%d" window suffix */
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(tsi148_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
		image->number);

	image->bus_resource.start = 0;
	/* NOTE(review): end is set to 'size' while the reuse check above
	 * expects end - start == size - 1; pci_bus_alloc_resource() rewrites
	 * the range on success, but confirm this off-by-one is intended */
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	/* goto-based unwind: release in reverse order of acquisition */
err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 *
 * Counterpart of tsi148_alloc_resource(): unmaps the kernel mapping,
 * releases the bus resource and frees its name, then zeroes the resource
 * so a later alloc sees existing_size == 0.
 */
static void tsi148_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
  741. /*
  742. * Set the attributes of an outbound window.
  743. */
  744. static int tsi148_master_set(struct vme_master_resource *image, int enabled,
  745. unsigned long long vme_base, unsigned long long size,
  746. vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
  747. {
  748. int retval = 0;
  749. unsigned int i;
  750. unsigned int temp_ctl = 0;
  751. unsigned int pci_base_low, pci_base_high;
  752. unsigned int pci_bound_low, pci_bound_high;
  753. unsigned int vme_offset_low, vme_offset_high;
  754. unsigned long long pci_bound, vme_offset, pci_base;
  755. struct vme_bridge *tsi148_bridge;
  756. struct tsi148_driver *bridge;
  757. tsi148_bridge = image->parent;
  758. bridge = tsi148_bridge->driver_priv;
  759. /* Verify input data */
  760. if (vme_base & 0xFFFF) {
  761. dev_err(tsi148_bridge->parent, "Invalid VME Window "
  762. "alignment\n");
  763. retval = -EINVAL;
  764. goto err_window;
  765. }
  766. if ((size == 0) && (enabled != 0)) {
  767. dev_err(tsi148_bridge->parent, "Size must be non-zero for "
  768. "enabled windows\n");
  769. retval = -EINVAL;
  770. goto err_window;
  771. }
  772. spin_lock(&image->lock);
  773. /* Let's allocate the resource here rather than further up the stack as
  774. * it avoids pushing loads of bus dependent stuff up the stack. If size
  775. * is zero, any existing resource will be freed.
  776. */
  777. retval = tsi148_alloc_resource(image, size);
  778. if (retval) {
  779. spin_unlock(&image->lock);
  780. dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
  781. "resource\n");
  782. goto err_res;
  783. }
  784. if (size == 0) {
  785. pci_base = 0;
  786. pci_bound = 0;
  787. vme_offset = 0;
  788. } else {
  789. pci_base = (unsigned long long)image->bus_resource.start;
  790. /*
  791. * Bound address is a valid address for the window, adjust
  792. * according to window granularity.
  793. */
  794. pci_bound = pci_base + (size - 0x10000);
  795. vme_offset = vme_base - pci_base;
  796. }
  797. /* Convert 64-bit variables to 2x 32-bit variables */
  798. reg_split(pci_base, &pci_base_high, &pci_base_low);
  799. reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
  800. reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
  801. if (pci_base_low & 0xFFFF) {
  802. spin_unlock(&image->lock);
  803. dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
  804. retval = -EINVAL;
  805. goto err_gran;
  806. }
  807. if (pci_bound_low & 0xFFFF) {
  808. spin_unlock(&image->lock);
  809. dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
  810. retval = -EINVAL;
  811. goto err_gran;
  812. }
  813. if (vme_offset_low & 0xFFFF) {
  814. spin_unlock(&image->lock);
  815. dev_err(tsi148_bridge->parent, "Invalid VME Offset "
  816. "alignment\n");
  817. retval = -EINVAL;
  818. goto err_gran;
  819. }
  820. i = image->number;
  821. /* Disable while we are mucking around */
  822. temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  823. TSI148_LCSR_OFFSET_OTAT);
  824. temp_ctl &= ~TSI148_LCSR_OTAT_EN;
  825. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  826. TSI148_LCSR_OFFSET_OTAT);
  827. /* Setup 2eSST speeds */
  828. temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
  829. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  830. case VME_2eSST160:
  831. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
  832. break;
  833. case VME_2eSST267:
  834. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
  835. break;
  836. case VME_2eSST320:
  837. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
  838. break;
  839. }
  840. /* Setup cycle types */
  841. if (cycle & VME_BLT) {
  842. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  843. temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
  844. }
  845. if (cycle & VME_MBLT) {
  846. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  847. temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
  848. }
  849. if (cycle & VME_2eVME) {
  850. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  851. temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
  852. }
  853. if (cycle & VME_2eSST) {
  854. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  855. temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
  856. }
  857. if (cycle & VME_2eSSTB) {
  858. dev_warn(tsi148_bridge->parent, "Currently not setting "
  859. "Broadcast Select Registers\n");
  860. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  861. temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
  862. }
  863. /* Setup data width */
  864. temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
  865. switch (dwidth) {
  866. case VME_D16:
  867. temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
  868. break;
  869. case VME_D32:
  870. temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
  871. break;
  872. default:
  873. spin_unlock(&image->lock);
  874. dev_err(tsi148_bridge->parent, "Invalid data width\n");
  875. retval = -EINVAL;
  876. goto err_dwidth;
  877. }
  878. /* Setup address space */
  879. temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
  880. switch (aspace) {
  881. case VME_A16:
  882. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
  883. break;
  884. case VME_A24:
  885. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
  886. break;
  887. case VME_A32:
  888. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
  889. break;
  890. case VME_A64:
  891. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
  892. break;
  893. case VME_CRCSR:
  894. temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
  895. break;
  896. case VME_USER1:
  897. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
  898. break;
  899. case VME_USER2:
  900. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
  901. break;
  902. case VME_USER3:
  903. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
  904. break;
  905. case VME_USER4:
  906. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
  907. break;
  908. default:
  909. spin_unlock(&image->lock);
  910. dev_err(tsi148_bridge->parent, "Invalid address space\n");
  911. retval = -EINVAL;
  912. goto err_aspace;
  913. break;
  914. }
  915. temp_ctl &= ~(3<<4);
  916. if (cycle & VME_SUPER)
  917. temp_ctl |= TSI148_LCSR_OTAT_SUP;
  918. if (cycle & VME_PROG)
  919. temp_ctl |= TSI148_LCSR_OTAT_PGM;
  920. /* Setup mapping */
  921. iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
  922. TSI148_LCSR_OFFSET_OTSAU);
  923. iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
  924. TSI148_LCSR_OFFSET_OTSAL);
  925. iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
  926. TSI148_LCSR_OFFSET_OTEAU);
  927. iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
  928. TSI148_LCSR_OFFSET_OTEAL);
  929. iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
  930. TSI148_LCSR_OFFSET_OTOFU);
  931. iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
  932. TSI148_LCSR_OFFSET_OTOFL);
  933. /* Write ctl reg without enable */
  934. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  935. TSI148_LCSR_OFFSET_OTAT);
  936. if (enabled)
  937. temp_ctl |= TSI148_LCSR_OTAT_EN;
  938. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  939. TSI148_LCSR_OFFSET_OTAT);
  940. spin_unlock(&image->lock);
  941. return 0;
  942. err_aspace:
  943. err_dwidth:
  944. err_gran:
  945. tsi148_free_resource(image);
  946. err_res:
  947. err_window:
  948. return retval;
  949. }
  950. /*
  951. * Set the attributes of an outbound window.
  952. *
  953. * XXX Not parsing prefetch information.
  954. */
  955. static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
  956. unsigned long long *vme_base, unsigned long long *size,
  957. vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
  958. {
  959. unsigned int i, ctl;
  960. unsigned int pci_base_low, pci_base_high;
  961. unsigned int pci_bound_low, pci_bound_high;
  962. unsigned int vme_offset_low, vme_offset_high;
  963. unsigned long long pci_base, pci_bound, vme_offset;
  964. struct tsi148_driver *bridge;
  965. bridge = image->parent->driver_priv;
  966. i = image->number;
  967. ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  968. TSI148_LCSR_OFFSET_OTAT);
  969. pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  970. TSI148_LCSR_OFFSET_OTSAU);
  971. pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  972. TSI148_LCSR_OFFSET_OTSAL);
  973. pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  974. TSI148_LCSR_OFFSET_OTEAU);
  975. pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  976. TSI148_LCSR_OFFSET_OTEAL);
  977. vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  978. TSI148_LCSR_OFFSET_OTOFU);
  979. vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  980. TSI148_LCSR_OFFSET_OTOFL);
  981. /* Convert 64-bit variables to 2x 32-bit variables */
  982. reg_join(pci_base_high, pci_base_low, &pci_base);
  983. reg_join(pci_bound_high, pci_bound_low, &pci_bound);
  984. reg_join(vme_offset_high, vme_offset_low, &vme_offset);
  985. *vme_base = pci_base + vme_offset;
  986. *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
  987. *enabled = 0;
  988. *aspace = 0;
  989. *cycle = 0;
  990. *dwidth = 0;
  991. if (ctl & TSI148_LCSR_OTAT_EN)
  992. *enabled = 1;
  993. /* Setup address space */
  994. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
  995. *aspace |= VME_A16;
  996. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
  997. *aspace |= VME_A24;
  998. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
  999. *aspace |= VME_A32;
  1000. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
  1001. *aspace |= VME_A64;
  1002. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
  1003. *aspace |= VME_CRCSR;
  1004. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
  1005. *aspace |= VME_USER1;
  1006. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
  1007. *aspace |= VME_USER2;
  1008. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
  1009. *aspace |= VME_USER3;
  1010. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
  1011. *aspace |= VME_USER4;
  1012. /* Setup 2eSST speeds */
  1013. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
  1014. *cycle |= VME_2eSST160;
  1015. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
  1016. *cycle |= VME_2eSST267;
  1017. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
  1018. *cycle |= VME_2eSST320;
  1019. /* Setup cycle types */
  1020. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
  1021. *cycle |= VME_SCT;
  1022. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
  1023. *cycle |= VME_BLT;
  1024. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
  1025. *cycle |= VME_MBLT;
  1026. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
  1027. *cycle |= VME_2eVME;
  1028. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
  1029. *cycle |= VME_2eSST;
  1030. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
  1031. *cycle |= VME_2eSSTB;
  1032. if (ctl & TSI148_LCSR_OTAT_SUP)
  1033. *cycle |= VME_SUPER;
  1034. else
  1035. *cycle |= VME_USER;
  1036. if (ctl & TSI148_LCSR_OTAT_PGM)
  1037. *cycle |= VME_PROG;
  1038. else
  1039. *cycle |= VME_DATA;
  1040. /* Setup data width */
  1041. if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
  1042. *dwidth = VME_D16;
  1043. if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
  1044. *dwidth = VME_D32;
  1045. return 0;
  1046. }
  1047. static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
  1048. unsigned long long *vme_base, unsigned long long *size,
  1049. vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
  1050. {
  1051. int retval;
  1052. spin_lock(&image->lock);
  1053. retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
  1054. cycle, dwidth);
  1055. spin_unlock(&image->lock);
  1056. return retval;
  1057. }
  1058. static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
  1059. size_t count, loff_t offset)
  1060. {
  1061. int retval, enabled;
  1062. unsigned long long vme_base, size;
  1063. vme_address_t aspace;
  1064. vme_cycle_t cycle;
  1065. vme_width_t dwidth;
  1066. struct vme_bus_error *vme_err = NULL;
  1067. struct vme_bridge *tsi148_bridge;
  1068. tsi148_bridge = image->parent;
  1069. spin_lock(&image->lock);
  1070. memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
  1071. retval = count;
  1072. if (!err_chk)
  1073. goto skip_chk;
  1074. __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
  1075. &dwidth);
  1076. vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
  1077. count);
  1078. if (vme_err != NULL) {
  1079. dev_err(image->parent->parent, "First VME read error detected "
  1080. "an at address 0x%llx\n", vme_err->address);
  1081. retval = vme_err->address - (vme_base + offset);
  1082. /* Clear down save errors in this address range */
  1083. tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
  1084. count);
  1085. }
  1086. skip_chk:
  1087. spin_unlock(&image->lock);
  1088. return retval;
  1089. }
  1090. static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
  1091. size_t count, loff_t offset)
  1092. {
  1093. int retval = 0, enabled;
  1094. unsigned long long vme_base, size;
  1095. vme_address_t aspace;
  1096. vme_cycle_t cycle;
  1097. vme_width_t dwidth;
  1098. struct vme_bus_error *vme_err = NULL;
  1099. struct vme_bridge *tsi148_bridge;
  1100. struct tsi148_driver *bridge;
  1101. tsi148_bridge = image->parent;
  1102. bridge = tsi148_bridge->driver_priv;
  1103. spin_lock(&image->lock);
  1104. memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
  1105. retval = count;
  1106. /*
  1107. * Writes are posted. We need to do a read on the VME bus to flush out
  1108. * all of the writes before we check for errors. We can't guarantee
  1109. * that reading the data we have just written is safe. It is believed
  1110. * that there isn't any read, write re-ordering, so we can read any
  1111. * location in VME space, so lets read the Device ID from the tsi148's
  1112. * own registers as mapped into CR/CSR space.
  1113. *
  1114. * We check for saved errors in the written address range/space.
  1115. */
  1116. if (!err_chk)
  1117. goto skip_chk;
  1118. /*
  1119. * Get window info first, to maximise the time that the buffers may
  1120. * fluch on their own
  1121. */
  1122. __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
  1123. &dwidth);
  1124. ioread16(bridge->flush_image->kern_base + 0x7F000);
  1125. vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
  1126. count);
  1127. if (vme_err != NULL) {
  1128. dev_warn(tsi148_bridge->parent, "First VME write error detected"
  1129. " an at address 0x%llx\n", vme_err->address);
  1130. retval = vme_err->address - (vme_base + offset);
  1131. /* Clear down save errors in this address range */
  1132. tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
  1133. count);
  1134. }
  1135. skip_chk:
  1136. spin_unlock(&image->lock);
  1137. return retval;
  1138. }
  1139. /*
  1140. * Perform an RMW cycle on the VME bus.
  1141. *
  1142. * Requires a previously configured master window, returns final value.
  1143. */
  1144. static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
  1145. unsigned int mask, unsigned int compare, unsigned int swap,
  1146. loff_t offset)
  1147. {
  1148. unsigned long long pci_addr;
  1149. unsigned int pci_addr_high, pci_addr_low;
  1150. u32 tmp, result;
  1151. int i;
  1152. struct tsi148_driver *bridge;
  1153. bridge = image->parent->driver_priv;
  1154. /* Find the PCI address that maps to the desired VME address */
  1155. i = image->number;
  1156. /* Locking as we can only do one of these at a time */
  1157. mutex_lock(&bridge->vme_rmw);
  1158. /* Lock image */
  1159. spin_lock(&image->lock);
  1160. pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  1161. TSI148_LCSR_OFFSET_OTSAU);
  1162. pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  1163. TSI148_LCSR_OFFSET_OTSAL);
  1164. reg_join(pci_addr_high, pci_addr_low, &pci_addr);
  1165. reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
  1166. /* Configure registers */
  1167. iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
  1168. iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
  1169. iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
  1170. iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
  1171. iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
  1172. /* Enable RMW */
  1173. tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
  1174. tmp |= TSI148_LCSR_VMCTRL_RMWEN;
  1175. iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
  1176. /* Kick process off with a read to the required address. */
  1177. result = ioread32be(image->kern_base + offset);
  1178. /* Disable RMW */
  1179. tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
  1180. tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
  1181. iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
  1182. spin_unlock(&image->lock);
  1183. mutex_unlock(&bridge->vme_rmw);
  1184. return result;
  1185. }
  1186. static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
  1187. vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
  1188. {
  1189. /* Setup 2eSST speeds */
  1190. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  1191. case VME_2eSST160:
  1192. *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
  1193. break;
  1194. case VME_2eSST267:
  1195. *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
  1196. break;
  1197. case VME_2eSST320:
  1198. *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
  1199. break;
  1200. }
  1201. /* Setup cycle types */
  1202. if (cycle & VME_SCT)
  1203. *attr |= TSI148_LCSR_DSAT_TM_SCT;
  1204. if (cycle & VME_BLT)
  1205. *attr |= TSI148_LCSR_DSAT_TM_BLT;
  1206. if (cycle & VME_MBLT)
  1207. *attr |= TSI148_LCSR_DSAT_TM_MBLT;
  1208. if (cycle & VME_2eVME)
  1209. *attr |= TSI148_LCSR_DSAT_TM_2eVME;
  1210. if (cycle & VME_2eSST)
  1211. *attr |= TSI148_LCSR_DSAT_TM_2eSST;
  1212. if (cycle & VME_2eSSTB) {
  1213. dev_err(dev, "Currently not setting Broadcast Select "
  1214. "Registers\n");
  1215. *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
  1216. }
  1217. /* Setup data width */
  1218. switch (dwidth) {
  1219. case VME_D16:
  1220. *attr |= TSI148_LCSR_DSAT_DBW_16;
  1221. break;
  1222. case VME_D32:
  1223. *attr |= TSI148_LCSR_DSAT_DBW_32;
  1224. break;
  1225. default:
  1226. dev_err(dev, "Invalid data width\n");
  1227. return -EINVAL;
  1228. }
  1229. /* Setup address space */
  1230. switch (aspace) {
  1231. case VME_A16:
  1232. *attr |= TSI148_LCSR_DSAT_AMODE_A16;
  1233. break;
  1234. case VME_A24:
  1235. *attr |= TSI148_LCSR_DSAT_AMODE_A24;
  1236. break;
  1237. case VME_A32:
  1238. *attr |= TSI148_LCSR_DSAT_AMODE_A32;
  1239. break;
  1240. case VME_A64:
  1241. *attr |= TSI148_LCSR_DSAT_AMODE_A64;
  1242. break;
  1243. case VME_CRCSR:
  1244. *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
  1245. break;
  1246. case VME_USER1:
  1247. *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
  1248. break;
  1249. case VME_USER2:
  1250. *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
  1251. break;
  1252. case VME_USER3:
  1253. *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
  1254. break;
  1255. case VME_USER4:
  1256. *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
  1257. break;
  1258. default:
  1259. dev_err(dev, "Invalid address space\n");
  1260. return -EINVAL;
  1261. break;
  1262. }
  1263. if (cycle & VME_SUPER)
  1264. *attr |= TSI148_LCSR_DSAT_SUP;
  1265. if (cycle & VME_PROG)
  1266. *attr |= TSI148_LCSR_DSAT_PGM;
  1267. return 0;
  1268. }
  1269. static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
  1270. vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
  1271. {
  1272. /* Setup 2eSST speeds */
  1273. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  1274. case VME_2eSST160:
  1275. *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
  1276. break;
  1277. case VME_2eSST267:
  1278. *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
  1279. break;
  1280. case VME_2eSST320:
  1281. *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
  1282. break;
  1283. }
  1284. /* Setup cycle types */
  1285. if (cycle & VME_SCT)
  1286. *attr |= TSI148_LCSR_DDAT_TM_SCT;
  1287. if (cycle & VME_BLT)
  1288. *attr |= TSI148_LCSR_DDAT_TM_BLT;
  1289. if (cycle & VME_MBLT)
  1290. *attr |= TSI148_LCSR_DDAT_TM_MBLT;
  1291. if (cycle & VME_2eVME)
  1292. *attr |= TSI148_LCSR_DDAT_TM_2eVME;
  1293. if (cycle & VME_2eSST)
  1294. *attr |= TSI148_LCSR_DDAT_TM_2eSST;
  1295. if (cycle & VME_2eSSTB) {
  1296. dev_err(dev, "Currently not setting Broadcast Select "
  1297. "Registers\n");
  1298. *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
  1299. }
  1300. /* Setup data width */
  1301. switch (dwidth) {
  1302. case VME_D16:
  1303. *attr |= TSI148_LCSR_DDAT_DBW_16;
  1304. break;
  1305. case VME_D32:
  1306. *attr |= TSI148_LCSR_DDAT_DBW_32;
  1307. break;
  1308. default:
  1309. dev_err(dev, "Invalid data width\n");
  1310. return -EINVAL;
  1311. }
  1312. /* Setup address space */
  1313. switch (aspace) {
  1314. case VME_A16:
  1315. *attr |= TSI148_LCSR_DDAT_AMODE_A16;
  1316. break;
  1317. case VME_A24:
  1318. *attr |= TSI148_LCSR_DDAT_AMODE_A24;
  1319. break;
  1320. case VME_A32:
  1321. *attr |= TSI148_LCSR_DDAT_AMODE_A32;
  1322. break;
  1323. case VME_A64:
  1324. *attr |= TSI148_LCSR_DDAT_AMODE_A64;
  1325. break;
  1326. case VME_CRCSR:
  1327. *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
  1328. break;
  1329. case VME_USER1:
  1330. *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
  1331. break;
  1332. case VME_USER2:
  1333. *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
  1334. break;
  1335. case VME_USER3:
  1336. *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
  1337. break;
  1338. case VME_USER4:
  1339. *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
  1340. break;
  1341. default:
  1342. dev_err(dev, "Invalid address space\n");
  1343. return -EINVAL;
  1344. break;
  1345. }
  1346. if (cycle & VME_SUPER)
  1347. *attr |= TSI148_LCSR_DDAT_SUP;
  1348. if (cycle & VME_PROG)
  1349. *attr |= TSI148_LCSR_DDAT_PGM;
  1350. return 0;
  1351. }
  1352. /*
  1353. * Add a link list descriptor to the list
  1354. */
  1355. static int tsi148_dma_list_add(struct vme_dma_list *list,
  1356. struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
  1357. {
  1358. struct tsi148_dma_entry *entry, *prev;
  1359. u32 address_high, address_low;
  1360. struct vme_dma_pattern *pattern_attr;
  1361. struct vme_dma_pci *pci_attr;
  1362. struct vme_dma_vme *vme_attr;
  1363. dma_addr_t desc_ptr;
  1364. int retval = 0;
  1365. struct vme_bridge *tsi148_bridge;
  1366. tsi148_bridge = list->parent->parent;
  1367. /* Descriptor must be aligned on 64-bit boundaries */
  1368. entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
  1369. if (entry == NULL) {
  1370. dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
  1371. "dma resource structure\n");
  1372. retval = -ENOMEM;
  1373. goto err_mem;
  1374. }
  1375. /* Test descriptor alignment */
  1376. if ((unsigned long)&entry->descriptor & 0x7) {
  1377. dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
  1378. "byte boundary as required: %p\n",
  1379. &entry->descriptor);
  1380. retval = -EINVAL;
  1381. goto err_align;
  1382. }
  1383. /* Given we are going to fill out the structure, we probably don't
  1384. * need to zero it, but better safe than sorry for now.
  1385. */
  1386. memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
  1387. /* Fill out source part */
  1388. switch (src->type) {
  1389. case VME_DMA_PATTERN:
  1390. pattern_attr = src->private;
  1391. entry->descriptor.dsal = pattern_attr->pattern;
  1392. entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
  1393. /* Default behaviour is 32 bit pattern */
  1394. if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
  1395. entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
  1396. /* It seems that the default behaviour is to increment */
  1397. if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
  1398. entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
  1399. break;
  1400. case VME_DMA_PCI:
  1401. pci_attr = src->private;
  1402. reg_split((unsigned long long)pci_attr->address, &address_high,
  1403. &address_low);
  1404. entry->descriptor.dsau = address_high;
  1405. entry->descriptor.dsal = address_low;
  1406. entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
  1407. break;
  1408. case VME_DMA_VME:
  1409. vme_attr = src->private;
  1410. reg_split((unsigned long long)vme_attr->address, &address_high,
  1411. &address_low);
  1412. entry->descriptor.dsau = address_high;
  1413. entry->descriptor.dsal = address_low;
  1414. entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
  1415. retval = tsi148_dma_set_vme_src_attributes(
  1416. tsi148_bridge->parent, &entry->descriptor.dsat,
  1417. vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
  1418. if (retval < 0)
  1419. goto err_source;
  1420. break;
  1421. default:
  1422. dev_err(tsi148_bridge->parent, "Invalid source type\n");
  1423. retval = -EINVAL;
  1424. goto err_source;
  1425. break;
  1426. }
  1427. /* Assume last link - this will be over-written by adding another */
  1428. entry->descriptor.dnlau = 0;
  1429. entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
  1430. /* Fill out destination part */
  1431. switch (dest->type) {
  1432. case VME_DMA_PCI:
  1433. pci_attr = dest->private;
  1434. reg_split((unsigned long long)pci_attr->address, &address_high,
  1435. &address_low);
  1436. entry->descriptor.ddau = address_high;
  1437. entry->descriptor.ddal = address_low;
  1438. entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
  1439. break;
  1440. case VME_DMA_VME:
  1441. vme_attr = dest->private;
  1442. reg_split((unsigned long long)vme_attr->address, &address_high,
  1443. &address_low);
  1444. entry->descriptor.ddau = address_high;
  1445. entry->descriptor.ddal = address_low;
  1446. entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
  1447. retval = tsi148_dma_set_vme_dest_attributes(
  1448. tsi148_bridge->parent, &entry->descriptor.ddat,
  1449. vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
  1450. if (retval < 0)
  1451. goto err_dest;
  1452. break;
  1453. default:
  1454. dev_err(tsi148_bridge->parent, "Invalid destination type\n");
  1455. retval = -EINVAL;
  1456. goto err_dest;
  1457. break;
  1458. }
  1459. /* Fill out count */
  1460. entry->descriptor.dcnt = (u32)count;
  1461. /* Add to list */
  1462. list_add_tail(&entry->list, &list->entries);
  1463. /* Fill out previous descriptors "Next Address" */
  1464. if (entry->list.prev != &list->entries) {
  1465. prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
  1466. list);
  1467. /* We need the bus address for the pointer */
  1468. desc_ptr = virt_to_bus(&entry->descriptor);
  1469. reg_split(desc_ptr, &prev->descriptor.dnlau,
  1470. &prev->descriptor.dnlal);
  1471. }
  1472. return 0;
  1473. err_dest:
  1474. err_source:
  1475. err_align:
  1476. kfree(entry);
  1477. err_mem:
  1478. return retval;
  1479. }
  1480. /*
  1481. * Check to see if the provided DMA channel is busy.
  1482. */
  1483. static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
  1484. {
  1485. u32 tmp;
  1486. struct tsi148_driver *bridge;
  1487. bridge = tsi148_bridge->driver_priv;
  1488. tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
  1489. TSI148_LCSR_OFFSET_DSTA);
  1490. if (tmp & TSI148_LCSR_DSTA_BSY)
  1491. return 0;
  1492. else
  1493. return 1;
  1494. }
  1495. /*
  1496. * Execute a previously generated link list
  1497. *
  1498. * XXX Need to provide control register configuration.
  1499. */
  1500. static int tsi148_dma_list_exec(struct vme_dma_list *list)
  1501. {
  1502. struct vme_dma_resource *ctrlr;
  1503. int channel, retval = 0;
  1504. struct tsi148_dma_entry *entry;
  1505. dma_addr_t bus_addr;
  1506. u32 bus_addr_high, bus_addr_low;
  1507. u32 val, dctlreg = 0;
  1508. struct vme_bridge *tsi148_bridge;
  1509. struct tsi148_driver *bridge;
  1510. ctrlr = list->parent;
  1511. tsi148_bridge = ctrlr->parent;
  1512. bridge = tsi148_bridge->driver_priv;
  1513. mutex_lock(&ctrlr->mtx);
  1514. channel = ctrlr->number;
  1515. if (!list_empty(&ctrlr->running)) {
  1516. /*
  1517. * XXX We have an active DMA transfer and currently haven't
  1518. * sorted out the mechanism for "pending" DMA transfers.
  1519. * Return busy.
  1520. */
  1521. /* Need to add to pending here */
  1522. mutex_unlock(&ctrlr->mtx);
  1523. return -EBUSY;
  1524. } else {
  1525. list_add(&list->list, &ctrlr->running);
  1526. }
  1527. /* Get first bus address and write into registers */
  1528. entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
  1529. list);
  1530. bus_addr = virt_to_bus(&entry->descriptor);
  1531. mutex_unlock(&ctrlr->mtx);
  1532. reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
  1533. iowrite32be(bus_addr_high, bridge->base +
  1534. TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
  1535. iowrite32be(bus_addr_low, bridge->base +
  1536. TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
  1537. /* Start the operation */
  1538. iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
  1539. TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
  1540. wait_event_interruptible(bridge->dma_queue[channel],
  1541. tsi148_dma_busy(ctrlr->parent, channel));
  1542. /*
  1543. * Read status register, this register is valid until we kick off a
  1544. * new transfer.
  1545. */
  1546. val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
  1547. TSI148_LCSR_OFFSET_DSTA);
  1548. if (val & TSI148_LCSR_DSTA_VBE) {
  1549. dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
  1550. retval = -EIO;
  1551. }
  1552. /* Remove list from running list */
  1553. mutex_lock(&ctrlr->mtx);
  1554. list_del(&list->list);
  1555. mutex_unlock(&ctrlr->mtx);
  1556. return retval;
  1557. }
  1558. /*
  1559. * Clean up a previously generated link list
  1560. *
  1561. * We have a separate function, don't assume that the chain can't be reused.
  1562. */
  1563. static int tsi148_dma_list_empty(struct vme_dma_list *list)
  1564. {
  1565. struct list_head *pos, *temp;
  1566. struct tsi148_dma_entry *entry;
  1567. /* detach and free each entry */
  1568. list_for_each_safe(pos, temp, &list->entries) {
  1569. list_del(pos);
  1570. entry = list_entry(pos, struct tsi148_dma_entry, list);
  1571. kfree(entry);
  1572. }
  1573. return 0;
  1574. }
  1575. /*
  1576. * All 4 location monitors reside at the same base - this is therefore a
  1577. * system wide configuration.
  1578. *
  1579. * This does not enable the LM monitor - that shoul…