PageRenderTime 270ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 1ms

/drivers/staging/vme/bridges/vme_tsi148.c

https://bitbucket.org/wisechild/galaxy-nexus
C | 2640 lines | 1852 code | 429 blank | 359 comment | 260 complexity | f2771802109565078f29c72070a82e0f MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /*
  2. * Support for the Tundra TSI148 VME-PCI Bridge Chip
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/errno.h>
  20. #include <linux/proc_fs.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/sched.h>
  27. #include <linux/slab.h>
  28. #include <linux/time.h>
  29. #include <linux/io.h>
  30. #include <linux/uaccess.h>
  31. #include "../vme.h"
  32. #include "../vme_bridge.h"
  33. #include "vme_tsi148.h"
/* Driver entry points, registered with the PCI core via tsi148_driver below. */
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);

/* Module parameters */
static int err_chk;	/* presumably enables VME bus error checking — confirm against module_param() */
static int geoid;	/* presumably overrides the geographic (slot) address — confirm against module_param() */

static char driver_name[] = "vme_tsi148";
  42. static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
  43. { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
  44. { },
  45. };
/* PCI driver glue: probe/remove are defined later in this file. */
static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};
  52. static void reg_join(unsigned int high, unsigned int low,
  53. unsigned long long *variable)
  54. {
  55. *variable = (unsigned long long)high << 32;
  56. *variable |= (unsigned long long)low;
  57. }
  58. static void reg_split(unsigned long long variable, unsigned int *high,
  59. unsigned int *low)
  60. {
  61. *low = (unsigned int)variable & 0xFFFFFFFF;
  62. *high = (unsigned int)(variable >> 32);
  63. }
  64. /*
  65. * Wakes up DMA queue.
  66. */
  67. static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
  68. int channel_mask)
  69. {
  70. u32 serviced = 0;
  71. if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
  72. wake_up(&bridge->dma_queue[0]);
  73. serviced |= TSI148_LCSR_INTC_DMA0C;
  74. }
  75. if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
  76. wake_up(&bridge->dma_queue[1]);
  77. serviced |= TSI148_LCSR_INTC_DMA1C;
  78. }
  79. return serviced;
  80. }
  81. /*
  82. * Wake up location monitor queue
  83. */
  84. static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
  85. {
  86. int i;
  87. u32 serviced = 0;
  88. for (i = 0; i < 4; i++) {
  89. if (stat & TSI148_LCSR_INTS_LMS[i]) {
  90. /* We only enable interrupts if the callback is set */
  91. bridge->lm_callback[i](i);
  92. serviced |= TSI148_LCSR_INTC_LMC[i];
  93. }
  94. }
  95. return serviced;
  96. }
  97. /*
  98. * Wake up mail box queue.
  99. *
  100. * XXX This functionality is not exposed up though API.
  101. */
  102. static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
  103. {
  104. int i;
  105. u32 val;
  106. u32 serviced = 0;
  107. struct tsi148_driver *bridge;
  108. bridge = tsi148_bridge->driver_priv;
  109. for (i = 0; i < 4; i++) {
  110. if (stat & TSI148_LCSR_INTS_MBS[i]) {
  111. val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
  112. dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
  113. ": 0x%x\n", i, val);
  114. serviced |= TSI148_LCSR_INTC_MBC[i];
  115. }
  116. }
  117. return serviced;
  118. }
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 *
 * Logs the latched PCI error address/attributes and the PCI-X attribute
 * and split-completion registers, then writes the clear bit so the next
 * exception can be latched.  Returns the PERR interrupt-clear bit.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
		"attributes: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(bridge->base + TSI148_LCSR_EDPAT));

	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
		"completion reg: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(bridge->base + TSI148_LCSR_EDPXS));

	/* Clear the latched exception to re-arm error capture */
	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}
/*
 * Save address and status when VME error interrupt occurs.
 *
 * Reads the latched VME error address/attributes, queues them on the
 * bridge's vme_errors list for later matching by tsi148_find_error(),
 * and clears the latch.  Returns the VERR interrupt-clear bit.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	struct vme_bus_error *error;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Read the latched error address (two 32-bit halves) and attributes */
	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	/* GFP_ATOMIC: we are running in interrupt context */
	error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
	if (error) {
		error->address = error_addr;
		error->attributes = error_attrib;
		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
	} else {
		/* Could not queue the error — at least get it into the log */
		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
			"VMEbus Error reporting\n");
		dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
			"0x%llx, attributes: %08x\n", error_addr, error_attrib);
	}

	/* Clear Status */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}
  173. /*
  174. * Wake up IACK queue.
  175. */
  176. static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
  177. {
  178. wake_up(&bridge->iack_queue);
  179. return TSI148_LCSR_INTC_IACKC;
  180. }
/*
 * Calling VME bus interrupt callback if provided.
 *
 * Services pending VME bus interrupts in priority order, IRQ7 (highest)
 * down to IRQ1, issuing an IACK cycle for each and dispatching the
 * returned status/ID vector to the core IRQ handler.  Returns the mask of
 * levels serviced.
 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
	u32 stat)
{
	int vec, i, serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			/*
			 * Note: Even though the registers are defined as
			 * 32-bits in the spec, we only want to issue 8-bit
			 * IACK cycles on the bus, read from offset 3.
			 */
			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);

			/* Hand the vector to the registered handler */
			vme_irq_handler(tsi148_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Only interrupts that are both enabled (INTEO) and pending (INTS) are
 * considered; each sub-handler returns the clear bits for what it
 * serviced, and those are written back to INTC in one go at the end.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Nothing pending for us — the line is shared, let others try */
	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
/*
 * Hook up the top-level interrupt handler and enable the baseline set of
 * bridge interrupt sources (DMA, mailboxes, PCI/VME errors, IACK).
 * Returns 0 on success or the negative error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	bridge = tsi148_bridge->driver_priv;

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);

	mutex_init(&tsi148_bridge->irq_mtx);

	/* IRQF_SHARED: the PCI interrupt line may be shared with other devices */
	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	/* Same mask written to both the out-enable and enable registers */
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
/*
 * Tear down interrupt handling: mask and clear all bridge interrupt
 * sources before releasing the PCI interrupt line (order matters — the
 * handler must not fire after free_irq()).
 */
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Turn off interrupts */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);

	/* Clear all interrupts */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

	/* Detach interrupt handler */
	free_irq(pdev->irq, tsi148_bridge);
}
  319. /*
  320. * Check to see if an IACk has been received, return true (1) or false (0).
  321. */
  322. static int tsi148_iack_received(struct tsi148_driver *bridge)
  323. {
  324. u32 tmp;
  325. tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
  326. if (tmp & TSI148_LCSR_VICR_IRQS)
  327. return 0;
  328. else
  329. return 1;
  330. }
/*
 * Configure VME interrupt
 *
 * Enables (state != 0) or disables generation of interrupts for VME bus
 * IRQ 'level' (1-7; the register arrays are indexed by level - 1).  When
 * disabling with 'sync' non-zero, waits for any in-flight handler on the
 * PCI interrupt line to finish before returning.
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		/* Disabling: mask (INTEN) first, then the out-enable (INTEO) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		if (sync != 0) {
			pdev = container_of(tsi148_bridge->parent,
				struct pci_dev, dev);

			/* Wait until any running handler has completed */
			synchronize_irq(pdev->irq);
		}
	} else {
		/* Enabling: out-enable (INTEO) first, then unmask (INTEN) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * The mutex serialises interrupt generation; only one outstanding IRQ is
 * asserted at a time.  Always returns 0.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	/* NOTE(review): the interruptible wait's return value is ignored, so
	 * a signal lets us return "success" while the IRQ may still be
	 * pending — confirm whether that is intended. */
	wait_event_interruptible(bridge->iack_queue,
		tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}
  389. /*
  390. * Find the first error in this address range
  391. */
  392. static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
  393. vme_address_t aspace, unsigned long long address, size_t count)
  394. {
  395. struct list_head *err_pos;
  396. struct vme_bus_error *vme_err, *valid = NULL;
  397. unsigned long long bound;
  398. bound = address + count;
  399. /*
  400. * XXX We are currently not looking at the address space when parsing
  401. * for errors. This is because parsing the Address Modifier Codes
  402. * is going to be quite resource intensive to do properly. We
  403. * should be OK just looking at the addresses and this is certainly
  404. * much better than what we had before.
  405. */
  406. err_pos = NULL;
  407. /* Iterate through errors */
  408. list_for_each(err_pos, &tsi148_bridge->vme_errors) {
  409. vme_err = list_entry(err_pos, struct vme_bus_error, list);
  410. if ((vme_err->address >= address) &&
  411. (vme_err->address < bound)) {
  412. valid = vme_err;
  413. break;
  414. }
  415. }
  416. return valid;
  417. }
  418. /*
  419. * Clear errors in the provided address range.
  420. */
  421. static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
  422. vme_address_t aspace, unsigned long long address, size_t count)
  423. {
  424. struct list_head *err_pos, *temp;
  425. struct vme_bus_error *vme_err;
  426. unsigned long long bound;
  427. bound = address + count;
  428. /*
  429. * XXX We are currently not looking at the address space when parsing
  430. * for errors. This is because parsing the Address Modifier Codes
  431. * is going to be quite resource intensive to do properly. We
  432. * should be OK just looking at the addresses and this is certainly
  433. * much better than what we had before.
  434. */
  435. err_pos = NULL;
  436. /* Iterate through errors */
  437. list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
  438. vme_err = list_entry(err_pos, struct vme_bus_error, list);
  439. if ((vme_err->address >= address) &&
  440. (vme_err->address < bound)) {
  441. list_del(err_pos);
  442. kfree(vme_err);
  443. }
  444. }
  445. }
/*
 * Initialize a slave window with the requested attributes.
 *
 * Programs inbound translation window image->number so that VME accesses
 * in [vme_base, vme_base + size) are forwarded to PCI starting at
 * pci_base.  The window is disabled while its registers are updated and
 * only re-enabled (if 'enabled') after the full configuration is written.
 * Returns 0 on success, -EINVAL on unsupported address space or
 * misaligned base/bound/offset.
 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	i = image->number;

	/* Each address space has its own window alignment granularity */
	switch (aspace) {
	case VME_A16:
		granularity = 0x10;
		addr |= TSI148_LCSR_ITAT_AS_A16;
		break;
	case VME_A24:
		granularity = 0x1000;
		addr |= TSI148_LCSR_ITAT_AS_A24;
		break;
	case VME_A32:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A32;
		break;
	case VME_A64:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A64;
		break;
	case VME_CRCSR:
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
	default:
		/* Inbound windows do not support these address spaces */
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(vme_base, &vme_base_high, &vme_base_low);

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
	/* Hardware stores the offset added to a VME address to get PCI */
	pci_offset = (unsigned long long)pci_base - vme_base;
	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);

	if (vme_base_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);
	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	/* Setup mapping */
	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types (clear the 5-bit cycle field first) */
	temp_ctl &= ~(0x1F << 7);
	if (cycle & VME_BLT)
		temp_ctl |= TSI148_LCSR_ITAT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
	if (cycle & VME_2eVME)
		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
	if (cycle & VME_2eSST)
		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
	if (cycle & VME_2eSSTB)
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
	temp_ctl |= addr;

	/* Setup access modes (clear the low 4-bit mode field first) */
	temp_ctl &= ~0xF;
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
	if (cycle & VME_USER)
		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_ITAT_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= TSI148_LCSR_ITAT_DATA;

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_ITAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	return 0;
}
  579. /*
  580. * Get slave window configuration.
  581. */
  582. static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
  583. unsigned long long *vme_base, unsigned long long *size,
  584. dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
  585. {
  586. unsigned int i, granularity = 0, ctl = 0;
  587. unsigned int vme_base_low, vme_base_high;
  588. unsigned int vme_bound_low, vme_bound_high;
  589. unsigned int pci_offset_low, pci_offset_high;
  590. unsigned long long vme_bound, pci_offset;
  591. struct tsi148_driver *bridge;
  592. bridge = image->parent->driver_priv;
  593. i = image->number;
  594. /* Read registers */
  595. ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  596. TSI148_LCSR_OFFSET_ITAT);
  597. vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  598. TSI148_LCSR_OFFSET_ITSAU);
  599. vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  600. TSI148_LCSR_OFFSET_ITSAL);
  601. vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  602. TSI148_LCSR_OFFSET_ITEAU);
  603. vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  604. TSI148_LCSR_OFFSET_ITEAL);
  605. pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  606. TSI148_LCSR_OFFSET_ITOFU);
  607. pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  608. TSI148_LCSR_OFFSET_ITOFL);
  609. /* Convert 64-bit variables to 2x 32-bit variables */
  610. reg_join(vme_base_high, vme_base_low, vme_base);
  611. reg_join(vme_bound_high, vme_bound_low, &vme_bound);
  612. reg_join(pci_offset_high, pci_offset_low, &pci_offset);
  613. *pci_base = (dma_addr_t)vme_base + pci_offset;
  614. *enabled = 0;
  615. *aspace = 0;
  616. *cycle = 0;
  617. if (ctl & TSI148_LCSR_ITAT_EN)
  618. *enabled = 1;
  619. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
  620. granularity = 0x10;
  621. *aspace |= VME_A16;
  622. }
  623. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
  624. granularity = 0x1000;
  625. *aspace |= VME_A24;
  626. }
  627. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
  628. granularity = 0x10000;
  629. *aspace |= VME_A32;
  630. }
  631. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
  632. granularity = 0x10000;
  633. *aspace |= VME_A64;
  634. }
  635. /* Need granularity before we set the size */
  636. *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
  637. if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
  638. *cycle |= VME_2eSST160;
  639. if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
  640. *cycle |= VME_2eSST267;
  641. if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
  642. *cycle |= VME_2eSST320;
  643. if (ctl & TSI148_LCSR_ITAT_BLT)
  644. *cycle |= VME_BLT;
  645. if (ctl & TSI148_LCSR_ITAT_MBLT)
  646. *cycle |= VME_MBLT;
  647. if (ctl & TSI148_LCSR_ITAT_2eVME)
  648. *cycle |= VME_2eVME;
  649. if (ctl & TSI148_LCSR_ITAT_2eSST)
  650. *cycle |= VME_2eSST;
  651. if (ctl & TSI148_LCSR_ITAT_2eSSTB)
  652. *cycle |= VME_2eSSTB;
  653. if (ctl & TSI148_LCSR_ITAT_SUPR)
  654. *cycle |= VME_SUPER;
  655. if (ctl & TSI148_LCSR_ITAT_NPRIV)
  656. *cycle |= VME_USER;
  657. if (ctl & TSI148_LCSR_ITAT_PGM)
  658. *cycle |= VME_PROG;
  659. if (ctl & TSI148_LCSR_ITAT_DATA)
  660. *cycle |= VME_DATA;
  661. return 0;
  662. }
/*
 * Allocate and map PCI Resource
 *
 * Ensures image->bus_resource describes a PCI memory region of 'size'
 * bytes and that image->kern_base maps it.  An existing resource of the
 * right size is kept; otherwise it is released and (for size != 0) a new
 * one is allocated from the parent PCI bus and ioremapped.
 * Returns 0 on success or a negative errno; on failure the bus_resource
 * is left zeroed.
 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if ((size != 0) && (existing_size == (size - 1)))
		return 0;

	/* Size changed: tear down any previous mapping and resource */
	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	/* Exit here if size is zero */
	if (size == 0)
		return 0;

	if (image->bus_resource.name == NULL) {
		/* VMENAMSIZ+3 leaves room for ".%d" after the bridge name */
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(tsi148_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
		image->number);

	/* NOTE(review): end is initialised to 'size' (not size - 1) before
	 * the allocator rewrites it — presumably pci_bus_alloc_resource()
	 * sets start/end itself; confirm the pre-init is harmless. */
	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	/* NOTE(review): ioremap_nocache() is removed in modern kernels in
	 * favour of ioremap() — update if this file is forward-ported. */
	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	/* goto-based cleanup: unwind in reverse order of acquisition */
err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 *
 * Counterpart of tsi148_alloc_resource(): unmaps kern_base, releases the
 * bus resource, frees its name and zeroes the descriptor.
 */
static void tsi148_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
  741. /*
  742. * Set the attributes of an outbound window.
  743. */
  744. static int tsi148_master_set(struct vme_master_resource *image, int enabled,
  745. unsigned long long vme_base, unsigned long long size,
  746. vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
  747. {
  748. int retval = 0;
  749. unsigned int i;
  750. unsigned int temp_ctl = 0;
  751. unsigned int pci_base_low, pci_base_high;
  752. unsigned int pci_bound_low, pci_bound_high;
  753. unsigned int vme_offset_low, vme_offset_high;
  754. unsigned long long pci_bound, vme_offset, pci_base;
  755. struct vme_bridge *tsi148_bridge;
  756. struct tsi148_driver *bridge;
  757. tsi148_bridge = image->parent;
  758. bridge = tsi148_bridge->driver_priv;
  759. /* Verify input data */
  760. if (vme_base & 0xFFFF) {
  761. dev_err(tsi148_bridge->parent, "Invalid VME Window "
  762. "alignment\n");
  763. retval = -EINVAL;
  764. goto err_window;
  765. }
  766. if ((size == 0) && (enabled != 0)) {
  767. dev_err(tsi148_bridge->parent, "Size must be non-zero for "
  768. "enabled windows\n");
  769. retval = -EINVAL;
  770. goto err_window;
  771. }
  772. spin_lock(&image->lock);
  773. /* Let's allocate the resource here rather than further up the stack as
  774. * it avoids pushing loads of bus dependent stuff up the stack. If size
  775. * is zero, any existing resource will be freed.
  776. */
  777. retval = tsi148_alloc_resource(image, size);
  778. if (retval) {
  779. spin_unlock(&image->lock);
  780. dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
  781. "resource\n");
  782. goto err_res;
  783. }
  784. if (size == 0) {
  785. pci_base = 0;
  786. pci_bound = 0;
  787. vme_offset = 0;
  788. } else {
  789. pci_base = (unsigned long long)image->bus_resource.start;
  790. /*
  791. * Bound address is a valid address for the window, adjust
  792. * according to window granularity.
  793. */
  794. pci_bound = pci_base + (size - 0x10000);
  795. vme_offset = vme_base - pci_base;
  796. }
  797. /* Convert 64-bit variables to 2x 32-bit variables */
  798. reg_split(pci_base, &pci_base_high, &pci_base_low);
  799. reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
  800. reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
  801. if (pci_base_low & 0xFFFF) {
  802. spin_unlock(&image->lock);
  803. dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
  804. retval = -EINVAL;
  805. goto err_gran;
  806. }
  807. if (pci_bound_low & 0xFFFF) {
  808. spin_unlock(&image->lock);
  809. dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
  810. retval = -EINVAL;
  811. goto err_gran;
  812. }
  813. if (vme_offset_low & 0xFFFF) {
  814. spin_unlock(&image->lock);
  815. dev_err(tsi148_bridge->parent, "Invalid VME Offset "
  816. "alignment\n");
  817. retval = -EINVAL;
  818. goto err_gran;
  819. }
  820. i = image->number;
  821. /* Disable while we are mucking around */
  822. temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  823. TSI148_LCSR_OFFSET_OTAT);
  824. temp_ctl &= ~TSI148_LCSR_OTAT_EN;
  825. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  826. TSI148_LCSR_OFFSET_OTAT);
  827. /* Setup 2eSST speeds */
  828. temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
  829. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  830. case VME_2eSST160:
  831. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
  832. break;
  833. case VME_2eSST267:
  834. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
  835. break;
  836. case VME_2eSST320:
  837. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
  838. break;
  839. }
  840. /* Setup cycle types */
  841. if (cycle & VME_BLT) {
  842. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  843. temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
  844. }
  845. if (cycle & VME_MBLT) {
  846. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  847. temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
  848. }
  849. if (cycle & VME_2eVME) {
  850. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  851. temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
  852. }
  853. if (cycle & VME_2eSST) {
  854. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  855. temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
  856. }
  857. if (cycle & VME_2eSSTB) {
  858. dev_warn(tsi148_bridge->parent, "Currently not setting "
  859. "Broadcast Select Registers\n");
  860. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  861. temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
  862. }
  863. /* Setup data width */
  864. temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
  865. switch (dwidth) {
  866. case VME_D16:
  867. temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
  868. break;
  869. case VME_D32:
  870. temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
  871. break;
  872. default:
  873. spin_unlock(&image->lock);
  874. dev_err(tsi148_bridge->parent, "Invalid data width\n");
  875. retval = -EINVAL;
  876. goto err_dwidth;
  877. }
  878. /* Setup address space */
  879. temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
  880. switch (aspace) {
  881. case VME_A16:
  882. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
  883. break;
  884. case VME_A24:
  885. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
  886. break;
  887. case VME_A32:
  888. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
  889. break;
  890. case VME_A64:
  891. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
  892. break;
  893. case VME_CRCSR:
  894. temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
  895. break;
  896. case VME_USER1:
  897. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
  898. break;
  899. case VME_USER2:
  900. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
  901. break;
  902. case VME_USER3:
  903. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
  904. break;
  905. case VME_USER4:
  906. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
  907. break;
  908. default:
  909. spin_unlock(&image->lock);
  910. dev_err(tsi148_bridge->parent, "Invalid address space\n");
  911. retval = -EINVAL;
  912. goto err_aspace;
  913. break;
  914. }
  915. temp_ctl &= ~(3<<4);
  916. if (cycle & VME_SUPER)
  917. temp_ctl |= TSI148_LCSR_OTAT_SUP;
  918. if (cycle & VME_PROG)
  919. temp_ctl |= TSI148_LCSR_OTAT_PGM;
  920. /* Setup mapping */
  921. iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
  922. TSI148_LCSR_OFFSET_OTSAU);
  923. iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
  924. TSI148_LCSR_OFFSET_OTSAL);
  925. iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
  926. TSI148_LCSR_OFFSET_OTEAU);
  927. iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
  928. TSI148_LCSR_OFFSET_OTEAL);
  929. iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
  930. TSI148_LCSR_OFFSET_OTOFU);
  931. iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
  932. TSI148_LCSR_OFFSET_OTOFL);
  933. /* Write ctl reg without enable */
  934. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  935. TSI148_LCSR_OFFSET_OTAT);
  936. if (enabled)
  937. temp_ctl |= TSI148_LCSR_OTAT_EN;
  938. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  939. TSI148_LCSR_OFFSET_OTAT);
  940. spin_unlock(&image->lock);
  941. return 0;
  942. err_aspace:
  943. err_dwidth:
  944. err_gran:
  945. tsi148_free_resource(image);
  946. err_res:
  947. err_window:
  948. return retval;
  949. }
/*
 * Get the attributes of an outbound window.
 *
 * XXX Not parsing prefetch information.
 */
  955. static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
  956. unsigned long long *vme_base, unsigned long long *size,
  957. vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
  958. {
  959. unsigned int i, ctl;
  960. unsigned int pci_base_low, pci_base_high;
  961. unsigned int pci_bound_low, pci_bound_high;
  962. unsigned int vme_offset_low, vme_offset_high;
  963. unsigned long long pci_base, pci_bound, vme_offset;
  964. struct tsi148_driver *bridge;
  965. bridge = image->parent->driver_priv;
  966. i = image->number;
  967. ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  968. TSI148_LCSR_OFFSET_OTAT);
  969. pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  970. TSI148_LCSR_OFFSET_OTSAU);
  971. pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  972. TSI148_LCSR_OFFSET_OTSAL);
  973. pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  974. TSI148_LCSR_OFFSET_OTEAU);
  975. pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  976. TSI148_LCSR_OFFSET_OTEAL);
  977. vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  978. TSI148_LCSR_OFFSET_OTOFU);
  979. vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  980. TSI148_LCSR_OFFSET_OTOFL);
  981. /* Convert 64-bit variables to 2x 32-bit variables */
  982. reg_join(pci_base_high, pci_base_low, &pci_base);
  983. reg_join(pci_bound_high, pci_bound_low, &pci_bound);
  984. reg_join(vme_offset_high, vme_offset_low, &vme_offset);
  985. *vme_base = pci_base + vme_offset;
  986. *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
  987. *enabled = 0;
  988. *aspace = 0;
  989. *cycle = 0;
  990. *dwidth = 0;
  991. if (ctl & TSI148_LCSR_OTAT_EN)
  992. *enabled = 1;
  993. /* Setup address space */
  994. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
  995. *aspace |= VME_A16;
  996. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
  997. *aspace |= VME_A24;
  998. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
  999. *aspace |= VME_A32;
  1000. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
  1001. *aspace |= VME_A64;
  1002. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
  1003. *aspace |= VME_CRCSR;
  1004. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
  1005. *aspace |= VME_USER1;
  1006. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
  1007. *aspace |= VME_USER2;
  1008. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
  1009. *aspace |= VME_USER3;
  1010. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
  1011. *aspace |= VME_USER4;
  1012. /* Setup 2eSST speeds */
  1013. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
  1014. *cycle |= VME_2eSST160;
  1015. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
  1016. *cycle |= VME_2eSST267;
  1017. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
  1018. *cycle |= VME_2eSST320;
  1019. /* Setup cycle types */
  1020. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
  1021. *cycle |= VME_SCT;
  1022. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
  1023. *cycle |= VME_BLT;
  1024. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
  1025. *cycle |= VME_MBLT;
  1026. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
  1027. *cycle |= VME_2eVME;
  1028. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
  1029. *cycle |= VME_2eSST;
  1030. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
  1031. *cycle |= VME_2eSSTB;
  1032. if (ctl & TSI148_LCSR_OTAT_SUP)
  1033. *cycle |= VME_SUPER;
  1034. else
  1035. *cycle |= VME_USER;
  1036. if (ctl & TSI148_LCSR_OTAT_PGM)
  1037. *cycle |= VME_PROG;
  1038. else
  1039. *cycle |= VME_DATA;
  1040. /* Setup data width */
  1041. if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
  1042. *dwidth = VME_D16;
  1043. if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
  1044. *dwidth = VME_D32;
  1045. return 0;
  1046. }
  1047. static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
  1048. unsigned long long *vme_base, unsigned long long *size,
  1049. vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
  1050. {
  1051. int retval;
  1052. spin_lock(&image->lock);
  1053. retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
  1054. cycle, dwidth);
  1055. spin_unlock(&image->lock);
  1056. return retval;
  1057. }
  1058. static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
  1059. size_t count, loff_t offset)
  1060. {
  1061. int retval, enabled;
  1062. unsigned long long vme_base, size;
  1063. vme_address_t aspace;
  1064. vme_cycle_t cycle;
  1065. vme_width_t dwidth;
  1066. struct vme_bus_error *vme_err = NULL;
  1067. struct vme_bridge *tsi148_bridge;
  1068. tsi148_bridge = image->parent;
  1069. spin_lock(&image->lock);
  1070. memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
  1071. retval = count;
  1072. if (!err_chk)
  1073. goto skip_chk;
  1074. __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
  1075. &dwidth);
  1076. vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
  1077. count);
  1078. if (vme_err != NULL) {
  1079. dev_err(image->parent->parent, "First VME read error detected "
  1080. "an at address 0x%llx\n", vme_err->address);
  1081. retval = vme_err->address - (vme_base + offset);
  1082. /* Clear down save errors in this address range */
  1083. tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
  1084. count);
  1085. }
  1086. skip_chk:
  1087. spin_unlock(&image->lock);
  1088. return retval;
  1089. }
  1090. static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
  1091. size_t count, loff_t offset)
  1092. {
  1093. int retval = 0, enabled;
  1094. unsigned long long vme_base, size;
  1095. vme_address_t aspace;
  1096. vme_cycle_t cycle;
  1097. vme_width_t dwidth;
  1098. struct vme_bus_error *vme_err = NULL;
  1099. struct vme_bridge *tsi148_bridge;
  1100. struct tsi148_driver *bridge;
  1101. tsi148_bridge = image->parent;
  1102. bridge = tsi148_bridge->driver_priv;
  1103. spin_lock(&image->lock);
  1104. memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
  1105. retval = count;
  1106. /*
  1107. * Writes are posted. We need to do a read on the VME bus to flush out
  1108. * all of the writes before we check for errors. We can't guarantee
  1109. * that reading the data we have just written is safe. It is believed
  1110. * that there isn't any read, write re-ordering, so we can read any
  1111. * location in VME space, so lets read the Device ID from the tsi148's
  1112. * own registers as mapped into CR/CSR space.
  1113. *
  1114. * We check for saved errors in the written address range/space.
  1115. */
  1116. if (!err_chk)
  1117. goto skip_chk;
  1118. /*
  1119. * Get window info first, to maximise the time that the buffers may
  1120. * fluch on their own
  1121. */
  1122. __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
  1123. &dwidth);
  1124. ioread16(bridge->flush_image->kern_base + 0x7F000);
  1125. vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
  1126. count);
  1127. if (vme_err != NULL) {
  1128. dev_warn(tsi148_bridge->parent, "First VME write error detected"
  1129. " an at address 0x%llx\n", vme_err->address);
  1130. retval = vme_err->address - (vme_base + offset);
  1131. /* Clear down save errors in this address range */
  1132. tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
  1133. count);
  1134. }
  1135. skip_chk:
  1136. spin_unlock(&image->lock);
  1137. return retval;
  1138. }
  1139. /*
  1140. * Perform an RMW cycle on the VME bus.
  1141. *
  1142. * Requires a previously configured master window, returns final value.
  1143. */
static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	unsigned long long pci_addr;
	unsigned int pci_addr_high, pci_addr_low;
	u32 tmp, result;
	int i;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	/* Read the window's PCI start address and add the requested offset */
	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);

	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);

	/* Configure registers: enable-mask, compare and swap data, and the
	 * PCI address whose access should trigger the RMW cycle. */
	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);

	/* Enable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	/* Kick process off with a read to the required address. */
	/* NOTE(review): presumably the value read back is the pre-swap data
	 * at the target address - confirm against the TSI148 manual. */
	result = ioread32be(image->kern_base + offset);

	/* Disable RMW again so ordinary accesses behave normally */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
  1186. static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
  1187. vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
  1188. {
  1189. /* Setup 2eSST speeds */
  1190. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  1191. case VME_2eSST160:
  1192. *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
  1193. break;
  1194. case VME_2eSST267:
  1195. *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
  1196. break;
  1197. case VME_2eSST320:
  1198. *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
  1199. break;
  1200. }
  1201. /* Setup cycle types */
  1202. if (cycle & VME_SCT)
  1203. *attr |= TSI148_LCSR_DSAT_TM_SCT;
  1204. if (cycle & VME_BLT)
  1205. *attr |= TSI148_LCSR_DSAT_TM_BLT;
  1206. if (cycle & VME_MBLT)
  1207. *attr |= TSI148_LCSR_DSAT_TM_MBLT;
  1208. if (cycle & VME_2eVME)
  1209. *attr |= TSI148_LCSR_DSAT_TM_2eVME;
  1210. if (cycle & VME_2eSST)
  1211. *attr |= TSI148_LCSR_DSAT_TM_2eSST;
  1212. if (cycle & VME_2eSSTB) {
  1213. dev_err(dev, "Currently not setting Broadcast Select "
  1214. "Registers\n");
  1215. *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
  1216. }
  1217. /* Setup data width */
  1218. switch (dwidth) {
  1219. case VME_D16:
  1220. *attr |= TSI148_LCSR_DSAT_DBW_16;
  1221. break;
  1222. case VME_D32:
  1223. *attr |= TSI148_LCSR_DSAT_DBW_32;
  1224. break;
  1225. default:
  1226. dev_err(dev, "Invalid data width\n");
  1227. return -EINVAL;
  1228. }
  1229. /* Setup address space */
  1230. switch (aspace) {
  1231. case VME_A16:
  1232. *attr |= TSI148_LCSR_DSAT_AMODE_A16;
  1233. break;
  1234. case VME_A24:
  1235. *attr |= TSI148_LCSR_DSAT_AMODE_A24;
  1236. break;
  1237. case VME_A32:
  1238. *attr |= TSI148_LCSR_DSAT_AMODE_A32;
  1239. break;
  1240. case VME_A64:
  1241. *attr |= TSI148_LCSR_DSAT_AMODE_A64;
  1242. break;
  1243. case VME_CRCSR:
  1244. *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
  1245. break;
  1246. case VME_USER1:
  1247. *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
  1248. break;
  1249. case VME_USER2:
  1250. *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
  1251. break;
  1252. case VME_USER3:
  1253. *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
  1254. break;
  1255. case VME_USER4:
  1256. *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
  1257. break;
  1258. default:
  1259. dev_err(dev, "Invalid address space\n");
  1260. return -EINVAL;
  1261. break;
  1262. }
  1263. if (cycle & VME_SUPER)
  1264. *attr |= TSI148_LCSR_DSAT_SUP;
  1265. if (cycle & VME_PROG)
  1266. *attr |= TSI148_LCSR_DSAT_PGM;
  1267. return 0;
  1268. }
  1269. static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
  1270. vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
  1271. {
  1272. /* Setup 2eSST speeds */
  1273. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  1274. case VME_2eSST160:
  1275. *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
  1276. break;
  1277. case VME_2eSST267:
  1278. *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
  1279. break;
  1280. case VME_2eSST320:
  1281. *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
  1282. break;
  1283. }
  1284. /* Setup cycle types */
  1285. if (cycle & VME_SCT)
  1286. *attr |= TSI148_LCSR_DDAT_TM_SCT;
  1287. if (cycle & VME_BLT)
  1288. *attr |= TSI148_LCSR_DDAT_TM_BLT;
  1289. if (cycle & VME_MBLT)
  1290. *attr |= TSI148_LCSR_DDAT_TM_MBLT;
  1291. if (cycle & VME_2eVME)
  1292. *attr |= TSI148_LCSR_DDAT_TM_2eVME;
  1293. if (cycle & VME_2eSST)
  1294. *attr |= TSI148_LCSR_DDAT_TM_2eSST;
  1295. if (cycle & VME_2eSSTB) {
  1296. dev_err(dev, "Currently not setting Broadcast Select "
  1297. "Registers\n");
  1298. *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
  1299. }
  1300. /* Setup data width */
  1301. switch (dwidth) {
  1302. case VME_D16:
  1303. *attr |= TSI148_LCSR_DDAT_DBW_16;
  1304. break;
  1305. case VME_D32:
  1306. *attr |= TSI148_LCSR_DDAT_DBW_32;
  1307. break;
  1308. default:
  1309. dev_err(dev, "Invalid data width\n");
  1310. return -EINVAL;
  1311. }
  1312. /* Setup address space */
  1313. switch (aspace) {
  1314. case VME_A16:
  1315. *attr |= TSI148_LCSR_DDAT_AMODE_A16;
  1316. break;
  1317. case VME_A24:
  1318. *attr |= TSI148_LCSR_DDAT_AMODE_A24;
  1319. break;
  1320. case VME_A32:
  1321. *attr |= TSI148_LCSR_DDAT_AMODE_A32;
  1322. break;
  1323. case VME_A64:
  1324. *attr |= TSI148_LCSR_DDAT_AMODE_A64;
  1325. break;
  1326. case VME_CRCSR:
  1327. *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
  1328. break;
  1329. case VME_USER1:
  1330. *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
  1331. break;
  1332. case VME_USER2:
  1333. *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
  1334. break;
  1335. case VME_USER3:
  1336. *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
  1337. break;
  1338. case VME_USER4:
  1339. *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
  1340. break;
  1341. default:
  1342. dev_err(dev, "Invalid address space\n");
  1343. return -EINVAL;
  1344. break;
  1345. }
  1346. if (cycle & VME_SUPER)
  1347. *attr |= TSI148_LCSR_DDAT_SUP;
  1348. if (cycle & VME_PROG)
  1349. *attr |= TSI148_LCSR_DDAT_PGM;
  1350. return 0;
  1351. }
  1352. /*
  1353. * Add a link list descriptor to the list
  1354. */
  1355. static int tsi148_dma_list_add(struct vme_dma_list *list,
  1356. struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
  1357. {
  1358. struct tsi148_dma_entry *entry, *prev;
  1359. u32 address_high, address_low;
  1360. struct vme_dma_pattern *pattern_attr;
  1361. struct vme_dma_pci *pci_attr;
  1362. struct vme_dma_vme *vme_attr;
  1363. dma_addr_t desc_ptr;
  1364. int retval = 0;
  1365. struct vme_bridge *tsi148_bridge;
  1366. tsi148_bridge = list->parent->parent;
  1367. /* Descriptor must be aligned on 64-bit boundaries */
  1368. entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
  1369. if (entry == NULL) {
  1370. dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
  1371. "dma resource structure\n");
  1372. retval = -ENOMEM;
  1373. goto err_mem;
  1374. }
  1375. /* Test descriptor alignment */
  1376. if ((unsigned long)&entry->descriptor & 0x7) {
  1377. dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
  1378. "byte boundary as required: %p\n",
  1379. &entry->descriptor);
  1380. retval = -EINVAL;
  1381. goto err_align;
  1382. }
  1383. /* Given we are going to fill out the structure, we probably don't
  1384. * need to zero it, but better safe than sorry for now.
  1385. */
  1386. memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
  1387. /* Fill out source part */
  1388. switch (src->type) {
  1389. case VME_DMA_PATTERN:
  1390. pattern_attr = src->private;
  1391. entry->descriptor.dsal = pattern_attr->pattern;
  1392. entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
  1393. /* Default behaviour is 32 bit pattern */
  1394. if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
  1395. entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
  1396. /* It seems that the default behaviour is to increment */
  1397. if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
  1398. entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
  1399. break;
  1400. case VME_DMA_PCI:
  1401. pci_attr = src->private;
  1402. reg_split((unsigned long long)pci_attr->address, &address_high,
  1403. &address_low);
  1404. entry->descriptor.dsau = address_high;
  1405. entry->descriptor.dsal = address_low;
  1406. entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
  1407. break;
  1408. case VME_DMA_VME:
  1409. vme_attr = src->private;
  1410. reg_split((unsigned long long)vme_attr->address, &address_high,
  1411. &address_low);
  1412. entry->descriptor.dsau = address_high;
  1413. entry->descriptor.dsal = address_low;
  1414. entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
  1415. retval = tsi148_dma_set_vme_src_attributes(
  1416. tsi148_bridge->parent, &entry->descriptor.dsat,
  1417. vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
  1418. if (retval < 0)
  1419. goto err_source;
  1420. break;
  1421. default:
  1422. dev_err(tsi148_bridge->parent, "Invalid source type\n");
  1423. retval = -EINVAL;
  1424. goto err_source;
  1425. break;
  1426. }
  1427. /* Assume last link - this will be over-written by adding another */
  1428. entry->descriptor.dnlau = 0;
  1429. entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
  1430. /* Fill out destination part */
  1431. switch (dest->type) {
  1432. case VME_DMA_PCI:
  1433. pci_attr = dest->private;
  1434. reg_split((unsigned long long)pci_attr->address, &address_high,
  1435. &address_low);
  1436. entry->descriptor.ddau = address_high;
  1437. entry->descriptor.ddal = address_low;
  1438. entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
  1439. break;
  1440. case VME_DMA_VME:
  1441. vme_attr = dest->private;
  1442. reg_split((unsigned long long)vme_attr->address, &address_high,
  1443. &address_low);
  1444. entry->descriptor.ddau = address_high;
  1445. entry->descriptor.ddal = address_low;
  1446. entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
  1447. retval = tsi148_dma_set_vme_dest_attributes(
  1448. tsi148_bridge->parent, &entry->descriptor.ddat,
  1449. vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
  1450. if (retval < 0)
  1451. goto err_dest;
  1452. break;
  1453. default:
  1454. dev_err(tsi148_bridge->parent, "Invalid destination type\n");
  1455. retval = -EINVAL;
  1456. goto err_dest;
  1457. break;
  1458. }
  1459. /* Fill out count */
  1460. entry->descriptor.dcnt = (u32)count;
  1461. /* Add to list */
  1462. list_add_tail(&entry->list, &list->entries);
  1463. /* Fill out previous descriptors "Next Address" */
  1464. if (entry->list.prev != &list->entries) {
  1465. prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
  1466. list);
  1467. /* We need the bus address for the pointer */
  1468. desc_ptr = virt_to_bus(&entry->descriptor);
  1469. reg_split(desc_ptr, &prev->descriptor.dnlau,
  1470. &prev->descriptor.dnlal);
  1471. }
  1472. return 0;
  1473. err_dest:
  1474. err_source:
  1475. err_align:
  1476. kfree(entry);
  1477. err_mem:
  1478. return retval;
  1479. }
  1480. /*
  1481. * Check to see if the provided DMA channel is busy.
  1482. */
  1483. static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
  1484. {
  1485. u32 tmp;
  1486. struct tsi148_driver *bridge;
  1487. bridge = tsi148_bridge->driver_priv;
  1488. tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
  1489. TSI148_LCSR_OFFSET_DSTA);
  1490. if (tmp & TSI148_LCSR_DSTA_BSY)
  1491. return 0;
  1492. else
  1493. return 1;
  1494. }
  1495. /*
  1496. * Execute a previously generated link list
  1497. *
  1498. * XXX Need to provide control register configuration.
  1499. */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval = 0;
	struct tsi148_dma_entry *entry;
	dma_addr_t bus_addr;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	/* Point the channel's "next link address" at the first descriptor;
	 * the hardware follows the chained descriptors from there. */
	reg_split(bus_addr, &bus_addr_high, &bus_addr_low);

	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	/* Start the operation */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* Sleep until the channel's busy bit clears (presumably woken from
	 * the DMA completion interrupt - handler not in view here) */
	wait_event_interruptible(bridge->dma_queue[channel],
		tsi148_dma_busy(ctrlr->parent, channel));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	/* Report an I/O error if a VME bus error was flagged */
	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}
  1558. /*
  1559. * Clean up a previously generated link list
  1560. *
  1561. * We have a separate function, don't assume that the chain can't be reused.
  1562. */
  1563. static int tsi148_dma_list_empty(struct vme_dma_list *list)
  1564. {
  1565. struct list_head *pos, *temp;
  1566. struct tsi148_dma_entry *entry;
  1567. /* detach and free each entry */
  1568. list_for_each_safe(pos, temp, &list->entries) {
  1569. list_del(pos);
  1570. entry = list_entry(pos, struct tsi148_dma_entry, list);
  1571. kfree(entry);
  1572. }
  1573. return 0;
  1574. }
  1575. /*
  1576. * All 4 location monitors reside at the same base - this is therefore a
  1577. * system wide configuration.
  1578. *
  1579. * This does not enable the LM monitor - that should be done when the first
  1580. * callback is attached and disabled when the last callback is removed.
  1581. */
  1582. static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
  1583. vme_address_t aspace, vme_cycle_t cycle)
  1584. {
  1585. u32 lm_base_high, lm_base_low, lm_ctl = 0;
  1586. int i;
  1587. struct vme_bridge *tsi148_bridge;
  1588. struct tsi148_driver *bridge;
  1589. tsi148_bridge = lm->parent;
  1590. bridge = tsi148_bridge->driver_priv;
  1591. mutex_lock(&lm->mtx);
  1592. /* If we already have a callback attached, we can't move it! */
  1593. for (i = 0; i < lm->monitors; i++) {
  1594. if (bridge->lm_callback[i] != NULL) {
  1595. mutex_unlock(&lm->mtx);
  1596. dev_err(tsi148_bridge->parent, "Location monitor "
  1597. "callback attached, can't reset\n");
  1598. return -EBUSY;
  1599. }
  1600. }
  1601. switch (aspace) {
  1602. case VME_A16:
  1603. lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
  1604. break;
  1605. case VME_A24:
  1606. lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
  1607. break;
  1608. case VME_A32:
  1609. lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
  1610. break;
  1611. case VME_A64:
  1612. lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
  1613. break;
  1614. default:
  1615. mutex_unlock(&lm->mtx);
  1616. dev_err(tsi148_bridge->parent, "Invalid address space\n");
  1617. return -EINVAL;
  1618. break;
  1619. }
  1620. if (cycle & VME_SUPER)
  1621. lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
  1622. if (cycle & VME_USER)
  1623. lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
  1624. if (cycle & VME_PROG)
  1625. lm_ctl |= TSI148_LCSR_LMAT_PGM;
  1626. if (cycle & VME_DATA)
  1627. lm_ctl |= TSI148_LCSR_LMAT_DATA;
  1628. reg_split(lm_base, &lm_base_high, &lm_base_low);
  1629. iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
  1630. iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
  1631. iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
  1632. mutex_unlock(&lm->mtx);
  1633. return 0;
  1634. }
  1635. /* Get configuration of the callback monitor and return whether it is enabled
  1636. * or disabled.
  1637. */
  1638. static int tsi148_lm_get(struct vme_lm_resource *lm,
  1639. unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
  1640. {
  1641. u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
  1642. struct tsi148_driver *bridge;
  1643. bridge = lm->parent->driver_priv;
  1644. mutex_lock(&lm->mtx);
  1645. lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
  1646. lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
  1647. lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
  1648. reg_join(lm_base_high, lm_base_low, lm_base);
  1649. if (lm_ctl & TSI148_LCSR_LMAT_EN)
  1650. enabled = 1;
  1651. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
  1652. *aspace |= VME_A16;
  1653. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
  1654. *aspace |= VME_A24;
  1655. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
  1656. *aspace |= VME_A32;
  1657. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
  1658. *aspace |= VME_A64;
  1659. if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
  1660. *cycle |= VME_SUPER;
  1661. if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
  1662. *cycle |= VME_USER;
  1663. if (lm_ctl & TSI148_LCSR_LMAT_PGM)
  1664. *cycle |= VME_PROG;
  1665. if (lm_ctl & TSI148_LCSR_LMAT_DATA)
  1666. *cycle |= VME_DATA;
  1667. mutex_unlock(&lm->mtx);
  1668. return enabled;
  1669. }
  1670. /*
  1671. * Attach a callback to a specific location monitor.
  1672. *
  1673. * Callback will be passed the monitor triggered.
  1674. */
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor not properly "
			"configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt: both the individual enable
	 * (INTEN) and the interrupt-out enable (INTEO) must be set */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
		lm_ctl |= TSI148_LCSR_LMAT_EN;
		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
  1715. /*
  1716. * Detach a callback function forn a specific location monitor.
  1717. */
  1718. static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
  1719. {
  1720. u32 lm_en, tmp;
  1721. struct tsi148_driver *bridge;
  1722. bridge = lm->parent->driver_priv;
  1723. mutex_lock(&lm->mtx);
  1724. /* Disable Location Monitor and ensure previous interrupts are clear */
  1725. lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
  1726. lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
  1727. iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
  1728. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
  1729. tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
  1730. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
  1731. iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
  1732. bridge->base + TSI148_LCSR_INTC);
  1733. /* Detach callback */
  1734. bridge->lm_callback[monitor] = NULL;
  1735. /* If all location monitors disabled, disable global Location Monitor */
  1736. if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
  1737. TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
  1738. tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
  1739. tmp &= ~TSI148_LCSR_LMAT_EN;
  1740. iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
  1741. }
  1742. mutex_unlock(&lm->mtx);
  1743. return 0;
  1744. }
  1745. /*
  1746. * Determine Geographical Addressing
  1747. */
  1748. static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
  1749. {
  1750. u32 slot = 0;
  1751. struct tsi148_driver *bridge;
  1752. bridge = tsi148_bridge->driver_priv;
  1753. if (!geoid) {
  1754. slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
  1755. slot = slot & TSI148_LCSR_VSTAT_GA_M;
  1756. } else
  1757. slot = geoid;
  1758. return (int)slot;
  1759. }
/* Module init: register the PCI driver; the PCI core then invokes
 * tsi148_probe() for each matching device.
 */
static int __init tsi148_init(void)
{
	return pci_register_driver(&tsi148_driver);
}
  1764. /*
  1765. * Configure CR/CSR space
  1766. *
  1767. * Access to the CR/CSR can be configured at power-up. The location of the
  1768. * CR/CSR registers in the CR/CSR address space is determined by the boards
  1769. * Auto-ID or Geographic address. This function ensures that the window is
  1770. * enabled at an offset consistent with the boards geopgraphic address.
  1771. *
  1772. * Each board has a 512kB window, with the highest 4kB being used for the
  1773. * boards registers, this means there is a fix length 508kB window which must
  1774. * be mapped onto PCI memory.
  1775. */
  1776. static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
  1777. struct pci_dev *pdev)
  1778. {
  1779. u32 cbar, crat, vstat;
  1780. u32 crcsr_bus_high, crcsr_bus_low;
  1781. int retval;
  1782. struct tsi148_driver *bridge;
  1783. bridge = tsi148_bridge->driver_priv;
  1784. /* Allocate mem for CR/CSR image */
  1785. bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
  1786. &bridge->crcsr_bus);
  1787. if (bridge->crcsr_kernel == NULL) {
  1788. dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
  1789. "CR/CSR image\n");
  1790. return -ENOMEM;
  1791. }
  1792. memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
  1793. reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
  1794. iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
  1795. iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
  1796. /* Ensure that the CR/CSR is configured at the correct offset */
  1797. cbar = ioread32be(bridge->base + TSI148_CBAR);
  1798. cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
  1799. vstat = tsi148_slot_get(tsi148_bridge);
  1800. if (cbar != vstat) {
  1801. cbar = vstat;
  1802. dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
  1803. iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
  1804. }
  1805. dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
  1806. crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
  1807. if (crat & TSI148_LCSR_CRAT_EN) {
  1808. dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
  1809. iowrite32be(crat | TSI148_LCSR_CRAT_EN,
  1810. bridge->base + TSI148_LCSR_CRAT);
  1811. } else
  1812. dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
  1813. /* If we want flushed, error-checked writes, set up a window
  1814. * over the CR/CSR registers. We read from here to safely flush
  1815. * through VME writes.
  1816. */
  1817. if (err_chk) {
  1818. retval = tsi148_master_set(bridge->flush_image, 1,
  1819. (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
  1820. VME_D16);
  1821. if (retval)
  1822. dev_err(tsi148_bridge->parent, "Configuring flush image"
  1823. " failed\n");
  1824. }
  1825. return 0;
  1826. }
  1827. static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
  1828. struct pci_dev *pdev)
  1829. {
  1830. u32 crat;
  1831. struct tsi148_driver *bridge;
  1832. bridge = tsi148_bridge->driver_priv;
  1833. /* Turn off CR/CSR space */
  1834. crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
  1835. iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
  1836. bridge->base + TSI148_LCSR_CRAT);
  1837. /* Free image */
  1838. iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
  1839. iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
  1840. pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
  1841. bridge->crcsr_bus);
  1842. }
  1843. static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  1844. {
  1845. int retval, i, master_num;
  1846. u32 data;
  1847. struct list_head *pos = NULL;
  1848. struct vme_bridge *tsi148_bridge;
  1849. struct tsi148_driver *tsi148_device;
  1850. struct vme_master_resource *master_image;
  1851. struct vme_slave_resource *slave_image;
  1852. struct vme_dma_resource *dma_ctrlr;
  1853. struct vme_lm_resource *lm;
  1854. /* If we want to support more than one of each bridge, we need to
  1855. * dynamically generate this so we get one per device
  1856. */
  1857. tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
  1858. if (tsi148_bridge == NULL) {
  1859. dev_err(&pdev->dev, "Failed to allocate memory for device "
  1860. "structure\n");
  1861. retval = -ENOMEM;
  1862. goto err_struct;
  1863. }
  1864. tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
  1865. if (tsi148_device == NULL) {
  1866. dev_err(&pdev->dev, "Failed to allocate memory for device "
  1867. "structure\n");
  1868. retval = -ENOMEM;
  1869. goto err_driver;
  1870. }
  1871. tsi148_bridge->driver_priv = tsi148_device;
  1872. /* Enable the device */
  1873. retval = pci_enable_device(pdev);
  1874. if (retval) {
  1875. dev_err(&pdev->dev, "Unable to enable device\n");
  1876. goto err_enable;
  1877. }
  1878. /* Map Registers */
  1879. retval = pci_request_regions(pdev, driver_name);
  1880. if (retval) {
  1881. dev_err(&pdev->dev, "Unable to reserve resources\n");
  1882. goto err_resource;
  1883. }
  1884. /* map registers in BAR 0 */
  1885. tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
  1886. 4096);
  1887. if (!tsi148_device->base) {
  1888. dev_err(&pdev->dev, "Unable to remap CRG region\n");
  1889. retval = -EIO;
  1890. goto err_remap;
  1891. }
  1892. /* Check to see if the mapping worked out */
  1893. data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
  1894. if (data != PCI_VENDOR_ID_TUNDRA) {
  1895. dev_err(&pdev->dev, "CRG region check failed\n");
  1896. retval = -EIO;
  1897. goto err_test;
  1898. }
  1899. /* Initialize wait queues & mutual exclusion flags */
  1900. init_waitqueue_head(&tsi148_device->dma_queue[0]);
  1901. init_waitqueue_head(&tsi148_device->dma_queue[1]);
  1902. init_waitqueue_head(&tsi148_device->iack_queue);
  1903. mutex_init(&tsi148_device->vme_int);
  1904. mutex_init(&tsi148_device->vme_rmw);
  1905. tsi148_bridge->parent = &pdev->dev;
  1906. strcpy(tsi148_bridge->name, driver_name);
  1907. /* Setup IRQ */
  1908. retval = tsi148_irq_init(tsi148_bridge);
  1909. if (retval != 0) {
  1910. dev_err(&pdev->dev, "Chip Initialization failed.\n");
  1911. goto err_irq;
  1912. }
  1913. /* If we are going to flush writes, we need to read from the VME bus.
  1914. * We need to do this safely, thus we read the devices own CR/CSR
  1915. * register. To do this we must set up a window in CR/CSR space and
  1916. * hence have one less master window resource available.
  1917. */
  1918. master_num = TSI148_MAX_MASTER;
  1919. if (err_chk) {
  1920. master_num--;
  1921. tsi148_device->flush_image =
  1922. kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
  1923. if (tsi148_device->flush_image == NULL) {
  1924. dev_err(&pdev->dev, "Failed to allocate memory for "
  1925. "flush resource structure\n");
  1926. retval = -ENOMEM;
  1927. goto err_master;
  1928. }
  1929. tsi148_device->flush_image->parent = tsi148_bridge;
  1930. spin_lock_init(&tsi148_device->flush_image->lock);
  1931. tsi148_device->flush_image->locked = 1;
  1932. tsi148_device->flush_image->number = master_num;
  1933. tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
  1934. VME_A32 | VME_A64;
  1935. tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
  1936. VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
  1937. VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
  1938. VME_USER | VME_PROG | VME_DATA;
  1939. tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
  1940. memset(&tsi148_device->flush_image->bus_resource, 0,
  1941. sizeof(struct resource));
  1942. tsi148_device->flush_image->kern_base = NULL;
  1943. }
  1944. /* Add master windows to list */
  1945. INIT_LIST_HEAD(&tsi148_bridge->master_resources);
  1946. for (i = 0; i < master_num; i++) {
  1947. master_image = kmalloc(sizeof(struct vme_master_resource),
  1948. GFP_KERNEL);
  1949. if (master_image == NULL) {
  1950. dev_err(&pdev->dev, "Failed to allocate memory for "
  1951. "master resource structure\n");
  1952. retval = -ENOMEM;
  1953. goto err_master;
  1954. }
  1955. master_image->parent = tsi148_bridge;
  1956. spin_lock_init(&master_image->lock);
  1957. master_image->locked = 0;
  1958. master_image->number = i;
  1959. master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
  1960. VME_A64;
  1961. master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
  1962. VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
  1963. VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
  1964. VME_PROG | VME_DATA;
  1965. master_image->width_attr = VME_D16 | VME_D32;
  1966. memset(&master_image->bus_resource, 0,
  1967. sizeof(struct resource));
  1968. master_image->kern_base = NULL;
  1969. list_add_tail(&master_image->list,
  1970. &tsi148_bridge->master_resources);
  1971. }
  1972. /* Add slave windows to list */
  1973. INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
  1974. for (i = 0; i < TSI148_MAX_SLAVE; i++) {
  1975. slave_image = kmalloc(sizeof(struct vme_slave_resource),
  1976. GFP_KERNEL);
  1977. if (slave_image == NULL) {
  1978. dev_err(&pdev->dev, "Failed to allocate memory for "
  1979. "slave resource structure\n");
  1980. retval = -ENOMEM;
  1981. goto err_slave;
  1982. }
  1983. slave_image->parent = tsi148_bridge;
  1984. mutex_init(&slave_image->mtx);
  1985. slave_image->locked = 0;
  1986. slave_image->number = i;
  1987. slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
  1988. VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
  1989. VME_USER3 | VME_USER4;
  1990. slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
  1991. VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
  1992. VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
  1993. VME_PROG | VME_DATA;
  1994. list_add_tail(&slave_image->list,
  1995. &tsi148_bridge->slave_resources);
  1996. }
  1997. /* Add dma engines to list */
  1998. INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
  1999. for (i = 0; i < TSI148_MAX_DMA; i++) {
  2000. dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
  2001. GFP_KERNEL);
  2002. if (dma_ctrlr == NULL) {
  2003. dev_err(&pdev->dev, "Failed to allocate memory for "
  2004. "dma resource structure\n");
  2005. retval = -ENOMEM;
  2006. goto err_dma;
  2007. }
  2008. dma_ctrlr->parent = tsi148_bridge;
  2009. mutex_init(&dma_ctrlr->mtx);
  2010. dma_ctrlr->locked = 0;
  2011. dma_ctrlr->number = i;
  2012. dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
  2013. VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
  2014. VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
  2015. VME_DMA_PATTERN_TO_MEM;
  2016. INIT_LIST_HEAD(&dma_ctrlr->pending);
  2017. INIT_LIST_HEAD(&dma_ctrlr->running);
  2018. list_add_tail(&dma_ctrlr->list,
  2019. &tsi148_bridge->dma_resources);
  2020. }
  2021. /* Add location monitor to list */
  2022. INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
  2023. lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
  2024. if (lm == NULL) {
  2025. dev_err(&pdev->dev, "Failed to allocate memory for "
  2026. "location monitor resource structure\n");
  2027. retval = -ENOMEM;
  2028. goto err_lm;
  2029. }
  2030. lm->parent = tsi148_bridge;
  2031. mutex_init(&lm->mtx);
  2032. lm->locked = 0;
  2033. lm->number = 1;
  2034. lm->monitors = 4;
  2035. list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
  2036. tsi148_bridge->slave_get = tsi148_slave_get;
  2037. tsi148_bridge->slave_set = tsi148_slave_set;
  2038. tsi148_bridge->master_get = tsi148_master_get;
  2039. tsi148_bridge->master_set = tsi148_master_set;
  2040. tsi148_bridge->master_read = tsi148_master_read;
  2041. tsi148_bridge->master_write = tsi148_master_write;
  2042. tsi148_bridge->master_rmw = tsi148_master_rmw;
  2043. tsi148_bridge->dma_list_add = tsi148_dma_list_add;
  2044. tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
  2045. tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
  2046. tsi148_bridge->irq_set = tsi148_irq_set;
  2047. tsi148_bridge->irq_generate = tsi148_irq_generate;
  2048. tsi148_bridge->lm_set = tsi148_lm_set;
  2049. tsi148_bridge->lm_get = tsi148_lm_get;
  2050. tsi148_bridge->lm_attach = tsi148_lm_attach;
  2051. tsi148_bridge->lm_detach = tsi148_lm_detach;
  2052. tsi148_bridge->slot_get = tsi148_slot_get;
  2053. data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
  2054. dev_info(&pdev->dev, "Board is%s the VME system controller\n",
  2055. (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
  2056. if (!geoid)
  2057. dev_info(&pdev->dev, "VME geographical address is %d\n",
  2058. data & TSI148_LCSR_VSTAT_GA_M);
  2059. else
  2060. dev_info(&pdev->dev, "VME geographical address is set to %d\n",
  2061. geoid);
  2062. dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
  2063. err_chk ? "enabled" : "disabled");
  2064. if (tsi148_crcsr_init(tsi148_bridge, pdev)) {
  2065. dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
  2066. goto err_crcsr;
  2067. }
  2068. retval = vme_register_bridge(tsi148_bridge);
  2069. if (retval != 0) {
  2070. dev_err(&pdev->dev, "Chip Registration failed.\n");
  2071. goto err_reg;
  2072. }
  2073. pci_set_drvdata(pdev, tsi148_bridge);
  2074. /* Clear VME bus "board fail", and "power-up reset" lines */
  2075. data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
  2076. data &= ~TSI148_LCSR_VSTAT_BRDFL;
  2077. data |= TSI148_LCSR_VSTAT_CPURST;
  2078. iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
  2079. return 0;
  2080. err_reg:
  2081. tsi148_crcsr_exit(tsi148_bridge, pdev);
  2082. err_crcsr:
  2083. err_lm:
  2084. /* resources are stored in link list */
  2085. list_for_each(pos, &tsi148_bridge->lm_resources) {
  2086. lm = list_entry(pos, struct vme_lm_resource, list);
  2087. list_del(pos);
  2088. kfree(lm);
  2089. }
  2090. err_dma:
  2091. /* resources are stored in link list */
  2092. list_for_each(pos, &tsi148_bridge->dma_resources) {
  2093. dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
  2094. list_del(pos);
  2095. kfree(dma_ctrlr);
  2096. }
  2097. err_slave:
  2098. /* resources are stored in link list */
  2099. list_for_each(pos, &tsi148_bridge->slave_resources) {
  2100. slave_image = list_entry(pos, struct vme_slave_resource, list);
  2101. list_del(pos);
  2102. kfree(slave_image);
  2103. }
  2104. err_master:
  2105. /* resources are stored in link list */
  2106. list_for_each(pos, &tsi148_bridge->master_resources) {
  2107. master_image = list_entry(pos, struct vme_master_resource,
  2108. list);
  2109. list_del(pos);
  2110. kfree(master_image);
  2111. }
  2112. tsi148_irq_exit(tsi148_bridge, pdev);
  2113. err_irq:
  2114. err_test:
  2115. iounmap(tsi148_device->base);
  2116. err_remap:
  2117. pci_release_regions(pdev);
  2118. err_resource:
  2119. pci_disable_device(pdev);
  2120. err_enable:
  2121. kfree(tsi148_device);
  2122. err_driver:
  2123. kfree(tsi148_bridge);
  2124. err_struct:
  2125. return retval;
  2126. }
/* PCI driver .remove callback: quiesce the TSI148 hardware, unregister the
 * bridge from the VME core and release everything tsi148_probe() acquired.
 * The register writes below are ordered teardown steps - do not reorder.
 */
static void tsi148_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	int i;
	struct tsi148_driver *bridge;
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);

	bridge = tsi148_bridge->driver_priv;

	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < 8; i++) {
		/* Clearing the attribute register disables the window */
		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
			TSI148_LCSR_OFFSET_ITAT);
		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
			TSI148_LCSR_OFFSET_OTAT);
	}

	/*
	 * Shutdown Location monitor.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);

	/*
	 * Shutdown CRG map.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);

	/*
	 * Clear error status.
	 */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);

	/*
	 * Remove VIRQ interrupt (if any)
	 */
	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);

	/*
	 * Map all Interrupts to PCI INTA
	 */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);

	tsi148_irq_exit(tsi148_bridge, pdev);

	vme_unregister_bridge(tsi148_bridge);

	tsi148_crcsr_exit(tsi148_bridge, pdev);

	/* resources are stored in link list; the _safe iterator allows
	 * deletion while walking.
	 */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	/* NOTE(review): bridge->flush_image (allocated in probe when err_chk
	 * is set) does not appear to be freed here - confirm and fix if so.
	 */
	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(tsi148_bridge->driver_priv);

	kfree(tsi148_bridge);
}
/* Module exit: unregister the PCI driver; the PCI core then calls
 * tsi148_remove() for each bound device.
 */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);
}
/* Module parameter descriptions; the variables are declared near the top of
 * the file.
 */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
/* NOTE(review): err_chk is declared as 'static int' but registered with the
 * 'bool' param type; kernels from ~2.6.36 onward require the variable to be
 * a real bool for this - confirm against the target kernel version.
 */
module_param(err_chk, bool, 0);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);