/drivers/ieee1394/ohci1394.c

Source: https://bitbucket.org/evzijst/gittest (C, 3705 lines; this view is truncated.)

/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */
/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionality
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 *
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif
#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

static char version[] __devinitdata =
	"$Rev: 1250 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};

/* Swap headers */
static inline void packet_swab(quadlet_t *data, int tcode)
{
	size_t size;

	/* Bounds-check the tcode before using it to index hdr_sizes[];
	 * tcode is a 4-bit field but the table only has 12 entries. */
	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = swab32(data[size]);
}
#else
/* Byte swapping is a no-op on little-endian hosts */
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */
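
/*
 * Example (illustration only): for a quadlet read request,
 * tcode == TCODE_READQ, so hdr_sizes[tcode] is 3 and packet_swab()
 * byte-swaps exactly the first three header quadlets in place:
 *
 *	quadlet_t hdr[3] = { 0x12345678, 0x9abcdef0, 0x0badf00d };
 *	packet_swab(hdr, TCODE_READQ);
 *	// hdr[] is now { 0x78563412, 0xf0debc9a, 0x0df0ad0b }
 *
 * On little-endian hosts the macro variant compiles away entirely.
 */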
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
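
/*
 * PhyControl register layout assumed by the two accessors below
 * (summary per the OHCI 1.1 specification):
 *
 *	bits  0- 7  wrData  - value to write to the PHY register
 *	bits  8-11  regAddr - PHY register address
 *	bit     14  wrReg   - initiate a write      (0x00004000)
 *	bit     15  rdReg   - initiate a read       (0x00008000)
 *	bits 16-23  rdData  - value read from the PHY
 *	bit     31  rdDone  - rdData is valid       (0x80000000)
 *
 * get_phy_reg() sets rdReg and polls rdDone; set_phy_reg() sets wrReg
 * and polls for wrReg to clear.
 */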
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* Or's our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count = reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors < OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
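
/*
 * Worked example (illustration only): the PHY delivers each self-ID
 * quadlet followed by its bit inverse, which is what the q0 == ~q1
 * consistency check above relies on.  A valid buffer fragment could be
 *
 *	q[0] = 0x807f8c5e	(self-ID quadlet)
 *	q[1] = 0x7f8073a1	(its complement, ~0x807f8c5e)
 *
 * and the sending node's phy_id sits in bits 24-29 of q0, matching the
 * (q0 & 0x3f000000) >> 24 extraction above.
 */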
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i = 0; i < d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
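
/*
 * Shape of the program built above (sketch): num_desc INPUT_MORE
 * descriptors chained into a ring.  Every branchAddress carries Z=1 in
 * its low nibble except the last, which points back to the first
 * descriptor with Z=0, so the context stalls there rather than
 * overwriting buffers that have not been processed yet:
 *
 *	prg[0] -Z=1-> prg[1] -Z=1-> ... -Z=1-> prg[n-1] -Z=0-> prg[0]
 *
 * The receive tasklet (not shown in this excerpt) recycles the buffers
 * and wakes the context as they drain.
 */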
/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i, ctx = 0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i = 0; i < 32; i++) {
		if (tmp & 1)
			ctx++;
		tmp >>= 1;
	}
	return ctx;
}
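
/*
 * Note: the loop above is simply a population count of the
 * "implemented contexts" mask; with <linux/bitops.h> available it
 * could equivalently be written as
 *
 *	return hweight32(tmp);
 *
 * The open-coded loop is kept as in the original driver.
 */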
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (!hpsb_disable_irm)
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
			 (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA */
	ohci->ir_legacy_channels = 0;
	initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
	DBGMSG("ISO receive legacy context activated");

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx] Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}
	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
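
/*
 * Worked example (illustration only) for the max_packet_size
 * computation above: BusOptions bits 12-15 hold max_rec, and the
 * largest asynchronous payload is 2^(max_rec + 1) bytes.  A card
 * reporting max_rec = 10 therefore yields
 *
 *	1 << ((10 & 0xf) + 1) == 2048 bytes,
 *
 * which is also why values outside [512, 4096] are treated above as a
 * sign of corrupt serial EEPROM contents.
 */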
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);

		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
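
/*
 * Note on the low bits OR'd into the branch addresses above: OHCI
 * encodes the size Z of the next descriptor block in the low nibble of
 * a branchAddress.  A header-only (immediate) packet occupies two
 * 16-byte descriptor slots, hence "| 0x2"; a packet with a payload
 * adds an end descriptor for the data and occupies three, hence
 * "| 0x3".  dma_trm_flush() below uses the same 2-vs-3 rule for the
 * Z value written into the CommandPtr register.
 */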
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing was inserted: either no free prgs or no pending packets */
	if (z == 0)
		return;

	/* Is the context running? (should be, unless this is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d", d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d", d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
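
/*
 * ContextControl bits used above (summary per the OHCI spec): writing
 * 0x8000 to ctrlSet sets "run", writing 0x1000 sets "wake", and 0x400
 * reads back as "active".  A freshly started context needs a valid
 * CommandPtr plus run; a context that is running but has gone idle
 * only needs wake.  Wake is written unconditionally because the
 * context may go inactive between the status read and the write.
 */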
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case.  I don't see anyone sending ISO packets from
		 * interrupt context anyway... */
		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock, flags);
	list_add_tail(&packet->driver_list, &d->pending_list);
	dma_trm_flush(ohci, d);
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;
	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;

		if (arg < 0 || arg > 63) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1 << arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		if (arg > 31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1 << (arg - 32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1 << arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg < 0 || arg > 63) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1 << arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg > 31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1 << (arg - 32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1 << arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);
		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode.  The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor).  Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block.  But
  the user API expects all packets to be contiguous.  Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
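
/*
  Numeric example of the guard-page scheme (illustration only, assuming
  PAGE_SIZE == 4096): a 64 KB DMA buffer spans 16 pages, the last of
  which is held in reserve, so nblocks == 15 and the ring covers bytes
  0..61439.  If a 600-byte packet starts at offset 61200, its final 360
  bytes are copied into the guard page starting at offset 61440, and
  the reader still sees one contiguous packet.
*/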
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						   ((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
		goto err;

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
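
/*
 * Worked example (illustration only, assuming PAGE_SIZE == 4096) of
 * the control word assembled above for a buffer-fill block that wants
 * an interrupt:
 *
 *	(2 << 28) | (8 << 24) | (3 << 20) | (3 << 18) | 4096 == 0x283c1000
 *
 * i.e. INPUT_MORE, status updates enabled, interrupt on completion,
 * branching enabled, 4096-byte buffer.
 */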
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
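
/*
 * ContextMatch layout assembled above (summary): the tag mask occupies
 * bits 28-31, the cycleMatch value (two 'seconds' bits plus a 13-bit
 * cycle) bits 12-26, the sync field bits 8-11, and the channel number
 * bits 0-5.  For example, tag_mask == 0xf on channel 9 with no cycle
 * or sync match gives contextMatch == 0xf0000009.
 */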
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	int len;

	/* release the memory where the packet was */
	len = info->len;

	/* add the wasted space for padding to 4 bytes */
	if (len % 4)
		len += 4 - (len % 4);

	/* add 8 bytes for the OHCI DMA data format overhead */
	len += 8;

	recv->released_bytes += len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}
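
/*
 * Arithmetic example for the accounting above (illustration only): a
 * packet with info->len == 1001 consumes 1001 + 3 (padding to a
 * quadlet boundary) + 8 (OHCI header/trailer quadlets) == 1012 bytes
 * of ring space.  Whole blocks are recycled back to the DMA ring only
 * once released_bytes exceeds buf_stride (one page).
 */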
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be w…