PageRenderTime 67ms CodeModel.GetById 21ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/usb/host/isp1362-hcd.c

https://gitlab.com/stalker-android/linux-omap3
C | 2884 lines | 2336 code | 381 blank | 167 comment | 354 complexity | 412aa43275b7a120a83438b23cf5cb38 MD5 | raw file
Possible License(s): GPL-2.0
  1. /*
  2. * ISP1362 HCD (Host Controller Driver) for USB.
  3. *
  4. * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
  5. *
  6. * Derived from the SL811 HCD, rewritten for ISP116x.
  7. * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
  8. *
  9. * Portions:
  10. * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  11. * Copyright (C) 2004 David Brownell
  12. */
  13. /*
  14. * The ISP1362 chip requires a large delay (300ns and 462ns) between
  15. * accesses to the address and data register.
  16. * The following timing options exist:
  17. *
  18. * 1. Configure your memory controller to add such delays if it can (the best)
  19. * 2. Implement platform-specific delay function possibly
  20. * combined with configuring the memory controller; see
  21. * include/linux/usb_isp1362.h for more info.
  22. * 3. Use ndelay (easiest, poorest).
  23. *
  24. * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
  25. * platform specific section of isp1362.h to select the appropriate variant.
  26. *
  27. * Also note that according to the Philips "ISP1362 Errata" document
  28. * Rev 1.00 from 27 May data corruption may occur when the #WR signal
  29. * is reasserted (even with #CS deasserted) within 132ns after a
  30. * write cycle to any controller register. If the hardware doesn't
  31. * implement the recommended fix (gating the #WR with #CS) software
  32. * must ensure that no further write cycle (not necessarily to the chip!)
  33. * is issued by the CPU within this interval.
  34. * For PXA25x this can be ensured by using VLIO with the maximum
  35. * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  36. */
  37. #ifdef CONFIG_USB_DEBUG
  38. # define ISP1362_DEBUG
  39. #else
  40. # undef ISP1362_DEBUG
  41. #endif
  42. /*
  43. * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
  44. * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
  45. * requests are carried out in separate frames. This will delay any SETUP
  46. * packets until the start of the next frame so that this situation is
  47. * unlikely to occur (and makes usbtest happy running with a PXA255 target
  48. * device).
  49. */
  50. #undef BUGGY_PXA2XX_UDC_USBTEST
  51. #undef PTD_TRACE
  52. #undef URB_TRACE
  53. #undef VERBOSE
  54. #undef REGISTERS
  55. /* This enables a memory test on the ISP1362 chip memory to make sure the
  56. * chip access timing is correct.
  57. */
  58. #undef CHIP_BUFFER_TEST
  59. #include <linux/module.h>
  60. #include <linux/moduleparam.h>
  61. #include <linux/kernel.h>
  62. #include <linux/delay.h>
  63. #include <linux/ioport.h>
  64. #include <linux/sched.h>
  65. #include <linux/slab.h>
  66. #include <linux/errno.h>
  67. #include <linux/init.h>
  68. #include <linux/list.h>
  69. #include <linux/interrupt.h>
  70. #include <linux/usb.h>
  71. #include <linux/usb/isp1362.h>
  72. #include <linux/usb/hcd.h>
  73. #include <linux/platform_device.h>
  74. #include <linux/pm.h>
  75. #include <linux/io.h>
  76. #include <linux/bitmap.h>
  77. #include <asm/irq.h>
  78. #include <asm/system.h>
  79. #include <asm/byteorder.h>
  80. #include <asm/unaligned.h>
  81. static int dbg_level;
  82. #ifdef ISP1362_DEBUG
  83. module_param(dbg_level, int, 0644);
  84. #else
  85. module_param(dbg_level, int, 0);
  86. #define STUB_DEBUG_FILE
  87. #endif
  88. #include "../core/usb.h"
  89. #include "isp1362.h"
  90. #define DRIVER_VERSION "2005-04-04"
  91. #define DRIVER_DESC "ISP1362 USB Host Controller Driver"
  92. MODULE_DESCRIPTION(DRIVER_DESC);
  93. MODULE_LICENSE("GPL");
  94. static const char hcd_name[] = "isp1362-hcd";
  95. static void isp1362_hc_stop(struct usb_hcd *hcd);
  96. static int isp1362_hc_start(struct usb_hcd *hcd);
  97. /*-------------------------------------------------------------------------*/
  98. /*
  99. * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
  100. * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
  101. * completion.
  102. * We don't need a 'disable' counterpart, since interrupts will be disabled
  103. * only by the interrupt handler.
  104. */
  105. static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
  106. {
  107. if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
  108. return;
  109. if (mask & ~isp1362_hcd->irqenb)
  110. isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
  111. isp1362_hcd->irqenb |= mask;
  112. if (isp1362_hcd->irq_active)
  113. return;
  114. isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
  115. }
  116. /*-------------------------------------------------------------------------*/
  117. static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
  118. u16 offset)
  119. {
  120. struct isp1362_ep_queue *epq = NULL;
  121. if (offset < isp1362_hcd->istl_queue[1].buf_start)
  122. epq = &isp1362_hcd->istl_queue[0];
  123. else if (offset < isp1362_hcd->intl_queue.buf_start)
  124. epq = &isp1362_hcd->istl_queue[1];
  125. else if (offset < isp1362_hcd->atl_queue.buf_start)
  126. epq = &isp1362_hcd->intl_queue;
  127. else if (offset < isp1362_hcd->atl_queue.buf_start +
  128. isp1362_hcd->atl_queue.buf_size)
  129. epq = &isp1362_hcd->atl_queue;
  130. if (epq)
  131. DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
  132. else
  133. pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
  134. return epq;
  135. }
  136. static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
  137. {
  138. int offset;
  139. if (index * epq->blk_size > epq->buf_size) {
  140. pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
  141. epq->buf_size / epq->blk_size);
  142. return -EINVAL;
  143. }
  144. offset = epq->buf_start + index * epq->blk_size;
  145. DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
  146. return offset;
  147. }
  148. /*-------------------------------------------------------------------------*/
  149. static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
  150. int mps)
  151. {
  152. u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
  153. xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
  154. if (xfer_size < size && xfer_size % mps)
  155. xfer_size -= xfer_size % mps;
  156. return xfer_size;
  157. }
/*
 * Reserve a contiguous run of PTD buffer blocks in @epq large enough for a
 * PTD header plus @len payload bytes for endpoint @ep.
 *
 * On success the claimed blocks are marked in epq->buf_map, ep->ptd_offset /
 * ep->ptd_index / ep->num_ptds are updated, and the starting block index is
 * returned. Returns -ENOMEM if no blocks are free at all, -EOVERFLOW if no
 * contiguous run of the required size exists.
 *
 * Caller context: driver-internal queue management; the endpoint must not
 * already own PTD buffers (ep->num_ptds == 0 is asserted).
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* header + payload, rounded up to whole blocks */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* a second claim without an intervening release is a driver bug */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		       epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	/* find a contiguous run of num_ptds free blocks in the allocation map */
	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
	return found;
}
/*
 * Return the PTD buffer blocks owned by @ep back to @epq.
 *
 * Each released block is cleared in buf_map (free for reallocation) and set
 * in skip_map so the controller will not process the now-stale PTD slot.
 * Resets ep's PTD bookkeeping (num_ptds, ptd_offset, ptd_index) afterwards.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int index = ep->ptd_index;
	int last = ep->ptd_index + ep->num_ptds;

	/* releasing past the end of the queue indicates corrupted bookkeeping */
	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		       __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		       ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		       epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	for (; index < last; index++) {
		__clear_bit(index, &epq->buf_map);	/* block is free again */
		__set_bit(index, &epq->skip_map);	/* controller must skip it */
	}
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* mark ep as owning no PTD buffers */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
  217. /*-------------------------------------------------------------------------*/
  218. /*
  219. Set up PTD's.
  220. */
/*
 * Fill in ep->ptd (and ep->data/ep->length) for the next transaction of
 * @urb on endpoint @ep, according to ep->nextpid:
 *
 *   USB_PID_IN / USB_PID_OUT - data stage; transfer length is capped per
 *       pipe type (one packet for control, the iso frame descriptor length
 *       for isochronous, otherwise whatever fits the queue's free space).
 *   USB_PID_SETUP - SETUP stage; sends the 8-byte usb_ctrlrequest.
 *   USB_PID_ACK   - status stage of a control transfer; zero-length packet
 *       in the direction opposite to the data stage.
 *
 * @fno is the frame number used for isochronous scheduling (PTD_SF_ISO).
 * The assembled PTD is written to the chip later by isp1362_write_ptd().
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still to be transferred */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default payload position: resume where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* status stage always uses DATA1 and the opposite direction */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* encode the PTD header fields for the chip */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		/* periodic scheduling: start frame and polling rate */
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
  297. static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
  298. struct isp1362_ep_queue *epq)
  299. {
  300. struct ptd *ptd = &ep->ptd;
  301. int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
  302. _BUG_ON(ep->ptd_offset < 0);
  303. prefetch(ptd);
  304. isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
  305. if (len)
  306. isp1362_write_buffer(isp1362_hcd, ep->data,
  307. ep->ptd_offset + PTD_HEADER_SIZE, len);
  308. dump_ptd(ptd);
  309. dump_ptd_out_data(ptd, ep->data);
  310. }
/*
 * Read back a completed PTD (header and, for IN transfers, received data)
 * from the chip into ep->ptd / ep->data, and take @ep off the queue's
 * active list. For OUT transfers or when no bytes were received, only the
 * header is read.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	/* fetch the header first: it tells us how many bytes to read */
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
		       ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
  339. /*
  340. * INT PTDs will stay in the chip until data is available.
  341. * This function will remove a PTD from the chip when the URB is dequeued.
  342. * Must be called with the spinlock held and IRQs disabled
  343. */
/*
 * Remove @ep's PTD from the chip when its URB is dequeued.
 *
 * The endpoint is queued on remove_list and the SOF interrupt is enabled so
 * the interrupt handler performs the actual cleanup at the next frame
 * boundary. For ATL/INTL queues the PTD's skip bit is also set (and written
 * to the corresponding HC*SKIP register) so the controller stops processing
 * it; ISO queues have no skip register, indicated by ptd_index < 0.
 *
 * Must be called with the spinlock held and IRQs disabled (see the comment
 * preceding this function in the original file).
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* every slot skipped: deactivate the whole ATL buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		/* every slot skipped: deactivate the whole INTL buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
  379. /*
  380. Take done or failed requests out of schedule. Give back
  381. processed urbs.
  382. */
/*
 * Take a done or failed request out of the schedule and give the URB back
 * to the USB core with @status.
 *
 * The HCD lock is dropped around usb_hcd_giveback_urb() (the core may
 * resubmit from the completion handler) and retaken afterwards - hence the
 * sparse __releases/__acquires annotations. If the endpoint has no more
 * queued URBs it is descheduled: async endpoints are unlinked from the
 * schedule list, periodic endpoints additionally return their bandwidth
 * load and reset their branch slot.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* control endpoints restart at the SETUP stage for the next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
		usb_pipeint(urb->pipe) ? "int" :
		usb_pipebulk(urb->pipe) ? "bulk" :
		"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);

	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* giveback must run without the HCD lock held */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}

	if (ep->interval) {
		/* periodic deschedule */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
  425. /*
  426. * Analyze transfer results, handle partial transfers and errors
  427. */
/*
 * Analyze the completion status of @ep's just-read-back PTD, handle partial
 * transfers and errors, advance the endpoint's PID state machine, and
 * finish the URB when it is complete or has failed.
 *
 * Error policy: data underrun is special-cased (allowed underrun is treated
 * as success; forbidden underrun finishes the data stage, with control
 * transfers still running their status stage). Other errors finish the URB
 * after 3 consecutive failures, or immediately for STALL / data overrun.
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	/* -EINPROGRESS means "URB not finished yet" throughout this function */
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		/* chip never touched the PTD; treat like an unresponsive device */
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				/* save the data underrun error code for later and
				 * procede with the status stage
				 */
				urb->actual_length += PTD_GET_COUNT(ptd);
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* give up after 3 errors, or immediately on STALL/overrun */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done unless a trailing ZLP is still required */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* choose next stage based on whether a data stage exists */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
/*
 * Process all endpoints queued on remove_list by remove_ptd(): release
 * their PTD buffers (for queues with a skip register, i.e. ptd_index >= 0),
 * finish any still-queued URB with -ESHUTDOWN, and drop the endpoint from
 * the active and remove lists. Invoked from the SOF interrupt path (see
 * remove_ptd(), which enables HCuPINT_SOF for this purpose).
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
  603. static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
  604. {
  605. if (count > 0) {
  606. if (count < isp1362_hcd->atl_queue.ptd_count)
  607. isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
  608. isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
  609. isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
  610. isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
  611. } else
  612. isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
  613. }
/*
 * Activate processing of the INTL (interrupt transfer) queue: arm the INTL
 * interrupt, mark the INTL buffer active and publish the current skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
  620. static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
  621. {
  622. isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
  623. isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
  624. HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
  625. }
  626. static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
  627. struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
  628. {
  629. int index = epq->free_ptd;
  630. prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
  631. index = claim_ptd_buffers(epq, ep, ep->length);
  632. if (index == -ENOMEM) {
  633. DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
  634. ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
  635. return index;
  636. } else if (index == -EOVERFLOW) {
  637. DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
  638. __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
  639. epq->buf_map, epq->skip_map);
  640. return index;
  641. } else
  642. BUG_ON(index < 0);
  643. list_add_tail(&ep->active, &epq->active);
  644. DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
  645. ep, ep->num_req, ep->length, &epq->active);
  646. DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
  647. ep->ptd_offset, ep, ep->num_req);
  648. isp1362_write_ptd(isp1362_hcd, ep, epq);
  649. __clear_bit(ep->ptd_index, &epq->skip_map);
  650. return 0;
  651. }
/*
 * Walk the async schedule and submit a PTD for every endpoint that is not
 * already active on the ATL queue.
 *
 * -ENOMEM from submit_req() stops the scan (no buffer blocks left at all),
 * -EOVERFLOW only skips the too-large request and continues with smaller
 * ones; both set 'defer' so enable_atl_transfers() falls back to the SOF
 * interrupt for a retry next frame. The schedule list head is rotated each
 * pass to avoid starving endpoints at the tail.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}

	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
/*
 * Walk the periodic schedule and submit a PTD for every interrupt endpoint
 * that is not already active on the INTL queue. -ENOMEM stops the scan,
 * -EOVERFLOW skips the oversized request; unlike the ATL path there is no
 * deferral, the queue is simply enabled when anything was submitted.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		/* debug aid: report only when the submitted count changes */
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
  735. static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
  736. {
  737. u16 ptd_offset = ep->ptd_offset;
  738. int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
  739. DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
  740. ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
  741. ptd_offset += num_ptds * epq->blk_size;
  742. if (ptd_offset < epq->buf_start + epq->buf_size)
  743. return ptd_offset;
  744. else
  745. return -ENOMEM;
  746. }
/*
 * Fill the current ISTL (isochronous) buffer with PTDs for all URBs due in
 * the next frame, then activate it. The signed difference between the
 * current frame number and urb->start_frame decides each URB's fate:
 * elapsed URBs are finished with -EOVERFLOW, URBs due next frame (diff ==
 * -1, one frame early thanks to the chip's double buffering) are packed
 * sequentially into the buffer. If the other ISTL buffer is not yet marked
 * full, the function loops (via 'fill2') to fill it for the frame after.
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	/* don't race with finish_transfers() on the same buffer */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	/* buffer still owned by the controller */
	if (!list_empty(&epq->active))
		return;

	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
				     ep->num_req, epq->name);
				break;
			}
		}
	}
	/* write the collected PTDs to the chip; the last one gets PTD_LAST_MSK */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
/*
 * Post-process completed PTDs of the ATL or INTL queue.
 *
 * @done_map: bitmap of PTD indices reported done by the controller
 *            (consumed bit-by-bit; must be clear when all active PTDs
 *            have been matched).
 *
 * For every active endpoint whose PTD index is set in @done_map: read
 * the PTD back from chip memory, release its buffer space, drop it from
 * any pending-removal list and run postproc_ep() on it.
 *
 * epq->finishing guards against concurrent refilling of the same queue
 * (see the atomic_read() checks in the start_*_transfers functions).
 *
 * Caller must hold isp1362_hcd->lock with IRQs off.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;	/* remember a free slot for reuse */
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			if (!list_empty(&ep->remove_list)) {
				/* completion beat the scheduled unlink; cancel it */
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* stop early once every done bit has been consumed */
		if (!done_map)
			break;
	}
	if (done_map)
		/* controller reported a PTD done that we have no active ep for */
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
		     epq->skip_map);
	atomic_dec(&epq->finishing);
}
/*
 * Post-process all active PTDs of an isochronous (ISTL) queue.
 *
 * Unlike finish_transfers() there is no done_map here: once an ISTL
 * buffer has been processed by the controller, every PTD queued in it
 * is complete, so each active endpoint is read back and post-processed
 * unconditionally.
 *
 * Caller must hold isp1362_hcd->lock with IRQs off.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* ISTL queues are expected to use blk_size == 0 (no block allocator) */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
/*
 * Main interrupt handler.
 *
 * Reads HCuPINT, masks it with the currently enabled interrupt sources,
 * acknowledges them, and services each pending condition in turn:
 * SOF, the two ISTL buffers, INTL, ATL, OPR (OHCI-level interrupts),
 * SUSPEND and CLKRDY.  svc_mask tracks which acknowledged bits were
 * actually serviced so unexpected sources can be reported.
 *
 * Interrupts are globally disabled on the chip (HCuPINTENB = 0) for the
 * duration of the handler and re-enabled from isp1362_hcd->irqenb on exit.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never be re-entered */
	BUG_ON(isp1362_hcd->irq_active++);

	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);	/* ack them */
	svc_mask = irqstat;

	if (irqstat & HCuPINT_SOF) {
		/* SOF is one-shot: re-armed only when someone needs it */
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* ATL work still pending; re-arm ATL processing */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		/* ISTL0 completion must only occur while flip == 0 */
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		/* ISTL1 completion must only occur while flip == 1 */
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		/* both buffers done at once would mean we fell behind a frame */
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* skip all finished PTDs until they are re-queued */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		/* skip all finished PTDs until they are re-queued */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	if (irqstat & HCuPINT_OPR) {
		/* OHCI-level interrupt: demultiplex HCINTSTAT */
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root hub status for hub_status_data/hub_control */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);	/* ack */
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		/* one-shot: clock-ready only matters once after wakeup */
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
  1023. /*-------------------------------------------------------------------------*/
  1024. #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
  1025. static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
  1026. {
  1027. int i, branch = -ENOSPC;
  1028. /* search for the least loaded schedule branch of that interval
  1029. * which has enough bandwidth left unreserved.
  1030. */
  1031. for (i = 0; i < interval; i++) {
  1032. if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
  1033. int j;
  1034. for (j = i; j < PERIODIC_SIZE; j += interval) {
  1035. if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
  1036. pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
  1037. load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
  1038. break;
  1039. }
  1040. }
  1041. if (j < PERIODIC_SIZE)
  1042. continue;
  1043. branch = i;
  1044. }
  1045. }
  1046. return branch;
  1047. }
  1048. /* NB! ALL the code above this point runs with isp1362_hcd->lock
  1049. held, irqs off
  1050. */
  1051. /*-------------------------------------------------------------------------*/
/*
 * hc_driver.urb_enqueue: submit an URB to the controller.
 *
 * Allocates and initializes a per-endpoint state structure on first use
 * (outside the spinlock), links the URB, places the endpoint on the
 * appropriate schedule (async, periodic or isoc) and kicks the matching
 * transfer engine.
 *
 * Returns 0 on success or a negative errno (-ENOSPC for isochronous,
 * which this build rejects up front; -ENODEV for a dead/disabled port;
 * -ENOMEM on allocation failure; scheduling errors from balance()).
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	/* isochronous support is compiled out in this configuration */
	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
		usb_pipeint(pipe) ? "int" :
		usb_pipebulk(pipe) ? "bulk" :
		"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);	/* kfree(NULL) is a no-op if ep was reused */
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		/* endpoint already initialized by an earlier submission */
		ep = hep->hcpriv;
	} else {
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;	/* no PTD buffer assigned yet */
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			ep->branch = PERIODIC_SIZE;	/* marks "not scheduled yet" */
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* pick the first frame >= now+8 that matches
				 * the interval/branch alignment */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the transfer engine matching the pipe type */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);

 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
/*
 * hc_driver.urb_dequeue: cancel a submitted URB.
 *
 * If the URB is at the head of its endpoint queue and its PTD is active
 * on the controller, the PTD is queued for removal and completion is
 * deferred to the interrupt handler (urb is set to NULL to skip the
 * immediate finish below).  Otherwise the request is finished right
 * away with the given @status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		/* URB was never fully enqueued */
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				urb = NULL;	/* completion deferred to IRQ handler */
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
/*
 * hc_driver.endpoint_disable: release per-endpoint state.
 *
 * If the endpoint still has an active PTD, it is queued for removal and
 * we busy-wait (msleep loop) for the interrupt handler to drain the
 * active list before freeing the endpoint structure.
 * Must not be called from interrupt context (sleeps).
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* schedule removal unless one is already pending */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
  1282. static int isp1362_get_frame(struct usb_hcd *hcd)
  1283. {
  1284. struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
  1285. u32 fmnum;
  1286. unsigned long flags;
  1287. spin_lock_irqsave(&isp1362_hcd->lock, flags);
  1288. fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
  1289. spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
  1290. return (int)fmnum;
  1291. }
  1292. /*-------------------------------------------------------------------------*/
  1293. /* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * hc_driver.hub_status_data: report root-hub and per-port status
 * change bits (cached by the OPR interrupt handler) into @buf.
 * Returns non-zero if anything changed, 0 otherwise, or -ESHUTDOWN
 * when the controller is not running.
 */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now, if we are scheduled to be
	   called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);	/* port status bits start at bit 1 */
			continue;
		}

		/* NOTE(review): this trailing 'continue' is a no-op (last
		 * statement of the loop body); presumably a remnant of code
		 * that once followed it. Harmless. */
		if (!(status & RH_PS_CCS))
			continue;
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return changed;
}
/*
 * Build the USB hub descriptor for the root hub from the cached
 * HCRHDESCA register value (port count in the low bits, hub
 * characteristics in bits 8..12, power-on-to-power-good in bits 24..31).
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;	/* USB hub descriptor type */
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = reg & 0x3;	/* NDP field: at most 2 ports */
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
	/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
	desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->bitmap[1] = ~0;	/* all ports use the legacy power control mask */

	DBG(3, "%s: exit\n", __func__);
}
  1345. /* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * hc_driver.hub_control: handle root-hub control requests.
 *
 * Port feature writes go straight to the HCRHPORTx registers; the
 * cached rhport[] values are refreshed after each write.  Unknown
 * requests fall through to the 'error' label and report a protocol
 * stall (-EPIPE), as the hub-class spec requires.
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			_DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through */
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			/* nothing to do for these on this hardware */
			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* root hub: no local power change / overcurrent to report */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* wIndex is 1-based per the hub-class spec */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature selector to the write-to-clear bit of HCRHPORTx */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			_DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);

			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any current reset finishes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				/* device gone mid-reset: stop early */
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				/* drop the lock while sleeping */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		_DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
  1504. #ifdef CONFIG_PM
/*
 * hc_driver.bus_suspend (CONFIG_PM): put the root hub into the
 * OHCI SUSPEND state.
 *
 * Sequence: refuse/repair bogus HCFS states, quiesce all four transfer
 * queues (skip everything, wait for a SOF, drain completions), clear
 * pending OHCI interrupts, then write OHCI_USB_SUSPEND into HCCONTROL
 * and verify that the controller actually took it.
 *
 * Returns 0 on success, -EBUSY if the controller refuses to suspend or
 * needs a full reinit.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* honour the post-resume/suspend settle time */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warning("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warning("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all pending PTDs and mask chip interrupts */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		/* wait up to 2ms for the next start-of-frame */
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);	/* let in-flight transfers complete */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
	    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* ack everything that is still pending */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify the controller accepted the SUSPEND state */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		    isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
/*
 * Root hub resume handler (hc_driver->bus_resume).
 *
 * Reads back HCCONTROL and, depending on the current OHCI functional
 * state, either initiates a resume transition, notes an in-progress
 * remote wakeup, or detects that power was lost (RESET state) and
 * restarts the controller from scratch.  After the state switch, all
 * suspended root hub ports are forced to resume and the controller is
 * finally put back into the operational state.
 *
 * Returns 0 on success or the result of isp1362_hc_start() when a
 * full restart was required.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	/* honor the minimum interval set by the previous state change */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warning("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			/* normal case: kick off the SUSPEND -> RESUME transition */
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* controller is already running; nothing to transition */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:		/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		/* power was lost while suspended: reinitialize completely */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* RH_A_NDP = number of downstream ports reported by the chip */
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
  1666. #else
  1667. #define isp1362_bus_suspend NULL
  1668. #define isp1362_bus_resume NULL
  1669. #endif
  1670. /*-------------------------------------------------------------------------*/
  1671. #ifdef STUB_DEBUG_FILE
/* STUB_DEBUG_FILE: procfs debug support compiled out — no-op stub */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
/* STUB_DEBUG_FILE: procfs debug support compiled out — no-op stub */
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
  1678. #else
  1679. #include <linux/proc_fs.h>
  1680. #include <linux/seq_file.h>
  1681. static void dump_irq(struct seq_file *s, char *label, u16 mask)
  1682. {
  1683. seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
  1684. mask & HCuPINT_CLKRDY ? " clkrdy" : "",
  1685. mask & HCuPINT_SUSP ? " susp" : "",
  1686. mask & HCuPINT_OPR ? " opr" : "",
  1687. mask & HCuPINT_EOT ? " eot" : "",
  1688. mask & HCuPINT_ATL ? " atl" : "",
  1689. mask & HCuPINT_SOF ? " sof" : "");
  1690. }
  1691. static void dump_int(struct seq_file *s, char *label, u32 mask)
  1692. {
  1693. seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
  1694. mask & OHCI_INTR_MIE ? " MIE" : "",
  1695. mask & OHCI_INTR_RHSC ? " rhsc" : "",
  1696. mask & OHCI_INTR_FNO ? " fno" : "",
  1697. mask & OHCI_INTR_UE ? " ue" : "",
  1698. mask & OHCI_INTR_RD ? " rd" : "",
  1699. mask & OHCI_INTR_SF ? " sof" : "",
  1700. mask & OHCI_INTR_SO ? " so" : "");
  1701. }
  1702. static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
  1703. {
  1704. seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
  1705. mask & OHCI_CTRL_RWC ? " rwc" : "",
  1706. mask & OHCI_CTRL_RWE ? " rwe" : "",
  1707. ({
  1708. char *hcfs;
  1709. switch (mask & OHCI_CTRL_HCFS) {
  1710. case OHCI_USB_OPER:
  1711. hcfs = " oper";
  1712. break;
  1713. case OHCI_USB_RESET:
  1714. hcfs = " reset";
  1715. break;
  1716. case OHCI_USB_RESUME:
  1717. hcfs = " resume";
  1718. break;
  1719. case OHCI_USB_SUSPEND:
  1720. hcfs = " suspend";
  1721. break;
  1722. default:
  1723. hcfs = " ?";
  1724. }
  1725. hcfs;
  1726. }));
  1727. }
/*
 * Dump all readable ISP1362 registers (OHCI-compatible set first, then
 * the ISP1362-specific buffer management registers) into the seq_file.
 * Caller must hold isp1362_hcd->lock (see proc_isp1362_show()).
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* OHCI operational registers */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* ISP1362-specific configuration/status registers */
	seq_printf(s, "HCHWCFG    [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT    [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* reading HCDIRDATA has side effects on the chip's direct-access
	 * pointer, so it is left disabled here */
	seq_printf(s, "HCDIRDATA  [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	/* ISTL (isochronous) buffer registers */
	seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* INTL (interrupt transfer) buffer registers */
	seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* ATL (async control/bulk) buffer registers */
	seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	/* reading HCATLDONE would clear pending done bits; disabled */
	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
/*
 * seq_file show callback for /proc/driver/isp1362.
 *
 * Prints driver identification, buffer-alignment statistics, the
 * register dump and the currently scheduled async/periodic/iso
 * endpoints with their queued URBs.  Register access and the endpoint
 * lists are protected by isp1362_hcd->lock for the duration of the
 * dump.  Always returns 0.
 */
static int proc_isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-IRQ-source counters, only printed for sources that fired */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);

	/* async (control/bulk) schedule with queued URBs per endpoint */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   char *s;
				   /* decode the next PID this ep will issue */
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   };
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	/* periodic (interrupt) schedule */
	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	/* isochronous schedule */
	seq_printf(s, "ISO:\n");

	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
  1908. static int proc_isp1362_open(struct inode *inode, struct file *file)
  1909. {
  1910. return single_open(file, proc_isp1362_show, PDE(inode)->data);
  1911. }
/* file_operations for /proc/driver/isp1362, backed by seq_file */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* expect just one isp1362_hcd per system */
/* path is relative to /proc */
static const char proc_filename[] = "driver/isp1362";
  1920. static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
  1921. {
  1922. struct proc_dir_entry *pde;
  1923. pde = create_proc_entry(proc_filename, 0, NULL);
  1924. if (pde == NULL) {
  1925. pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
  1926. return;
  1927. }
  1928. pde->proc_fops = &proc_ops;
  1929. pde->data = isp1362_hcd;
  1930. isp1362_hcd->pde = pde;
  1931. }
  1932. static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
  1933. {
  1934. if (isp1362_hcd->pde)
  1935. remove_proc_entry(proc_filename, NULL);
  1936. }
  1937. #endif
  1938. /*-------------------------------------------------------------------------*/
  1939. static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
  1940. {
  1941. int tmp = 20;
  1942. isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
  1943. isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
  1944. while (--tmp) {
  1945. mdelay(1);
  1946. if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
  1947. break;
  1948. }
  1949. if (!tmp)
  1950. pr_err("Software reset timeout\n");
  1951. }
/* Locked wrapper around __isp1362_sw_reset() for use outside IRQ paths. */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	__isp1362_sw_reset(isp1362_hcd);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
  1959. static int isp1362_mem_config(struct usb_hcd *hcd)
  1960. {
  1961. struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
  1962. unsigned long flags;
  1963. u32 total;
  1964. u16 istl_size = ISP1362_ISTL_BUFSIZE;
  1965. u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
  1966. u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
  1967. u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
  1968. u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
  1969. u16 atl_size;
  1970. int i;
  1971. WARN_ON(istl_size & 3);
  1972. WARN_ON(atl_blksize & 3);
  1973. WARN_ON(intl_blksize & 3);
  1974. WARN_ON(atl_blksize < PTD_HEADER_SIZE);
  1975. WARN_ON(intl_blksize < PTD_HEADER_SIZE);
  1976. BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
  1977. if (atl_buffers > 32)
  1978. atl_buffers = 32;
  1979. atl_size = atl_buffers * atl_blksize;
  1980. total = atl_size + intl_size + istl_size;
  1981. dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
  1982. dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
  1983. istl_size / 2, istl_size, 0, istl_size / 2);
  1984. dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
  1985. ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
  1986. intl_size, istl_size);
  1987. dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
  1988. atl_buffers, atl_blksize - PTD_HEADER_SIZE,
  1989. atl_size, istl_size + intl_size);
  1990. dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
  1991. ISP1362_BUF_SIZE - total);
  1992. if (total > ISP1362_BUF_SIZE) {
  1993. dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
  1994. __func__, total, ISP1362_BUF_SIZE);
  1995. return -ENOMEM;
  1996. }
  1997. total = istl_size + intl_size + atl_size;
  1998. spin_lock_irqsave(&isp1362_hcd->lock, flags);
  1999. for (i = 0; i < 2; i++) {
  2000. isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
  2001. isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
  2002. isp1362_hcd->istl_queue[i].blk_size = 4;
  2003. INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
  2004. snprintf(isp1362_hcd->istl_queue[i].name,
  2005. sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
  2006. DBG(3, "%s: %5s buf $%04x %d\n", __func__,
  2007. isp1362_hcd->istl_queue[i].name,
  2008. isp1362_hcd->istl_queue[i].buf_start,
  2009. isp1362_hcd->istl_queue[i].buf_size);
  2010. }
  2011. isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
  2012. isp1362_hcd->intl_queue.buf_start = istl_size;
  2013. isp1362_hcd->intl_queue.buf_size = intl_size;
  2014. isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
  2015. isp1362_hcd->intl_queue.blk_size = intl_blksize;
  2016. isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
  2017. isp1362_hcd->intl_queue.skip_map = ~0;
  2018. INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
  2019. isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
  2020. isp1362_hcd->intl_queue.buf_size);
  2021. isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
  2022. isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
  2023. isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
  2024. isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
  2025. 1 << (ISP1362_INTL_BUFFERS - 1));
  2026. isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
  2027. isp1362_hcd->atl_queue.buf_size = atl_size;
  2028. isp1362_hcd->atl_queue.buf_count = atl_buffers;
  2029. isp1362_hcd->atl_queue.blk_size = atl_blksize;
  2030. isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
  2031. isp1362_hcd->atl_queue.skip_map = ~0;
  2032. INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
  2033. isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
  2034. isp1362_hcd->atl_queue.buf_size);
  2035. isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
  2036. isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
  2037. isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
  2038. isp1362_write_reg32(isp1362_hcd, HCATLLAST,
  2039. 1 << (atl_buffers - 1));
  2040. snprintf(isp1362_hcd->atl_queue.name,
  2041. sizeof(isp1362_hcd->atl_queue.name), "ATL");
  2042. snprintf(isp1362_hcd->intl_queue.name,
  2043. sizeof(isp1362_hcd->intl_queue.name), "INTL");
  2044. DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
  2045. isp1362_hcd->intl_queue.name,
  2046. isp1362_hcd->intl_queue.buf_start,
  2047. ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
  2048. isp1362_hcd->intl_queue.buf_size);
  2049. DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
  2050. isp1362_hcd->atl_queue.name,
  2051. isp1362_hcd->atl_queue.buf_start,
  2052. atl_buffers, isp1362_hcd->atl_queue.blk_size,
  2053. isp1362_hcd->atl_queue.buf_size);
  2054. spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
  2055. return 0;
  2056. }
/*
 * hc_driver->reset: bring the chip out of reset.
 *
 * Uses the board-specific reset/clock hooks when provided (assert
 * reset, enable the clock, deassert reset), otherwise falls back to a
 * software reset via isp1362_sw_reset().  Afterwards polls HCuPINT
 * for the clock-ready bit for up to 100ms.
 *
 * Returns 0 on success, -ENODEV if the clock never becomes ready.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;
	unsigned long flags;
	int clkrdy = 0;

	pr_info("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* hardware reset via the platform hook */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the clock-ready interrupt status bit */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
/*
 * hc_driver->stop: shut the controller down.
 *
 * Disables all chip interrupts, switches off root hub port power,
 * resets the chip (via the board hook if available, software reset
 * otherwise) and finally stops the board clock.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_info("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all interrupt sources at the chip level */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		__isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
  2115. #ifdef CHIP_BUFFER_TEST
/*
 * CHIP_BUFFER_TEST: sanity check of the chip's internal buffer memory.
 *
 * Allocates a reference pattern and a read-back buffer, then:
 *  1. exercises short transfers (0..7 bytes) at source offsets 0..3 to
 *     catch alignment problems in the buffer access routines,
 *  2. does a full-size write/read/compare of the whole buffer memory,
 *  3. for each of 256 target offsets, clears the buffer, writes a PTD
 *     header (+ payload, currently test_size == 0) at that offset and
 *     verifies the read-back, retrying once on mismatch.
 *
 * Returns 0 when all checks pass, -ENODEV on a persistent mismatch.
 * An allocation failure silently returns 0 (test skipped).
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		/* second half of the allocation is the read-back buffer */
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		/* fill ref and tst with distinct, position-dependent patterns */
		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* full-size write/read/verify of the complete buffer memory */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			/* clear chip memory via a zeroed tst buffer, then read
			 * it back into tst and check both halves are zero */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
				     __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
  2196. #endif
/*
 * hc_driver->start: initialize the controller and go operational.
 *
 * Verifies the chip ID, optionally self-tests the buffer memory,
 * programs the hardware configuration from the platform data, sets up
 * the buffer memory layout (isp1362_mem_config()), configures the
 * root hub descriptors, enables interrupts and finally switches the
 * OHCI core to the operational state with global port power enabled.
 *
 * Returns 0 on success, -ENODEV on a bad chip ID or failed self-test,
 * or the error from isp1362_mem_config().
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_info("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: 16 bit data bus, interrupt enabled, plus the
	 * board-specific polarity/trigger/pulldown options */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	/* DMA disabled; this driver uses PIO buffer access */
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf: power switching mode and power-on-to-power-good
	 * time (POTPGT, default 25 units if the board doesn't specify) */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* toggle OCPM to latch the overcurrent protection mode, then
	 * read back what the chip actually accepted */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return 0;
}
  2283. /*-------------------------------------------------------------------------*/
/*
 * HC driver operations for the ISP1362.  bus_suspend/bus_resume are
 * #defined to NULL when the PM support above is compiled out.
 */
static struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	/* full/low speed controller with memory-mapped registers */
	.flags = HCD_USB11 | HCD_MEMORY,

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};
  2302. /*-------------------------------------------------------------------------*/
  2303. static int __devexit isp1362_remove(struct platform_device *pdev)
  2304. {
  2305. struct usb_hcd *hcd = platform_get_drvdata(pdev);
  2306. struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
  2307. struct resource *res;
  2308. remove_debug_file(isp1362_hcd);
  2309. DBG(0, "%s: Removing HCD\n", __func__);
  2310. usb_remove_hcd(hcd);
  2311. DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
  2312. isp1362_hcd->data_reg);
  2313. iounmap(isp1362_hcd->data_reg);
  2314. DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
  2315. isp1362_hcd->addr_reg);
  2316. iounmap(isp1362_hcd->addr_reg);
  2317. res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  2318. DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
  2319. if (res)
  2320. release_mem_region(res->start, resource_size(res));
  2321. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2322. DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
  2323. if (res)
  2324. release_mem_region(res->start, resource_size(res));
  2325. DBG(0, "%s: put_hcd\n", __func__);
  2326. usb_put_hcd(hcd);
  2327. DBG(0, "%s: Done\n", __func__);
  2328. return 0;
  2329. }
/*
 * Bind to an ISP1362 platform device.
 *
 * Expected platform resources (provided by board code):
 *   IORESOURCE_MEM 0 - data register window
 *   IORESOURCE_MEM 1 - address register window
 *   IORESOURCE_IRQ 0 - controller interrupt (trigger type taken from
 *                      the resource flags)
 *
 * Acquires the two memory regions, maps both register windows, creates
 * and registers the usb_hcd.  The errN labels unwind in strict reverse
 * order of acquisition; each "goto errN" releases exactly what was
 * successfully acquired before the failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	struct resource *irq_res;
	unsigned int irq_flags = 0;

	/* basic sanity checks first. board-specific init logic should
	 * have initialized this the three resources and probably board
	 * specific platform_data. we don't probe for IRQs, and do only
	 * minimal sanity checking.
	 */
	if (pdev->num_resources < 3) {
		retval = -ENODEV;
		goto err1;
	}

	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!addr || !data || !irq_res) {
		retval = -ENODEV;
		goto err1;
	}
	irq = irq_res->start;

	/* this driver does PIO only; refuse devices configured for DMA */
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		retval = -ENODEV;
		goto err1;
	}

	/* claim and map the address register window ... */
	if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
		retval = -EBUSY;
		goto err1;
	}
	addr_reg = ioremap(addr->start, resource_size(addr));
	if (addr_reg == NULL) {
		retval = -ENOMEM;
		goto err2;
	}

	/* ... then the data register window */
	if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
		retval = -EBUSY;
		goto err3;
	}
	data_reg = ioremap(data->start, resource_size(data));
	if (data_reg == NULL) {
		retval = -ENOMEM;
		goto err4;
	}

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err5;
	}
	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = pdev->dev.platform_data;
#if USE_PLATFORM_DELAY
	/* register access timing (see file header) relies on a
	 * board-supplied delay hook in this configuration
	 */
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err6;
	}
#endif

	/* translate the IRQ resource's trigger flags for request_irq() */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
	if (retval != 0)
		goto err6;
	pr_info("%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

	/* unwind: each label undoes the step acquired just before its goto */
 err6:
	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
	usb_put_hcd(hcd);
 err5:
	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
	iounmap(data_reg);
 err4:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
	release_mem_region(data->start, resource_size(data));
 err3:
	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
	iounmap(addr_reg);
 err2:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
	release_mem_region(addr->start, resource_size(addr));
 err1:
	pr_err("%s: init error, %d\n", __func__, retval);

	return retval;
}
  2438. #ifdef CONFIG_PM
  2439. static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
  2440. {
  2441. struct usb_hcd *hcd = platform_get_drvdata(pdev);
  2442. struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
  2443. unsigned long flags;
  2444. int retval = 0;
  2445. DBG(0, "%s: Suspending device\n", __func__);
  2446. if (state.event == PM_EVENT_FREEZE) {
  2447. DBG(0, "%s: Suspending root hub\n", __func__);
  2448. retval = isp1362_bus_suspend(hcd);
  2449. } else {
  2450. DBG(0, "%s: Suspending RH ports\n", __func__);
  2451. spin_lock_irqsave(&isp1362_hcd->lock, flags);
  2452. isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
  2453. spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
  2454. }
  2455. if (retval == 0)
  2456. pdev->dev.power.power_state = state;
  2457. return retval;
  2458. }
  2459. static int isp1362_resume(struct platform_device *pdev)
  2460. {
  2461. struct usb_hcd *hcd = platform_get_drvdata(pdev);
  2462. struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
  2463. unsigned long flags;
  2464. DBG(0, "%s: Resuming\n", __func__);
  2465. if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
  2466. DBG(0, "%s: Resume RH ports\n", __func__);
  2467. spin_lock_irqsave(&isp1362_hcd->lock, flags);
  2468. isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
  2469. spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
  2470. return 0;
  2471. }
  2472. pdev->dev.power.power_state = PMSG_ON;
  2473. return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
  2474. }
  2475. #else
  2476. #define isp1362_suspend NULL
  2477. #define isp1362_resume NULL
  2478. #endif
/* Platform driver glue; suspend/resume are NULL when CONFIG_PM is off
 * (see the #ifdef block above).
 */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = __devexit_p(isp1362_remove),

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = (char *)hcd_name,	/* cast: .name is non-const in this API version */
		.owner = THIS_MODULE,
	},
};
  2489. /*-------------------------------------------------------------------------*/
  2490. static int __init isp1362_init(void)
  2491. {
  2492. if (usb_disabled())
  2493. return -ENODEV;
  2494. pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
  2495. return platform_driver_register(&isp1362_driver);
  2496. }
  2497. module_init(isp1362_init);
/* Module exit point: unregister the platform driver (unbinds all devices,
 * which runs isp1362_remove for each).
 */
static void __exit isp1362_cleanup(void)
{
	platform_driver_unregister(&isp1362_driver);
}
module_exit(isp1362_cleanup);