/drivers/usb/host/isp1362-hcd.c

Source: https://gitlab.com/stalker-android/linux-omap3 (file truncated by the viewer; 2884 lines in the full file)


/*
 * ISP1362 HCD (Host Controller Driver) for USB.
 *
 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
 *
 * Derived from the SL811 HCD, rewritten for ISP116x.
 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
 *
 * Portions:
 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
 * Copyright (C) 2004 David Brownell
 */

/*
 * The ISP1362 chip requires a large delay (300ns and 462ns) between
 * accesses to the address and data register.
 * The following timing options exist:
 *
 * 1. Configure your memory controller to add such delays if it can (the best)
 * 2. Implement a platform-specific delay function, possibly
 *    combined with configuring the memory controller; see
 *    include/linux/usb/isp1362.h for more info.
 * 3. Use ndelay (easiest, poorest).
 *
 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
 * platform specific section of isp1362.h to select the appropriate variant.
 *
 * Also note that according to the Philips "ISP1362 Errata" document
 * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
 * is reasserted (even with #CS deasserted) within 132ns after a
 * write cycle to any controller register. If the hardware doesn't
 * implement the recommended fix (gating the #WR with #CS) software
 * must ensure that no further write cycle (not necessarily to the chip!)
 * is issued by the CPU within this interval.
 * For PXA25x this can be ensured by using VLIO with the maximum
 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
 */
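
/*
 * Illustration (not part of the original driver): with USE_PLATFORM_DELAY
 * selected, board code is expected to pass a delay hook through the
 * platform data. A minimal sketch, assuming ndelay() alone satisfies the
 * chip's timing on the target board; the names my_isp1362_delay and
 * my_isp1362_pdata are hypothetical:
 *
 *	static void my_isp1362_delay(struct device *dev, int delay)
 *	{
 *		ndelay(delay);
 *	}
 *
 *	static struct isp1362_platform_data my_isp1362_pdata = {
 *		.delay = my_isp1362_delay,
 *		remaining fields (reset, clock, polarity flags) omitted
 *	};
 */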
#ifdef CONFIG_USB_DEBUG
# define ISP1362_DEBUG
#else
# undef ISP1362_DEBUG
#endif

/*
 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
 * requests are carried out in separate frames. This will delay any SETUP
 * packets until the start of the next frame so that this situation is
 * unlikely to occur (and makes usbtest happy running with a PXA255 target
 * device).
 */
#undef BUGGY_PXA2XX_UDC_USBTEST

#undef PTD_TRACE
#undef URB_TRACE
#undef VERBOSE
#undef REGISTERS

/* This enables a memory test on the ISP1362 chip memory to make sure the
 * chip access timing is correct.
 */
#undef CHIP_BUFFER_TEST

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/bitmap.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#define STUB_DEBUG_FILE
#endif

#include "../core/usb.h"
#include "isp1362.h"

#define DRIVER_VERSION	"2005-04-04"
#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char hcd_name[] = "isp1362-hcd";

static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * When called from the interrupt handler only isp1362_hcd->irqenb is modified,
 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINTENB upon
 * completion.
 * We don't need a 'disable' counterpart, since interrupts will be disabled
 * only by the interrupt handler.
 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}

/*-------------------------------------------------------------------------*/

static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
						     u16 offset)
{
	struct isp1362_ep_queue *epq = NULL;

	if (offset < isp1362_hcd->istl_queue[1].buf_start)
		epq = &isp1362_hcd->istl_queue[0];
	else if (offset < isp1362_hcd->intl_queue.buf_start)
		epq = &isp1362_hcd->istl_queue[1];
	else if (offset < isp1362_hcd->atl_queue.buf_start)
		epq = &isp1362_hcd->intl_queue;
	else if (offset < isp1362_hcd->atl_queue.buf_start +
		 isp1362_hcd->atl_queue.buf_size)
		epq = &isp1362_hcd->atl_queue;

	if (epq)
		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
	else
		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);

	return epq;
}

static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
{
	int offset;

	if (index * epq->blk_size > epq->buf_size) {
		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
			   epq->buf_size / epq->blk_size);
		return -EINVAL;
	}
	offset = epq->buf_start + index * epq->blk_size;
	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);

	return offset;
}

/*-------------------------------------------------------------------------*/

static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;

	return xfer_size;
}
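
/*
 * Worked example (illustrative numbers only, added for clarity): with
 * blk_size 64, PTD_HEADER_SIZE 8 and buf_avail 4, at most
 * 4 * 64 - 8 = 248 bytes fit in the queue. For a 1000 byte bulk request
 * with mps 64, xfer_size starts at 248 (< size) and is rounded down to
 * 192, so the transfer is never cut in the middle of a max-size packet.
 */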
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		       epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
					   num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}

static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int index = ep->ptd_index;
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		       __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		       ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		       epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	for (; index < last; index++) {
		__clear_bit(index, &epq->buf_map);
		__set_bit(index, &epq->skip_map);
	}
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}

/*-------------------------------------------------------------------------*/

/*
 * Set up PTDs.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
				urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}

static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	_BUG_ON(ep->ptd_offset < 0);

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}

static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
		       ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the caller's transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}

/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}

/*
 * Take done or failed requests out of schedule. Give back
 * processed urbs.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
		usb_pipeint(urb->pipe) ? "int" :
		usb_pipebulk(urb->pipe) ? "bulk" :
		"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);

	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}

	if (ep->interval) {
		/* periodic deschedule */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}

/*
 * Analyze transfer results, handle partial transfers and errors
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		       ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately, while for a control transfer
	   we do a STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				/* save the data underrun error code for later and
				 * proceed with the status stage
				 */
				urb->actual_length += PTD_GET_COUNT(ptd);
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			       PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}

static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}

static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
	if (count > 0) {
		if (count < isp1362_hcd->atl_queue.ptd_count)
			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else
		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}

static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}

static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
{
	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
}

static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index = epq->free_ptd;

	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}

static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}

static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}

static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	u16 ptd_offset = ep->ptd_offset;
	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;

	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);

	ptd_offset += num_ptds * epq->blk_size;
	if (ptd_offset < epq->buf_start + epq->buf_size)
		return ptd_offset;
	else
		return -ENOMEM;
}
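
/*
 * Worked example (illustrative numbers only): for ep->length 100 with
 * PTD_HEADER_SIZE 8 and blk_size 64, (100 + 8 + 63) / 64 = 2 blocks are
 * consumed, so the following PTD starts 2 * 64 = 128 bytes further on,
 * or -ENOMEM is returned if that lies beyond the end of the queue buffer.
 */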
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	if (!list_empty(&epq->active))
		return;

	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTDs that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				       __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
					   ep->num_req, epq->name);
				break;
			}
		}
	}
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}

static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
			   epq->skip_map);
	atomic_dec(&epq->finishing);
}

static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}

static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	BUG_ON(isp1362_hcd->irq_active++);

	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	if (irqstat & HCuPINT_SOF) {
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			  HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			  HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);
		svc_mask &= ~HCuPINT_INTL;

		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);
		svc_mask &= ~HCuPINT_ATL;

		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do reset or cleanup here */
		}
		if (intstat & OHCI_INTR_RHSC) {
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}

/*-------------------------------------------------------------------------*/

#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
{
	int i, branch = -ENOSPC;

	/* search for the least loaded schedule branch of that interval
	 * which has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
			int j;

			for (j = i; j < PERIODIC_SIZE; j += interval) {
				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
					       load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
					break;
				}
			}
			if (j < PERIODIC_SIZE)
				continue;
			branch = i;
		}
	}
	return branch;
}
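
/*
 * Worked example (illustrative; assumes PERIODIC_SIZE 32): for
 * interval 4, branches 0..3 are candidates. Branch i is usable only if
 * adding the new load keeps load[i], load[i+4], ..., load[i+28] all at
 * or below MAX_PERIODIC_LOAD; among the usable branches the least
 * loaded one is returned, otherwise -ENOSPC.
 */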
/* NB! ALL the code above this point runs with isp1362_hcd->lock
   held, irqs off
*/

/*-------------------------------------------------------------------------*/

static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
		usb_pipeint(pipe) ? "int" :
		usb_pipebulk(pipe) ? "bulk" :
		"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		ep = hep->hcpriv;
	} else {
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
			    __func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);

 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}

static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
 done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}

static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}

static int isp1362_get_frame(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 fmnum;
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return (int)fmnum;
}

/*-------------------------------------------------------------------------*/

/* Adapted from ohci-hub.c */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now if we are scheduled to be
	   called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
			continue;
		}

		if (!(status & RH_PS_CCS))
			continue;
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return changed;
}

static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = reg & 0x3;
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
	/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
	desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->bitmap[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}

/* Adapted from ohci-hub.c */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			_DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);