PageRenderTime 147ms CodeModel.GetById 14ms app.highlight 118ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/usb/host/isp1362-hcd.c

https://gitlab.com/stalker-android/linux-omap3
C | 2884 lines | 2336 code | 381 blank | 167 comment | 354 complexity | 412aa43275b7a120a83438b23cf5cb38 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/*
   2 * ISP1362 HCD (Host Controller Driver) for USB.
   3 *
   4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
   5 *
   6 * Derived from the SL811 HCD, rewritten for ISP116x.
   7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
   8 *
   9 * Portions:
  10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  11 * Copyright (C) 2004 David Brownell
  12 */
  13
  14/*
  15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
  16 * accesses to the address and data register.
  17 * The following timing options exist:
  18 *
  19 * 1. Configure your memory controller to add such delays if it can (the best)
  20 * 2. Implement platform-specific delay function possibly
  21 *    combined with configuring the memory controller; see
  22 *    include/linux/usb_isp1362.h for more info.
  23 * 3. Use ndelay (easiest, poorest).
  24 *
  25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
  26 * platform specific section of isp1362.h to select the appropriate variant.
  27 *
  28 * Also note that according to the Philips "ISP1362 Errata" document
  29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
  30 * is reasserted (even with #CS deasserted) within 132ns after a
  31 * write cycle to any controller register. If the hardware doesn't
  32 * implement the recommended fix (gating the #WR with #CS) software
  33 * must ensure that no further write cycle (not necessarily to the chip!)
  34 * is issued by the CPU within this interval.
  35
  36 * For PXA25x this can be ensured by using VLIO with the maximum
  37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  38 */
  39
/* Mirror the global USB debug switch into a driver-local macro. */
#ifdef CONFIG_USB_DEBUG
# define ISP1362_DEBUG
#else
# undef ISP1362_DEBUG
#endif

/*
 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
 * requests are carried out in separate frames. This will delay any SETUP
 * packets until the start of the next frame so that this situation is
 * unlikely to occur (and makes usbtest happy running with a PXA255 target
 * device).
 */
#undef BUGGY_PXA2XX_UDC_USBTEST

/* Optional tracing/dump facilities; all disabled by default. */
#undef PTD_TRACE
#undef URB_TRACE
#undef VERBOSE
#undef REGISTERS

/* This enables a memory test on the ISP1362 chip memory to make sure the
 * chip access timing is correct.
 */
#undef CHIP_BUFFER_TEST
  65
  66#include <linux/module.h>
  67#include <linux/moduleparam.h>
  68#include <linux/kernel.h>
  69#include <linux/delay.h>
  70#include <linux/ioport.h>
  71#include <linux/sched.h>
  72#include <linux/slab.h>
  73#include <linux/errno.h>
  74#include <linux/init.h>
  75#include <linux/list.h>
  76#include <linux/interrupt.h>
  77#include <linux/usb.h>
  78#include <linux/usb/isp1362.h>
  79#include <linux/usb/hcd.h>
  80#include <linux/platform_device.h>
  81#include <linux/pm.h>
  82#include <linux/io.h>
  83#include <linux/bitmap.h>
  84
  85#include <asm/irq.h>
  86#include <asm/system.h>
  87#include <asm/byteorder.h>
  88#include <asm/unaligned.h>
  89
/* DBG() verbosity threshold.  Runtime-writable (mode 0644) only in debug
 * builds; otherwise the parameter is registered with perm 0 and a stub
 * debug file is provided via STUB_DEBUG_FILE.
 */
static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#define	STUB_DEBUG_FILE
#endif

#include "../core/usb.h"
#include "isp1362.h"


#define DRIVER_VERSION	"2005-04-04"
#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char hcd_name[] = "isp1362-hcd";

/* forward declarations (defined later in this file) */
static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
 112
 113/*-------------------------------------------------------------------------*/
 114
 115/*
 116 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
 117 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 118 * completion.
 119 * We don't need a 'disable' counterpart, since interrupts will be disabled
 120 * only by the interrupt handler.
 121 */
 122static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 123{
 124	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 125		return;
 126	if (mask & ~isp1362_hcd->irqenb)
 127		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 128	isp1362_hcd->irqenb |= mask;
 129	if (isp1362_hcd->irq_active)
 130		return;
 131	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 132}
 133
 134/*-------------------------------------------------------------------------*/
 135
/*
 * Map a PTD buffer offset back to the queue that owns it.  The four
 * queues occupy consecutive regions of chip buffer memory in the order
 * ISTL0 < ISTL1 < INTL < ATL, so a range comparison against each
 * region's buf_start suffices.  Returns NULL (with a warning) for an
 * offset beyond the end of the ATL region.
 */
static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
						     u16 offset)
{
	struct isp1362_ep_queue *epq = NULL;

	if (offset < isp1362_hcd->istl_queue[1].buf_start)
		epq = &isp1362_hcd->istl_queue[0];
	else if (offset < isp1362_hcd->intl_queue.buf_start)
		epq = &isp1362_hcd->istl_queue[1];
	else if (offset < isp1362_hcd->atl_queue.buf_start)
		epq = &isp1362_hcd->intl_queue;
	else if (offset < isp1362_hcd->atl_queue.buf_start +
		   isp1362_hcd->atl_queue.buf_size)
		epq = &isp1362_hcd->atl_queue;

	if (epq)
		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
	else
		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);

	return epq;
}
 158
 159static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 160{
 161	int offset;
 162
 163	if (index * epq->blk_size > epq->buf_size) {
 164		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
 165		     epq->buf_size / epq->blk_size);
 166		return -EINVAL;
 167	}
 168	offset = epq->buf_start + index * epq->blk_size;
 169	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 170
 171	return offset;
 172}
 173
 174/*-------------------------------------------------------------------------*/
 175
/*
 * Largest transfer length that fits both the PTD limit (MAX_XFER_SIZE)
 * and the buffer space currently free in @epq.  When the transfer has
 * to be split (xfer_size < size), trim to a multiple of the endpoint's
 * max packet size @mps so no premature short packet is produced.
 *
 * NOTE(review): with epq->buf_avail == 0 the expression
 * buf_avail * blk_size - PTD_HEADER_SIZE wraps to a huge size_t, so the
 * clamp silently does not apply; callers then fail later in
 * claim_ptd_buffers() with -ENOMEM -- confirm this is intended.
 */
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;

	return xfer_size;
}
 187
/*
 * Claim a contiguous run of buffer blocks in @epq big enough for a PTD
 * header plus @len payload bytes on behalf of @ep.
 *
 * On success, sets ep->ptd_index, ep->ptd_offset and ep->num_ptds,
 * marks the blocks busy in epq->buf_map, and returns the index of the
 * first claimed block.  Returns -ENOMEM when the queue has no free
 * blocks at all, or -EOVERFLOW when no sufficiently large contiguous
 * run exists.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* blocks needed for header + payload, rounded up to whole blocks */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must not already own PTD buffers when claiming */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
 226
/*
 * Return the buffer blocks owned by @ep to @epq: clear them in
 * buf_map, flag the corresponding slots in skip_map so the controller
 * no longer processes them, and reset the endpoint's PTD bookkeeping.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int index = ep->ptd_index;
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	/* free each block and mark its slot to be skipped by the chip */
	for (; index < last; index++) {
		__clear_bit(index, &epq->buf_map);
		__set_bit(index, &epq->skip_map);
	}
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* endpoint no longer owns any chip buffer space */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
 259
 260/*-------------------------------------------------------------------------*/
 261
/*
 * Fill in the endpoint's PTD header and data pointer for the next
 * transaction of @urb, based on ep->nextpid (IN/OUT/SETUP/ACK).
 * @fno is the frame number used for isochronous PTDs.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still outstanding */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default payload position: resume where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			/* ISO IN: payload location comes from the frame descriptor */
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			/* NOTE(review): ISO OUT indexes iso_frame_desc[0] and keeps
			 * the default ep->data, unlike the IN path which uses [fno]
			 * -- verify this asymmetry is intentional.
			 */
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP always starts with toggle 0 and carries the 8-byte request */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* zero-length status stage; direction is opposite the data stage */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK   len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* assemble the PTD header words */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
 349
/*
 * Copy the PTD header -- and, for OUT-direction transfers, the payload
 * -- from @ep into its reserved area of chip buffer memory.  IN PTDs
 * write only the header; the chip supplies the data later.
 */
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	/* no payload to send for IN transfers */
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	_BUG_ON(ep->ptd_offset < 0);

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}
 367
/*
 * Read back a completed PTD header from chip memory and, for IN
 * transfers, copy the actually received bytes into ep->data.
 * Also removes @ep from the queue's active list.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* nothing more to do unless an IN transfer actually moved data */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			 ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
 399
/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled.
 *
 * The endpoint is queued on remove_list; the actual buffer release is
 * deferred to the SOF interrupt (see finish_unlinks()), after the chip
 * has honoured the skip bits set here.
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)

{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* all slots skipped: deactivate the whole buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		/* all slots skipped: deactivate the whole buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
 447
/*
 * Take done or failed requests out of schedule. Give back
 * processed urbs.
 *
 * Must be called with isp1362_hcd->lock held; the lock is dropped
 * around usb_hcd_giveback_urb() and reacquired afterwards (hence the
 * __releases/__acquires annotations).  If the endpoint has no further
 * queued URBs it is also descheduled (async or periodic).
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
     __releases(isp1362_hcd->lock)
     __acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* a finished control transfer restarts at the SETUP stage */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
			usb_pipeint(urb->pipe) ? "int" :
			usb_pipebulk(urb->pipe) ? "bulk" :
			"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* drop the lock: the completion callback may resubmit to this HCD */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
 502
/*
 * Analyze transfer results, handle partial transfers and errors.
 *
 * Advances ep->nextpid through the transaction state machine
 * (SETUP -> IN/OUT -> ACK for control transfers) and, once the URB
 * status leaves -EINPROGRESS, hands it back via finish_request().
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	/* a PTD the chip never touched is treated as a non-responding device */
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	*/
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				/* save the data underrun error code for later and
				 * procede with the status stage
				 */
				urb->actual_length += PTD_GET_COUNT(ptd);
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	/* other errors: give up after 3 tries, or immediately on STALL/overrun */
	if (cc != PTD_CC_NOERROR) {
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done, unless a trailing ZLP is still required */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* after SETUP: go straight to status stage if there is no data */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
 659
/*
 * Clean up every endpoint queued on remove_list by remove_ptd():
 * release its chip buffers, finish any still-queued URB with
 * -ESHUTDOWN, and unlink it from the active and remove lists.
 * Deferred to SOF time (remove_ptd() enables HCuPINT_SOF for this).
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
 693
 694static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 695{
 696	if (count > 0) {
 697		if (count < isp1362_hcd->atl_queue.ptd_count)
 698			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 699		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 700		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 701		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 702	} else
 703		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 704}
 705
/*
 * Arm the INTL (interrupt transfer) queue: enable its done interrupt,
 * mark the buffer active and load the current skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
 712
 713static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 714{
 715	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 716	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 717			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 718}
 719
/*
 * Prepare, buffer-claim and write one PTD for the next transaction of
 * @urb and link @ep onto the queue's active list.  Returns 0 on
 * success, or -ENOMEM/-EOVERFLOW propagated from claim_ptd_buffers().
 */
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	/* NOTE(review): this initializer is dead -- index is overwritten below */
	int index = epq->free_ptd;

	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	/* clear the skip bit so the chip processes the new PTD */
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}
 748
/*
 * Walk the async (control/bulk) schedule and submit a PTD for every
 * endpoint that is not already active.  On -ENOMEM/-EOVERFLOW the
 * submission is deferred (retried at the next SOF via
 * enable_atl_transfers(..., 0)).
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* don't race with finish_transfers() on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* queue exhausted: stop and retry everything later */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* this PTD didn't fit: try the remaining endpoints */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	/* statistics: track the peak number of simultaneously queued PTDs */
	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
 801
/*
 * Walk the periodic schedule and submit a PTD for every interrupt
 * endpoint that is not already active, then arm the INTL queue if
 * anything was queued.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* don't race with finish_transfers() on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		/* static: rate-limits the debug print to changes in ptd_count */
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	/* statistics: track the peak number of simultaneously queued PTDs */
	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
 847
 848static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 849{
 850	u16 ptd_offset = ep->ptd_offset;
 851	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 852
 853	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 854	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 855
 856	ptd_offset += num_ptds * epq->blk_size;
 857	if (ptd_offset < epq->buf_start + epq->buf_size)
 858		return ptd_offset;
 859	else
 860		return -ENOMEM;
 861}
 862
/*
 * Fill the current ISTL buffer with PTDs for isochronous URBs that are
 * due this frame or the next, then -- thanks to the chip's double
 * buffering -- loop back (goto fill2) to fill the other ISTL buffer
 * for the following frame if it is free.
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	/* don't race with finish_transfers() on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	if (!list_empty(&epq->active))
		return;

	/* ISO PTDs are packed back-to-back from the start of the buffer */
	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
				     ep->num_req, epq->name);
				break;
			}
		}
	}
	/* write out all queued PTDs, flagging the last one for the chip */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	/* statistics: track the peak number of simultaneously queued PTDs */
	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
 943
/*
 * Post-process completed PTDs of the ATL or INTL queue.  @done_map has
 * one bit set per finished PTD index (read from HCATLDONE/HCINTLDONE by
 * the caller); each matching endpoint is read back from chip memory,
 * its buffer slots released and its URB state advanced.
 * Called with isp1362_hcd->lock held.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	/* flag protects against concurrent refill of this queue */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		/* clear this endpoint's bit; skip endpoints not yet done */
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			/* a pending unlink for this ep is now satisfied */
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* all reported PTDs handled; stop walking the list early */
		if (!done_map)
			break;
	}
	/* leftover bits mean the chip reported a PTD we don't have active */
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
		     epq->skip_map);
	atomic_dec(&epq->finishing);
}
 989
/*
 * Post-process all active PTDs of an ISTL (isochronous) queue after the
 * controller signalled the buffer done.  Unlike ATL/INTL there is no
 * done_map: the whole buffer completes at once, so every active
 * endpoint is read back and post-processed.
 * Called with isp1362_hcd->lock held.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	/* flag protects against concurrent refill of this buffer */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* ISO queues use variable-size PTDs, so blk_size must stay 0 */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1013
/*
 * Top-level interrupt handler.  Masks the chip's interrupt enables,
 * reads and acknowledges HCuPINT, then services each enabled source in
 * turn: SOF, the two ISO buffers (ISTL0/1), interrupt (INTL) and async
 * (ATL) PTD completion, OHCI operational events (OPR), suspend and
 * clock-ready.  svc_mask tracks sources that remain unserviced so they
 * can be reported at the end.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never be re-entered */
	BUG_ON(isp1362_hcd->irq_active++);

	/* disable all chip interrupts while servicing */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	/* start-of-frame: run deferred unlinks and restart ATL processing */
	if (irqstat & HCuPINT_SOF) {
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* ATL PTDs still pending: re-arm the ATL irq and
				 * let the controller continue processing them */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	/* ISO buffer 0 done; sanity-check buffer ownership via WARN_ONs */
	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	/* ISO buffer 1 done; mirror image of the ISTL0 case above */
	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	/* finish the completed ISO buffer, refill it and flip buffers;
	 * both buffers signalling at once would indicate lost interrupts */
	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	/* interrupt-transfer PTD(s) completed */
	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	/* async (control/bulk) PTD(s) completed */
	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	/* OHCI operational interrupts: errors, root-hub changes, resume */
	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root-hub state for the hub_status_data path */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* restore the (possibly updated) interrupt enable mask */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1187
1188/*-------------------------------------------------------------------------*/
1189
1190#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
1191static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1192{
1193	int i, branch = -ENOSPC;
1194
1195	/* search for the least loaded schedule branch of that interval
1196	 * which has enough bandwidth left unreserved.
1197	 */
1198	for (i = 0; i < interval; i++) {
1199		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1200			int j;
1201
1202			for (j = i; j < PERIODIC_SIZE; j += interval) {
1203				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1204					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1205					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1206					break;
1207				}
1208			}
1209			if (j < PERIODIC_SIZE)
1210				continue;
1211			branch = i;
1212		}
1213	}
1214	return branch;
1215}
1216
1217/* NB! ALL the code above this point runs with isp1362_hcd->lock
1218   held, irqs off
1219*/
1220
1221/*-------------------------------------------------------------------------*/
1222
/*
 * hc_driver->urb_enqueue: accept a new URB, creating and scheduling the
 * per-endpoint state (struct isp1362_ep) on first use.  The endpoint is
 * placed on the async, periodic or isoc schedule list as appropriate
 * and the matching transfer engine is kicked.  Allocation happens
 * before taking the HCD spinlock; everything else runs under it.
 * Returns 0 or a negative errno.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	/* ISO is rejected here even though scheduling code below still
	 * carries PIPE_ISOCHRONOUS cases */
	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		ep = hep->hcpriv;
	} else {
		/* first URB for this endpoint: initialize its state */
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			/* branch == PERIODIC_SIZE marks "not yet scheduled" */
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* reserve periodic bandwidth in the least-loaded branch */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* pick a start frame aligned to the interval
				 * and at least 8 frames in the future */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the transfer engine matching this pipe type */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1403
/*
 * hc_driver->urb_dequeue: cancel a submitted URB.  If the URB heads its
 * endpoint's queue and its PTD is active in the controller, the PTD is
 * queued for removal and the URB finishes later from interrupt context;
 * otherwise the request is completed immediately with @status.
 * Returns 0 or a negative errno.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* NULL urb defers completion to the irq handler */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1455
/*
 * hc_driver->endpoint_disable: tear down the per-endpoint state when
 * usbcore is done with the endpoint.  Any PTD still active in the
 * controller is queued for removal first, then we poll (msleep) until
 * the interrupt handler has drained the active list before freeing.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1485
1486static int isp1362_get_frame(struct usb_hcd *hcd)
1487{
1488	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1489	u32 fmnum;
1490	unsigned long flags;
1491
1492	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1493	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1494	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1495
1496	return (int)fmnum;
1497}
1498
1499/*-------------------------------------------------------------------------*/
1500
1501/* Adapted from ohci-hub.c */
1502static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1503{
1504	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1505	int ports, i, changed = 0;
1506	unsigned long flags;
1507
1508	if (!HC_IS_RUNNING(hcd->state))
1509		return -ESHUTDOWN;
1510
1511	/* Report no status change now, if we are scheduled to be
1512	   called later */
1513	if (timer_pending(&hcd->rh_timer))
1514		return 0;
1515
1516	ports = isp1362_hcd->rhdesca & RH_A_NDP;
1517	BUG_ON(ports > 2);
1518
1519	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1520	/* init status */
1521	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1522		buf[0] = changed = 1;
1523	else
1524		buf[0] = 0;
1525
1526	for (i = 0; i < ports; i++) {
1527		u32 status = isp1362_hcd->rhport[i];
1528
1529		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1530			      RH_PS_OCIC | RH_PS_PRSC)) {
1531			changed = 1;
1532			buf[0] |= 1 << (i + 1);
1533			continue;
1534		}
1535
1536		if (!(status & RH_PS_CCS))
1537			continue;
1538	}
1539	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1540	return changed;
1541}
1542
1543static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1544				   struct usb_hub_descriptor *desc)
1545{
1546	u32 reg = isp1362_hcd->rhdesca;
1547
1548	DBG(3, "%s: enter\n", __func__);
1549
1550	desc->bDescriptorType = 0x29;
1551	desc->bDescLength = 9;
1552	desc->bHubContrCurrent = 0;
1553	desc->bNbrPorts = reg & 0x3;
1554	/* Power switching, device type, overcurrent. */
1555	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1556	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
1557	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1558	/* two bitmaps:  ports removable, and legacy PortPwrCtrlMask */
1559	desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1560	desc->bitmap[1] = ~0;
1561
1562	DBG(3, "%s: exit\n", __func__);
1563}
1564
1565/* Adapted from ohci-hub.c */
1566static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1567			       u16 wIndex, char *buf, u16 wLength)
1568{
1569	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1570	int retval = 0;
1571	unsigned long flags;
1572	unsigned long t1;
1573	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1574	u32 tmp = 0;
1575
1576	switch (typeReq) {
1577	case ClearHubFeature:
1578		DBG(0, "ClearHubFeature: ");
1579		switch (wValue) {
1580		case C_HUB_OVER_CURRENT:
1581			_DBG(0, "C_HUB_OVER_CURRENT\n");
1582			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1583			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1584			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1585		case C_HUB_LOCAL_POWER:
1586			_DBG(0, "C_HUB_LOCAL_POWER\n");
1587			break;
1588		default:
1589			goto error;
1590		}
1591		break;
1592	case SetHubFeature:
1593		DBG(0, "SetHubFeature: ");
1594		switch (wValue) {
1595		case C_HUB_OVER_CURRENT:
1596		case C_HUB_LOCAL_POWER:
1597			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1598			break;
1599		default:
1600			goto error;
1601		}
1602		break;
1603	case GetHubDescriptor:
1604		DBG(0, "GetHubDescriptor\n");
1605		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1606		break;
1607	case GetHubStatus:
1608		DBG(0, "GetHubStatus\n");
1609		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1610		break;
1611	case GetPortStatus:
1612#ifndef VERBOSE
1613		DBG(0, "GetPortStatus\n");
1614#endif
1615		if (!wIndex || wIndex > ports)
1616			goto error;
1617		tmp = isp1362_hcd->rhport[--wIndex];
1618		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1619		break;
1620	case ClearPortFeature:
1621		DBG(0, "ClearPortFeature: ");
1622		if (!wIndex || wIndex > ports)
1623			goto error;
1624		wIndex--;
1625
1626		switch (wValue) {
1627		case USB_PORT_FEAT_ENABLE:
1628			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
1629			tmp = RH_PS_CCS;
1630			break;
1631		case USB_PORT_FEAT_C_ENABLE:
1632			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1633			tmp = RH_PS_PESC;
1634			break;
1635		case USB_PORT_FEAT_SUSPEND:
1636			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1637			tmp = RH_PS_POCI;
1638			break;
1639		case USB_PORT_FEAT_C_SUSPEND:
1640			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1641			tmp = RH_PS_PSSC;
1642			break;
1643		case USB_PORT_FEAT_POWER:
1644			_DBG(0, "USB_PORT_FEAT_POWER\n");
1645			tmp = RH_PS_LSDA;
1646
1647			break;
1648		case USB_PORT_FEAT_C_CONNECTION:
1649			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1650			tmp = RH_PS_CSC;
1651			break;
1652		case USB_PORT_FEAT_C_OVER_CURRENT:
1653			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1654			tmp = RH_PS_OCIC;
1655			break;
1656		case USB_PORT_FEAT_C_RESET:
1657			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
1658			tmp = RH_PS_PRSC;
1659			break;
1660		default:
1661			goto error;
1662		}
1663
1664		spin_lock_irqsave(&isp1362_hcd->lock, flags);
1665		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1666		isp1362_hcd->rhport[wIndex] =
1667			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1668		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1669		break;
1670	case SetPortFeature:
1671		DBG(0, "SetPortFeature: ");
1672		if (!wIndex || wIndex > ports)
1673			goto error;
1674		wIndex--;
1675		switch (wValue) {
1676		case USB_PORT_FEAT_SUSPEND:
1677			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1678			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1679			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1680			isp1362_hcd->rhport[wIndex] =
1681				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1682			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1683			break;
1684		case USB_PORT_FEAT_POWER:
1685			_DBG(0, "USB_PORT_FEAT_POWER\n");
1686			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1687			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1688			isp1

Large files files are truncated, but you can click here to view the full file