PageRenderTime 119ms CodeModel.GetById 20ms app.highlight 82ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/ieee1394/ohci1394.c

https://bitbucket.org/evzijst/gittest
C | 3705 lines | 2512 code | 685 blank | 508 comment | 415 complexity | 1b44604c45ed287f0ac5dea7f1e3021c MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/*
   2 * ohci1394.c - driver for OHCI 1394 boards
   3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
   4 *                        Gord Peters <GordPeters@smarttech.com>
   5 *              2001      Ben Collins <bcollins@debian.org>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software Foundation,
  19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  20 */
  21
  22/*
  23 * Things known to be working:
  24 * . Async Request Transmit
  25 * . Async Response Receive
  26 * . Async Request Receive
  27 * . Async Response Transmit
  28 * . Iso Receive
  29 * . DMA mmap for iso receive
  30 * . Config ROM generation
  31 *
  32 * Things implemented, but still in test phase:
  33 * . Iso Transmit
  34 * . Async Stream Packets Transmit (Receive done via Iso interface)
  35 *
  36 * Things not implemented:
  37 * . DMA error recovery
  38 *
  39 * Known bugs:
  40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
  41 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
  42 */
  43
  44/*
  45 * Acknowledgments:
  46 *
  47 * Adam J Richter <adam@yggdrasil.com>
  48 *  . Use of pci_class to find device
  49 *
  50 * Emilie Chung	<emilie.chung@axis.com>
  51 *  . Tip on Async Request Filter
  52 *
  53 * Pascal Drolet <pascal.drolet@informission.ca>
   54 *  . Various tips for optimization and functionality
  55 *
  56 * Robert Ficklin <rficklin@westengineering.com>
  57 *  . Loop in irq_handler
  58 *
  59 * James Goodwin <jamesg@Filanet.com>
  60 *  . Various tips on initialization, self-id reception, etc.
  61 *
  62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
  63 *  . Apple PowerBook detection
  64 *
  65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
  66 *  . Reset the board properly before leaving + misc cleanups
  67 *
  68 * Leon van Stuivenberg <leonvs@iae.nl>
  69 *  . Bug fixes
  70 *
  71 * Ben Collins <bcollins@debian.org>
  72 *  . Working big-endian support
   73 *  . Updated to 2.4.x module scheme (PCI as well)
  74 *  . Config ROM generation
  75 *
  76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
  77 *  . Reworked code for initiating bus resets
  78 *    (long, short, with or without hold-off)
  79 *
  80 * Nandu Santhi <contactnandu@users.sourceforge.net>
  81 *  . Added support for nVidia nForce2 onboard Firewire chipset
  82 *
  83 */
  84
  85#include <linux/config.h>
  86#include <linux/kernel.h>
  87#include <linux/list.h>
  88#include <linux/slab.h>
  89#include <linux/interrupt.h>
  90#include <linux/wait.h>
  91#include <linux/errno.h>
  92#include <linux/module.h>
  93#include <linux/moduleparam.h>
  94#include <linux/pci.h>
  95#include <linux/fs.h>
  96#include <linux/poll.h>
  97#include <asm/byteorder.h>
  98#include <asm/atomic.h>
  99#include <asm/uaccess.h>
 100#include <linux/delay.h>
 101#include <linux/spinlock.h>
 102
 103#include <asm/pgtable.h>
 104#include <asm/page.h>
 105#include <asm/irq.h>
 106#include <linux/sched.h>
 107#include <linux/types.h>
 108#include <linux/vmalloc.h>
 109#include <linux/init.h>
 110
 111#ifdef CONFIG_PPC_PMAC
 112#include <asm/machdep.h>
 113#include <asm/pmac_feature.h>
 114#include <asm/prom.h>
 115#include <asm/pci-bridge.h>
 116#endif
 117
 118#include "csr1212.h"
 119#include "ieee1394.h"
 120#include "ieee1394_types.h"
 121#include "hosts.h"
 122#include "dma.h"
 123#include "iso.h"
 124#include "ieee1394_core.h"
 125#include "highlevel.h"
 126#include "ohci1394.h"
 127
/* Verbose ieee1394 debugging implies driver-level debugging too. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

/* A shared header may already define DBGMSG; ours takes precedence. */
#ifdef DBGMSG
#undef DBGMSG
#endif

/* Per-card debug printk; expands to nothing unless OHCI1394_DEBUG is set.
 * NOTE: relies on a variable named `ohci` being in scope at the call site. */
#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

/* DMA bookkeeping helpers: when CONFIG_IEEE1394_OHCI_DMA_DEBUG is set they
 * log every DMA mapping/unmapping and keep a running count of outstanding
 * mappings in global_outstanding_dmas (useful for spotting leaks). */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif
 155
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information
 * NOTE: like DBGMSG, expects an `ohci` variable in scope at the call site. */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Driver version string, printed at probe time. */
static char version[] __devinitdata =
	"$Rev: 1250 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
/* phys_dma: allow remote nodes direct physical DMA access (default on). */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
 171
 172static void dma_trm_tasklet(unsigned long data);
 173static void dma_trm_reset(struct dma_trm_ctx *d);
 174
 175static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
 176			     enum context_type type, int ctx, int num_desc,
 177			     int buf_size, int split_buf_size, int context_base);
 178static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
 179static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
 180
 181static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
 182			     enum context_type type, int ctx, int num_desc,
 183			     int context_base);
 184
 185static void ohci1394_pci_remove(struct pci_dev *pdev);
 186
 187#ifndef __LITTLE_ENDIAN
 188static unsigned hdr_sizes[] =
 189{
 190	3,	/* TCODE_WRITEQ */
 191	4,	/* TCODE_WRITEB */
 192	3,	/* TCODE_WRITE_RESPONSE */
 193	0,	/* ??? */
 194	3,	/* TCODE_READQ */
 195	4,	/* TCODE_READB */
 196	3,	/* TCODE_READQ_RESPONSE */
 197	4,	/* TCODE_READB_RESPONSE */
 198	1,	/* TCODE_CYCLE_START (???) */
 199	4,	/* TCODE_LOCK_REQUEST */
 200	2,	/* TCODE_ISO_DATA */
 201	4,	/* TCODE_LOCK_RESPONSE */
 202};
 203
 204/* Swap headers */
 205static inline void packet_swab(quadlet_t *data, int tcode)
 206{
 207	size_t size = hdr_sizes[tcode];
 208
 209	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
 210		return;
 211
 212	while (size--)
 213		data[size] = swab32(data[size]);
 214}
 215#else
 216/* Don't waste cycles on same sex byte swaps */
 217#define packet_swab(w,x)
 218#endif /* !LITTLE_ENDIAN */
 219
 220/***********************************
 221 * IEEE-1394 functionality section *
 222 ***********************************/
 223
/* Read a PHY register through the OHCI PhyControl register.
 *
 * @addr: PHY register number to read
 *
 * Serialized by ohci->phy_reg_lock. Writing bit 15 (rdReg) with the
 * register address starts the read; bit 31 (rdDone) signals completion.
 * Polls for up to OHCI_LOOP_COUNT ms. On timeout the stale/partial value
 * is still returned after logging an error.
 *
 * Returns the rdData field (bits 16-23 of PhyControl). */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00008000 = rdReg: initiate a PHY register read of `addr`. */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* 0x80000000 = rdDone: the PHY read has completed. */
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	/* NOTE(review): timeout test uses `>=` here but `==` in
	 * set_phy_reg; both detect loop exhaustion, just inconsistently. */
	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}
 251
 252static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
 253{
 254	int i;
 255	unsigned long flags;
 256	u32 r = 0;
 257
 258	spin_lock_irqsave (&ohci->phy_reg_lock, flags);
 259
 260	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
 261
 262	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
 263		r = reg_read(ohci, OHCI1394_PhyControl);
 264		if (!(r & 0x00004000))
 265			break;
 266
 267		mdelay(1);
 268	}
 269
 270	if (i == OHCI_LOOP_COUNT)
 271		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
 272		       r, r & 0x00004000, i);
 273
 274	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
 275
 276	return;
 277}
 278
 279/* Or's our value into the current value */
 280static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
 281{
 282	u8 old;
 283
 284	old = get_phy_reg (ohci, addr);
 285	old |= data;
 286	set_phy_reg (ohci, addr, old);
 287
 288	return;
 289}
 290
/* Parse the self-ID buffer the controller filled in after a bus reset
 * and hand each valid self-ID packet to the ieee1394 core.
 *
 * @phyid:  our own physical ID, used only for a debug message
 * @isroot: unused in this function
 *
 * On a reception error, retries by forcing another bus reset (IBR via
 * PHY register 1) up to OHCI1394_MAX_SELF_ID_ERRORS times. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount flags a reception error; the generation
	 * field (bits 16-23) must also match the copy the controller put
	 * in the first buffer quadlet, otherwise the buffer is stale. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Quadlet count lives in bits 2-12; subtract 1 to skip the header
	 * quadlet already consumed above. Each packet occupies two
	 * quadlets: the data and its bitwise complement.
	 * NOTE(review): size is size_t; an odd count from misbehaving
	 * hardware would underflow on the final `size -= 2` — assumed
	 * impossible here, but worth confirming. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		/* The second quadlet must be the complement of the first;
		 * anything else means the pair is corrupt. */
		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
 356
 357static void ohci_soft_reset(struct ti_ohci *ohci) {
 358	int i;
 359
 360	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
 361
 362	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
 363		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
 364			break;
 365		mdelay(1);
 366	}
 367	DBGMSG ("Soft reset finished");
 368}
 369
 370
 371/* Generate the dma receive prgs and start the context */
/* Generate the dma receive prgs and start the context.
 *
 * Builds a circular list of INPUT_MORE descriptors (one per receive
 * buffer), programs the context registers and sets the context running.
 * @generate_irq: if non-zero, each descriptor raises an interrupt on
 * completion (used for the IR legacy context). */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the context is halted before rewriting its program. */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* Branch to the next descriptor; low bit is Z=1
			 * (one descriptor at the branch target). */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor points back at the first with Z=0;
			 * presumably the controller stalls here until software
			 * re-arms the branch — TODO confirm against the
			 * context-refill code elsewhere in this file. */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

        d->buf_ind = 0;
        d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
 430
 431/* Initialize the dma transmit context */
/* Initialize the dma transmit context: halt it, reset the software
 * descriptor bookkeeping and packet queues, and (for iso contexts)
 * enable its transmit-complete interrupt. The context itself is only
 * started later, by dma_trm_flush() when the first packet is queued. */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Reset descriptor ring state: all programs free, nothing queued. */
        d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
        d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}
 453
 454/* Count the number of available iso contexts */
 455static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
 456{
 457	int i,ctx=0;
 458	u32 tmp;
 459
 460	reg_write(ohci, reg, 0xffffffff);
 461	tmp = reg_read(ohci, reg);
 462
 463	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
 464
 465	/* Count the number of contexts */
 466	for (i=0; i<32; i++) {
 467	    	if (tmp & 1) ctx++;
 468		tmp >>= 1;
 469	}
 470	return ctx;
 471}
 472
 473/* Global initialization */
/* Global initialization: bring the controller from soft-reset to a fully
 * operational state. The ordering below follows the OHCI bring-up
 * sequence (bus options, link control, self-ID buffer, config ROM, DMA
 * contexts, request filters, interrupts, link enable) and should not be
 * reordered casually. Runs once per card at probe time. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (!hpsb_disable_irm)
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number (0x3ff) with node number 0 */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
			 (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec field + 1), where max_rec
	 * lives in bits 12-15 of BusOptions. */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
		
	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the iso receive interrupt mask and any pending events */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the iso transmit interrupt mask and any pending events */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);
	
	/* Initialize IR Legacy DMA */
	ohci->ir_legacy_channels = 0;
	initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
	DBGMSG("ISO receive legacy context activated");

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* Decode the BCD version field for the banner message. */
	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY reg 7 selects the port; reg 8 holds its status. */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

        /* Serial EEPROM Sanity check. */
        if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM address reset to finish. */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		/* Dump the first 32 EEPROM bytes, one rdData byte per step. */
		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
 640
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 *
 * NOTE(review): callers appear responsible for holding d->lock and for
 * ensuring d->free_prgs > 0 before calling (see dma_trm_flush) — confirm.
 * Three packet shapes are handled: async/raw with a data payload (block),
 * async/raw header-only (quadlet), and iso. In each case the previous
 * descriptor's branchAddress is patched to chain to this one, and
 * d->branchAddrPtr is left pointing at this descriptor's branch field.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else 
		d->prg_cpu[idx]->begin.status = 0;

        if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

                if (packet->type == hpsb_raw) {
			/* Raw (PHY) packet: tcode nibble plus the two
			 * quadlets supplied by the caller. */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
                        d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
                        d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
                } else {
			/* First quadlet carries the speed code plus the low
			 * 16 header bits, per the OHCI AT format. */
                        d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                                (packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* Big-endian hosts must byte-swap the header. */
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
                }

                if (packet->data_size) { /* block transmit */
			/* Immediate descriptor length: 8 bytes of header for
			 * stream data, 16 for a normal block request. */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
                        d->prg_cpu[idx]->end.control =
                                cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
                        /*
                         * Check that the packet data buffer
                         * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
                         */
#if 0
                        if (cross_bound((unsigned long)packet->data,
                                        packet->data_size)>0) {
                                /* FIXME: do something about it */
                                PRINT(KERN_ERR,
                                      "%s: packet data addr: %p size %Zd bytes "
                                      "cross page boundary", __FUNCTION__,
                                      packet->data, packet->data_size);
                        }
#endif
			/* Map the payload for device reads; unmapped later by
			 * the completion path (not visible in this chunk). */
                        d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                               packet->data_size,
                                               PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

                        d->prg_cpu[idx]->end.branchAddress = 0;
                        d->prg_cpu[idx]->end.status = 0;
			/* Chain the previous descriptor to this one (Z=3:
			 * two-descriptor program). */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->end.branchAddress);
                } else { /* quadlet transmit */
                        if (packet->type == hpsb_raw)
                                d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
                        else
                                d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Chain the previous descriptor to this one (Z=2:
			 * single-descriptor program). */
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->begin.branchAddress);
                }

        } else { /* iso packet */
                d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                        (packet->header[0] & 0xFFFF);
                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

                d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
                d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
                d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

                d->prg_cpu[idx]->end.branchAddress = 0;
                d->prg_cpu[idx]->end.status = 0;
                DBGMSG("Iso xmit context info: header[%08x %08x]\n"
                       "                       begin=%08x %08x %08x %08x\n"
                       "                             %08x %08x %08x %08x\n"
                       "                       end  =%08x %08x %08x %08x",
                       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->begin.control,
                       d->prg_cpu[idx]->begin.address,
                       d->prg_cpu[idx]->begin.branchAddress,
                       d->prg_cpu[idx]->begin.status,
                       d->prg_cpu[idx]->data[0],
                       d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->data[2],
                       d->prg_cpu[idx]->data[3],
                       d->prg_cpu[idx]->end.control,
                       d->prg_cpu[idx]->end.address,
                       d->prg_cpu[idx]->end.branchAddress,
                       d->prg_cpu[idx]->end.status);
                if (d->branchAddrPtr)
  		        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
                d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
        }
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
 819
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: remember its Z value (number of
		 * descriptors — 3 with payload, 2 header-only) for cmdPtr. */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* Point the context at the first descriptor we just built;
		 * low bits of cmdPtr carry the Z value. */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			/* 0x8000 = run bit in contextControl. */
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
 874
/* Transmission of an async or iso packet.
 *
 * Picks the transmit context matching the packet (AT request, AT
 * response, or the lazily-allocated legacy IT context), queues the
 * packet on its pending list and flushes the context.
 *
 * Returns 0 on success, -EOVERFLOW if the payload exceeds the card's
 * max packet size, -EINVAL/-ENOMEM on legacy IT context setup failure. */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* Odd-numbered tcodes (bit 1 set) are responses. */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
 933
 934static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
 935{
 936	struct ti_ohci *ohci = host->hostdata;
 937	int retval = 0;
 938	unsigned long flags;
 939	int phy_reg;
 940
 941	switch (cmd) {
 942	case RESET_BUS:
 943		switch (arg) {
 944		case SHORT_RESET:
 945			phy_reg = get_phy_reg(ohci, 5);
 946			phy_reg |= 0x40;
 947			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
 948			break;
 949		case LONG_RESET:
 950			phy_reg = get_phy_reg(ohci, 1);
 951			phy_reg |= 0x40;
 952			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
 953			break;
 954		case SHORT_RESET_NO_FORCE_ROOT:
 955			phy_reg = get_phy_reg(ohci, 1);
 956			if (phy_reg & 0x80) {
 957				phy_reg &= ~0x80;
 958				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
 959			}
 960
 961			phy_reg = get_phy_reg(ohci, 5);
 962			phy_reg |= 0x40;
 963			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
 964			break;
 965		case LONG_RESET_NO_FORCE_ROOT:
 966			phy_reg = get_phy_reg(ohci, 1);
 967			phy_reg &= ~0x80;
 968			phy_reg |= 0x40;
 969			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
 970			break;
 971		case SHORT_RESET_FORCE_ROOT:
 972			phy_reg = get_phy_reg(ohci, 1);
 973			if (!(phy_reg & 0x80)) {
 974				phy_reg |= 0x80;
 975				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
 976			}
 977
 978			phy_reg = get_phy_reg(ohci, 5);
 979			phy_reg |= 0x40;
 980			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
 981			break;
 982		case LONG_RESET_FORCE_ROOT:
 983			phy_reg = get_phy_reg(ohci, 1);
 984			phy_reg |= 0xc0;
 985			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
 986			break;
 987		default:
 988			retval = -1;
 989		}
 990		break;
 991
 992	case GET_CYCLE_COUNTER:
 993		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
 994		break;
 995
 996	case SET_CYCLE_COUNTER:
 997		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
 998		break;
 999
1000	case SET_BUS_ID:
1001		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1002		break;
1003
1004	case ACT_CYCLE_MASTER:
1005		if (arg) {
1006			/* check if we are root and other nodes are present */
1007			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1008			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1009				/*
1010				 * enable cycleTimer, cycleMaster
1011				 */
1012				DBGMSG("Cycle master enabled");
1013				reg_write(ohci, OHCI1394_LinkControlSet,
1014					  OHCI1394_LinkControl_CycleTimerEnable |
1015					  OHCI1394_LinkControl_CycleMaster);
1016			}
1017		} else {
1018			/* disable cycleTimer, cycleMaster, cycleSource */
1019			reg_write(ohci, OHCI1394_LinkControlClear,
1020				  OHCI1394_LinkControl_CycleTimerEnable |
1021				  OHCI1394_LinkControl_CycleMaster |
1022				  OHCI1394_LinkControl_CycleSource);
1023		}
1024		break;
1025
1026	case CANCEL_REQUESTS:
1027		DBGMSG("Cancel request received");
1028		dma_trm_reset(&ohci->at_req_context);
1029		dma_trm_reset(&ohci->at_resp_context);
1030		break;
1031
1032	case ISO_LISTEN_CHANNEL:
1033        {
1034		u64 mask;
1035
1036		if (arg<0 || arg>63) {
1037			PRINT(KERN_ERR,
1038			      "%s: IS0 listen channel %d is out of range",
1039			      __FUNCTION__, arg);
1040			return -EFAULT;
1041		}
1042
1043		mask = (u64)0x1<<arg;
1044
1045                spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1046
1047		if (ohci->ISO_channel_usage & mask) {
1048			PRINT(KERN_ERR,
1049			      "%s: IS0 listen channel %d is already used",
1050			      __FUNCTION__, arg);
1051			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1052			return -EFAULT;
1053		}
1054
1055		ohci->ISO_channel_usage |= mask;
1056		ohci->ir_legacy_channels |= mask;
1057
1058		if (arg>31)
1059			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1060				  1<<(arg-32));
1061		else
1062			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1063				  1<<arg);
1064
1065                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1066                DBGMSG("Listening enabled on channel %d", arg);
1067                break;
1068        }
1069	case ISO_UNLISTEN_CHANNEL:
1070        {
1071		u64 mask;
1072
1073		if (arg<0 || arg>63) {
1074			PRINT(KERN_ERR,
1075			      "%s: IS0 unlisten channel %d is out of range",
1076			      __FUNCTION__, arg);
1077			return -EFAULT;
1078		}
1079
1080		mask = (u64)0x1<<arg;
1081
1082                spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1083
1084		if (!(ohci->ISO_channel_usage & mask)) {
1085			PRINT(KERN_ERR,
1086			      "%s: IS0 unlisten channel %d is not used",
1087			      __FUNCTION__, arg);
1088			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1089			return -EFAULT;
1090		}
1091
1092		ohci->ISO_channel_usage &= ~mask;
1093		ohci->ir_legacy_channels &= ~mask;
1094
1095		if (arg>31)
1096			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1097				  1<<(arg-32));
1098		else
1099			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1100				  1<<arg);
1101
1102                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1103                DBGMSG("Listening disabled on channel %d", arg);
1104                break;
1105        }
1106	default:
1107		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1108			cmd);
1109		break;
1110	}
1111	return retval;
1112}
1113
1114/***********************************
1115 * rawiso ISO reception            *
1116 ***********************************/
1117
1118/*
1119  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1120  buffer is split into "blocks" (regions described by one DMA
1121  descriptor). Each block must be one page or less in size, and
1122  must not cross a page boundary.
1123
1124  There is one little wrinkle with buffer-fill mode: a packet that
1125  starts in the final block may wrap around into the first block. But
1126  the user API expects all packets to be contiguous. Our solution is
1127  to keep the very last page of the DMA buffer in reserve - if a
1128  packet spans the gap, we copy its tail into this page.
1129*/
1130
/* Per-context state for rawiso ISO reception (one per hpsb_iso). */
struct ohci_iso_recv {
	struct ti_ohci *ohci;	/* host controller this context belongs to */

	struct ohci1394_iso_tasklet task;	/* bottom half for IR interrupts */
	int task_active;	/* nonzero once the tasklet is registered */

	/* buffer-fill: packets packed back-to-back in the ring buffer;
	   packet-per-buffer: one packet per fixed-stride slot */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.kvirt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks (bytes per block) */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control register offsets for this context */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1172
1173static void ohci_iso_recv_task(unsigned long data);
1174static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1175static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1176static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1177static void ohci_iso_recv_program(struct hpsb_iso *iso);
1178
1179static int ohci_iso_recv_init(struct hpsb_iso *iso)
1180{
1181	struct ti_ohci *ohci = iso->host->hostdata;
1182	struct ohci_iso_recv *recv;
1183	int ctx;
1184	int ret = -ENOMEM;
1185
1186	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1187	if (!recv)
1188		return -ENOMEM;
1189
1190	iso->hostdata = recv;
1191	recv->ohci = ohci;
1192	recv->task_active = 0;
1193	dma_prog_region_init(&recv->prog);
1194	recv->block = NULL;
1195
1196	/* use buffer-fill mode, unless irq_interval is 1
1197	   (note: multichannel requires buffer-fill) */
1198
1199	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1200	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1201		recv->dma_mode = PACKET_PER_BUFFER_MODE;
1202	} else {
1203		recv->dma_mode = BUFFER_FILL_MODE;
1204	}
1205
1206	/* set nblocks, buf_stride, block_irq_interval */
1207
1208	if (recv->dma_mode == BUFFER_FILL_MODE) {
1209		recv->buf_stride = PAGE_SIZE;
1210
1211		/* one block per page of data in the DMA buffer, minus the final guard page */
1212		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1213		if (recv->nblocks < 3) {
1214			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1215			goto err;
1216		}
1217
1218		/* iso->irq_interval is in packets - translate that to blocks */
1219		if (iso->irq_interval == 1)
1220			recv->block_irq_interval = 1;
1221		else
1222			recv->block_irq_interval = iso->irq_interval *
1223							((recv->nblocks+1)/iso->buf_packets);
1224		if (recv->block_irq_interval*4 > recv->nblocks)
1225			recv->block_irq_interval = recv->nblocks/4;
1226		if (recv->block_irq_interval < 1)
1227			recv->block_irq_interval = 1;
1228
1229	} else {
1230		int max_packet_size;
1231
1232		recv->nblocks = iso->buf_packets;
1233		recv->block_irq_interval = iso->irq_interval;
1234		if (recv->block_irq_interval * 4 > iso->buf_packets)
1235			recv->block_irq_interval = iso->buf_packets / 4;
1236		if (recv->block_irq_interval < 1)
1237		recv->block_irq_interval = 1;
1238
1239		/* choose a buffer stride */
1240		/* must be a power of 2, and <= PAGE_SIZE */
1241
1242		max_packet_size = iso->buf_size / iso->buf_packets;
1243
1244		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1245		    recv->buf_stride *= 2);
1246
1247		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1248		   recv->buf_stride > PAGE_SIZE) {
1249			/* this shouldn't happen, but anyway... */
1250			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1251			goto err;
1252		}
1253	}
1254
1255	recv->block_reader = 0;
1256	recv->released_bytes = 0;
1257	recv->block_dma = 0;
1258	recv->dma_offset = 0;
1259
1260	/* size of DMA program = one descriptor per block */
1261	if (dma_prog_region_alloc(&recv->prog,
1262				 sizeof(struct dma_cmd) * recv->nblocks,
1263				 recv->ohci->dev))
1264		goto err;
1265
1266	recv->block = (struct dma_cmd*) recv->prog.kvirt;
1267
1268	ohci1394_init_iso_tasklet(&recv->task,
1269				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1270				                       OHCI_ISO_RECEIVE,
1271				  ohci_iso_recv_task, (unsigned long) iso);
1272
1273	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
1274		goto err;
1275
1276	recv->task_active = 1;
1277
1278	/* recv context registers are spaced 32 bytes apart */
1279	ctx = recv->task.context;
1280	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1281	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1282	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1283	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1284
1285	if (iso->channel == -1) {
1286		/* clear multi-channel selection mask */
1287		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1288		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1289	}
1290
1291	/* write the DMA program */
1292	ohci_iso_recv_program(iso);
1293
1294	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1295	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1296	       recv->dma_mode == BUFFER_FILL_MODE ?
1297	       "buffer-fill" : "packet-per-buffer",
1298	       iso->buf_size/PAGE_SIZE, iso->buf_size,
1299	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1300
1301	return 0;
1302
1303err:
1304	ohci_iso_recv_shutdown(iso);
1305	return ret;
1306}
1307
/* Stop reception: mask this context's interrupt, then halt its DMA.
 * Safe to call repeatedly; does not free any resources. */
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts for this IR context only */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA (clears the run bit and waits for the context to stop) */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
1318
/* Tear down the receive context: stop DMA and unregister the tasklet
 * (only if it was successfully started), then free the descriptor
 * program and the context itself.  Also used as the error-unwind path
 * of ohci_iso_recv_init(), so it must tolerate partial initialization. */
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	/* free the DMA program before the state struct that owns it */
	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
1333
/* Set up a "gapped" ring buffer DMA program: one descriptor per block,
 * each chained to the next via its branch address.  The chain is left
 * open at the final block (branch/Z = 0) so the controller stops there
 * until the reader releases blocks and re-links the ring. */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		/* command field: INPUT_MORE lets packets span descriptors
		 * (buffer-fill); INPUT_LAST ends a packet at each block */
		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride; /* low bits = request count */

		/* descriptor fields are little-endian per the OHCI spec */
		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride); /* resCount starts full */

		/* link the previous descriptor to this one (| 1 sets Z=1) */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
1386
1387/* listen or unlisten to a specific channel (multi-channel mode only) */
1388static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1389{
1390	struct ohci_iso_recv *recv = iso->hostdata;
1391	int reg, i;
1392
1393	if (channel < 32) {
1394		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1395		i = channel;
1396	} else {
1397		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1398		i = channel - 32;
1399	}
1400
1401	reg_write(recv->ohci, reg, (1 << i));
1402
1403	/* issue a dummy read to force all PCI writes to be posted immediately */
1404	mb();
1405	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1406}
1407
1408static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1409{
1410	struct ohci_iso_recv *recv = iso->hostdata;
1411	int i;
1412
1413	for (i = 0; i < 64; i++) {
1414		if (mask & (1ULL << i)) {
1415			if (i < 32)
1416				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1417			else
1418				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1419		} else {
1420			if (i < 32)
1421				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1422			else
1423				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1424		}
1425	}
1426
1427	/* issue a dummy read to force all PCI writes to be posted immediately */
1428	mb();
1429	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1430}
1431
/* Arm and start the IR DMA context.
 *
 * cycle   : isochronous cycle to begin reception at, or -1 for "now"
 * tag_mask: bitmask of iso tag values to accept (goes in ContextMatch)
 * sync    : sync field value to wait for, or -1 to start immediately
 *
 * Returns 0 on success, -1 if the context failed to enter the running
 * state.  Register-write ordering here matters; do not reorder. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* clear the whole ContextControl before reprogramming it */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1522
/* Return one DMA block to the ring for re-use.
 *
 * 'block' becomes the new open end of the descriptor chain (no branch,
 * interrupt enabled), and its predecessor is re-linked to it so the
 * controller can advance into it.  Finally the wake bit is set in case
 * the context went dead at the old end of the chain. */
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	/* predecessor wraps around the ring */
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	/* descriptors must hit memory before the controller is kicked */
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
1556
1557static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1558					     struct hpsb_iso_packet_info *info)
1559{
1560	int len;
1561
1562	/* release the memory where the packet was */
1563	len = info->len;
1564
1565	/* add the wasted space for padding to 4 bytes */
1566	if (len % 4)
1567		len += 4 - (len % 4);
1568
1569	/* add 8 bytes for the OHCI DMA data format overhead */
1570	len += 8;
1571
1572	recv->released_bytes += len;
1573
1574	/* have we released enough memory for one block? */
1575	while (recv->released_bytes > recv->buf_stride) {
1576		ohci_iso_recv_release_block(recv, recv->block_reader);
1577		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1578		recv->released_bytes -= recv->buf_stride;
1579	}
1580}
1581
1582static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1583{
1584	struct ohci_iso_recv *recv = iso->hostdata;
1585	if (recv->dma_mode == BUFFER_FILL_MODE) {
1586		ohci_iso_recv_bufferfill_release(recv, info);
1587	} else {
1588		ohci_iso_recv_release_block(recv, info - iso->infos);
1589	}
1590}
1591
/* Parse all packets from blocks that have been fully received
 * (buffer-fill mode).  Walks the ring from recv->dma_offset up to the
 * block the DMA engine is currently filling, delivering each packet to
 * hpsb_iso_packet_received() and handling wrap-around at every step.
 * Packet layout in the buffer: 4-byte header quadlet, padded payload,
 * 4-byte trailer quadlet carrying the timestamp. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely (guards against corrupted buffer contents) */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		/* dataLength is little-endian in the header quadlet */
		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
                           to the first DMA block - make the packet
                           contiguous by copying its "tail" into the
                           guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp (low 13 bits = cycle count) */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	/* wake any reader sleeping on this iso context */
	if (wake)
		hpsb_iso_wake(iso);
}
1697
1698static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1699{
1700	int loop;
1701	struct ti_ohci *ohci = recv->ohci;
1702
1703	/* loop over all blocks */
1704	for (loop = 0; loop < recv->nblocks; loop++) {
1705
1706		/* check block_dma to see if it's done */
1707		struct dma_cmd *im = &recv->block[recv->block_dma];
1708
1709		/* check the DMA descriptor for new writes to xferStatus */
1710		u16 xferstatus = le32_to_cpu(im->status) >> 16;
1711
1712		/* rescount is the number of bytes *remaining to be w…

Large files files are truncated, but you can click here to view the full file