
/drivers/net/cassini.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2

Note: this large file is truncated; the listing below ends partway through the source.

   1/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
   2 *
   3 * Copyright (C) 2004 Sun Microsystems Inc.
   4 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation; either version 2 of the
   9 * License, or (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  19 * 02111-1307, USA.
  20 *
  21 * This driver uses the sungem driver (c) David Miller
  22 * (davem@redhat.com) as its basis.
  23 *
  24 * The cassini chip has a number of features that distinguish it from
  25 * the gem chip:
  26 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
  27 *      load balancing (non-VLAN mode)
  28 *  batching of multiple packets
  29 *  multiple CPU dispatching
  30 *  page-based RX descriptor engine with separate completion rings
  31 *  Gigabit support (GMII and PCS interface)
  32 *  MIF link up/down detection works
  33 *
   34 * RX is handled by page-sized buffers that are attached as fragments to
  35 * the skb. here's what's done:
  36 *  -- driver allocates pages at a time and keeps reference counts
  37 *     on them.
  38 *  -- the upper protocol layers assume that the header is in the skb
  39 *     itself. as a result, cassini will copy a small amount (64 bytes)
  40 *     to make them happy.
  41 *  -- driver appends the rest of the data pages as frags to skbuffs
  42 *     and increments the reference count
  43 *  -- on page reclamation, the driver swaps the page with a spare page.
  44 *     if that page is still in use, it frees its reference to that page,
   45 *     and allocates a new page for use. otherwise, it just recycles
  46 *     the page.
  47 *
  48 * NOTE: cassini can parse the header. however, it's not worth it
  49 *       as long as the network stack requires a header copy.
  50 *
  51 * TX has 4 queues. currently these queues are used in a round-robin
  52 * fashion for load balancing. They can also be used for QoS. for that
  53 * to work, however, QoS information needs to be exposed down to the driver
  54 * level so that subqueues get targeted to particular transmit rings.
  55 * alternatively, the queues can be configured via use of the all-purpose
  56 * ioctl.
  57 *
  58 * RX DATA: the rx completion ring has all the info, but the rx desc
  59 * ring has all of the data. RX can conceivably come in under multiple
  60 * interrupts, but the INT# assignment needs to be set up properly by
  61 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  62 * that. also, the two descriptor rings are designed to distinguish between
  63 * encrypted and non-encrypted packets, but we use them for buffering
  64 * instead.
  65 *
  66 * by default, the selective clear mask is set up to process rx packets.
  67 */
  68
  69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  70
  71#include <linux/module.h>
  72#include <linux/kernel.h>
  73#include <linux/types.h>
  74#include <linux/compiler.h>
  75#include <linux/slab.h>
  76#include <linux/delay.h>
  77#include <linux/init.h>
  78#include <linux/vmalloc.h>
  79#include <linux/ioport.h>
  80#include <linux/pci.h>
  81#include <linux/mm.h>
  82#include <linux/highmem.h>
  83#include <linux/list.h>
  84#include <linux/dma-mapping.h>
  85
  86#include <linux/netdevice.h>
  87#include <linux/etherdevice.h>
  88#include <linux/skbuff.h>
  89#include <linux/ethtool.h>
  90#include <linux/crc32.h>
  91#include <linux/random.h>
  92#include <linux/mii.h>
  93#include <linux/ip.h>
  94#include <linux/tcp.h>
  95#include <linux/mutex.h>
  96#include <linux/firmware.h>
  97
  98#include <net/checksum.h>
  99
 100#include <asm/atomic.h>
 101#include <asm/system.h>
 102#include <asm/io.h>
 103#include <asm/byteorder.h>
 104#include <asm/uaccess.h>
 105
 106#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
 107#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
 108#define CAS_NCPUS            num_online_cpus()
 109
 110#define cas_skb_release(x)  netif_rx(x)
 111
 112/* select which firmware to use */
 113#define USE_HP_WORKAROUND
 114#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 115#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 116
 117#include "cassini.h"
 118
 119#define USE_TX_COMPWB      /* use completion writeback registers */
 120#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
 121#define USE_RX_BLANK       /* hw interrupt mitigation */
 122#undef USE_ENTROPY_DEV     /* don't test for entropy device */
 123
  124/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 125 * also, we need to make cp->lock finer-grained.
 126 */
 127#undef  USE_PCI_INTB
 128#undef  USE_PCI_INTC
 129#undef  USE_PCI_INTD
 130#undef  USE_QOS
 131
 132#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
 133
 134/* rx processing options */
 135#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
 136#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
 137#define RX_COPY_ALWAYS 0    /* if 0, use frags */
 138#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
 139#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
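
/* Illustrative sketch, not driver code -- every "example_" name is
 * hypothetical. It shows the RX_COPY_MIN scheme described above and in
 * the header comment: copy a small header into the skb's linear area to
 * keep the upper layers happy, then attach the rest of the page as a
 * fragment. The real receive path is more involved; this is only the
 * header-copy + frag idea.
 */
static struct sk_buff *example_rx_build_skb(struct net_device *dev,
					    struct page *page, void *va,
					    int off, int len)
{
	int hlen = min(len, RX_COPY_MIN);
	struct sk_buff *skb = netdev_alloc_skb(dev, hlen + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);				/* align the IP header */
	memcpy(skb_put(skb, hlen), va + off, hlen);	/* header copy */
	if (len > hlen) {
		get_page(page);		/* the frag holds its own reference */
		skb_fill_page_desc(skb, 0, page, off + hlen, len - hlen);
		skb->len      += len - hlen;
		skb->data_len  = len - hlen;
		skb->truesize += len - hlen;
	}
	return skb;
}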
 140
 141#define DRV_MODULE_NAME		"cassini"
 142#define DRV_MODULE_VERSION	"1.6"
 143#define DRV_MODULE_RELDATE	"21 May 2008"
 144
 145#define CAS_DEF_MSG_ENABLE	  \
 146	(NETIF_MSG_DRV		| \
 147	 NETIF_MSG_PROBE	| \
 148	 NETIF_MSG_LINK		| \
 149	 NETIF_MSG_TIMER	| \
 150	 NETIF_MSG_IFDOWN	| \
 151	 NETIF_MSG_IFUP		| \
 152	 NETIF_MSG_RX_ERR	| \
 153	 NETIF_MSG_TX_ERR)
 154
 155/* length of time before we decide the hardware is borked,
 156 * and dev->tx_timeout() should be called to fix the problem
 157 */
 158#define CAS_TX_TIMEOUT			(HZ)
 159#define CAS_LINK_TIMEOUT                (22*HZ/10)
 160#define CAS_LINK_FAST_TIMEOUT           (1)
 161
 162/* timeout values for state changing. these specify the number
 163 * of 10us delays to be used before giving up.
 164 */
 165#define STOP_TRIES_PHY 1000
 166#define STOP_TRIES     5000
 167
 168/* specify a minimum frame size to deal with some fifo issues
 169 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 170 *            2 * page_size - 0x50
 171 */
 172#define CAS_MIN_FRAME			97
 173#define CAS_1000MB_MIN_FRAME            255
 174#define CAS_MIN_MTU                     60
 175#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
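
/* Worked example of the bound above, with assumed values: for 8K pages
 * (cp->page_size == 0x2000) the formula gives 2 * 0x2000 - 0x50 = 16304,
 * so the min() clamps CAS_MAX_MTU to 9000; for 2K pages it gives
 * 2 * 0x800 - 0x50 = 4016, and that smaller value wins.
 */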
 176
 177#if 1
 178/*
 179 * Eliminate these and use separate atomic counters for each, to
 180 * avoid a race condition.
 181 */
 182#else
 183#define CAS_RESET_MTU                   1
 184#define CAS_RESET_ALL                   2
 185#define CAS_RESET_SPARE                 3
 186#endif
 187
 188static char version[] __devinitdata =
 189	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 190
 191static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
 192static int link_mode;
 193
 194MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 195MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 196MODULE_LICENSE("GPL");
 197MODULE_FIRMWARE("sun/cassini.bin");
 198module_param(cassini_debug, int, 0);
 199MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 200module_param(link_mode, int, 0);
 201MODULE_PARM_DESC(link_mode, "default link mode");
 202
 203/*
  204 * Workaround for a PCS bug in which the link goes down due to the chip
 205 * being confused and never showing a link status of "up."
 206 */
 207#define DEFAULT_LINKDOWN_TIMEOUT 5
 208/*
 209 * Value in seconds, for user input.
 210 */
 211static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
 212module_param(linkdown_timeout, int, 0);
 213MODULE_PARM_DESC(linkdown_timeout,
 214"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 215
 216/*
 217 * value in 'ticks' (units used by jiffies). Set when we init the
  218 * module because 'HZ' is actually a function call on some flavors of
 219 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 220 */
 221static int link_transition_timeout;
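
/* Hypothetical sketch of the conversion the comment above describes;
 * the real driver performs the equivalent assignment in its module
 * init path.
 */
static void example_init_link_transition_timeout(void)
{
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;	/* workaround disabled */
}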
 222
 223
 224
 225static u16 link_modes[] __devinitdata = {
 226	BMCR_ANENABLE,			 /* 0 : autoneg */
 227	0,				 /* 1 : 10bt half duplex */
 228	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
 229	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
 230	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
 231	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 232};
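
/* Assumed typical usage: the "link_mode" module parameter indexes the
 * table above, so e.g. "modprobe cassini link_mode=4" forces 100BASE-T
 * full duplex.
 */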
 233
 234static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
 235	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
 236	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 237	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
 238	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 239	{ 0, }
 240};
 241
 242MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
 243
 244static void cas_set_link_modes(struct cas *cp);
 245
 246static inline void cas_lock_tx(struct cas *cp)
 247{
 248	int i;
 249
 250	for (i = 0; i < N_TX_RINGS; i++)
 251		spin_lock(&cp->tx_lock[i]);
 252}
 253
 254static inline void cas_lock_all(struct cas *cp)
 255{
 256	spin_lock_irq(&cp->lock);
 257	cas_lock_tx(cp);
 258}
 259
 260/* WTZ: QA was finding deadlock problems with the previous
 261 * versions after long test runs with multiple cards per machine.
 262 * See if replacing cas_lock_all with safer versions helps. The
 263 * symptoms QA is reporting match those we'd expect if interrupts
 264 * aren't being properly restored, and we fixed a previous deadlock
 265 * with similar symptoms by using save/restore versions in other
 266 * places.
 267 */
 268#define cas_lock_all_save(cp, flags) \
 269do { \
 270	struct cas *xxxcp = (cp); \
 271	spin_lock_irqsave(&xxxcp->lock, flags); \
 272	cas_lock_tx(xxxcp); \
 273} while (0)
 274
 275static inline void cas_unlock_tx(struct cas *cp)
 276{
 277	int i;
 278
 279	for (i = N_TX_RINGS; i > 0; i--)
 280		spin_unlock(&cp->tx_lock[i - 1]);
 281}
 282
 283static inline void cas_unlock_all(struct cas *cp)
 284{
 285	cas_unlock_tx(cp);
 286	spin_unlock_irq(&cp->lock);
 287}
 288
 289#define cas_unlock_all_restore(cp, flags) \
 290do { \
 291	struct cas *xxxcp = (cp); \
 292	cas_unlock_tx(xxxcp); \
 293	spin_unlock_irqrestore(&xxxcp->lock, flags); \
 294} while (0)
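
/* Minimal usage sketch for the lock helpers above (hypothetical caller,
 * not driver code). The same flags variable must be passed to both
 * macros so the saved IRQ state is restored.
 */
static void example_with_all_locks(struct cas *cp)
{
	unsigned long flags;

	cas_lock_all_save(cp, flags);
	/* ... touch state guarded by cp->lock and the TX ring locks ... */
	cas_unlock_all_restore(cp, flags);
}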
 295
 296static void cas_disable_irq(struct cas *cp, const int ring)
 297{
 298	/* Make sure we won't get any more interrupts */
 299	if (ring == 0) {
 300		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
 301		return;
 302	}
 303
 304	/* disable completion interrupts and selectively mask */
 305	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 306		switch (ring) {
 307#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 308#ifdef USE_PCI_INTB
 309		case 1:
 310#endif
 311#ifdef USE_PCI_INTC
 312		case 2:
 313#endif
 314#ifdef USE_PCI_INTD
 315		case 3:
 316#endif
 317			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
 318			       cp->regs + REG_PLUS_INTRN_MASK(ring));
 319			break;
 320#endif
 321		default:
 322			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
 323			       REG_PLUS_INTRN_MASK(ring));
 324			break;
 325		}
 326	}
 327}
 328
 329static inline void cas_mask_intr(struct cas *cp)
 330{
 331	int i;
 332
 333	for (i = 0; i < N_RX_COMP_RINGS; i++)
 334		cas_disable_irq(cp, i);
 335}
 336
 337static void cas_enable_irq(struct cas *cp, const int ring)
 338{
 339	if (ring == 0) { /* all but TX_DONE */
 340		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
 341		return;
 342	}
 343
 344	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 345		switch (ring) {
 346#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 347#ifdef USE_PCI_INTB
 348		case 1:
 349#endif
 350#ifdef USE_PCI_INTC
 351		case 2:
 352#endif
 353#ifdef USE_PCI_INTD
 354		case 3:
 355#endif
 356			writel(INTRN_MASK_RX_EN, cp->regs +
 357			       REG_PLUS_INTRN_MASK(ring));
 358			break;
 359#endif
 360		default:
 361			break;
 362		}
 363	}
 364}
 365
 366static inline void cas_unmask_intr(struct cas *cp)
 367{
 368	int i;
 369
 370	for (i = 0; i < N_RX_COMP_RINGS; i++)
 371		cas_enable_irq(cp, i);
 372}
 373
 374static inline void cas_entropy_gather(struct cas *cp)
 375{
 376#ifdef USE_ENTROPY_DEV
 377	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 378		return;
 379
 380	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
 381			    readl(cp->regs + REG_ENTROPY_IV),
 382			    sizeof(uint64_t)*8);
 383#endif
 384}
 385
 386static inline void cas_entropy_reset(struct cas *cp)
 387{
 388#ifdef USE_ENTROPY_DEV
 389	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 390		return;
 391
 392	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
 393	       cp->regs + REG_BIM_LOCAL_DEV_EN);
 394	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
 395	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
 396
 397	/* if we read back 0x0, we don't have an entropy device */
 398	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
 399		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
 400#endif
 401}
 402
 403/* access to the phy. the following assumes that we've initialized the MIF to
 404 * be in frame rather than bit-bang mode
 405 */
 406static u16 cas_phy_read(struct cas *cp, int reg)
 407{
 408	u32 cmd;
 409	int limit = STOP_TRIES_PHY;
 410
 411	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
 412	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 413	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 414	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 415	writel(cmd, cp->regs + REG_MIF_FRAME);
 416
 417	/* poll for completion */
 418	while (limit-- > 0) {
 419		udelay(10);
 420		cmd = readl(cp->regs + REG_MIF_FRAME);
 421		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 422			return cmd & MIF_FRAME_DATA_MASK;
 423	}
 424	return 0xFFFF; /* -1 */
 425}
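
/* Hypothetical usage sketch: callers treat the all-ones return value as
 * a poll timeout. BMSR latches link-down, so a single read (as here)
 * may understate the current link state.
 */
static int example_phy_link_up(struct cas *cp)
{
	u16 bmsr = cas_phy_read(cp, MII_BMSR);

	if (bmsr == 0xFFFF)	/* read timed out */
		return 0;
	return (bmsr & BMSR_LSTATUS) != 0;
}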
 426
 427static int cas_phy_write(struct cas *cp, int reg, u16 val)
 428{
 429	int limit = STOP_TRIES_PHY;
 430	u32 cmd;
 431
 432	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
 433	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 434	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 435	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 436	cmd |= val & MIF_FRAME_DATA_MASK;
 437	writel(cmd, cp->regs + REG_MIF_FRAME);
 438
 439	/* poll for completion */
 440	while (limit-- > 0) {
 441		udelay(10);
 442		cmd = readl(cp->regs + REG_MIF_FRAME);
 443		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 444			return 0;
 445	}
 446	return -1;
 447}
 448
 449static void cas_phy_powerup(struct cas *cp)
 450{
 451	u16 ctl = cas_phy_read(cp, MII_BMCR);
 452
 453	if ((ctl & BMCR_PDOWN) == 0)
 454		return;
 455	ctl &= ~BMCR_PDOWN;
 456	cas_phy_write(cp, MII_BMCR, ctl);
 457}
 458
 459static void cas_phy_powerdown(struct cas *cp)
 460{
 461	u16 ctl = cas_phy_read(cp, MII_BMCR);
 462
 463	if (ctl & BMCR_PDOWN)
 464		return;
 465	ctl |= BMCR_PDOWN;
 466	cas_phy_write(cp, MII_BMCR, ctl);
 467}
 468
 469/* cp->lock held. note: the last put_page will free the buffer */
 470static int cas_page_free(struct cas *cp, cas_page_t *page)
 471{
 472	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
 473		       PCI_DMA_FROMDEVICE);
 474	__free_pages(page->buffer, cp->page_order);
 475	kfree(page);
 476	return 0;
 477}
 478
 479#ifdef RX_COUNT_BUFFERS
 480#define RX_USED_ADD(x, y)       ((x)->used += (y))
 481#define RX_USED_SET(x, y)       ((x)->used  = (y))
 482#else
 483#define RX_USED_ADD(x, y)
 484#define RX_USED_SET(x, y)
 485#endif
 486
 487/* local page allocation routines for the receive buffers. jumbo pages
 488 * require at least 8K contiguous and 8K aligned buffers.
 489 */
 490static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 491{
 492	cas_page_t *page;
 493
 494	page = kmalloc(sizeof(cas_page_t), flags);
 495	if (!page)
 496		return NULL;
 497
 498	INIT_LIST_HEAD(&page->list);
 499	RX_USED_SET(page, 0);
 500	page->buffer = alloc_pages(flags, cp->page_order);
 501	if (!page->buffer)
 502		goto page_err;
 503	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
 504				      cp->page_size, PCI_DMA_FROMDEVICE);
 505	return page;
 506
 507page_err:
 508	kfree(page);
 509	return NULL;
 510}
 511
 512/* initialize spare pool of rx buffers, but allocate during the open */
 513static void cas_spare_init(struct cas *cp)
 514{
  515	spin_lock(&cp->rx_inuse_lock);
 516	INIT_LIST_HEAD(&cp->rx_inuse_list);
 517	spin_unlock(&cp->rx_inuse_lock);
 518
 519	spin_lock(&cp->rx_spare_lock);
 520	INIT_LIST_HEAD(&cp->rx_spare_list);
 521	cp->rx_spares_needed = RX_SPARE_COUNT;
 522	spin_unlock(&cp->rx_spare_lock);
 523}
 524
 525/* used on close. free all the spare buffers. */
 526static void cas_spare_free(struct cas *cp)
 527{
 528	struct list_head list, *elem, *tmp;
 529
 530	/* free spare buffers */
 531	INIT_LIST_HEAD(&list);
 532	spin_lock(&cp->rx_spare_lock);
 533	list_splice_init(&cp->rx_spare_list, &list);
 534	spin_unlock(&cp->rx_spare_lock);
 535	list_for_each_safe(elem, tmp, &list) {
 536		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 537	}
 538
 539	INIT_LIST_HEAD(&list);
 540#if 1
 541	/*
 542	 * Looks like Adrian had protected this with a different
 543	 * lock than used everywhere else to manipulate this list.
 544	 */
 545	spin_lock(&cp->rx_inuse_lock);
 546	list_splice_init(&cp->rx_inuse_list, &list);
 547	spin_unlock(&cp->rx_inuse_lock);
 548#else
 549	spin_lock(&cp->rx_spare_lock);
 550	list_splice_init(&cp->rx_inuse_list, &list);
 551	spin_unlock(&cp->rx_spare_lock);
 552#endif
 553	list_for_each_safe(elem, tmp, &list) {
 554		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 555	}
 556}
 557
 558/* replenish spares if needed */
 559static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 560{
 561	struct list_head list, *elem, *tmp;
 562	int needed, i;
 563
 564	/* check inuse list. if we don't need any more free buffers,
  565	 * just free them
 566	 */
 567
 568	/* make a local copy of the list */
 569	INIT_LIST_HEAD(&list);
 570	spin_lock(&cp->rx_inuse_lock);
 571	list_splice_init(&cp->rx_inuse_list, &list);
 572	spin_unlock(&cp->rx_inuse_lock);
 573
 574	list_for_each_safe(elem, tmp, &list) {
 575		cas_page_t *page = list_entry(elem, cas_page_t, list);
 576
 577		/*
 578		 * With the lockless pagecache, cassini buffering scheme gets
 579		 * slightly less accurate: we might find that a page has an
 580		 * elevated reference count here, due to a speculative ref,
 581		 * and skip it as in-use. Ideally we would be able to reclaim
 582		 * it. However this would be such a rare case, it doesn't
 583		 * matter too much as we should pick it up the next time round.
 584		 *
 585		 * Importantly, if we find that the page has a refcount of 1
 586		 * here (our refcount), then we know it is definitely not inuse
 587		 * so we can reuse it.
 588		 */
 589		if (page_count(page->buffer) > 1)
 590			continue;
 591
 592		list_del(elem);
 593		spin_lock(&cp->rx_spare_lock);
 594		if (cp->rx_spares_needed > 0) {
 595			list_add(elem, &cp->rx_spare_list);
 596			cp->rx_spares_needed--;
 597			spin_unlock(&cp->rx_spare_lock);
 598		} else {
 599			spin_unlock(&cp->rx_spare_lock);
 600			cas_page_free(cp, page);
 601		}
 602	}
 603
 604	/* put any inuse buffers back on the list */
 605	if (!list_empty(&list)) {
 606		spin_lock(&cp->rx_inuse_lock);
 607		list_splice(&list, &cp->rx_inuse_list);
 608		spin_unlock(&cp->rx_inuse_lock);
 609	}
 610
 611	spin_lock(&cp->rx_spare_lock);
 612	needed = cp->rx_spares_needed;
 613	spin_unlock(&cp->rx_spare_lock);
 614	if (!needed)
 615		return;
 616
 617	/* we still need spares, so try to allocate some */
 618	INIT_LIST_HEAD(&list);
 619	i = 0;
 620	while (i < needed) {
 621		cas_page_t *spare = cas_page_alloc(cp, flags);
 622		if (!spare)
 623			break;
 624		list_add(&spare->list, &list);
 625		i++;
 626	}
 627
 628	spin_lock(&cp->rx_spare_lock);
 629	list_splice(&list, &cp->rx_spare_list);
 630	cp->rx_spares_needed -= i;
 631	spin_unlock(&cp->rx_spare_lock);
 632}
 633
 634/* pull a page from the list. */
 635static cas_page_t *cas_page_dequeue(struct cas *cp)
 636{
 637	struct list_head *entry;
 638	int recover;
 639
 640	spin_lock(&cp->rx_spare_lock);
 641	if (list_empty(&cp->rx_spare_list)) {
 642		/* try to do a quick recovery */
 643		spin_unlock(&cp->rx_spare_lock);
 644		cas_spare_recover(cp, GFP_ATOMIC);
 645		spin_lock(&cp->rx_spare_lock);
 646		if (list_empty(&cp->rx_spare_list)) {
 647			netif_err(cp, rx_err, cp->dev,
 648				  "no spare buffers available\n");
 649			spin_unlock(&cp->rx_spare_lock);
 650			return NULL;
 651		}
 652	}
 653
 654	entry = cp->rx_spare_list.next;
 655	list_del(entry);
 656	recover = ++cp->rx_spares_needed;
 657	spin_unlock(&cp->rx_spare_lock);
 658
 659	/* trigger the timer to do the recovery */
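	/* (the mask test below fires once every RX_SPARE_RECOVER_VAL
	 *  depletions, assuming RX_SPARE_RECOVER_VAL is a power of two) */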
 660	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
 661#if 1
 662		atomic_inc(&cp->reset_task_pending);
 663		atomic_inc(&cp->reset_task_pending_spare);
 664		schedule_work(&cp->reset_task);
 665#else
 666		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
 667		schedule_work(&cp->reset_task);
 668#endif
 669	}
 670	return list_entry(entry, cas_page_t, list);
 671}
 672
 673
 674static void cas_mif_poll(struct cas *cp, const int enable)
 675{
 676	u32 cfg;
 677
 678	cfg  = readl(cp->regs + REG_MIF_CFG);
 679	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 680
 681	if (cp->phy_type & CAS_PHY_MII_MDIO1)
 682		cfg |= MIF_CFG_PHY_SELECT;
 683
 684	/* poll and interrupt on link status change. */
 685	if (enable) {
 686		cfg |= MIF_CFG_POLL_EN;
 687		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
 688		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
 689	}
 690	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
 691	       cp->regs + REG_MIF_MASK);
 692	writel(cfg, cp->regs + REG_MIF_CFG);
 693}
 694
 695/* Must be invoked under cp->lock */
 696static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
 697{
 698	u16 ctl;
 699#if 1
 700	int lcntl;
 701	int changed = 0;
 702	int oldstate = cp->lstate;
 703	int link_was_not_down = !(oldstate == link_down);
 704#endif
 705	/* Setup link parameters */
 706	if (!ep)
 707		goto start_aneg;
 708	lcntl = cp->link_cntl;
 709	if (ep->autoneg == AUTONEG_ENABLE)
 710		cp->link_cntl = BMCR_ANENABLE;
 711	else {
 712		u32 speed = ethtool_cmd_speed(ep);
 713		cp->link_cntl = 0;
 714		if (speed == SPEED_100)
 715			cp->link_cntl |= BMCR_SPEED100;
 716		else if (speed == SPEED_1000)
 717			cp->link_cntl |= CAS_BMCR_SPEED1000;
 718		if (ep->duplex == DUPLEX_FULL)
 719			cp->link_cntl |= BMCR_FULLDPLX;
 720	}
 721#if 1
 722	changed = (lcntl != cp->link_cntl);
 723#endif
 724start_aneg:
 725	if (cp->lstate == link_up) {
 726		netdev_info(cp->dev, "PCS link down\n");
 727	} else {
 728		if (changed) {
 729			netdev_info(cp->dev, "link configuration changed\n");
 730		}
 731	}
 732	cp->lstate = link_down;
 733	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 734	if (!cp->hw_running)
 735		return;
 736#if 1
 737	/*
 738	 * WTZ: If the old state was link_up, we turn off the carrier
 739	 * to replicate everything we do elsewhere on a link-down
 740	 * event when we were already in a link-up state..
 741	 */
 742	if (oldstate == link_up)
 743		netif_carrier_off(cp->dev);
 744	if (changed  && link_was_not_down) {
 745		/*
 746		 * WTZ: This branch will simply schedule a full reset after
 747		 * we explicitly changed link modes in an ioctl. See if this
 748		 * fixes the link-problems we were having for forced mode.
 749		 */
 750		atomic_inc(&cp->reset_task_pending);
 751		atomic_inc(&cp->reset_task_pending_all);
 752		schedule_work(&cp->reset_task);
 753		cp->timer_ticks = 0;
 754		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 755		return;
 756	}
 757#endif
 758	if (cp->phy_type & CAS_PHY_SERDES) {
 759		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
 760
 761		if (cp->link_cntl & BMCR_ANENABLE) {
 762			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
 763			cp->lstate = link_aneg;
 764		} else {
 765			if (cp->link_cntl & BMCR_FULLDPLX)
 766				val |= PCS_MII_CTRL_DUPLEX;
 767			val &= ~PCS_MII_AUTONEG_EN;
 768			cp->lstate = link_force_ok;
 769		}
 770		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 771		writel(val, cp->regs + REG_PCS_MII_CTRL);
 772
 773	} else {
 774		cas_mif_poll(cp, 0);
 775		ctl = cas_phy_read(cp, MII_BMCR);
 776		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
 777			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
 778		ctl |= cp->link_cntl;
 779		if (ctl & BMCR_ANENABLE) {
 780			ctl |= BMCR_ANRESTART;
 781			cp->lstate = link_aneg;
 782		} else {
 783			cp->lstate = link_force_ok;
 784		}
 785		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 786		cas_phy_write(cp, MII_BMCR, ctl);
 787		cas_mif_poll(cp, 1);
 788	}
 789
 790	cp->timer_ticks = 0;
 791	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 792}
 793
 794/* Must be invoked under cp->lock. */
 795static int cas_reset_mii_phy(struct cas *cp)
 796{
 797	int limit = STOP_TRIES_PHY;
 798	u16 val;
 799
 800	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 801	udelay(100);
 802	while (--limit) {
 803		val = cas_phy_read(cp, MII_BMCR);
 804		if ((val & BMCR_RESET) == 0)
 805			break;
 806		udelay(10);
 807	}
 808	return limit <= 0;
 809}
 810
 811static int cas_saturn_firmware_init(struct cas *cp)
 812{
 813	const struct firmware *fw;
 814	const char fw_name[] = "sun/cassini.bin";
 815	int err;
 816
 817	if (PHY_NS_DP83065 != cp->phy_id)
 818		return 0;
 819
 820	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 821	if (err) {
 822		pr_err("Failed to load firmware \"%s\"\n",
 823		       fw_name);
 824		return err;
 825	}
 826	if (fw->size < 2) {
 827		pr_err("bogus length %zu in \"%s\"\n",
 828		       fw->size, fw_name);
 829		err = -EINVAL;
 830		goto out;
 831	}
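	/* as the two lines below show, the blob layout is: bytes 0-1
	 * hold the little-endian PHY load address, and the rest is the
	 * DP83065 firmware image itself */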
  832	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
 833	cp->fw_size = fw->size - 2;
 834	cp->fw_data = vmalloc(cp->fw_size);
 835	if (!cp->fw_data) {
 836		err = -ENOMEM;
 837		pr_err("\"%s\" Failed %d\n", fw_name, err);
 838		goto out;
 839	}
 840	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
 841out:
 842	release_firmware(fw);
 843	return err;
 844}
 845
 846static void cas_saturn_firmware_load(struct cas *cp)
 847{
 848	int i;
 849
 850	cas_phy_powerdown(cp);
 851
 852	/* expanded memory access mode */
 853	cas_phy_write(cp, DP83065_MII_MEM, 0x0);
 854
 855	/* pointer configuration for new firmware */
 856	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
 857	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
 858	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
 859	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
 860	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
 861	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
 862	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
 863	cas_phy_write(cp, DP83065_MII_REGD, 0x39);
 864
 865	/* download new firmware */
 866	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
 867	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
 868	for (i = 0; i < cp->fw_size; i++)
 869		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 870
 871	/* enable firmware */
 872	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
 873	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
 874}
 875
 876
 877/* phy initialization */
 878static void cas_phy_init(struct cas *cp)
 879{
 880	u16 val;
 881
 882	/* if we're in MII/GMII mode, set up phy */
 883	if (CAS_PHY_MII(cp->phy_type)) {
 884		writel(PCS_DATAPATH_MODE_MII,
 885		       cp->regs + REG_PCS_DATAPATH_MODE);
 886
 887		cas_mif_poll(cp, 0);
 888		cas_reset_mii_phy(cp); /* take out of isolate mode */
 889
 890		if (PHY_LUCENT_B0 == cp->phy_id) {
 891			/* workaround link up/down issue with lucent */
 892			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
 893			cas_phy_write(cp, MII_BMCR, 0x00f1);
 894			cas_phy_write(cp, LUCENT_MII_REG, 0x0);
 895
 896		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
 897			/* workarounds for broadcom phy */
 898			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
 899			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
 900			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
 901			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
 902			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
 903			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 904			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
 905			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 906			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
 907			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
 908			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
 909
 910		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
 911			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 912			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 913			if (val & 0x0080) {
 914				/* link workaround */
 915				cas_phy_write(cp, BROADCOM_MII_REG4,
 916					      val & ~0x0080);
 917			}
 918
 919		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
 920			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
 921			       SATURN_PCFG_FSI : 0x0,
 922			       cp->regs + REG_SATURN_PCFG);
 923
 924			/* load firmware to address 10Mbps auto-negotiation
 925			 * issue. NOTE: this will need to be changed if the
 926			 * default firmware gets fixed.
 927			 */
 928			if (PHY_NS_DP83065 == cp->phy_id) {
 929				cas_saturn_firmware_load(cp);
 930			}
 931			cas_phy_powerup(cp);
 932		}
 933
 934		/* advertise capabilities */
 935		val = cas_phy_read(cp, MII_BMCR);
 936		val &= ~BMCR_ANENABLE;
 937		cas_phy_write(cp, MII_BMCR, val);
 938		udelay(10);
 939
 940		cas_phy_write(cp, MII_ADVERTISE,
 941			      cas_phy_read(cp, MII_ADVERTISE) |
 942			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
 943			       ADVERTISE_100HALF | ADVERTISE_100FULL |
 944			       CAS_ADVERTISE_PAUSE |
 945			       CAS_ADVERTISE_ASYM_PAUSE));
 946
 947		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 948			/* make sure that we don't advertise half
 949			 * duplex to avoid a chip issue
 950			 */
 951			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
 952			val &= ~CAS_ADVERTISE_1000HALF;
 953			val |= CAS_ADVERTISE_1000FULL;
 954			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
 955		}
 956
 957	} else {
 958		/* reset pcs for serdes */
 959		u32 val;
 960		int limit;
 961
 962		writel(PCS_DATAPATH_MODE_SERDES,
 963		       cp->regs + REG_PCS_DATAPATH_MODE);
 964
 965		/* enable serdes pins on saturn */
 966		if (cp->cas_flags & CAS_FLAG_SATURN)
 967			writel(0, cp->regs + REG_SATURN_PCFG);
 968
 969		/* Reset PCS unit. */
 970		val = readl(cp->regs + REG_PCS_MII_CTRL);
 971		val |= PCS_MII_RESET;
 972		writel(val, cp->regs + REG_PCS_MII_CTRL);
 973
 974		limit = STOP_TRIES;
 975		while (--limit > 0) {
 976			udelay(10);
 977			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
 978			     PCS_MII_RESET) == 0)
 979				break;
 980		}
 981		if (limit <= 0)
 982			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
 983				    readl(cp->regs + REG_PCS_STATE_MACHINE));
 984
 985		/* Make sure PCS is disabled while changing advertisement
 986		 * configuration.
 987		 */
 988		writel(0x0, cp->regs + REG_PCS_CFG);
 989
 990		/* Advertise all capabilities except half-duplex. */
 991		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
 992		val &= ~PCS_MII_ADVERT_HD;
 993		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
 994			PCS_MII_ADVERT_ASYM_PAUSE);
 995		writel(val, cp->regs + REG_PCS_MII_ADVERT);
 996
 997		/* enable PCS */
 998		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
 999
1000		/* pcs workaround: enable sync detect */
1001		writel(PCS_SERDES_CTRL_SYNCD_EN,
1002		       cp->regs + REG_PCS_SERDES_CTRL);
1003	}
1004}
1005
1006
1007static int cas_pcs_link_check(struct cas *cp)
1008{
1009	u32 stat, state_machine;
1010	int retval = 0;
1011
1012	/* The link status bit latches on zero, so you must
1013	 * read it twice in such a case to see a transition
1014	 * to the link being up.
1015	 */
1016	stat = readl(cp->regs + REG_PCS_MII_STATUS);
1017	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1018		stat = readl(cp->regs + REG_PCS_MII_STATUS);
1019
1020	/* The remote-fault indication is only valid
1021	 * when autoneg has completed.
1022	 */
1023	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1024		     PCS_MII_STATUS_REMOTE_FAULT)) ==
1025	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1026		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1027
1028	/* work around link detection issue by querying the PCS state
1029	 * machine directly.
1030	 */
1031	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1032	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1033		stat &= ~PCS_MII_STATUS_LINK_STATUS;
1034	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1035		stat |= PCS_MII_STATUS_LINK_STATUS;
1036	}
1037
1038	if (stat & PCS_MII_STATUS_LINK_STATUS) {
1039		if (cp->lstate != link_up) {
1040			if (cp->opened) {
1041				cp->lstate = link_up;
1042				cp->link_transition = LINK_TRANSITION_LINK_UP;
1043
1044				cas_set_link_modes(cp);
1045				netif_carrier_on(cp->dev);
1046			}
1047		}
1048	} else if (cp->lstate == link_up) {
1049		cp->lstate = link_down;
1050		if (link_transition_timeout != 0 &&
1051		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1052		    !cp->link_transition_jiffies_valid) {
1053			/*
1054			 * force a reset, as a workaround for the
1055			 * link-failure problem. May want to move this to a
1056			 * point a bit earlier in the sequence. If we had
1057			 * generated a reset a short time ago, we'll wait for
1058			 * the link timer to check the status until a
 1059			 * timer expires (link_transition_jiffies_valid is
1060			 * true when the timer is running.)  Instead of using
1061			 * a system timer, we just do a check whenever the
1062			 * link timer is running - this clears the flag after
1063			 * a suitable delay.
1064			 */
1065			retval = 1;
1066			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1067			cp->link_transition_jiffies = jiffies;
1068			cp->link_transition_jiffies_valid = 1;
1069		} else {
1070			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1071		}
1072		netif_carrier_off(cp->dev);
1073		if (cp->opened)
1074			netif_info(cp, link, cp->dev, "PCS link down\n");
1075
1076		/* Cassini only: if you force a mode, there can be
1077		 * sync problems on link down. to fix that, the following
1078		 * things need to be checked:
1079		 * 1) read serialink state register
1080		 * 2) read pcs status register to verify link down.
1081		 * 3) if link down and serial link == 0x03, then you need
1082		 *    to global reset the chip.
1083		 */
1084		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1085			/* should check to see if we're in a forced mode */
1086			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1087			if (stat == 0x03)
1088				return 1;
1089		}
1090	} else if (cp->lstate == link_down) {
1091		if (link_transition_timeout != 0 &&
1092		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1093		    !cp->link_transition_jiffies_valid) {
1094			/* force a reset, as a workaround for the
1095			 * link-failure problem.  May want to move
1096			 * this to a point a bit earlier in the
1097			 * sequence.
1098			 */
1099			retval = 1;
1100			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1101			cp->link_transition_jiffies = jiffies;
1102			cp->link_transition_jiffies_valid = 1;
1103		} else {
1104			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1105		}
1106	}
1107
1108	return retval;
1109}
1110
1111static int cas_pcs_interrupt(struct net_device *dev,
1112			     struct cas *cp, u32 status)
1113{
1114	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1115
1116	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1117		return 0;
1118	return cas_pcs_link_check(cp);
1119}
1120
1121static int cas_txmac_interrupt(struct net_device *dev,
1122			       struct cas *cp, u32 status)
1123{
1124	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1125
1126	if (!txmac_stat)
1127		return 0;
1128
1129	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1130		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1131
1132	/* Defer timer expiration is quite normal,
1133	 * don't even log the event.
1134	 */
1135	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1136	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1137		return 0;
1138
1139	spin_lock(&cp->stat_lock[0]);
1140	if (txmac_stat & MAC_TX_UNDERRUN) {
1141		netdev_err(dev, "TX MAC xmit underrun\n");
1142		cp->net_stats[0].tx_fifo_errors++;
1143	}
1144
1145	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1146		netdev_err(dev, "TX MAC max packet size error\n");
1147		cp->net_stats[0].tx_errors++;
1148	}
1149
1150	/* The rest are all cases of one of the 16-bit TX
1151	 * counters expiring.
1152	 */
1153	if (txmac_stat & MAC_TX_COLL_NORMAL)
1154		cp->net_stats[0].collisions += 0x10000;
1155
1156	if (txmac_stat & MAC_TX_COLL_EXCESS) {
1157		cp->net_stats[0].tx_aborted_errors += 0x10000;
1158		cp->net_stats[0].collisions += 0x10000;
1159	}
1160
1161	if (txmac_stat & MAC_TX_COLL_LATE) {
1162		cp->net_stats[0].tx_aborted_errors += 0x10000;
1163		cp->net_stats[0].collisions += 0x10000;
1164	}
1165	spin_unlock(&cp->stat_lock[0]);
1166
1167	/* We do not keep track of MAC_TX_COLL_FIRST and
1168	 * MAC_TX_PEAK_ATTEMPTS events.
1169	 */
1170	return 0;
1171}
1172
1173static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1174{
1175	cas_hp_inst_t *inst;
1176	u32 val;
1177	int i;
1178
1179	i = 0;
1180	while ((inst = firmware) && inst->note) {
1181		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1182
1183		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1184		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1185		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1186
1187		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1188		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1189		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1190		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1191		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1192		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1193		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1194		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1195
1196		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1197		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1198		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1199		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1200		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1201		++firmware;
1202		++i;
1203	}
1204}
1205
1206static void cas_init_rx_dma(struct cas *cp)
1207{
1208	u64 desc_dma = cp->block_dvma;
1209	u32 val;
1210	int i, size;
1211
1212	/* rx free descriptors */
1213	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1214	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1215	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1216	if ((N_RX_DESC_RINGS > 1) &&
1217	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
1218		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1219	writel(val, cp->regs + REG_RX_CFG);
1220
1221	val = (unsigned long) cp->init_rxds[0] -
1222		(unsigned long) cp->init_block;
1223	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1224	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1225	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1226
1227	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1228		/* rx desc 2 is for IPSEC packets. however,
 1229		 * we don't use it for that purpose.
1230		 */
1231		val = (unsigned long) cp->init_rxds[1] -
1232			(unsigned long) cp->init_block;
1233		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1234		writel((desc_dma + val) & 0xffffffff, cp->regs +
1235		       REG_PLUS_RX_DB1_LOW);
1236		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1237		       REG_PLUS_RX_KICK1);
1238	}
1239
1240	/* rx completion registers */
1241	val = (unsigned long) cp->init_rxcs[0] -
1242		(unsigned long) cp->init_block;
1243	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1244	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1245
1246	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1247		/* rx comp 2-4 */
1248		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1249			val = (unsigned long) cp->init_rxcs[i] -
1250				(unsigned long) cp->init_block;
1251			writel((desc_dma + val) >> 32, cp->regs +
1252			       REG_PLUS_RX_CBN_HI(i));
1253			writel((desc_dma + val) & 0xffffffff, cp->regs +
1254			       REG_PLUS_RX_CBN_LOW(i));
1255		}
1256	}
1257
1258	/* read selective clear regs to prevent spurious interrupts
1259	 * on reset because complete == kick.
1260	 * selective clear set up to prevent interrupts on resets
1261	 */
1262	readl(cp->regs + REG_INTR_STATUS_ALIAS);
1263	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1264	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1265		for (i = 1; i < N_RX_COMP_RINGS; i++)
1266			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1267
1268		/* 2 is different from 3 and 4 */
1269		if (N_RX_COMP_RINGS > 1)
1270			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1271			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1272
1273		for (i = 2; i < N_RX_COMP_RINGS; i++)
1274			writel(INTR_RX_DONE_ALT,
1275			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1276	}
1277
1278	/* set up pause thresholds */
1279	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
1280			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1281	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1282			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1283	writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1284
1285	/* zero out dma reassembly buffers */
1286	for (i = 0; i < 64; i++) {
1287		writel(i, cp->regs + REG_RX_TABLE_ADDR);
1288		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1289		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1290		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1291	}
1292
1293	/* make sure address register is 0 for normal operation */
1294	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1295	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1296
1297	/* interrupt mitigation */
1298#ifdef USE_RX_BLANK
1299	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1300	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1301	writel(val, cp->regs + REG_RX_BLANK);
1302#else
1303	writel(0x0, cp->regs + REG_RX_BLANK);
1304#endif
1305
1306	/* interrupt generation as a function of low water marks for
1307	 * free desc and completion entries. these are used to trigger
1308	 * housekeeping for rx descs. we don't use the free interrupt
1309	 * as it's not very useful
1310	 */
1311	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1312	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1313	writel(val, cp->regs + REG_RX_AE_THRESH);
1314	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1315		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1316		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1317	}
1318
1319	/* Random early detect registers. useful for congestion avoidance.
1320	 * this should be tunable.
1321	 */
1322	writel(0x0, cp->regs + REG_RX_RED);
1323
1324	/* receive page sizes. default == 2K (0x800) */
1325	val = 0;
1326	if (cp->page_size == 0x1000)
1327		val = 0x1;
1328	else if (cp->page_size == 0x2000)
1329		val = 0x2;
1330	else if (cp->page_size == 0x4000)
1331		val = 0x3;
1332
1333	/* round mtu + offset. constrain to page size. */
1334	size = cp->dev->mtu + 64;
1335	if (size > cp->page_size)
1336		size = cp->page_size;
1337
1338	if (size <= 0x400)
1339		i = 0x0;
1340	else if (size <= 0x800)
1341		i = 0x1;
1342	else if (size <= 0x1000)
1343		i = 0x2;
1344	else
1345		i = 0x3;
1346
1347	cp->mtu_stride = 1 << (i + 10);
1348	val  = CAS_BASE(RX_PAGE_SIZE, val);
1349	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1350	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1351	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1352	writel(val, cp->regs + REG_RX_PAGE_SIZE);
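
	/* worked example of the encoding above, with assumed values:
	 * for 8K pages (val = 0x2) and dev->mtu == 1500, size = 1564
	 * falls in the <= 0x800 bucket, so i = 1,
	 * mtu_stride = 1 << 11 = 2048, and MTU_COUNT becomes
	 * 0x2000 >> 11 = 4 strides per page.
	 */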
1353
1354	/* enable the header parser if desired */
1355	if (CAS_HP_FIRMWARE == cas_prog_null)
1356		return;
1357
1358	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1359	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1360	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1361	writel(val, cp->regs + REG_HP_CFG);
1362}
1363
1364static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1365{
1366	memset(rxc, 0, sizeof(*rxc));
1367	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1368}
1369
1370/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1371 * flipping is protected by the fact that the chip will not
1372 * hand back the same page index while it's being processed.
1373 */
1374static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1375{
1376	cas_page_t *page = cp->rx_pages[1][index];
1377	cas_page_t *new;
1378
1379	if (page_count(page->buffer) == 1)
1380		return page;
1381
1382	new = cas_page_dequeue(cp);
1383	if (new) {
1384		spin_lock(&cp->rx_inuse_lock);
1385		list_add(&page->list, &cp->rx_inuse_list);
1386		spin_unlock(&cp->rx_inuse_lock);
1387	}
1388	return new;
1389}
1390
1391/* this needs to be changed if we actually use the ENC RX DESC ring */
1392static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1393				 const int index)
1394{
1395	cas_page_t **page0 = cp->rx_pages[0];
1396	cas_page_t **page1 = cp->rx_pages[1];
1397
1398	/* swap if buffer is in use */
1399	if (page_count(page0[index]->buffer) > 1) {
1400		cas_page_t *new = cas_page_spare(cp, index);
1401		if (new) {
1402			page1[index] = page0[index];
1403			page0[index] = new;
1404		}
1405	}
1406	RX_USED_SET(page0[index], 0);
1407	return page0[index];
1408}
1409
1410static void cas_clean_rxds(struct cas *cp)
1411{
1412	/* only clean ring 0 as ring 1 is used for spare buffers */
 1413	struct cas_rx_desc *rxd = cp->init_rxds[0];
1414	int i, size;
1415
1416	/* release all rx flows */
1417	for (i = 0; i < N_RX_FLOWS; i++) {
1418		struct sk_buff *skb;
1419		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1420			cas_skb_release(skb);
1421		}
1422	}
1423
1424	/* initialize descriptors */
1425	size = RX_DESC_RINGN_SIZE(0);
1426	for (i = 0; i < size; i++) {
1427		cas_page_t *page = cas_page_swap(cp, 0, i);
1428		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1429		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1430					    CAS_BASE(RX_INDEX_RING, 0));
1431	}
1432
1433	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1434	cp->rx_last[0] = 0;
1435	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1436}
1437
1438static void cas_clean_rxcs(struct cas *cp)
1439{
1440	int i, j;
1441
1442	/* take ownership of rx comp descriptors */
1443	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1444	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1445	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1446		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1447		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1448			cas_rxc_init(rxc + j);
1449		}
1450	}
1451}
1452
1453#if 0
1454/* When we get a RX fifo overflow, the RX unit is probably hung
1455 * so we do the following.
1456 *
1457 * If any part of the reset goes wrong, we return 1 and that causes the
1458 * whole chip to be reset.
1459 */
1460static int cas_rxmac_reset(struct cas *cp)
1461{
1462	struct net_device *dev = cp->dev;
1463	int limit;
1464	u32 val;
1465
1466	/* First, reset MAC RX. */
1467	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1468	for (limit = 0; limit < STOP_TRIES; limit++) {
1469		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1470			break;
1471		udelay(10);
1472	}
1473	if (limit == STOP_TRIES) {
1474		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1475		return 1;
1476	}
1477
1478	/* Second, disable RX DMA. */
1479	writel(0, cp->regs + REG_RX_CFG);
1480	for (limit = 0; limit < STOP_TRIES; limit++) {
1481		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1482			break;
1483		udelay(10);
1484	}
1485	if (limit == STOP_TRIES) {
1486		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1487		return 1;
1488	}
1489
1490	mdelay(5);
1491
1492	/* Execute RX reset command. */
1493	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1494	for (limit = 0; limit < STOP_TRIES; limit++) {
1495		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1496			break;
1497		udelay(10);
1498	}
1499	if (limit == STOP_TRIES) {
1500		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1501		return 1;
1502	}
1503
1504	/* reset driver rx state */
1505	cas_clean_rxds(cp);
1506	cas_clean_rxcs(cp);
1507
1508	/* Now, reprogram the rest of RX unit. */
1509	cas_init_rx_dma(cp);
1510
1511	/* re-enable */
1512	val = readl(cp->regs + REG_RX_CFG);
1513	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1514	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1515	val = readl(cp->regs + REG_MAC_RX_CFG);
1516	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1517	return 0;
1518}
1519#endif
1520
1521static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1522			       u32 status)
1523{
1524	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1525
1526	if (!stat)
1527		return 0;
1528
1529	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1530
1531	/* these are all rollovers */
1532	spin_lock(&cp->stat_lock[0]);
1533	if (stat & MAC_RX_ALIGN_ERR)
1534		cp->net_stats[0].rx_frame_errors += 0x10000;
1535
1536	if (stat & MAC_RX_CRC_ERR)
1537		cp->net_stats[0].rx_crc_errors += 0x10000;
1538
1539	if (stat & MAC_RX_LEN_ERR)
1540		cp->net_stats[0].rx_length_errors += 0x10000;
1541
1542	if (stat & MAC_RX_OVERFLOW) {
1543		cp->net_stats[0].rx_over_errors++;
1544		cp->net_stats[0].rx_fifo_errors++;
1545	}
1546
1547	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1548	 * events.
1549	 */
1550	spin_unlock(&cp->stat_lock[0]);
1551	return 0;
1552}
1553
1554static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1555			     u32 status)
1556{
1557	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1558
1559	if (!stat)
1560		return 0;
1561
1562	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1563		     "mac interrupt, stat: 0x%x\n", stat);
1564
1565	/* This interrupt is just for pause frame and pause
1566	 * tracking.  It is useful for diagnostics and debug
1567	 * but probably by default we will mask these events.
1568	 */
1569	if (stat & MAC_CTRL_PAUSE_STATE)
1570		cp->pause_entered++;
1571
1572	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1573		cp->pause_last_time_recvd = (stat >> 16);
1574
1575	return 0;
1576}
1577
1578
1579/* Must be invoked under cp->lock. */
1580static inline int cas_mdio_link_not_up(struct cas *cp)
1581{
1582	u16 val;
1583
1584	switch (cp->lstate) {
1585	case link_force_ret:
1586		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1587		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1588		cp->timer_ticks = 5;
1589		cp->lstate = link_force_ok;
1590		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1591		break;
1592
1593	case link_aneg:
1594		val = cas_phy_read(cp, MII_BMCR);
1595
1596		/* Try forced modes. we try things in the following order:
1597		 * 1000 full -> 100 full/half -> 10 half
1598		 */
1599		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1600		val |= BMCR_FULLDPLX;
1601		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1602			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1603		cas_phy_write(cp, MII_BMCR, val);
1604		cp->timer_ticks = 5;
1605		cp->lstate = link_force_try;
1606		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1607		break;
1608
1609	case link_force_try:
1610		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1611		val = cas_phy_read(cp, MII_BMCR);
1612		cp->timer_ticks = 5;
1613		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1614			val &= ~CAS_BMCR_SPEED1000;
1615			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1616			cas_phy_write(cp, MII_BMCR, val);
1617			break;
1618		}
1619
1620		if (val & BMCR_SPEED100) {
1621			if (val & BMCR_FULLDPLX) /* fd failed */
1622				val &= ~BMCR_FULLDPLX;
1623			else { /* 100Mbps failed */
1624				val &= ~BMCR_SPEED100;
1625			}
1626			cas_phy_write(cp, MII_BMCR, val);
1627			break;
1628		}
1629	default:
1630		break;
1631	}
1632	return 0;
1633}
1634
1635
1636/* must be invoked with cp->lock held */
1637static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1638{
1639	int restart;
1640
1641	if (bmsr & BMSR_LSTATUS) {
1642		/* Ok, here we got a link. If we had it due to a forced
1643		 * fallback, and we were configured for autoneg, we
1644		 * retry a short autoneg pass. If you know your hub is
1645		 * broken, use ethtool ;)
1646		 */
1647		if ((cp->lstate == link_force_try) &&
1648		    (cp->link_cntl & BMCR_ANENABLE)) {
1649			cp->lstate = link_force_ret;
1650			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1651			cas_mif_poll(cp, 0);
1652			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1653			cp->timer_ticks = 5;
1654			if (cp->opened)
1655				netif_info(cp, link, cp->dev,
1656					   "Got link after fallback, retrying autoneg once...\n");
1657			cas_phy_write(cp, MII_BMCR,
1658				      cp->link_fcntl | BMCR_ANENABLE |
1659				      BMCR_ANRESTART);
1660			cas_mif_poll(cp, 1);
1661
1662		} else if (cp->lstate != link_up) {
1663			cp->lstate = link_up;
1664			cp->link_transition = LINK_TRANSITION_LINK_UP;
1665
1666			if (cp->opened) {
1667				cas_set_link_modes(cp);
1668				netif_carrier_on(cp->dev);
1669			}
1670		}
1671		return 0;
1672	}
1673
1674	/* link not up. if the link was previously up, we restart the
1675	 * whole process
1676	 */
1677	restart = 0;
1678	if (cp->lstate == link_up) {
1679		cp->lstate = link_down;
1680		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1681
1682		netif_carrier_off(cp->dev);
1683		if (cp->opened)
1684			netif_info(cp, link, cp->dev, "Link down\n");
1685		restart = 1;
1686
1687	} else if (++cp->timer_ticks > 10)
1688		cas_mdio_link_not_up(cp);
1689
1690	return restart;
1691}
1692
1693static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1694			     u32 status)
1695{
1696	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1697	u16 bmsr;
1698
1699	/* check for a link change */
1700	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1701		return 0;
1702
1703	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1704	return cas_mii_link_check(cp, bmsr);
1705}
1706
1707static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1708			     u32 status)
1709{
1710	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1711
1712	if (!stat)
1713		return 0;
1714
1715	netdev_err(dev, "PCI error [%04x:%04x]",
1716		   stat, readl(cp->regs + REG_BIM_DIAG));
1717
1718	/* cassini+ has this reserved */
1719	if ((stat & PCI_ERR_BADACK) &&
1720	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1721		pr_cont(" <No ACK64# during ABS64 cycle>");
1722
1723	if (stat & PCI_ERR_DTRTO)
1724		pr_cont(" <Delayed transaction timeout>");
1725	if (stat & PCI_ERR_OTHER)
1726		pr_cont(" <other>");
1727	if (stat & PCI_ERR_BIM_DMA_WRITE)
1728		pr_cont(" <BIM DMA 0 write req>");
1729	if (stat & PCI_ERR_BIM_DMA_READ)
1730		pr_cont(" <BIM DMA 0 read req>");
1731	pr_cont("\n");
1732
1733	if (stat & PCI_ERR_OTHER) {
1734		u16 cfg;
1735
1736		/* Interrogate PCI config space for the
1737		 * true cause.
1738		 */
1739		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1740		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1741		if (cfg & PCI_STATUS_PARITY)
1742			netdev_err(dev, "PCI parity error detected\n");
1743		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1744			netdev_err(dev, "PCI target abort\n");
1745		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1746			netdev_err(dev, "PCI master acks target abort\n");
1747		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1748			netdev_err(dev, "PCI master abort\n");
1749		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1750			netdev_err(dev, "PCI system error SERR#\n");
1751		if (cfg & PCI_STATUS_DETECTED_PARITY)
1752			netdev_err(dev, "PCI parity error\n");
1753
1754		/* Write the error bits back to clear them. */
1755		cfg &= (PCI_STATUS_PARITY |
1756			PCI_STATUS_SIG_TARGET_ABORT |
1757			PCI_STATUS_REC_TARGET_ABORT |
1758			PCI_STATUS_REC_MASTER_ABORT |
1759			PCI_STATUS_SIG_SYSTEM_ERROR |
1760			PCI_STATUS_DETECTED_PARITY);
1761		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1762	}
1763
1764	/* For all PCI errors, we should reset the chip. */
1765	return 1;
1766}
1767
1768/* All non-normal interrupt conditions get serviced here.
1769 * Returns non-zero if we should just exit the interrupt
1770 * handler right now (ie. if we reset the card which invalidates
1771 * all of the other original irq status bits).
1772 */
1773static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1774			    u32 status)
1775{
1776	if (status & INTR_RX_TAG_ERROR) {
1777		/* corrupt …
