PageRenderTime 111ms CodeModel.GetById 14ms app.highlight 82ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/net/ethernet/atheros/atlx/atl1.c

http://github.com/mirrors/linux
C | 3643 lines | 2638 code | 466 blank | 539 comment | 381 complexity | 0e7b4c625f87d6dde0202f8ca5339d63 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
   4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
   5 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
   6 *
   7 * Derived from Intel e1000 driver
   8 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   9 *
  10 * Contact Information:
  11 * Xiong Huang <xiong.huang@atheros.com>
  12 * Jie Yang <jie.yang@atheros.com>
  13 * Chris Snook <csnook@redhat.com>
  14 * Jay Cliburn <jcliburn@gmail.com>
  15 *
  16 * This version is adapted from the Attansic reference driver.
  17 *
  18 * TODO:
  19 * Add more ethtool functions.
  20 * Fix abstruse irq enable/disable condition described here:
  21 *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
  22 *
  23 * NEEDS TESTING:
  24 * VLAN
  25 * multicast
  26 * promiscuous mode
  27 * interrupt coalescing
  28 * SMP torture testing
  29 */
  30
  31#include <linux/atomic.h>
  32#include <asm/byteorder.h>
  33
  34#include <linux/compiler.h>
  35#include <linux/crc32.h>
  36#include <linux/delay.h>
  37#include <linux/dma-mapping.h>
  38#include <linux/etherdevice.h>
  39#include <linux/hardirq.h>
  40#include <linux/if_ether.h>
  41#include <linux/if_vlan.h>
  42#include <linux/in.h>
  43#include <linux/interrupt.h>
  44#include <linux/ip.h>
  45#include <linux/irqflags.h>
  46#include <linux/irqreturn.h>
  47#include <linux/jiffies.h>
  48#include <linux/mii.h>
  49#include <linux/module.h>
  50#include <linux/net.h>
  51#include <linux/netdevice.h>
  52#include <linux/pci.h>
  53#include <linux/pci_ids.h>
  54#include <linux/pm.h>
  55#include <linux/skbuff.h>
  56#include <linux/slab.h>
  57#include <linux/spinlock.h>
  58#include <linux/string.h>
  59#include <linux/tcp.h>
  60#include <linux/timer.h>
  61#include <linux/types.h>
  62#include <linux/workqueue.h>
  63
  64#include <net/checksum.h>
  65
  66#include "atl1.h"
  67
  68MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
  69	      "Chris Snook <csnook@redhat.com>, "
  70	      "Jay Cliburn <jcliburn@gmail.com>");
  71MODULE_LICENSE("GPL");
  72
  73/* Temporary hack for merging atl1 and atl2 */
  74#include "atlx.c"
  75
  76static const struct ethtool_ops atl1_ethtool_ops;
  77
  78/*
  79 * This is the only thing that needs to be changed to adjust the
  80 * maximum number of ports that the driver can manage.
  81 */
  82#define ATL1_MAX_NIC 4
  83
  84#define OPTION_UNSET    -1
  85#define OPTION_DISABLED 0
  86#define OPTION_ENABLED  1
  87
  88#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
  89
  90/*
  91 * Interrupt Moderate Timer in units of 2 us
  92 *
  93 * Valid Range: 10-65535
  94 *
  95 * Default Value: 100 (200us)
  96 */
  97static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
  98static unsigned int num_int_mod_timer;
  99module_param_array_named(int_mod_timer, int_mod_timer, int,
 100	&num_int_mod_timer, 0);
 101MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
 102
 103#define DEFAULT_INT_MOD_CNT	100	/* 200us */
 104#define MAX_INT_MOD_CNT		65000
 105#define MIN_INT_MOD_CNT		50
 106
/*
 * Descriptor for one module-parameter validity check, consumed by
 * atl1_validate_option().  Exactly one member of @arg is meaningful,
 * selected by @type.
 */
struct atl1_option {
	enum { enable_option, range_option, list_option } type;
	char *name;	/* human-readable option name used in log messages */
	char *err;	/* suffix appended to the "invalid value" message */
	int def;	/* default applied when the option is unset or invalid */
	union {
		struct {	/* range_option info */
			int min;	/* inclusive lower bound */
			int max;	/* inclusive upper bound */
		} r;
		struct {	/* list_option info */
			int nr;		/* number of entries in @p */
			struct atl1_opt_list {
				int i;		/* accepted value */
				char *str;	/* message logged on match ("" = silent) */
			} *p;
		} l;
	} arg;
};
 126
 127static int atl1_validate_option(int *value, struct atl1_option *opt,
 128				struct pci_dev *pdev)
 129{
 130	if (*value == OPTION_UNSET) {
 131		*value = opt->def;
 132		return 0;
 133	}
 134
 135	switch (opt->type) {
 136	case enable_option:
 137		switch (*value) {
 138		case OPTION_ENABLED:
 139			dev_info(&pdev->dev, "%s enabled\n", opt->name);
 140			return 0;
 141		case OPTION_DISABLED:
 142			dev_info(&pdev->dev, "%s disabled\n", opt->name);
 143			return 0;
 144		}
 145		break;
 146	case range_option:
 147		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
 148			dev_info(&pdev->dev, "%s set to %i\n", opt->name,
 149				*value);
 150			return 0;
 151		}
 152		break;
 153	case list_option:{
 154			int i;
 155			struct atl1_opt_list *ent;
 156
 157			for (i = 0; i < opt->arg.l.nr; i++) {
 158				ent = &opt->arg.l.p[i];
 159				if (*value == ent->i) {
 160					if (ent->str[0] != '\0')
 161						dev_info(&pdev->dev, "%s\n",
 162							ent->str);
 163					return 0;
 164				}
 165			}
 166		}
 167		break;
 168
 169	default:
 170		break;
 171	}
 172
 173	dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
 174		opt->name, *value, opt->err);
 175	*value = opt->def;
 176	return -1;
 177}
 178
 179/**
 180 * atl1_check_options - Range Checking for Command Line Parameters
 181 * @adapter: board private structure
 182 *
 183 * This routine checks all command line parameters for valid user
 184 * input.  If an invalid value is given, or if no user specified
 185 * value exists, a default value is used.  The final value is stored
 186 * in a variable in the adapter structure.
 187 */
 188static void atl1_check_options(struct atl1_adapter *adapter)
 189{
 190	struct pci_dev *pdev = adapter->pdev;
 191	int bd = adapter->bd_number;
 192	if (bd >= ATL1_MAX_NIC) {
 193		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
 194		dev_notice(&pdev->dev, "using defaults for all values\n");
 195	}
 196	{			/* Interrupt Moderate Timer */
 197		struct atl1_option opt = {
 198			.type = range_option,
 199			.name = "Interrupt Moderator Timer",
 200			.err = "using default of "
 201				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
 202			.def = DEFAULT_INT_MOD_CNT,
 203			.arg = {.r = {.min = MIN_INT_MOD_CNT,
 204					.max = MAX_INT_MOD_CNT} }
 205		};
 206		int val;
 207		if (num_int_mod_timer > bd) {
 208			val = int_mod_timer[bd];
 209			atl1_validate_option(&val, &opt, pdev);
 210			adapter->imt = (u16) val;
 211		} else
 212			adapter->imt = (u16) (opt.def);
 213	}
 214}
 215
/*
 * atl1_pci_tbl - PCI Device ID Table
 * Matches only the Attansic L1 device; the zeroed entry terminates the
 * table for the PCI core.
 */
static const struct pci_device_id atl1_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
 225
 226static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 227	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
 228
 229static int debug = -1;
 230module_param(debug, int, 0);
 231MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
 232
/*
 * atl1_reset_hw - reset the transmit and receive units
 * hw - Struct containing variables accessed by shared code
 *
 * Issues a MAC soft reset, re-enables the PHY, then polls the idle-status
 * register until all hardware modules report idle.
 *
 * return : 0 on success, or the non-zero idle status value if some module
 * is still busy after the poll window (error).
 */
static s32 atl1_reset_hw(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	u32 icr;
	int i;

	/*
	 * Clear Interrupt mask to stop board from generating
	 * interrupts & Clear any pending interrupt events
	 */
	/*
	 * atlx_irq_disable(adapter);
	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
	 */

	/*
	 * Issue Soft Reset to the MAC.  This will reset the chip's
	 * transmit, receive and DMA units.  It will not affect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.  The read-back
	 * after each write flushes the posted write to the device.
	 */
	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
	ioread32(hw->hw_addr + REG_MASTER_CTRL);

	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
	ioread16(hw->hw_addr + REG_PHY_ENABLE);

	/* delay about 1ms */
	msleep(1);

	/* Wait at least 10ms for All module to be Idle */
	for (i = 0; i < 10; i++) {
		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
		if (!icr)
			break;
		/* delay 1 ms */
		msleep(1);
		/* FIXME: still the right way to do this? */
		cpu_relax();
	}

	/* Non-zero status here means some unit never went idle. */
	if (icr) {
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
		return icr;
	}

	return 0;
}
 288
 289/* function about EEPROM
 290 *
 291 * check_eeprom_exist
 292 * return 0 if eeprom exist
 293 */
 294static int atl1_check_eeprom_exist(struct atl1_hw *hw)
 295{
 296	u32 value;
 297	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
 298	if (value & SPI_FLASH_CTRL_EN_VPD) {
 299		value &= ~SPI_FLASH_CTRL_EN_VPD;
 300		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
 301	}
 302
 303	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
 304	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
 305}
 306
 307static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
 308{
 309	int i;
 310	u32 control;
 311
 312	if (offset & 3)
 313		/* address do not align */
 314		return false;
 315
 316	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
 317	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
 318	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
 319	ioread32(hw->hw_addr + REG_VPD_CAP);
 320
 321	for (i = 0; i < 10; i++) {
 322		msleep(2);
 323		control = ioread32(hw->hw_addr + REG_VPD_CAP);
 324		if (control & VPD_CAP_VPD_FLAG)
 325			break;
 326	}
 327	if (control & VPD_CAP_VPD_FLAG) {
 328		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
 329		return true;
 330	}
 331	/* timeout */
 332	return false;
 333}
 334
 335/*
 336 * Reads the value from a PHY register
 337 * hw - Struct containing variables accessed by shared code
 338 * reg_addr - address of the PHY register to read
 339 */
 340static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
 341{
 342	u32 val;
 343	int i;
 344
 345	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
 346		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
 347		MDIO_CLK_SEL_SHIFT;
 348	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
 349	ioread32(hw->hw_addr + REG_MDIO_CTRL);
 350
 351	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
 352		udelay(2);
 353		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
 354		if (!(val & (MDIO_START | MDIO_BUSY)))
 355			break;
 356	}
 357	if (!(val & (MDIO_START | MDIO_BUSY))) {
 358		*phy_data = (u16) val;
 359		return 0;
 360	}
 361	return ATLX_ERR_PHY;
 362}
 363
 364#define CUSTOM_SPI_CS_SETUP	2
 365#define CUSTOM_SPI_CLK_HI	2
 366#define CUSTOM_SPI_CLK_LO	2
 367#define CUSTOM_SPI_CS_HOLD	2
 368#define CUSTOM_SPI_CS_HI	3
 369
 370static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
 371{
 372	int i;
 373	u32 value;
 374
 375	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
 376	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
 377
 378	value = SPI_FLASH_CTRL_WAIT_READY |
 379	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
 380	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
 381					     SPI_FLASH_CTRL_CLK_HI_MASK) <<
 382	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
 383					   SPI_FLASH_CTRL_CLK_LO_MASK) <<
 384	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
 385					   SPI_FLASH_CTRL_CS_HOLD_MASK) <<
 386	    SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
 387					    SPI_FLASH_CTRL_CS_HI_MASK) <<
 388	    SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
 389	    SPI_FLASH_CTRL_INS_SHIFT;
 390
 391	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
 392
 393	value |= SPI_FLASH_CTRL_START;
 394	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
 395	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
 396
 397	for (i = 0; i < 10; i++) {
 398		msleep(1);
 399		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
 400		if (!(value & SPI_FLASH_CTRL_START))
 401			break;
 402	}
 403
 404	if (value & SPI_FLASH_CTRL_START)
 405		return false;
 406
 407	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);
 408
 409	return true;
 410}
 411
 412/*
 413 * get_permanent_address
 414 * return 0 if get valid mac address,
 415 */
 416static int atl1_get_permanent_address(struct atl1_hw *hw)
 417{
 418	u32 addr[2];
 419	u32 i, control;
 420	u16 reg;
 421	u8 eth_addr[ETH_ALEN];
 422	bool key_valid;
 423
 424	if (is_valid_ether_addr(hw->perm_mac_addr))
 425		return 0;
 426
 427	/* init */
 428	addr[0] = addr[1] = 0;
 429
 430	if (!atl1_check_eeprom_exist(hw)) {
 431		reg = 0;
 432		key_valid = false;
 433		/* Read out all EEPROM content */
 434		i = 0;
 435		while (1) {
 436			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
 437				if (key_valid) {
 438					if (reg == REG_MAC_STA_ADDR)
 439						addr[0] = control;
 440					else if (reg == (REG_MAC_STA_ADDR + 4))
 441						addr[1] = control;
 442					key_valid = false;
 443				} else if ((control & 0xff) == 0x5A) {
 444					key_valid = true;
 445					reg = (u16) (control >> 16);
 446				} else
 447					break;
 448			} else
 449				/* read error */
 450				break;
 451			i += 4;
 452		}
 453
 454		*(u32 *) &eth_addr[2] = swab32(addr[0]);
 455		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
 456		if (is_valid_ether_addr(eth_addr)) {
 457			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
 458			return 0;
 459		}
 460	}
 461
 462	/* see if SPI FLAGS exist ? */
 463	addr[0] = addr[1] = 0;
 464	reg = 0;
 465	key_valid = false;
 466	i = 0;
 467	while (1) {
 468		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
 469			if (key_valid) {
 470				if (reg == REG_MAC_STA_ADDR)
 471					addr[0] = control;
 472				else if (reg == (REG_MAC_STA_ADDR + 4))
 473					addr[1] = control;
 474				key_valid = false;
 475			} else if ((control & 0xff) == 0x5A) {
 476				key_valid = true;
 477				reg = (u16) (control >> 16);
 478			} else
 479				/* data end */
 480				break;
 481		} else
 482			/* read error */
 483			break;
 484		i += 4;
 485	}
 486
 487	*(u32 *) &eth_addr[2] = swab32(addr[0]);
 488	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
 489	if (is_valid_ether_addr(eth_addr)) {
 490		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
 491		return 0;
 492	}
 493
 494	/*
 495	 * On some motherboards, the MAC address is written by the
 496	 * BIOS directly to the MAC register during POST, and is
 497	 * not stored in eeprom.  If all else thus far has failed
 498	 * to fetch the permanent MAC address, try reading it directly.
 499	 */
 500	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
 501	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
 502	*(u32 *) &eth_addr[2] = swab32(addr[0]);
 503	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
 504	if (is_valid_ether_addr(eth_addr)) {
 505		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
 506		return 0;
 507	}
 508
 509	return 1;
 510}
 511
 512/*
 513 * Reads the adapter's MAC address from the EEPROM
 514 * hw - Struct containing variables accessed by shared code
 515 */
 516static s32 atl1_read_mac_addr(struct atl1_hw *hw)
 517{
 518	s32 ret = 0;
 519	u16 i;
 520
 521	if (atl1_get_permanent_address(hw)) {
 522		eth_random_addr(hw->perm_mac_addr);
 523		ret = 1;
 524	}
 525
 526	for (i = 0; i < ETH_ALEN; i++)
 527		hw->mac_addr[i] = hw->perm_mac_addr[i];
 528	return ret;
 529}
 530
 531/*
 532 * Hashes an address to determine its location in the multicast table
 533 * hw - Struct containing variables accessed by shared code
 534 * mc_addr - the multicast address to hash
 535 *
 536 * atl1_hash_mc_addr
 537 *  purpose
 538 *      set hash value for a multicast address
 539 *      hash calcu processing :
 540 *          1. calcu 32bit CRC for multicast address
 541 *          2. reverse crc with MSB to LSB
 542 */
 543static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
 544{
 545	u32 crc32, value = 0;
 546	int i;
 547
 548	crc32 = ether_crc_le(6, mc_addr);
 549	for (i = 0; i < 32; i++)
 550		value |= (((crc32 >> i) & 1) << (31 - i));
 551
 552	return value;
 553}
 554
/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers,
	 * treated as a 64-bit bit array.  We want to set
	 * BitArray[hash_value]: the register index comes from the top
	 * bit (31) of the hash and the bit position within that register
	 * from bits 30:26, so only the upper 6 bits of the hash are
	 * actually used.  Read the register, OR in the new bit, then
	 * write the value back.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}
 580
 581/*
 582 * Writes a value to a PHY register
 583 * hw - Struct containing variables accessed by shared code
 584 * reg_addr - address of the PHY register to write
 585 * data - data to write to the PHY
 586 */
 587static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
 588{
 589	int i;
 590	u32 val;
 591
 592	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
 593	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
 594	    MDIO_SUP_PREAMBLE |
 595	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
 596	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
 597	ioread32(hw->hw_addr + REG_MDIO_CTRL);
 598
 599	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
 600		udelay(2);
 601		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
 602		if (!(val & (MDIO_START | MDIO_BUSY)))
 603			break;
 604	}
 605
 606	if (!(val & (MDIO_START | MDIO_BUSY)))
 607		return 0;
 608
 609	return ATLX_ERR_PHY;
 610}
 611
 612/*
 613 * Make L001's PHY out of Power Saving State (bug)
 614 * hw - Struct containing variables accessed by shared code
 615 * when power on, L001's PHY always on Power saving State
 616 * (Gigabit Link forbidden)
 617 */
 618static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
 619{
 620	s32 ret;
 621	ret = atl1_write_phy_reg(hw, 29, 0x0029);
 622	if (ret)
 623		return ret;
 624	return atl1_write_phy_reg(hw, 30, 0);
 625}
 626
 627/*
 628 * Resets the PHY and make all config validate
 629 * hw - Struct containing variables accessed by shared code
 630 *
 631 * Sets bit 15 and 12 of the MII Control regiser (for F001 bug)
 632 */
 633static s32 atl1_phy_reset(struct atl1_hw *hw)
 634{
 635	struct pci_dev *pdev = hw->back->pdev;
 636	struct atl1_adapter *adapter = hw->back;
 637	s32 ret_val;
 638	u16 phy_data;
 639
 640	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
 641	    hw->media_type == MEDIA_TYPE_1000M_FULL)
 642		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
 643	else {
 644		switch (hw->media_type) {
 645		case MEDIA_TYPE_100M_FULL:
 646			phy_data =
 647			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
 648			    MII_CR_RESET;
 649			break;
 650		case MEDIA_TYPE_100M_HALF:
 651			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
 652			break;
 653		case MEDIA_TYPE_10M_FULL:
 654			phy_data =
 655			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
 656			break;
 657		default:
 658			/* MEDIA_TYPE_10M_HALF: */
 659			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
 660			break;
 661		}
 662	}
 663
 664	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
 665	if (ret_val) {
 666		u32 val;
 667		int i;
 668		/* pcie serdes link may be down! */
 669		if (netif_msg_hw(adapter))
 670			dev_dbg(&pdev->dev, "pcie phy link down\n");
 671
 672		for (i = 0; i < 25; i++) {
 673			msleep(1);
 674			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
 675			if (!(val & (MDIO_START | MDIO_BUSY)))
 676				break;
 677		}
 678
 679		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
 680			if (netif_msg_hw(adapter))
 681				dev_warn(&pdev->dev,
 682					"pcie link down at least 25ms\n");
 683			return ret_val;
 684		}
 685	}
 686	return 0;
 687}
 688
 689/*
 690 * Configures PHY autoneg and flow control advertisement settings
 691 * hw - Struct containing variables accessed by shared code
 692 */
 693static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
 694{
 695	s32 ret_val;
 696	s16 mii_autoneg_adv_reg;
 697	s16 mii_1000t_ctrl_reg;
 698
 699	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
 700	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
 701
 702	/* Read the MII 1000Base-T Control Register (Address 9). */
 703	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
 704
 705	/*
 706	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
 707	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
 708	 * the  1000Base-T Control Register (Address 9).
 709	 */
 710	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
 711	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
 712
 713	/*
 714	 * Need to parse media_type  and set up
 715	 * the appropriate PHY registers.
 716	 */
 717	switch (hw->media_type) {
 718	case MEDIA_TYPE_AUTO_SENSOR:
 719		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
 720					MII_AR_10T_FD_CAPS |
 721					MII_AR_100TX_HD_CAPS |
 722					MII_AR_100TX_FD_CAPS);
 723		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
 724		break;
 725
 726	case MEDIA_TYPE_1000M_FULL:
 727		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
 728		break;
 729
 730	case MEDIA_TYPE_100M_FULL:
 731		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
 732		break;
 733
 734	case MEDIA_TYPE_100M_HALF:
 735		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
 736		break;
 737
 738	case MEDIA_TYPE_10M_FULL:
 739		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
 740		break;
 741
 742	default:
 743		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
 744		break;
 745	}
 746
 747	/* flow control fixed to enable all */
 748	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
 749
 750	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
 751	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
 752
 753	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
 754	if (ret_val)
 755		return ret_val;
 756
 757	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
 758	if (ret_val)
 759		return ret_val;
 760
 761	return 0;
 762}
 763
 764/*
 765 * Configures link settings.
 766 * hw - Struct containing variables accessed by shared code
 767 * Assumes the hardware has previously been reset and the
 768 * transmitter and receiver are not enabled.
 769 */
 770static s32 atl1_setup_link(struct atl1_hw *hw)
 771{
 772	struct pci_dev *pdev = hw->back->pdev;
 773	struct atl1_adapter *adapter = hw->back;
 774	s32 ret_val;
 775
 776	/*
 777	 * Options:
 778	 *  PHY will advertise value(s) parsed from
 779	 *  autoneg_advertised and fc
 780	 *  no matter what autoneg is , We will not wait link result.
 781	 */
 782	ret_val = atl1_phy_setup_autoneg_adv(hw);
 783	if (ret_val) {
 784		if (netif_msg_link(adapter))
 785			dev_dbg(&pdev->dev,
 786				"error setting up autonegotiation\n");
 787		return ret_val;
 788	}
 789	/* SW.Reset , En-Auto-Neg if needed */
 790	ret_val = atl1_phy_reset(hw);
 791	if (ret_val) {
 792		if (netif_msg_link(adapter))
 793			dev_dbg(&pdev->dev, "error resetting phy\n");
 794		return ret_val;
 795	}
 796	hw->phy_configured = true;
 797	return ret_val;
 798}
 799
/*
 * atl1_init_flash_opcode - program the per-vendor SPI flash command
 * opcodes into the controller's opcode registers.  Unknown vendor
 * indices fall back to entry 0 of flash_table.
 */
static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
		/* Atmel */
		hw->flash_vendor = 0;

	/* Init OP table */
	iowrite8(flash_table[hw->flash_vendor].cmd_program,
		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_read,
		hw->hw_addr + REG_SPI_FLASH_OP_READ);
}
 824
 825/*
 826 * Performs basic configuration of the adapter.
 827 * hw - Struct containing variables accessed by shared code
 828 * Assumes that the controller has previously been reset and is in a
 829 * post-reset uninitialized state. Initializes multicast table,
 830 * and  Calls routines to setup link
 831 * Leaves the transmit and receive units disabled and uninitialized.
 832 */
 833static s32 atl1_init_hw(struct atl1_hw *hw)
 834{
 835	u32 ret_val = 0;
 836
 837	/* Zero out the Multicast HASH table */
 838	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
 839	/* clear the old settings from the multicast hash table */
 840	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
 841
 842	atl1_init_flash_opcode(hw);
 843
 844	if (!hw->phy_configured) {
 845		/* enable GPHY LinkChange Interrupt */
 846		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
 847		if (ret_val)
 848			return ret_val;
 849		/* make PHY out of power-saving state */
 850		ret_val = atl1_phy_leave_power_saving(hw);
 851		if (ret_val)
 852			return ret_val;
 853		/* Call a subroutine to configure the link */
 854		ret_val = atl1_setup_link(hw);
 855	}
 856	return ret_val;
 857}
 858
 859/*
 860 * Detects the current speed and duplex settings of the hardware.
 861 * hw - Struct containing variables accessed by shared code
 862 * speed - Speed of the connection
 863 * duplex - Duplex setting of the connection
 864 */
 865static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
 866{
 867	struct pci_dev *pdev = hw->back->pdev;
 868	struct atl1_adapter *adapter = hw->back;
 869	s32 ret_val;
 870	u16 phy_data;
 871
 872	/* ; --- Read   PHY Specific Status Register (17) */
 873	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
 874	if (ret_val)
 875		return ret_val;
 876
 877	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
 878		return ATLX_ERR_PHY_RES;
 879
 880	switch (phy_data & MII_ATLX_PSSR_SPEED) {
 881	case MII_ATLX_PSSR_1000MBS:
 882		*speed = SPEED_1000;
 883		break;
 884	case MII_ATLX_PSSR_100MBS:
 885		*speed = SPEED_100;
 886		break;
 887	case MII_ATLX_PSSR_10MBS:
 888		*speed = SPEED_10;
 889		break;
 890	default:
 891		if (netif_msg_hw(adapter))
 892			dev_dbg(&pdev->dev, "error getting speed\n");
 893		return ATLX_ERR_PHY_SPEED;
 894	}
 895	if (phy_data & MII_ATLX_PSSR_DPLX)
 896		*duplex = FULL_DUPLEX;
 897	else
 898		*duplex = HALF_DUPLEX;
 899
 900	return 0;
 901}
 902
 903static void atl1_set_mac_addr(struct atl1_hw *hw)
 904{
 905	u32 value;
 906	/*
 907	 * 00-0B-6A-F6-00-DC
 908	 * 0:  6AF600DC   1: 000B
 909	 * low dword
 910	 */
 911	value = (((u32) hw->mac_addr[2]) << 24) |
 912	    (((u32) hw->mac_addr[3]) << 16) |
 913	    (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
 914	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
 915	/* high dword */
 916	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
 917	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
 918}
 919
/**
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Return: always 0.
 */
static int atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Frame limits derived from the current MTU. */
	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Wake-on-LAN disabled by default. */
	adapter->wol = 0;
	device_set_wakeup_enable(&adapter->pdev->dev, false);
	/* RX buffer length rounded up to an 8-byte multiple. */
	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	/*
	 * Hardware tuning defaults: inter-packet gaps, collision limits,
	 * DMA burst sizes, and coalescing thresholds.  Values taken from
	 * the Attansic reference driver -- TODO confirm against the L1
	 * datasheet before changing any of them.
	 */
	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->ipgt = 0x60;
	hw->min_ifg = 0x50;
	hw->ipgr1 = 0x40;
	hw->ipgr2 = 0x60;
	hw->max_retry = 0xf;
	hw->lcol = 0x37;
	hw->jam_ipg = 7;
	hw->rfd_burst = 8;
	hw->rrd_burst = 8;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_burst = 4;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rrd = 4;
	hw->cmb_tpd = 4;
	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
 978
 979static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
 980{
 981	struct atl1_adapter *adapter = netdev_priv(netdev);
 982	u16 result;
 983
 984	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
 985
 986	return result;
 987}
 988
/*
 * mdio_write - mii-lib callback to write a PHY register
 * @netdev: network interface whose adapter owns the PHY
 * @phy_id: PHY address (ignored here)
 * @reg_num: register number to write
 * @val: value to write; the write's error status is discarded
 */
static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
 996
 997static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 998{
 999	struct atl1_adapter *adapter = netdev_priv(netdev);
1000	unsigned long flags;
1001	int retval;
1002
1003	if (!netif_running(netdev))
1004		return -EINVAL;
1005
1006	spin_lock_irqsave(&adapter->lock, flags);
1007	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
1008	spin_unlock_irqrestore(&adapter->lock, flags);
1009
1010	return retval;
1011}
1012
/**
 * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Allocates the buffer_info bookkeeping array plus one contiguous DMA
 * region holding the TPD, RFD and RRD descriptor rings and the CMB/SMB
 * message blocks, each aligned up to an 8-byte boundary within it.
 *
 * Return 0 on success, negative on failure
 */
static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	/* One buffer_info slab shared by the TPD and RFD rings. */
	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
				size);
		goto err_nomem;
	}
	/* RFD entries follow the TPD entries in the same allocation. */
	rfd_ring->buffer_info =
		(tpd_ring->buffer_info + tpd_ring->count);

	/*
	 * real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end (5 blocks x 8 bytes).
	 */
	ring_header->size = size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	/* NOTE(review): pci_alloc_consistent is the legacy DMA API;
	 * consider dma_alloc_coherent when modernizing.
	 */
	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
		&ring_header->dma);
	if (unlikely(!ring_header->desc)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
		goto err_nomem;
	}

	/* init TPD ring: round its DMA address up to 8 bytes */
	tpd_ring->dma = ring_header->dma;
	offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	/* init RFD ring: starts 8-byte aligned after the TPD ring */
	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;


	/* init RRD ring: starts 8-byte aligned after the RFD ring */
	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;


	/* init CMB: coalescing message block, after the RRD ring */
	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	/* init SMB: statistics message block, after the CMB */
	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));

	return 0;

err_nomem:
	/* kfree(NULL) is a no-op, so this covers both failure points. */
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}
1105
1106static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
1107{
1108	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1109	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1110	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1111
1112	atomic_set(&tpd_ring->next_to_use, 0);
1113	atomic_set(&tpd_ring->next_to_clean, 0);
1114
1115	rfd_ring->next_to_clean = 0;
1116	atomic_set(&rfd_ring->next_to_use, 0);
1117
1118	rrd_ring->next_to_use = 0;
1119	atomic_set(&rrd_ring->next_to_clean, 0);
1120}
1121
1122/**
1123 * atl1_clean_rx_ring - Free RFD Buffers
1124 * @adapter: board private structure
1125 */
1126static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
1127{
1128	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1129	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1130	struct atl1_buffer *buffer_info;
1131	struct pci_dev *pdev = adapter->pdev;
1132	unsigned long size;
1133	unsigned int i;
1134
1135	/* Free all the Rx ring sk_buffs */
1136	for (i = 0; i < rfd_ring->count; i++) {
1137		buffer_info = &rfd_ring->buffer_info[i];
1138		if (buffer_info->dma) {
1139			pci_unmap_page(pdev, buffer_info->dma,
1140				buffer_info->length, PCI_DMA_FROMDEVICE);
1141			buffer_info->dma = 0;
1142		}
1143		if (buffer_info->skb) {
1144			dev_kfree_skb(buffer_info->skb);
1145			buffer_info->skb = NULL;
1146		}
1147	}
1148
1149	size = sizeof(struct atl1_buffer) * rfd_ring->count;
1150	memset(rfd_ring->buffer_info, 0, size);
1151
1152	/* Zero out the descriptor ring */
1153	memset(rfd_ring->desc, 0, rfd_ring->size);
1154
1155	rfd_ring->next_to_clean = 0;
1156	atomic_set(&rfd_ring->next_to_use, 0);
1157
1158	rrd_ring->next_to_use = 0;
1159	atomic_set(&rrd_ring->next_to_clean, 0);
1160}
1161
1162/**
1163 * atl1_clean_tx_ring - Free Tx Buffers
1164 * @adapter: board private structure
1165 */
1166static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
1167{
1168	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1169	struct atl1_buffer *buffer_info;
1170	struct pci_dev *pdev = adapter->pdev;
1171	unsigned long size;
1172	unsigned int i;
1173
1174	/* Free all the Tx ring sk_buffs */
1175	for (i = 0; i < tpd_ring->count; i++) {
1176		buffer_info = &tpd_ring->buffer_info[i];
1177		if (buffer_info->dma) {
1178			pci_unmap_page(pdev, buffer_info->dma,
1179				buffer_info->length, PCI_DMA_TODEVICE);
1180			buffer_info->dma = 0;
1181		}
1182	}
1183
1184	for (i = 0; i < tpd_ring->count; i++) {
1185		buffer_info = &tpd_ring->buffer_info[i];
1186		if (buffer_info->skb) {
1187			dev_kfree_skb_any(buffer_info->skb);
1188			buffer_info->skb = NULL;
1189		}
1190	}
1191
1192	size = sizeof(struct atl1_buffer) * tpd_ring->count;
1193	memset(tpd_ring->buffer_info, 0, size);
1194
1195	/* Zero out the descriptor ring */
1196	memset(tpd_ring->desc, 0, tpd_ring->size);
1197
1198	atomic_set(&tpd_ring->next_to_use, 0);
1199	atomic_set(&tpd_ring->next_to_clean, 0);
1200}
1201
1202/**
1203 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
1204 * @adapter: board private structure
1205 *
1206 * Free all transmit software resources
1207 */
1208static void atl1_free_ring_resources(struct atl1_adapter *adapter)
1209{
1210	struct pci_dev *pdev = adapter->pdev;
1211	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1212	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1213	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1214	struct atl1_ring_header *ring_header = &adapter->ring_header;
1215
1216	atl1_clean_tx_ring(adapter);
1217	atl1_clean_rx_ring(adapter);
1218
1219	kfree(tpd_ring->buffer_info);
1220	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
1221		ring_header->dma);
1222
1223	tpd_ring->buffer_info = NULL;
1224	tpd_ring->desc = NULL;
1225	tpd_ring->dma = 0;
1226
1227	rfd_ring->buffer_info = NULL;
1228	rfd_ring->desc = NULL;
1229	rfd_ring->dma = 0;
1230
1231	rrd_ring->desc = NULL;
1232	rrd_ring->dma = 0;
1233
1234	adapter->cmb.dma = 0;
1235	adapter->cmb.cmb = NULL;
1236
1237	adapter->smb.dma = 0;
1238	adapter->smb.smb = NULL;
1239}
1240
1241static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
1242{
1243	u32 value;
1244	struct atl1_hw *hw = &adapter->hw;
1245	struct net_device *netdev = adapter->netdev;
1246	/* Config MAC CTRL Register */
1247	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
1248	/* duplex */
1249	if (FULL_DUPLEX == adapter->link_duplex)
1250		value |= MAC_CTRL_DUPLX;
1251	/* speed */
1252	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
1253			 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
1254		  MAC_CTRL_SPEED_SHIFT);
1255	/* flow control */
1256	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1257	/* PAD & CRC */
1258	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1259	/* preamble length */
1260	value |= (((u32) adapter->hw.preamble_len
1261		   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
1262	/* vlan */
1263	__atlx_vlan_mode(netdev->features, &value);
1264	/* rx checksum
1265	   if (adapter->rx_csum)
1266	   value |= MAC_CTRL_RX_CHKSUM_EN;
1267	 */
1268	/* filter mode */
1269	value |= MAC_CTRL_BC_EN;
1270	if (netdev->flags & IFF_PROMISC)
1271		value |= MAC_CTRL_PROMIS_EN;
1272	else if (netdev->flags & IFF_ALLMULTI)
1273		value |= MAC_CTRL_MC_ALL_EN;
1274	/* value |= MAC_CTRL_LOOPBACK; */
1275	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
1276}
1277
/*
 * atl1_check_link - reconcile PHY link status with the configured media type
 * @adapter: board private structure
 *
 * Reads the PHY link status.  On link down, just drops the carrier.  On
 * link up, compares the negotiated speed/duplex against the configured
 * media type: if they match, (re)programs the MAC and raises the
 * carrier; if not, forces the PHY to the configured speed/duplex, or for
 * autoneg/1000M media arms a timer to re-run PHY configuration.
 *
 * Returns 0, or the error from atl1_get_speed_and_duplex().
 */
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice: the link-status bit is latched low,
	 * so the first read may report a stale link-down event */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	/* does the negotiated result match the configured media type? */
	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev,
					"%s link is up %d Mbps %s\n",
					netdev->name, adapter->link_speed,
					adapter->link_duplex == FULL_DUPLEX ?
					"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	/* for fixed 10/100 media, force the PHY to the configured mode */
	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			           MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer,
			  round_jiffies(jiffies + 3 * HZ));
	}

	return 0;
}
1389
1390static void set_flow_ctrl_old(struct atl1_adapter *adapter)
1391{
1392	u32 hi, lo, value;
1393
1394	/* RFD Flow Control */
1395	value = adapter->rfd_ring.count;
1396	hi = value / 16;
1397	if (hi < 2)
1398		hi = 2;
1399	lo = value * 7 / 8;
1400
1401	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
1402		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
1403	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
1404
1405	/* RRD Flow Control */
1406	value = adapter->rrd_ring.count;
1407	lo = value / 16;
1408	hi = value * 7 / 8;
1409	if (lo < 2)
1410		lo = 2;
1411	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
1412		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
1413	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
1414}
1415
1416static void set_flow_ctrl_new(struct atl1_hw *hw)
1417{
1418	u32 hi, lo, value;
1419
1420	/* RXF Flow Control */
1421	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
1422	lo = value / 16;
1423	if (lo < 192)
1424		lo = 192;
1425	hi = value * 7 / 8;
1426	if (hi < lo)
1427		hi = lo + 16;
1428	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
1429		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
1430	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
1431
1432	/* RRD Flow Control */
1433	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
1434	lo = value / 8;
1435	hi = value * 7 / 8;
1436	if (lo < 2)
1437		lo = 2;
1438	if (hi < lo)
1439		hi = lo + 3;
1440	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
1441		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
1442	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
1443}
1444
1445/**
1446 * atl1_configure - Configure Transmit&Receive Unit after Reset
1447 * @adapter: board private structure
1448 *
1449 * Configure the Tx /Rx unit of the MAC after a reset.
1450 */
1451static u32 atl1_configure(struct atl1_adapter *adapter)
1452{
1453	struct atl1_hw *hw = &adapter->hw;
1454	u32 value;
1455
1456	/* clear interrupt status */
1457	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
1458
1459	/* set MAC Address */
1460	value = (((u32) hw->mac_addr[2]) << 24) |
1461		(((u32) hw->mac_addr[3]) << 16) |
1462		(((u32) hw->mac_addr[4]) << 8) |
1463		(((u32) hw->mac_addr[5]));
1464	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
1465	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
1466	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
1467
1468	/* tx / rx ring */
1469
1470	/* HI base address */
1471	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
1472		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
1473	/* LO base address */
1474	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
1475		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
1476	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
1477		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
1478	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
1479		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
1480	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
1481		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
1482	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
1483		hw->hw_addr + REG_DESC_SMB_ADDR_LO);
1484
1485	/* element count */
1486	value = adapter->rrd_ring.count;
1487	value <<= 16;
1488	value += adapter->rfd_ring.count;
1489	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
1490	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
1491		REG_DESC_TPD_RING_SIZE);
1492
1493	/* Load Ptr */
1494	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
1495
1496	/* config Mailbox */
1497	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
1498		  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
1499		((atomic_read(&adapter->rrd_ring.next_to_clean)
1500		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
1501		((atomic_read(&adapter->rfd_ring.next_to_use)
1502		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
1503	iowrite32(value, hw->hw_addr + REG_MAILBOX);
1504
1505	/* config IPG/IFG */
1506	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
1507		 << MAC_IPG_IFG_IPGT_SHIFT) |
1508		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
1509		<< MAC_IPG_IFG_MIFG_SHIFT) |
1510		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
1511		<< MAC_IPG_IFG_IPGR1_SHIFT) |
1512		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
1513		<< MAC_IPG_IFG_IPGR2_SHIFT);
1514	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
1515
1516	/* config  Half-Duplex Control */
1517	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
1518		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
1519		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
1520		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
1521		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
1522		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
1523		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
1524	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
1525
1526	/* set Interrupt Moderator Timer */
1527	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
1528	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
1529
1530	/* set Interrupt Clear Timer */
1531	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
1532
1533	/* set max frame size hw will accept */
1534	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);
1535
1536	/* jumbo size & rrd retirement timer */
1537	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
1538		 << RXQ_JMBOSZ_TH_SHIFT) |
1539		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
1540		<< RXQ_JMBO_LKAH_SHIFT) |
1541		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
1542		<< RXQ_RRD_TIMER_SHIFT);
1543	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
1544
1545	/* Flow Control */
1546	switch (hw->dev_rev) {
1547	case 0x8001:
1548	case 0x9001:
1549	case 0x9002:
1550	case 0x9003:
1551		set_flow_ctrl_old(adapter);
1552		break;
1553	default:
1554		set_flow_ctrl_new(hw);
1555		break;
1556	}
1557
1558	/* config TXQ */
1559	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
1560		 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
1561		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
1562		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
1563		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
1564		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
1565		TXQ_CTRL_EN;
1566	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
1567
1568	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
1569	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
1570		<< TX_JUMBO_TASK_TH_SHIFT) |
1571		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
1572		<< TX_TPD_MIN_IPG_SHIFT);
1573	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
1574
1575	/* config RXQ */
1576	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
1577		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
1578		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
1579		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
1580		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
1581		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
1582		RXQ_CTRL_EN;
1583	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
1584
1585	/* config DMA Engine */
1586	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1587		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
1588		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1589		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
1590		DMA_CTRL_DMAW_EN;
1591	value |= (u32) hw->dma_ord;
1592	if (atl1_rcb_128 == hw->rcb_value)
1593		value |= DMA_CTRL_RCB_VALUE;
1594	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
1595
1596	/* config CMB / SMB */
1597	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
1598		hw->cmb_tpd : adapter->tpd_ring.count;
1599	value <<= 16;
1600	value |= hw->cmb_rrd;
1601	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
1602	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
1603	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
1604	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
1605
1606	/* --- enable CMB / SMB */
1607	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
1608	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
1609
1610	value = ioread32(adapter->hw.hw_addr + REG_ISR);
1611	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
1612		value = 1;	/* config failed */
1613	else
1614		value = 0;
1615
1616	/* clear all interrupt status */
1617	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
1618	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
1619	return value;
1620}
1621
1622/*
1623 * atl1_pcie_patch - Patch for PCIE module
1624 */
1625static void atl1_pcie_patch(struct atl1_adapter *adapter)
1626{
1627	u32 value;
1628
1629	/* much vendor magic here */
1630	value = 0x6500;
1631	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
1632	/* pcie flow control mode change */
1633	value = ioread32(adapter->hw.hw_addr + 0x1008);
1634	value |= 0x8000;
1635	iowrite32(value, adapter->hw.hw_addr + 0x1008);
1636}
1637
1638/*
1639 * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
1640 * on PCI Command register is disable.
1641 * The function enable this bit.
1642 * Brackett, 2006/03/15
1643 */
1644static void atl1_via_workaround(struct atl1_adapter *adapter)
1645{
1646	unsigned long value;
1647
1648	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
1649	if (value & PCI_COMMAND_INTX_DISABLE)
1650		value &= ~PCI_COMMAND_INTX_DISABLE;
1651	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
1652}
1653
1654static void atl1_inc_smb(struct atl1_adapter *adapter)
1655{
1656	struct net_device *netdev = adapter->netdev;
1657	struct stats_msg_block *smb = adapter->smb.smb;
1658
1659	u64 new_rx_errors = smb->rx_frag +
1660			    smb->rx_fcs_err +
1661			    smb->rx_len_err +
1662			    smb->rx_sz_ov +
1663			    smb->rx_rxf_ov +
1664			    smb->rx_rrd_ov +
1665			    smb->rx_align_err;
1666	u64 new_tx_errors = smb->tx_late_col +
1667			    smb->tx_abort_col +
1668			    smb->tx_underrun +
1669			    smb->tx_trunc;
1670
1671	/* Fill out the OS statistics structure */
1672	adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
1673	adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
1674	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
1675	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
1676	adapter->soft_stats.multicast += smb->rx_mcast;
1677	adapter->soft_stats.collisions += smb->tx_1_col +
1678					  smb->tx_2_col +
1679					  smb->tx_late_col +
1680					  smb->tx_abort_col;
1681
1682	/* Rx Errors */
1683	adapter->soft_stats.rx_errors += new_rx_errors;
1684	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
1685	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
1686	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
1687	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
1688
1689	adapter->soft_stats.rx_pause += smb->rx_pause;
1690	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
1691	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
1692
1693	/* Tx Errors */
1694	adapter->soft_stats.tx_errors += new_tx_errors;
1695	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
1696	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
1697	adapter->soft_stats.tx_window_errors += smb->tx_late_col;
1698
1699	adapter->soft_stats.excecol += smb->tx_abort_col;
1700	adapter->soft_stats.deffer += smb->tx_defer;
1701	adapter->soft_stats.scc += smb->tx_1_col;
1702	adapter->soft_stats.mcc += smb->tx_2_col;
1703	adapter->soft_stats.latecol += smb->tx_late_col;
1704	adapter->soft_stats.tx_underrun += smb->tx_underrun;
1705	adapter->soft_stats.tx_trunc += smb->tx_trunc;
1706	adapter->soft_stats.tx_pause += smb->tx_pause;
1707
1708	netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
1709	netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
1710	netdev->stats.multicast = adapter->soft_stats.multicast;
1711	netdev->stats.collisions = adapter->soft_stats.collisions;
1712	netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
1713	netdev->stats.rx_length_errors =
1714		adapter->soft_stats.rx_length_errors;
1715	netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
1716	netdev->stats.rx_frame_errors =
1717		adapter->soft_stats.rx_frame_errors;
1718	netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
1719	netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
1720	netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
1721	netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
1722	netdev->stats.tx_aborted_errors =
1723		adapter->soft_stats.tx_aborted_errors;
1724	netdev->stats.tx_window_errors =
1725		adapter->soft_stats.tx_window_errors;
1726	netdev->stats.tx_carrier_errors =
1727		adapter->soft_stats.tx_carrier_errors;
1728
1729	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
1730	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
1731}
1732
1733static void atl1_update_mailbox(struct atl1_adapter *adapter)
1734{
1735	unsigned long flags;
1736	u32 tpd_next_to_use;
1737	u32 rfd_next_to_use;
1738	u32 rrd_next_to_clean;
1739	u32 value;
1740
1741	spin_lock_irqsave(&adapter->mb_lock, flags);
1742
1743	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
1744	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
1745	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
1746
1747	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
1748		MB_RFD_PROD_INDX_SHIFT) |
1749		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
1750		MB_RRD_CONS_INDX_SHIFT) |
1751		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
1752		MB_TPD_PROD_INDX_SHIFT);
1753	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
1754
1755	spin_unlock_irqrestore(&adapter->mb_lock, flags);
1756}
1757
1758static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
1759	struct rx_return_desc *rrd, u16 offset)
1760{
1761	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1762
1763	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
1764		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
1765		if (++rfd_ring->next_to_clean == rfd_ring->count) {
1766			rfd_ring->next_to_clean = 0;
1767		}
1768	}
1769}
1770
1771static void atl1_update_rfd_index(struct atl1_adapter *adapter,
1772	struct rx_return_desc *rrd)
1773{
1774	u16 num_buf;
1775
1776	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
1777		adapter->rx_buffer_len;
1778	if (rrd->num_buf == num_buf)
1779		/* clean alloc flag for bad rrd */
1780		atl1_clean_alloc_flag(adapter, rrd, num_buf);
1781}
1782
1783static void atl1_rx_checksum(struct atl1_adapter *adapter,
1784	struct rx_return_desc *rrd, struct sk_buff *skb)
1785{
1786	struct pci_dev *pdev = adapter->pdev;
1787
1788	/*
1789	 * The L1 hardware contains a bug that erroneously sets the
1790	 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
1791	 * fragmented IP packet is receiv…

Large files files are truncated, but you can click here to view the full file