/drivers/net/atlx/atl1.c

https://bitbucket.org/ndreys/linux-sunxi · C · 3672 lines · 2659 code · 457 blank · 556 comment · 389 complexity · 75b6eb1456f287e935808b55aad80592 MD5 · raw file

Large files are truncated — click here to view the full file.

  1. /*
  2. * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  3. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
  4. * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  5. *
  6. * Derived from Intel e1000 driver
  7. * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the Free
  11. * Software Foundation; either version 2 of the License, or (at your option)
  12. * any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful, but WITHOUT
  15. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  16. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  17. * more details.
  18. *
  19. * You should have received a copy of the GNU General Public License along with
  20. * this program; if not, write to the Free Software Foundation, Inc., 59
  21. * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  22. *
  23. * The full GNU General Public License is included in this distribution in the
  24. * file called COPYING.
  25. *
  26. * Contact Information:
  27. * Xiong Huang <xiong.huang@atheros.com>
  28. * Jie Yang <jie.yang@atheros.com>
  29. * Chris Snook <csnook@redhat.com>
  30. * Jay Cliburn <jcliburn@gmail.com>
  31. *
  32. * This version is adapted from the Attansic reference driver.
  33. *
  34. * TODO:
  35. * Add more ethtool functions.
  36. * Fix abstruse irq enable/disable condition described here:
  37. * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
  38. *
  39. * NEEDS TESTING:
  40. * VLAN
  41. * multicast
  42. * promiscuous mode
  43. * interrupt coalescing
  44. * SMP torture testing
  45. */
  46. #include <asm/atomic.h>
  47. #include <asm/byteorder.h>
  48. #include <linux/compiler.h>
  49. #include <linux/crc32.h>
  50. #include <linux/delay.h>
  51. #include <linux/dma-mapping.h>
  52. #include <linux/etherdevice.h>
  53. #include <linux/hardirq.h>
  54. #include <linux/if_ether.h>
  55. #include <linux/if_vlan.h>
  56. #include <linux/in.h>
  57. #include <linux/interrupt.h>
  58. #include <linux/ip.h>
  59. #include <linux/irqflags.h>
  60. #include <linux/irqreturn.h>
  61. #include <linux/jiffies.h>
  62. #include <linux/mii.h>
  63. #include <linux/module.h>
  64. #include <linux/moduleparam.h>
  65. #include <linux/net.h>
  66. #include <linux/netdevice.h>
  67. #include <linux/pci.h>
  68. #include <linux/pci_ids.h>
  69. #include <linux/pm.h>
  70. #include <linux/skbuff.h>
  71. #include <linux/slab.h>
  72. #include <linux/spinlock.h>
  73. #include <linux/string.h>
  74. #include <linux/tcp.h>
  75. #include <linux/timer.h>
  76. #include <linux/types.h>
  77. #include <linux/workqueue.h>
  78. #include <net/checksum.h>
  79. #include "atl1.h"
  80. #define ATLX_DRIVER_VERSION "2.1.3"
  81. MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
  82. "Chris Snook <csnook@redhat.com>, "
  83. "Jay Cliburn <jcliburn@gmail.com>");
  84. MODULE_LICENSE("GPL");
  85. MODULE_VERSION(ATLX_DRIVER_VERSION);
  86. /* Temporary hack for merging atl1 and atl2 */
  87. #include "atlx.c"
  88. static const struct ethtool_ops atl1_ethtool_ops;
  89. /*
  90. * This is the only thing that needs to be changed to adjust the
  91. * maximum number of ports that the driver can manage.
  92. */
  93. #define ATL1_MAX_NIC 4
  94. #define OPTION_UNSET -1
  95. #define OPTION_DISABLED 0
  96. #define OPTION_ENABLED 1
  97. #define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
  98. /*
  99. * Interrupt Moderate Timer in units of 2 us
  100. *
  101. * Valid Range: 10-65535
  102. *
  103. * Default Value: 100 (200us)
  104. */
  105. static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
  106. static unsigned int num_int_mod_timer;
  107. module_param_array_named(int_mod_timer, int_mod_timer, int,
  108. &num_int_mod_timer, 0);
  109. MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
  110. #define DEFAULT_INT_MOD_CNT 100 /* 200us */
  111. #define MAX_INT_MOD_CNT 65000
  112. #define MIN_INT_MOD_CNT 50
  113. struct atl1_option {
  114. enum { enable_option, range_option, list_option } type;
  115. char *name;
  116. char *err;
  117. int def;
  118. union {
  119. struct { /* range_option info */
  120. int min;
  121. int max;
  122. } r;
  123. struct { /* list_option info */
  124. int nr;
  125. struct atl1_opt_list {
  126. int i;
  127. char *str;
  128. } *p;
  129. } l;
  130. } arg;
  131. };
  132. static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
  133. struct pci_dev *pdev)
  134. {
  135. if (*value == OPTION_UNSET) {
  136. *value = opt->def;
  137. return 0;
  138. }
  139. switch (opt->type) {
  140. case enable_option:
  141. switch (*value) {
  142. case OPTION_ENABLED:
  143. dev_info(&pdev->dev, "%s enabled\n", opt->name);
  144. return 0;
  145. case OPTION_DISABLED:
  146. dev_info(&pdev->dev, "%s disabled\n", opt->name);
  147. return 0;
  148. }
  149. break;
  150. case range_option:
  151. if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
  152. dev_info(&pdev->dev, "%s set to %i\n", opt->name,
  153. *value);
  154. return 0;
  155. }
  156. break;
  157. case list_option:{
  158. int i;
  159. struct atl1_opt_list *ent;
  160. for (i = 0; i < opt->arg.l.nr; i++) {
  161. ent = &opt->arg.l.p[i];
  162. if (*value == ent->i) {
  163. if (ent->str[0] != '\0')
  164. dev_info(&pdev->dev, "%s\n",
  165. ent->str);
  166. return 0;
  167. }
  168. }
  169. }
  170. break;
  171. default:
  172. break;
  173. }
  174. dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
  175. opt->name, *value, opt->err);
  176. *value = opt->def;
  177. return -1;
  178. }
  179. /*
  180. * atl1_check_options - Range Checking for Command Line Parameters
  181. * @adapter: board private structure
  182. *
  183. * This routine checks all command line parameters for valid user
  184. * input. If an invalid value is given, or if no user specified
  185. * value exists, a default value is used. The final value is stored
  186. * in a variable in the adapter structure.
  187. */
  188. static void __devinit atl1_check_options(struct atl1_adapter *adapter)
  189. {
  190. struct pci_dev *pdev = adapter->pdev;
  191. int bd = adapter->bd_number;
  192. if (bd >= ATL1_MAX_NIC) {
  193. dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
  194. dev_notice(&pdev->dev, "using defaults for all values\n");
  195. }
  196. { /* Interrupt Moderate Timer */
  197. struct atl1_option opt = {
  198. .type = range_option,
  199. .name = "Interrupt Moderator Timer",
  200. .err = "using default of "
  201. __MODULE_STRING(DEFAULT_INT_MOD_CNT),
  202. .def = DEFAULT_INT_MOD_CNT,
  203. .arg = {.r = {.min = MIN_INT_MOD_CNT,
  204. .max = MAX_INT_MOD_CNT} }
  205. };
  206. int val;
  207. if (num_int_mod_timer > bd) {
  208. val = int_mod_timer[bd];
  209. atl1_validate_option(&val, &opt, pdev);
  210. adapter->imt = (u16) val;
  211. } else
  212. adapter->imt = (u16) (opt.def);
  213. }
  214. }
  215. /*
  216. * atl1_pci_tbl - PCI Device ID Table
  217. */
  218. static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
  219. {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
  220. /* required last entry */
  221. {0,}
  222. };
  223. MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
  224. static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  225. NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
  226. static int debug = -1;
  227. module_param(debug, int, 0);
  228. MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
  229. /*
  230. * Reset the transmit and receive units; mask and clear all interrupts.
  231. * hw - Struct containing variables accessed by shared code
  232. * return : 0 or idle status (if error)
  233. */
  234. static s32 atl1_reset_hw(struct atl1_hw *hw)
  235. {
  236. struct pci_dev *pdev = hw->back->pdev;
  237. struct atl1_adapter *adapter = hw->back;
  238. u32 icr;
  239. int i;
  240. /*
  241. * Clear Interrupt mask to stop board from generating
  242. * interrupts & Clear any pending interrupt events
  243. */
  244. /*
  245. * iowrite32(0, hw->hw_addr + REG_IMR);
  246. * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
  247. */
  248. /*
  249. * Issue Soft Reset to the MAC. This will reset the chip's
  250. * transmit, receive, DMA. It will not effect
  251. * the current PCI configuration. The global reset bit is self-
  252. * clearing, and should clear within a microsecond.
  253. */
  254. iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
  255. ioread32(hw->hw_addr + REG_MASTER_CTRL);
  256. iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
  257. ioread16(hw->hw_addr + REG_PHY_ENABLE);
  258. /* delay about 1ms */
  259. msleep(1);
  260. /* Wait at least 10ms for All module to be Idle */
  261. for (i = 0; i < 10; i++) {
  262. icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
  263. if (!icr)
  264. break;
  265. /* delay 1 ms */
  266. msleep(1);
  267. /* FIXME: still the right way to do this? */
  268. cpu_relax();
  269. }
  270. if (icr) {
  271. if (netif_msg_hw(adapter))
  272. dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
  273. return icr;
  274. }
  275. return 0;
  276. }
  277. /* function about EEPROM
  278. *
  279. * check_eeprom_exist
  280. * return 0 if eeprom exist
  281. */
  282. static int atl1_check_eeprom_exist(struct atl1_hw *hw)
  283. {
  284. u32 value;
  285. value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
  286. if (value & SPI_FLASH_CTRL_EN_VPD) {
  287. value &= ~SPI_FLASH_CTRL_EN_VPD;
  288. iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
  289. }
  290. value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
  291. return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
  292. }
  293. static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
  294. {
  295. int i;
  296. u32 control;
  297. if (offset & 3)
  298. /* address do not align */
  299. return false;
  300. iowrite32(0, hw->hw_addr + REG_VPD_DATA);
  301. control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
  302. iowrite32(control, hw->hw_addr + REG_VPD_CAP);
  303. ioread32(hw->hw_addr + REG_VPD_CAP);
  304. for (i = 0; i < 10; i++) {
  305. msleep(2);
  306. control = ioread32(hw->hw_addr + REG_VPD_CAP);
  307. if (control & VPD_CAP_VPD_FLAG)
  308. break;
  309. }
  310. if (control & VPD_CAP_VPD_FLAG) {
  311. *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
  312. return true;
  313. }
  314. /* timeout */
  315. return false;
  316. }
  317. /*
  318. * Reads the value from a PHY register
  319. * hw - Struct containing variables accessed by shared code
  320. * reg_addr - address of the PHY register to read
  321. */
  322. static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
  323. {
  324. u32 val;
  325. int i;
  326. val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
  327. MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
  328. MDIO_CLK_SEL_SHIFT;
  329. iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
  330. ioread32(hw->hw_addr + REG_MDIO_CTRL);
  331. for (i = 0; i < MDIO_WAIT_TIMES; i++) {
  332. udelay(2);
  333. val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
  334. if (!(val & (MDIO_START | MDIO_BUSY)))
  335. break;
  336. }
  337. if (!(val & (MDIO_START | MDIO_BUSY))) {
  338. *phy_data = (u16) val;
  339. return 0;
  340. }
  341. return ATLX_ERR_PHY;
  342. }
  343. #define CUSTOM_SPI_CS_SETUP 2
  344. #define CUSTOM_SPI_CLK_HI 2
  345. #define CUSTOM_SPI_CLK_LO 2
  346. #define CUSTOM_SPI_CS_HOLD 2
  347. #define CUSTOM_SPI_CS_HI 3
  348. static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
  349. {
  350. int i;
  351. u32 value;
  352. iowrite32(0, hw->hw_addr + REG_SPI_DATA);
  353. iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
  354. value = SPI_FLASH_CTRL_WAIT_READY |
  355. (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
  356. SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
  357. SPI_FLASH_CTRL_CLK_HI_MASK) <<
  358. SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
  359. SPI_FLASH_CTRL_CLK_LO_MASK) <<
  360. SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
  361. SPI_FLASH_CTRL_CS_HOLD_MASK) <<
  362. SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
  363. SPI_FLASH_CTRL_CS_HI_MASK) <<
  364. SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
  365. SPI_FLASH_CTRL_INS_SHIFT;
  366. iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
  367. value |= SPI_FLASH_CTRL_START;
  368. iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
  369. ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
  370. for (i = 0; i < 10; i++) {
  371. msleep(1);
  372. value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
  373. if (!(value & SPI_FLASH_CTRL_START))
  374. break;
  375. }
  376. if (value & SPI_FLASH_CTRL_START)
  377. return false;
  378. *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
  379. return true;
  380. }
  381. /*
  382. * get_permanent_address
  383. * return 0 if get valid mac address,
  384. */
  385. static int atl1_get_permanent_address(struct atl1_hw *hw)
  386. {
  387. u32 addr[2];
  388. u32 i, control;
  389. u16 reg;
  390. u8 eth_addr[ETH_ALEN];
  391. bool key_valid;
  392. if (is_valid_ether_addr(hw->perm_mac_addr))
  393. return 0;
  394. /* init */
  395. addr[0] = addr[1] = 0;
  396. if (!atl1_check_eeprom_exist(hw)) {
  397. reg = 0;
  398. key_valid = false;
  399. /* Read out all EEPROM content */
  400. i = 0;
  401. while (1) {
  402. if (atl1_read_eeprom(hw, i + 0x100, &control)) {
  403. if (key_valid) {
  404. if (reg == REG_MAC_STA_ADDR)
  405. addr[0] = control;
  406. else if (reg == (REG_MAC_STA_ADDR + 4))
  407. addr[1] = control;
  408. key_valid = false;
  409. } else if ((control & 0xff) == 0x5A) {
  410. key_valid = true;
  411. reg = (u16) (control >> 16);
  412. } else
  413. break;
  414. } else
  415. /* read error */
  416. break;
  417. i += 4;
  418. }
  419. *(u32 *) &eth_addr[2] = swab32(addr[0]);
  420. *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
  421. if (is_valid_ether_addr(eth_addr)) {
  422. memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
  423. return 0;
  424. }
  425. }
  426. /* see if SPI FLAGS exist ? */
  427. addr[0] = addr[1] = 0;
  428. reg = 0;
  429. key_valid = false;
  430. i = 0;
  431. while (1) {
  432. if (atl1_spi_read(hw, i + 0x1f000, &control)) {
  433. if (key_valid) {
  434. if (reg == REG_MAC_STA_ADDR)
  435. addr[0] = control;
  436. else if (reg == (REG_MAC_STA_ADDR + 4))
  437. addr[1] = control;
  438. key_valid = false;
  439. } else if ((control & 0xff) == 0x5A) {
  440. key_valid = true;
  441. reg = (u16) (control >> 16);
  442. } else
  443. /* data end */
  444. break;
  445. } else
  446. /* read error */
  447. break;
  448. i += 4;
  449. }
  450. *(u32 *) &eth_addr[2] = swab32(addr[0]);
  451. *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
  452. if (is_valid_ether_addr(eth_addr)) {
  453. memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
  454. return 0;
  455. }
  456. /*
  457. * On some motherboards, the MAC address is written by the
  458. * BIOS directly to the MAC register during POST, and is
  459. * not stored in eeprom. If all else thus far has failed
  460. * to fetch the permanent MAC address, try reading it directly.
  461. */
  462. addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
  463. addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
  464. *(u32 *) &eth_addr[2] = swab32(addr[0]);
  465. *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
  466. if (is_valid_ether_addr(eth_addr)) {
  467. memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
  468. return 0;
  469. }
  470. return 1;
  471. }
  472. /*
  473. * Reads the adapter's MAC address from the EEPROM
  474. * hw - Struct containing variables accessed by shared code
  475. */
  476. static s32 atl1_read_mac_addr(struct atl1_hw *hw)
  477. {
  478. u16 i;
  479. if (atl1_get_permanent_address(hw))
  480. random_ether_addr(hw->perm_mac_addr);
  481. for (i = 0; i < ETH_ALEN; i++)
  482. hw->mac_addr[i] = hw->perm_mac_addr[i];
  483. return 0;
  484. }
  485. /*
  486. * Hashes an address to determine its location in the multicast table
  487. * hw - Struct containing variables accessed by shared code
  488. * mc_addr - the multicast address to hash
  489. *
  490. * atl1_hash_mc_addr
  491. * purpose
  492. * set hash value for a multicast address
  493. * hash calcu processing :
  494. * 1. calcu 32bit CRC for multicast address
  495. * 2. reverse crc with MSB to LSB
  496. */
  497. static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
  498. {
  499. u32 crc32, value = 0;
  500. int i;
  501. crc32 = ether_crc_le(6, mc_addr);
  502. for (i = 0; i < 32; i++)
  503. value |= (((crc32 >> i) & 1) << (31 - i));
  504. return value;
  505. }
  506. /*
  507. * Sets the bit in the multicast table corresponding to the hash value.
  508. * hw - Struct containing variables accessed by shared code
  509. * hash_value - Multicast address hash value
  510. */
  511. static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
  512. {
  513. u32 hash_bit, hash_reg;
  514. u32 mta;
  515. /*
  516. * The HASH Table is a register array of 2 32-bit registers.
  517. * It is treated like an array of 64 bits. We want to set
  518. * bit BitArray[hash_value]. So we figure out what register
  519. * the bit is in, read it, OR in the new bit, then write
  520. * back the new value. The register is determined by the
  521. * upper 7 bits of the hash value and the bit within that
  522. * register are determined by the lower 5 bits of the value.
  523. */
  524. hash_reg = (hash_value >> 31) & 0x1;
  525. hash_bit = (hash_value >> 26) & 0x1F;
  526. mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
  527. mta |= (1 << hash_bit);
  528. iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
  529. }
  530. /*
  531. * Writes a value to a PHY register
  532. * hw - Struct containing variables accessed by shared code
  533. * reg_addr - address of the PHY register to write
  534. * data - data to write to the PHY
  535. */
  536. static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
  537. {
  538. int i;
  539. u32 val;
  540. val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
  541. (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
  542. MDIO_SUP_PREAMBLE |
  543. MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
  544. iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
  545. ioread32(hw->hw_addr + REG_MDIO_CTRL);
  546. for (i = 0; i < MDIO_WAIT_TIMES; i++) {
  547. udelay(2);
  548. val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
  549. if (!(val & (MDIO_START | MDIO_BUSY)))
  550. break;
  551. }
  552. if (!(val & (MDIO_START | MDIO_BUSY)))
  553. return 0;
  554. return ATLX_ERR_PHY;
  555. }
  556. /*
  557. * Make L001's PHY out of Power Saving State (bug)
  558. * hw - Struct containing variables accessed by shared code
  559. * when power on, L001's PHY always on Power saving State
  560. * (Gigabit Link forbidden)
  561. */
  562. static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
  563. {
  564. s32 ret;
  565. ret = atl1_write_phy_reg(hw, 29, 0x0029);
  566. if (ret)
  567. return ret;
  568. return atl1_write_phy_reg(hw, 30, 0);
  569. }
  570. /*
  571. * Resets the PHY and make all config validate
  572. * hw - Struct containing variables accessed by shared code
  573. *
  574. * Sets bit 15 and 12 of the MII Control regiser (for F001 bug)
  575. */
  576. static s32 atl1_phy_reset(struct atl1_hw *hw)
  577. {
  578. struct pci_dev *pdev = hw->back->pdev;
  579. struct atl1_adapter *adapter = hw->back;
  580. s32 ret_val;
  581. u16 phy_data;
  582. if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
  583. hw->media_type == MEDIA_TYPE_1000M_FULL)
  584. phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
  585. else {
  586. switch (hw->media_type) {
  587. case MEDIA_TYPE_100M_FULL:
  588. phy_data =
  589. MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
  590. MII_CR_RESET;
  591. break;
  592. case MEDIA_TYPE_100M_HALF:
  593. phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
  594. break;
  595. case MEDIA_TYPE_10M_FULL:
  596. phy_data =
  597. MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
  598. break;
  599. default:
  600. /* MEDIA_TYPE_10M_HALF: */
  601. phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
  602. break;
  603. }
  604. }
  605. ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
  606. if (ret_val) {
  607. u32 val;
  608. int i;
  609. /* pcie serdes link may be down! */
  610. if (netif_msg_hw(adapter))
  611. dev_dbg(&pdev->dev, "pcie phy link down\n");
  612. for (i = 0; i < 25; i++) {
  613. msleep(1);
  614. val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
  615. if (!(val & (MDIO_START | MDIO_BUSY)))
  616. break;
  617. }
  618. if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
  619. if (netif_msg_hw(adapter))
  620. dev_warn(&pdev->dev,
  621. "pcie link down at least 25ms\n");
  622. return ret_val;
  623. }
  624. }
  625. return 0;
  626. }
  627. /*
  628. * Configures PHY autoneg and flow control advertisement settings
  629. * hw - Struct containing variables accessed by shared code
  630. */
  631. static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
  632. {
  633. s32 ret_val;
  634. s16 mii_autoneg_adv_reg;
  635. s16 mii_1000t_ctrl_reg;
  636. /* Read the MII Auto-Neg Advertisement Register (Address 4). */
  637. mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
  638. /* Read the MII 1000Base-T Control Register (Address 9). */
  639. mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
  640. /*
  641. * First we clear all the 10/100 mb speed bits in the Auto-Neg
  642. * Advertisement Register (Address 4) and the 1000 mb speed bits in
  643. * the 1000Base-T Control Register (Address 9).
  644. */
  645. mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
  646. mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
  647. /*
  648. * Need to parse media_type and set up
  649. * the appropriate PHY registers.
  650. */
  651. switch (hw->media_type) {
  652. case MEDIA_TYPE_AUTO_SENSOR:
  653. mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
  654. MII_AR_10T_FD_CAPS |
  655. MII_AR_100TX_HD_CAPS |
  656. MII_AR_100TX_FD_CAPS);
  657. mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
  658. break;
  659. case MEDIA_TYPE_1000M_FULL:
  660. mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
  661. break;
  662. case MEDIA_TYPE_100M_FULL:
  663. mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
  664. break;
  665. case MEDIA_TYPE_100M_HALF:
  666. mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
  667. break;
  668. case MEDIA_TYPE_10M_FULL:
  669. mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
  670. break;
  671. default:
  672. mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
  673. break;
  674. }
  675. /* flow control fixed to enable all */
  676. mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
  677. hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
  678. hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
  679. ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
  680. if (ret_val)
  681. return ret_val;
  682. ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
  683. if (ret_val)
  684. return ret_val;
  685. return 0;
  686. }
  687. /*
  688. * Configures link settings.
  689. * hw - Struct containing variables accessed by shared code
  690. * Assumes the hardware has previously been reset and the
  691. * transmitter and receiver are not enabled.
  692. */
  693. static s32 atl1_setup_link(struct atl1_hw *hw)
  694. {
  695. struct pci_dev *pdev = hw->back->pdev;
  696. struct atl1_adapter *adapter = hw->back;
  697. s32 ret_val;
  698. /*
  699. * Options:
  700. * PHY will advertise value(s) parsed from
  701. * autoneg_advertised and fc
  702. * no matter what autoneg is , We will not wait link result.
  703. */
  704. ret_val = atl1_phy_setup_autoneg_adv(hw);
  705. if (ret_val) {
  706. if (netif_msg_link(adapter))
  707. dev_dbg(&pdev->dev,
  708. "error setting up autonegotiation\n");
  709. return ret_val;
  710. }
  711. /* SW.Reset , En-Auto-Neg if needed */
  712. ret_val = atl1_phy_reset(hw);
  713. if (ret_val) {
  714. if (netif_msg_link(adapter))
  715. dev_dbg(&pdev->dev, "error resetting phy\n");
  716. return ret_val;
  717. }
  718. hw->phy_configured = true;
  719. return ret_val;
  720. }
  721. static void atl1_init_flash_opcode(struct atl1_hw *hw)
  722. {
  723. if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
  724. /* Atmel */
  725. hw->flash_vendor = 0;
  726. /* Init OP table */
  727. iowrite8(flash_table[hw->flash_vendor].cmd_program,
  728. hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
  729. iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
  730. hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
  731. iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
  732. hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
  733. iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
  734. hw->hw_addr + REG_SPI_FLASH_OP_RDID);
  735. iowrite8(flash_table[hw->flash_vendor].cmd_wren,
  736. hw->hw_addr + REG_SPI_FLASH_OP_WREN);
  737. iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
  738. hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
  739. iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
  740. hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
  741. iowrite8(flash_table[hw->flash_vendor].cmd_read,
  742. hw->hw_addr + REG_SPI_FLASH_OP_READ);
  743. }
  744. /*
  745. * Performs basic configuration of the adapter.
  746. * hw - Struct containing variables accessed by shared code
  747. * Assumes that the controller has previously been reset and is in a
  748. * post-reset uninitialized state. Initializes multicast table,
  749. * and Calls routines to setup link
  750. * Leaves the transmit and receive units disabled and uninitialized.
  751. */
  752. static s32 atl1_init_hw(struct atl1_hw *hw)
  753. {
  754. u32 ret_val = 0;
  755. /* Zero out the Multicast HASH table */
  756. iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
  757. /* clear the old settings from the multicast hash table */
  758. iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
  759. atl1_init_flash_opcode(hw);
  760. if (!hw->phy_configured) {
  761. /* enable GPHY LinkChange Interrrupt */
  762. ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
  763. if (ret_val)
  764. return ret_val;
  765. /* make PHY out of power-saving state */
  766. ret_val = atl1_phy_leave_power_saving(hw);
  767. if (ret_val)
  768. return ret_val;
  769. /* Call a subroutine to configure the link */
  770. ret_val = atl1_setup_link(hw);
  771. }
  772. return ret_val;
  773. }
  774. /*
  775. * Detects the current speed and duplex settings of the hardware.
  776. * hw - Struct containing variables accessed by shared code
  777. * speed - Speed of the connection
  778. * duplex - Duplex setting of the connection
  779. */
  780. static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
  781. {
  782. struct pci_dev *pdev = hw->back->pdev;
  783. struct atl1_adapter *adapter = hw->back;
  784. s32 ret_val;
  785. u16 phy_data;
  786. /* ; --- Read PHY Specific Status Register (17) */
  787. ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
  788. if (ret_val)
  789. return ret_val;
  790. if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
  791. return ATLX_ERR_PHY_RES;
  792. switch (phy_data & MII_ATLX_PSSR_SPEED) {
  793. case MII_ATLX_PSSR_1000MBS:
  794. *speed = SPEED_1000;
  795. break;
  796. case MII_ATLX_PSSR_100MBS:
  797. *speed = SPEED_100;
  798. break;
  799. case MII_ATLX_PSSR_10MBS:
  800. *speed = SPEED_10;
  801. break;
  802. default:
  803. if (netif_msg_hw(adapter))
  804. dev_dbg(&pdev->dev, "error getting speed\n");
  805. return ATLX_ERR_PHY_SPEED;
  806. break;
  807. }
  808. if (phy_data & MII_ATLX_PSSR_DPLX)
  809. *duplex = FULL_DUPLEX;
  810. else
  811. *duplex = HALF_DUPLEX;
  812. return 0;
  813. }
  814. static void atl1_set_mac_addr(struct atl1_hw *hw)
  815. {
  816. u32 value;
  817. /*
  818. * 00-0B-6A-F6-00-DC
  819. * 0: 6AF600DC 1: 000B
  820. * low dword
  821. */
  822. value = (((u32) hw->mac_addr[2]) << 24) |
  823. (((u32) hw->mac_addr[3]) << 16) |
  824. (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
  825. iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
  826. /* high dword */
  827. value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
  828. iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
  829. }
  830. /*
  831. * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
  832. * @adapter: board private structure to initialize
  833. *
  834. * atl1_sw_init initializes the Adapter private data structure.
  835. * Fields are initialized based on PCI device information and
  836. * OS network device settings (MTU size).
  837. */
  838. static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
  839. {
  840. struct atl1_hw *hw = &adapter->hw;
  841. struct net_device *netdev = adapter->netdev;
  842. hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
  843. hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
  844. adapter->wol = 0;
  845. device_set_wakeup_enable(&adapter->pdev->dev, false);
  846. adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
  847. adapter->ict = 50000; /* 100ms */
  848. adapter->link_speed = SPEED_0; /* hardware init */
  849. adapter->link_duplex = FULL_DUPLEX;
  850. hw->phy_configured = false;
  851. hw->preamble_len = 7;
  852. hw->ipgt = 0x60;
  853. hw->min_ifg = 0x50;
  854. hw->ipgr1 = 0x40;
  855. hw->ipgr2 = 0x60;
  856. hw->max_retry = 0xf;
  857. hw->lcol = 0x37;
  858. hw->jam_ipg = 7;
  859. hw->rfd_burst = 8;
  860. hw->rrd_burst = 8;
  861. hw->rfd_fetch_gap = 1;
  862. hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
  863. hw->rx_jumbo_lkah = 1;
  864. hw->rrd_ret_timer = 16;
  865. hw->tpd_burst = 4;
  866. hw->tpd_fetch_th = 16;
  867. hw->txf_burst = 0x100;
  868. hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
  869. hw->tpd_fetch_gap = 1;
  870. hw->rcb_value = atl1_rcb_64;
  871. hw->dma_ord = atl1_dma_ord_enh;
  872. hw->dmar_block = atl1_dma_req_256;
  873. hw->dmaw_block = atl1_dma_req_256;
  874. hw->cmb_rrd = 4;
  875. hw->cmb_tpd = 4;
  876. hw->cmb_rx_timer = 1; /* about 2us */
  877. hw->cmb_tx_timer = 1; /* about 2us */
  878. hw->smb_timer = 100000; /* about 200ms */
  879. spin_lock_init(&adapter->lock);
  880. spin_lock_init(&adapter->mb_lock);
  881. return 0;
  882. }
  883. static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
  884. {
  885. struct atl1_adapter *adapter = netdev_priv(netdev);
  886. u16 result;
  887. atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
  888. return result;
  889. }
  890. static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
  891. int val)
  892. {
  893. struct atl1_adapter *adapter = netdev_priv(netdev);
  894. atl1_write_phy_reg(&adapter->hw, reg_num, val);
  895. }
/*
 * atl1_mii_ioctl - handle MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG)
 * @netdev: network interface device structure
 * @ifr: ifreq carrying the embedded struct mii_ioctl_data
 * @cmd: ioctl command code
 *
 * Delegates to generic_mii_ioctl() while holding adapter->lock with
 * interrupts disabled, so PHY register accesses do not race with the
 * driver's own link management.
 *
 * Returns -EINVAL if the interface is down, otherwise the result of
 * generic_mii_ioctl().
 */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	int retval;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->lock, flags);
	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
	spin_unlock_irqrestore(&adapter->lock, flags);

	return retval;
}
  914. /*
  915. * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
  916. * @adapter: board private structure
  917. *
  918. * Return 0 on success, negative on failure
  919. */
  920. static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
  921. {
  922. struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
  923. struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
  924. struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
  925. struct atl1_ring_header *ring_header = &adapter->ring_header;
  926. struct pci_dev *pdev = adapter->pdev;
  927. int size;
  928. u8 offset = 0;
  929. size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
  930. tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
  931. if (unlikely(!tpd_ring->buffer_info)) {
  932. if (netif_msg_drv(adapter))
  933. dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
  934. size);
  935. goto err_nomem;
  936. }
  937. rfd_ring->buffer_info =
  938. (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
  939. /*
  940. * real ring DMA buffer
  941. * each ring/block may need up to 8 bytes for alignment, hence the
  942. * additional 40 bytes tacked onto the end.
  943. */
  944. ring_header->size = size =
  945. sizeof(struct tx_packet_desc) * tpd_ring->count
  946. + sizeof(struct rx_free_desc) * rfd_ring->count
  947. + sizeof(struct rx_return_desc) * rrd_ring->count
  948. + sizeof(struct coals_msg_block)
  949. + sizeof(struct stats_msg_block)
  950. + 40;
  951. ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
  952. &ring_header->dma);
  953. if (unlikely(!ring_header->desc)) {
  954. if (netif_msg_drv(adapter))
  955. dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
  956. goto err_nomem;
  957. }
  958. memset(ring_header->desc, 0, ring_header->size);
  959. /* init TPD ring */
  960. tpd_ring->dma = ring_header->dma;
  961. offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
  962. tpd_ring->dma += offset;
  963. tpd_ring->desc = (u8 *) ring_header->desc + offset;
  964. tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
  965. /* init RFD ring */
  966. rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
  967. offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
  968. rfd_ring->dma += offset;
  969. rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
  970. rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
  971. /* init RRD ring */
  972. rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
  973. offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
  974. rrd_ring->dma += offset;
  975. rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
  976. rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
  977. /* init CMB */
  978. adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
  979. offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
  980. adapter->cmb.dma += offset;
  981. adapter->cmb.cmb = (struct coals_msg_block *)
  982. ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
  983. /* init SMB */
  984. adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
  985. offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
  986. adapter->smb.dma += offset;
  987. adapter->smb.smb = (struct stats_msg_block *)
  988. ((u8 *) adapter->cmb.cmb +
  989. (sizeof(struct coals_msg_block) + offset));
  990. return 0;
  991. err_nomem:
  992. kfree(tpd_ring->buffer_info);
  993. return -ENOMEM;
  994. }
/*
 * Reset every ring producer/consumer index to zero.  The mix of
 * atomic_set() and plain assignment mirrors how each index is declared
 * and updated elsewhere in the driver (some indices are atomics, some
 * plain integers).
 */
static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);
	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 *
 * Unmaps the DMA mapping and frees the sk_buff of every receive
 * buffer, wipes the software buffer_info state and the descriptor
 * memory, and resets the RFD/RRD ring indices to zero.
 * NOTE(review): assumes the hardware has been stopped so it is no
 * longer DMAing into these buffers -- confirm at the call sites.
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);
	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
  1041. /*
  1042. * atl1_clean_tx_ring - Free Tx Buffers
  1043. * @adapter: board private structure
  1044. */
  1045. static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
  1046. {
  1047. struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
  1048. struct atl1_buffer *buffer_info;
  1049. struct pci_dev *pdev = adapter->pdev;
  1050. unsigned long size;
  1051. unsigned int i;
  1052. /* Free all the Tx ring sk_buffs */
  1053. for (i = 0; i < tpd_ring->count; i++) {
  1054. buffer_info = &tpd_ring->buffer_info[i];
  1055. if (buffer_info->dma) {
  1056. pci_unmap_page(pdev, buffer_info->dma,
  1057. buffer_info->length, PCI_DMA_TODEVICE);
  1058. buffer_info->dma = 0;
  1059. }
  1060. }
  1061. for (i = 0; i < tpd_ring->count; i++) {
  1062. buffer_info = &tpd_ring->buffer_info[i];
  1063. if (buffer_info->skb) {
  1064. dev_kfree_skb_any(buffer_info->skb);
  1065. buffer_info->skb = NULL;
  1066. }
  1067. }
  1068. size = sizeof(struct atl1_buffer) * tpd_ring->count;
  1069. memset(tpd_ring->buffer_info, 0, size);
  1070. /* Zero out the descriptor ring */
  1071. memset(tpd_ring->desc, 0, tpd_ring->size);
  1072. atomic_set(&tpd_ring->next_to_use, 0);
  1073. atomic_set(&tpd_ring->next_to_clean, 0);
  1074. }
/*
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit and receive software resources: ring buffers,
 * buffer_info arrays and the shared coherent DMA area.
 */
static void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	/* one kfree only: rfd_ring->buffer_info points into the same
	 * allocation (see atl1_setup_ring_resources) */
	kfree(tpd_ring->buffer_info);
	/* one free for the whole DMA area backing all rings, CMB and SMB */
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	/* poison the pointers so stale use becomes an obvious NULL deref */
	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;
	tpd_ring->dma = 0;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;
	rfd_ring->dma = 0;

	/* rrd_ring has no buffer_info of its own */
	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;

	adapter->cmb.dma = 0;
	adapter->cmb.cmb = NULL;

	adapter->smb.dma = 0;
	adapter->smb.smb = NULL;
}
/*
 * Program the MAC control register from the current link parameters
 * (speed/duplex), flow control, preamble length, VLAN tag stripping
 * and Rx filter mode, and enable the transmitter and receiver.
 */
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
	u32 value;
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;
	/* speed: one field distinguishes only gigabit vs. 10/100 */
	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);
	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
	/* vlan: strip tags in hardware only when a vlan group is registered */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;
	/* rx checksum
	   if (adapter->rx_csum)
	   value |= MAC_CTRL_RX_CHKSUM_EN;
	 */
	/* filter mode: broadcast always on; promisc/allmulti per netdev flags */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}
/*
 * atl1_check_link - poll PHY link state and reconcile MAC configuration
 * @adapter: board private structure
 *
 * Reads BMSR to detect link up/down.  On link up the negotiated
 * speed/duplex is compared against the configured media type: if they
 * agree, the MAC is reprogrammed and the carrier turned on; if not,
 * the PHY is forced to the configured fixed mode, or -- for autoneg /
 * 1000M-full media types -- a timer is armed to re-configure the PHY
 * later.
 *
 * Returns 0, or the error from atl1_get_speed_and_duplex().
 */
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice: the link-status bit is latched, so the
	 * first read may return stale state */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	/* does the negotiated result match the configured media type? */
	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev,
					"%s link is up %d Mbps %s\n",
					netdev->name, adapter->link_speed,
					adapter->link_duplex == FULL_DUPLEX ?
					"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		/* fixed media type: force the PHY to the configured mode */
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer,
			round_jiffies(jiffies + 3 * HZ));
	}

	return 0;
}
/*
 * Program Rx FIFO (RXF) and RRD pause thresholds for older silicon
 * revisions (dev_rev 0x8001/0x9001/0x9002/0x9003; see atl1_configure).
 * Thresholds are fractions of the ring sizes.
 */
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	/* NOTE(review): the HI field receives the *smaller* value here
	 * (count/16) and LO the larger (7/8 of count); assumed to match the
	 * register's field semantics -- confirm against the datasheet */
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
/*
 * Program Rx FIFO (RXF) and RRD pause thresholds for newer silicon.
 * Thresholds are derived from the SRAM region lengths read back from
 * the hardware, clamped to sane minimums and ordered so hi >= lo.
 */
static void set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
/*
 * atl1_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset: MAC address,
 * ring base addresses and sizes, mailbox, inter-packet gaps,
 * half-duplex parameters, interrupt moderation, jumbo thresholds,
 * flow control, TXQ/RXQ/DMA engines and the CMB/SMB blocks.
 *
 * Returns 0 on success, or 1 if ISR reported PHY link-down while the
 * configuration was being written.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address: bytes 2..5 in one register, bytes 0..1 in the next */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/* HI base address: a single register serves all rings, which is why
	 * they share one DMA allocation (same upper 32 bits) */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count: RRD count in the upper half, RFD count in the lower */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox: publish the initial producer/consumer indices */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		<< MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		<< RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* Flow Control: old silicon revisions use ring-size-based
	 * thresholds, newer ones use SRAM-length-based thresholds */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* --- enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	/* report failure if the PHY dropped link during configuration */
	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1; /* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}
/*
 * atl1_pcie_patch - Patch for PCIE module
 *
 * Writes vendor-supplied values to undocumented registers at offsets
 * 0x12FC and 0x1008 (bit 15 of 0x1008 changes the PCIe flow control
 * mode).  Carried over from the Attansic reference driver; the exact
 * meaning of these values is not documented.
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
	u32 value;

	/* much vendor magic here */
	value = 0x6500;
	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
	/* pcie flow control mode change */
	value = ioread32(adapter->hw.hw_addr + 0x1008);
	value |= 0x8000;
	iowrite32(value, adapter->hw.hw_addr + 0x1008);
}
  1462. /*
  1463. * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
  1464. * on PCI Command register is disable.
  1465. * The function enable this bit.
  1466. * Brackett, 2006/03/15
  1467. */
  1468. static void atl1_via_workaround(struct atl1_adapter *adapter)
  1469. {
  1470. unsigned long value;
  1471. value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
  1472. if (value & PCI_COMMAND_INTX_DISABLE)
  1473. value &= ~PCI_COMMAND_INTX_DISABLE;
  1474. iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
  1475. }
/*
 * atl1_inc_smb - fold the Statistics Message Block into driver stats
 * @adapter: board private structure
 *
 * Adds the counters from the DMA'd stats message block (SMB) into the
 * driver's cumulative soft_stats and mirrors the totals into
 * netdev->stats.  Assumes the SMB holds per-interval deltas (hence the
 * "+=") -- TODO confirm against the datasheet.
 */
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	/* each aborted transmit is counted as max_retry collisions */
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	/* both RRD and Rx FIFO overflows mean frames never reached the host */
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);
	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	/* driver-private detail counters (exposed via ethtool stats) */
	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	/* publish cumulative totals to the kernel-visible netdev stats */
	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
	netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
	netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
	netdev->stats.multicast = adapter->soft_stats.multicast;
	netdev->stats.collisions = adapter->soft_stats.collisions;
	netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
	netdev->stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	netdev->stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	netdev->stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	netdev->stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
	netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	netdev->stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	netdev->stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	netdev->stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}
  1541. static void atl1_update_mailbox(struct atl1_adapter *adapter)
  1542. {
  1543. unsigned long flags;
  1544. u32 tpd_next_to_use;
  1545. u32 rfd_next_to_use;
  1546. u32 rrd_next_to_clean;
  1547. u32 value;
  1548. spin_lock_irqsave(&adapter->mb_lock, flags);
  1549. tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
  1550. rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
  1551. rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
  1552. value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
  1553. MB_RFD_PROD_INDX_SHIFT) |
  1554. ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
  1555. MB_RRD_CONS_INDX_SHIFT) |
  1556. ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
  1557. MB_TPD_PROD_INDX_SHIFT);
  1558. iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
  1559. spin_unlock_ir