
/drivers/e1000-6.x/src/e1000_main.c

https://github.com/bhesmans/click
Possible License(s): GPL-2.0, BSD-3-Clause
  1. /*******************************************************************************
  2. Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
  3. This program is free software; you can redistribute it and/or modify it
  4. under the terms of the GNU General Public License as published by the Free
  5. Software Foundation; either version 2 of the License, or (at your option)
  6. any later version.
  7. This program is distributed in the hope that it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc., 59
  13. Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  14. The full GNU General Public License is included in this distribution in the
  15. file called LICENSE.
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  19. *******************************************************************************/
  20. #include "e1000.h"
  21. /* Change Log
  22. * 6.0.58 4/20/05
  23. * o e1000_set_spd_dplx tests for compatible speed/duplex specification
  24. * for fiber adapters
  25. * 6.0.57 4/19/05
  26. * o Added code to fix register test failure for devices >= 82571
  27. *
  28. * 6.0.52 3/15/05
  29. * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
  30. * calls, one from mii_ioctl and the other from within update_stats while
  31. * processing MIIREG ioctl.
  32. *
  33. * 6.1.2 4/13/05
  34. * o Fixed ethtool diagnostics
  35. * o Enabled flow control to take default eeprom settings
  36. * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
  37. * calls, one from mii_ioctl and the other from within update_stats while processing
  38. * MIIREG ioctl.
  39. * 6.0.55 3/23/05
  40. * o Support for MODULE_VERSION
  41. * o Fix APM setting for 82544 based adapters
  42. * 6.0.54 3/26/05
  43. * o Added a timer to expire packets that were deferred for cleanup
  44. * 6.0.52 3/15/05
  45. * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
  46. * calls, one from mii_ioctl and the other from within update_stats while
  47. * processing MIIREG ioctl.
  48. * 6.0.47 3/2/05
  49. * o Added enhanced functionality to the loopback diags to wrap the
  50. * descriptor rings
  51. * o Added manageability vlan filtering workaround.
  52. *
  53. * 6.0.44+ 2/15/05
  54. * o Added code to handle raw packet based DHCP packets
  55. * o Added code to fix the errata 10 buffer overflow issue
  56. * o Sync up with WR01-05
  57. * o applied Anton's patch to resolve tx hang in hardware
  58. * o e1000 timeouts with early writeback patch
  59. * o Removed Queensport IDs
  60. * o fixed driver panic if MAC receives a bad large packet when packet
  61. * split is enabled
  62. * o Applied Andrew Morton's patch - e1000 stops working after resume
  63. * 5.2.29 12/24/03
  64. * o Bug fix: Endianness issue causing ethtool diags to fail on ppc.
  65. * o Bug fix: Use pdev->irq instead of netdev->irq for MSI support.
  66. * o Report driver message on user override of InterruptThrottleRate module
  67. * parameter.
  68. * o Bug fix: Change I/O address storage from uint32_t to unsigned long.
  69. * o Feature: Added ethtool RINGPARAM support.
  70. * o Feature: Added netpoll support.
  71. * o Bug fix: Race between Tx queue and Tx clean fixed with a spin lock.
  72. * o Bug fix: Allow 1000/Full setting for autoneg param for fiber connections.
  73. * Jon D Mason [jonmason@us.ibm.com].
  74. *
  75. * 5.2.22 10/15/03
  76. * o Bug fix: SERDES devices might be connected to a back-plane switch that
  77. * doesn't support auto-neg, so add the capability to force 1000/Full.
  78. * Also, since forcing 1000/Full, sample RxSynchronize bit to detect link
  79. * state.
  80. * o Bug fix: Flow control settings for hi/lo watermark didn't consider
  81. * changes in the RX FIFO size, which could occur with Jumbo Frames or with
  82. * the reduced FIFO in 82547.
  83. * o Bug fix: Better propagation of error codes.
  84. * [Janice Girouard (janiceg -a-t- us.ibm.com)]
  85. * o Bug fix: hang under heavy Tx stress when running out of Tx descriptors;
  86. * wasn't clearing context descriptor when backing out of send because of
  87. * no-resource condition.
  88. * o Bug fix: check netif_running in dev->poll so we don't have to hang in
  89. * dev->close until all polls are finished. [Robert Olsson
  90. * (robert.olsson@data.slu.se)].
  91. * o Revert TxDescriptor ring size back to 256 since change to 1024 wasn't
  92. * accepted into the kernel.
  93. *
  94. * 5.2.16 8/8/03
  95. */
  96. char e1000_driver_name[] = "e1000";
  97. char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  98. #ifndef CONFIG_E1000_NAPI
  99. #define DRIVERNAPI
  100. #else
  101. #define DRIVERNAPI "-NAPI"
  102. #endif
  103. #define DRV_VERSION "6.1.16.2.DB"DRIVERNAPI
  104. char e1000_driver_version[] = DRV_VERSION;
  105. char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
  106. #if !HAVE___NETIF_RECEIVE_SKB
  107. #define netif_receive_skb(skb) netif_receive_skb((skb), (skb)->protocol, 0)
  108. #endif
  109. /* e1000_pci_tbl - PCI Device ID Table
  110. *
  111. * Last entry must be all 0s
  112. *
  113. * Macro expands to...
  114. * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  115. */
  116. static struct pci_device_id e1000_pci_tbl[] = {
  117. INTEL_E1000_ETHERNET_DEVICE(0x1000),
  118. INTEL_E1000_ETHERNET_DEVICE(0x1001),
  119. INTEL_E1000_ETHERNET_DEVICE(0x1004),
  120. INTEL_E1000_ETHERNET_DEVICE(0x1008),
  121. INTEL_E1000_ETHERNET_DEVICE(0x1009),
  122. INTEL_E1000_ETHERNET_DEVICE(0x100C),
  123. INTEL_E1000_ETHERNET_DEVICE(0x100D),
  124. INTEL_E1000_ETHERNET_DEVICE(0x100E),
  125. INTEL_E1000_ETHERNET_DEVICE(0x100F),
  126. INTEL_E1000_ETHERNET_DEVICE(0x1010),
  127. INTEL_E1000_ETHERNET_DEVICE(0x1011),
  128. INTEL_E1000_ETHERNET_DEVICE(0x1012),
  129. INTEL_E1000_ETHERNET_DEVICE(0x1013),
  130. INTEL_E1000_ETHERNET_DEVICE(0x1014),
  131. INTEL_E1000_ETHERNET_DEVICE(0x1015),
  132. INTEL_E1000_ETHERNET_DEVICE(0x1016),
  133. INTEL_E1000_ETHERNET_DEVICE(0x1017),
  134. INTEL_E1000_ETHERNET_DEVICE(0x1018),
  135. INTEL_E1000_ETHERNET_DEVICE(0x1019),
  136. INTEL_E1000_ETHERNET_DEVICE(0x101A),
  137. INTEL_E1000_ETHERNET_DEVICE(0x101D),
  138. INTEL_E1000_ETHERNET_DEVICE(0x101E),
  139. INTEL_E1000_ETHERNET_DEVICE(0x1026),
  140. INTEL_E1000_ETHERNET_DEVICE(0x1027),
  141. INTEL_E1000_ETHERNET_DEVICE(0x1028),
  142. INTEL_E1000_ETHERNET_DEVICE(0x105E),
  143. INTEL_E1000_ETHERNET_DEVICE(0x105F),
  144. INTEL_E1000_ETHERNET_DEVICE(0x1060),
  145. INTEL_E1000_ETHERNET_DEVICE(0x1075),
  146. INTEL_E1000_ETHERNET_DEVICE(0x1076),
  147. INTEL_E1000_ETHERNET_DEVICE(0x1077),
  148. INTEL_E1000_ETHERNET_DEVICE(0x1078),
  149. INTEL_E1000_ETHERNET_DEVICE(0x1079),
  150. INTEL_E1000_ETHERNET_DEVICE(0x107A),
  151. INTEL_E1000_ETHERNET_DEVICE(0x107B),
  152. INTEL_E1000_ETHERNET_DEVICE(0x107C),
  153. INTEL_E1000_ETHERNET_DEVICE(0x107D),
  154. INTEL_E1000_ETHERNET_DEVICE(0x107E),
  155. INTEL_E1000_ETHERNET_DEVICE(0x107F),
  156. INTEL_E1000_ETHERNET_DEVICE(0x108A),
  157. INTEL_E1000_ETHERNET_DEVICE(0x108B),
  158. INTEL_E1000_ETHERNET_DEVICE(0x108C),
  159. INTEL_E1000_ETHERNET_DEVICE(0x109A),
  160. INTEL_E1000_ETHERNET_DEVICE(0x10A0),
  161. INTEL_E1000_ETHERNET_DEVICE(0x10A1),
  162. /* required last entry */
  163. {0,}
  164. };
  165. MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  166. int e1000_up(struct e1000_adapter *adapter);
  167. void e1000_down(struct e1000_adapter *adapter);
  168. void e1000_reset(struct e1000_adapter *adapter);
  169. int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
  170. int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  171. int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  172. void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
  173. void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
  174. int e1000_setup_tx_resources(struct e1000_adapter *adapter,
  175. struct e1000_tx_ring *txdr);
  176. int e1000_setup_rx_resources(struct e1000_adapter *adapter,
  177. struct e1000_rx_ring *rxdr);
  178. void e1000_free_tx_resources(struct e1000_adapter *adapter,
  179. struct e1000_tx_ring *tx_ring);
  180. void e1000_free_rx_resources(struct e1000_adapter *adapter,
  181. struct e1000_rx_ring *rx_ring);
  182. void e1000_update_stats(struct e1000_adapter *adapter);
  183. static int e1000_init_module(void);
  184. static void e1000_exit_module(void);
  185. static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  186. static void __devexit e1000_remove(struct pci_dev *pdev);
  187. static int e1000_alloc_queues(struct e1000_adapter *adapter);
  188. #ifdef CONFIG_E1000_MQ
  189. static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
  190. #endif
  191. static int e1000_sw_init(struct e1000_adapter *adapter);
  192. static int e1000_open(struct net_device *netdev);
  193. static int e1000_close(struct net_device *netdev);
  194. static void e1000_configure_tx(struct e1000_adapter *adapter);
  195. static void e1000_configure_rx(struct e1000_adapter *adapter);
  196. static void e1000_setup_rctl(struct e1000_adapter *adapter);
  197. static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
  198. static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
  199. static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
  200. struct e1000_tx_ring *tx_ring);
  201. static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
  202. struct e1000_rx_ring *rx_ring);
  203. static void e1000_set_multi(struct net_device *netdev);
  204. static void e1000_update_phy_info(unsigned long data);
  205. static void e1000_watchdog(unsigned long data);
  206. static void e1000_watchdog_1(struct e1000_adapter *adapter);
  207. static void e1000_82547_tx_fifo_stall(unsigned long data);
  208. static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
  209. static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
  210. static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
  211. static int e1000_set_mac(struct net_device *netdev, void *p);
  212. static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
  213. static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
  214. struct e1000_tx_ring *tx_ring);
  215. #ifdef CONFIG_E1000_NAPI
  216. static int e1000_clean(struct net_device *poll_dev, int *budget);
  217. static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
  218. struct e1000_rx_ring *rx_ring,
  219. int *work_done, int work_to_do);
  220. static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  221. struct e1000_rx_ring *rx_ring,
  222. int *work_done, int work_to_do);
  223. #else
  224. static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
  225. struct e1000_rx_ring *rx_ring);
  226. static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  227. struct e1000_rx_ring *rx_ring);
  228. #endif
  229. static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
  230. struct e1000_rx_ring *rx_ring);
  231. static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
  232. struct e1000_rx_ring *rx_ring);
  233. static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
  234. #ifdef SIOCGMIIPHY
  235. static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
  236. int cmd);
  237. #endif
  238. void set_ethtool_ops(struct net_device *netdev);
  239. extern int ethtool_ioctl(struct ifreq *ifr);
  240. extern int e1000_bypass_ctrl_ioctl(struct net_device *netdev, struct ifreq *ifr);
  241. static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
  242. static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
  243. static void e1000_tx_timeout(struct net_device *dev);
  244. static void e1000_tx_timeout_task(struct net_device *dev);
  245. static void e1000_smartspeed(struct e1000_adapter *adapter);
  246. static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
  247. struct sk_buff *skb);
  248. #ifdef NETIF_F_HW_VLAN_TX
  249. static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
  250. static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
  251. static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
  252. static void e1000_restore_vlan(struct e1000_adapter *adapter);
  253. #endif
  254. static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
  255. static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
  256. #ifdef CONFIG_PM
  257. static int e1000_resume(struct pci_dev *pdev);
  258. #endif
  259. /* For Click polling */
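/* prototypes for the polling hooks that e1000_probe() installs on struct net_device */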
  260. static int e1000_tx_pqueue(struct net_device *dev, struct sk_buff *skb);
  261. static int e1000_tx_start(struct net_device *dev);
  262. static int e1000_rx_refill(struct net_device *dev, struct sk_buff **);
  263. static int e1000_tx_eob(struct net_device *dev);
  264. static struct sk_buff *e1000_tx_clean(struct net_device *dev);
  265. static struct sk_buff *e1000_rx_poll(struct net_device *dev, int *want);
  266. static int e1000_poll_on(struct net_device *dev);
  267. static int e1000_poll_off(struct net_device *dev);
  268. #ifdef CONFIG_NET_POLL_CONTROLLER
  269. /* for netdump / net console */
  270. static void e1000_netpoll (struct net_device *netdev);
  271. #endif
  272. #ifdef CONFIG_E1000_MQ
  273. /* for multiple Rx queues */
  274. void e1000_rx_schedule(void *data);
  275. #endif
  276. struct notifier_block e1000_notifier_reboot = {
  277. .notifier_call = e1000_notify_reboot,
  278. .next = NULL,
  279. .priority = 0
  280. };
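/* DEBUG_PRINT is forced off here, so the print helpers below are always compiled out */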
  281. #undef DEBUG_PRINT
  282. #ifdef DEBUG_PRINT
  283. static void e1000_print_rx_buffer_info(struct e1000_buffer *bi);
  284. static void e1000_print_rx_desc(struct e1000_rx_desc *rx_desc);
  285. static void e1000_print_skb(struct sk_buff* skb);
  286. #endif
  287. /* Exported from other modules */
  288. extern void e1000_check_options(struct e1000_adapter *adapter);
  289. static struct pci_driver e1000_driver = {
  290. .name = e1000_driver_name,
  291. .id_table = e1000_pci_tbl,
  292. .probe = e1000_probe,
  293. .remove = __devexit_p(e1000_remove),
  294. /* Power Management Hooks */
  295. #ifdef CONFIG_PM
  296. .suspend = e1000_suspend,
  297. .resume = e1000_resume
  298. #endif
  299. };
  300. MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  301. MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
  302. MODULE_LICENSE("GPL");
  303. MODULE_VERSION(DRV_VERSION);
  304. static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
  305. module_param(debug, int, 0);
  306. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  307. /**
  308. * e1000_init_module - Driver Registration Routine
  309. *
  310. * e1000_init_module is the first routine called when the driver is
  311. * loaded. All it does is register with the PCI subsystem.
  312. **/
  313. static int __init
  314. e1000_init_module(void)
  315. {
  316. int ret;
  317. printk(KERN_INFO "%s - version %s\n",
  318. e1000_driver_string, e1000_driver_version);
  319. printk(KERN_INFO " w/ Click polling\n");
  320. printk(KERN_INFO "%s\n", e1000_copyright);
  321. ret = pci_module_init(&e1000_driver);
  322. if(ret >= 0) {
  323. register_reboot_notifier(&e1000_notifier_reboot);
  324. }
  325. return ret;
  326. }
  327. module_init(e1000_init_module);
  328. /**
  329. * e1000_exit_module - Driver Exit Cleanup Routine
  330. *
  331. * e1000_exit_module is called just before the driver is removed
  332. * from memory.
  333. **/
  334. static void __exit
  335. e1000_exit_module(void)
  336. {
  337. unregister_reboot_notifier(&e1000_notifier_reboot);
  338. pci_unregister_driver(&e1000_driver);
  339. }
  340. module_exit(e1000_exit_module);
  341. /**
  342. * e1000_irq_disable - Mask off interrupt generation on the NIC
  343. * @adapter: board private structure
  344. **/
  345. static inline void
  346. e1000_irq_disable(struct e1000_adapter *adapter)
  347. {
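/* irq_sem counts outstanding disable requests; e1000_irq_enable() re-enables interrupts only when the count returns to zero */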
  348. atomic_inc(&adapter->irq_sem);
  349. E1000_WRITE_REG(&adapter->hw, IMC, ~0);
  350. E1000_WRITE_FLUSH(&adapter->hw);
  351. synchronize_irq(adapter->pdev->irq);
  352. }
  353. /**
  354. * e1000_irq_enable - Enable default interrupt generation settings
  355. * @adapter: board private structure
  356. **/
  357. static inline void
  358. e1000_irq_enable(struct e1000_adapter *adapter)
  359. {
  360. if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
  361. E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
  362. E1000_WRITE_FLUSH(&adapter->hw);
  363. }
  364. }
  365. #ifdef NETIF_F_HW_VLAN_TX
  366. void
  367. e1000_update_mng_vlan(struct e1000_adapter *adapter)
  368. {
  369. struct net_device *netdev = adapter->netdev;
  370. uint16_t vid = adapter->hw.mng_cookie.vlan_id;
  371. uint16_t old_vid = adapter->mng_vlan_id;
  372. if(adapter->vlgrp) {
  373. if(!adapter->vlgrp->vlan_devices[vid]) {
  374. if(adapter->hw.mng_cookie.status &
  375. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
  376. e1000_vlan_rx_add_vid(netdev, vid);
  377. adapter->mng_vlan_id = vid;
  378. } else
  379. adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  380. if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
  381. (vid != old_vid) &&
  382. !adapter->vlgrp->vlan_devices[old_vid])
  383. e1000_vlan_rx_kill_vid(netdev, old_vid);
  384. }
  385. }
  386. }
  387. #endif
  388. int
  389. e1000_up(struct e1000_adapter *adapter)
  390. {
  391. struct net_device *netdev = adapter->netdev;
  392. int i, err;
  393. /* hardware has been reset, we need to reload some things */
  394. /* Reset the PHY if it was previously powered down */
  395. if(adapter->hw.media_type == e1000_media_type_copper) {
  396. uint16_t mii_reg;
  397. e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
  398. if(mii_reg & MII_CR_POWER_DOWN)
  399. e1000_phy_reset(&adapter->hw);
  400. }
  401. e1000_set_multi(netdev);
  402. #ifdef NETIF_F_HW_VLAN_TX
  403. e1000_restore_vlan(adapter);
  404. #endif
  405. e1000_configure_tx(adapter);
  406. e1000_setup_rctl(adapter);
  407. e1000_configure_rx(adapter);
  408. for (i = 0; i < adapter->num_queues; i++)
  409. adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
  410. #ifdef CONFIG_PCI_MSI
  411. if(adapter->hw.mac_type > e1000_82547_rev_2) {
  412. adapter->have_msi = TRUE;
  413. if((err = pci_enable_msi(adapter->pdev))) {
  414. DPRINTK(PROBE, ERR,
  415. "Unable to allocate MSI interrupt Error: %d\n", err);
  416. adapter->have_msi = FALSE;
  417. }
  418. }
  419. #endif
  420. if((err = request_irq(adapter->pdev->irq, &e1000_intr,
  421. SA_SHIRQ | SA_SAMPLE_RANDOM,
  422. netdev->name, netdev))) {
  423. DPRINTK(PROBE, ERR,
  424. "Unable to allocate interrupt Error: %d\n", err);
  425. return err;
  426. }
  427. mod_timer(&adapter->watchdog_timer, jiffies);
  428. #ifdef CONFIG_E1000_NAPI
  429. netif_poll_enable(netdev);
  430. #endif
  431. e1000_irq_enable(adapter);
  432. return 0;
  433. }
  434. void
  435. e1000_down(struct e1000_adapter *adapter)
  436. {
  437. struct net_device *netdev = adapter->netdev;
  438. e1000_irq_disable(adapter);
  439. #ifdef CONFIG_E1000_MQ
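/* busy-wait until any Rx-schedule calls still pending on other CPUs have finished before the IRQ is freed */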
  440. while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
  441. #endif
  442. free_irq(adapter->pdev->irq, netdev);
  443. #ifdef CONFIG_PCI_MSI
  444. if(adapter->hw.mac_type > e1000_82547_rev_2 &&
  445. adapter->have_msi == TRUE)
  446. pci_disable_msi(adapter->pdev);
  447. #endif
  448. del_timer_sync(&adapter->tx_fifo_stall_timer);
  449. del_timer_sync(&adapter->watchdog_timer);
  450. del_timer_sync(&adapter->phy_info_timer);
  451. #ifdef CONFIG_E1000_NAPI
  452. netif_poll_disable(netdev);
  453. #endif
  454. adapter->link_speed = 0;
  455. adapter->link_duplex = 0;
  456. netif_carrier_off(netdev);
  457. netif_stop_queue(netdev);
  458. e1000_reset(adapter);
  459. e1000_clean_all_tx_rings(adapter);
  460. e1000_clean_all_rx_rings(adapter);
  461. /* If WoL is not enabled and management mode is not IAMT
  462. * Power down the PHY so no link is implied when interface is down */
  463. if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
  464. adapter->hw.media_type == e1000_media_type_copper &&
  465. !e1000_check_mng_mode(&adapter->hw) &&
  466. !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
  467. uint16_t mii_reg;
  468. e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
  469. mii_reg |= MII_CR_POWER_DOWN;
  470. e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
  471. mdelay(1);
  472. }
  473. }
  474. void
  475. e1000_reset(struct e1000_adapter *adapter)
  476. {
  477. struct net_device *netdev = adapter->netdev;
  478. uint32_t pba, manc;
  479. uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
  480. uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
  481. /* Repartition Pba for greater than 9k mtu
  482. * To take effect CTRL.RST is required.
  483. */
  484. switch (adapter->hw.mac_type) {
  485. case e1000_82547:
  486. case e1000_82547_rev_2:
  487. pba = E1000_PBA_30K;
  488. break;
  489. case e1000_82571:
  490. case e1000_82572:
  491. pba = E1000_PBA_38K;
  492. break;
  493. case e1000_82573:
  494. pba = E1000_PBA_12K;
  495. break;
  496. default:
  497. pba = E1000_PBA_48K;
  498. break;
  499. }
  500. if((adapter->hw.mac_type != e1000_82573) &&
  501. (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
  502. pba -= 8; /* allocate more FIFO for Tx */
  503. /* send an XOFF when there is enough space in the
  504. * Rx FIFO to hold one extra full size Rx packet
  505. */
  506. fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
  507. ETHERNET_FCS_SIZE + 1;
  508. fc_low_water_mark = fc_high_water_mark + 8;
  509. }
  510. if(adapter->hw.mac_type == e1000_82547) {
  511. adapter->tx_fifo_head = 0;
  512. adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
  513. adapter->tx_fifo_size =
  514. (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
  515. atomic_set(&adapter->tx_fifo_stall, 0);
  516. }
  517. E1000_WRITE_REG(&adapter->hw, PBA, pba);
  518. /* flow control settings */
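/* convert the Rx packet-buffer size from KB to bytes and leave fc_*_water_mark bytes of headroom below the top */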
  519. adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
  520. fc_high_water_mark;
  521. adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
  522. fc_low_water_mark;
  523. adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
  524. adapter->hw.fc_send_xon = 1;
  525. adapter->hw.fc = adapter->hw.original_fc;
  526. /* Allow time for pending master requests to run */
  527. e1000_reset_hw(&adapter->hw);
  528. if(adapter->hw.mac_type >= e1000_82544)
  529. E1000_WRITE_REG(&adapter->hw, WUC, 0);
  530. if(e1000_init_hw(&adapter->hw))
  531. DPRINTK(PROBE, ERR, "Hardware Error\n");
  532. #ifdef NETIF_F_HW_VLAN_TX
  533. e1000_update_mng_vlan(adapter);
  534. #endif
  535. /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
  536. E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
  537. e1000_reset_adaptive(&adapter->hw);
  538. e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
  539. if(adapter->en_mng_pt) {
  540. manc = E1000_READ_REG(&adapter->hw, MANC);
  541. manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
  542. E1000_WRITE_REG(&adapter->hw, MANC, manc);
  543. }
  544. }
  545. /**
  546. * e1000_probe - Device Initialization Routine
  547. * @pdev: PCI device information struct
  548. * @ent: entry in e1000_pci_tbl
  549. *
  550. * Returns 0 on success, negative on failure
  551. *
  552. * e1000_probe initializes an adapter identified by a pci_dev structure.
  553. * The OS initialization, configuring of the adapter private structure,
  554. * and a hardware reset occur.
  555. **/
  556. #define SHOW_INTERFACE(d) printk("Interface mac_type=%d\n", d->hw.mac_type)
  557. static int __devinit
  558. e1000_probe(struct pci_dev *pdev,
  559. const struct pci_device_id *ent)
  560. {
  561. struct net_device *netdev;
  562. struct e1000_adapter *adapter;
  563. unsigned long mmio_start, mmio_len;
  564. uint32_t ctrl_ext;
  565. uint32_t swsm;
  566. static int cards_found = 0;
  567. int i, err, pci_using_dac;
  568. uint16_t eeprom_data;
  569. uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
  570. if((err = pci_enable_device(pdev)))
  571. return err;
  572. if(!(err = pci_set_dma_mask(pdev, PCI_DMA_64BIT))) {
  573. pci_using_dac = 1;
  574. } else {
  575. if((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT))) {
  576. E1000_ERR("No usable DMA configuration, aborting\n");
  577. return err;
  578. }
  579. pci_using_dac = 0;
  580. }
  581. if((err = pci_request_regions(pdev, e1000_driver_name)))
  582. return err;
  583. pci_set_master(pdev);
  584. netdev = alloc_etherdev(sizeof(struct e1000_adapter));
  585. if(!netdev) {
  586. err = -ENOMEM;
  587. goto err_alloc_etherdev;
  588. }
  589. SET_MODULE_OWNER(netdev);
  590. SET_NETDEV_DEV(netdev, &pdev->dev);
  591. pci_set_drvdata(pdev, netdev);
  592. adapter = netdev_priv(netdev);
  593. adapter->netdev = netdev;
  594. adapter->pdev = pdev;
  595. adapter->hw.back = adapter;
  596. adapter->msg_enable = (1 << debug) - 1;
  597. mmio_start = pci_resource_start(pdev, BAR_0);
  598. mmio_len = pci_resource_len(pdev, BAR_0);
  599. SHOW_INTERFACE(adapter);
  600. adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
  601. if(!adapter->hw.hw_addr) {
  602. err = -EIO;
  603. goto err_ioremap;
  604. }
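/* scan the remaining BARs for the first I/O space region (if any) and record its base address */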
  605. for(i = BAR_1; i <= BAR_5; i++) {
  606. if(pci_resource_len(pdev, i) == 0)
  607. continue;
  608. if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
  609. adapter->hw.io_base = pci_resource_start(pdev, i);
  610. break;
  611. }
  612. }
  613. netdev->open = &e1000_open;
  614. netdev->stop = &e1000_close;
  615. netdev->hard_start_xmit = &e1000_xmit_frame;
  616. netdev->get_stats = &e1000_get_stats;
  617. netdev->set_multicast_list = &e1000_set_multi;
  618. netdev->set_mac_address = &e1000_set_mac;
  619. netdev->change_mtu = &e1000_change_mtu;
  620. netdev->do_ioctl = &e1000_ioctl;
  621. set_ethtool_ops(netdev);
  622. #ifdef HAVE_TX_TIMEOUT
  623. netdev->tx_timeout = &e1000_tx_timeout;
  624. netdev->watchdog_timeo = 5 * HZ;
  625. #endif
  626. #ifdef CONFIG_E1000_NAPI
  627. netdev->poll = &e1000_clean;
  628. netdev->weight = 64;
  629. #endif
  630. #ifdef NETIF_F_HW_VLAN_TX
  631. netdev->vlan_rx_register = e1000_vlan_rx_register;
  632. netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
  633. netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
  634. #endif
  635. /* Click - polling extensions */
  636. netdev->polling = 0;
  637. netdev->rx_poll = e1000_rx_poll;
  638. netdev->rx_refill = e1000_rx_refill;
  639. netdev->tx_queue = e1000_tx_pqueue;
  640. netdev->tx_eob = e1000_tx_eob;
  641. netdev->tx_start = e1000_tx_start;
  642. netdev->tx_clean = e1000_tx_clean;
  643. netdev->poll_off = e1000_poll_off;
  644. netdev->poll_on = e1000_poll_on;
  645. #ifdef CONFIG_NET_POLL_CONTROLLER
  646. netdev->poll_controller = e1000_netpoll;
  647. #endif
  648. strcpy(netdev->name, pci_name(pdev));
  649. netdev->mem_start = mmio_start;
  650. netdev->mem_end = mmio_start + mmio_len;
  651. netdev->base_addr = adapter->hw.io_base;
  652. adapter->bd_number = cards_found;
  653. /* setup the private structure */
  654. if((err = e1000_sw_init(adapter)))
  655. goto err_sw_init;
  656. if((err = e1000_check_phy_reset_block(&adapter->hw)))
  657. DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
  658. #ifdef MAX_SKB_FRAGS
  659. if(adapter->hw.mac_type >= e1000_82543) {
  660. #ifdef NETIF_F_HW_VLAN_TX
  661. netdev->features = NETIF_F_SG |
  662. NETIF_F_HW_CSUM |
  663. NETIF_F_HW_VLAN_TX |
  664. NETIF_F_HW_VLAN_RX |
  665. NETIF_F_HW_VLAN_FILTER;
  666. #else
  667. netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
  668. #endif
  669. }
  670. #ifdef NETIF_F_TSO
  671. if((adapter->hw.mac_type >= e1000_82544) &&
  672. (adapter->hw.mac_type != e1000_82547))
  673. netdev->features |= NETIF_F_TSO;
  674. #ifdef NETIF_F_TSO_IPV6
  675. if(adapter->hw.mac_type > e1000_82547_rev_2)
  676. netdev->features |= NETIF_F_TSO_IPV6;
  677. #endif
  678. #endif
  679. if(pci_using_dac)
  680. netdev->features |= NETIF_F_HIGHDMA;
  681. #endif
  682. #ifdef NETIF_F_LLTX
  683. netdev->features |= NETIF_F_LLTX;
  684. #endif
  685. adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
  686. /* before reading the EEPROM, reset the controller to
  687. * put the device in a known good starting state */
  688. e1000_reset_hw(&adapter->hw);
  689. /* make sure the EEPROM is good */
  690. if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
  691. DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
  692. err = -EIO;
  693. goto err_eeprom;
  694. }
  695. /* copy the MAC address out of the EEPROM */
  696. if(e1000_read_mac_addr(&adapter->hw))
  697. DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
  698. memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
  699. SHOW_INTERFACE(adapter);
  700. if(!is_valid_ether_addr(netdev->dev_addr)) {
  701. DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
  702. err = -EIO;
  703. goto err_eeprom;
  704. }
  705. e1000_read_part_num(&adapter->hw, &(adapter->part_num));
  706. e1000_get_bus_info(&adapter->hw);
  707. init_timer(&adapter->tx_fifo_stall_timer);
  708. adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
  709. adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
  710. init_timer(&adapter->watchdog_timer);
  711. adapter->watchdog_timer.function = &e1000_watchdog;
  712. adapter->watchdog_timer.data = (unsigned long) adapter;
  713. init_timer(&adapter->phy_info_timer);
  714. adapter->phy_info_timer.function = &e1000_update_phy_info;
  715. adapter->phy_info_timer.data = (unsigned long) adapter;
  716. INIT_WORK(&adapter->tx_timeout_task,
  717. (void (*)(void *))e1000_tx_timeout_task, netdev);
  718. /* we're going to reset, so assume we have no link for now */
  719. netif_carrier_off(netdev);
  720. netif_stop_queue(netdev);
  721. e1000_check_options(adapter);
  722. /* Initial Wake on LAN setting
  723. * If APM wake is enabled in the EEPROM,
  724. * enable the ACPI Magic Packet filter
  725. */
  726. switch(adapter->hw.mac_type) {
  727. case e1000_82542_rev2_0:
  728. case e1000_82542_rev2_1:
  729. case e1000_82543:
  730. break;
  731. case e1000_82544:
  732. e1000_read_eeprom(&adapter->hw,
  733. EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
  734. eeprom_apme_mask = E1000_EEPROM_82544_APM;
  735. break;
  736. case e1000_82546:
  737. case e1000_82546_rev_3:
  738. if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
  739. && (adapter->hw.media_type == e1000_media_type_copper)) {
  740. e1000_read_eeprom(&adapter->hw,
  741. EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
  742. break;
  743. }
  744. /* Fall Through */
  745. default:
  746. e1000_read_eeprom(&adapter->hw,
  747. EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
  748. break;
  749. }
  750. if(eeprom_data & eeprom_apme_mask)
  751. adapter->wol |= E1000_WUFC_MAG;
  752. /* reset the hardware with the new settings */
  753. e1000_reset(adapter);
  754. /* Let firmware know the driver has taken over */
  755. switch(adapter->hw.mac_type) {
  756. case e1000_82571:
  757. case e1000_82572:
  758. ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  759. E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  760. ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
  761. break;
  762. case e1000_82573:
  763. swsm = E1000_READ_REG(&adapter->hw, SWSM);
  764. E1000_WRITE_REG(&adapter->hw, SWSM,
  765. swsm | E1000_SWSM_DRV_LOAD);
  766. break;
  767. default:
  768. break;
  769. }
  770. strcpy(netdev->name, "eth%d");
  771. if((err = register_netdev(netdev)))
  772. goto err_register;
  773. DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
  774. cards_found++;
  775. return 0;
  776. err_register:
  777. err_sw_init:
  778. err_eeprom:
  779. iounmap(adapter->hw.hw_addr);
  780. err_ioremap:
  781. free_netdev(netdev);
  782. err_alloc_etherdev:
  783. pci_release_regions(pdev);
  784. return err;
  785. }
  786. /**
  787. * e1000_remove - Device Removal Routine
  788. * @pdev: PCI device information struct
  789. *
  790. * e1000_remove is called by the PCI subsystem to alert the driver
  791. * that it should release a PCI device. This could be caused by a
  792. * Hot-Plug event, or because the driver is going to be removed from
  793. * memory.
  794. **/
  795. static void __devexit
  796. e1000_remove(struct pci_dev *pdev)
  797. {
  798. struct net_device *netdev = pci_get_drvdata(pdev);
  799. struct e1000_adapter *adapter = netdev_priv(netdev);
  800. uint32_t ctrl_ext;
  801. uint32_t manc, swsm;
  802. #ifdef CONFIG_E1000_NAPI
  803. int i;
  804. #endif
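/* if management traffic runs over SMBus, leave ARP offload enabled when the driver unloads */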
  805. if(adapter->hw.mac_type >= e1000_82540 &&
  806. adapter->hw.media_type == e1000_media_type_copper) {
  807. manc = E1000_READ_REG(&adapter->hw, MANC);
  808. if(manc & E1000_MANC_SMBUS_EN) {
  809. manc |= E1000_MANC_ARP_EN;
  810. E1000_WRITE_REG(&adapter->hw, MANC, manc);
  811. }
  812. }
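/* clear the DRV_LOAD bit set in e1000_probe so firmware knows the driver has released the hardware */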
  813. switch(adapter->hw.mac_type) {
  814. case e1000_82571:
  815. case e1000_82572:
  816. ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  817. E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  818. ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
  819. break;
  820. case e1000_82573:
  821. swsm = E1000_READ_REG(&adapter->hw, SWSM);
  822. E1000_WRITE_REG(&adapter->hw, SWSM,
  823. swsm & ~E1000_SWSM_DRV_LOAD);
  824. break;
  825. default:
  826. break;
  827. }
  828. unregister_netdev(netdev);
  829. #ifdef CONFIG_E1000_NAPI
  830. for (i = 0; i < adapter->num_queues; i++)
  831. __dev_put(&adapter->polling_netdev[i]);
  832. #endif
  833. if(!e1000_check_phy_reset_block(&adapter->hw))
  834. e1000_phy_hw_reset(&adapter->hw);
  835. kfree(adapter->tx_ring);
  836. kfree(adapter->rx_ring);
  837. #ifdef CONFIG_E1000_NAPI
  838. kfree(adapter->polling_netdev);
  839. #endif
  840. iounmap(adapter->hw.hw_addr);
  841. pci_release_regions(pdev);
  842. #ifdef CONFIG_E1000_MQ
  843. free_percpu(adapter->cpu_netdev);
  844. free_percpu(adapter->cpu_tx_ring);
  845. #endif
  846. free_netdev(netdev);
  847. }
  848. /**
  849. * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
  850. * @adapter: board private structure to initialize
  851. *
  852. * e1000_sw_init initializes the Adapter private data structure.
  853. * Fields are initialized based on PCI device information and
  854. * OS network device settings (MTU size).
  855. **/
  856. static int __devinit
  857. e1000_sw_init(struct e1000_adapter *adapter)
  858. {
  859. struct e1000_hw *hw = &adapter->hw;
  860. struct net_device *netdev = adapter->netdev;
  861. struct pci_dev *pdev = adapter->pdev;
  862. #ifdef CONFIG_E1000_NAPI
  863. int i;
  864. #endif
  865. /* PCI config space info */
  866. hw->vendor_id = pdev->vendor;
  867. hw->device_id = pdev->device;
  868. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  869. hw->subsystem_id = pdev->subsystem_device;
  870. pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
  871. pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
  872. adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  873. adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
  874. hw->max_frame_size = netdev->mtu +
  875. ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  876. hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
  877. /* identify the MAC */
  878. if(hw->device_id == 0x10A0 || hw->device_id == 0x10A1) {
  879. DPRINTK(PROBE, INFO, "Setting MAC Type for Dewey Jones Beach Device\n");
  880. hw->mac_type = e1000_82571;
  881. }
  882. else if(e1000_set_mac_type(hw)) {
  883. DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
  884. return -EIO;
  885. }
  886. /* initialize eeprom parameters */
  887. if(e1000_init_eeprom_params(hw)) {
  888. E1000_ERR("EEPROM initialization failed\n");
  889. return -EIO;
  890. }
  891. switch(hw->mac_type) {
  892. default:
  893. break;
  894. case e1000_82541:
  895. case e1000_82547:
  896. case e1000_82541_rev_2:
  897. case e1000_82547_rev_2:
  898. hw->phy_init_script = 1;
  899. break;
  900. }
  901. e1000_set_media_type(hw);
  902. hw->wait_autoneg_complete = FALSE;
  903. hw->tbi_compatibility_en = TRUE;
  904. hw->adaptive_ifs = TRUE;
  905. /* Copper options */
  906. if(hw->media_type == e1000_media_type_copper) {
  907. hw->mdix = AUTO_ALL_MODES;
  908. hw->disable_polarity_correction = FALSE;
  909. hw->master_slave = E1000_MASTER_SLAVE;
  910. }
  911. #ifdef CONFIG_E1000_MQ
  912. /* Number of supported queues */
  913. switch (hw->mac_type) {
  914. case e1000_82571:
  915. case e1000_82572:
  916. adapter->num_queues = 2;
  917. break;
  918. default:
  919. adapter->num_queues = 1;
  920. break;
  921. }
  922. adapter->num_queues = min(adapter->num_queues, num_online_cpus());
  923. #else
  924. adapter->num_queues = 1;
  925. #endif
  926. if (e1000_alloc_queues(adapter)) {
  927. DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
  928. return -ENOMEM;
  929. }
  930. #ifdef CONFIG_E1000_NAPI
  931. for (i = 0; i < adapter->num_queues; i++) {
  932. adapter->polling_netdev[i].priv = adapter;
  933. adapter->polling_netdev[i].poll = &e1000_clean;
  934. adapter->polling_netdev[i].weight = 64;
  935. dev_hold(&adapter->polling_netdev[i]);
  936. set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
  937. }
  938. #endif
  939. #ifdef CONFIG_E1000_MQ
  940. e1000_setup_queue_mapping(adapter);
  941. #endif
  942. atomic_set(&adapter->irq_sem, 1);
  943. spin_lock_init(&adapter->stats_lock);
  944. return 0;
  945. }
  946. /**
  947. * e1000_alloc_queues - Allocate memory for all rings
  948. * @adapter: board private structure to initialize
  949. *
  950. * We allocate one ring per queue at run-time since we don't know the
  951. * number of queues at compile-time. The polling_netdev array is
  952. * intended for Multiqueue, but should work fine with a single queue.
  953. **/
  954. static int __devinit
  955. e1000_alloc_queues(struct e1000_adapter *adapter)
  956. {
  957. int size;
  958. size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
  959. adapter->tx_ring = kmalloc(size, GFP_KERNEL);
  960. if (!adapter->tx_ring)
  961. return -ENOMEM;
  962. memset(adapter->tx_ring, 0, size);
  963. size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
  964. adapter->rx_ring = kmalloc(size, GFP_KERNEL);
  965. if (!adapter->rx_ring) {
  966. kfree(adapter->tx_ring);
  967. return -ENOMEM;
  968. }
  969. memset(adapter->rx_ring, 0, size);
  970. #ifdef CONFIG_E1000_NAPI
  971. size = sizeof(struct net_device) * adapter->num_queues;
  972. adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
  973. if (!adapter->polling_netdev) {
  974. kfree(adapter->tx_ring);
  975. kfree(adapter->rx_ring);
  976. return -ENOMEM;
  977. }
  978. memset(adapter->polling_netdev, 0, size);
  979. #endif
  980. return E1000_SUCCESS;
  981. }
  982. #ifdef CONFIG_E1000_MQ
  983. static void __devinit
  984. e1000_setup_queue_mapping(struct e1000_adapter *adapter)
  985. {
  986. int i, cpu;
  987. adapter->rx_sched_call_data.func = e1000_rx_schedule;
  988. adapter->rx_sched_call_data.info = adapter->netdev;
  989. cpus_clear(adapter->rx_sched_call_data.cpumask);
  990. adapter->cpu_netdev = alloc_percpu(struct net_device *);
  991. adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
  992. lock_cpu_hotplug();
  993. i = 0;
  994. for_each_online_cpu(cpu) {
  995. *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
  996. /* This is incomplete because we'd like to assign separate
  997. * physical cpus to these netdev polling structures and
  998. * avoid saturating a subset of cpus.
  999. */
  1000. if (i < adapter->num_queues) {
  1001. *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
  1002. adapter->cpu_for_queue[i] = cpu;
  1003. } else
  1004. *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
  1005. i++;
  1006. }
  1007. unlock_cpu_hotplug();
  1008. }
  1009. #endif
  1010. /**
  1011. * e1000_open - Called when a network interface is made active
  1012. * @netdev: network interface device structure
  1013. *
  1014. * Returns 0 on success, negative value on failure
  1015. *
  1016. * The open entry point is called when a network interface is made
  1017. * active by the system (IFF_UP). At this point all resources needed
  1018. * for transmit and receive operations are allocated, the interrupt
  1019. * handler is registered with the OS, the watchdog timer is started,
  1020. * and the stack is notified that the interface is ready.
  1021. **/
  1022. static int
  1023. e1000_open(struct net_device *netdev)
  1024. {
  1025. struct e1000_adapter *adapter = netdev_priv(netdev);
  1026. int err;
  1027. /* allocate transmit descriptors */
  1028. if ((err = e1000_setup_all_tx_resources(adapter)))
  1029. goto err_setup_tx;
  1030. /* allocate receive descriptors */
  1031. if ((err = e1000_setup_all_rx_resources(adapter)))
  1032. goto err_setup_rx;
  1033. if ((err = e1000_up(adapter)))
  1034. goto err_up;
  1035. #ifdef NETIF_F_HW_VLAN_TX
  1036. adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  1037. if ((adapter->hw.mng_cookie.status &
  1038. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
  1039. e1000_update_mng_vlan(adapter);
  1040. }
  1041. #endif
  1042. return E1000_SUCCESS;
  1043. err_up:
  1044. e1000_free_all_rx_resources(adapter);
  1045. err_setup_rx:
  1046. e1000_free_all_tx_resources(adapter);
  1047. err_setup_tx:
  1048. e1000_reset(adapter);
  1049. return err;
  1050. }
  1051. /**
  1052. * e1000_close - Disables a network interface
  1053. * @netdev: network interface device structure
  1054. *
  1055. * Returns 0, this is not allowed to fail
  1056. *
  1057. * The close entry point is called when an interface is de-activated
  1058. * by the OS. The hardware is still under the driver's control, but
  1059. * needs to be disabled. A global MAC reset is issued to stop the
  1060. * hardware, and all transmit and receive resources are freed.
  1061. **/
  1062. static int
  1063. e1000_close(struct net_device *netdev)
  1064. {
  1065. struct e1000_adapter *adapter = netdev_priv(netdev);
  1066. e1000_down(adapter);
  1067. e1000_free_all_tx_resources(adapter);
  1068. e1000_free_all_rx_resources(adapter);
  1069. #ifdef NETIF_F_HW_VLAN_TX
  1070. if((adapter->hw.mng_cookie.status &
  1071. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
  1072. e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  1073. }
  1074. #endif
  1075. return 0;
  1076. }
  1077. /**
  1078. * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
  1079. * @adapter: address of board private structure
  1080. * @start: address of beginning of memory
  1081. * @len: length of memory
  1082. **/
  1083. static inline boolean_t
  1084. e1000_check_64k_bound(struct e1000_adapter *adapter,
  1085. void *start, unsigned long len)
  1086. {
  1087. unsigned long begin = (unsigned long) start;
  1088. unsigned long end = begin + len;
  1089. /* First rev 82545 and 82546 need to not allow any memory
  1090. * write location to cross 64k boundary due to errata 23 */
  1091. if(adapter->hw.mac_type == e1000_82545 ||
  1092. adapter->hw.mac_type == e1000_82546) {
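/* begin and (end - 1) fall within the same 64 kB page only if their upper address bits (above bit 15) match */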
  1093. return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
  1094. }
  1095. return TRUE;
  1096. }
  1097. /**
  1098. * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
  1099. * @adapter: board private structure
  1100. * @txdr: tx descriptor ring (for a specific queue) to setup
  1101. *
  1102. * Return 0 on success, negative on failure
  1103. **/
  1104. int
  1105. e1000_setup_tx_resources(struct e1000_adapter *adapter,
  1106. struct e1000_tx_ring *txdr)
  1107. {
  1108. struct pci_dev *pdev = adapter->pdev;
  1109. int size;
  1110. size = sizeof(struct e1000_buffer) * txdr->count;
  1111. txdr->buffer_info = vmalloc(size);
  1112. if (!txdr->buffer_info) {
  1113. DPRINTK(PROBE, ERR,
  1114. "Unable to allocate memory for the transmit descriptor ring\n");
  1115. return -ENOMEM;
  1116. }
  1117. memset(txdr->buffer_info, 0, size);
  1118. memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));
  1119. /* round up to nearest 4K */
  1120. txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
  1121. E1000_ROUNDUP(txdr->size, 4096);
  1122. txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  1123. if (!txdr->desc) {
  1124. setup_tx_desc_die:
  1125. vfree(txdr->buffer_info);
  1126. DPRINTK(PROBE, ERR,
  1127. "Unable to allocate memory for the transmit descriptor ring\n");
  1128. return -ENOMEM;
  1129. }
  1130. /* Fix for errata 23, can't cross 64kB boundary */
  1131. if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
  1132. void *olddesc = txdr->desc;
  1133. dma_addr_t olddma = txdr->dma;
  1134. DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
  1135. "at %p\n", txdr->size, txdr->desc);
  1136. /* Try again, without freeing the previous */
  1137. txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  1138. /* Failed allocation, critical failure */
  1139. if (!txdr->desc) {
  1140. pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  1141. goto setup_tx_desc_die;
  1142. }
  1143. if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
  1144. /* give up */
  1145. pci_free_consistent(pdev, txdr->size, txdr->desc,
  1146. txdr->dma);
  1147. pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  1148. DPRINTK(PROBE, ERR,
  1149. "Unable to allocate aligned memory "
  1150. "for the transmit descriptor ring\n");
  1151. vfree(txdr->buffer_info);
  1152. return -ENOMEM;
  1153. } else {
  1154. /* Free old allocation, new allocation was successful */
  1155. pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  1156. }
  1157. }
  1158. memset(txdr->desc, 0, txdr->size);
  1159. txdr->next_to_use = 0;
  1160. txdr->next_to_clean = 0;
  1161. spin_lock_init(&txdr->tx_lock);
  1162. return 0;
  1163. }
  1164. /**
  1165. * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
  1166. * (Descriptors) for all queues
  1167. * @adapter: board private structure
  1168. *
  1169. * If this function returns with an error, then it's possible one or
  1170. * more of the rings is populated (while the rest are not). It is the
  1171. * caller's duty to clean those orphaned rings.
  1172. *
  1173. * Return 0 on success, negative on failure
  1174. **/
  1175. int
  1176. e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
  1177. {
  1178. int i, err = 0;
  1179. for (i = 0; i < adapter->num_queues; i++) {
  1180. err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
  1181. if (err) {
  1182. DPRINTK(PROBE, ERR,
  1183. "Allocation for Tx Queue %u failed\n", i);
  1184. break;
  1185. }
  1186. }
  1187. return err;
  1188. }
  1189. /**
  1190. * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
  1191. * @adapter: board private structure
  1192. *
  1193. * Configure the Tx unit of the MAC after a reset.
  1194. **/
  1195. static void
  1196. e1000_configure_tx(struct e1000_adapter *adapter)
  1197. {
  1198. uint64_t tdba;
  1199. struct e1000_hw *hw = &adapter->hw;
  1200. uint32_t tdlen, tctl, tipg, tarc;
  1201. /* Setup the HW Tx Head and Tail descriptor pointers */
  1202. switch (adapter->num_queues) {
  1203. case 2:
  1204. tdba = adapter->tx_ring[1].dma;
  1205. tdlen = adapter->tx_ring[1].count *
  1206. sizeof(struct e1000_tx_desc);
  1207. E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
  1208. E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
  1209. E1000_WRITE_REG(hw, TDLEN1, tdlen);
  1210. E1000_WRITE_REG(hw, TDH1, 0);
  1211. E1000_WRITE_REG(hw, TDT1, 0);
  1212. adapter->tx_ring[1].tdh = E1000_TDH1;
  1213. adapter->tx_ring[1].tdt = E1000_TDT1;
  1214. /* Fall Through */
  1215. case 1:
  1216. default:
  1217. tdba = adapter->tx_ring[0].dma;
  1218. tdlen = adapter->tx_ring[0].count *
  1219. sizeof(struct e1000_tx_desc);
  1220. E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
  1221. E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
  1222. E1000_WRITE_REG(hw, TDLEN, tdlen);
  1223. E1000_WRITE_REG(hw, TDH, 0);
  1224. E1000_WRITE_REG(hw, TDT, 0);
  1225. adapter->tx_ring[0].tdh = E1000_TDH;
  1226. adapter->tx_ring[0].tdt = E1000_TDT;
  1227. break;
  1228. }
  1229. /* Set the default values for the Tx Inter Packet Gap timer */
  1230. switch (hw->mac_type) {
  1231. case e1000_82542_rev2_0:
  1232. case e1000_82542_rev2_1:
  1233. tipg = DEFAULT_82542_TIPG_IPGT;
  1234. tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
  1235. tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
  1236. break;
  1237. default:
  1238. if (hw->media_type == e1000_media_type_fiber ||
  1239. hw->media_type == e1000_media_type_internal_serdes)
  1240. tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
  1241. else
  1242. tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
  1243. tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
  1244. tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
  1245. }
  1246. E1000_WRITE_REG(hw, TIPG, tipg);
  1247. /* Set the Tx Interrupt Delay register */
  1248. E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
  1249. if (hw->mac_type >= e1000_82540)
  1250. E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
  1251. /* Program the Transmit Control Register */
  1252. tctl = E1000_READ_REG(hw, TCTL);
  1253. tctl &= ~E1000_TCTL_CT;
  1254. tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
  1255. (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
  1256. E1000_WRITE_REG(hw, TCTL, tctl);
  1257. if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
  1258. tarc = E1000_READ_REG(hw, TARC0);
  1259. /* disabled bit 21 to fix network problems when forced to 100/10 Mbps */
  1260. tarc |= (1 << 25);
  1261. E1000_WRITE_REG(hw, TARC0, tarc);
  1262. tarc = E1000_READ_REG(hw, TARC1);
  1263. tarc |= (1 << 25);
  1264. if (tctl & E1000_TCTL_MULR)
  1265. tarc &= ~(1 << 28);
  1266. else
  1267. tarc |= (1 << 28);
  1268. E1000_WRITE_REG(hw, TARC1, tarc);
  1269. }
  1270. e1000_config_collision_dist(hw);
  1271. /* Setup Transmit Descriptor Settings for eop descriptor */
  1272. adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
  1273. E1000_TXD_CMD_IFCS;
  1274. if (hw->mac_type < e1000_82543)
  1275. adapter->txd_cmd |= E1000_TXD_CMD_RPS;
  1276. else
  1277. adapter->txd_cmd |= E1000_TXD_CMD_RS;
  1278. /* Cache if we're 82544 running in PCI-X because we'll
  1279. * need this to apply a workaround later in the send path. */
  1280. if (hw->mac_type == e1000_82544 &&
  1281. hw->bus_type == e1000_bus_type_pcix)
  1282. adapter->pcix_82544 = 1;
  1283. }
  1284. /**
  1285. * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
  1286. * @adapter: board private structure
  1287. * @rxdr: rx descriptor ring (for a specific queue) to setup
  1288. *
  1289. * Returns 0 on success, negative on failure
  1290. **/
  1291. int
  1292. e1000_setup_rx_resources(struct e1000_adapter *adapter,
  1293. struct e1000_rx_ring *rxdr)
  1294. {
  1295. struct pci_dev *pdev = adapter->pdev;
  1296. int size, desc_len;
  1297. size = sizeof(struct e1000_buffer) * rxdr->count;
  1298. rxdr->buffer_info = vmalloc(size);
  1299. if (!rxdr->buffer_info) {
  1300. DPRINTK(PROBE, ERR,
  1301. "Unable to allocate memory for the receive descriptor ring\n");
  1302. return -ENOMEM;
  1303. }
  1304. memset(rxdr->buffer_info, 0, size);
  1305. size = sizeof(struct e1000_ps_page) * rxdr->count;
  1306. rxdr->ps_page = kmalloc(size, GFP_KERNEL);
  1307. if (!rxdr->ps_page) {
  1308. vfree(rxdr->buffer_info);
  1309. DPRINTK(PROBE, ERR,
  1310. "Unable to allocate memory for the receive descriptor ring\n");
  1311. return -ENOMEM;
  1312. }
  1313. memset(rxdr->ps_page, 0, size);
  1314. size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
  1315. rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
  1316. if (!rxdr->ps_page_dma) {
  1317. vfree(rxdr->buffer_info);
  1318. kfree(rxdr->ps_page);
  1319. DPRINTK(PROBE, ERR,
  1320. "Unable to allocate memory for the receive descriptor ring\n");
  1321. return -ENOMEM;
  1322. }
  1323. memset(rxdr->ps_page_dma, 0, size);
  1324. if (adapter->hw.mac_type <= e1000_82547_rev_2)
  1325. desc_len = sizeof(struct e1000_rx_desc);
  1326. else
  1327. desc_len = sizeof(union e1000_rx_desc_packet_split);
  1328. /* Round up to nearest 4K */
  1329. rxdr->size = rxdr->count * desc_len;
  1330. E1000_ROUNDUP(rxdr->size, 4096);
  1331. rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  1332. if (!rxdr->desc) {
  1333. DPRINTK(PROBE, ERR,
  1334. "Unable to allocate memory for the receive descriptor ring\n");
  1335. setup_rx_desc_die:
  1336. vfree(rxdr->buffer_info);
  1337. kfree(rxdr->ps_page);
  1338. kfree(rxdr->ps_page_dma);
  1339. return -ENOMEM;
  1340. }
  1341. /* Fix for errata 23, can't cross 64kB boundary */
  1342. if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
  1343. void *olddesc = rxdr->desc;
  1344. dma_addr_t olddma = rxdr->dma;
  1345. DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
  1346. "at %p\n", rxdr->size, rxdr->desc);
  1347. /* Try again, without freeing the previous */
  1348. rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  1349. /* Failed allocation, critical failure */
  1350. if (!rxdr->desc) {
  1351. pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  1352. DPRINTK(PROBE, ERR,
  1353. "Unable to allocate memory "
  1354. "for the receive descriptor ring\n");
  1355. goto setup_rx_desc_die;
  1356. }
  1357. if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
  1358. /* give up */
  1359. pci_free_consistent(pdev, rxdr->size, rxdr->desc,
  1360. rxdr->dma);
  1361. pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  1362. DPRINTK(PROBE, ERR,
  1363. "Unable to allocate aligned memory "
  1364. "for the receive descriptor ring\n");
  1365. goto setup_rx_desc_die;
  1366. } else {
  1367. /* Free old allocation, new allocation was successful */
  1368. pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  1369. }
  1370. }
  1371. memset(rxdr->desc, 0, rxdr->size);
  1372. rxdr->next_to_clean = 0;
  1373. rxdr->next_to_use = 0;
  1374. return 0;
  1375. }
  1376. /**
  1377. * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
  1378. * (Descriptors) for all queues
  1379. * @adapter: board private structure
  1380. *
  1381. * If this function returns with an error, then it's possible one or
  1382. * more of the rings is populated (while the rest are not). It is the
  1383. * caller's duty to clean those orphaned rings.
  1384. *
  1385. * Return 0 on success, negative on failure
  1386. **/
  1387. int
  1388. e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
  1389. {
  1390. int i, err = 0;
  1391. for (i = 0; i < adapter->num_queues; i++) {
  1392. err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
  1393. if (err) {
  1394. DPRINTK(PROBE, ERR,
  1395. "Allocation for Rx Queue %u failed\n", i);
  1396. break;
  1397. }
  1398. }
  1399. return err;
  1400. }
  1401. /**
  1402. * e1000_setup_rctl - configure the receive control registers
  1403. * @adapter: Board private structure
  1404. **/
  1405. #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
  1406. (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
  1407. static void
  1408. e1000_setup_rctl(struct e1000_adapter *adapter)
  1409. {
  1410. uint32_t rctl, rfctl;
  1411. uint32_t psrctl = 0;
  1412. #ifdef CONFIG_E1000_PACKET_SPLIT
  1413. uint32_t pages = 0;
  1414. #endif
  1415. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  1416. rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
  1417. rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
  1418. E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
  1419. (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
  1420. if(adapter->hw.tbi_compatibility_on == 1)
  1421. rctl |= E1000_RCTL_SBP;
  1422. else
  1423. rctl &= ~E1000_RCTL_SBP;
  1424. if(adapter->netdev->mtu <= ETH_DATA_LEN)
  1425. rctl &= ~E1000_RCTL_LPE;
  1426. else
  1427. rctl |= E1000_RCTL_LPE;
  1428. /* Setup buffer sizes */
  1429. if(adapter->hw.mac_type >= e1000_82571) {
  1430. /* We can now specify buffers in 1K increments.
  1431. * BSIZE and BSEX are ignored in this case. */
  1432. rctl |= adapter->rx_buffer_len << 0x11;
  1433. } else {
  1434. rctl &= ~E1000_RCTL_SZ_4096;
  1435. rctl |= E1000_RCTL_BSEX;
  1436. switch (adapter->rx_buffer_len) {
  1437. case E1000_RXBUFFER_2048:
  1438. default:
  1439. rctl |= E1000_RCTL_SZ_2048;
  1440. rctl &= ~E1000_RCTL_BSEX;
  1441. break;
  1442. case E1000_RXBUFFER_4096:
  1443. rctl |= E1000_RCTL_SZ_4096;
  1444. break;
  1445. case E1000_RXBUFFER_8192:
  1446. rctl |= E1000_RCTL_SZ_8192;
  1447. break;
  1448. case E1000_RXBUFFER_16384:
  1449. rctl |= E1000_RCTL_SZ_16384;
  1450. break;
  1451. }
  1452. }
  1453. #ifdef CONFIG_E1000_PACKET_SPLIT
  1454. /* 82571 and greater support packet-split where the protocol
  1455. * header is placed in skb->data and the packet data is
  1456. * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
  1457. * In the case of a non-split, skb->data is linearly filled,
  1458. * followed by the page buffers. Therefore, skb->data is
  1459. * sized to hold the largest protocol header.
  1460. */
  1461. pages = PAGE_USE_COUNT(adapter->netdev->mtu);
  1462. if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
  1463. PAGE_SIZE <= 16384)
  1464. adapter->rx_ps_pages = pages;
  1465. else
  1466. adapter->rx_ps_pages = 0;
  1467. #endif
  1468. if (adapter->rx_ps_pages) {
  1469. /* Configure extra packet-split registers */
  1470. rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
  1471. rfctl |= E1000_RFCTL_EXTEN;
  1472. /* disable IPv6 packet split support */
  1473. rfctl |= E1000_RFCTL_IPV6_DIS;
  1474. E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
  1475. rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
  1476. psrctl |= adapter->rx_ps_bsize0 >>
  1477. E1000_PSRCTL_BSIZE0_SHIFT;
  1478. switch (adapter->rx_ps_pages) {
  1479. case 3:
  1480. psrctl |= PAGE_SIZE <<
  1481. E1000_PSRCTL_BSIZE3_SHIFT;
  1482. case 2:
  1483. psrctl |= PAGE_SIZE <<
  1484. E1000_PSRCTL_BSIZE2_SHIFT;
  1485. case 1:
  1486. psrctl |= PAGE_SIZE >>
  1487. E1000_PSRCTL_BSIZE1_SHIFT;
  1488. break;
  1489. }
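/* Note that the cases above fall through on purpose: rx_ps_pages == 3
 * programs BSIZE3, BSIZE2 and BSIZE1, while rx_ps_pages == 1 programs
 * only BSIZE1. The differing shift directions reflect the field
 * positions and granularities of the PSRCTL register. */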
  1490. E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
  1491. }
  1492. E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  1493. }
  1494. /**
  1495. * e1000_configure_rx - Configure 8254x Receive Unit after Reset
  1496. * @adapter: board private structure
  1497. *
  1498. * Configure the Rx unit of the MAC after a reset.
  1499. **/
  1500. static void
  1501. e1000_configure_rx(struct e1000_adapter *adapter)
  1502. {
  1503. uint64_t rdba;
  1504. struct e1000_hw *hw = &adapter->hw;
  1505. uint32_t rdlen, rctl, rxcsum, ctrl_ext;
  1506. #ifdef CONFIG_E1000_MQ
  1507. uint32_t reta, mrqc;
  1508. int i;
  1509. #endif
  1510. if (adapter->rx_ps_pages) {
  1511. rdlen = adapter->rx_ring[0].count *
  1512. sizeof(union e1000_rx_desc_packet_split);
  1513. adapter->clean_rx = e1000_clean_rx_irq_ps;
  1514. adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
  1515. } else {
  1516. rdlen = adapter->rx_ring[0].count *
  1517. sizeof(struct e1000_rx_desc);
  1518. adapter->clean_rx = e1000_clean_rx_irq;
  1519. adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
  1520. }
  1521. /* disable receives while setting up the descriptors */
  1522. rctl = E1000_READ_REG(hw, RCTL);
  1523. E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
  1524. /* set the Receive Delay Timer Register */
  1525. E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
  1526. if (hw->mac_type >= e1000_82540) {
  1527. E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
  1528. if(adapter->itr > 1)
  1529. E1000_WRITE_REG(hw, ITR,
  1530. 1000000000 / (adapter->itr * 256));
  1531. }
  1532. if (hw->mac_type >= e1000_82571) {
  1533. /* Reset delay timers after every interrupt */
  1534. ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
  1535. ctrl_ext |= E1000_CTRL_EXT_CANC;
  1536. E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
  1537. E1000_WRITE_FLUSH(hw);
  1538. }
  1539. /* Setup the HW Rx Head and Tail Descriptor Pointers and
  1540. * the Base and Length of the Rx Descriptor Ring */
  1541. switch (adapter->num_queues) {
  1542. #ifdef CONFIG_E1000_MQ
  1543. case 2:
  1544. rdba = adapter->rx_ring[1].dma;
  1545. E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
  1546. E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
  1547. E1000_WRITE_REG(hw, RDLEN1, rdlen);
  1548. E1000_WRITE_REG(hw, RDH1, 0);
  1549. E1000_WRITE_REG(hw, RDT1, 0);
  1550. adapter->rx_ring[1].rdh = E1000_RDH1;
  1551. adapter->rx_ring[1].rdt = E1000_RDT1;
  1552. /* Fall Through */
  1553. #endif
  1554. case 1:
  1555. default:
  1556. rdba = adapter->rx_ring[0].dma;
  1557. E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
  1558. E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
  1559. E1000_WRITE_REG(hw, RDLEN, rdlen);
  1560. E1000_WRITE_REG(hw, RDH, 0);
  1561. E1000_WRITE_REG(hw, RDT, 0);
  1562. adapter->rx_ring[0].rdh = E1000_RDH;
  1563. adapter->rx_ring[0].rdt = E1000_RDT;
  1564. break;
  1565. }
  1566. #ifdef CONFIG_E1000_MQ
  1567. if (adapter->num_queues > 1) {
  1568. uint32_t random[10];
  1569. get_random_bytes(&random[0], 40);
  1570. if (hw->mac_type <= e1000_82572) {
  1571. E1000_WRITE_REG(hw, RSSIR, 0);
  1572. E1000_WRITE_REG(hw, RSSIM, 0);
  1573. }
  1574. switch (adapter->num_queues) {
  1575. case 2:
  1576. default:
  1577. reta = 0x00800080;
  1578. mrqc = E1000_MRQC_ENABLE_RSS_2Q;
  1579. break;
  1580. }
  1581. /* Fill out redirection table */
  1582. for (i = 0; i < 32; i++)
  1583. E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
  1584. /* Fill out hash function seeds */
  1585. for (i = 0; i < 10; i++)
  1586. E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
  1587. mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
  1588. E1000_MRQC_RSS_FIELD_IPV4_TCP);
  1589. E1000_WRITE_REG(hw, MRQC, mrqc);
  1590. }
  1591. /* Multiqueue and packet checksumming are mutually exclusive. */
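/* Assuming the 8254x descriptor layout, the hardware reports either the
 * packet checksum or the RSS hash in the same Rx descriptor field, so
 * setting PCSD (packet checksum disable) below is what makes the hash
 * available to the multiple-queue code. */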
  1592. if (hw->mac_type >= e1000_82571) {
  1593. rxcsum = E1000_READ_REG(hw, RXCSUM);
  1594. rxcsum |= E1000_RXCSUM_PCSD;
  1595. E1000_WRITE_REG(hw, RXCSUM, rxcsum);
  1596. }
  1597. #else
  1598. /* Enable 82543 Receive Checksum Offload for TCP and UDP */
  1599. if (hw->mac_type >= e1000_82543) {
  1600. rxcsum = E1000_READ_REG(hw, RXCSUM);
  1601. if(adapter->rx_csum == TRUE) {
  1602. rxcsum |= E1000_RXCSUM_TUOFL;
1603. /* Enable 82571 IPv4 payload checksum for UDP fragments.
  1604. * Must be used in conjunction with packet-split. */
  1605. if ((hw->mac_type >= e1000_82571) &&
  1606. (adapter->rx_ps_pages)) {
  1607. rxcsum |= E1000_RXCSUM_IPPCSE;
  1608. }
  1609. } else {
  1610. rxcsum &= ~E1000_RXCSUM_TUOFL;
  1611. /* don't need to clear IPPCSE as it defaults to 0 */
  1612. }
  1613. E1000_WRITE_REG(hw, RXCSUM, rxcsum);
  1614. }
  1615. #endif /* CONFIG_E1000_MQ */
  1616. if (hw->mac_type == e1000_82573)
  1617. E1000_WRITE_REG(hw, ERT, 0x0100);
  1618. /* Enable Receives */
  1619. E1000_WRITE_REG(hw, RCTL, rctl);
  1620. }
  1621. /**
  1622. * e1000_free_tx_resources - Free Tx Resources per Queue
  1623. * @adapter: board private structure
  1624. * @tx_ring: Tx descriptor ring for a specific queue
  1625. *
  1626. * Free all transmit software resources
  1627. **/
  1628. void
  1629. e1000_free_tx_resources(struct e1000_adapter *adapter,
  1630. struct e1000_tx_ring *tx_ring)
  1631. {
  1632. struct pci_dev *pdev = adapter->pdev;
  1633. e1000_clean_tx_ring(adapter, tx_ring);
  1634. vfree(tx_ring->buffer_info);
  1635. tx_ring->buffer_info = NULL;
  1636. pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
  1637. tx_ring->desc = NULL;
  1638. }
  1639. /**
  1640. * e1000_free_all_tx_resources - Free Tx Resources for All Queues
  1641. * @adapter: board private structure
  1642. *
  1643. * Free all transmit software resources
  1644. **/
  1645. void
  1646. e1000_free_all_tx_resources(struct e1000_adapter *adapter)
  1647. {
  1648. int i;
  1649. for (i = 0; i < adapter->num_queues; i++)
  1650. e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
  1651. }
  1652. static inline void
  1653. e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
  1654. struct e1000_buffer *buffer_info)
  1655. {
  1656. if(buffer_info->dma) {
  1657. pci_unmap_page(adapter->pdev,
  1658. buffer_info->dma,
  1659. buffer_info->length,
  1660. PCI_DMA_TODEVICE);
  1661. buffer_info->dma = 0;
  1662. }
  1663. if(buffer_info->skb) {
  1664. dev_kfree_skb_any(buffer_info->skb);
  1665. buffer_info->skb = NULL;
  1666. }
  1667. }
  1668. /**
  1669. * e1000_clean_tx_ring - Free Tx Buffers
  1670. * @adapter: board private structure
  1671. * @tx_ring: ring to be cleaned
  1672. **/
  1673. static void
  1674. e1000_clean_tx_ring(struct e1000_adapter *adapter,
  1675. struct e1000_tx_ring *tx_ring)
  1676. {
  1677. struct e1000_buffer *buffer_info;
  1678. unsigned long size;
  1679. unsigned int i;
  1680. /* Free all the Tx ring sk_buffs */
  1681. if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
  1682. e1000_unmap_and_free_tx_resource(adapter,
  1683. &tx_ring->previous_buffer_info);
  1684. }
  1685. for (i = 0; i < tx_ring->count; i++) {
  1686. buffer_info = &tx_ring->buffer_info[i];
  1687. e1000_unmap_and_free_tx_resource(adapter, buffer_info);
  1688. }
  1689. size = sizeof(struct e1000_buffer) * tx_ring->count;
  1690. memset(tx_ring->buffer_info, 0, size);
  1691. /* Zero out the descriptor ring */
  1692. memset(tx_ring->desc, 0, tx_ring->size);
  1693. tx_ring->next_to_use = 0;
  1694. tx_ring->next_to_clean = 0;
  1695. writel(0, adapter->hw.hw_addr + tx_ring->tdh);
  1696. writel(0, adapter->hw.hw_addr + tx_ring->tdt);
  1697. }
  1698. /**
  1699. * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
  1700. * @adapter: board private structure
  1701. **/
  1702. static void
  1703. e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
  1704. {
  1705. int i;
  1706. for (i = 0; i < adapter->num_queues; i++)
  1707. e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
  1708. }
  1709. /**
  1710. * e1000_free_rx_resources - Free Rx Resources
  1711. * @adapter: board private structure
  1712. * @rx_ring: ring to clean the resources from
  1713. *
  1714. * Free all receive software resources
  1715. **/
  1716. void
  1717. e1000_free_rx_resources(struct e1000_adapter *adapter,
  1718. struct e1000_rx_ring *rx_ring)
  1719. {
  1720. struct pci_dev *pdev = adapter->pdev;
  1721. e1000_clean_rx_ring(adapter, rx_ring);
  1722. vfree(rx_ring->buffer_info);
  1723. rx_ring->buffer_info = NULL;
  1724. kfree(rx_ring->ps_page);
  1725. rx_ring->ps_page = NULL;
  1726. kfree(rx_ring->ps_page_dma);
  1727. rx_ring->ps_page_dma = NULL;
  1728. pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
  1729. rx_ring->desc = NULL;
  1730. }
  1731. /**
  1732. * e1000_free_all_rx_resources - Free Rx Resources for All Queues
  1733. * @adapter: board private structure
  1734. *
  1735. * Free all receive software resources
  1736. **/
  1737. void
  1738. e1000_free_all_rx_resources(struct e1000_adapter *adapter)
  1739. {
  1740. int i;
  1741. for (i = 0; i < adapter->num_queues; i++)
  1742. e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
  1743. }
  1744. /**
  1745. * e1000_clean_rx_ring - Free Rx Buffers per Queue
  1746. * @adapter: board private structure
  1747. * @rx_ring: ring to free buffers from
  1748. **/
  1749. static void
  1750. e1000_clean_rx_ring(struct e1000_adapter *adapter,
  1751. struct e1000_rx_ring *rx_ring)
  1752. {
  1753. struct e1000_buffer *buffer_info;
  1754. struct e1000_ps_page *ps_page;
  1755. struct e1000_ps_page_dma *ps_page_dma;
  1756. struct pci_dev *pdev = adapter->pdev;
  1757. unsigned long size;
  1758. unsigned int i, j;
  1759. /* Free all the Rx ring sk_buffs */
  1760. for(i = 0; i < rx_ring->count; i++) {
  1761. buffer_info = &rx_ring->buffer_info[i];
  1762. if(buffer_info->skb) {
  1763. ps_page = &rx_ring->ps_page[i];
  1764. ps_page_dma = &rx_ring->ps_page_dma[i];
  1765. pci_unmap_single(pdev,
  1766. buffer_info->dma,
  1767. buffer_info->length,
  1768. PCI_DMA_FROMDEVICE);
  1769. dev_kfree_skb(buffer_info->skb);
  1770. buffer_info->skb = NULL;
  1771. for(j = 0; j < adapter->rx_ps_pages; j++) {
  1772. if(!ps_page->ps_page[j]) break;
  1773. pci_unmap_single(pdev,
  1774. ps_page_dma->ps_page_dma[j],
  1775. PAGE_SIZE, PCI_DMA_FROMDEVICE);
  1776. ps_page_dma->ps_page_dma[j] = 0;
  1777. put_page(ps_page->ps_page[j]);
  1778. ps_page->ps_page[j] = NULL;
  1779. }
  1780. }
  1781. }
  1782. size = sizeof(struct e1000_buffer) * rx_ring->count;
  1783. memset(rx_ring->buffer_info, 0, size);
  1784. size = sizeof(struct e1000_ps_page) * rx_ring->count;
  1785. memset(rx_ring->ps_page, 0, size);
  1786. size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
  1787. memset(rx_ring->ps_page_dma, 0, size);
  1788. /* Zero out the descriptor ring */
  1789. memset(rx_ring->desc, 0, rx_ring->size);
  1790. rx_ring->next_to_clean = 0;
  1791. rx_ring->next_to_use = 0;
  1792. writel(0, adapter->hw.hw_addr + rx_ring->rdh);
  1793. writel(0, adapter->hw.hw_addr + rx_ring->rdt);
  1794. }
  1795. /**
  1796. * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
  1797. * @adapter: board private structure
  1798. **/
  1799. static void
  1800. e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
  1801. {
  1802. int i;
  1803. for (i = 0; i < adapter->num_queues; i++)
  1804. e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
  1805. }
  1806. /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
  1807. * and memory write and invalidate disabled for certain operations
  1808. */
  1809. static void
  1810. e1000_enter_82542_rst(struct e1000_adapter *adapter)
  1811. {
  1812. struct net_device *netdev = adapter->netdev;
  1813. uint32_t rctl;
  1814. e1000_pci_clear_mwi(&adapter->hw);
  1815. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  1816. rctl |= E1000_RCTL_RST;
  1817. E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  1818. E1000_WRITE_FLUSH(&adapter->hw);
  1819. mdelay(5);
  1820. if(netif_running(netdev))
  1821. e1000_clean_all_rx_rings(adapter);
  1822. }
  1823. static void
  1824. e1000_leave_82542_rst(struct e1000_adapter *adapter)
  1825. {
  1826. struct net_device *netdev = adapter->netdev;
  1827. uint32_t rctl;
  1828. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  1829. rctl &= ~E1000_RCTL_RST;
  1830. E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  1831. E1000_WRITE_FLUSH(&adapter->hw);
  1832. mdelay(5);
  1833. if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
  1834. e1000_pci_set_mwi(&adapter->hw);
  1835. if(netif_running(netdev)) {
  1836. e1000_configure_rx(adapter);
  1837. e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
  1838. }
  1839. }
  1840. /**
  1841. * e1000_set_mac - Change the Ethernet Address of the NIC
  1842. * @netdev: network interface device structure
  1843. * @p: pointer to an address structure
  1844. *
  1845. * Returns 0 on success, negative on failure
  1846. **/
  1847. static int
  1848. e1000_set_mac(struct net_device *netdev, void *p)
  1849. {
  1850. struct e1000_adapter *adapter = netdev_priv(netdev);
  1851. struct sockaddr *addr = p;
  1852. if(!is_valid_ether_addr(addr->sa_data))
  1853. return -EADDRNOTAVAIL;
  1854. /* 82542 2.0 needs to be in reset to write receive address registers */
  1855. if(adapter->hw.mac_type == e1000_82542_rev2_0)
  1856. e1000_enter_82542_rst(adapter);
  1857. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  1858. memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
  1859. e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
  1860. /* With 82571 controllers, LAA may be overwritten (with the default)
  1861. * due to controller reset from the other port. */
  1862. if (adapter->hw.mac_type == e1000_82571) {
  1863. /* activate the work around */
  1864. adapter->hw.laa_is_present = 1;
1865. * Hold a copy of the LAA in RAR[14]. This is done so that
  1866. * between the time RAR[0] gets clobbered and the time it
  1867. * gets fixed (in e1000_watchdog), the actual LAA is in one
  1868. * of the RARs and no incoming packets directed to this port
1869. * are dropped. Eventually the LAA will be in RAR[0] and
  1870. * RAR[14] */
  1871. e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
  1872. E1000_RAR_ENTRIES - 1);
  1873. }
  1874. if(adapter->hw.mac_type == e1000_82542_rev2_0)
  1875. e1000_leave_82542_rst(adapter);
  1876. return 0;
  1877. }
  1878. /**
  1879. * e1000_set_multi - Multicast and Promiscuous mode set
  1880. * @netdev: network interface device structure
  1881. *
  1882. * The set_multi entry point is called whenever the multicast address
  1883. * list or the network interface flags are updated. This routine is
  1884. * responsible for configuring the hardware for proper multicast,
  1885. * promiscuous mode, and all-multi behavior.
  1886. **/
  1887. static void
  1888. e1000_set_multi(struct net_device *netdev)
  1889. {
  1890. struct e1000_adapter *adapter = netdev_priv(netdev);
  1891. struct e1000_hw *hw = &adapter->hw;
  1892. struct dev_mc_list *mc_ptr;
  1893. uint32_t rctl;
  1894. uint32_t hash_value;
  1895. int i, rar_entries = E1000_RAR_ENTRIES;
  1896. /* reserve RAR[14] for LAA over-write work-around */
  1897. if (adapter->hw.mac_type == e1000_82571)
  1898. rar_entries--;
  1899. /* Check for Promiscuous and All Multicast modes */
  1900. rctl = E1000_READ_REG(hw, RCTL);
  1901. if (netdev->flags & IFF_PROMISC) {
  1902. rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
  1903. } else if (netdev->flags & IFF_ALLMULTI) {
  1904. rctl |= E1000_RCTL_MPE;
  1905. rctl &= ~E1000_RCTL_UPE;
  1906. } else {
  1907. rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
  1908. }
  1909. E1000_WRITE_REG(hw, RCTL, rctl);
  1910. /* 82542 2.0 needs to be in reset to write receive address registers */
  1911. if (hw->mac_type == e1000_82542_rev2_0)
  1912. e1000_enter_82542_rst(adapter);
1913. /* load the first 14 multicast addresses into the exact filters 1-14
1914. * RAR 0 is used for the station MAC address
  1915. * if there are not 14 addresses, go ahead and clear the filters
  1916. * -- with 82571 controllers only 0-13 entries are filled here
  1917. */
  1918. mc_ptr = netdev->mc_list;
  1919. for (i = 1; i < rar_entries; i++) {
  1920. if (mc_ptr) {
  1921. e1000_rar_set(hw, mc_ptr->dmi_addr, i);
  1922. mc_ptr = mc_ptr->next;
  1923. } else {
  1924. E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
  1925. E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
  1926. }
  1927. }
  1928. /* clear the old settings from the multicast hash table */
  1929. for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
  1930. E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
  1931. /* load any remaining addresses into the hash table */
  1932. for (; mc_ptr; mc_ptr = mc_ptr->next) {
  1933. hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
  1934. e1000_mta_set(hw, hash_value);
  1935. }
  1936. if (hw->mac_type == e1000_82542_rev2_0)
  1937. e1000_leave_82542_rst(adapter);
  1938. }
  1939. /* Need to wait a few seconds after link up to get diagnostic information from
  1940. * the phy */
  1941. static void
  1942. e1000_update_phy_info(unsigned long data)
  1943. {
  1944. struct e1000_adapter *adapter = (struct e1000_adapter *) data;
  1945. e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
  1946. }
  1947. /**
  1948. * e1000_82547_tx_fifo_stall - Timer Call-back
  1949. * @data: pointer to adapter cast into an unsigned long
  1950. **/
  1951. static void
  1952. e1000_82547_tx_fifo_stall(unsigned long data)
  1953. {
  1954. struct e1000_adapter *adapter = (struct e1000_adapter *) data;
  1955. struct net_device *netdev = adapter->netdev;
  1956. uint32_t tctl;
  1957. if(atomic_read(&adapter->tx_fifo_stall)) {
  1958. if((E1000_READ_REG(&adapter->hw, TDT) ==
  1959. E1000_READ_REG(&adapter->hw, TDH)) &&
  1960. (E1000_READ_REG(&adapter->hw, TDFT) ==
  1961. E1000_READ_REG(&adapter->hw, TDFH)) &&
  1962. (E1000_READ_REG(&adapter->hw, TDFTS) ==
  1963. E1000_READ_REG(&adapter->hw, TDFHS))) {
  1964. tctl = E1000_READ_REG(&adapter->hw, TCTL);
  1965. E1000_WRITE_REG(&adapter->hw, TCTL,
  1966. tctl & ~E1000_TCTL_EN);
  1967. E1000_WRITE_REG(&adapter->hw, TDFT,
  1968. adapter->tx_head_addr);
  1969. E1000_WRITE_REG(&adapter->hw, TDFH,
  1970. adapter->tx_head_addr);
  1971. E1000_WRITE_REG(&adapter->hw, TDFTS,
  1972. adapter->tx_head_addr);
  1973. E1000_WRITE_REG(&adapter->hw, TDFHS,
  1974. adapter->tx_head_addr);
  1975. E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
  1976. E1000_WRITE_FLUSH(&adapter->hw);
  1977. adapter->tx_fifo_head = 0;
  1978. atomic_set(&adapter->tx_fifo_stall, 0);
  1979. netif_wake_queue(netdev);
  1980. } else {
  1981. mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
  1982. }
  1983. }
  1984. }
  1985. /**
  1986. * e1000_watchdog - Timer Call-back
  1987. * @data: pointer to adapter cast into an unsigned long
  1988. **/
  1989. static void
  1990. e1000_watchdog(unsigned long data)
  1991. {
  1992. struct e1000_adapter *adapter = (struct e1000_adapter *) data;
  1993. if(adapter->netdev->polling){
  1994. adapter->do_poll_watchdog = 1;
  1995. } else {
  1996. e1000_watchdog_1(adapter);
  1997. }
  1998. mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
  1999. }
  2000. static void
  2001. e1000_watchdog_1(struct e1000_adapter *adapter)
  2002. {
  2003. struct net_device *netdev = adapter->netdev;
  2004. struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
  2005. uint32_t link;
  2006. e1000_check_for_link(&adapter->hw);
  2007. if (adapter->hw.mac_type == e1000_82573) {
  2008. e1000_enable_tx_pkt_filtering(&adapter->hw);
  2009. #ifdef NETIF_F_HW_VLAN_TX
  2010. if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
  2011. e1000_update_mng_vlan(adapter);
  2012. #endif
  2013. }
  2014. if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
  2015. !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
  2016. link = !adapter->hw.serdes_link_down;
  2017. else
  2018. link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
  2019. if (link) {
  2020. if (!netif_carrier_ok(netdev)) {
  2021. e1000_get_speed_and_duplex(&adapter->hw,
  2022. &adapter->link_speed,
  2023. &adapter->link_duplex);
  2024. DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
  2025. adapter->link_speed,
  2026. adapter->link_duplex == FULL_DUPLEX ?
  2027. "Full Duplex" : "Half Duplex");
  2028. netif_carrier_on(netdev);
  2029. netif_wake_queue(netdev);
  2030. mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
  2031. adapter->smartspeed = 0;
  2032. }
  2033. } else {
  2034. if (netif_carrier_ok(netdev)) {
  2035. adapter->link_speed = 0;
  2036. adapter->link_duplex = 0;
  2037. DPRINTK(LINK, INFO, "NIC Link is Down\n");
  2038. netif_carrier_off(netdev);
  2039. netif_stop_queue(netdev);
  2040. mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
  2041. }
  2042. e1000_smartspeed(adapter);
  2043. }
  2044. e1000_update_stats(adapter);
  2045. adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
  2046. adapter->tpt_old = adapter->stats.tpt;
  2047. adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
  2048. adapter->colc_old = adapter->stats.colc;
  2049. adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
  2050. adapter->gorcl_old = adapter->stats.gorcl;
  2051. adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
  2052. adapter->gotcl_old = adapter->stats.gotcl;
  2053. e1000_update_adaptive(&adapter->hw);
  2054. if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
  2055. if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
  2056. /* We've lost link, so the controller stops DMA,
  2057. * but we've got queued Tx work that's never going
  2058. * to get done, so reset controller to flush Tx.
  2059. * (Do the reset outside of interrupt context). */
  2060. schedule_work(&adapter->tx_timeout_task);
  2061. }
  2062. }
  2063. /* Dynamic mode for Interrupt Throttle Rate (ITR) */
  2064. if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
  2065. /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
  2066. * asymmetrical Tx or Rx gets ITR=8000; everyone
  2067. * else is between 2000-8000. */
  2068. uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
  2069. uint32_t dif = (adapter->gotcl > adapter->gorcl ?
  2070. adapter->gotcl - adapter->gorcl :
  2071. adapter->gorcl - adapter->gotcl) / 10000;
  2072. uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
  2073. E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
  2074. }
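/* Rough numbers: with symmetric traffic (gotcl == gorcl) dif is 0 and
 * itr = 2000 interrupts/sec; with fully one-sided traffic dif == goc
 * and itr = 6000 + 2000 = 8000. The ITR register counts 256 ns units,
 * hence 1000000000 / (itr * 256), about 488 for itr = 8000. */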
  2075. /* Cause software interrupt to ensure rx ring is cleaned */
  2076. E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
  2077. /* Force detection of hung controller every watchdog period */
  2078. adapter->detect_tx_hung = TRUE;
  2079. /* With 82571 controllers, LAA may be overwritten due to controller
  2080. * reset from the other port. Set the appropriate LAA in RAR[0] */
  2081. if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
  2082. e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
  2083. }
  2084. #define E1000_TX_FLAGS_CSUM 0x00000001
  2085. #define E1000_TX_FLAGS_VLAN 0x00000002
  2086. #define E1000_TX_FLAGS_TSO 0x00000004
  2087. #define E1000_TX_FLAGS_IPV4 0x00000008
  2088. #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
  2089. #define E1000_TX_FLAGS_VLAN_SHIFT 16
  2090. static inline int
  2091. e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
  2092. struct sk_buff *skb)
  2093. {
  2094. #ifdef NETIF_F_TSO
  2095. struct e1000_context_desc *context_desc;
  2096. unsigned int i;
  2097. uint32_t cmd_length = 0;
  2098. uint16_t ipcse = 0, tucse, mss;
  2099. uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
  2100. int err;
  2101. if (skb_shinfo(skb)->tso_size) {
  2102. if (skb_header_cloned(skb)) {
  2103. err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2104. if (err)
  2105. return err;
  2106. }
  2107. hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  2108. mss = skb_shinfo(skb)->tso_size;
  2109. if (skb->protocol == ntohs(ETH_P_IP)) {
  2110. skb->nh.iph->tot_len = 0;
  2111. skb->nh.iph->check = 0;
  2112. skb->h.th->check =
  2113. ~csum_tcpudp_magic(skb->nh.iph->saddr,
  2114. skb->nh.iph->daddr,
  2115. 0,
  2116. IPPROTO_TCP,
  2117. 0);
  2118. cmd_length = E1000_TXD_CMD_IP;
  2119. ipcse = skb->h.raw - skb->data - 1;
  2120. #ifdef NETIF_F_TSO_IPV6
  2121. } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
  2122. skb->nh.ipv6h->payload_len = 0;
  2123. skb->h.th->check =
  2124. ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
  2125. &skb->nh.ipv6h->daddr,
  2126. 0,
  2127. IPPROTO_TCP,
  2128. 0);
  2129. ipcse = 0;
  2130. #endif
  2131. }
  2132. ipcss = skb->nh.raw - skb->data;
  2133. ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
  2134. tucss = skb->h.raw - skb->data;
  2135. tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
  2136. tucse = 0;
  2137. cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
  2138. E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
  2139. i = tx_ring->next_to_use;
  2140. context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
  2141. context_desc->lower_setup.ip_fields.ipcss = ipcss;
  2142. context_desc->lower_setup.ip_fields.ipcso = ipcso;
  2143. context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
  2144. context_desc->upper_setup.tcp_fields.tucss = tucss;
  2145. context_desc->upper_setup.tcp_fields.tucso = tucso;
  2146. context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
  2147. context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
  2148. context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
  2149. context_desc->cmd_and_length = cpu_to_le32(cmd_length);
  2150. if (++i == tx_ring->count) i = 0;
  2151. tx_ring->next_to_use = i;
  2152. return TRUE;
  2153. }
  2154. #endif
  2155. return FALSE;
  2156. }
  2157. static inline boolean_t
  2158. e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
  2159. struct sk_buff *skb)
  2160. {
  2161. struct e1000_context_desc *context_desc;
  2162. unsigned int i;
  2163. uint8_t css;
  2164. if (likely(skb->ip_summed == CHECKSUM_HW)) {
  2165. css = skb->h.raw - skb->data;
  2166. i = tx_ring->next_to_use;
  2167. context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
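/* Assuming the 2.6-era CHECKSUM_HW convention: css is the offset of the
 * transport header from skb->data and skb->csum holds the offset of the
 * checksum field within that header, so css + skb->csum below points
 * the hardware at the exact byte where the checksum must be stored. */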
  2168. context_desc->upper_setup.tcp_fields.tucss = css;
  2169. context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
  2170. context_desc->upper_setup.tcp_fields.tucse = 0;
  2171. context_desc->tcp_seg_setup.data = 0;
  2172. context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
  2173. if (unlikely(++i == tx_ring->count)) i = 0;
  2174. tx_ring->next_to_use = i;
  2175. return TRUE;
  2176. }
  2177. return FALSE;
  2178. }
  2179. #define E1000_MAX_TXD_PWR 12
  2180. #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
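/* A single data descriptor carries at most E1000_MAX_DATA_PER_TXD =
 * 1 << 12 = 4096 bytes; e1000_tx_map() below splits larger buffers
 * across several descriptors. */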
  2181. static inline int
  2182. e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
  2183. struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
  2184. unsigned int nr_frags, unsigned int mss)
  2185. {
  2186. struct e1000_buffer *buffer_info;
  2187. unsigned int len = skb->len;
  2188. unsigned int offset = 0, size, count = 0, i;
  2189. #ifdef MAX_SKB_FRAGS
  2190. unsigned int f;
  2191. len -= skb->data_len;
  2192. #endif
  2193. i = tx_ring->next_to_use;
  2194. while(len) {
  2195. buffer_info = &tx_ring->buffer_info[i];
  2196. size = min(len, max_per_txd);
  2197. #ifdef NETIF_F_TSO
  2198. /* Workaround for premature desc write-backs
  2199. * in TSO mode. Append 4-byte sentinel desc */
  2200. if(unlikely(mss && !nr_frags && size == len && size > 8))
  2201. size -= 4;
  2202. #endif
2203. /* Work-around for errata 10, which applies
2204. * to all controllers in PCI-X mode.
  2205. * The fix is to make sure that the first descriptor of a
  2206. * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
  2207. */
  2208. if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
  2209. (size > 2015) && count == 0))
  2210. size = 2015;
  2211. /* Workaround for potential 82544 hang in PCI-X. Avoid
  2212. * terminating buffers within evenly-aligned dwords. */
  2213. if(unlikely(adapter->pcix_82544 &&
  2214. !((unsigned long)(skb->data + offset + size - 1) & 4) &&
  2215. size > 4))
  2216. size -= 4;
  2217. buffer_info->length = size;
  2218. buffer_info->dma =
  2219. pci_map_single(adapter->pdev,
  2220. skb->data + offset,
  2221. size,
  2222. PCI_DMA_TODEVICE);
  2223. buffer_info->time_stamp = jiffies;
  2224. len -= size;
  2225. offset += size;
  2226. count++;
  2227. if(unlikely(++i == tx_ring->count)) i = 0;
  2228. }
  2229. #ifdef MAX_SKB_FRAGS
  2230. for(f = 0; f < nr_frags; f++) {
  2231. struct skb_frag_struct *frag;
  2232. frag = &skb_shinfo(skb)->frags[f];
  2233. len = frag->size;
  2234. offset = frag->page_offset;
  2235. while(len) {
  2236. buffer_info = &tx_ring->buffer_info[i];
  2237. size = min(len, max_per_txd);
  2238. #ifdef NETIF_F_TSO
  2239. /* Workaround for premature desc write-backs
  2240. * in TSO mode. Append 4-byte sentinel desc */
  2241. if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
  2242. size -= 4;
  2243. #endif
  2244. /* Workaround for potential 82544 hang in PCI-X.
  2245. * Avoid terminating buffers within evenly-aligned
  2246. * dwords. */
  2247. if(unlikely(adapter->pcix_82544 &&
  2248. !((unsigned long)(frag->page+offset+size-1) & 4) &&
  2249. size > 4))
  2250. size -= 4;
  2251. buffer_info->length = size;
  2252. buffer_info->dma =
  2253. pci_map_page(adapter->pdev,
  2254. frag->page,
  2255. offset,
  2256. size,
  2257. PCI_DMA_TODEVICE);
  2258. buffer_info->time_stamp = jiffies;
  2259. len -= size;
  2260. offset += size;
  2261. count++;
  2262. if(unlikely(++i == tx_ring->count)) i = 0;
  2263. }
  2264. }
  2265. #endif
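/* i now points one past the last descriptor filled; step back so the
 * skb pointer and next_to_watch are recorded on the packet's final
 * descriptor. */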
  2266. i = (i == 0) ? tx_ring->count - 1 : i - 1;
  2267. tx_ring->buffer_info[i].skb = skb;
  2268. tx_ring->buffer_info[first].next_to_watch = i;
  2269. return count;
  2270. }
  2271. static inline void
  2272. e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
  2273. int tx_flags, int count)
  2274. {
  2275. struct e1000_tx_desc *tx_desc = NULL;
  2276. struct e1000_buffer *buffer_info;
  2277. uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
  2278. unsigned int i;
  2279. if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
  2280. txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
  2281. E1000_TXD_CMD_TSE;
  2282. txd_upper |= E1000_TXD_POPTS_TXSM << 8;
  2283. if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
  2284. txd_upper |= E1000_TXD_POPTS_IXSM << 8;
  2285. }
  2286. if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
  2287. txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
  2288. txd_upper |= E1000_TXD_POPTS_TXSM << 8;
  2289. }
  2290. if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
  2291. txd_lower |= E1000_TXD_CMD_VLE;
  2292. txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
  2293. }
  2294. i = tx_ring->next_to_use;
  2295. while(count--) {
  2296. buffer_info = &tx_ring->buffer_info[i];
  2297. tx_desc = E1000_TX_DESC(*tx_ring, i);
  2298. tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  2299. tx_desc->lower.data =
  2300. cpu_to_le32(txd_lower | buffer_info->length);
  2301. tx_desc->upper.data = cpu_to_le32(txd_upper);
  2302. if(unlikely(++i == tx_ring->count)) i = 0;
  2303. }
  2304. tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
  2305. /* Force memory writes to complete before letting h/w
  2306. * know there are new descriptors to fetch. (Only
  2307. * applicable for weak-ordered memory model archs,
  2308. * such as IA-64). */
  2309. wmb();
  2310. tx_ring->next_to_use = i;
  2311. writel(i, adapter->hw.hw_addr + tx_ring->tdt);
  2312. }
  2313. /**
  2314. * 82547 workaround to avoid controller hang in half-duplex environment.
  2315. * The workaround is to avoid queuing a large packet that would span
  2316. * the internal Tx FIFO ring boundary by notifying the stack to resend
  2317. * the packet at a later time. This gives the Tx FIFO an opportunity to
  2318. * flush all packets. When that occurs, we reset the Tx FIFO pointers
  2319. * to the beginning of the Tx FIFO.
  2320. **/
  2321. #define E1000_FIFO_HDR 0x10
  2322. #define E1000_82547_PAD_LEN 0x3E0
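/* For example, with E1000_FIFO_HDR = 16 and E1000_82547_PAD_LEN = 0x3E0
 * (992 bytes), a 1514-byte frame occupies 1514 + 16 = 1530 bytes,
 * rounded up to 1536 in the FIFO, so the stack is asked to requeue it
 * once 1536 - 992 = 544 bytes or less remain before the FIFO wraps. */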
  2323. static inline int
  2324. e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
  2325. {
  2326. uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
  2327. uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
  2328. E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
  2329. if(adapter->link_duplex != HALF_DUPLEX)
  2330. goto no_fifo_stall_required;
  2331. if(atomic_read(&adapter->tx_fifo_stall))
  2332. return 1;
  2333. if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
  2334. atomic_set(&adapter->tx_fifo_stall, 1);
  2335. return 1;
  2336. }
  2337. no_fifo_stall_required:
  2338. adapter->tx_fifo_head += skb_fifo_len;
  2339. if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
  2340. adapter->tx_fifo_head -= adapter->tx_fifo_size;
  2341. return 0;
  2342. }
  2343. #define MINIMUM_DHCP_PACKET_SIZE 282
  2344. static inline int
  2345. e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
  2346. {
  2347. struct e1000_hw *hw = &adapter->hw;
  2348. uint16_t length, offset;
  2349. #ifdef NETIF_F_HW_VLAN_TX
  2350. if(vlan_tx_tag_present(skb)) {
  2351. if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
  2352. ( adapter->hw.mng_cookie.status &
  2353. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
  2354. return 0;
  2355. }
  2356. #endif
  2357. if(htons(ETH_P_IP) == skb->protocol) {
  2358. const struct iphdr *ip = skb->nh.iph;
  2359. if(IPPROTO_UDP == ip->protocol) {
  2360. struct udphdr *udp = (struct udphdr *)(skb->h.uh);
  2361. if(ntohs(udp->dest) == 67) {
  2362. offset = (uint8_t *)udp + 8 - skb->data;
  2363. length = skb->len - offset;
  2364. return e1000_mng_write_dhcp_info(hw,
  2365. (uint8_t *)udp + 8, length);
  2366. }
  2367. }
  2368. } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
  2369. struct ethhdr *eth = (struct ethhdr *) skb->data;
  2370. if((htons(ETH_P_IP) == eth->h_proto)) {
  2371. const struct iphdr *ip =
  2372. (struct iphdr *)((uint8_t *)skb->data+14);
  2373. if(IPPROTO_UDP == ip->protocol) {
  2374. struct udphdr *udp =
  2375. (struct udphdr *)((uint8_t *)ip +
  2376. (ip->ihl << 2));
  2377. if(ntohs(udp->dest) == 67) {
  2378. offset = (uint8_t *)udp + 8 - skb->data;
  2379. length = skb->len - offset;
  2380. return e1000_mng_write_dhcp_info(hw,
  2381. (uint8_t *)udp + 8,
  2382. length);
  2383. }
  2384. }
  2385. }
  2386. }
  2387. return 0;
  2388. }
  2389. #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
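/* TXD_USE_COUNT over-reserves by design: 6000 bytes with X = 12 gives
 * (6000 >> 12) + 1 = 2 descriptors, and an exact 4096-byte buffer
 * still reserves 2 even though it fits in one. */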
  2390. static int
  2391. e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  2392. {
  2393. struct e1000_adapter *adapter = netdev_priv(netdev);
  2394. struct e1000_tx_ring *tx_ring;
  2395. unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
  2396. unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
  2397. unsigned int tx_flags = 0;
  2398. unsigned int len = skb->len;
  2399. unsigned long flags;
  2400. unsigned int nr_frags = 0;
  2401. unsigned int mss = 0;
  2402. int count = 0;
  2403. int tso;
  2404. #ifdef MAX_SKB_FRAGS
  2405. unsigned int f;
  2406. len -= skb->data_len;
  2407. #endif
  2408. #ifdef CONFIG_E1000_MQ
  2409. tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
  2410. #else
  2411. tx_ring = adapter->tx_ring;
  2412. #endif
  2413. if (unlikely(skb->len <= 0)) {
  2414. dev_kfree_skb_any(skb);
  2415. return NETDEV_TX_OK;
  2416. }
  2417. #ifdef NETIF_F_TSO
  2418. mss = skb_shinfo(skb)->tso_size;
  2419. /* The controller does a simple calculation to
  2420. * make sure there is enough room in the FIFO before
  2421. * initiating the DMA for each buffer. The calc is:
  2422. * 4 = ceil(buffer len/mss). To make sure we don't
  2423. * overrun the FIFO, adjust the max buffer len if mss
  2424. * drops. */
  2425. if(mss) {
  2426. max_per_txd = min(mss << 2, max_per_txd);
  2427. max_txd_pwr = fls(max_per_txd) - 1;
  2428. }
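/* For example, an MSS of 536 gives max_per_txd = min(536 << 2, 4096) =
 * 2144 and max_txd_pwr = fls(2144) - 1 = 11, so no single buffer spans
 * more than four MSS-sized chunks. */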
  2429. if((mss) || (skb->ip_summed == CHECKSUM_HW))
  2430. count++;
  2431. count++;
  2432. #else
  2433. if(skb->ip_summed == CHECKSUM_HW)
  2434. count++;
  2435. #endif
  2436. count += TXD_USE_COUNT(len, max_txd_pwr);
  2437. if(adapter->pcix_82544)
  2438. count++;
2439. /* Work-around for errata 10, which applies to all controllers
2440. * in PCI-X mode, so add one more descriptor to the count
  2441. */
  2442. if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
  2443. (len > 2015)))
  2444. count++;
  2445. #ifdef MAX_SKB_FRAGS
  2446. nr_frags = skb_shinfo(skb)->nr_frags;
  2447. for(f = 0; f < nr_frags; f++)
  2448. count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
  2449. max_txd_pwr);
  2450. if(adapter->pcix_82544)
  2451. count += nr_frags;
  2452. #ifdef NETIF_F_TSO
  2453. /* TSO Workaround for 82571/2 Controllers -- if skb->data
  2454. * points to just header, pull a few bytes of payload from
  2455. * frags into skb->data */
  2456. if (skb_shinfo(skb)->tso_size) {
  2457. uint8_t hdr_len;
  2458. hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  2459. if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
  2460. (adapter->hw.mac_type == e1000_82571 ||
  2461. adapter->hw.mac_type == e1000_82572)) {
  2462. unsigned int pull_size;
  2463. pull_size = min((unsigned int)4, skb->data_len);
  2464. if (!__pskb_pull_tail(skb, pull_size)) {
  2465. printk(KERN_ERR "__pskb_pull_tail failed.\n");
  2466. dev_kfree_skb_any(skb);
  2467. return -EFAULT;
  2468. }
  2469. }
  2470. }
  2471. #endif
  2472. #endif
  2473. if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
  2474. e1000_transfer_dhcp_info(adapter, skb);
  2475. #ifdef NETIF_F_LLTX
  2476. local_irq_save(flags);
  2477. if (!spin_trylock(&tx_ring->tx_lock)) {
  2478. /* Collision - tell upper layer to requeue */
  2479. local_irq_restore(flags);
  2480. return NETDEV_TX_LOCKED;
  2481. }
  2482. #else
  2483. spin_lock_irqsave(&tx_ring->tx_lock, flags);
  2484. #endif
  2485. /* need: count + 2 desc gap to keep tail from touching
  2486. * head, otherwise try next time */
  2487. if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
  2488. netif_stop_queue(netdev);
  2489. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  2490. return NETDEV_TX_BUSY;
  2491. }
  2492. if(unlikely(adapter->hw.mac_type == e1000_82547)) {
  2493. if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
  2494. netif_stop_queue(netdev);
  2495. mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
  2496. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  2497. return NETDEV_TX_BUSY;
  2498. }
  2499. }
  2500. #ifndef NETIF_F_LLTX
  2501. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  2502. #endif
  2503. #ifdef NETIF_F_HW_VLAN_TX
  2504. if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
  2505. tx_flags |= E1000_TX_FLAGS_VLAN;
  2506. tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
  2507. }
  2508. #endif
  2509. first = tx_ring->next_to_use;
  2510. tso = e1000_tso(adapter, tx_ring, skb);
  2511. if (tso < 0) {
  2512. dev_kfree_skb_any(skb);
  2513. #ifdef NETIF_F_LLTX
  2514. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  2515. #endif
  2516. return NETDEV_TX_OK;
  2517. }
  2518. if (likely(tso))
  2519. tx_flags |= E1000_TX_FLAGS_TSO;
  2520. else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
  2521. tx_flags |= E1000_TX_FLAGS_CSUM;
  2522. /* Old method was to assume IPv4 packet by default if TSO was enabled.
2523. * 82571 hardware supports TSO capabilities for IPv6 as well,
2524. * so we can no longer assume IPv4 by default. */
  2525. if (likely(skb->protocol == ntohs(ETH_P_IP)))
  2526. tx_flags |= E1000_TX_FLAGS_IPV4;
  2527. e1000_tx_queue(adapter, tx_ring, tx_flags,
  2528. e1000_tx_map(adapter, tx_ring, skb, first,
  2529. max_per_txd, nr_frags, mss));
  2530. netdev->trans_start = jiffies;
  2531. #ifdef NETIF_F_LLTX
  2532. /* Make sure there is space in the ring for the next send. */
  2533. if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
  2534. netif_stop_queue(netdev);
  2535. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  2536. #endif
  2537. return NETDEV_TX_OK;
  2538. }
  2539. /**
  2540. * e1000_tx_timeout - Respond to a Tx Hang
  2541. * @netdev: network interface device structure
  2542. **/
  2543. static void
  2544. e1000_tx_timeout(struct net_device *netdev)
  2545. {
  2546. struct e1000_adapter *adapter = netdev_priv(netdev);
  2547. /* Do the reset outside of interrupt context */
  2548. schedule_work(&adapter->tx_timeout_task);
  2549. }
  2550. static void
  2551. e1000_tx_timeout_task(struct net_device *netdev)
  2552. {
  2553. struct e1000_adapter *adapter = netdev_priv(netdev);
  2554. e1000_down(adapter);
  2555. e1000_up(adapter);
  2556. }
  2557. /**
  2558. * e1000_get_stats - Get System Network Statistics
  2559. * @netdev: network interface device structure
  2560. *
  2561. * Returns the address of the device statistics structure.
  2562. * The statistics are actually updated from the timer callback.
  2563. **/
  2564. static struct net_device_stats *
  2565. e1000_get_stats(struct net_device *netdev)
  2566. {
  2567. struct e1000_adapter *adapter = netdev_priv(netdev);
  2568. e1000_update_stats(adapter);
  2569. return &adapter->net_stats;
  2570. }
  2571. /**
  2572. * e1000_change_mtu - Change the Maximum Transfer Unit
  2573. * @netdev: network interface device structure
  2574. * @new_mtu: new value for maximum frame size
  2575. *
  2576. * Returns 0 on success, negative on failure
  2577. **/
  2578. static int
  2579. e1000_change_mtu(struct net_device *netdev, int new_mtu)
  2580. {
  2581. struct e1000_adapter *adapter = netdev_priv(netdev);
  2582. int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  2583. if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
  2584. (max_frame > MAX_JUMBO_FRAME_SIZE)) {
  2585. DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
  2586. return -EINVAL;
  2587. }
  2588. #define MAX_STD_JUMBO_FRAME_SIZE 9234
2589. /* might want this to be a bigger, enum-based check... */
  2590. /* 82571 controllers limit jumbo frame size to 10500 bytes */
  2591. if ((adapter->hw.mac_type == e1000_82571 ||
  2592. adapter->hw.mac_type == e1000_82572) &&
  2593. max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
  2594. DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
  2595. "on 82571 and 82572 controllers.\n");
  2596. return -EINVAL;
  2597. }
  2598. if(adapter->hw.mac_type == e1000_82573 &&
  2599. max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
  2600. DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
  2601. "on 82573\n");
  2602. return -EINVAL;
  2603. }
  2604. if(adapter->hw.mac_type > e1000_82547_rev_2) {
  2605. adapter->rx_buffer_len = max_frame;
  2606. E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
  2607. } else {
  2608. if(unlikely((adapter->hw.mac_type < e1000_82543) &&
  2609. (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
  2610. DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
  2611. "on 82542\n");
  2612. return -EINVAL;
  2613. } else {
  2614. if(max_frame <= E1000_RXBUFFER_2048) {
  2615. adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  2616. } else if(max_frame <= E1000_RXBUFFER_4096) {
  2617. adapter->rx_buffer_len = E1000_RXBUFFER_4096;
  2618. } else if(max_frame <= E1000_RXBUFFER_8192) {
  2619. adapter->rx_buffer_len = E1000_RXBUFFER_8192;
  2620. } else if(max_frame <= E1000_RXBUFFER_16384) {
  2621. adapter->rx_buffer_len = E1000_RXBUFFER_16384;
  2622. }
  2623. }
  2624. }
  2625. netdev->mtu = new_mtu;
  2626. if(netif_running(netdev)) {
  2627. e1000_down(adapter);
  2628. e1000_up(adapter);
  2629. }
  2630. adapter->hw.max_frame_size = max_frame;
  2631. return 0;
  2632. }
  2633. /**
  2634. * e1000_update_stats - Update the board statistics counters
  2635. * @adapter: board private structure
  2636. **/
  2637. void
  2638. e1000_update_stats(struct e1000_adapter *adapter)
  2639. {
  2640. struct e1000_hw *hw = &adapter->hw;
  2641. unsigned long flags;
  2642. uint16_t phy_tmp;
  2643. #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
  2644. spin_lock_irqsave(&adapter->stats_lock, flags);
  2645. /* these counters are modified from e1000_adjust_tbi_stats,
  2646. * called from the interrupt context, so they must only
  2647. * be written while holding adapter->stats_lock
  2648. */
  2649. adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
  2650. adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
  2651. adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
  2652. adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
  2653. adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
  2654. adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
  2655. adapter->stats.roc += E1000_READ_REG(hw, ROC);
  2656. adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
  2657. adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
  2658. adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
  2659. adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
  2660. adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
  2661. adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
  2662. adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
  2663. adapter->stats.mpc += E1000_READ_REG(hw, MPC);
  2664. adapter->stats.scc += E1000_READ_REG(hw, SCC);
  2665. adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
  2666. adapter->stats.mcc += E1000_READ_REG(hw, MCC);
  2667. adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
  2668. adapter->stats.dc += E1000_READ_REG(hw, DC);
  2669. adapter->stats.sec += E1000_READ_REG(hw, SEC);
  2670. adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
  2671. adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
  2672. adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
  2673. adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
  2674. adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
  2675. adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
  2676. adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
  2677. adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
  2678. adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
  2679. adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
  2680. adapter->stats.ruc += E1000_READ_REG(hw, RUC);
  2681. adapter->stats.rfc += E1000_READ_REG(hw, RFC);
  2682. adapter->stats.rjc += E1000_READ_REG(hw, RJC);
  2683. adapter->stats.torl += E1000_READ_REG(hw, TORL);
  2684. adapter->stats.torh += E1000_READ_REG(hw, TORH);
  2685. adapter->stats.totl += E1000_READ_REG(hw, TOTL);
  2686. adapter->stats.toth += E1000_READ_REG(hw, TOTH);
  2687. adapter->stats.tpr += E1000_READ_REG(hw, TPR);
  2688. adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
  2689. adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
  2690. adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
  2691. adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
  2692. adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
  2693. adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
  2694. adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
  2695. adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
  2696. /* used for adaptive IFS */
  2697. hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
  2698. adapter->stats.tpt += hw->tx_packet_delta;
  2699. hw->collision_delta = E1000_READ_REG(hw, COLC);
  2700. adapter->stats.colc += hw->collision_delta;
  2701. if(hw->mac_type >= e1000_82543) {
  2702. adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
  2703. adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
  2704. adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
  2705. adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
  2706. adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
  2707. adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
  2708. }
  2709. if(hw->mac_type > e1000_82547_rev_2) {
  2710. adapter->stats.iac += E1000_READ_REG(hw, IAC);
  2711. adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
  2712. adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
  2713. adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
  2714. adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
  2715. adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
  2716. adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
  2717. adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
  2718. adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
  2719. }
  2720. /* Fill out the OS statistics structure */
  2721. adapter->net_stats.rx_packets = adapter->stats.gprc;
  2722. adapter->net_stats.tx_packets = adapter->stats.gptc;
  2723. adapter->net_stats.rx_bytes = adapter->stats.gorcl;
  2724. adapter->net_stats.tx_bytes = adapter->stats.gotcl;
  2725. adapter->net_stats.multicast = adapter->stats.mprc;
  2726. adapter->net_stats.collisions = adapter->stats.colc;
  2727. /* Rx Errors */
  2728. adapter->net_stats.rx_errors = adapter->stats.rxerrc +
  2729. adapter->stats.crcerrs + adapter->stats.algnerrc +
  2730. adapter->stats.rlec + adapter->stats.mpc +
  2731. adapter->stats.cexterr;
  2732. adapter->net_stats.rx_dropped = adapter->stats.mpc;
  2733. adapter->net_stats.rx_length_errors = adapter->stats.rlec;
  2734. adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
  2735. adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
  2736. adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
  2737. adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
  2738. /* Tx Errors */
  2739. adapter->net_stats.tx_errors = adapter->stats.ecol +
  2740. adapter->stats.latecol;
  2741. adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
  2742. adapter->net_stats.tx_window_errors = adapter->stats.latecol;
  2743. adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
  2744. /* Tx Dropped needs to be maintained elsewhere */
  2745. /* Phy Stats */
  2746. if(hw->media_type == e1000_media_type_copper) {
  2747. if((adapter->link_speed == SPEED_1000) &&
  2748. (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
  2749. phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
  2750. adapter->phy_stats.idle_errors += phy_tmp;
  2751. }
  2752. if((hw->mac_type <= e1000_82546) &&
  2753. (hw->phy_type == e1000_phy_m88) &&
  2754. !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
  2755. adapter->phy_stats.receive_errors += phy_tmp;
  2756. }
  2757. spin_unlock_irqrestore(&adapter->stats_lock, flags);
  2758. }
  2759. #ifdef CONFIG_E1000_MQ
  2760. void
  2761. e1000_rx_schedule(void *data)
  2762. {
  2763. struct net_device *poll_dev, *netdev = data;
  2764. struct e1000_adapter *adapter = netdev->priv;
  2765. int this_cpu = get_cpu();
  2766. poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
  2767. if (poll_dev == NULL) {
  2768. put_cpu();
  2769. return;
  2770. }
  2771. if (likely(netif_rx_schedule_prep(poll_dev)))
  2772. __netif_rx_schedule(poll_dev);
  2773. else
  2774. e1000_irq_enable(adapter);
  2775. put_cpu();
  2776. }
  2777. #endif
  2778. /**
  2779. * e1000_intr - Interrupt Handler
  2780. * @irq: interrupt number
  2781. * @data: pointer to a network interface device structure
  2782. * @pt_regs: CPU registers structure
  2783. **/
  2784. static irqreturn_t
  2785. e1000_intr(int irq, void *data, struct pt_regs *regs)
  2786. {
  2787. struct net_device *netdev = data;
  2788. struct e1000_adapter *adapter = netdev_priv(netdev);
  2789. struct e1000_hw *hw = &adapter->hw;
  2790. uint32_t icr = E1000_READ_REG(hw, ICR);
  2791. int i;
  2792. if(unlikely(!icr))
  2793. return IRQ_NONE; /* Not our interrupt */
  2794. if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
  2795. hw->get_link_status = 1;
  2796. mod_timer(&adapter->watchdog_timer, jiffies);
  2797. }
  2798. #ifdef CONFIG_E1000_NAPI
  2799. atomic_inc(&adapter->irq_sem);
  2800. E1000_WRITE_REG(hw, IMC, ~0);
  2801. E1000_WRITE_FLUSH(hw);
  2802. #ifdef CONFIG_E1000_MQ
  2803. if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
  2804. cpu_set(adapter->cpu_for_queue[0],
  2805. adapter->rx_sched_call_data.cpumask);
  2806. for (i = 1; i < adapter->num_queues; i++) {
  2807. cpu_set(adapter->cpu_for_queue[i],
  2808. adapter->rx_sched_call_data.cpumask);
  2809. atomic_inc(&adapter->irq_sem);
  2810. }
  2811. atomic_set(&adapter->rx_sched_call_data.count, i);
  2812. smp_call_async_mask(&adapter->rx_sched_call_data);
  2813. } else {
  2814. printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
  2815. }
  2816. #else
  2817. if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
  2818. __netif_rx_schedule(&adapter->polling_netdev[0]);
  2819. else
  2820. e1000_irq_enable(adapter);
  2821. #endif
  2822. #else
  2823. /* Writing IMC and IMS is needed for 82547.
  2824. * Due to Hub Link bus being occupied, an interrupt
  2825. * de-assertion message is not able to be sent.
  2826. * When an interrupt assertion message is generated later,
  2827. * two messages are re-ordered and sent out.
  2828. * That causes APIC to think 82547 is in de-assertion
  2829. * state, while 82547 is in assertion state, resulting
  2830. * in dead lock. Writing IMC forces 82547 into
  2831. * de-assertion state.
  2832. */
  2833. if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
  2834. atomic_inc(&adapter->irq_sem);
  2835. E1000_WRITE_REG(hw, IMC, ~0);
  2836. }
  2837. for (i = 0; i < E1000_MAX_INTR; i++)
  2838. if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
  2839. !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
  2840. break;
  2841. if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
  2842. e1000_irq_enable(adapter);
  2843. #endif
  2844. #ifdef E1000_COUNT_ICR
  2845. adapter->icr_txdw += icr & 0x01;
  2846. icr >>= 1;
  2847. adapter->icr_txqe += icr & 0x01;
  2848. icr >>= 1;
  2849. adapter->icr_lsc += icr & 0x01;
  2850. icr >>= 1;
  2851. adapter->icr_rxseq += icr & 0x01;
  2852. icr >>= 1;
  2853. adapter->icr_rxdmt += icr & 0x01;
  2854. icr >>= 2;
  2855. adapter->icr_rxo += icr & 0x01;
  2856. icr >>= 1;
  2857. adapter->icr_rxt += icr & 0x01;
  2858. icr >>= 2;
  2859. adapter->icr_mdac += icr & 0x01;
  2860. icr >>= 1;
  2861. adapter->icr_rxcfg += icr & 0x01;
  2862. icr >>= 1;
  2863. adapter->icr_gpi += icr & 0x01;
  2864. #endif
  2865. return IRQ_HANDLED;
  2866. }
  2867. #ifdef CONFIG_E1000_NAPI
  2868. /**
  2869. * e1000_clean - NAPI Rx polling callback
2870. * @poll_dev: per-queue polling net_device for this adapter
  2871. **/
  2872. static int
  2873. e1000_clean(struct net_device *poll_dev, int *budget)
  2874. {
  2875. struct e1000_adapter *adapter;
  2876. int work_to_do = min(*budget, poll_dev->quota);
  2877. int tx_cleaned, i = 0, work_done = 0;
  2878. /* Must NOT use netdev_priv macro here. */
  2879. adapter = poll_dev->priv;
  2880. /* Keep link state information with original netdev */
  2881. if (!netif_carrier_ok(adapter->netdev))
  2882. goto quit_polling;
  2883. while (poll_dev != &adapter->polling_netdev[i]) {
  2884. i++;
  2885. if (unlikely(i == adapter->num_queues))
  2886. BUG();
  2887. }
  2888. tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
  2889. adapter->clean_rx(adapter, &adapter->rx_ring[i],
  2890. &work_done, work_to_do);
  2891. *budget -= work_done;
  2892. poll_dev->quota -= work_done;
  2893. /* If no Tx and not enough Rx work done, exit the polling mode */
  2894. if((!tx_cleaned && (work_done == 0)) ||
  2895. !netif_running(adapter->netdev)) {
  2896. quit_polling:
  2897. netif_rx_complete(poll_dev);
  2898. e1000_irq_enable(adapter);
  2899. return 0;
  2900. }
  2901. return 1;
  2902. }
  2903. #endif
  2904. /**
  2905. * e1000_clean_tx_irq - Reclaim resources after transmit completes
  2906. * @adapter: board private structure
  2907. **/
  2908. static boolean_t
  2909. e1000_clean_tx_irq(struct e1000_adapter *adapter,
  2910. struct e1000_tx_ring *tx_ring)
  2911. {
  2912. struct net_device *netdev = adapter->netdev;
  2913. struct e1000_tx_desc *tx_desc, *eop_desc;
  2914. struct e1000_buffer *buffer_info;
  2915. unsigned int i, eop;
  2916. boolean_t cleaned = FALSE;
  2917. i = tx_ring->next_to_clean;
  2918. eop = tx_ring->buffer_info[i].next_to_watch;
  2919. eop_desc = E1000_TX_DESC(*tx_ring, eop);
  2920. while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2921. /* Premature writeback of Tx descriptors: clean up (free the buffer
2922. * and unmap the pci mapping of) previous_buffer_info */
  2923. if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
  2924. e1000_unmap_and_free_tx_resource(adapter,
  2925. &tx_ring->previous_buffer_info);
  2926. }
  2927. for (cleaned = FALSE; !cleaned; ) {
  2928. tx_desc = E1000_TX_DESC(*tx_ring, i);
  2929. buffer_info = &tx_ring->buffer_info[i];
  2930. cleaned = (i == eop);
  2931. #ifdef NETIF_F_TSO
  2932. if (!(netdev->features & NETIF_F_TSO)) {
  2933. #endif
  2934. e1000_unmap_and_free_tx_resource(adapter,
  2935. buffer_info);
  2936. #ifdef NETIF_F_TSO
  2937. } else {
  2938. if (cleaned) {
  2939. memcpy(&tx_ring->previous_buffer_info,
  2940. buffer_info,
  2941. sizeof(struct e1000_buffer));
  2942. memset(buffer_info, 0,
  2943. sizeof(struct e1000_buffer));
  2944. } else {
  2945. e1000_unmap_and_free_tx_resource(
  2946. adapter, buffer_info);
  2947. }
  2948. }
  2949. #endif
  2950. tx_desc->buffer_addr = 0;
  2951. tx_desc->lower.data = 0;
  2952. tx_desc->upper.data = 0;
  2953. if (unlikely(++i == tx_ring->count)) i = 0;
  2954. }
  2955. tx_ring->pkt++;
  2956. eop = tx_ring->buffer_info[i].next_to_watch;
  2957. eop_desc = E1000_TX_DESC(*tx_ring, eop);
  2958. }
  2959. tx_ring->next_to_clean = i;
  2960. spin_lock(&tx_ring->tx_lock);
  2961. if (unlikely(cleaned && netif_queue_stopped(netdev) &&
  2962. netif_carrier_ok(netdev)))
  2963. netif_wake_queue(netdev);
  2964. spin_unlock(&tx_ring->tx_lock);
  2965. if (adapter->detect_tx_hung) {
2966. /* Detect a transmit hang in hardware; this serializes the
2967. * check with the clearing of time_stamp and the movement of i */
  2968. adapter->detect_tx_hung = FALSE;
  2969. if (tx_ring->buffer_info[i].dma &&
  2970. time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
  2971. && !(E1000_READ_REG(&adapter->hw, STATUS) &
  2972. E1000_STATUS_TXOFF)) {
  2973. /* detected Tx unit hang */
  2974. i = tx_ring->next_to_clean;
  2975. eop = tx_ring->buffer_info[i].next_to_watch;
  2976. eop_desc = E1000_TX_DESC(*tx_ring, eop);
  2977. DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
  2978. " TDH <%x>\n"
  2979. " TDT <%x>\n"
  2980. " next_to_use <%x>\n"
  2981. " next_to_clean <%x>\n"
  2982. "buffer_info[next_to_clean]\n"
  2983. " dma <%zx>\n"
  2984. " time_stamp <%lx>\n"
  2985. " next_to_watch <%x>\n"
  2986. " jiffies <%lx>\n"
  2987. " next_to_watch.status <%x>\n",
  2988. readl(adapter->hw.hw_addr + tx_ring->tdh),
  2989. readl(adapter->hw.hw_addr + tx_ring->tdt),
  2990. tx_ring->next_to_use,
  2991. i,
  2992. (size_t)tx_ring->buffer_info[i].dma,
  2993. tx_ring->buffer_info[i].time_stamp,
  2994. eop,
  2995. jiffies,
  2996. eop_desc->upper.fields.status);
  2997. netif_stop_queue(netdev);
  2998. }
  2999. }
  3000. #ifdef NETIF_F_TSO
  3001. if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
  3002. time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
  3003. e1000_unmap_and_free_tx_resource(
  3004. adapter, &tx_ring->previous_buffer_info);
  3005. #endif
  3006. return cleaned;
  3007. }
  3008. /**
  3009. * e1000_rx_checksum - Receive Checksum Offload for 82543
  3010. * @adapter: board private structure
  3011. * @status_err: receive descriptor status and error fields
  3012. * @csum: receive descriptor csum field
  3013. * @sk_buff: socket buffer with received data
  3014. **/
  3015. static inline void
  3016. e1000_rx_checksum(struct e1000_adapter *adapter,
  3017. uint32_t status_err, uint32_t csum,
  3018. struct sk_buff *skb)
  3019. {
  3020. uint16_t status = (uint16_t)status_err;
  3021. uint8_t errors = (uint8_t)(status_err >> 24);
  3022. skb->ip_summed = CHECKSUM_NONE;
  3023. /* 82543 or newer only */
  3024. if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
  3025. /* Ignore Checksum bit is set */
  3026. if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
  3027. /* TCP/UDP checksum error bit is set */
  3028. if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
  3029. /* let the stack verify checksum errors */
  3030. adapter->hw_csum_err++;
  3031. return;
  3032. }
  3033. /* TCP/UDP Checksum has not been calculated */
  3034. if(adapter->hw.mac_type <= e1000_82547_rev_2) {
  3035. if(!(status & E1000_RXD_STAT_TCPCS))
  3036. return;
  3037. } else {
  3038. if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
  3039. return;
  3040. }
  3041. /* It must be a TCP or UDP packet with a valid checksum */
  3042. if(likely(status & E1000_RXD_STAT_TCPCS)) {
  3043. /* TCP checksum is good */
  3044. skb->ip_summed = CHECKSUM_UNNECESSARY;
  3045. } else if(adapter->hw.mac_type > e1000_82547_rev_2) {
  3046. /* IP fragment with UDP payload */
  3047. /* Hardware complements the payload checksum, so we undo it
  3048. * and then put the value in host order for further stack use.
  3049. */
  3050. csum = ntohl(csum ^ 0xFFFF);
  3051. skb->csum = csum;
  3052. skb->ip_summed = CHECKSUM_HW;
  3053. }
  3054. adapter->hw_csum_good++;
  3055. }
  3056. /**
  3057. * e1000_clean_rx_irq - Send received data up the network stack; legacy
  3058. * @adapter: board private structure
  3059. **/
  3060. static boolean_t
  3061. #ifdef CONFIG_E1000_NAPI
  3062. e1000_clean_rx_irq(struct e1000_adapter *adapter,
  3063. struct e1000_rx_ring *rx_ring,
  3064. int *work_done, int work_to_do)
  3065. #else
  3066. e1000_clean_rx_irq(struct e1000_adapter *adapter,
  3067. struct e1000_rx_ring *rx_ring)
  3068. #endif
  3069. {
  3070. struct net_device *netdev = adapter->netdev;
  3071. struct pci_dev *pdev = adapter->pdev;
  3072. struct e1000_rx_desc *rx_desc;
  3073. struct e1000_buffer *buffer_info;
  3074. struct sk_buff *skb;
  3075. unsigned long flags;
  3076. uint32_t length;
  3077. uint8_t last_byte;
  3078. unsigned int i;
  3079. boolean_t cleaned = FALSE;
  3080. i = rx_ring->next_to_clean;
  3081. rx_desc = E1000_RX_DESC(*rx_ring, i);
  3082. while(rx_desc->status & E1000_RXD_STAT_DD) {
  3083. buffer_info = &rx_ring->buffer_info[i];
  3084. #ifdef CONFIG_E1000_NAPI
  3085. if(*work_done >= work_to_do)
  3086. break;
  3087. (*work_done)++;
  3088. #endif
  3089. cleaned = TRUE;
  3090. pci_unmap_single(pdev,
  3091. buffer_info->dma,
  3092. buffer_info->length,
  3093. PCI_DMA_FROMDEVICE);
  3094. skb = buffer_info->skb;
  3095. length = le16_to_cpu(rx_desc->length);
  3096. if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
  3097. /* All receives must fit into a single buffer */
  3098. E1000_DBG("%s: Receive packet consumed multiple"
  3099. " buffers\n", netdev->name);
  3100. dev_kfree_skb_irq(skb);
  3101. goto next_desc;
  3102. }
  3103. if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
  3104. last_byte = *(skb->data + length - 1);
  3105. if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
  3106. rx_desc->errors, length, last_byte)) {
  3107. spin_lock_irqsave(&adapter->stats_lock, flags);
  3108. e1000_tbi_adjust_stats(&adapter->hw,
  3109. &adapter->stats,
  3110. length, skb->data);
  3111. spin_unlock_irqrestore(&adapter->stats_lock,
  3112. flags);
  3113. length--;
  3114. } else {
  3115. dev_kfree_skb_irq(skb);
  3116. goto next_desc;
  3117. }
  3118. }
  3119. /* Good Receive */
  3120. skb_put(skb, length - ETHERNET_FCS_SIZE);
  3121. /* Receive Checksum Offload */
  3122. e1000_rx_checksum(adapter,
  3123. (uint32_t)(rx_desc->status) |
  3124. ((uint32_t)(rx_desc->errors) << 24),
  3125. rx_desc->csum, skb);
  3126. skb->protocol = eth_type_trans(skb, netdev);
  3127. #ifdef CONFIG_E1000_NAPI
  3128. #ifdef NETIF_F_HW_VLAN_TX
  3129. if(unlikely(adapter->vlgrp &&
  3130. (rx_desc->status & E1000_RXD_STAT_VP))) {
  3131. vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
  3132. le16_to_cpu(rx_desc->special) &
  3133. E1000_RXD_SPC_VLAN_MASK);
  3134. } else {
  3135. netif_receive_skb(skb);
  3136. }
  3137. #else
  3138. netif_receive_skb(skb);
  3139. #endif
  3140. #else /* CONFIG_E1000_NAPI */
  3141. #ifdef NETIF_F_HW_VLAN_TX
  3142. if(unlikely(adapter->vlgrp &&
  3143. (rx_desc->status & E1000_RXD_STAT_VP))) {
  3144. vlan_hwaccel_rx(skb, adapter->vlgrp,
  3145. le16_to_cpu(rx_desc->special) &
  3146. E1000_RXD_SPC_VLAN_MASK);
  3147. } else {
  3148. netif_rx(skb);
  3149. }
  3150. #else
  3151. netif_rx(skb);
  3152. #endif
  3153. #endif /* CONFIG_E1000_NAPI */
  3154. netdev->last_rx = jiffies;
  3155. rx_ring->pkt++;
  3156. next_desc:
  3157. rx_desc->status = 0;
  3158. buffer_info->skb = NULL;
  3159. if(unlikely(++i == rx_ring->count)) i = 0;
  3160. rx_desc = E1000_RX_DESC(*rx_ring, i);
  3161. }
  3162. rx_ring->next_to_clean = i;
  3163. adapter->alloc_rx_buf(adapter, rx_ring);
  3164. return cleaned;
  3165. }
  3166. /**
  3167. * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
  3168. * @adapter: board private structure
  3169. **/
  3170. static boolean_t
  3171. #ifdef CONFIG_E1000_NAPI
  3172. e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  3173. struct e1000_rx_ring *rx_ring,
  3174. int *work_done, int work_to_do)
  3175. #else
  3176. e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  3177. struct e1000_rx_ring *rx_ring)
  3178. #endif
  3179. {
  3180. union e1000_rx_desc_packet_split *rx_desc;
  3181. struct net_device *netdev = adapter->netdev;
  3182. struct pci_dev *pdev = adapter->pdev;
  3183. struct e1000_buffer *buffer_info;
  3184. struct e1000_ps_page *ps_page;
  3185. struct e1000_ps_page_dma *ps_page_dma;
  3186. struct sk_buff *skb;
  3187. unsigned int i, j;
  3188. uint32_t length, staterr;
  3189. boolean_t cleaned = FALSE;
  3190. i = rx_ring->next_to_clean;
  3191. rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  3192. staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  3193. while(staterr & E1000_RXD_STAT_DD) {
  3194. buffer_info = &rx_ring->buffer_info[i];
  3195. ps_page = &rx_ring->ps_page[i];
  3196. ps_page_dma = &rx_ring->ps_page_dma[i];
  3197. #ifdef CONFIG_E1000_NAPI
  3198. if(unlikely(*work_done >= work_to_do))
  3199. break;
  3200. (*work_done)++;
  3201. #endif
  3202. cleaned = TRUE;
  3203. pci_unmap_single(pdev, buffer_info->dma,
  3204. buffer_info->length,
  3205. PCI_DMA_FROMDEVICE);
  3206. skb = buffer_info->skb;
  3207. if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
  3208. E1000_DBG("%s: Packet Split buffers didn't pick up"
  3209. " the full packet\n", netdev->name);
  3210. dev_kfree_skb_irq(skb);
  3211. goto next_desc;
  3212. }
  3213. if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
  3214. dev_kfree_skb_irq(skb);
  3215. goto next_desc;
  3216. }
  3217. length = le16_to_cpu(rx_desc->wb.middle.length0);
  3218. if(unlikely(!length)) {
  3219. E1000_DBG("%s: Last part of the packet spanning"
  3220. " multiple descriptors\n", netdev->name);
  3221. dev_kfree_skb_irq(skb);
  3222. goto next_desc;
  3223. }
  3224. /* Good Receive */
  3225. skb_put(skb, length);
  3226. for(j = 0; j < adapter->rx_ps_pages; j++) {
  3227. if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
  3228. break;
  3229. pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
  3230. PAGE_SIZE, PCI_DMA_FROMDEVICE);
  3231. ps_page_dma->ps_page_dma[j] = 0;
  3232. skb_shinfo(skb)->frags[j].page =
  3233. ps_page->ps_page[j];
  3234. ps_page->ps_page[j] = NULL;
  3235. skb_shinfo(skb)->frags[j].page_offset = 0;
  3236. skb_shinfo(skb)->frags[j].size = length;
  3237. skb_shinfo(skb)->nr_frags++;
  3238. skb->len += length;
  3239. skb->data_len += length;
  3240. }
  3241. e1000_rx_checksum(adapter, staterr,
  3242. rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
  3243. skb->protocol = eth_type_trans(skb, netdev);
  3244. if(likely(rx_desc->wb.upper.header_status &
  3245. E1000_RXDPS_HDRSTAT_HDRSP)) {
  3246. adapter->rx_hdr_split++;
  3247. #ifdef HAVE_RX_ZERO_COPY
  3248. skb_shinfo(skb)->zero_copy = TRUE;
  3249. #endif
  3250. }
  3251. #ifdef CONFIG_E1000_NAPI
  3252. #ifdef NETIF_F_HW_VLAN_TX
  3253. if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
  3254. vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
  3255. le16_to_cpu(rx_desc->wb.middle.vlan) &
  3256. E1000_RXD_SPC_VLAN_MASK);
  3257. } else {
  3258. netif_receive_skb(skb);
  3259. }
  3260. #else
  3261. netif_receive_skb(skb);
  3262. #endif
  3263. #else /* CONFIG_E1000_NAPI */
  3264. #ifdef NETIF_F_HW_VLAN_TX
  3265. if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
  3266. vlan_hwaccel_rx(skb, adapter->vlgrp,
  3267. le16_to_cpu(rx_desc->wb.middle.vlan) &
  3268. E1000_RXD_SPC_VLAN_MASK);
  3269. } else {
  3270. netif_rx(skb);
  3271. }
  3272. #else
  3273. netif_rx(skb);
  3274. #endif
  3275. #endif /* CONFIG_E1000_NAPI */
  3276. netdev->last_rx = jiffies;
  3277. rx_ring->pkt++;
  3278. next_desc:
  3279. rx_desc->wb.middle.status_error &= ~0xFF;
  3280. buffer_info->skb = NULL;
  3281. if(unlikely(++i == rx_ring->count)) i = 0;
  3282. rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  3283. staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  3284. }
  3285. rx_ring->next_to_clean = i;
  3286. adapter->alloc_rx_buf(adapter, rx_ring);
  3287. return cleaned;
  3288. }
  3289. /**
  3290. * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  3291. * @adapter: address of board private structure
  3292. **/
  3293. static void
  3294. e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
  3295. struct e1000_rx_ring *rx_ring)
  3296. {
  3297. struct net_device *netdev = adapter->netdev;
  3298. struct pci_dev *pdev = adapter->pdev;
  3299. struct e1000_rx_desc *rx_desc;
  3300. struct e1000_buffer *buffer_info;
  3301. struct sk_buff *skb;
  3302. unsigned int i;
  3303. unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
  3304. i = rx_ring->next_to_use;
  3305. buffer_info = &rx_ring->buffer_info[i];
  3306. while(!buffer_info->skb) {
  3307. skb = dev_alloc_skb(bufsz);
  3308. if(unlikely(!skb)) {
  3309. /* Better luck next round */
  3310. break;
  3311. }
  3312. /* Fix for errata 23, can't cross 64kB boundary */
  3313. if(!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
  3314. struct sk_buff *oldskb = skb;
  3315. DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
  3316. "at %p\n", bufsz, skb->data);
  3317. /* Try again, without freeing the previous */
  3318. skb = dev_alloc_skb(bufsz);
  3319. /* Failed allocation, critical failure */
  3320. if(!skb) {
  3321. dev_kfree_skb(oldskb);
  3322. break;
  3323. }
  3324. if(!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
  3325. /* give up */
  3326. dev_kfree_skb(skb);
  3327. dev_kfree_skb(oldskb);
  3328. break; /* while !buffer_info->skb */
  3329. } else {
  3330. /* Use new allocation */
  3331. dev_kfree_skb(oldskb);
  3332. }
  3333. }
3334. /* Make buffer alignment 2 beyond a 16 byte boundary;
3335. * this will result in a 16 byte aligned IP header after
3336. * the 14 byte MAC header is removed.
3337. */
  3338. skb_reserve(skb, NET_IP_ALIGN);
  3339. skb->dev = netdev;
  3340. buffer_info->skb = skb;
  3341. buffer_info->length = adapter->rx_buffer_len;
  3342. buffer_info->dma = pci_map_single(pdev,
  3343. skb->data,
  3344. adapter->rx_buffer_len,
  3345. PCI_DMA_FROMDEVICE);
  3346. /* Fix for errata 23, can't cross 64kB boundary */
  3347. if(!e1000_check_64k_bound(adapter,
  3348. (void *)(unsigned long)buffer_info->dma,
  3349. adapter->rx_buffer_len)) {
  3350. DPRINTK(RX_ERR, ERR,
  3351. "dma align check failed: %u bytes at %p\n",
  3352. adapter->rx_buffer_len,
  3353. (void *)(unsigned long)buffer_info->dma);
  3354. dev_kfree_skb(skb);
  3355. buffer_info->skb = NULL;
  3356. pci_unmap_single(pdev, buffer_info->dma,
  3357. adapter->rx_buffer_len,
  3358. PCI_DMA_FROMDEVICE);
  3359. break; /* while !buffer_info->skb */
  3360. }
  3361. rx_desc = E1000_RX_DESC(*rx_ring, i);
  3362. rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  3363. if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
  3364. /* Force memory writes to complete before letting h/w
  3365. * know there are new descriptors to fetch. (Only
  3366. * applicable for weak-ordered memory model archs,
  3367. * such as IA-64). */
  3368. wmb();
  3369. writel(i, adapter->hw.hw_addr + rx_ring->rdt);
  3370. }
  3371. if(unlikely(++i == rx_ring->count)) i = 0;
  3372. buffer_info = &rx_ring->buffer_info[i];
  3373. }
  3374. rx_ring->next_to_use = i;
  3375. }
  3376. /**
  3377. * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  3378. * @adapter: address of board private structure
  3379. **/
  3380. static void
  3381. e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
  3382. struct e1000_rx_ring *rx_ring)
  3383. {
  3384. struct net_device *netdev = adapter->netdev;
  3385. struct pci_dev *pdev = adapter->pdev;
  3386. union e1000_rx_desc_packet_split *rx_desc;
  3387. struct e1000_buffer *buffer_info;
  3388. struct e1000_ps_page *ps_page;
  3389. struct e1000_ps_page_dma *ps_page_dma;
  3390. struct sk_buff *skb;
  3391. unsigned int i, j;
  3392. i = rx_ring->next_to_use;
  3393. buffer_info = &rx_ring->buffer_info[i];
  3394. ps_page = &rx_ring->ps_page[i];
  3395. ps_page_dma = &rx_ring->ps_page_dma[i];
  3396. while (!buffer_info->skb) {
  3397. rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  3398. for (j = 0; j < PS_PAGE_BUFFERS; j++) {
  3399. if (j < adapter->rx_ps_pages) {
  3400. if (likely(!ps_page->ps_page[j])) {
  3401. ps_page->ps_page[j] =
  3402. alloc_page(GFP_ATOMIC);
  3403. if (unlikely(!ps_page->ps_page[j]))
  3404. goto no_buffers;
  3405. ps_page_dma->ps_page_dma[j] =
  3406. pci_map_page(pdev,
  3407. ps_page->ps_page[j],
  3408. 0, PAGE_SIZE,
  3409. PCI_DMA_FROMDEVICE);
  3410. }
  3411. /* Refresh the desc even if buffer_addrs didn't
  3412. * change because each write-back erases
  3413. * this info.
  3414. */
  3415. rx_desc->read.buffer_addr[j+1] =
  3416. cpu_to_le64(ps_page_dma->ps_page_dma[j]);
  3417. } else
  3418. rx_desc->read.buffer_addr[j+1] = ~0;
  3419. }
  3420. skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
  3421. if (unlikely(!skb))
  3422. break;
3423. /* Make buffer alignment 2 beyond a 16 byte boundary;
3424. * this will result in a 16 byte aligned IP header after
3425. * the 14 byte MAC header is removed.
3426. */
  3427. skb_reserve(skb, NET_IP_ALIGN);
  3428. skb->dev = netdev;
  3429. buffer_info->skb = skb;
  3430. buffer_info->length = adapter->rx_ps_bsize0;
  3431. buffer_info->dma = pci_map_single(pdev, skb->data,
  3432. adapter->rx_ps_bsize0,
  3433. PCI_DMA_FROMDEVICE);
  3434. rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
  3435. if (unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
  3436. /* Force memory writes to complete before letting h/w
  3437. * know there are new descriptors to fetch. (Only
  3438. * applicable for weak-ordered memory model archs,
  3439. * such as IA-64). */
  3440. wmb();
  3441. /* Hardware increments by 16 bytes, but packet split
  3442. * descriptors are 32 bytes...so we increment tail
  3443. * twice as much.
  3444. */
  3445. writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
  3446. }
  3447. if (unlikely(++i == rx_ring->count)) i = 0;
  3448. buffer_info = &rx_ring->buffer_info[i];
  3449. ps_page = &rx_ring->ps_page[i];
  3450. ps_page_dma = &rx_ring->ps_page_dma[i];
  3451. }
  3452. no_buffers:
  3453. rx_ring->next_to_use = i;
  3454. }
  3455. /**
  3456. * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
3457. * @adapter: board private structure
  3458. **/
  3459. static void
  3460. e1000_smartspeed(struct e1000_adapter *adapter)
  3461. {
  3462. uint16_t phy_status;
  3463. uint16_t phy_ctrl;
  3464. if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
  3465. !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
  3466. return;
  3467. if(adapter->smartspeed == 0) {
  3468. /* If Master/Slave config fault is asserted twice,
  3469. * we assume back-to-back */
  3470. e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
  3471. if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
  3472. e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
  3473. if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
  3474. e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
  3475. if(phy_ctrl & CR_1000T_MS_ENABLE) {
  3476. phy_ctrl &= ~CR_1000T_MS_ENABLE;
  3477. e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
  3478. phy_ctrl);
  3479. adapter->smartspeed++;
  3480. if(!e1000_phy_setup_autoneg(&adapter->hw) &&
  3481. !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
  3482. &phy_ctrl)) {
  3483. phy_ctrl |= (MII_CR_AUTO_NEG_EN |
  3484. MII_CR_RESTART_AUTO_NEG);
  3485. e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
  3486. phy_ctrl);
  3487. }
  3488. }
  3489. return;
  3490. } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
  3491. /* If still no link, perhaps using 2/3 pair cable */
  3492. e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
  3493. phy_ctrl |= CR_1000T_MS_ENABLE;
  3494. e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
  3495. if(!e1000_phy_setup_autoneg(&adapter->hw) &&
  3496. !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
  3497. phy_ctrl |= (MII_CR_AUTO_NEG_EN |
  3498. MII_CR_RESTART_AUTO_NEG);
  3499. e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
  3500. }
  3501. }
  3502. /* Restart process after E1000_SMARTSPEED_MAX iterations */
  3503. if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
  3504. adapter->smartspeed = 0;
  3505. }
  3506. /**
3507. * e1000_ioctl - handle ioctl requests for the interface
3508. * @netdev: network interface device structure
3509. * @ifr: pointer to the ioctl request data
3510. * @cmd: ioctl command number
  3511. **/
  3512. static int
  3513. e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  3514. {
  3515. switch (cmd) {
  3516. #ifdef SIOCGMIIPHY
  3517. case SIOCGMIIPHY:
  3518. case SIOCGMIIREG:
  3519. case SIOCSMIIREG:
  3520. return e1000_mii_ioctl(netdev, ifr, cmd);
  3521. #endif
  3522. case BYPASS_MODE_CTRL_SIOC:
  3523. return e1000_bypass_ctrl_ioctl(netdev, ifr);
  3524. #ifdef ETHTOOL_OPS_COMPAT
  3525. case SIOCETHTOOL:
  3526. return ethtool_ioctl(ifr);
  3527. #endif
  3528. default:
  3529. return -EOPNOTSUPP;
  3530. }
  3531. }
  3532. #ifdef SIOCGMIIPHY
  3533. /**
3534. * e1000_mii_ioctl - handle MII register ioctl requests
3535. * @netdev: network interface device structure
3536. * @ifr: pointer to the MII ioctl request data
3537. * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
  3538. **/
  3539. static int
  3540. e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  3541. {
  3542. struct e1000_adapter *adapter = netdev_priv(netdev);
  3543. struct mii_ioctl_data *data = if_mii(ifr);
  3544. int retval;
  3545. uint16_t mii_reg;
  3546. uint16_t spddplx;
  3547. unsigned long flags;
  3548. if(adapter->hw.media_type != e1000_media_type_copper)
  3549. return -EOPNOTSUPP;
  3550. switch (cmd) {
  3551. case SIOCGMIIPHY:
  3552. data->phy_id = adapter->hw.phy_addr;
  3553. break;
  3554. case SIOCGMIIREG:
  3555. if(!capable(CAP_NET_ADMIN))
  3556. return -EPERM;
  3557. spin_lock_irqsave(&adapter->stats_lock, flags);
  3558. if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
  3559. &data->val_out)) {
  3560. spin_unlock_irqrestore(&adapter->stats_lock, flags);
  3561. return -EIO;
  3562. }
  3563. spin_unlock_irqrestore(&adapter->stats_lock, flags);
  3564. break;
  3565. case SIOCSMIIREG:
  3566. if(!capable(CAP_NET_ADMIN))
  3567. return -EPERM;
  3568. if(data->reg_num & ~(0x1F))
  3569. return -EFAULT;
  3570. mii_reg = data->val_in;
  3571. spin_lock_irqsave(&adapter->stats_lock, flags);
  3572. if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
  3573. mii_reg)) {
  3574. spin_unlock_irqrestore(&adapter->stats_lock, flags);
  3575. return -EIO;
  3576. }
  3577. if(adapter->hw.phy_type == e1000_phy_m88) {
  3578. switch (data->reg_num) {
  3579. case PHY_CTRL:
  3580. if(mii_reg & MII_CR_POWER_DOWN)
  3581. break;
  3582. if(mii_reg & MII_CR_AUTO_NEG_EN) {
  3583. adapter->hw.autoneg = 1;
  3584. adapter->hw.autoneg_advertised = 0x2F;
  3585. } else {
  3586. if(mii_reg & 0x40)
  3587. spddplx = SPEED_1000;
  3588. else if(mii_reg & 0x2000)
  3589. spddplx = SPEED_100;
  3590. else
  3591. spddplx = SPEED_10;
  3592. spddplx += (mii_reg & 0x100)
  3593. ? FULL_DUPLEX :
  3594. HALF_DUPLEX;
  3595. retval = e1000_set_spd_dplx(adapter,
  3596. spddplx);
  3597. if(retval) {
  3598. spin_unlock_irqrestore(
  3599. &adapter->stats_lock,
  3600. flags);
  3601. return retval;
  3602. }
  3603. }
  3604. if(netif_running(adapter->netdev)) {
  3605. e1000_down(adapter);
  3606. e1000_up(adapter);
  3607. } else
  3608. e1000_reset(adapter);
  3609. break;
  3610. case M88E1000_PHY_SPEC_CTRL:
  3611. case M88E1000_EXT_PHY_SPEC_CTRL:
  3612. if(e1000_phy_reset(&adapter->hw)) {
  3613. spin_unlock_irqrestore(
  3614. &adapter->stats_lock, flags);
  3615. return -EIO;
  3616. }
  3617. break;
  3618. }
  3619. } else {
  3620. switch (data->reg_num) {
  3621. case PHY_CTRL:
  3622. if(mii_reg & MII_CR_POWER_DOWN)
  3623. break;
  3624. if(netif_running(adapter->netdev)) {
  3625. e1000_down(adapter);
  3626. e1000_up(adapter);
  3627. } else
  3628. e1000_reset(adapter);
  3629. break;
  3630. }
  3631. }
  3632. spin_unlock_irqrestore(&adapter->stats_lock, flags);
  3633. break;
  3634. default:
  3635. return -EOPNOTSUPP;
  3636. }
  3637. return E1000_SUCCESS;
  3638. }
  3639. #endif
  3640. void
  3641. e1000_pci_set_mwi(struct e1000_hw *hw)
  3642. {
  3643. struct e1000_adapter *adapter = hw->back;
  3644. #ifdef HAVE_PCI_SET_MWI
  3645. int ret_val = pci_set_mwi(adapter->pdev);
  3646. if(ret_val)
  3647. DPRINTK(PROBE, ERR, "Error in setting MWI\n");
  3648. #else
  3649. pci_write_config_word(adapter->pdev, PCI_COMMAND,
  3650. adapter->hw.pci_cmd_word |
  3651. PCI_COMMAND_INVALIDATE);
  3652. #endif
  3653. }
  3654. void
  3655. e1000_pci_clear_mwi(struct e1000_hw *hw)
  3656. {
  3657. struct e1000_adapter *adapter = hw->back;
  3658. #ifdef HAVE_PCI_SET_MWI
  3659. pci_clear_mwi(adapter->pdev);
  3660. #else
  3661. pci_write_config_word(adapter->pdev, PCI_COMMAND,
  3662. adapter->hw.pci_cmd_word &
  3663. ~PCI_COMMAND_INVALIDATE);
  3664. #endif
  3665. }
  3666. void
  3667. e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
  3668. {
  3669. struct e1000_adapter *adapter = hw->back;
  3670. pci_read_config_word(adapter->pdev, reg, value);
  3671. }
  3672. void
  3673. e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
  3674. {
  3675. struct e1000_adapter *adapter = hw->back;
  3676. pci_write_config_word(adapter->pdev, reg, *value);
  3677. }
  3678. uint32_t
  3679. e1000_io_read(struct e1000_hw *hw, unsigned long port)
  3680. {
  3681. return inl(port);
  3682. }
  3683. void
  3684. e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
  3685. {
  3686. outl(value, port);
  3687. }
  3688. #ifdef NETIF_F_HW_VLAN_TX
  3689. static void
  3690. e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
  3691. {
  3692. struct e1000_adapter *adapter = netdev_priv(netdev);
  3693. uint32_t ctrl, rctl;
  3694. e1000_irq_disable(adapter);
  3695. adapter->vlgrp = grp;
  3696. if(grp) {
  3697. /* enable VLAN tag insert/strip */
  3698. ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  3699. ctrl |= E1000_CTRL_VME;
  3700. E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  3701. /* enable VLAN receive filtering */
  3702. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3703. rctl |= E1000_RCTL_VFE;
  3704. rctl &= ~E1000_RCTL_CFIEN;
  3705. E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3706. e1000_update_mng_vlan(adapter);
  3707. } else {
  3708. /* disable VLAN tag insert/strip */
  3709. ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  3710. ctrl &= ~E1000_CTRL_VME;
  3711. E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  3712. /* disable VLAN filtering */
  3713. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3714. rctl &= ~E1000_RCTL_VFE;
  3715. E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3716. if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
  3717. e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  3718. adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  3719. }
  3720. }
  3721. e1000_irq_enable(adapter);
  3722. }
  3723. static void
  3724. e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
  3725. {
  3726. struct e1000_adapter *adapter = netdev_priv(netdev);
  3727. uint32_t vfta, index;
  3728. if((adapter->hw.mng_cookie.status &
  3729. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  3730. (vid == adapter->mng_vlan_id))
  3731. return;
  3732. /* add VID to filter table */
  3733. index = (vid >> 5) & 0x7F;
  3734. vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  3735. vfta |= (1 << (vid & 0x1F));
  3736. e1000_write_vfta(&adapter->hw, index, vfta);
  3737. }
  3738. static void
  3739. e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
  3740. {
  3741. struct e1000_adapter *adapter = netdev_priv(netdev);
  3742. uint32_t vfta, index;
  3743. e1000_irq_disable(adapter);
  3744. if(adapter->vlgrp)
  3745. adapter->vlgrp->vlan_devices[vid] = NULL;
  3746. e1000_irq_enable(adapter);
  3747. if((adapter->hw.mng_cookie.status &
  3748. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  3749. (vid == adapter->mng_vlan_id))
  3750. return;
  3751. /* remove VID from filter table */
  3752. index = (vid >> 5) & 0x7F;
  3753. vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  3754. vfta &= ~(1 << (vid & 0x1F));
  3755. e1000_write_vfta(&adapter->hw, index, vfta);
  3756. }
  3757. static void
  3758. e1000_restore_vlan(struct e1000_adapter *adapter)
  3759. {
  3760. e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
  3761. if(adapter->vlgrp) {
  3762. uint16_t vid;
  3763. for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
  3764. if(!adapter->vlgrp->vlan_devices[vid])
  3765. continue;
  3766. e1000_vlan_rx_add_vid(adapter->netdev, vid);
  3767. }
  3768. }
  3769. }
  3770. #endif
  3771. int
  3772. e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
  3773. {
  3774. adapter->hw.autoneg = 0;
3775. /* Fiber NICs only allow 1000 Mbps Full duplex */
  3776. if((adapter->hw.media_type == e1000_media_type_fiber) &&
  3777. spddplx != (SPEED_1000 + DUPLEX_FULL)) {
  3778. DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
  3779. return -EINVAL;
  3780. }
  3781. switch(spddplx) {
  3782. case SPEED_10 + DUPLEX_HALF:
  3783. adapter->hw.forced_speed_duplex = e1000_10_half;
  3784. break;
  3785. case SPEED_10 + DUPLEX_FULL:
  3786. adapter->hw.forced_speed_duplex = e1000_10_full;
  3787. break;
  3788. case SPEED_100 + DUPLEX_HALF:
  3789. adapter->hw.forced_speed_duplex = e1000_100_half;
  3790. break;
  3791. case SPEED_100 + DUPLEX_FULL:
  3792. adapter->hw.forced_speed_duplex = e1000_100_full;
  3793. break;
  3794. case SPEED_1000 + DUPLEX_FULL:
  3795. adapter->hw.autoneg = 1;
  3796. adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
  3797. break;
  3798. case SPEED_1000 + DUPLEX_HALF: /* not supported */
  3799. default:
  3800. DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
  3801. return -EINVAL;
  3802. }
  3803. return 0;
  3804. }
  3805. static int
  3806. e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
  3807. {
  3808. struct pci_dev *pdev = NULL;
  3809. switch(event) {
  3810. case SYS_DOWN:
  3811. case SYS_HALT:
  3812. case SYS_POWER_OFF:
  3813. while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
  3814. if(pci_dev_driver(pdev) == &e1000_driver)
  3815. e1000_suspend(pdev, 3);
  3816. }
  3817. }
  3818. return NOTIFY_DONE;
  3819. }
  3820. static int
  3821. e1000_suspend(struct pci_dev *pdev, uint32_t state)
  3822. {
  3823. struct net_device *netdev = pci_get_drvdata(pdev);
  3824. struct e1000_adapter *adapter = netdev_priv(netdev);
  3825. uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
  3826. uint32_t wufc = adapter->wol;
  3827. netif_device_detach(netdev);
  3828. if(netif_running(netdev))
  3829. e1000_down(adapter);
  3830. status = E1000_READ_REG(&adapter->hw, STATUS);
  3831. if(status & E1000_STATUS_LU)
  3832. wufc &= ~E1000_WUFC_LNKC;
  3833. if(wufc) {
  3834. e1000_setup_rctl(adapter);
  3835. e1000_set_multi(netdev);
  3836. /* turn on all-multi mode if wake on multicast is enabled */
  3837. if(adapter->wol & E1000_WUFC_MC) {
  3838. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3839. rctl |= E1000_RCTL_MPE;
  3840. E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3841. }
  3842. if(adapter->hw.mac_type >= e1000_82540) {
  3843. ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  3844. /* advertise wake from D3Cold */
  3845. #define E1000_CTRL_ADVD3WUC 0x00100000
  3846. /* phy power management enable */
  3847. #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
  3848. ctrl |= E1000_CTRL_ADVD3WUC |
  3849. E1000_CTRL_EN_PHY_PWR_MGMT;
  3850. E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  3851. }
  3852. if(adapter->hw.media_type == e1000_media_type_fiber ||
  3853. adapter->hw.media_type == e1000_media_type_internal_serdes) {
  3854. /* keep the laser running in D3 */
  3855. ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  3856. ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
  3857. E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
  3858. }
  3859. /* Allow time for pending master requests to run */
  3860. e1000_disable_pciex_master(&adapter->hw);
  3861. E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
  3862. E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
  3863. pci_enable_wake(pdev, 3, 1);
  3864. pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
  3865. } else {
  3866. E1000_WRITE_REG(&adapter->hw, WUC, 0);
  3867. E1000_WRITE_REG(&adapter->hw, WUFC, 0);
  3868. pci_enable_wake(pdev, 3, 0);
  3869. pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
  3870. }
  3871. pci_save_state(pdev);
  3872. if(adapter->hw.mac_type >= e1000_82540 &&
  3873. adapter->hw.media_type == e1000_media_type_copper) {
  3874. manc = E1000_READ_REG(&adapter->hw, MANC);
  3875. if(manc & E1000_MANC_SMBUS_EN) {
  3876. manc |= E1000_MANC_ARP_EN;
  3877. E1000_WRITE_REG(&adapter->hw, MANC, manc);
  3878. pci_enable_wake(pdev, 3, 1);
  3879. pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
  3880. }
  3881. }
  3882. switch(adapter->hw.mac_type) {
  3883. case e1000_82571:
  3884. case e1000_82572:
  3885. ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  3886. E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  3887. ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
  3888. break;
  3889. case e1000_82573:
  3890. swsm = E1000_READ_REG(&adapter->hw, SWSM);
  3891. E1000_WRITE_REG(&adapter->hw, SWSM,
  3892. swsm & ~E1000_SWSM_DRV_LOAD);
  3893. break;
  3894. default:
  3895. break;
  3896. }
  3897. pci_disable_device(pdev);
  3898. state = (state > 0) ? 3 : 0;
  3899. pci_set_power_state(pdev, state);
  3900. return 0;
  3901. }
  3902. #ifdef CONFIG_PM
  3903. static int
  3904. e1000_resume(struct pci_dev *pdev)
  3905. {
  3906. struct net_device *netdev = pci_get_drvdata(pdev);
  3907. struct e1000_adapter *adapter = netdev_priv(netdev);
  3908. uint32_t manc, ret_val, swsm;
  3909. uint32_t ctrl_ext;
  3910. pci_set_power_state(pdev, 0);
  3911. pci_restore_state(pdev);
  3912. ret_val = pci_enable_device(pdev);
  3913. pci_set_master(pdev);
  3914. pci_enable_wake(pdev, 3, 0);
  3915. pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
  3916. e1000_reset(adapter);
  3917. E1000_WRITE_REG(&adapter->hw, WUS, ~0);
  3918. if(netif_running(netdev))
  3919. e1000_up(adapter);
  3920. netif_device_attach(netdev);
  3921. if(adapter->hw.mac_type >= e1000_82540 &&
  3922. adapter->hw.media_type == e1000_media_type_copper) {
  3923. manc = E1000_READ_REG(&adapter->hw, MANC);
  3924. manc &= ~(E1000_MANC_ARP_EN);
  3925. E1000_WRITE_REG(&adapter->hw, MANC, manc);
  3926. }
  3927. switch(adapter->hw.mac_type) {
  3928. case e1000_82571:
  3929. case e1000_82572:
  3930. ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  3931. E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  3932. ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
  3933. break;
  3934. case e1000_82573:
  3935. swsm = E1000_READ_REG(&adapter->hw, SWSM);
  3936. E1000_WRITE_REG(&adapter->hw, SWSM,
  3937. swsm | E1000_SWSM_DRV_LOAD);
  3938. break;
  3939. default:
  3940. break;
  3941. }
  3942. return 0;
  3943. }
  3944. #endif
  3945. #ifdef CONFIG_NET_POLL_CONTROLLER
  3946. /*
  3947. * Polling 'interrupt' - used by things like netconsole to send skbs
  3948. * without having to re-enable interrupts. It's not called while
  3949. * the interrupt routine is executing.
  3950. */
  3951. static void
  3952. e1000_netpoll(struct net_device *netdev)
  3953. {
  3954. struct e1000_adapter *adapter = netdev_priv(netdev);
  3955. disable_irq(adapter->pdev->irq);
  3956. e1000_intr(adapter->pdev->irq, netdev, NULL);
  3957. enable_irq(adapter->pdev->irq);
  3958. }
  3959. #endif
  3960. /* Click polling support */
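/**
 * e1000_rx_poll - Click polling receive path
 * @dev: network interface device structure
 * @want: in: maximum number of packets to harvest; out: number returned
 *
 * Walks the receive ring without taking an interrupt and returns the
 * completed skbs as a singly linked list (chained through skb->next).
 * Also runs a receive-lockup detector: if no packets arrive and the
 * RX FIFO head/tail registers stop moving, the adapter is reset and
 * its rings are reconfigured.
 **/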
  3961. static struct sk_buff *
  3962. e1000_rx_poll(struct net_device *dev, int *want)
  3963. {
  3964. struct e1000_adapter *adapter = dev->priv;
  3965. struct pci_dev *pdev = adapter->pdev;
  3966. struct e1000_rx_desc *rx_desc;
  3967. struct e1000_rx_ring *rx_ring = adapter->rx_ring;
  3968. struct sk_buff *skb_head = NULL, **skb;
  3969. uint32_t length;
  3970. int got, next;
  3971. skb = &skb_head;
  3972. for( got = 0, next = (rx_ring->next_to_clean + 1) % rx_ring->count;
  3973. got < *want && next != rx_ring->next_to_use;
  3974. got++, rx_ring->next_to_clean = next,
  3975. next = (rx_ring->next_to_clean + 1) % rx_ring->count) {
  3976. int i = rx_ring->next_to_clean;
  3977. rx_desc = E1000_RX_DESC(*rx_ring, i);
  3978. if(!(rx_desc->status & E1000_RXD_STAT_DD))
  3979. break;
  3980. pci_unmap_single(pdev, rx_ring->buffer_info[i].dma,
  3981. rx_ring->buffer_info[i].length,
  3982. PCI_DMA_FROMDEVICE);
  3983. *skb = rx_ring->buffer_info[i].skb;
  3984. rx_ring->buffer_info[i].skb = NULL;
  3985. if(!(rx_desc->status & E1000_RXD_STAT_EOP) ||
  3986. (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
  3987. rx_desc->status = 0;
  3988. dev_kfree_skb(*skb);
  3989. *skb = NULL;
  3990. got--;
  3991. continue;
  3992. }
  3993. rx_desc->status = 0;
  3994. length = le16_to_cpu(rx_desc->length);
  3995. skb_put(*skb, length - CRC_LENGTH);
  3996. e1000_rx_checksum(adapter,
  3997. (uint32_t)(rx_desc->status) | ((uint32_t)(rx_desc->errors) << 24),
  3998. rx_desc->csum, *skb);
  3999. skb_pull(*skb, dev->hard_header_len);
  4000. skb = &((*skb)->next);
  4001. *skb = NULL;
  4002. }
  4003. *want = got;
  4004. /*
  4005. * Receive Lockup detection and recovery
  4006. */
  4007. if (got) {
  4008. adapter->rx_state = E1000_RX_STATE_NORMAL;
  4009. adapter->rx_normal_jiffies = jiffies + HZ;
  4010. } else {
  4011. int rdfh;
  4012. int rdft;
  4013. switch (adapter->rx_state) {
  4014. case E1000_RX_STATE_NORMAL:
4015. if (time_before(jiffies, adapter->rx_normal_jiffies))
  4016. break;
  4017. adapter->rx_state = E1000_RX_STATE_QUIET;
  4018. adapter->rx_quiet_jiffies = jiffies + HZ;
  4019. adapter->prev_rdfh = E1000_READ_REG(&adapter->hw, RDFH);
  4020. adapter->prev_rdft = E1000_READ_REG(&adapter->hw, RDFT);
  4021. break;
  4022. case E1000_RX_STATE_QUIET:
  4023. rdfh = E1000_READ_REG(&adapter->hw, RDFH);
  4024. rdft = E1000_READ_REG(&adapter->hw, RDFT);
  4025. if (adapter->prev_rdfh != rdfh ||
  4026. adapter->prev_rdft != rdft ||
  4027. adapter->prev_rdfh == adapter->prev_rdft) {
  4028. adapter->prev_rdfh = rdfh;
  4029. adapter->prev_rdft = rdft;
  4030. adapter->rx_quiet_jiffies = jiffies + HZ;
  4031. break;
  4032. }
4033. if (time_before(jiffies, adapter->rx_quiet_jiffies))
  4034. break;
4035. /* Fall through to the lockup case */
  4036. case E1000_RX_STATE_LOCKUP:
  4037. /* Receive lockup detected: perform a recovery */
  4038. adapter->rx_lockup_recoveries++;
  4039. /* taken from e1000_down() */
  4040. e1000_reset(adapter);
  4041. e1000_clean_tx_ring(adapter, adapter->tx_ring);
  4042. e1000_clean_rx_ring(adapter, adapter->rx_ring);
  4043. /* taken from e1000_up() */
  4044. e1000_set_multi(dev);
  4045. e1000_configure_tx(adapter);
  4046. e1000_setup_rctl(adapter);
  4047. e1000_configure_rx(adapter);
  4048. e1000_alloc_rx_buffers(adapter, adapter->rx_ring);
  4049. /* reset the lockup detection */
  4050. adapter->rx_state = E1000_RX_STATE_NORMAL;
  4051. adapter->rx_normal_jiffies = jiffies + HZ;
  4052. break;
  4053. }
  4054. }
  4055. return skb_head;
  4056. }
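/**
 * e1000_rx_refill - refill the receive ring from a list of skbs
 * @dev: network interface device structure
 * @skbs: linked list of replacement skbs, or NULL to only query space
 *
 * Runs any deferred watchdog work, maps the supplied skbs into empty
 * receive descriptors, advances the RDT tail register as descriptors
 * are added, and returns the number of descriptors still unused.
 **/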
  4057. int
  4058. e1000_rx_refill(struct net_device *dev, struct sk_buff **skbs)
  4059. {
  4060. struct e1000_adapter *adapter = dev->priv;
  4061. struct e1000_rx_ring *rx_ring = adapter->rx_ring;
  4062. struct pci_dev *pdev = adapter->pdev;
  4063. struct e1000_rx_desc *rx_desc;
  4064. struct sk_buff *skb;
  4065. int next;
  4066. /*
  4067. * Update statistics counters, check link.
  4068. * do_poll_watchdog is set by the timer interrupt e1000_watchdog(),
  4069. * but we don't want to do the work in an interrupt (since it may
  4070. * happen while polling code is active), so defer it to here.
  4071. */
  4072. if(adapter->do_poll_watchdog){
  4073. adapter->do_poll_watchdog = 0;
  4074. e1000_watchdog_1(adapter);
  4075. }
  4076. if (!netif_carrier_ok(dev))
  4077. return 0;
  4078. if(skbs == 0)
  4079. return E1000_DESC_UNUSED(rx_ring);
  4080. for( next = (rx_ring->next_to_use + 1) % rx_ring->count;
  4081. next != rx_ring->next_to_clean;
  4082. rx_ring->next_to_use = next,
  4083. next = (rx_ring->next_to_use + 1) % rx_ring->count ) {
  4084. int i = rx_ring->next_to_use;
  4085. if(rx_ring->buffer_info[i].skb != NULL)
  4086. break;
  4087. if(!(skb = *skbs))
  4088. break;
  4089. *skbs = skb->next;
  4090. skb->next = NULL;
  4091. skb->dev = dev;
  4092. rx_ring->buffer_info[i].skb = skb;
  4093. rx_ring->buffer_info[i].length = adapter->rx_buffer_len;
  4094. rx_ring->buffer_info[i].dma =
  4095. pci_map_single(pdev,
  4096. skb->data,
  4097. adapter->rx_buffer_len,
  4098. PCI_DMA_FROMDEVICE);
  4099. rx_desc = E1000_RX_DESC(*rx_ring, i);
  4100. rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
4101. /* Intel documentation says: "Software adds receive descriptors by
  4102. * writing the tail pointer with the index of the entry beyond the
  4103. * last valid descriptor." (ref 11337 p 27) */
  4104. E1000_WRITE_REG(&adapter->hw, RDT, next);
  4105. }
  4106. return E1000_DESC_UNUSED(adapter->rx_ring);
  4107. }
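/**
 * e1000_tx_pqueue - queue one packet for transmission (Click polling)
 * @netdev: network interface device structure
 * @skb: packet to place on the transmit ring
 *
 * Streamlined version of e1000_xmit_frame(): maps the (linear) skb
 * into a single transmit descriptor and advances next_to_use, but
 * does not touch the hardware tail pointer; the caller batches that
 * through e1000_tx_eob(). Returns 0 on success, -1 if there is no
 * carrier or no free descriptor.
 **/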
  4108. static int
  4109. e1000_tx_pqueue(struct net_device *netdev, struct sk_buff *skb)
  4110. {
  4111. /*
  4112. * This function is just a streamlined version of
  4113. * return e1000_xmit_frame(skb, netdev);
  4114. */
  4115. struct e1000_adapter *adapter = netdev->priv;
  4116. struct pci_dev *pdev = adapter->pdev;
  4117. struct e1000_tx_desc *tx_desc;
  4118. int i, len, offset, txd_needed;
  4119. uint32_t txd_upper, txd_lower;
  4120. unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
  4121. if(!netif_carrier_ok(netdev)) {
  4122. netif_stop_queue(netdev);
  4123. return -1;
  4124. }
  4125. txd_needed = TXD_USE_COUNT(skb->len, max_txd_pwr);
  4126. /* make sure there are enough Tx descriptors available in the ring */
  4127. if(E1000_DESC_UNUSED(adapter->tx_ring) <= (txd_needed + 1)) {
  4128. adapter->net_stats.tx_dropped++;
  4129. netif_stop_queue(netdev);
  4130. return -1;
  4131. }
  4132. txd_upper = 0;
  4133. txd_lower = adapter->txd_cmd;
  4134. if(e1000_tx_csum(adapter, adapter->tx_ring, skb)){
  4135. txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
  4136. txd_upper |= E1000_TXD_POPTS_TXSM << 8;
  4137. }
  4138. i = adapter->tx_ring->next_to_use;
  4139. tx_desc = E1000_TX_DESC(*(adapter->tx_ring), i);
  4140. len = skb->len;
  4141. offset = 0;
  4142. adapter->tx_ring->buffer_info[i].length = len;
  4143. adapter->tx_ring->buffer_info[i].dma =
  4144. pci_map_page(pdev, virt_to_page(skb->data + offset),
  4145. (unsigned long) (skb->data + offset) & ~PAGE_MASK, len,
  4146. PCI_DMA_TODEVICE);
  4147. /* thanks Adam Greenhalgh and Beyers Cronje! */
  4148. adapter->tx_ring->buffer_info[i].time_stamp = jiffies;
  4149. tx_desc->buffer_addr = cpu_to_le64(adapter->tx_ring->buffer_info[i].dma);
  4150. tx_desc->lower.data = cpu_to_le32(txd_lower | len);
  4151. tx_desc->upper.data = cpu_to_le32(txd_upper);
  4152. /* EOP and SKB pointer go with the last fragment */
  4153. tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP);
  4154. adapter->tx_ring->buffer_info[i].skb = skb;
  4155. i = i + 1;
  4156. if(i >= adapter->tx_ring->count)
  4157. i = 0;
  4158. /* Move the HW Tx Tail Pointer */
  4159. adapter->tx_ring->next_to_use = i;
  4160. netdev->trans_start = jiffies;
  4161. return 0;
  4162. }
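/**
 * e1000_tx_clean - reclaim transmitted skbs (Click polling)
 * @netdev: network interface device structure
 *
 * Streamlined version of e1000_clean_tx_irq(): unmaps completed
 * transmit buffers, restarts the queue if enough descriptors were
 * freed, and returns the transmitted skbs as a singly linked list
 * so the caller can recycle them.
 **/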
  4163. static struct sk_buff *
  4164. e1000_tx_clean(struct net_device *netdev)
  4165. {
  4166. /*
  4167. * This function is a streamlined version of
  4168. * return e1000_clean_tx_irq(adapter, 1);
  4169. */
  4170. struct e1000_adapter *adapter = netdev->priv;
  4171. struct pci_dev *pdev = adapter->pdev;
  4172. int i;
  4173. struct e1000_tx_desc *tx_desc;
  4174. struct sk_buff *skb_head, *skb_last;
  4175. skb_head = skb_last = 0;
  4176. i = adapter->tx_ring->next_to_clean;
  4177. tx_desc = E1000_TX_DESC(*(adapter->tx_ring), i);
  4178. while(tx_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
  4179. if(adapter->tx_ring->buffer_info[i].dma != 0) {
  4180. pci_unmap_page(pdev, adapter->tx_ring->buffer_info[i].dma,
  4181. adapter->tx_ring->buffer_info[i].length,
  4182. PCI_DMA_TODEVICE);
  4183. adapter->tx_ring->buffer_info[i].dma = 0;
  4184. }
  4185. if(adapter->tx_ring->buffer_info[i].skb != NULL) {
  4186. struct sk_buff *skb = adapter->tx_ring->buffer_info[i].skb;
  4187. if (skb_head == 0) {
  4188. skb_head = skb;
  4189. skb_last = skb;
  4190. skb_last->next = NULL;
  4191. } else {
  4192. skb_last->next = skb;
  4193. skb->next = NULL;
  4194. skb_last = skb;
  4195. }
  4196. adapter->tx_ring->buffer_info[i].skb = NULL;
  4197. }
  4198. i = (i + 1) % adapter->tx_ring->count;
  4199. tx_desc->upper.data = 0;
  4200. tx_desc = E1000_TX_DESC(*(adapter->tx_ring), i);
  4201. }
  4202. adapter->tx_ring->next_to_clean = i;
  4203. if(netif_queue_stopped(netdev) &&
  4204. (E1000_DESC_UNUSED(adapter->tx_ring) > E1000_TX_QUEUE_WAKE)) {
  4205. netif_start_queue(netdev);
  4206. }
  4207. return skb_head;
  4208. }
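/**
 * e1000_poll_on - switch the interface into Click polling mode
 * @dev: network interface device structure
 *
 * Marks the device as polled and masks the adapter's interrupts,
 * with local interrupts disabled around the transition.
 **/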
  4209. static int
  4210. e1000_poll_on(struct net_device *dev)
  4211. {
  4212. struct e1000_adapter *adapter = dev->priv;
  4213. unsigned long flags;
  4214. if (!dev->polling) {
  4215. printk("e1000_poll_on\n");
  4216. local_irq_save(flags);
  4217. local_irq_disable();
  4218. dev->polling = 2;
  4219. e1000_irq_disable(adapter);
  4220. local_irq_restore(flags);
  4221. }
  4222. return 0;
  4223. }
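/**
 * e1000_poll_off - return the interface to interrupt-driven operation
 * @dev: network interface device structure
 **/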
  4224. static int
  4225. e1000_poll_off(struct net_device *dev)
  4226. {
  4227. struct e1000_adapter *adapter = dev->priv;
  4228. if(dev->polling > 0){
  4229. dev->polling = 0;
  4230. e1000_irq_enable(adapter);
  4231. printk("e1000_poll_off\n");
  4232. }
  4233. return 0;
  4234. }
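/**
 * e1000_tx_eob - end-of-batch transmit kick
 * @dev: network interface device structure
 *
 * Writes the transmit tail register (TDT) so the hardware starts
 * fetching the descriptors queued by e1000_tx_pqueue().
 **/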
  4235. static int
  4236. e1000_tx_eob(struct net_device *dev)
  4237. {
  4238. struct e1000_adapter *adapter = dev->priv;
  4239. E1000_WRITE_REG(&adapter->hw, TDT, adapter->tx_ring->next_to_use);
  4240. return 0;
  4241. }
  4242. static int
  4243. e1000_tx_start(struct net_device *dev)
  4244. {
  4245. /* printk("e1000_tx_start called\n"); */
  4246. e1000_tx_eob(dev);
  4247. return 0;
  4248. }
  4249. #ifdef DEBUG_PRINT
  4250. /* debugging tools */
  4251. #define PRT_HEX(str,value) printk("skb->%-10s = 0x%08x\n", str, (unsigned int)value);
  4252. #define PRT_DEC(str,value) printk("skb->%-10s = %d\n", str, value);
  4253. void e1000_print_skb(struct sk_buff* skb)
  4254. {
  4255. int i;
  4256. printk("========================\n");
  4257. printk("skb = 0x%08x\n", (unsigned int)skb);
  4258. PRT_HEX("next", skb->next);
  4259. PRT_HEX("prev", skb->prev);
  4260. PRT_DEC("len", skb->len);
  4261. PRT_HEX("data", skb->data);
  4262. PRT_HEX("tail", skb->tail);
  4263. PRT_HEX("dev", skb->dev);
  4264. PRT_DEC("cloned", skb->cloned);
  4265. PRT_DEC("pkt_type", skb->pkt_type);
  4266. PRT_DEC("users", skb->users);
  4267. PRT_DEC("truesize", skb->truesize);
  4268. PRT_HEX("head", skb->head);
  4269. PRT_HEX("end", skb->end);
  4270. PRT_HEX("list", skb->list);
  4271. PRT_DEC("data_len", skb->data_len);
  4272. PRT_HEX("csum", skb->csum);
  4273. PRT_HEX("skb_shinfo", skb_shinfo(skb));
  4274. PRT_HEX("skb_shinfo->frag_list", skb_shinfo(skb)->frag_list);
  4275. PRT_DEC("skb_shinfo->nr_frags", skb_shinfo(skb)->nr_frags);
  4276. PRT_DEC("skb_shinfo->dataref", skb_shinfo(skb)->dataref);
  4277. for (i=0; i<skb_shinfo(skb)->nr_frags && i<8; ++i)
  4278. printk("skb->skb_shinfo->frags[%d] = 0x%08x\n", i, skb_shinfo(skb)->frags[i]);
  4279. }
  4280. void e1000_print_rx_desc(struct e1000_rx_desc *rx_desc)
  4281. {
  4282. printk("rx_desc = 0x%08x\n", rx_desc);
  4283. printk("rx_desc->buffer_addr = 0x%08x\n", rx_desc->buffer_addr);
  4284. printk("rx_desc->length = %d\n", rx_desc->length);
  4285. printk("rx_desc->csum = 0x%04x\n", rx_desc->csum);
  4286. printk("rx_desc->status = 0x%02x\n", rx_desc->status);
  4287. printk("rx_desc->errors = 0x%02x\n", rx_desc->errors);
  4288. printk("rx_desc->special = 0x%04x\n", rx_desc->special);
  4289. }
  4290. void e1000_print_rx_buffer_info(struct e1000_buffer *bi)
  4291. {
  4292. printk("buffer_info = 0x%08x\n", bi);
  4293. printk("buffer_info->skb = 0x%08x\n", bi->skb);
  4294. printk("buffer_info->length = 0x%08x (%d)\n", bi->length, bi->length);
  4295. printk("buffer_info->time_stamp = 0x%08x\n", bi->time_stamp);
  4296. }
  4297. void e1000_print_rx_desc_ring(struct e1000_desc_ring *desc_ring)
  4298. {
  4299. int i;
  4300. struct e1000_buffer *bi;
  4301. struct e1000_rx_desc *desc;
  4302. printk("\n");
  4303. printk("desc_ring = 0x%08x\n", desc_ring);
  4304. printk("desc_ring->desc = 0x%08x\n", desc_ring->desc);
  4305. printk("desc_ring->dma = 0x%08x\n", desc_ring->dma);
  4306. printk("desc_ring->size = 0x%08x (%d)\n", desc_ring->size, desc_ring->size);
  4307. printk("desc_ring->count = 0x%08x (%d)\n", desc_ring->count, desc_ring->count);
  4308. printk("desc_ring->next_to_use = 0x%08x (%d)\n", desc_ring->next_to_use, desc_ring->next_to_use);
  4309. printk("desc_ring->next_to_clean = 0x%08x (%d)\n", desc_ring->next_to_clean, desc_ring->next_to_clean);
  4310. printk("desc_ring->buffer_info = 0x%08x\n", desc_ring->buffer_info);
  4311. printk("\n");
  4312. bi = desc_ring->buffer_info;
  4313. desc = desc_ring->desc;
  4314. for (i=0; i<desc_ring->count; ++i) {
  4315. printk("===================================================== desc/buffer_info # %d\n", i);
  4316. e1000_print_rx_buffer_info(bi++);
  4317. e1000_print_rx_desc(desc++);
  4318. }
  4319. }
  4320. #undef PRT_HEX
  4321. #undef PRT_DEC
  4322. #endif
  4323. /* e1000_main.c */