PageRenderTime 68ms CodeModel.GetById 28ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/e1000-6.x/src/e1000_main.c

https://github.com/bhesmans/click
C | 5042 lines | 3632 code | 711 blank | 699 comment | 558 complexity | d9e8f1bb3eb4ca7692a3fe71ae6225ac MD5 | raw file
Possible License(s): GPL-2.0, BSD-3-Clause

Large files are truncated; click here to view the full file

  1. /*******************************************************************************
  2. Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
  3. This program is free software; you can redistribute it and/or modify it
  4. under the terms of the GNU General Public License as published by the Free
  5. Software Foundation; either version 2 of the License, or (at your option)
  6. any later version.
  7. This program is distributed in the hope that it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc., 59
  13. Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  14. The full GNU General Public License is included in this distribution in the
  15. file called LICENSE.
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  19. *******************************************************************************/
  20. #include "e1000.h"
  21. /* Change Log
  22. * 6.0.58 4/20/05
  23. * o e1000_set_spd_dplx tests for compatible speed/duplex specification
  24. * for fiber adapters
  25. * 6.0.57 4/19/05
  26. * o Added code to fix register test failure for devices >= 82571
  27. *
  28. * 6.0.52 3/15/05
  29. * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
  30. * calls, one from mii_ioctl and other from within update_stats while
  31. * processing MIIREG ioctl.
  32. *
  33. * 6.1.2 4/13/05
  34. * o Fixed ethtool diagnostics
  35. * o Enabled flow control to take default eeprom settings
  36. * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
  37. * calls, one from mii_ioctl and other from within update_stats while processing
  38. * MIIREG ioctl.
  39. * 6.0.55 3/23/05
  40. * o Support for MODULE_VERSION
  41. * o Fix APM setting for 82544 based adapters
  42. * 6.0.54 3/26/05
  43. * o Added a timer to expire packets that were deferred for cleanup
  44. * 6.0.52 3/15/05
  45. * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
  46. * calls, one from mii_ioctl and other from within update_stats while
  47. * processing MIIREG ioctl.
  48. * 6.0.47 3/2/05
  49. * o Added enhanced functionality to the loopback diags to wrap the
  50. * descriptor rings
  51. * o Added manageability vlan filtering workaround.
  52. *
  53. * 6.0.44+ 2/15/05
  54. * o Added code to handle raw packet based DHCP packets
  55. * o Added code to fix the errata 10 buffer overflow issue
  56. * o Sync up with WR01-05
  57. * o applied Anton's patch to resolve tx hang in hardware
  58. * o e1000 timeouts with early writeback patch
  59. * o Removed Queensport IDs
  60. * o fixed driver panic if MAC receives a bad large packets when packet
  61. * split is enabled
  62. * o Applied Andrew Mortons patch - e1000 stops working after resume
  63. * 5.2.29 12/24/03
  64. * o Bug fix: Endianess issue causing ethtool diags to fail on ppc.
  65. * o Bug fix: Use pdev->irq instead of netdev->irq for MSI support.
  66. * o Report driver message on user override of InterruptThrottleRate module
  67. * parameter.
  68. * o Bug fix: Change I/O address storage from uint32_t to unsigned long.
  69. * o Feature: Added ethtool RINGPARAM support.
  70. * o Feature: Added netpoll support.
  71. * o Bug fix: Race between Tx queue and Tx clean fixed with a spin lock.
  72. * o Bug fix: Allow 1000/Full setting for autoneg param for fiber connections.
  73. * Jon D Mason [jonmason@us.ibm.com].
  74. *
  75. * 5.2.22 10/15/03
  76. * o Bug fix: SERDES devices might be connected to a back-plane switch that
  77. * doesn't support auto-neg, so add the capability to force 1000/Full.
  78. * Also, since forcing 1000/Full, sample RxSynchronize bit to detect link
  79. * state.
  80. * o Bug fix: Flow control settings for hi/lo watermark didn't consider
  81. * changes in the RX FIFO size, which could occur with Jumbo Frames or with
  82. * the reduced FIFO in 82547.
  83. * o Bug fix: Better propagation of error codes.
  84. * [Janice Girouard (janiceg -a-t- us.ibm.com)]
  85. * o Bug fix: hang under heavy Tx stress when running out of Tx descriptors;
  86. * wasn't clearing context descriptor when backing out of send because of
  87. * no-resource condition.
  88. * o Bug fix: check netif_running in dev->poll so we don't have to hang in
  89. * dev->close until all polls are finished. [Rober Olsson
  90. * (robert.olsson@data.slu.se)].
  91. * o Revert TxDescriptor ring size back to 256 since change to 1024 wasn't
  92. * accepted into the kernel.
  93. *
  94. * 5.2.16 8/8/03
  95. */
  96. char e1000_driver_name[] = "e1000";
  97. char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  98. #ifndef CONFIG_E1000_NAPI
  99. #define DRIVERNAPI
  100. #else
  101. #define DRIVERNAPI "-NAPI"
  102. #endif
  103. #define DRV_VERSION "6.1.16.2.DB"DRIVERNAPI
  104. char e1000_driver_version[] = DRV_VERSION;
  105. char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
  106. #if !HAVE___NETIF_RECEIVE_SKB
  107. #define netif_receive_skb(skb) netif_receive_skb((skb), (skb)->protocol, 0)
  108. #endif
  109. /* e1000_pci_tbl - PCI Device ID Table
  110. *
  111. * Last entry must be all 0s
  112. *
  113. * Macro expands to...
  114. * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  115. */
/* PCI device IDs this driver claims; each entry expands to
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} per the macro comment above.
 * The table must end with the all-zero sentinel entry. */
  116. static struct pci_device_id e1000_pci_tbl[] = {
  117. INTEL_E1000_ETHERNET_DEVICE(0x1000),
  118. INTEL_E1000_ETHERNET_DEVICE(0x1001),
  119. INTEL_E1000_ETHERNET_DEVICE(0x1004),
  120. INTEL_E1000_ETHERNET_DEVICE(0x1008),
  121. INTEL_E1000_ETHERNET_DEVICE(0x1009),
  122. INTEL_E1000_ETHERNET_DEVICE(0x100C),
  123. INTEL_E1000_ETHERNET_DEVICE(0x100D),
  124. INTEL_E1000_ETHERNET_DEVICE(0x100E),
  125. INTEL_E1000_ETHERNET_DEVICE(0x100F),
  126. INTEL_E1000_ETHERNET_DEVICE(0x1010),
  127. INTEL_E1000_ETHERNET_DEVICE(0x1011),
  128. INTEL_E1000_ETHERNET_DEVICE(0x1012),
  129. INTEL_E1000_ETHERNET_DEVICE(0x1013),
  130. INTEL_E1000_ETHERNET_DEVICE(0x1014),
  131. INTEL_E1000_ETHERNET_DEVICE(0x1015),
  132. INTEL_E1000_ETHERNET_DEVICE(0x1016),
  133. INTEL_E1000_ETHERNET_DEVICE(0x1017),
  134. INTEL_E1000_ETHERNET_DEVICE(0x1018),
  135. INTEL_E1000_ETHERNET_DEVICE(0x1019),
  136. INTEL_E1000_ETHERNET_DEVICE(0x101A),
  137. INTEL_E1000_ETHERNET_DEVICE(0x101D),
  138. INTEL_E1000_ETHERNET_DEVICE(0x101E),
  139. INTEL_E1000_ETHERNET_DEVICE(0x1026),
  140. INTEL_E1000_ETHERNET_DEVICE(0x1027),
  141. INTEL_E1000_ETHERNET_DEVICE(0x1028),
  142. INTEL_E1000_ETHERNET_DEVICE(0x105E),
  143. INTEL_E1000_ETHERNET_DEVICE(0x105F),
  144. INTEL_E1000_ETHERNET_DEVICE(0x1060),
  145. INTEL_E1000_ETHERNET_DEVICE(0x1075),
  146. INTEL_E1000_ETHERNET_DEVICE(0x1076),
  147. INTEL_E1000_ETHERNET_DEVICE(0x1077),
  148. INTEL_E1000_ETHERNET_DEVICE(0x1078),
  149. INTEL_E1000_ETHERNET_DEVICE(0x1079),
  150. INTEL_E1000_ETHERNET_DEVICE(0x107A),
  151. INTEL_E1000_ETHERNET_DEVICE(0x107B),
  152. INTEL_E1000_ETHERNET_DEVICE(0x107C),
  153. INTEL_E1000_ETHERNET_DEVICE(0x107D),
  154. INTEL_E1000_ETHERNET_DEVICE(0x107E),
  155. INTEL_E1000_ETHERNET_DEVICE(0x107F),
  156. INTEL_E1000_ETHERNET_DEVICE(0x108A),
  157. INTEL_E1000_ETHERNET_DEVICE(0x108B),
  158. INTEL_E1000_ETHERNET_DEVICE(0x108C),
  159. INTEL_E1000_ETHERNET_DEVICE(0x109A),
  160. INTEL_E1000_ETHERNET_DEVICE(0x10A0),
  161. INTEL_E1000_ETHERNET_DEVICE(0x10A1),
  162. /* required last entry */
  163. {0,}
  164. };
/* Expose the ID table so module tools (modpost/udev) can autoload the driver. */
  165. MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  166. int e1000_up(struct e1000_adapter *adapter);
  167. void e1000_down(struct e1000_adapter *adapter);
  168. void e1000_reset(struct e1000_adapter *adapter);
  169. int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
  170. int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  171. int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  172. void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
  173. void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
  174. int e1000_setup_tx_resources(struct e1000_adapter *adapter,
  175. struct e1000_tx_ring *txdr);
  176. int e1000_setup_rx_resources(struct e1000_adapter *adapter,
  177. struct e1000_rx_ring *rxdr);
  178. void e1000_free_tx_resources(struct e1000_adapter *adapter,
  179. struct e1000_tx_ring *tx_ring);
  180. void e1000_free_rx_resources(struct e1000_adapter *adapter,
  181. struct e1000_rx_ring *rx_ring);
  182. void e1000_update_stats(struct e1000_adapter *adapter);
  183. static int e1000_init_module(void);
  184. static void e1000_exit_module(void);
  185. static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  186. static void __devexit e1000_remove(struct pci_dev *pdev);
  187. static int e1000_alloc_queues(struct e1000_adapter *adapter);
  188. #ifdef CONFIG_E1000_MQ
  189. static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
  190. #endif
  191. static int e1000_sw_init(struct e1000_adapter *adapter);
  192. static int e1000_open(struct net_device *netdev);
  193. static int e1000_close(struct net_device *netdev);
  194. static void e1000_configure_tx(struct e1000_adapter *adapter);
  195. static void e1000_configure_rx(struct e1000_adapter *adapter);
  196. static void e1000_setup_rctl(struct e1000_adapter *adapter);
  197. static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
  198. static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
  199. static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
  200. struct e1000_tx_ring *tx_ring);
  201. static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
  202. struct e1000_rx_ring *rx_ring);
  203. static void e1000_set_multi(struct net_device *netdev);
  204. static void e1000_update_phy_info(unsigned long data);
  205. static void e1000_watchdog(unsigned long data);
  206. static void e1000_watchdog_1(struct e1000_adapter *adapter);
  207. static void e1000_82547_tx_fifo_stall(unsigned long data);
  208. static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
  209. static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
  210. static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
  211. static int e1000_set_mac(struct net_device *netdev, void *p);
  212. static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
  213. static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
  214. struct e1000_tx_ring *tx_ring);
  215. #ifdef CONFIG_E1000_NAPI
  216. static int e1000_clean(struct net_device *poll_dev, int *budget);
  217. static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
  218. struct e1000_rx_ring *rx_ring,
  219. int *work_done, int work_to_do);
  220. static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  221. struct e1000_rx_ring *rx_ring,
  222. int *work_done, int work_to_do);
  223. #else
  224. static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
  225. struct e1000_rx_ring *rx_ring);
  226. static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  227. struct e1000_rx_ring *rx_ring);
  228. #endif
  229. static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
  230. struct e1000_rx_ring *rx_ring);
  231. static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
  232. struct e1000_rx_ring *rx_ring);
  233. static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
  234. #ifdef SIOCGMIIPHY
  235. static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
  236. int cmd);
  237. #endif
  238. void set_ethtool_ops(struct net_device *netdev);
  239. extern int ethtool_ioctl(struct ifreq *ifr);
  240. extern int e1000_bypass_ctrl_ioctl(struct net_device *netdev, struct ifreq *ifr);
  241. static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
  242. static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
  243. static void e1000_tx_timeout(struct net_device *dev);
  244. static void e1000_tx_timeout_task(struct net_device *dev);
  245. static void e1000_smartspeed(struct e1000_adapter *adapter);
  246. static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
  247. struct sk_buff *skb);
  248. #ifdef NETIF_F_HW_VLAN_TX
  249. static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
  250. static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
  251. static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
  252. static void e1000_restore_vlan(struct e1000_adapter *adapter);
  253. #endif
  254. static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
  255. static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
  256. #ifdef CONFIG_PM
  257. static int e1000_resume(struct pci_dev *pdev);
  258. #endif
  259. /* For Click polling */
  260. static int e1000_tx_pqueue(struct net_device *dev, struct sk_buff *skb);
  261. static int e1000_tx_start(struct net_device *dev);
  262. static int e1000_rx_refill(struct net_device *dev, struct sk_buff **);
  263. static int e1000_tx_eob(struct net_device *dev);
  264. static struct sk_buff *e1000_tx_clean(struct net_device *dev);
  265. static struct sk_buff *e1000_rx_poll(struct net_device *dev, int *want);
  266. static int e1000_poll_on(struct net_device *dev);
  267. static int e1000_poll_off(struct net_device *dev);
  268. #ifdef CONFIG_NET_POLL_CONTROLLER
  269. /* for netdump / net console */
  270. static void e1000_netpoll (struct net_device *netdev);
  271. #endif
  272. #ifdef CONFIG_E1000_MQ
  273. /* for multiple Rx queues */
  274. void e1000_rx_schedule(void *data);
  275. #endif
/* Reboot notifier: e1000_notify_reboot() is called on system reboot/shutdown.
 * Registered in e1000_init_module(), unregistered in e1000_exit_module(). */
  276. struct notifier_block e1000_notifier_reboot = {
  277. .notifier_call = e1000_notify_reboot,
  278. .next = NULL,
  279. .priority = 0
  280. };
  281. #undef DEBUG_PRINT
  282. #ifdef DEBUG_PRINT
  283. static void e1000_print_rx_buffer_info(struct e1000_buffer *bi);
  284. static void e1000_print_rx_desc(struct e1000_rx_desc *rx_desc);
  285. static void e1000_print_skb(struct sk_buff* skb);
  286. #endif
  287. /* Exported from other modules */
  288. extern void e1000_check_options(struct e1000_adapter *adapter);
/* PCI driver glue: binds e1000_probe/e1000_remove to the device IDs in
 * e1000_pci_tbl. Suspend/resume hooks are compiled in only with CONFIG_PM. */
  289. static struct pci_driver e1000_driver = {
  290. .name = e1000_driver_name,
  291. .id_table = e1000_pci_tbl,
  292. .probe = e1000_probe,
  293. .remove = __devexit_p(e1000_remove),
  294. /* Power Managment Hooks */
  295. #ifdef CONFIG_PM
  296. .suspend = e1000_suspend,
  297. .resume = e1000_resume
  298. #endif
  299. };
  300. MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  301. MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
  302. MODULE_LICENSE("GPL");
  303. MODULE_VERSION(DRV_VERSION);
  304. static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
  305. module_param(debug, int, 0);
  306. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  307. /**
  308. * e1000_init_module - Driver Registration Routine
  309. *
  310. * e1000_init_module is the first routine called when the driver is
  311. * loaded. All it does is register with the PCI subsystem.
  312. **/
  313. static int __init
  314. e1000_init_module(void)
  315. {
  316. int ret;
  317. printk(KERN_INFO "%s - version %s\n",
  318. e1000_driver_string, e1000_driver_version);
  319. printk(KERN_INFO " w/ Click polling\n");
  320. printk(KERN_INFO "%s\n", e1000_copyright);
/* pci_module_init() returns a negative errno on failure; only hook the
 * reboot notifier once the driver is successfully registered with PCI. */
  321. ret = pci_module_init(&e1000_driver);
  322. if(ret >= 0) {
  323. register_reboot_notifier(&e1000_notifier_reboot);
  324. }
  325. return ret;
  326. }
  327. module_init(e1000_init_module);
  328. /**
  329. * e1000_exit_module - Driver Exit Cleanup Routine
  330. *
  331. * e1000_exit_module is called just before the driver is removed
  332. * from memory.
  333. **/
  334. static void __exit
  335. e1000_exit_module(void)
  336. {
/* Mirror of e1000_init_module(): drop the reboot notifier first, then
 * unregister from the PCI subsystem (which triggers e1000_remove per device). */
  337. unregister_reboot_notifier(&e1000_notifier_reboot);
  338. pci_unregister_driver(&e1000_driver);
  339. }
  340. module_exit(e1000_exit_module);
  341. /**
  342. * e1000_irq_disable - Mask off interrupt generation on the NIC
  343. * @adapter: board private structure
  344. **/
  345. static inline void
  346. e1000_irq_disable(struct e1000_adapter *adapter)
  347. {
/* irq_sem counts disable requests; e1000_irq_enable() only re-enables when
 * the count drops back to zero, so disable/enable pairs may nest. */
  348. atomic_inc(&adapter->irq_sem);
/* Mask every interrupt cause (IMC = interrupt mask clear, all bits set),
 * then flush the posted write so the mask takes effect before we wait. */
  349. E1000_WRITE_REG(&adapter->hw, IMC, ~0);
  350. E1000_WRITE_FLUSH(&adapter->hw);
/* Wait for any in-flight interrupt handler on other CPUs to finish. */
  351. synchronize_irq(adapter->pdev->irq);
  352. }
  353. /**
  354. * e1000_irq_enable - Enable default interrupt generation settings
  355. * @adapter: board private structure
  356. **/
  357. static inline void
  358. e1000_irq_enable(struct e1000_adapter *adapter)
  359. {
  360. if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
  361. E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
  362. E1000_WRITE_FLUSH(&adapter->hw);
  363. }
  364. }
  365. #ifdef NETIF_F_HW_VLAN_TX
/* Keep the manageability (firmware) VLAN in the software VLAN filter.
 * The firmware's cookie names a VLAN id it needs to see; if that id is not
 * already registered by the stack, add it (when the cookie says VLAN support
 * is required) and retire the previously tracked id if nothing else uses it. */
  366. void
  367. e1000_update_mng_vlan(struct e1000_adapter *adapter)
  368. {
  369. struct net_device *netdev = adapter->netdev;
  370. uint16_t vid = adapter->hw.mng_cookie.vlan_id;
  371. uint16_t old_vid = adapter->mng_vlan_id;
/* Nothing to do unless a VLAN group has been registered via
 * e1000_vlan_rx_register(). */
  372. if(adapter->vlgrp) {
/* Only act when the stack has no device for the cookie's vid yet. */
  373. if(!adapter->vlgrp->vlan_devices[vid]) {
  374. if(adapter->hw.mng_cookie.status &
  375. E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
  376. e1000_vlan_rx_add_vid(netdev, vid);
  377. adapter->mng_vlan_id = vid;
  378. } else
  379. adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
/* Drop the stale management vid, but never one the stack still uses. */
  380. if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
  381. (vid != old_vid) &&
  382. !adapter->vlgrp->vlan_devices[old_vid])
  383. e1000_vlan_rx_kill_vid(netdev, old_vid);
  384. }
  385. }
  386. }
  387. #endif
  388. int
  389. e1000_up(struct e1000_adapter *adapter)
  390. {
  391. struct net_device *netdev = adapter->netdev;
  392. int i, err;
  393. /* hardware has been reset, we need to reload some things */
  394. /* Reset the PHY if it was previously powered down */
  395. if(adapter->hw.media_type == e1000_media_type_copper) {
  396. uint16_t mii_reg;
  397. e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
  398. if(mii_reg & MII_CR_POWER_DOWN)
  399. e1000_phy_reset(&adapter->hw);
  400. }
  401. e1000_set_multi(netdev);
  402. #ifdef NETIF_F_HW_VLAN_TX
  403. e1000_restore_vlan(adapter);
  404. #endif
  405. e1000_configure_tx(adapter);
  406. e1000_setup_rctl(adapter);
  407. e1000_configure_rx(adapter);
  408. for (i = 0; i < adapter->num_queues; i++)
  409. adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
  410. #ifdef CONFIG_PCI_MSI
  411. if(adapter->hw.mac_type > e1000_82547_rev_2) {
  412. adapter->have_msi = TRUE;
  413. if((err = pci_enable_msi(adapter->pdev))) {
  414. DPRINTK(PROBE, ERR,
  415. "Unable to allocate MSI interrupt Error: %d\n", err);
  416. adapter->have_msi = FALSE;
  417. }
  418. }
  419. #endif
  420. if((err = request_irq(adapter->pdev->irq, &e1000_intr,
  421. SA_SHIRQ | SA_SAMPLE_RANDOM,
  422. netdev->name, netdev))) {
  423. DPRINTK(PROBE, ERR,
  424. "Unable to allocate interrupt Error: %d\n", err);
  425. return err;
  426. }
  427. mod_timer(&adapter->watchdog_timer, jiffies);
  428. #ifdef CONFIG_E1000_NAPI
  429. netif_poll_enable(netdev);
  430. #endif
  431. e1000_irq_enable(adapter);
  432. return 0;
  433. }
/* Take the interface down. The ordering below is deliberate: interrupts are
 * masked first, the IRQ (and MSI vector) released, timers stopped, then the
 * queues and rings are drained before the hardware is reset. */
  434. void
  435. e1000_down(struct e1000_adapter *adapter)
  436. {
  437. struct net_device *netdev = adapter->netdev;
  438. e1000_irq_disable(adapter);
  439. #ifdef CONFIG_E1000_MQ
/* Spin until all cross-CPU Rx-schedule calls have drained. */
  440. while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
  441. #endif
  442. free_irq(adapter->pdev->irq, netdev);
  443. #ifdef CONFIG_PCI_MSI
/* Release the MSI vector only if e1000_up() actually enabled it. */
  444. if(adapter->hw.mac_type > e1000_82547_rev_2 &&
  445. adapter->have_msi == TRUE)
  446. pci_disable_msi(adapter->pdev);
  447. #endif
/* del_timer_sync() also waits for a running timer handler to finish. */
  448. del_timer_sync(&adapter->tx_fifo_stall_timer);
  449. del_timer_sync(&adapter->watchdog_timer);
  450. del_timer_sync(&adapter->phy_info_timer);
  451. #ifdef CONFIG_E1000_NAPI
  452. netif_poll_disable(netdev);
  453. #endif
  454. adapter->link_speed = 0;
  455. adapter->link_duplex = 0;
  456. netif_carrier_off(netdev);
  457. netif_stop_queue(netdev);
  458. e1000_reset(adapter);
  459. e1000_clean_all_tx_rings(adapter);
  460. e1000_clean_all_rx_rings(adapter);
  461. /* If WoL is not enabled and management mode is not IAMT
  462. * Power down the PHY so no link is implied when interface is down */
  463. if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
  464. adapter->hw.media_type == e1000_media_type_copper &&
  465. !e1000_check_mng_mode(&adapter->hw) &&
  466. !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
  467. uint16_t mii_reg;
  468. e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
  469. mii_reg |= MII_CR_POWER_DOWN;
  470. e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
/* Give the PHY time to latch the power-down before we return. */
  471. mdelay(1);
  472. }
  473. }
/* Reset the MAC and reprogram it: partition the packet buffer (PBA) between
 * Rx and Tx per MAC type, derive flow-control watermarks from the partition,
 * then run the hardware reset/init sequence and restore ancillary settings. */
  474. void
  475. e1000_reset(struct e1000_adapter *adapter)
  476. {
  477. struct net_device *netdev = adapter->netdev;
  478. uint32_t pba, manc;
  479. uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
  480. uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
  481. /* Repartition Pba for greater than 9k mtu
  482. * To take effect CTRL.RST is required.
  483. */
  484. switch (adapter->hw.mac_type) {
  485. case e1000_82547:
  486. case e1000_82547_rev_2:
  487. pba = E1000_PBA_30K;
  488. break;
  489. case e1000_82571:
  490. case e1000_82572:
  491. pba = E1000_PBA_38K;
  492. break;
  493. case e1000_82573:
  494. pba = E1000_PBA_12K;
  495. break;
  496. default:
  497. pba = E1000_PBA_48K;
  498. break;
  499. }
/* Jumbo-frame case (Rx buffers > 8 KB): shift FIFO space toward Tx and
 * recompute the XOFF point so one more full-size frame still fits in Rx. */
  500. if((adapter->hw.mac_type != e1000_82573) &&
  501. (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
  502. pba -= 8; /* allocate more FIFO for Tx */
  503. /* send an XOFF when there is enough space in the
  504. * Rx FIFO to hold one extra full size Rx packet
  505. */
  506. fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
  507. ETHERNET_FCS_SIZE + 1;
  508. fc_low_water_mark = fc_high_water_mark + 8;
  509. }
/* 82547 Tx-FIFO workaround state: track head/size of the on-chip Tx FIFO. */
  510. if(adapter->hw.mac_type == e1000_82547) {
  511. adapter->tx_fifo_head = 0;
  512. adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
  513. adapter->tx_fifo_size =
  514. (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
  515. atomic_set(&adapter->tx_fifo_stall, 0);
  516. }
  517. E1000_WRITE_REG(&adapter->hw, PBA, pba);
  518. /* flow control settings */
/* Watermarks are measured from the top of the Rx partition downward. */
  519. adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
  520. fc_high_water_mark;
  521. adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
  522. fc_low_water_mark;
  523. adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
  524. adapter->hw.fc_send_xon = 1;
  525. adapter->hw.fc = adapter->hw.original_fc;
  526. /* Allow time for pending master requests to run */
  527. e1000_reset_hw(&adapter->hw);
/* Clear Wake-Up Control on MACs that have it (>= 82544). */
  528. if(adapter->hw.mac_type >= e1000_82544)
  529. E1000_WRITE_REG(&adapter->hw, WUC, 0);
  530. if(e1000_init_hw(&adapter->hw))
  531. DPRINTK(PROBE, ERR, "Hardware Error\n");
  532. #ifdef NETIF_F_HW_VLAN_TX
  533. e1000_update_mng_vlan(adapter);
  534. #endif
  535. /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
  536. E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
  537. e1000_reset_adaptive(&adapter->hw);
  538. e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
/* Re-enable ARP pass-through for the management controller if in use. */
  539. if(adapter->en_mng_pt) {
  540. manc = E1000_READ_REG(&adapter->hw, MANC);
  541. manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
  542. E1000_WRITE_REG(&adapter->hw, MANC, manc);
  543. }
  544. }
  545. /**
  546. * e1000_probe - Device Initialization Routine
  547. * @pdev: PCI device information struct
  548. * @ent: entry in e1000_pci_tbl
  549. *
  550. * Returns 0 on success, negative on failure
  551. *
  552. * e1000_probe initializes an adapter identified by a pci_dev structure.
  553. * The OS initialization, configuring of the adapter private structure,
  554. * and a hardware reset occur.
  555. **/
  556. #define SHOW_INTERFACE(d) printk("Interface mac_type=%d\n", d->hw.mac_type)
  557. static int __devinit
  558. e1000_probe(struct pci_dev *pdev,
  559. const struct pci_device_id *ent)
  560. {
  561. struct net_device *netdev;
  562. struct e1000_adapter *adapter;
  563. unsigned long mmio_start, mmio_len;
  564. uint32_t ctrl_ext;
  565. uint32_t swsm;
  566. static int cards_found = 0;
  567. int i, err, pci_using_dac;
  568. uint16_t eeprom_data;
  569. uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
/* Phase 1: PCI bring-up — enable the device, pick 64-bit DMA when the
 * platform supports it (falling back to 32-bit), and claim the BARs.
 * NOTE(review): the error paths below never call pci_disable_device(),
 * so a failed probe leaves the device enabled — verify against callers. */
  570. if((err = pci_enable_device(pdev)))
  571. return err;
  572. if(!(err = pci_set_dma_mask(pdev, PCI_DMA_64BIT))) {
  573. pci_using_dac = 1;
  574. } else {
  575. if((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT))) {
  576. E1000_ERR("No usable DMA configuration, aborting\n");
  577. return err;
  578. }
  579. pci_using_dac = 0;
  580. }
  581. if((err = pci_request_regions(pdev, e1000_driver_name)))
  582. return err;
  583. pci_set_master(pdev);
/* Phase 2: allocate the net_device with the adapter private area and wire
 * up the back-pointers between netdev, adapter, pdev and hw. */
  584. netdev = alloc_etherdev(sizeof(struct e1000_adapter));
  585. if(!netdev) {
  586. err = -ENOMEM;
  587. goto err_alloc_etherdev;
  588. }
  589. SET_MODULE_OWNER(netdev);
  590. SET_NETDEV_DEV(netdev, &pdev->dev);
  591. pci_set_drvdata(pdev, netdev);
  592. adapter = netdev_priv(netdev);
  593. adapter->netdev = netdev;
  594. adapter->pdev = pdev;
  595. adapter->hw.back = adapter;
  596. adapter->msg_enable = (1 << debug) - 1;
/* Phase 3: map BAR 0 (register space) and locate the optional I/O BAR. */
  597. mmio_start = pci_resource_start(pdev, BAR_0);
  598. mmio_len = pci_resource_len(pdev, BAR_0);
/* NOTE(review): debug print before e1000_sw_init(); hw.mac_type is
 * presumably still unset here — confirm SHOW_INTERFACE output is expected. */
  599. SHOW_INTERFACE(adapter);
  600. adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
  601. if(!adapter->hw.hw_addr) {
  602. err = -EIO;
  603. goto err_ioremap;
  604. }
  605. for(i = BAR_1; i <= BAR_5; i++) {
  606. if(pci_resource_len(pdev, i) == 0)
  607. continue;
  608. if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
  609. adapter->hw.io_base = pci_resource_start(pdev, i);
  610. break;
  611. }
  612. }
/* Phase 4: install the net_device operations (pre-net_device_ops era). */
  613. netdev->open = &e1000_open;
  614. netdev->stop = &e1000_close;
  615. netdev->hard_start_xmit = &e1000_xmit_frame;
  616. netdev->get_stats = &e1000_get_stats;
  617. netdev->set_multicast_list = &e1000_set_multi;
  618. netdev->set_mac_address = &e1000_set_mac;
  619. netdev->change_mtu = &e1000_change_mtu;
  620. netdev->do_ioctl = &e1000_ioctl;
  621. set_ethtool_ops(netdev);
  622. #ifdef HAVE_TX_TIMEOUT
  623. netdev->tx_timeout = &e1000_tx_timeout;
  624. netdev->watchdog_timeo = 5 * HZ;
  625. #endif
  626. #ifdef CONFIG_E1000_NAPI
  627. netdev->poll = &e1000_clean;
  628. netdev->weight = 64;
  629. #endif
  630. #ifdef NETIF_F_HW_VLAN_TX
  631. netdev->vlan_rx_register = e1000_vlan_rx_register;
  632. netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
  633. netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
  634. #endif
  635. /* Click - polling extensions */
/* Hooks used by the Click modular router's kernel polling mode; polling
 * starts disabled until userspace flips it on via poll_on. */
  636. netdev->polling = 0;
  637. netdev->rx_poll = e1000_rx_poll;
  638. netdev->rx_refill = e1000_rx_refill;
  639. netdev->tx_queue = e1000_tx_pqueue;
  640. netdev->tx_eob = e1000_tx_eob;
  641. netdev->tx_start = e1000_tx_start;
  642. netdev->tx_clean = e1000_tx_clean;
  643. netdev->poll_off = e1000_poll_off;
  644. netdev->poll_on = e1000_poll_on;
  645. #ifdef CONFIG_NET_POLL_CONTROLLER
  646. netdev->poll_controller = e1000_netpoll;
  647. #endif
  648. strcpy(netdev->name, pci_name(pdev));
  649. netdev->mem_start = mmio_start;
  650. netdev->mem_end = mmio_start + mmio_len;
  651. netdev->base_addr = adapter->hw.io_base;
  652. adapter->bd_number = cards_found;
  653. /* setup the private structure */
  654. if((err = e1000_sw_init(adapter)))
  655. goto err_sw_init;
  656. if((err = e1000_check_phy_reset_block(&adapter->hw)))
  657. DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n")
;
/* Phase 5: advertise offload features according to MAC capabilities. */
  658. #ifdef MAX_SKB_FRAGS
  659. if(adapter->hw.mac_type >= e1000_82543) {
  660. #ifdef NETIF_F_HW_VLAN_TX
  661. netdev->features = NETIF_F_SG |
  662. NETIF_F_HW_CSUM |
  663. NETIF_F_HW_VLAN_TX |
  664. NETIF_F_HW_VLAN_RX |
  665. NETIF_F_HW_VLAN_FILTER;
  666. #else
  667. netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
  668. #endif
  669. }
  670. #ifdef NETIF_F_TSO
  671. if((adapter->hw.mac_type >= e1000_82544) &&
  672. (adapter->hw.mac_type != e1000_82547))
  673. netdev->features |= NETIF_F_TSO;
  674. #ifdef NETIF_F_TSO_IPV6
  675. if(adapter->hw.mac_type > e1000_82547_rev_2)
  676. netdev->features |= NETIF_F_TSO_IPV6;
  677. #endif
  678. #endif
  679. if(pci_using_dac)
  680. netdev->features |= NETIF_F_HIGHDMA;
  681. #endif
  682. #ifdef NETIF_F_LLTX
  683. netdev->features |= NETIF_F_LLTX;
  684. #endif
  685. adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
  686. /* before reading the EEPROM, reset the controller to
  687. * put the device in a known good starting state */
  688. e1000_reset_hw(&adapter->hw);
  689. /* make sure the EEPROM is good */
  690. if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
  691. DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
  692. err = -EIO;
  693. goto err_eeprom;
  694. }
  695. /* copy the MAC address out of the EEPROM */
  696. if(e1000_read_mac_addr(&adapter->hw))
  697. DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
  698. memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
  699. SHOW_INTERFACE(adapter);
  700. if(!is_valid_ether_addr(netdev->dev_addr)) {
  701. DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
  702. err = -EIO;
  703. goto err_eeprom;
  704. }
  705. e1000_read_part_num(&adapter->hw, &(adapter->part_num));
  706. e1000_get_bus_info(&adapter->hw);
/* Phase 6: initialize the driver's timers and the Tx-timeout work item.
 * The timers are armed later (e.g. watchdog in e1000_up()). */
  707. init_timer(&adapter->tx_fifo_stall_timer);
  708. adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
  709. adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
  710. init_timer(&adapter->watchdog_timer);
  711. adapter->watchdog_timer.function = &e1000_watchdog;
  712. adapter->watchdog_timer.data = (unsigned long) adapter;
  713. init_timer(&adapter->phy_info_timer);
  714. adapter->phy_info_timer.function = &e1000_update_phy_info;
  715. adapter->phy_info_timer.data = (unsigned long) adapter;
  716. INIT_WORK(&adapter->tx_timeout_task,
  717. (void (*)(void *))e1000_tx_timeout_task, netdev);
  718. /* we're going to reset, so assume we have no link for now */
  719. netif_carrier_off(netdev);
  720. netif_stop_queue(netdev);
  721. e1000_check_options(adapter);
  722. /* Initial Wake on LAN setting
  723. * If APM wake is enabled in the EEPROM,
  724. * enable the ACPI Magic Packet filter
  725. */
  726. switch(adapter->hw.mac_type) {
  727. case e1000_82542_rev2_0:
  728. case e1000_82542_rev2_1:
  729. case e1000_82543:
  730. break;
  731. case e1000_82544:
  732. e1000_read_eeprom(&adapter->hw,
  733. EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
  734. eeprom_apme_mask = E1000_EEPROM_82544_APM;
  735. break;
  736. case e1000_82546:
  737. case e1000_82546_rev_3:
/* Dual-port 82546: the second function reads its WoL bits from port B. */
  738. if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
  739. && (adapter->hw.media_type == e1000_media_type_copper)) {
  740. e1000_read_eeprom(&adapter->hw,
  741. EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
  742. break;
  743. }
  744. /* Fall Through */
  745. default:
  746. e1000_read_eeprom(&adapter->hw,
  747. EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
  748. break;
  749. }
  750. if(eeprom_data & eeprom_apme_mask)
  751. adapter->wol |= E1000_WUFC_MAG;
  752. /* reset the hardware with the new settings */
  753. e1000_reset(adapter);
  754. /* Let firmware know the driver has taken over */
/* 82571/82572 signal via CTRL_EXT.DRV_LOAD; 82573 via SWSM.DRV_LOAD. */
  755. switch(adapter->hw.mac_type) {
  756. case e1000_82571:
  757. case e1000_82572:
  758. ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  759. E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  760. ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
  761. break;
  762. case e1000_82573:
  763. swsm = E1000_READ_REG(&adapter->hw, SWSM);
  764. E1000_WRITE_REG(&adapter->hw, SWSM,
  765. swsm | E1000_SWSM_DRV_LOAD);
  766. break;
  767. default:
  768. break;
  769. }
/* Phase 7: hand the device to the network stack under a generic ethN name. */
  770. strcpy(netdev->name, "eth%d");
  771. if((err = register_netdev(netdev)))
  772. goto err_register;
  773. DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
  774. cards_found++;
  775. return 0;
/* Error unwind: labels release resources in reverse order of acquisition. */
  776. err_register:
  777. err_sw_init:
  778. err_eeprom:
  779. iounmap(adapter->hw.hw_addr);
  780. err_ioremap:
  781. free_netdev(netdev);
  782. err_alloc_etherdev:
  783. pci_release_regions(pdev);
  784. return err;
  785. }
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.  Order matters here: firmware handoff and ARP offload are
 * programmed while the registers are still mapped, the netdev is
 * unregistered before its backing memory is freed, and free_netdev()
 * (which also frees the adapter) comes last.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl_ext;
	uint32_t manc, swsm;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* If manageability firmware talks to the NIC over SMBus, leave
	 * ARP offload enabled so the management agent stays reachable
	 * after the driver is gone (82540 and newer copper parts only). */
	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if(manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
		}
	}

	/* Clear the DRV_LOAD bit so firmware knows the driver has
	 * released the hardware (mirror image of the probe-time set). */
	switch(adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	unregister_netdev(netdev);

#ifdef CONFIG_E1000_NAPI
	/* drop the per-queue references taken with dev_hold() in
	 * e1000_sw_init() */
	for (i = 0; i < adapter->num_queues; i++)
		__dev_put(&adapter->polling_netdev[i]);
#endif

	/* reset the PHY unless a manageability reset block forbids it */
	if(!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

#ifdef CONFIG_E1000_MQ
	free_percpu(adapter->cpu_netdev);
	free_percpu(adapter->cpu_tx_ring);
#endif

	/* frees the adapter private area as well -- must be last */
	free_netdev(netdev);
}
  848. /**
  849. * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
  850. * @adapter: board private structure to initialize
  851. *
  852. * e1000_sw_init initializes the Adapter private data structure.
  853. * Fields are initialized based on PCI device information and
  854. * OS network device settings (MTU size).
  855. **/
  856. static int __devinit
  857. e1000_sw_init(struct e1000_adapter *adapter)
  858. {
  859. struct e1000_hw *hw = &adapter->hw;
  860. struct net_device *netdev = adapter->netdev;
  861. struct pci_dev *pdev = adapter->pdev;
  862. #ifdef CONFIG_E1000_NAPI
  863. int i;
  864. #endif
  865. /* PCI config space info */
  866. hw->vendor_id = pdev->vendor;
  867. hw->device_id = pdev->device;
  868. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  869. hw->subsystem_id = pdev->subsystem_device;
  870. pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
  871. pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
  872. adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  873. adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
  874. hw->max_frame_size = netdev->mtu +
  875. ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  876. hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
  877. /* identify the MAC */
  878. if(hw->device_id == 0x10A0 || hw->device_id == 0x10A1) {
  879. DPRINTK(PROBE, INFO, "Setting MAC Type for Dewey Jones Beach Device\n");
  880. hw->mac_type = e1000_82571;
  881. }
  882. else if(e1000_set_mac_type(hw)) {
  883. DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
  884. return -EIO;
  885. }
  886. /* initialize eeprom parameters */
  887. if(e1000_init_eeprom_params(hw)) {
  888. E1000_ERR("EEPROM initialization failed\n");
  889. return -EIO;
  890. }
  891. switch(hw->mac_type) {
  892. default:
  893. break;
  894. case e1000_82541:
  895. case e1000_82547:
  896. case e1000_82541_rev_2:
  897. case e1000_82547_rev_2:
  898. hw->phy_init_script = 1;
  899. break;
  900. }
  901. e1000_set_media_type(hw);
  902. hw->wait_autoneg_complete = FALSE;
  903. hw->tbi_compatibility_en = TRUE;
  904. hw->adaptive_ifs = TRUE;
  905. /* Copper options */
  906. if(hw->media_type == e1000_media_type_copper) {
  907. hw->mdix = AUTO_ALL_MODES;
  908. hw->disable_polarity_correction = FALSE;
  909. hw->master_slave = E1000_MASTER_SLAVE;
  910. }
  911. #ifdef CONFIG_E1000_MQ
  912. /* Number of supported queues */
  913. switch (hw->mac_type) {
  914. case e1000_82571:
  915. case e1000_82572:
  916. adapter->num_queues = 2;
  917. break;
  918. default:
  919. adapter->num_queues = 1;
  920. break;
  921. }
  922. adapter->num_queues = min(adapter->num_queues, num_online_cpus());
  923. #else
  924. adapter->num_queues = 1;
  925. #endif
  926. if (e1000_alloc_queues(adapter)) {
  927. DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
  928. return -ENOMEM;
  929. }
  930. #ifdef CONFIG_E1000_NAPI
  931. for (i = 0; i < adapter->num_queues; i++) {
  932. adapter->polling_netdev[i].priv = adapter;
  933. adapter->polling_netdev[i].poll = &e1000_clean;
  934. adapter->polling_netdev[i].weight = 64;
  935. dev_hold(&adapter->polling_netdev[i]);
  936. set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
  937. }
  938. #endif
  939. #ifdef CONFIG_E1000_MQ
  940. e1000_setup_queue_mapping(adapter);
  941. #endif
  942. atomic_set(&adapter->irq_sem, 1);
  943. spin_lock_init(&adapter->stats_lock);
  944. return 0;
  945. }
  946. /**
  947. * e1000_alloc_queues - Allocate memory for all rings
  948. * @adapter: board private structure to initialize
  949. *
  950. * We allocate one ring per queue at run-time since we don't know the
  951. * number of queues at compile-time. The polling_netdev array is
  952. * intended for Multiqueue, but should work fine with a single queue.
  953. **/
  954. static int __devinit
  955. e1000_alloc_queues(struct e1000_adapter *adapter)
  956. {
  957. int size;
  958. size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
  959. adapter->tx_ring = kmalloc(size, GFP_KERNEL);
  960. if (!adapter->tx_ring)
  961. return -ENOMEM;
  962. memset(adapter->tx_ring, 0, size);
  963. size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
  964. adapter->rx_ring = kmalloc(size, GFP_KERNEL);
  965. if (!adapter->rx_ring) {
  966. kfree(adapter->tx_ring);
  967. return -ENOMEM;
  968. }
  969. memset(adapter->rx_ring, 0, size);
  970. #ifdef CONFIG_E1000_NAPI
  971. size = sizeof(struct net_device) * adapter->num_queues;
  972. adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
  973. if (!adapter->polling_netdev) {
  974. kfree(adapter->tx_ring);
  975. kfree(adapter->rx_ring);
  976. return -ENOMEM;
  977. }
  978. memset(adapter->polling_netdev, 0, size);
  979. #endif
  980. return E1000_SUCCESS;
  981. }
#ifdef CONFIG_E1000_MQ
/* Bind each online cpu to a Tx ring (round-robin modulo num_queues)
 * and give the first num_queues cpus a dedicated polling netdev.
 * Called once from e1000_sw_init() after the rings are allocated. */
static void __devinit
e1000_setup_queue_mapping(struct e1000_adapter *adapter)
{
	int i, cpu;

	/* call data used when Rx work is scheduled onto another cpu */
	adapter->rx_sched_call_data.func = e1000_rx_schedule;
	adapter->rx_sched_call_data.info = adapter->netdev;
	cpus_clear(adapter->rx_sched_call_data.cpumask);

	adapter->cpu_netdev = alloc_percpu(struct net_device *);
	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
	/* NOTE(review): alloc_percpu() return values are not checked here;
	 * a NULL would fault in per_cpu_ptr() below -- verify upstream
	 * intent before hardening. */

	/* freeze the online-cpu set while we iterate over it */
	lock_cpu_hotplug();
	i = 0;
	for_each_online_cpu(cpu) {
		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
		/* This is incomplete because we'd like to assign separate
		 * physical cpus to these netdev polling structures and
		 * avoid saturating a subset of cpus.
		 */
		if (i < adapter->num_queues) {
			*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
			adapter->cpu_for_queue[i] = cpu;
		} else
			*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
		i++;
	}
	unlock_cpu_hotplug();
}
#endif
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	if ((err = e1000_setup_all_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */
	if ((err = e1000_setup_all_rx_resources(adapter)))
		goto err_setup_rx;

	/* program the hardware and bring the interface up */
	if ((err = e1000_up(adapter)))
		goto err_up;

#ifdef NETIF_F_HW_VLAN_TX
	/* keep any VLAN used by manageability firmware registered
	 * with the stack */
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}
#endif

	return E1000_SUCCESS;

/* unwind in reverse order of acquisition */
err_up:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);
	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* stop the hardware and interrupts before freeing the rings */
	e1000_down(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

#ifdef NETIF_F_HW_VLAN_TX
	/* release the manageability VLAN registered in e1000_open() */
	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}
#endif

	return 0;
}
  1077. /**
  1078. * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
  1079. * @adapter: address of board private structure
  1080. * @start: address of beginning of memory
  1081. * @len: length of memory
  1082. **/
  1083. static inline boolean_t
  1084. e1000_check_64k_bound(struct e1000_adapter *adapter,
  1085. void *start, unsigned long len)
  1086. {
  1087. unsigned long begin = (unsigned long) start;
  1088. unsigned long end = begin + len;
  1089. /* First rev 82545 and 82546 need to not allow any memory
  1090. * write location to cross 64k boundary due to errata 23 */
  1091. if(adapter->hw.mac_type == e1000_82545 ||
  1092. adapter->hw.mac_type == e1000_82546) {
  1093. return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
  1094. }
  1095. return TRUE;
  1096. }
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the software buffer_info array (vmalloc) and the DMA
 * descriptor ring (pci_alloc_consistent), retrying the DMA allocation
 * once if the ring straddles a 64kB boundary (errata 23 workaround
 * for 82545/82546).
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
                         struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);
	memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
	/* shared failure exit -- also reached by goto from the
	 * errata retry below */
	setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;

		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
				     "at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous -- holding the
		 * misaligned region forces the allocator to return a
		 * different (hopefully aligned) one */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}
  1164. /**
  1165. * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
  1166. * (Descriptors) for all queues
  1167. * @adapter: board private structure
  1168. *
  1169. * If this function returns with an error, then it's possible one or
  1170. * more of the rings is populated (while the rest are not). It is the
  1171. * callers duty to clean those orphaned rings.
  1172. *
  1173. * Return 0 on success, negative on failure
  1174. **/
  1175. int
  1176. e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
  1177. {
  1178. int i, err = 0;
  1179. for (i = 0; i < adapter->num_queues; i++) {
  1180. err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
  1181. if (err) {
  1182. DPRINTK(PROBE, ERR,
  1183. "Allocation for Tx Queue %u failed\n", i);
  1184. break;
  1185. }
  1186. }
  1187. return err;
  1188. }
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset: descriptor ring
 * base/length/head/tail registers per queue, inter-packet gap,
 * interrupt delays, the transmit control register, and the
 * per-descriptor command defaults cached in adapter->txd_cmd.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t tdlen, tctl, tipg, tarc;

	/* Setup the HW Tx Head and Tail descriptor pointers.
	 * Queue 1 is programmed first, then falls through to queue 0. */
	switch (adapter->num_queues) {
	case 2:
		tdba = adapter->tx_ring[1].dma;
		tdlen = adapter->tx_ring[1].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
		E1000_WRITE_REG(hw, TDLEN1, tdlen);
		E1000_WRITE_REG(hw, TDH1, 0);
		E1000_WRITE_REG(hw, TDT1, 0);
		adapter->tx_ring[1].tdh = E1000_TDH1;
		adapter->tx_ring[1].tdt = E1000_TDT1;
		/* Fall Through */
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
		E1000_WRITE_REG(hw, TDLEN, tdlen);
		E1000_WRITE_REG(hw, TDH, 0);
		E1000_WRITE_REG(hw, TDT, 0);
		adapter->tx_ring[0].tdh = E1000_TDH;
		adapter->tx_ring[0].tdt = E1000_TDT;
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}
	E1000_WRITE_REG(hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	E1000_WRITE_REG(hw, TCTL, tctl);

	/* TARC workarounds for 82571/82572.
	 * NOTE(review): the original comment here said "disabled bit 21
	 * to fix network problems when forced to 100/10 Mbps", but the
	 * code actually SETS bit 25 (and conditions bit 28 of TARC1 on
	 * TCTL_MULR) -- verify against the 82571/82572 specification
	 * update before relying on the comment. */
	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= (1 << 25);
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= (1 << 25);
		if (tctl & E1000_TCTL_MULR)
			tarc &= ~(1 << 28);
		else
			tarc |= (1 << 28);
		E1000_WRITE_REG(hw, TARC1, tarc);
	}

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	/* older parts report packet-sent, newer parts report
	 * descriptor-written-back */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Allocates the buffer_info array (vmalloc), the packet-split page
 * bookkeeping arrays (kmalloc), and the DMA descriptor ring
 * (pci_alloc_consistent), retrying the DMA allocation once if the
 * ring straddles a 64kB boundary (errata 23 on 82545/82546).
 *
 * Returns 0 on success, negative on failure
 **/

int
e1000_setup_rx_resources(struct e1000_adapter *adapter,
                         struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	/* newer MACs use the larger packet-split descriptor format */
	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
	/* shared failure exit -- also reached by goto from the
	 * errata retry below */
	setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;

		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
				     "at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous -- holding the
		 * misaligned region forces the allocator to return a
		 * different (hopefully aligned) one */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}
  1376. /**
  1377. * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
  1378. * (Descriptors) for all queues
  1379. * @adapter: board private structure
  1380. *
  1381. * If this function returns with an error, then it's possible one or
  1382. * more of the rings is populated (while the rest are not). It is the
  1383. * callers duty to clean those orphaned rings.
  1384. *
  1385. * Return 0 on success, negative on failure
  1386. **/
  1387. int
  1388. e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
  1389. {
  1390. int i, err = 0;
  1391. for (i = 0; i < adapter->num_queues; i++) {
  1392. err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
  1393. if (err) {
  1394. DPRINTK(PROBE, ERR,
  1395. "Allocation for Rx Queue %u failed\n", i);
  1396. break;
  1397. }
  1398. }
  1399. return err;
  1400. }
  1401. /**
  1402. * e1000_setup_rctl - configure the receive control registers
  1403. * @adapter: Board private structure
  1404. **/
  1405. #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
  1406. (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
  1407. static void
  1408. e1000_setup_rctl(struct e1000_adapter *adapter)
  1409. {
  1410. uint32_t rctl, rfctl;
  1411. uint32_t psrctl = 0;
  1412. #ifdef CONFIG_E1000_PACKET_SPLIT
  1413. uint32_t pages = 0;
  1414. #endif
  1415. rctl = E1000_READ_REG(&adapter->hw, RCTL);
  1416. rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
  1417. rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
  1418. E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
  1419. (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
  1420. if(adapter->hw.tbi_compatibility_on == 1)
  1421. rctl |= E1000_RCTL_SBP;
  1422. else
  1423. rctl &= ~E1000_RCTL_SBP;
  1424. if(adapter->netdev->mtu <= ETH_DATA_LEN)
  1425. rctl &= ~E1000_RCTL_LPE;
  1426. else
  1427. rctl |= E1000_RCTL_LPE;
  1428. /* Setup buffer sizes */
  1429. if(adapter->hw.mac_type >= e1000_82571) {
  1430. /* We can now specify buffers in 1K increments.
  1431. * BSIZE and BSEX are ignored in this case. */
  1432. rctl |= adapter->rx_buffer_len << 0x11;
  1433. } else {
  1434. rctl &= ~E1000_RCTL_SZ_4096;
  1435. rctl |= E1000_RCTL_BSEX;
  1436. switch (adapter->rx_buffer_len) {
  1437. case E1000_RXBUFFER_2048:
  1438. default:
  1439. rctl |= E1000_RCTL_SZ_2048;
  1440. rctl &= ~E1000_RCTL_BSEX;
  1441. break;
  1442. case E1000_RXBUFFER_4096:
  1443. rctl |= E1000_RCTL_SZ_4096;
  1444. break;
  1445. case E1000

Large files files are truncated, but you can click here to view the full file