/drivers/net/ethernet/intel/igb/igb_main.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 6
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
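/* e.g. with MAJ = 5, MIN = 6 and BUILD = 0, the macro above expands to the
 * version string "5.6.0-k"; the "-k" suffix marks the in-kernel build of
 * the driver.
 */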
enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
		"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
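/* debug = -1 asks for the driver default: igb is expected to feed this
 * value to netif_msg_init(), which falls back to DEFAULT_MSG_ENABLE
 * (DRV | PROBE | LINK) whenever the parameter is negative.
 */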
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1, buffer_info->skb->data,
					       dma_unmap_len(buffer_info, len),
					       true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS,
						       16, 1,
						       page_address(buffer_info->page) +
							       buffer_info->page_offset,
						       igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: pointer to the adapter structure
 *
 * Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 * igb_set_i2c_data - Sets the I2C data bit
 * @data: pointer to the adapter structure
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: pointer to the adapter structure
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: pointer to the adapter structure
 *
 * Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
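/* The four callbacks above bit-bang I2C through the I2CPARAMS register and
 * are driven by the kernel's i2c-algo-bit framework. .udelay is half of the
 * SCL period in microseconds, so 5 us should give a clock of roughly
 * 100 kHz (standard-mode I2C); .timeout bounds, in jiffies, how long a
 * transfer may wait for the bus.
 */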
/**
 * igb_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;

	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
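		/* Concretely, Q_IDX_82576(i) interleaves the two banks of
		 * eight: i = 0, 1, 2, 3 maps to queue 0, 8, 1, 9, so the PF
		 * picks up exactly the slots the VF pairs leave free.
		 */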
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;

		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
		WARN(pci_device_is_present(igb->pdev),
		     "igb: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows. The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
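		/* Worked example (illustrative): rx_queue = 10 selects row
		 * 10 & 0x7 = 2 and column offset (10 & 0x8) << 1 = 16, so
		 * the vector is written to bits 23:16 of IVAR row 2.
		 */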
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major. So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
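		/* Worked example (illustrative): rx_queue = 5 selects row
		 * 5 >> 1 = 2 and column offset (5 & 0x1) << 4 = 16, i.e.
		 * bits 23:16 of IVAR row 2; the matching Tx entry sits
		 * 8 bits higher at offset 24.
		 */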
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure to initialize
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure to initialize
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igb_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL so we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: board private structure to initialize
 *
 * This function resets the device so that it has 0 Rx queues, Tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
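	/* e.g. (illustrative) four RSS queues with IGB_FLAG_QUEUE_PAIRS set
	 * give num_q_vectors = 4, and the link-status vector brings the
	 * MSI-X request up to 5 entries.
	 */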
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;

		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count;
	size_t size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = struct_size(q_vector, ring, ring_count);

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		kfree_rcu(q_vector, rcu);
		q_vector = kzalloc(size, GFP_KERNEL);
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
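/* Distribution example for igb_alloc_q_vectors() above (illustrative): with
 * 8 paired Tx/Rx queues and num_q_vectors = 8, the first loop is skipped
 * (8 < 16) and each pass of the second loop hands one Rx and one Tx ring to
 * a vector (rqpv = tqpv = 1).
 */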
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, vid, pf_id, false, true);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 4);

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

static bool is_any_cbs_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->cbs_enable)
			return true;
	}

	return false;
}

static bool is_any_txtime_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->launchtime_enable)
			return true;
	}

	return false;
}

/**
 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 * @adapter: pointer to adapter struct
 * @queue: queue number
 *
 * Configure CBS and Launchtime for a given hardware queue.
 * Parameters are retrieved from the correct Tx ring, so
 * igb_save_cbs_params() and igb_save_txtime_params() should be used
 * for setting those correctly prior to this function being called.
 **/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
	struct igb_ring *ring = adapter->tx_ring[queue];
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tqavcc, tqavctrl;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
	if (ring->cbs_enable || ring->launchtime_enable) {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
	} else {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
	}

	/* If CBS is enabled, set DataTranARB and config its parameters. */
	if (ring->cbs_enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !ring->cbs_enable) {
			/* max "linkspeed" idleslope in kbps */
			ring->idleslope