/drivers/net/e1000e/netdev.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2 · C · 6346 lines · 4211 code · 921 blank · 1214 comment · 700 complexity · b5da9e8fdeaef4b47ff565540de3efed MD5 · raw file

Large files are truncated; click here to view the full file.

  1. /*******************************************************************************
  2. Intel PRO/1000 Linux driver
  3. Copyright(c) 1999 - 2011 Intel Corporation.
  4. This program is free software; you can redistribute it and/or modify it
  5. under the terms and conditions of the GNU General Public License,
  6. version 2, as published by the Free Software Foundation.
  7. This program is distributed in the hope it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc.,
  13. 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  14. The full GNU General Public License is included in this distribution in
  15. the file called "COPYING".
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20. *******************************************************************************/
  21. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22. #include <linux/module.h>
  23. #include <linux/types.h>
  24. #include <linux/init.h>
  25. #include <linux/pci.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/pagemap.h>
  28. #include <linux/delay.h>
  29. #include <linux/netdevice.h>
  30. #include <linux/tcp.h>
  31. #include <linux/ipv6.h>
  32. #include <linux/slab.h>
  33. #include <net/checksum.h>
  34. #include <net/ip6_checksum.h>
  35. #include <linux/mii.h>
  36. #include <linux/ethtool.h>
  37. #include <linux/if_vlan.h>
  38. #include <linux/cpu.h>
  39. #include <linux/smp.h>
  40. #include <linux/pm_qos_params.h>
  41. #include <linux/pm_runtime.h>
  42. #include <linux/aer.h>
  43. #include <linux/prefetch.h>
  44. #include "e1000.h"
/* version suffix identifying this out-of-tree/kernel build */
#define DRV_EXTRAVERSION "-k2"

#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

/* forward declaration; the definition appears later in this file */
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

/* per-board info tables, indexed by the board_* enumeration */
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571] = &e1000_82571_info,
	[board_82572] = &e1000_82572_info,
	[board_82573] = &e1000_82573_info,
	[board_82574] = &e1000_82574_info,
	[board_82583] = &e1000_82583_info,
	[board_80003es2lan] = &e1000_es2_info,
	[board_ich8lan] = &e1000_ich8_info,
	[board_ich9lan] = &e1000_ich9_info,
	[board_ich10lan] = &e1000_ich10_info,
	[board_pchlan] = &e1000_pch_info,
	[board_pch2lan] = &e1000_pch2_info,
};
/* offset/name pair describing one register for the dump helpers below */
struct e1000_reg_info {
	u32 ofs;	/* register offset from the MMIO base */
	char *name;	/* printable register name */
};

/* Rx/Tx data FIFO register offsets, used by the register dump table below */
#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* registers printed by e1000e_dump(); the all-zero entry terminates the walk */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
  118. /*
  119. * e1000_regdump - register printout routine
  120. */
  121. static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
  122. {
  123. int n = 0;
  124. char rname[16];
  125. u32 regs[8];
  126. switch (reginfo->ofs) {
  127. case E1000_RXDCTL(0):
  128. for (n = 0; n < 2; n++)
  129. regs[n] = __er32(hw, E1000_RXDCTL(n));
  130. break;
  131. case E1000_TXDCTL(0):
  132. for (n = 0; n < 2; n++)
  133. regs[n] = __er32(hw, E1000_TXDCTL(n));
  134. break;
  135. case E1000_TARC(0):
  136. for (n = 0; n < 2; n++)
  137. regs[n] = __er32(hw, E1000_TARC(n));
  138. break;
  139. default:
  140. printk(KERN_INFO "%-15s %08x\n",
  141. reginfo->name, __er32(hw, reginfo->ofs));
  142. return;
  143. }
  144. snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
  145. printk(KERN_INFO "%-15s ", rname);
  146. for (n = 0; n < 2; n++)
  147. printk(KERN_CONT "%08x ", regs[n]);
  148. printk(KERN_CONT "\n");
  149. }
/*
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 *
 * Debug-only helper: dumps netdev state, MAC registers and both
 * descriptor rings to the kernel log.  Gated on netif_msg_hw().
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	/* overlay to print a 16-byte descriptor as two u64 words */
	struct my_u0 {
		u64 a;
		u64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	/* overlay to print a 32-byte packet-split descriptor as four u64 words */
	struct my_u1 {
		u64 a;
		u64 b;
		u64 c;
		u64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name state "
		       "trans_start last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name, netdev->state, netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (unsigned long long)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 * +--------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
	 * +--------------------------------------------------------------+
	 * 8 | Special | CSS | Status | CMD | CSO | Length |
	 * +--------------------------------------------------------------+
	 * 63 48 47 36 35 32 31 24 23 16 15 0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 * 63 48 47 40 39 32 31 16 15 8 7 0
	 * +----------------------------------------------------------------+
	 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
	 * +----------------------------------------------------------------+
	 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
	 * +----------------------------------------------------------------+
	 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 * +----------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] |
	 * +----------------------------------------------------------------+
	 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
	 * +----------------------------------------------------------------+
	 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
	 */
	printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
	       " [bi->dma ] leng ntw timestamp bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma ] leng ntw timestamp bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
	       " [bi->dma ] leng ntw timestamp bi->skb "
	       "<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		/* bit 29 (DEXT) picks legacy vs extended; bit 20 (DTYP) data vs context */
		printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
		       "%04X %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
		       (unsigned long long)le64_to_cpu(u0->a),
		       (unsigned long long)le64_to_cpu(u0->b),
		       (unsigned long long)buffer_info->dma,
		       buffer_info->length, buffer_info->next_to_watch,
		       (unsigned long long)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		/* NOTE(review): phys_to_virt() on a DMA address assumes a 1:1
		 * bus:physical mapping (no IOMMU) -- debug-only use; confirm */
		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 * +-----------------------------------------------------+
		 * 0 | Buffer Address 0 [63:0] |
		 * +-----------------------------------------------------+
		 * 8 | Buffer Address 1 [63:0] |
		 * +-----------------------------------------------------+
		 * 16 | Buffer Address 2 [63:0] |
		 * +-----------------------------------------------------+
		 * 24 | Buffer Address 3 [63:0] |
		 * +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 * 63 48 47 32 31 13 12 8 7 4 3 0
		 * +------------------------------------------------------+
		 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
		 * | Checksum | Ident | | Queue | | Type |
		 * +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 * +------------------------------------------------------+
		 * 63 48 47 32 31 20 19 0
		 */
		printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
		       "[vl l0 ee es] "
		       "[ l3 l2 l1 hs] [reserved ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done: hardware wrote it back */
				printk(KERN_INFO "RWB[0x%03X] %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				/* still owned by software: show buffer addresses */
				printk(KERN_INFO "R [0x%03X] %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       (unsigned long long)buffer_info->dma,
				       buffer_info->skb);
				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_ps_bsize0, true);
			}
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
	case 0:
		/* Legacy Receive Descriptor Format
		 *
		 * +-----------------------------------------------------+
		 * | Buffer Address [63:0] |
		 * +-----------------------------------------------------+
		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 * +-----------------------------------------------------+
		 * 63 48 47 40 39 32 31 16 15 0
		 */
		printk(KERN_INFO "Rl[desc] [address 63:0 ] "
		       "[vl er S cks ln] [bi->dma ] [bi->skb] "
		       "<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
			       "%016llX %p", i,
			       (unsigned long long)le64_to_cpu(u0->a),
			       (unsigned long long)le64_to_cpu(u0->b),
			       (unsigned long long)buffer_info->dma,
			       buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1,
					       phys_to_virt(buffer_info->dma),
					       adapter->rx_buffer_len, true);
		}
	}

exit:
	return;
}
  399. /**
  400. * e1000_desc_unused - calculate if we have unused descriptors
  401. **/
  402. static int e1000_desc_unused(struct e1000_ring *ring)
  403. {
  404. if (ring->next_to_clean > ring->next_to_use)
  405. return ring->next_to_clean - ring->next_to_use - 1;
  406. return ring->count + ring->next_to_clean - ring->next_to_use - 1;
  407. }
  408. /**
  409. * e1000_receive_skb - helper function to handle Rx indications
  410. * @adapter: board private structure
  411. * @status: descriptor status field as written by hardware
  412. * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
  413. * @skb: pointer to sk_buff to be indicated to stack
  414. **/
  415. static void e1000_receive_skb(struct e1000_adapter *adapter,
  416. struct net_device *netdev, struct sk_buff *skb,
  417. u8 status, __le16 vlan)
  418. {
  419. u16 tag = le16_to_cpu(vlan);
  420. skb->protocol = eth_type_trans(skb, netdev);
  421. if (status & E1000_RXD_STAT_VP)
  422. __vlan_hwaccel_put_tag(skb, tag);
  423. napi_gro_receive(&adapter->napi, skb);
  424. }
  425. /**
  426. * e1000_rx_checksum - Receive Checksum Offload
  427. * @adapter: board private structure
  428. * @status_err: receive descriptor status and error fields
  429. * @csum: receive descriptor csum field
  430. * @sk_buff: socket buffer with received data
  431. **/
  432. static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
  433. u32 csum, struct sk_buff *skb)
  434. {
  435. u16 status = (u16)status_err;
  436. u8 errors = (u8)(status_err >> 24);
  437. skb_checksum_none_assert(skb);
  438. /* Ignore Checksum bit is set */
  439. if (status & E1000_RXD_STAT_IXSM)
  440. return;
  441. /* TCP/UDP checksum error bit is set */
  442. if (errors & E1000_RXD_ERR_TCPE) {
  443. /* let the stack verify checksum errors */
  444. adapter->hw_csum_err++;
  445. return;
  446. }
  447. /* TCP/UDP Checksum has not been calculated */
  448. if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
  449. return;
  450. /* It must be a TCP or UDP packet with a valid checksum */
  451. if (status & E1000_RXD_STAT_TCPCS) {
  452. /* TCP checksum is good */
  453. skb->ip_summed = CHECKSUM_UNNECESSARY;
  454. } else {
  455. /*
  456. * IP fragment with UDP payload
  457. * Hardware complements the payload checksum, so we undo it
  458. * and then put the value in host order for further stack use.
  459. */
  460. __sum16 sum = (__force __sum16)htons(csum);
  461. skb->csum = csum_unfold(~sum);
  462. skb->ip_summed = CHECKSUM_COMPLETE;
  463. }
  464. adapter->hw_csum_good++;
  465. }
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @cleaned_count: number of ring entries to refill with fresh buffers
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			/* recycled skb left by the cleanup path: reset and remap */
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		/* bump the hardware tail every E1000_RX_BUFFER_WRITE entries */
		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			writel(i, adapter->hw.hw_addr + rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @cleaned_count: number of ring entries to refill with fresh buffers
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		/* buffers 1..PS_PAGE_BUFFERS hold full pages of payload */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		/* buffer 0 is the header buffer, sized rx_ps_bsize0 */
		skb = netdev_alloc_skb_ip_align(netdev,
						adapter->rx_ps_bsize0);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			/* NOTE(review): tail is written as i << 1 here (vs i in
			 * the legacy path), presumably because packet-split
			 * descriptors are double-size -- confirm against HW spec */
			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
  614. /**
  615. * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
  616. * @adapter: address of board private structure
  617. * @cleaned_count: number of buffers to allocate this pass
  618. **/
  619. static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
  620. int cleaned_count)
  621. {
  622. struct net_device *netdev = adapter->netdev;
  623. struct pci_dev *pdev = adapter->pdev;
  624. struct e1000_rx_desc *rx_desc;
  625. struct e1000_ring *rx_ring = adapter->rx_ring;
  626. struct e1000_buffer *buffer_info;
  627. struct sk_buff *skb;
  628. unsigned int i;
  629. unsigned int bufsz = 256 - 16 /* for skb_reserve */;
  630. i = rx_ring->next_to_use;
  631. buffer_info = &rx_ring->buffer_info[i];
  632. while (cleaned_count--) {
  633. skb = buffer_info->skb;
  634. if (skb) {
  635. skb_trim(skb, 0);
  636. goto check_page;
  637. }
  638. skb = netdev_alloc_skb_ip_align(netdev, bufsz);
  639. if (unlikely(!skb)) {
  640. /* Better luck next round */
  641. adapter->alloc_rx_buff_failed++;
  642. break;
  643. }
  644. buffer_info->skb = skb;
  645. check_page:
  646. /* allocate a new page if necessary */
  647. if (!buffer_info->page) {
  648. buffer_info->page = alloc_page(GFP_ATOMIC);
  649. if (unlikely(!buffer_info->page)) {
  650. adapter->alloc_rx_buff_failed++;
  651. break;
  652. }
  653. }
  654. if (!buffer_info->dma)
  655. buffer_info->dma = dma_map_page(&pdev->dev,
  656. buffer_info->page, 0,
  657. PAGE_SIZE,
  658. DMA_FROM_DEVICE);
  659. rx_desc = E1000_RX_DESC(*rx_ring, i);
  660. rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  661. if (unlikely(++i == rx_ring->count))
  662. i = 0;
  663. buffer_info = &rx_ring->buffer_info[i];
  664. }
  665. if (likely(rx_ring->next_to_use != i)) {
  666. rx_ring->next_to_use = i;
  667. if (unlikely(i-- == 0))
  668. i = (rx_ring->count - 1);
  669. /* Force memory writes to complete before letting h/w
  670. * know there are new descriptors to fetch. (Only
  671. * applicable for weak-ordered memory model archs,
  672. * such as IA-64). */
  673. wmb();
  674. writel(i, adapter->hw.hw_addr + rx_ring->tail);
  675. }
  676. }
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: incremented once per descriptor processed
 * @work_to_do: NAPI budget; processing stops once *work_done reaches it
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD (descriptor done) is set by hardware once the buffer is filled */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		/* advance; prefetch the next descriptor for the next pass */
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill whatever the loop consumed */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
  802. static void e1000_put_txbuf(struct e1000_adapter *adapter,
  803. struct e1000_buffer *buffer_info)
  804. {
  805. if (buffer_info->dma) {
  806. if (buffer_info->mapped_as_page)
  807. dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
  808. buffer_info->length, DMA_TO_DEVICE);
  809. else
  810. dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
  811. buffer_info->length, DMA_TO_DEVICE);
  812. buffer_info->dma = 0;
  813. }
  814. if (buffer_info->skb) {
  815. dev_kfree_skb_any(buffer_info->skb);
  816. buffer_info->skb = NULL;
  817. }
  818. buffer_info->time_stamp = 0;
  819. }
/*
 * e1000_print_hw_hang - Tx hang check and diagnostic dump (work handler)
 *
 * On the first pass with DMA burst enabled, a pending descriptor
 * write-back can look like a hang, so only a flush is issued and a
 * recheck is armed; a repeat detection is treated as a real hang.
 */
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	/* eop: index of the descriptor the stalled clean is waiting on */
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	/* adapter is being torn down; nothing useful to report */
	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be block on write-back, flush and detect again
		 * flush pending descriptor writebacks to memory
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	/* gather PHY and PCI state for the diagnostic dump below */
	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      " TDH <%x>\n"
	      " TDT <%x>\n"
	      " next_to_use <%x>\n"
	      " next_to_clean <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      " time_stamp <%lx>\n"
	      " next_to_watch <%x>\n"
	      " jiffies <%lx>\n"
	      " next_to_watch.status <%x>\n"
	      "MAC Status <%x>\n"
	      "PHY Status <%x>\n"
	      "PHY 1000BASE-T Status <%x>\n"
	      "PHY Extended Status <%x>\n"
	      "PCI Status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* reclaim whole packets: stop when the end-of-packet descriptor has
	 * not been written back yet (DD clear) or a full ring was processed */
	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* free every descriptor of this packet, up to and including
		 * the EOP entry */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				/* byte/segment stats live on the EOP entry */
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		/* ring fully drained */
		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	/* restart a stopped queue once enough descriptors have been freed */
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		/* stale time stamp + no flow-control pause => suspected hang;
		 * report via deferred work (e1000_print_hw_hang) */
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	/* false only if we stopped because a whole ring's worth was cleaned */
	return count < tx_ring->count;
}
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 * @work_done: cumulative NAPI work counter, incremented per received packet
 * @work_to_do: NAPI budget; processing stops once it is reached
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	/* loop while descriptors have been written back (DD set) */
	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		/* release the header buffer's DMA mapping */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		/* still discarding the remainder of an oversized packet */
		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		/* drop frames the hardware flagged with receive errors */
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		/* a zero header length means this descriptor carries no new
		 * data for the split header buffer */
		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
						PAGE_SIZE, DMA_FROM_DEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			/* hand the buffer back to the device after the copy */
			dma_sync_single_for_device(&pdev->dev, ps_page->dma,
						   PAGE_SIZE, DMA_FROM_DEVICE);

			/* remove the CRC */
			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
				l1 -= 4;

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		/* large packet: attach each used payload page as an skb frag */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			/* page ownership moved into the skb */
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		/* count packets where hardware performed the header split */
		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		/* clear the status byte so this descriptor is not re-seen */
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	/* replenish whatever is still unused before returning */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
  1112. /**
  1113. * e1000_consume_page - helper function
  1114. **/
  1115. static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
  1116. u16 length)
  1117. {
  1118. bi->page = NULL;
  1119. skb->len += length;
  1120. skb->data_len += length;
  1121. skb->truesize += length;
  1122. }
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: cumulative NAPI work counter, incremented per received frame
 * @work_to_do: NAPI budget; processing stops once it is reached
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* loop while descriptors have been written back (DD set) */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window
			 * too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

/* shorthand for the skb that accumulates a multi-descriptor frame */
#define rxtop (rx_ring->rx_skb_top)
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
					                   KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
					              KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
					                   buffer_info->page, 0,
					                   length);
					e1000_consume_page(buffer_info, skb,
					                   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		/* clear DD so this descriptor is not re-seen */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* replenish whatever is still unused before returning */
	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 *
 * Releases every DMA mapping, page and skb still held by the Rx ring,
 * zeroes the descriptor memory and resets the software and hardware
 * ring pointers.
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			/* unmap with the same size/kind the active Rx path
			 * mapped with; selected via the clean_rx callback */
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		/* release any packet-split payload pages */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	/* point the hardware head/tail registers back to the ring start */
	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
  1330. static void e1000e_downshift_workaround(struct work_struct *work)
  1331. {
  1332. struct e1000_adapter *adapter = container_of(work,
  1333. struct e1000_adapter, downshift_task);
  1334. if (test_bit(__E1000_DOWN, &adapter->state))
  1335. return;
  1336. e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
  1337. }
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		/* link status changed: make the watchdog refresh link state */
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* hand Tx/Rx processing to the NAPI poll routine */
	if (napi_schedule_prep(&adapter->napi)) {
		/* reset per-poll byte/packet accumulators */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	/* shared (legacy) line: a zero ICR means someone else interrupted */
	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked. No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		/* link status changed: make the watchdog refresh link state */
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* hand Tx/Rx processing to the NAPI poll routine */
	if (napi_schedule_prep(&adapter->napi)) {
		/* reset per-poll byte/packet accumulators */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/*
 * e1000_msix_other - MSI-X handler for causes other than Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Handles link-status changes and re-triggers any auto-cleared Tx/Rx
 * causes; always re-arms the LSC/OTHER interrupt sources on exit unless
 * the adapter is going down.
 */
static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		/* not our cause: just re-enable the OTHER source */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	/* re-raise any EIAC-auto-cleared Tx/Rx causes so their vectors fire */
	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		/* link status changed: make the watchdog refresh link state */
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
  1471. static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
  1472. {
  1473. struct net_device *netdev = data;
  1474. struct e1000_adapter *adapter = netdev_priv(netdev);
  1475. struct e1000_hw *hw = &adapter->hw;
  1476. struct e1000_ring *tx_ring = adapter->tx_ring;
  1477. adapter->total_tx_bytes = 0;
  1478. adapter->total_tx_packets = 0;
  1479. if (!e1000_clean_tx_irq(adapter))
  1480. /* Ring was not completely cleaned, so fire another interrupt */
  1481. ew32(ICS, tx_ring->ims_val);
  1482. return IRQ_HANDLED;
  1483. }
  1484. static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
  1485. {
  1486. struct net_device *netdev = data;
  1487. struct e1000_adapter *adapter = netdev_priv(netdev);
  1488. /* Write the ITR v…