
/drivers/net/e1000e/netdev.c

Note: the full file is 5,972 lines; this view is truncated and shows only the first portion.

  1. /*******************************************************************************
  2. Intel PRO/1000 Linux driver
  3. Copyright(c) 1999 - 2009 Intel Corporation.
  4. This program is free software; you can redistribute it and/or modify it
  5. under the terms and conditions of the GNU General Public License,
  6. version 2, as published by the Free Software Foundation.
  7. This program is distributed in the hope it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc.,
  13. 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  14. The full GNU General Public License is included in this distribution in
  15. the file called "COPYING".
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20. *******************************************************************************/
  21. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22. #include <linux/module.h>
  23. #include <linux/types.h>
  24. #include <linux/init.h>
  25. #include <linux/pci.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/pagemap.h>
  28. #include <linux/delay.h>
  29. #include <linux/netdevice.h>
  30. #include <linux/tcp.h>
  31. #include <linux/ipv6.h>
  32. #include <linux/slab.h>
  33. #include <net/checksum.h>
  34. #include <net/ip6_checksum.h>
  35. #include <linux/mii.h>
  36. #include <linux/ethtool.h>
  37. #include <linux/if_vlan.h>
  38. #include <linux/cpu.h>
  39. #include <linux/smp.h>
  40. #include <linux/pm_qos_params.h>
  41. #include <linux/pm_runtime.h>
  42. #include <linux/aer.h>
  43. #include "e1000.h"
  44. #define DRV_VERSION "1.0.2-k4"
  45. char e1000e_driver_name[] = "e1000e";
  46. const char e1000e_driver_version[] = DRV_VERSION;
  47. static const struct e1000_info *e1000_info_tbl[] = {
  48. [board_82571] = &e1000_82571_info,
  49. [board_82572] = &e1000_82572_info,
  50. [board_82573] = &e1000_82573_info,
  51. [board_82574] = &e1000_82574_info,
  52. [board_82583] = &e1000_82583_info,
  53. [board_80003es2lan] = &e1000_es2_info,
  54. [board_ich8lan] = &e1000_ich8_info,
  55. [board_ich9lan] = &e1000_ich9_info,
  56. [board_ich10lan] = &e1000_ich10_info,
  57. [board_pchlan] = &e1000_pch_info,
  58. };
  59. struct e1000_reg_info {
  60. u32 ofs;
  61. char *name;
  62. };
  63. #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
  64. #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
  65. #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
  66. #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
  67. #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
  68. #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
  69. #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
  70. #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
  71. #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
  72. #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
  73. static const struct e1000_reg_info e1000_reg_info_tbl[] = {
  74. /* General Registers */
  75. {E1000_CTRL, "CTRL"},
  76. {E1000_STATUS, "STATUS"},
  77. {E1000_CTRL_EXT, "CTRL_EXT"},
  78. /* Interrupt Registers */
  79. {E1000_ICR, "ICR"},
  80. /* RX Registers */
  81. {E1000_RCTL, "RCTL"},
  82. {E1000_RDLEN, "RDLEN"},
  83. {E1000_RDH, "RDH"},
  84. {E1000_RDT, "RDT"},
  85. {E1000_RDTR, "RDTR"},
  86. {E1000_RXDCTL(0), "RXDCTL"},
  87. {E1000_ERT, "ERT"},
  88. {E1000_RDBAL, "RDBAL"},
  89. {E1000_RDBAH, "RDBAH"},
  90. {E1000_RDFH, "RDFH"},
  91. {E1000_RDFT, "RDFT"},
  92. {E1000_RDFHS, "RDFHS"},
  93. {E1000_RDFTS, "RDFTS"},
  94. {E1000_RDFPC, "RDFPC"},
  95. /* TX Registers */
  96. {E1000_TCTL, "TCTL"},
  97. {E1000_TDBAL, "TDBAL"},
  98. {E1000_TDBAH, "TDBAH"},
  99. {E1000_TDLEN, "TDLEN"},
  100. {E1000_TDH, "TDH"},
  101. {E1000_TDT, "TDT"},
  102. {E1000_TIDV, "TIDV"},
  103. {E1000_TXDCTL(0), "TXDCTL"},
  104. {E1000_TADV, "TADV"},
  105. {E1000_TARC(0), "TARC"},
  106. {E1000_TDFH, "TDFH"},
  107. {E1000_TDFT, "TDFT"},
  108. {E1000_TDFHS, "TDFHS"},
  109. {E1000_TDFTS, "TDFTS"},
  110. {E1000_TDFPC, "TDFPC"},
  111. /* List Terminator */
  112. {}
  113. };
  114. /*
  115. * e1000_regdump - register printout routine
  116. */
  117. static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
  118. {
  119. int n = 0;
  120. char rname[16];
  121. u32 regs[8];
  122. switch (reginfo->ofs) {
  123. case E1000_RXDCTL(0):
  124. for (n = 0; n < 2; n++)
  125. regs[n] = __er32(hw, E1000_RXDCTL(n));
  126. break;
  127. case E1000_TXDCTL(0):
  128. for (n = 0; n < 2; n++)
  129. regs[n] = __er32(hw, E1000_TXDCTL(n));
  130. break;
  131. case E1000_TARC(0):
  132. for (n = 0; n < 2; n++)
  133. regs[n] = __er32(hw, E1000_TARC(n));
  134. break;
  135. default:
  136. printk(KERN_INFO "%-15s %08x\n",
  137. reginfo->name, __er32(hw, reginfo->ofs));
  138. return;
  139. }
  140. snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
  141. printk(KERN_INFO "%-15s ", rname);
  142. for (n = 0; n < 2; n++)
  143. printk(KERN_CONT "%08x ", regs[n]);
  144. printk(KERN_CONT "\n");
  145. }
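/*
 * Annotation (not part of the original source): RXDCTL, TXDCTL and TARC
 * exist once per queue, so the switch above reads both instances and the
 * "[0-1]" suffix appended to the printed name marks the pair; every other
 * register in the table is printed as a single value.
 */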
  146. /*
  147. * e1000e_dump - Print registers, tx-ring and rx-ring
  148. */
  149. static void e1000e_dump(struct e1000_adapter *adapter)
  150. {
  151. struct net_device *netdev = adapter->netdev;
  152. struct e1000_hw *hw = &adapter->hw;
  153. struct e1000_reg_info *reginfo;
  154. struct e1000_ring *tx_ring = adapter->tx_ring;
  155. struct e1000_tx_desc *tx_desc;
  156. struct my_u0 { u64 a; u64 b; } *u0;
  157. struct e1000_buffer *buffer_info;
  158. struct e1000_ring *rx_ring = adapter->rx_ring;
  159. union e1000_rx_desc_packet_split *rx_desc_ps;
  160. struct e1000_rx_desc *rx_desc;
  161. struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
  162. u32 staterr;
  163. int i = 0;
  164. if (!netif_msg_hw(adapter))
  165. return;
  166. /* Print netdevice Info */
  167. if (netdev) {
  168. dev_info(&adapter->pdev->dev, "Net device Info\n");
  169. printk(KERN_INFO "Device Name state "
  170. "trans_start last_rx\n");
  171. printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
  172. netdev->name,
  173. netdev->state,
  174. netdev->trans_start,
  175. netdev->last_rx);
  176. }
  177. /* Print Registers */
  178. dev_info(&adapter->pdev->dev, "Register Dump\n");
  179. printk(KERN_INFO " Register Name Value\n");
  180. for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
  181. reginfo->name; reginfo++) {
  182. e1000_regdump(hw, reginfo);
  183. }
  184. /* Print TX Ring Summary */
  185. if (!netdev || !netif_running(netdev))
  186. goto exit;
  187. dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
  188. printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
  189. " leng ntw timestamp\n");
  190. buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
  191. printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
  192. 0, tx_ring->next_to_use, tx_ring->next_to_clean,
  193. (u64)buffer_info->dma,
  194. buffer_info->length,
  195. buffer_info->next_to_watch,
  196. (u64)buffer_info->time_stamp);
  197. /* Print TX Rings */
  198. if (!netif_msg_tx_done(adapter))
  199. goto rx_ring_summary;
  200. dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
  201. /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
  202. *
  203. * Legacy Transmit Descriptor
  204. * +--------------------------------------------------------------+
  205. * 0 | Buffer Address [63:0] (Reserved on Write Back) |
  206. * +--------------------------------------------------------------+
  207. * 8 | Special | CSS | Status | CMD | CSO | Length |
  208. * +--------------------------------------------------------------+
  209. * 63 48 47 36 35 32 31 24 23 16 15 0
  210. *
  211. * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
  212. * 63 48 47 40 39 32 31 16 15 8 7 0
  213. * +----------------------------------------------------------------+
  214. * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
  215. * +----------------------------------------------------------------+
  216. * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
  217. * +----------------------------------------------------------------+
  218. * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
  219. *
  220. * Extended Data Descriptor (DTYP=0x1)
  221. * +----------------------------------------------------------------+
  222. * 0 | Buffer Address [63:0] |
  223. * +----------------------------------------------------------------+
  224. * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
  225. * +----------------------------------------------------------------+
  226. * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
  227. */
  228. printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
  229. " [bi->dma ] leng ntw timestamp bi->skb "
  230. "<-- Legacy format\n");
  231. printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
  232. " [bi->dma ] leng ntw timestamp bi->skb "
  233. "<-- Ext Context format\n");
  234. printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
  235. " [bi->dma ] leng ntw timestamp bi->skb "
  236. "<-- Ext Data format\n");
  237. for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
  238. tx_desc = E1000_TX_DESC(*tx_ring, i);
  239. buffer_info = &tx_ring->buffer_info[i];
  240. u0 = (struct my_u0 *)tx_desc;
  241. printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
  242. "%04X %3X %016llX %p",
  243. (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
  244. ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
  245. le64_to_cpu(u0->a), le64_to_cpu(u0->b),
  246. (u64)buffer_info->dma, buffer_info->length,
  247. buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
  248. buffer_info->skb);
  249. if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
  250. printk(KERN_CONT " NTC/U\n");
  251. else if (i == tx_ring->next_to_use)
  252. printk(KERN_CONT " NTU\n");
  253. else if (i == tx_ring->next_to_clean)
  254. printk(KERN_CONT " NTC\n");
  255. else
  256. printk(KERN_CONT "\n");
  257. if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
  258. print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
  259. 16, 1, phys_to_virt(buffer_info->dma),
  260. buffer_info->length, true);
  261. }
  262. /* Print RX Rings Summary */
  263. rx_ring_summary:
  264. dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
  265. printk(KERN_INFO "Queue [NTU] [NTC]\n");
  266. printk(KERN_INFO " %5d %5X %5X\n", 0,
  267. rx_ring->next_to_use, rx_ring->next_to_clean);
  268. /* Print RX Rings */
  269. if (!netif_msg_rx_status(adapter))
  270. goto exit;
  271. dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
  272. switch (adapter->rx_ps_pages) {
  273. case 1:
  274. case 2:
  275. case 3:
  276. /* [Extended] Packet Split Receive Descriptor Format
  277. *
  278. * +-----------------------------------------------------+
  279. * 0 | Buffer Address 0 [63:0] |
  280. * +-----------------------------------------------------+
  281. * 8 | Buffer Address 1 [63:0] |
  282. * +-----------------------------------------------------+
  283. * 16 | Buffer Address 2 [63:0] |
  284. * +-----------------------------------------------------+
  285. * 24 | Buffer Address 3 [63:0] |
  286. * +-----------------------------------------------------+
  287. */
  288. printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
  289. "[buffer 1 63:0 ] "
  290. "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
  291. "[bi->skb] <-- Ext Pkt Split format\n");
  292. /* [Extended] Receive Descriptor (Write-Back) Format
  293. *
  294. * 63 48 47 32 31 13 12 8 7 4 3 0
  295. * +------------------------------------------------------+
  296. * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
  297. * | Checksum | Ident | | Queue | | Type |
  298. * +------------------------------------------------------+
  299. * 8 | VLAN Tag | Length | Extended Error | Extended Status |
  300. * +------------------------------------------------------+
  301. * 63 48 47 32 31 20 19 0
  302. */
  303. printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
  304. "[vl l0 ee es] "
  305. "[ l3 l2 l1 hs] [reserved ] ---------------- "
  306. "[bi->skb] <-- Ext Rx Write-Back format\n");
  307. for (i = 0; i < rx_ring->count; i++) {
  308. buffer_info = &rx_ring->buffer_info[i];
  309. rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
  310. u1 = (struct my_u1 *)rx_desc_ps;
  311. staterr =
  312. le32_to_cpu(rx_desc_ps->wb.middle.status_error);
  313. if (staterr & E1000_RXD_STAT_DD) {
  314. /* Descriptor Done */
  315. printk(KERN_INFO "RWB[0x%03X] %016llX "
  316. "%016llX %016llX %016llX "
  317. "---------------- %p", i,
  318. le64_to_cpu(u1->a),
  319. le64_to_cpu(u1->b),
  320. le64_to_cpu(u1->c),
  321. le64_to_cpu(u1->d),
  322. buffer_info->skb);
  323. } else {
  324. printk(KERN_INFO "R [0x%03X] %016llX "
  325. "%016llX %016llX %016llX %016llX %p", i,
  326. le64_to_cpu(u1->a),
  327. le64_to_cpu(u1->b),
  328. le64_to_cpu(u1->c),
  329. le64_to_cpu(u1->d),
  330. (u64)buffer_info->dma,
  331. buffer_info->skb);
  332. if (netif_msg_pktdata(adapter))
  333. print_hex_dump(KERN_INFO, "",
  334. DUMP_PREFIX_ADDRESS, 16, 1,
  335. phys_to_virt(buffer_info->dma),
  336. adapter->rx_ps_bsize0, true);
  337. }
  338. if (i == rx_ring->next_to_use)
  339. printk(KERN_CONT " NTU\n");
  340. else if (i == rx_ring->next_to_clean)
  341. printk(KERN_CONT " NTC\n");
  342. else
  343. printk(KERN_CONT "\n");
  344. }
  345. break;
  346. default:
  347. case 0:
  348. /* Legacy Receive Descriptor Format
  349. *
  350. * +-----------------------------------------------------+
  351. * | Buffer Address [63:0] |
  352. * +-----------------------------------------------------+
  353. * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
  354. * +-----------------------------------------------------+
  355. * 63 48 47 40 39 32 31 16 15 0
  356. */
  357. printk(KERN_INFO "Rl[desc] [address 63:0 ] "
  358. "[vl er S cks ln] [bi->dma ] [bi->skb] "
  359. "<-- Legacy format\n");
  360. for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
  361. rx_desc = E1000_RX_DESC(*rx_ring, i);
  362. buffer_info = &rx_ring->buffer_info[i];
  363. u0 = (struct my_u0 *)rx_desc;
  364. printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
  365. "%016llX %p",
  366. i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
  367. (u64)buffer_info->dma, buffer_info->skb);
  368. if (i == rx_ring->next_to_use)
  369. printk(KERN_CONT " NTU\n");
  370. else if (i == rx_ring->next_to_clean)
  371. printk(KERN_CONT " NTC\n");
  372. else
  373. printk(KERN_CONT "\n");
  374. if (netif_msg_pktdata(adapter))
  375. print_hex_dump(KERN_INFO, "",
  376. DUMP_PREFIX_ADDRESS,
  377. 16, 1, phys_to_virt(buffer_info->dma),
  378. adapter->rx_buffer_len, true);
  379. }
  380. }
  381. exit:
  382. return;
  383. }
  384. /**
  385. * e1000_desc_unused - calculate if we have unused descriptors
  386. **/
  387. static int e1000_desc_unused(struct e1000_ring *ring)
  388. {
  389. if (ring->next_to_clean > ring->next_to_use)
  390. return ring->next_to_clean - ring->next_to_use - 1;
  391. return ring->count + ring->next_to_clean - ring->next_to_use - 1;
  392. }
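/*
 * Worked example (annotation, not part of the original source): with
 * ring->count = 256, next_to_clean = 10 and next_to_use = 250, the
 * function returns 256 + 10 - 250 - 1 = 15 unused descriptors.  The "- 1"
 * keeps next_to_use from ever advancing onto next_to_clean, so a
 * completely full ring stays distinguishable from a completely empty one.
 */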
  393. /**
  394. * e1000_receive_skb - helper function to handle Rx indications
  395. * @adapter: board private structure
  396. * @status: descriptor status field as written by hardware
  397. * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
  398. * @skb: pointer to sk_buff to be indicated to stack
  399. **/
  400. static void e1000_receive_skb(struct e1000_adapter *adapter,
  401. struct net_device *netdev,
  402. struct sk_buff *skb,
  403. u8 status, __le16 vlan)
  404. {
  405. skb->protocol = eth_type_trans(skb, netdev);
  406. if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
  407. vlan_gro_receive(&adapter->napi, adapter->vlgrp,
  408. le16_to_cpu(vlan), skb);
  409. else
  410. napi_gro_receive(&adapter->napi, skb);
  411. }
  412. /**
  413. * e1000_rx_checksum - Receive Checksum Offload for 82543
  414. * @adapter: board private structure
  415. * @status_err: receive descriptor status and error fields
  416. * @csum: receive descriptor csum field
  417. * @sk_buff: socket buffer with received data
  418. **/
  419. static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
  420. u32 csum, struct sk_buff *skb)
  421. {
  422. u16 status = (u16)status_err;
  423. u8 errors = (u8)(status_err >> 24);
  424. skb->ip_summed = CHECKSUM_NONE;
  425. /* Ignore Checksum bit is set */
  426. if (status & E1000_RXD_STAT_IXSM)
  427. return;
  428. /* TCP/UDP checksum error bit is set */
  429. if (errors & E1000_RXD_ERR_TCPE) {
  430. /* let the stack verify checksum errors */
  431. adapter->hw_csum_err++;
  432. return;
  433. }
  434. /* TCP/UDP Checksum has not been calculated */
  435. if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
  436. return;
  437. /* It must be a TCP or UDP packet with a valid checksum */
  438. if (status & E1000_RXD_STAT_TCPCS) {
  439. /* TCP checksum is good */
  440. skb->ip_summed = CHECKSUM_UNNECESSARY;
  441. } else {
  442. /*
  443. * IP fragment with UDP payload
  444. * Hardware complements the payload checksum, so we undo it
  445. * and then put the value in host order for further stack use.
  446. */
  447. __sum16 sum = (__force __sum16)htons(csum);
  448. skb->csum = csum_unfold(~sum);
  449. skb->ip_summed = CHECKSUM_COMPLETE;
  450. }
  451. adapter->hw_csum_good++;
  452. }
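/*
 * Annotation (not part of the original source): in the UDP-fragment path
 * above, the hardware reports a complemented payload checksum; the driver
 * undoes the complement (csum_unfold(~sum)), stores it in skb->csum and
 * marks the skb CHECKSUM_COMPLETE so the stack finishes verification
 * itself, whereas the TCP path can be declared CHECKSUM_UNNECESSARY
 * outright.
 */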
  453. /**
  454. * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  455. * @adapter: address of board private structure
  456. **/
  457. static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
  458. int cleaned_count)
  459. {
  460. struct net_device *netdev = adapter->netdev;
  461. struct pci_dev *pdev = adapter->pdev;
  462. struct e1000_ring *rx_ring = adapter->rx_ring;
  463. struct e1000_rx_desc *rx_desc;
  464. struct e1000_buffer *buffer_info;
  465. struct sk_buff *skb;
  466. unsigned int i;
  467. unsigned int bufsz = adapter->rx_buffer_len;
  468. i = rx_ring->next_to_use;
  469. buffer_info = &rx_ring->buffer_info[i];
  470. while (cleaned_count--) {
  471. skb = buffer_info->skb;
  472. if (skb) {
  473. skb_trim(skb, 0);
  474. goto map_skb;
  475. }
  476. skb = netdev_alloc_skb_ip_align(netdev, bufsz);
  477. if (!skb) {
  478. /* Better luck next round */
  479. adapter->alloc_rx_buff_failed++;
  480. break;
  481. }
  482. buffer_info->skb = skb;
  483. map_skb:
  484. buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
  485. adapter->rx_buffer_len,
  486. DMA_FROM_DEVICE);
  487. if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
  488. dev_err(&pdev->dev, "RX DMA map failed\n");
  489. adapter->rx_dma_failed++;
  490. break;
  491. }
  492. rx_desc = E1000_RX_DESC(*rx_ring, i);
  493. rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  494. if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
  495. /*
  496. * Force memory writes to complete before letting h/w
  497. * know there are new descriptors to fetch. (Only
  498. * applicable for weak-ordered memory model archs,
  499. * such as IA-64).
  500. */
  501. wmb();
  502. writel(i, adapter->hw.hw_addr + rx_ring->tail);
  503. }
  504. i++;
  505. if (i == rx_ring->count)
  506. i = 0;
  507. buffer_info = &rx_ring->buffer_info[i];
  508. }
  509. rx_ring->next_to_use = i;
  510. }
  511. /**
  512. * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  513. * @adapter: address of board private structure
  514. **/
  515. static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
  516. int cleaned_count)
  517. {
  518. struct net_device *netdev = adapter->netdev;
  519. struct pci_dev *pdev = adapter->pdev;
  520. union e1000_rx_desc_packet_split *rx_desc;
  521. struct e1000_ring *rx_ring = adapter->rx_ring;
  522. struct e1000_buffer *buffer_info;
  523. struct e1000_ps_page *ps_page;
  524. struct sk_buff *skb;
  525. unsigned int i, j;
  526. i = rx_ring->next_to_use;
  527. buffer_info = &rx_ring->buffer_info[i];
  528. while (cleaned_count--) {
  529. rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  530. for (j = 0; j < PS_PAGE_BUFFERS; j++) {
  531. ps_page = &buffer_info->ps_pages[j];
  532. if (j >= adapter->rx_ps_pages) {
  533. /* all unused desc entries get hw null ptr */
  534. rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
  535. continue;
  536. }
  537. if (!ps_page->page) {
  538. ps_page->page = alloc_page(GFP_ATOMIC);
  539. if (!ps_page->page) {
  540. adapter->alloc_rx_buff_failed++;
  541. goto no_buffers;
  542. }
  543. ps_page->dma = dma_map_page(&pdev->dev,
  544. ps_page->page,
  545. 0, PAGE_SIZE,
  546. DMA_FROM_DEVICE);
  547. if (dma_mapping_error(&pdev->dev,
  548. ps_page->dma)) {
  549. dev_err(&adapter->pdev->dev,
  550. "RX DMA page map failed\n");
  551. adapter->rx_dma_failed++;
  552. goto no_buffers;
  553. }
  554. }
  555. /*
  556. * Refresh the desc even if buffer_addrs
  557. * didn't change because each write-back
  558. * erases this info.
  559. */
  560. rx_desc->read.buffer_addr[j+1] =
  561. cpu_to_le64(ps_page->dma);
  562. }
  563. skb = netdev_alloc_skb_ip_align(netdev,
  564. adapter->rx_ps_bsize0);
  565. if (!skb) {
  566. adapter->alloc_rx_buff_failed++;
  567. break;
  568. }
  569. buffer_info->skb = skb;
  570. buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
  571. adapter->rx_ps_bsize0,
  572. DMA_FROM_DEVICE);
  573. if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
  574. dev_err(&pdev->dev, "RX DMA map failed\n");
  575. adapter->rx_dma_failed++;
  576. /* cleanup skb */
  577. dev_kfree_skb_any(skb);
  578. buffer_info->skb = NULL;
  579. break;
  580. }
  581. rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
  582. if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
  583. /*
  584. * Force memory writes to complete before letting h/w
  585. * know there are new descriptors to fetch. (Only
  586. * applicable for weak-ordered memory model archs,
  587. * such as IA-64).
  588. */
  589. wmb();
  590. writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
  591. }
  592. i++;
  593. if (i == rx_ring->count)
  594. i = 0;
  595. buffer_info = &rx_ring->buffer_info[i];
  596. }
  597. no_buffers:
  598. rx_ring->next_to_use = i;
  599. }
  600. /**
  601. * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
  602. * @adapter: address of board private structure
  603. * @cleaned_count: number of buffers to allocate this pass
  604. **/
  605. static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
  606. int cleaned_count)
  607. {
  608. struct net_device *netdev = adapter->netdev;
  609. struct pci_dev *pdev = adapter->pdev;
  610. struct e1000_rx_desc *rx_desc;
  611. struct e1000_ring *rx_ring = adapter->rx_ring;
  612. struct e1000_buffer *buffer_info;
  613. struct sk_buff *skb;
  614. unsigned int i;
  615. unsigned int bufsz = 256 - 16 /* for skb_reserve */;
  616. i = rx_ring->next_to_use;
  617. buffer_info = &rx_ring->buffer_info[i];
  618. while (cleaned_count--) {
  619. skb = buffer_info->skb;
  620. if (skb) {
  621. skb_trim(skb, 0);
  622. goto check_page;
  623. }
  624. skb = netdev_alloc_skb_ip_align(netdev, bufsz);
  625. if (unlikely(!skb)) {
  626. /* Better luck next round */
  627. adapter->alloc_rx_buff_failed++;
  628. break;
  629. }
  630. buffer_info->skb = skb;
  631. check_page:
  632. /* allocate a new page if necessary */
  633. if (!buffer_info->page) {
  634. buffer_info->page = alloc_page(GFP_ATOMIC);
  635. if (unlikely(!buffer_info->page)) {
  636. adapter->alloc_rx_buff_failed++;
  637. break;
  638. }
  639. }
  640. if (!buffer_info->dma)
  641. buffer_info->dma = dma_map_page(&pdev->dev,
  642. buffer_info->page, 0,
  643. PAGE_SIZE,
  644. DMA_FROM_DEVICE);
  645. rx_desc = E1000_RX_DESC(*rx_ring, i);
  646. rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  647. if (unlikely(++i == rx_ring->count))
  648. i = 0;
  649. buffer_info = &rx_ring->buffer_info[i];
  650. }
  651. if (likely(rx_ring->next_to_use != i)) {
  652. rx_ring->next_to_use = i;
  653. if (unlikely(i-- == 0))
  654. i = (rx_ring->count - 1);
  655. /* Force memory writes to complete before letting h/w
  656. * know there are new descriptors to fetch. (Only
  657. * applicable for weak-ordered memory model archs,
  658. * such as IA-64). */
  659. wmb();
  660. writel(i, adapter->hw.hw_addr + rx_ring->tail);
  661. }
  662. }
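/*
 * Annotation (not part of the original source): unlike the other two
 * allocation routines, which bump the tail register every
 * E1000_RX_BUFFER_WRITE descriptors inside the loop, this routine writes
 * the tail once after the loop, and the i-- above makes the written value
 * point at the last descriptor actually filled.
 */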
  663. /**
  664. * e1000_clean_rx_irq - Send received data up the network stack; legacy
  665. * @adapter: board private structure
  666. *
  667. * the return value indicates whether actual cleaning was done, there
  668. * is no guarantee that everything was cleaned
  669. **/
  670. static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
  671. int *work_done, int work_to_do)
  672. {
  673. struct net_device *netdev = adapter->netdev;
  674. struct pci_dev *pdev = adapter->pdev;
  675. struct e1000_hw *hw = &adapter->hw;
  676. struct e1000_ring *rx_ring = adapter->rx_ring;
  677. struct e1000_rx_desc *rx_desc, *next_rxd;
  678. struct e1000_buffer *buffer_info, *next_buffer;
  679. u32 length;
  680. unsigned int i;
  681. int cleaned_count = 0;
  682. bool cleaned = false;
  683. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  684. i = rx_ring->next_to_clean;
  685. rx_desc = E1000_RX_DESC(*rx_ring, i);
  686. buffer_info = &rx_ring->buffer_info[i];
  687. while (rx_desc->status & E1000_RXD_STAT_DD) {
  688. struct sk_buff *skb;
  689. u8 status;
  690. if (*work_done >= work_to_do)
  691. break;
  692. (*work_done)++;
  693. rmb(); /* read descriptor and rx_buffer_info after status DD */
  694. status = rx_desc->status;
  695. skb = buffer_info->skb;
  696. buffer_info->skb = NULL;
  697. prefetch(skb->data - NET_IP_ALIGN);
  698. i++;
  699. if (i == rx_ring->count)
  700. i = 0;
  701. next_rxd = E1000_RX_DESC(*rx_ring, i);
  702. prefetch(next_rxd);
  703. next_buffer = &rx_ring->buffer_info[i];
  704. cleaned = 1;
  705. cleaned_count++;
  706. dma_unmap_single(&pdev->dev,
  707. buffer_info->dma,
  708. adapter->rx_buffer_len,
  709. DMA_FROM_DEVICE);
  710. buffer_info->dma = 0;
  711. length = le16_to_cpu(rx_desc->length);
  712. /*
  713. * !EOP means multiple descriptors were used to store a single
  714. * packet, if that's the case we need to toss it. In fact, we
  715. * need to toss every packet with the EOP bit clear and the
  716. * next frame that _does_ have the EOP bit set, as it is by
  717. * definition only a frame fragment
  718. */
  719. if (unlikely(!(status & E1000_RXD_STAT_EOP)))
  720. adapter->flags2 |= FLAG2_IS_DISCARDING;
  721. if (adapter->flags2 & FLAG2_IS_DISCARDING) {
  722. /* All receives must fit into a single buffer */
  723. e_dbg("Receive packet consumed multiple buffers\n");
  724. /* recycle */
  725. buffer_info->skb = skb;
  726. if (status & E1000_RXD_STAT_EOP)
  727. adapter->flags2 &= ~FLAG2_IS_DISCARDING;
  728. goto next_desc;
  729. }
  730. if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
  731. /* recycle */
  732. buffer_info->skb = skb;
  733. goto next_desc;
  734. }
  735. /* adjust length to remove Ethernet CRC */
  736. if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
  737. length -= 4;
  738. total_rx_bytes += length;
  739. total_rx_packets++;
  740. /*
  741. * code added for copybreak, this should improve
  742. * performance for small packets with large amounts
  743. * of reassembly being done in the stack
  744. */
  745. if (length < copybreak) {
  746. struct sk_buff *new_skb =
  747. netdev_alloc_skb_ip_align(netdev, length);
  748. if (new_skb) {
  749. skb_copy_to_linear_data_offset(new_skb,
  750. -NET_IP_ALIGN,
  751. (skb->data -
  752. NET_IP_ALIGN),
  753. (length +
  754. NET_IP_ALIGN));
  755. /* save the skb in buffer_info as good */
  756. buffer_info->skb = skb;
  757. skb = new_skb;
  758. }
  759. /* else just continue with the old one */
  760. }
  761. /* end copybreak code */
  762. skb_put(skb, length);
  763. /* Receive Checksum Offload */
  764. e1000_rx_checksum(adapter,
  765. (u32)(status) |
  766. ((u32)(rx_desc->errors) << 24),
  767. le16_to_cpu(rx_desc->csum), skb);
  768. e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
  769. next_desc:
  770. rx_desc->status = 0;
  771. /* return some buffers to hardware, one at a time is too slow */
  772. if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
  773. adapter->alloc_rx_buf(adapter, cleaned_count);
  774. cleaned_count = 0;
  775. }
  776. /* use prefetched values */
  777. rx_desc = next_rxd;
  778. buffer_info = next_buffer;
  779. }
  780. rx_ring->next_to_clean = i;
  781. cleaned_count = e1000_desc_unused(rx_ring);
  782. if (cleaned_count)
  783. adapter->alloc_rx_buf(adapter, cleaned_count);
  784. adapter->total_rx_bytes += total_rx_bytes;
  785. adapter->total_rx_packets += total_rx_packets;
  786. netdev->stats.rx_bytes += total_rx_bytes;
  787. netdev->stats.rx_packets += total_rx_packets;
  788. return cleaned;
  789. }
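/*
 * Annotation (not part of the original source): in the copybreak path
 * above, frames shorter than 'copybreak' are copied into a freshly
 * allocated small skb while the original full-size receive buffer is
 * stashed back in buffer_info->skb, so e1000_alloc_rx_buffers() can remap
 * it instead of allocating a new one on the next refill pass.
 */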
  790. static void e1000_put_txbuf(struct e1000_adapter *adapter,
  791. struct e1000_buffer *buffer_info)
  792. {
  793. if (buffer_info->dma) {
  794. if (buffer_info->mapped_as_page)
  795. dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
  796. buffer_info->length, DMA_TO_DEVICE);
  797. else
  798. dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
  799. buffer_info->length, DMA_TO_DEVICE);
  800. buffer_info->dma = 0;
  801. }
  802. if (buffer_info->skb) {
  803. dev_kfree_skb_any(buffer_info->skb);
  804. buffer_info->skb = NULL;
  805. }
  806. buffer_info->time_stamp = 0;
  807. }
  808. static void e1000_print_hw_hang(struct work_struct *work)
  809. {
  810. struct e1000_adapter *adapter = container_of(work,
  811. struct e1000_adapter,
  812. print_hang_task);
  813. struct e1000_ring *tx_ring = adapter->tx_ring;
  814. unsigned int i = tx_ring->next_to_clean;
  815. unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
  816. struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
  817. struct e1000_hw *hw = &adapter->hw;
  818. u16 phy_status, phy_1000t_status, phy_ext_status;
  819. u16 pci_status;
  820. e1e_rphy(hw, PHY_STATUS, &phy_status);
  821. e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
  822. e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
  823. pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
  824. /* detected Hardware unit hang */
  825. e_err("Detected Hardware Unit Hang:\n"
  826. " TDH <%x>\n"
  827. " TDT <%x>\n"
  828. " next_to_use <%x>\n"
  829. " next_to_clean <%x>\n"
  830. "buffer_info[next_to_clean]:\n"
  831. " time_stamp <%lx>\n"
  832. " next_to_watch <%x>\n"
  833. " jiffies <%lx>\n"
  834. " next_to_watch.status <%x>\n"
  835. "MAC Status <%x>\n"
  836. "PHY Status <%x>\n"
  837. "PHY 1000BASE-T Status <%x>\n"
  838. "PHY Extended Status <%x>\n"
  839. "PCI Status <%x>\n",
  840. readl(adapter->hw.hw_addr + tx_ring->head),
  841. readl(adapter->hw.hw_addr + tx_ring->tail),
  842. tx_ring->next_to_use,
  843. tx_ring->next_to_clean,
  844. tx_ring->buffer_info[eop].time_stamp,
  845. eop,
  846. jiffies,
  847. eop_desc->upper.fields.status,
  848. er32(STATUS),
  849. phy_status,
  850. phy_1000t_status,
  851. phy_ext_status,
  852. pci_status);
  853. }
  854. /**
  855. * e1000_clean_tx_irq - Reclaim resources after transmit completes
  856. * @adapter: board private structure
  857. *
  858. * the return value indicates whether actual cleaning was done, there
  859. * is no guarantee that everything was cleaned
  860. **/
  861. static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
  862. {
  863. struct net_device *netdev = adapter->netdev;
  864. struct e1000_hw *hw = &adapter->hw;
  865. struct e1000_ring *tx_ring = adapter->tx_ring;
  866. struct e1000_tx_desc *tx_desc, *eop_desc;
  867. struct e1000_buffer *buffer_info;
  868. unsigned int i, eop;
  869. unsigned int count = 0;
  870. unsigned int total_tx_bytes = 0, total_tx_packets = 0;
  871. i = tx_ring->next_to_clean;
  872. eop = tx_ring->buffer_info[i].next_to_watch;
  873. eop_desc = E1000_TX_DESC(*tx_ring, eop);
  874. while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
  875. (count < tx_ring->count)) {
  876. bool cleaned = false;
  877. rmb(); /* read buffer_info after eop_desc */
  878. for (; !cleaned; count++) {
  879. tx_desc = E1000_TX_DESC(*tx_ring, i);
  880. buffer_info = &tx_ring->buffer_info[i];
  881. cleaned = (i == eop);
  882. if (cleaned) {
  883. total_tx_packets += buffer_info->segs;
  884. total_tx_bytes += buffer_info->bytecount;
  885. }
  886. e1000_put_txbuf(adapter, buffer_info);
  887. tx_desc->upper.data = 0;
  888. i++;
  889. if (i == tx_ring->count)
  890. i = 0;
  891. }
  892. if (i == tx_ring->next_to_use)
  893. break;
  894. eop = tx_ring->buffer_info[i].next_to_watch;
  895. eop_desc = E1000_TX_DESC(*tx_ring, eop);
  896. }
  897. tx_ring->next_to_clean = i;
  898. #define TX_WAKE_THRESHOLD 32
  899. if (count && netif_carrier_ok(netdev) &&
  900. e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
  901. /* Make sure that anybody stopping the queue after this
  902. * sees the new next_to_clean.
  903. */
  904. smp_mb();
  905. if (netif_queue_stopped(netdev) &&
  906. !(test_bit(__E1000_DOWN, &adapter->state))) {
  907. netif_wake_queue(netdev);
  908. ++adapter->restart_queue;
  909. }
  910. }
  911. if (adapter->detect_tx_hung) {
  912. /*
  913. * Detect a transmit hang in hardware, this serializes the
  914. * check with the clearing of time_stamp and movement of i
  915. */
  916. adapter->detect_tx_hung = 0;
  917. if (tx_ring->buffer_info[i].time_stamp &&
  918. time_after(jiffies, tx_ring->buffer_info[i].time_stamp
  919. + (adapter->tx_timeout_factor * HZ)) &&
  920. !(er32(STATUS) & E1000_STATUS_TXOFF)) {
  921. schedule_work(&adapter->print_hang_task);
  922. netif_stop_queue(netdev);
  923. }
  924. }
  925. adapter->total_tx_bytes += total_tx_bytes;
  926. adapter->total_tx_packets += total_tx_packets;
  927. netdev->stats.tx_bytes += total_tx_bytes;
  928. netdev->stats.tx_packets += total_tx_packets;
  929. return (count < tx_ring->count);
  930. }
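/*
 * Annotation (not part of the original source): the return value
 * (count < tx_ring->count) is true when the routine ran out of completed
 * descriptors before exhausting its per-call budget of one full ring;
 * e1000_intr_msix_tx() below uses a false return to re-raise the Tx
 * interrupt via ICS so the remaining work is not lost.
 */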
  931. /**
  932. * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
  933. * @adapter: board private structure
  934. *
  935. * the return value indicates whether actual cleaning was done, there
  936. * is no guarantee that everything was cleaned
  937. **/
  938. static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
  939. int *work_done, int work_to_do)
  940. {
  941. struct e1000_hw *hw = &adapter->hw;
  942. union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
  943. struct net_device *netdev = adapter->netdev;
  944. struct pci_dev *pdev = adapter->pdev;
  945. struct e1000_ring *rx_ring = adapter->rx_ring;
  946. struct e1000_buffer *buffer_info, *next_buffer;
  947. struct e1000_ps_page *ps_page;
  948. struct sk_buff *skb;
  949. unsigned int i, j;
  950. u32 length, staterr;
  951. int cleaned_count = 0;
  952. bool cleaned = false;
  953. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  954. i = rx_ring->next_to_clean;
  955. rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  956. staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  957. buffer_info = &rx_ring->buffer_info[i];
  958. while (staterr & E1000_RXD_STAT_DD) {
  959. if (*work_done >= work_to_do)
  960. break;
  961. (*work_done)++;
  962. skb = buffer_info->skb;
  963. rmb(); /* read descriptor and rx_buffer_info after status DD */
  964. /* in the packet split case this is header only */
  965. prefetch(skb->data - NET_IP_ALIGN);
  966. i++;
  967. if (i == rx_ring->count)
  968. i = 0;
  969. next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
  970. prefetch(next_rxd);
  971. next_buffer = &rx_ring->buffer_info[i];
  972. cleaned = 1;
  973. cleaned_count++;
  974. dma_unmap_single(&pdev->dev, buffer_info->dma,
  975. adapter->rx_ps_bsize0,
  976. DMA_FROM_DEVICE);
  977. buffer_info->dma = 0;
  978. /* see !EOP comment in other rx routine */
  979. if (!(staterr & E1000_RXD_STAT_EOP))
  980. adapter->flags2 |= FLAG2_IS_DISCARDING;
  981. if (adapter->flags2 & FLAG2_IS_DISCARDING) {
  982. e_dbg("Packet Split buffers didn't pick up the full "
  983. "packet\n");
  984. dev_kfree_skb_irq(skb);
  985. if (staterr & E1000_RXD_STAT_EOP)
  986. adapter->flags2 &= ~FLAG2_IS_DISCARDING;
  987. goto next_desc;
  988. }
  989. if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
  990. dev_kfree_skb_irq(skb);
  991. goto next_desc;
  992. }
  993. length = le16_to_cpu(rx_desc->wb.middle.length0);
  994. if (!length) {
  995. e_dbg("Last part of the packet spanning multiple "
  996. "descriptors\n");
  997. dev_kfree_skb_irq(skb);
  998. goto next_desc;
  999. }
  1000. /* Good Receive */
  1001. skb_put(skb, length);
  1002. {
  1003. /*
  1004. * this looks ugly, but it seems compiler issues make it
  1005. * more efficient than reusing j
  1006. */
  1007. int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
  1008. /*
  1009. * page alloc/put takes too long and effects small packet
  1010. * throughput, so unsplit small packets and save the alloc/put
  1011. * only valid in softirq (napi) context to call kmap_*
  1012. */
  1013. if (l1 && (l1 <= copybreak) &&
  1014. ((length + l1) <= adapter->rx_ps_bsize0)) {
  1015. u8 *vaddr;
  1016. ps_page = &buffer_info->ps_pages[0];
  1017. /*
  1018. * there is no documentation about how to call
  1019. * kmap_atomic, so we can't hold the mapping
  1020. * very long
  1021. */
  1022. dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
  1023. PAGE_SIZE, DMA_FROM_DEVICE);
  1024. vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
  1025. memcpy(skb_tail_pointer(skb), vaddr, l1);
  1026. kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
  1027. dma_sync_single_for_device(&pdev->dev, ps_page->dma,
  1028. PAGE_SIZE, DMA_FROM_DEVICE);
  1029. /* remove the CRC */
  1030. if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
  1031. l1 -= 4;
  1032. skb_put(skb, l1);
  1033. goto copydone;
  1034. } /* if */
  1035. }
  1036. for (j = 0; j < PS_PAGE_BUFFERS; j++) {
  1037. length = le16_to_cpu(rx_desc->wb.upper.length[j]);
  1038. if (!length)
  1039. break;
  1040. ps_page = &buffer_info->ps_pages[j];
  1041. dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
  1042. DMA_FROM_DEVICE);
  1043. ps_page->dma = 0;
  1044. skb_fill_page_desc(skb, j, ps_page->page, 0, length);
  1045. ps_page->page = NULL;
  1046. skb->len += length;
  1047. skb->data_len += length;
  1048. skb->truesize += length;
  1049. }
  1050. /* strip the ethernet crc, problem is we're using pages now so
  1051. * this whole operation can get a little cpu intensive
  1052. */
  1053. if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
  1054. pskb_trim(skb, skb->len - 4);
  1055. copydone:
  1056. total_rx_bytes += skb->len;
  1057. total_rx_packets++;
  1058. e1000_rx_checksum(adapter, staterr, le16_to_cpu(
  1059. rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
  1060. if (rx_desc->wb.upper.header_status &
  1061. cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
  1062. adapter->rx_hdr_split++;
  1063. e1000_receive_skb(adapter, netdev, skb,
  1064. staterr, rx_desc->wb.middle.vlan);
  1065. next_desc:
  1066. rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
  1067. buffer_info->skb = NULL;
  1068. /* return some buffers to hardware, one at a time is too slow */
  1069. if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
  1070. adapter->alloc_rx_buf(adapter, cleaned_count);
  1071. cleaned_count = 0;
  1072. }
  1073. /* use prefetched values */
  1074. rx_desc = next_rxd;
  1075. buffer_info = next_buffer;
  1076. staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  1077. }
  1078. rx_ring->next_to_clean = i;
  1079. cleaned_count = e1000_desc_unused(rx_ring);
  1080. if (cleaned_count)
  1081. adapter->alloc_rx_buf(adapter, cleaned_count);
  1082. adapter->total_rx_bytes += total_rx_bytes;
  1083. adapter->total_rx_packets += total_rx_packets;
  1084. netdev->stats.rx_bytes += total_rx_bytes;
  1085. netdev->stats.rx_packets += total_rx_packets;
  1086. return cleaned;
  1087. }
  1088. /**
  1089. * e1000_consume_page - helper function
  1090. **/
  1091. static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
  1092. u16 length)
  1093. {
  1094. bi->page = NULL;
  1095. skb->len += length;
  1096. skb->data_len += length;
  1097. skb->truesize += length;
  1098. }
  1099. /**
  1100. * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
  1101. * @adapter: board private structure
  1102. *
  1103. * the return value indicates whether actual cleaning was done, there
  1104. * is no guarantee that everything was cleaned
  1105. **/
  1106. static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
  1107. int *work_done, int work_to_do)
  1108. {
  1109. struct net_device *netdev = adapter->netdev;
  1110. struct pci_dev *pdev = adapter->pdev;
  1111. struct e1000_ring *rx_ring = adapter->rx_ring;
  1112. struct e1000_rx_desc *rx_desc, *next_rxd;
  1113. struct e1000_buffer *buffer_info, *next_buffer;
  1114. u32 length;
  1115. unsigned int i;
  1116. int cleaned_count = 0;
  1117. bool cleaned = false;
  1118. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  1119. i = rx_ring->next_to_clean;
  1120. rx_desc = E1000_RX_DESC(*rx_ring, i);
  1121. buffer_info = &rx_ring->buffer_info[i];
  1122. while (rx_desc->status & E1000_RXD_STAT_DD) {
  1123. struct sk_buff *skb;
  1124. u8 status;
  1125. if (*work_done >= work_to_do)
  1126. break;
  1127. (*work_done)++;
  1128. rmb(); /* read descriptor and rx_buffer_info after status DD */
  1129. status = rx_desc->status;
  1130. skb = buffer_info->skb;
  1131. buffer_info->skb = NULL;
  1132. ++i;
  1133. if (i == rx_ring->count)
  1134. i = 0;
  1135. next_rxd = E1000_RX_DESC(*rx_ring, i);
  1136. prefetch(next_rxd);
  1137. next_buffer = &rx_ring->buffer_info[i];
  1138. cleaned = true;
  1139. cleaned_count++;
  1140. dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
  1141. DMA_FROM_DEVICE);
  1142. buffer_info->dma = 0;
  1143. length = le16_to_cpu(rx_desc->length);
  1144. /* errors is only valid for DD + EOP descriptors */
  1145. if (unlikely((status & E1000_RXD_STAT_EOP) &&
  1146. (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
  1147. /* recycle both page and skb */
  1148. buffer_info->skb = skb;
  1149. /* an error means any chain goes out the window
  1150. * too */
  1151. if (rx_ring->rx_skb_top)
  1152. dev_kfree_skb(rx_ring->rx_skb_top);
  1153. rx_ring->rx_skb_top = NULL;
  1154. goto next_desc;
  1155. }
  1156. #define rxtop rx_ring->rx_skb_top
  1157. if (!(status & E1000_RXD_STAT_EOP)) {
  1158. /* this descriptor is only the beginning (or middle) */
  1159. if (!rxtop) {
  1160. /* this is the beginning of a chain */
  1161. rxtop = skb;
  1162. skb_fill_page_desc(rxtop, 0, buffer_info->page,
  1163. 0, length);
  1164. } else {
  1165. /* this is the middle of a chain */
  1166. skb_fill_page_desc(rxtop,
  1167. skb_shinfo(rxtop)->nr_frags,
  1168. buffer_info->page, 0, length);
  1169. /* re-use the skb, only consumed the page */
  1170. buffer_info->skb = skb;
  1171. }
  1172. e1000_consume_page(buffer_info, rxtop, length);
  1173. goto next_desc;
  1174. } else {
  1175. if (rxtop) {
  1176. /* end of the chain */
  1177. skb_fill_page_desc(rxtop,
  1178. skb_shinfo(rxtop)->nr_frags,
  1179. buffer_info->page, 0, length);
  1180. /* re-use the current skb, we only consumed the
  1181. * page */
  1182. buffer_info->skb = skb;
  1183. skb = rxtop;
  1184. rxtop = NULL;
  1185. e1000_consume_page(buffer_info, skb, length);
  1186. } else {
  1187. /* no chain, got EOP, this buf is the packet
  1188. * copybreak to save the put_page/alloc_page */
  1189. if (length <= copybreak &&
  1190. skb_tailroom(skb) >= length) {
  1191. u8 *vaddr;
  1192. vaddr = kmap_atomic(buffer_info->page,
  1193. KM_SKB_DATA_SOFTIRQ);
  1194. memcpy(skb_tail_pointer(skb), vaddr,
  1195. length);
  1196. kunmap_atomic(vaddr,
  1197. KM_SKB_DATA_SOFTIRQ);
  1198. /* re-use the page, so don't erase
  1199. * buffer_info->page */
  1200. skb_put(skb, length);
  1201. } else {
  1202. skb_fill_page_desc(skb, 0,
  1203. buffer_info->page, 0,
  1204. length);
  1205. e1000_consume_page(buffer_info, skb,
  1206. length);
  1207. }
  1208. }
  1209. }
  1210. /* Receive Checksum Offload XXX recompute due to CRC strip? */
  1211. e1000_rx_checksum(adapter,
  1212. (u32)(status) |
  1213. ((u32)(rx_desc->errors) << 24),
  1214. le16_to_cpu(rx_desc->csum), skb);
  1215. /* probably a little skewed due to removing CRC */
  1216. total_rx_bytes += skb->len;
  1217. total_rx_packets++;
  1218. /* eth type trans needs skb->data to point to something */
  1219. if (!pskb_may_pull(skb, ETH_HLEN)) {
  1220. e_err("pskb_may_pull failed.\n");
  1221. dev_kfree_skb(skb);
  1222. goto next_desc;
  1223. }
  1224. e1000_receive_skb(adapter, netdev, skb, status,
  1225. rx_desc->special);
  1226. next_desc:
  1227. rx_desc->status = 0;
  1228. /* return some buffers to hardware, one at a time is too slow */
  1229. if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
  1230. adapter->alloc_rx_buf(adapter, cleaned_count);
  1231. cleaned_count = 0;
  1232. }
  1233. /* use prefetched values */
  1234. rx_desc = next_rxd;
  1235. buffer_info = next_buffer;
  1236. }
  1237. rx_ring->next_to_clean = i;
  1238. cleaned_count = e1000_desc_unused(rx_ring);
  1239. if (cleaned_count)
  1240. adapter->alloc_rx_buf(adapter, cleaned_count);
  1241. adapter->total_rx_bytes += total_rx_bytes;
  1242. adapter->total_rx_packets += total_rx_packets;
  1243. netdev->stats.rx_bytes += total_rx_bytes;
  1244. netdev->stats.rx_packets += total_rx_packets;
  1245. return cleaned;
  1246. }
  1247. /**
  1248. * e1000_clean_rx_ring - Free Rx Buffers per Queue
  1249. * @adapter: board private structure
  1250. **/
  1251. static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
  1252. {
  1253. struct e1000_ring *rx_ring = adapter->rx_ring;
  1254. struct e1000_buffer *buffer_info;
  1255. struct e1000_ps_page *ps_page;
  1256. struct pci_dev *pdev = adapter->pdev;
  1257. unsigned int i, j;
  1258. /* Free all the Rx ring sk_buffs */
  1259. for (i = 0; i < rx_ring->count; i++) {
  1260. buffer_info = &rx_ring->buffer_info[i];
  1261. if (buffer_info->dma) {
  1262. if (adapter->clean_rx == e1000_clean_rx_irq)
  1263. dma_unmap_single(&pdev->dev, buffer_info->dma,
  1264. adapter->rx_buffer_len,
  1265. DMA_FROM_DEVICE);
  1266. else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
  1267. dma_unmap_page(&pdev->dev, buffer_info->dma,
  1268. PAGE_SIZE,
  1269. DMA_FROM_DEVICE);
  1270. else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
  1271. dma_unmap_single(&pdev->dev, buffer_info->dma,
  1272. adapter->rx_ps_bsize0,
  1273. DMA_FROM_DEVICE);
  1274. buffer_info->dma = 0;
  1275. }
  1276. if (buffer_info->page) {
  1277. put_page(buffer_info->page);
  1278. buffer_info->page = NULL;
  1279. }
  1280. if (buffer_info->skb) {
  1281. dev_kfree_skb(buffer_info->skb);
  1282. buffer_info->skb = NULL;
  1283. }
  1284. for (j = 0; j < PS_PAGE_BUFFERS; j++) {
  1285. ps_page = &buffer_info->ps_pages[j];
  1286. if (!ps_page->page)
  1287. break;
  1288. dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
  1289. DMA_FROM_DEVICE);
  1290. ps_page->dma = 0;
  1291. put_page(ps_page->page);
  1292. ps_page->page = NULL;
  1293. }
  1294. }
  1295. /* there also may be some cached data from a chained receive */
  1296. if (rx_ring->rx_skb_top) {
  1297. dev_kfree_skb(rx_ring->rx_skb_top);
  1298. rx_ring->rx_skb_top = NULL;
  1299. }
  1300. /* Zero out the descriptor ring */
  1301. memset(rx_ring->desc, 0, rx_ring->size);
  1302. rx_ring->next_to_clean = 0;
  1303. rx_ring->next_to_use = 0;
  1304. adapter->flags2 &= ~FLAG2_IS_DISCARDING;
  1305. writel(0, adapter->hw.hw_addr + rx_ring->head);
  1306. writel(0, adapter->hw.hw_addr + rx_ring->tail);
  1307. }
  1308. static void e1000e_downshift_workaround(struct work_struct *work)
  1309. {
  1310. struct e1000_adapter *adapter = container_of(work,
  1311. struct e1000_adapter, downshift_task);
  1312. e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
  1313. }
  1314. /**
  1315. * e1000_intr_msi - Interrupt Handler
  1316. * @irq: interrupt number
  1317. * @data: pointer to a network interface device structure
  1318. **/
  1319. static irqreturn_t e1000_intr_msi(int irq, void *data)
  1320. {
  1321. struct net_device *netdev = data;
  1322. struct e1000_adapter *adapter = netdev_priv(netdev);
  1323. struct e1000_hw *hw = &adapter->hw;
  1324. u32 icr = er32(ICR);
  1325. /*
  1326. * read ICR disables interrupts using IAM
  1327. */
  1328. if (icr & E1000_ICR_LSC) {
  1329. hw->mac.get_link_status = 1;
  1330. /*
  1331. * ICH8 workaround-- Call gig speed drop workaround on cable
  1332. * disconnect (LSC) before accessing any PHY registers
  1333. */
  1334. if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
  1335. (!(er32(STATUS) & E1000_STATUS_LU)))
  1336. schedule_work(&adapter->downshift_task);
  1337. /*
  1338. * 80003ES2LAN workaround-- For packet buffer work-around on
  1339. * link down event; disable receives here in the ISR and reset
  1340. * adapter in watchdog
  1341. */
  1342. if (netif_carrier_ok(netdev) &&
  1343. adapter->flags & FLAG_RX_NEEDS_RESTART) {
  1344. /* disable receives */
  1345. u32 rctl = er32(RCTL);
  1346. ew32(RCTL, rctl & ~E1000_RCTL_EN);
  1347. adapter->flags |= FLAG_RX_RESTART_NOW;
  1348. }
  1349. /* guard against interrupt when we're going down */
  1350. if (!test_bit(__E1000_DOWN, &adapter->state))
  1351. mod_timer(&adapter->watchdog_timer, jiffies + 1);
  1352. }
  1353. if (napi_schedule_prep(&adapter->napi)) {
  1354. adapter->total_tx_bytes = 0;
  1355. adapter->total_tx_packets = 0;
  1356. adapter->total_rx_bytes = 0;
  1357. adapter->total_rx_packets = 0;
  1358. __napi_schedule(&adapter->napi);
  1359. }
  1360. return IRQ_HANDLED;
  1361. }
  1362. /**
  1363. * e1000_intr - Interrupt Handler
  1364. * @irq: interrupt number
  1365. * @data: pointer to a network interface device structure
  1366. **/
  1367. static irqreturn_t e1000_intr(int irq, void *data)
  1368. {
  1369. struct net_device *netdev = data;
  1370. struct e1000_adapter *adapter = netdev_priv(netdev);
  1371. struct e1000_hw *hw = &adapter->hw;
  1372. u32 rctl, icr = er32(ICR);
  1373. if (!icr || test_bit(__E1000_DOWN, &adapter->state))
  1374. return IRQ_NONE; /* Not our interrupt */
  1375. /*
  1376. * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
  1377. * not set, then the adapter didn't send an interrupt
  1378. */
  1379. if (!(icr & E1000_ICR_INT_ASSERTED))
  1380. return IRQ_NONE;
  1381. /*
  1382. * Interrupt Auto-Mask...upon reading ICR,
  1383. * interrupts are masked. No need for the
  1384. * IMC write
  1385. */
  1386. if (icr & E1000_ICR_LSC) {
  1387. hw->mac.get_link_status = 1;
  1388. /*
  1389. * ICH8 workaround-- Call gig speed drop workaround on cable
  1390. * disconnect (LSC) before accessing any PHY registers
  1391. */
  1392. if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
  1393. (!(er32(STATUS) & E1000_STATUS_LU)))
  1394. schedule_work(&adapter->downshift_task);
  1395. /*
  1396. * 80003ES2LAN workaround--
  1397. * For packet buffer work-around on link down event;
  1398. * disable receives here in the ISR and
  1399. * reset adapter in watchdog
  1400. */
  1401. if (netif_carrier_ok(netdev) &&
  1402. (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
  1403. /* disable receives */
  1404. rctl = er32(RCTL);
  1405. ew32(RCTL, rctl & ~E1000_RCTL_EN);
  1406. adapter->flags |= FLAG_RX_RESTART_NOW;
  1407. }
  1408. /* guard against interrupt when we're going down */
  1409. if (!test_bit(__E1000_DOWN, &adapter->state))
  1410. mod_timer(&adapter->watchdog_timer, jiffies + 1);
  1411. }
  1412. if (napi_schedule_prep(&adapter->napi)) {
  1413. adapter->total_tx_bytes = 0;
  1414. adapter->total_tx_packets = 0;
  1415. adapter->total_rx_bytes = 0;
  1416. adapter->total_rx_packets = 0;
  1417. __napi_schedule(&adapter->napi);
  1418. }
  1419. return IRQ_HANDLED;
  1420. }
  1421. static irqreturn_t e1000_msix_other(int irq, void *data)
  1422. {
  1423. struct net_device *netdev = data;
  1424. struct e1000_adapter *adapter = netdev_priv(netdev);
  1425. struct e1000_hw *hw = &adapter->hw;
  1426. u32 icr = er32(ICR);
  1427. if (!(icr & E1000_ICR_INT_ASSERTED)) {
  1428. if (!test_bit(__E1000_DOWN, &adapter->state))
  1429. ew32(IMS, E1000_IMS_OTHER);
  1430. return IRQ_NONE;
  1431. }
  1432. if (icr & adapter->eiac_mask)
  1433. ew32(ICS, (icr & adapter->eiac_mask));
  1434. if (icr & E1000_ICR_OTHER) {
  1435. if (!(icr & E1000_ICR_LSC))
  1436. goto no_link_interrupt;
  1437. hw->mac.get_link_status = 1;
  1438. /* guard against interrupt when we're going down */
  1439. if (!test_bit(__E1000_DOWN, &adapter->state))
  1440. mod_timer(&adapter->watchdog_timer, jiffies + 1);
  1441. }
  1442. no_link_interrupt:
  1443. if (!test_bit(__E1000_DOWN, &adapter->state))
  1444. ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
  1445. return IRQ_HANDLED;
  1446. }
  1447. static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
  1448. {
  1449. struct net_device *netdev = data;
  1450. struct e1000_adapter *adapter = netdev_priv(netdev);
  1451. struct e1000_hw *hw = &adapter->hw;
  1452. struct e1000_ring *tx_ring = adapter->tx_ring;
  1453. adapter->total_tx_bytes = 0;
  1454. adapter->total_tx_packets = 0;
  1455. if (!e1000_clean_tx_irq(adapter))
  1456. /* Ring was not completely cleaned, so fire another interrupt */
  1457. ew32(ICS, tx_ring->ims_val);
  1458. return IRQ_HANDLED;
  1459. }
  1460. static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
  1461. {
  1462. struct net_device *netdev = data;
  1463. struct e1000_adapter *adapter = netdev_priv(netdev);
  1464. /* Write the ITR value calculated at the end of the
  1465. * previous interrupt.
  1466. */
  1467. if (adapter->rx_ring->set_itr) {
  1468. writel(1000000000 / (adapter->rx_ring->itr_val * 256),
  1469. adapter->hw.hw_addr + adapter->rx_ring->itr_register);
  1470. adapter->rx_ring->set_itr = 0;
  1471. }
  1472. if (napi_schedule_prep(&adapter->napi)) {
  1473. adapter->total_rx_bytes = 0;
  1474. adapter->total_rx_packets = 0;
  1475. __napi_schedule(&adapter->napi);
  1476. }
  1477. return IRQ_HANDLED;
  1478. }
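/*
 * Worked example (annotation, not part of the original source), assuming
 * itr_val is expressed in interrupts per second: the write above converts
 * it to the ITR register's 256 ns units, e.g. itr_val = 20000 gives
 * 1000000000 / (20000 * 256) = 195 after integer division, an interval of
 * 195 * 256 ns (about 50 us), which matches 1/20000 s between interrupts.
 */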
  1479. /**
  1480. * e1000_configure_msix - Configure MSI-X hardware
  1481. *
  1482. * e1000_configure_msix sets up the hardware to properly
  1483. * generate MSI-X interrupts.
  1484. **/
  1485. static void e1000_configure_msix(struct e1000_adapter *adapter)
  1486. {
  1487. struct e1000_hw *hw = &adapter->hw;
  1488. struct e1000_ring *rx_ring = adapter->rx_ring;
  1489. struct e1000_ring *tx_ring = adapter->tx_ring;
  1490. int vector = 0;
  1491. u32 ctrl_ext, ivar = 0;
  1492. adapter->eiac_mask = 0;
  1493. /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
  1494. if (hw->mac.type == e1000_82574) {
  1495. u32 rfctl = er32(RFCTL);
  1496. rfctl |= E1000_RFCTL_ACK_DIS;
  1497. ew32(RFCTL, rfctl);
  1498. }
