
/kernel/2.6.32_froyo_photon_nightly/drivers/net/e1000e/netdev.c

http://photon-android.googlecode.com/
C | 5445 lines | 3562 code | 798 blank | 1085 comment | 594 complexity | MD5 74d68e1ac47466efebe5b32d7a930a09
License: GPL-2.0 (per the file header below)

Large files are truncated; only the beginning of the file is shown below.

/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/aer.h>

#include "e1000.h"

#define DRV_VERSION "1.0.2-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
};

#ifdef DEBUG
/**
 * e1000_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->adapter->netdev->name;
}
#endif

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(pdev, ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
			     16 /* for skb_reserve */ -
			     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("%s: Receive packet consumed multiple buffers\n",
			      netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
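
/**
 * e1000_put_txbuf - unmap and free an already-transmitted Tx buffer
 * @adapter: board private structure
 * @buffer_info: buffer to release
 *
 * (Descriptive comment added for this listing; the function below unmaps the
 * skb's DMA mappings, frees the skb and clears the buffer's timestamp.)
 **/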
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
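
/**
 * e1000_print_tx_hang - dump Tx ring state when a transmit hang is detected
 * @adapter: board private structure
 *
 * (Descriptive comment added for this listing.)
 **/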
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* detected Tx unit hang */
	e_err("Detected Tx Unit Hang:\n"
	      " TDH <%x>\n"
	      " TDT <%x>\n"
	      " next_to_use <%x>\n"
	      " next_to_clean <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      " time_stamp <%lx>\n"
	      " next_to_watch <%x>\n"
	      " jiffies <%lx>\n"
	      " next_to_watch.status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	return (count < tx_ring->count);
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("%s: Packet Split buffers didn't pick up the "
			      "full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("%s: Last part of the packet spanning multiple "
			      "descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* remove the CRC */
			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
				l1 -= 4;

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window
			 * too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							   KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
					       PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
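
/**
 * e1000e_downshift_workaround - run the ICH8 gigabit downshift workaround
 * @work: work_struct embedded in the adapter structure
 *
 * (Descriptive comment added for this listing; the handler is scheduled from
 * the interrupt path so the PHY workaround runs in process context.)
 **/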
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked. No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
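
/**
 * e1000_msix_other - MSI-X handler for link changes and other causes
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * (Descriptive comment added for this listing.)
 **/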
static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
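
/**
 * e1000_intr_msix_tx - MSI-X handler for the Tx queue
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * (Descriptive comment added for this listing.)
 **/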
static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
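
/**
 * e1000_intr_msix_rx - MSI-X handler for the Rx queue
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * (Descriptive comment added for this listing.)
 **/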
static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
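
/**
 * e1000e_reset_interrupt_capability - release MSI-X/MSI resources
 * @adapter: board private structure
 *
 * (Descriptive comment added for this listing; disables MSI-X or MSI,
 * whichever was enabled, and frees the MSI-X entry array.)
 **/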
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}

	return;
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int numvecs, i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			numvecs = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(numvecs,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < numvecs; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      numvecs);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts. "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts. Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	return;
}

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
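
/**
 * e1000_free_irq - free the interrupt(s) requested by e1000_request_irq
 * @adapter: board private structure
 *
 * (Descriptive comment added for this listing; frees the Rx, Tx and Other
 * Causes MSI-X vectors when MSI-X is in use, otherwise the single IRQ.)
 **/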
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate DMA-coherent memory for a descriptor ring
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_in
