
/drivers/e1000e-0.4.1.7-2010-02-25.polling.patch

https://github.com/bhesmans/click
Possible License(s): GPL-2.0, BSD-3-Clause
diff -ru e1000e-0.4.1.7/src/netdev.c e1000e-0.4.1.7-p/src/netdev.c
--- e1000e-0.4.1.7/src/netdev.c	2008-06-23 09:27:33.000000000 -0700
+++ e1000e-0.4.1.7-p/src/netdev.c	2010-02-25 18:47:07.000000000 -0800
@@ -2319,6 +2319,7 @@
 static void e1000_irq_enable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
+
 #ifdef CONFIG_E1000E_MSIX
 	if (adapter->msix_entries) {
@@ -3173,7 +3174,8 @@
 	if (adapter->msix_entries)
 		e1000_configure_msix(adapter);
 #endif /* CONFIG_E1000E_MSIX */
-	e1000_irq_enable(adapter);
+	if (!adapter->netdev->polling)
+		e1000_irq_enable(adapter);
 	/* fire a link change interrupt to start the watchdog */
 	ew32(ICS, E1000_ICS_LSC);
@@ -5113,6 +5115,632 @@
 	       (pba_num >> 8), (pba_num & 0xff));
 }
+/* Click polling extension */
+static struct sk_buff *__e1000_rx_poll(struct net_device *netdev, int *want)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	struct sk_buff* skb_head = NULL, **skbs;
+	int got = 0;
+
+	skbs = &skb_head;
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (got >= *want)
+			break;
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		skb->dev = netdev;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = 1;
+		cleaned_count++;
+		pci_unmap_single(pdev, buffer_info->dma,
+				 adapter->rx_buffer_len,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet, also make sure the frame isn't just CRC only */
+		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+			/* All receives must fit into a single buffer */
+			e_dbg("Receive packet consumed multiple buffers\n");
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		/* end copybreak code */
+		skb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb_pull(skb, netdev->hard_header_len);
+		*skbs = skb;
+		skbs = &(*skbs)->next;
+		*skbs = NULL;
+		netdev->last_rx = jiffies;
+		got++;
+next_desc:
+		rx_desc->status = 0;
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = e1000_desc_unused(rx_ring);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+
+	*want = got;
+
+	return skb_head;
+}
+
+/* Click polling extension */
+static struct sk_buff *__e1000_rx_poll_ps(struct net_device *netdev, int *want)
+{
+	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	struct e1000_ps_page *ps_page;
+	struct sk_buff *skb;
+	unsigned int i, j;
+	u32 length, staterr;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	int got = 0;
+	struct sk_buff* skb_head = NULL, **skbs;
+
+	skbs = &skb_head;
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		if (got >= *want)
+			break;
+		skb = buffer_info->skb;
+
+		/* in the packet split case this is header only */
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		pci_unmap_single(pdev, buffer_info->dma,
+				 adapter->rx_ps_bsize0,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		if (!(staterr & E1000_RXD_STAT_EOP)) {
+			e_dbg("Packet Split buffers didn't pick up the full"
+			      " packet\n");
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+		if (!length) {
+			e_dbg("Last part of the packet spanning multiple"
+			      " descriptors\n");
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		/* Good Receive */
+		skb_put(skb, length);
+
+#ifdef CONFIG_E1000E_NAPI
+		{
+		/*
+		 * this looks ugly, but it seems compiler issues make it
+		 * more efficient than reusing j
+		 */
+		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
+
+		/*
+		 * page alloc/put takes too long and effects small packet
+		 * throughput, so unsplit small packets and save the alloc/put
+		 * only valid in softirq (napi) context to call kmap_*
+		 */
+		if (l1 && (l1 <= copybreak) &&
+		    ((length + l1) <= adapter->rx_ps_bsize0)) {
+			u8 *vaddr;
+
+			ps_page = &buffer_info->ps_pages[0];
+
+			/*
+			 * there is no documentation about how to call
+			 * kmap_atomic, so we can't hold the mapping
+			 * very long
+			 */
+			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
+						    PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
+			memcpy(skb_tail_pointer(skb), vaddr, l1);
+			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+			pci_dma_sync_single_for_device(pdev, ps_page->dma,
+						       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			skb_put(skb, l1);
+			goto copydone;
+		} /* if */
+		}
+#endif
+
+		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
+			if (!length)
+				break;
+
+			ps_page = &buffer_info->ps_pages[j];
+			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+			ps_page->dma = 0;
+			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
+			ps_page->page = NULL;
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+		}
+
+#ifdef CONFIG_E1000E_NAPI
+copydone:
+#endif
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
+			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
+
+		if (rx_desc->wb.upper.header_status &
+		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+			adapter->rx_hdr_split++;
+
+		skb_pull(skb, netdev->hard_header_len);
+		*skbs = skb;
+		skbs = &(*skbs)->next;
+		*skbs = NULL;
+		netdev->last_rx = jiffies;
+		got++;
+
+next_desc:
+		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
+		buffer_info->skb = NULL;
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	}
+	rx_ring->next_to_clean = i;
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+
+	*want = got;
+
+	return skb_head;
+}
+
+/* Click polling extension */
+static struct sk_buff *e1000_rx_poll(struct net_device *netdev, int *want)
+{
+	struct sk_buff *skb;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->rx_ps_pages)
+		skb = __e1000_rx_poll_ps(netdev, want);
+	else
+		skb = __e1000_rx_poll(netdev, want);
+
+	return skb;
+}
+
+/* Click polling extension */
+static int __e1000_rx_refill(struct net_device *netdev, struct sk_buff **skbs)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	int refill_count = 0;
+
+	if (skbs == 0)
+		return e1000_desc_unused(rx_ring);
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (*skbs) {
+		refill_count++;
+		buffer_info->skb = *skbs;
+		*skbs = (*skbs)->next;
+		buffer_info->dma = pci_map_single(pdev, buffer_info->skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(buffer_info->dma)) {
+			dev_err(&pdev->dev, "RX DMA map failed\n");
+			adapter->rx_dma_failed++;
+			break;
+		}
+
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+		if (i-- == 0)
+			i = (rx_ring->count - 1);
+
+		/*
+		 * Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+	}
+
+	return refill_count;
+}
+
+/* Click polling extension */
+static int __e1000_rx_refill_ps(struct net_device *netdev, struct sk_buff **skbs)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	union e1000_rx_desc_packet_split *rx_desc;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	unsigned int i, j;
+	int refill_count = 0;
+
+	if (skbs == 0)
+		return e1000_desc_unused(rx_ring);
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (*skbs) {
+		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+
+		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+			ps_page = &buffer_info->ps_pages[j];
+			if (j >= adapter->rx_ps_pages) {
+				/* all unused desc entries get hw null ptr */
+				rx_desc->read.buffer_addr[j+1] = ~0;
+				continue;
+			}
+			if (!ps_page->page) {
+				ps_page->page = alloc_page(GFP_ATOMIC);
+				if (!ps_page->page) {
+					adapter->alloc_rx_buff_failed++;
+					goto no_buffers;
+				}
+				ps_page->dma = pci_map_page(pdev,
+							    ps_page->page,
+							    0, PAGE_SIZE,
+							    PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(ps_page->dma)) {
+					dev_err(&adapter->pdev->dev,
+						"RX DMA page map failed\n");
+					adapter->rx_dma_failed++;
+					goto no_buffers;
+				}
+			}
+			/*
+			 * Refresh the desc even if buffer_addrs
+			 * didn't change because each write-back
+			 * erases this info.
+			 */
+			rx_desc->read.buffer_addr[j+1] =
+				cpu_to_le64(ps_page->dma);
+		}
+
+		buffer_info->skb = *skbs;
+		buffer_info->dma = pci_map_single(pdev, (*skbs)->data,
+						  adapter->rx_ps_bsize0,
+						  PCI_DMA_FROMDEVICE);
+		*skbs = (*skbs)->next;
+
+		if (pci_dma_mapping_error(buffer_info->dma)) {
+			dev_err(&pdev->dev, "RX DMA map failed\n");
+			adapter->rx_dma_failed++;
+			break;
+		}
+
+		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+
+		refill_count++;
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+
+		if (!(i--))
+			i = (rx_ring->count - 1);
+
+		/*
+		 * Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		/*
+		 * Hardware increments by 16 bytes, but packet split
+		 * descriptors are 32 bytes...so we increment tail
+		 * twice as much.
+		 */
+		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+	}
+
+	return refill_count;
+}
+
+static int e1000_rx_refill(struct net_device *netdev, struct sk_buff **skbs)
+{
+	int i;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->rx_ps_pages)
+		i = __e1000_rx_refill_ps(netdev, skbs);
+	else
+		i = __e1000_rx_refill(netdev, skbs);
+
+	return i;
+}
+
+/* Click polling extension */
+static int e1000_tx_pqueue(struct net_device *netdev, struct sk_buff *skb)
+{
+	int res = e1000_xmit_frame(skb, netdev);
+	return res;
+}
+
+/* Click polling extension */
+static int e1000_tx_eob(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* Click polling extension */
+static int e1000_tx_start(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* Click polling extension */
+static struct sk_buff* e1000_tx_clean(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	bool cleaned = 0, retval = 1;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+	struct sk_buff *skb_head, *skb_last;
+
+	skb_head = skb_last = NULL;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		for (cleaned = 0; !cleaned; ) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			if (cleaned) {
+				struct sk_buff *skb = buffer_info->skb;
+#ifdef NETIF_F_TSO
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+#else
+				total_tx_packets++;
+				total_tx_bytes += skb->len;
+#endif
+			}
+
+			if (buffer_info->dma) {
+				pci_unmap_page(adapter->pdev,
+					       buffer_info->dma,
+					       buffer_info->length,
+					       PCI_DMA_TODEVICE);
+				buffer_info->dma = 0;
+			}
+
+			if (buffer_info->skb) {
+				struct sk_buff *skb = buffer_info->skb;
+				if (skb_head == 0) {
+					skb_head = skb;
+					skb_last = skb;
+					skb_last->next = NULL;
+				} else {
+					skb_last->next = skb;
+					skb->next = NULL;
+					skb_last = skb;
+				}
+				buffer_info->skb = NULL;
+			}
+			buffer_info->time_stamp = 0;
+			tx_desc->upper.data = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+#ifdef CONFIG_E1000E_NAPI
+			if (total_tx_packets >= tx_ring->count) {
+				retval = 0;
+				goto done_cleaning;
+			}
+#endif
+		}
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+#ifdef CONFIG_E1000E_NAPI
+done_cleaning:
+#endif
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (cleaned && netif_carrier_ok(netdev) &&
+	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+		/*
+		 * Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (netif_queue_stopped(netdev) &&
+		    !(test_bit(__E1000_DOWN, &adapter->state))) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
+		adapter->detect_tx_hung = 0;
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
+			       + (adapter->tx_timeout_factor * HZ))
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+			e1000_print_tx_hang(adapter);
+			netif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+
+	return skb_head;
+}
+
+/* Click polling extension */
+static int e1000_poll_off(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (netdev->polling > 0) {
+		netdev->polling = 0;
+		e1000_irq_enable(adapter);
+	}
+
+	return 0;
+}
+
+/* Click polling extension */
+static int e1000_poll_on(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+
+	if (!netdev->polling) {
+		local_irq_save(flags);
+		netdev->polling = 2;
+		e1000_irq_disable(adapter);
+		local_irq_restore(flags);
+	}
+
+	return adapter->rx_buffer_len + NET_IP_ALIGN;
+}
+
 /**
  * e1000_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5248,6 +5876,18 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	netdev->poll_controller = e1000_netpoll;
 #endif
+
+	/* Click polling extensions */
+	netdev->polling = 0;
+	netdev->rx_poll = e1000_rx_poll;
+	netdev->rx_refill = e1000_rx_refill;
+	netdev->tx_queue = e1000_tx_pqueue;
+	netdev->tx_eob = e1000_tx_eob;
+	netdev->tx_start = e1000_tx_start;
+	netdev->tx_clean = e1000_tx_clean;
+	netdev->poll_off = e1000_poll_off;
+	netdev->poll_on = e1000_poll_on;
+
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 	hw->mac.ops.get_bus_info(&adapter->hw);
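
For readers coming to this patch from the Click side, the sketch below models, in plain user-space C, the run-to-completion cycle a polling element would perform against the hooks the patch installs (poll_on, rx_poll, rx_refill, tx_clean, poll_off). Every struct and stub function here is a simplified, hypothetical stand-in, not the real kernel types, the real e1000e code, or Click's actual PollDevice implementation; it only illustrates the call order and the skb->next chaining convention used above.

/* Minimal sketch of the polling cycle; all types and stubs are hypothetical. */
#include <stddef.h>
#include <stdio.h>

struct sk_buff { struct sk_buff *next; };

struct net_device {
	int polling;
	struct sk_buff *(*rx_poll)(struct net_device *dev, int *want);
	int (*rx_refill)(struct net_device *dev, struct sk_buff **skbs);
	struct sk_buff *(*tx_clean)(struct net_device *dev);
	int (*poll_on)(struct net_device *dev);
	int (*poll_off)(struct net_device *dev);
};

static struct sk_buff fake_ring[2];

static struct sk_buff *stub_rx_poll(struct net_device *dev, int *want)
{
	(void)dev;
	int got = (*want < 2) ? *want : 2;	/* pretend two frames are ready */
	*want = got;				/* report how many were harvested */
	if (got == 0)
		return NULL;
	fake_ring[0].next = (got > 1) ? &fake_ring[1] : NULL;
	fake_ring[1].next = NULL;
	return &fake_ring[0];			/* chain linked via skb->next */
}

static int stub_rx_refill(struct net_device *dev, struct sk_buff **skbs)
{
	(void)dev;
	int n = 0;
	while (skbs && *skbs) {			/* consume the chain of spare buffers */
		*skbs = (*skbs)->next;
		n++;
	}
	return n;
}

static struct sk_buff *stub_tx_clean(struct net_device *dev) { (void)dev; return NULL; }
static int stub_poll_on(struct net_device *dev)  { dev->polling = 2; return 0; }
static int stub_poll_off(struct net_device *dev) { dev->polling = 0; return 0; }

int main(void)
{
	struct net_device dev = {
		.rx_poll   = stub_rx_poll,
		.rx_refill = stub_rx_refill,
		.tx_clean  = stub_tx_clean,
		.poll_on   = stub_poll_on,
		.poll_off  = stub_poll_off,
	};

	dev.poll_on(&dev);			/* driver masks its IRQs, sets netdev->polling */

	int want = 8;				/* ask for a burst of at most 8 packets */
	struct sk_buff *rx = dev.rx_poll(&dev, &want);
	printf("harvested %d packet(s) by polling\n", want);

	/* After the packets are processed, recycled buffers go back to the ring. */
	dev.rx_refill(&dev, &rx);

	/* Completed transmit skbs are reclaimed so the caller can reuse them. */
	struct sk_buff *done = dev.tx_clean(&dev);
	(void)done;

	dev.poll_off(&dev);			/* leave polling mode, IRQs re-enabled */
	return 0;
}

One deliberate simplification: stub_poll_on returns 0, whereas the patched driver's e1000_poll_on reports a buffer-size hint (adapter->rx_buffer_len + NET_IP_ALIGN) that the caller can use when allocating refill skbs.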