
/drivers/net/e1000e/netdev.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2

Note: large files are truncated; the full file is available at the repository linked above.

   1/*******************************************************************************
   2
   3  Intel PRO/1000 Linux driver
   4  Copyright(c) 1999 - 2011 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  30
  31#include <linux/module.h>
  32#include <linux/types.h>
  33#include <linux/init.h>
  34#include <linux/pci.h>
  35#include <linux/vmalloc.h>
  36#include <linux/pagemap.h>
  37#include <linux/delay.h>
  38#include <linux/netdevice.h>
  39#include <linux/tcp.h>
  40#include <linux/ipv6.h>
  41#include <linux/slab.h>
  42#include <net/checksum.h>
  43#include <net/ip6_checksum.h>
  44#include <linux/mii.h>
  45#include <linux/ethtool.h>
  46#include <linux/if_vlan.h>
  47#include <linux/cpu.h>
  48#include <linux/smp.h>
  49#include <linux/pm_qos_params.h>
  50#include <linux/pm_runtime.h>
  51#include <linux/aer.h>
  52#include <linux/prefetch.h>
  53
  54#include "e1000.h"
  55
  56#define DRV_EXTRAVERSION "-k2"
  57
  58#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
  59char e1000e_driver_name[] = "e1000e";
  60const char e1000e_driver_version[] = DRV_VERSION;
  61
  62static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
  63
  64static const struct e1000_info *e1000_info_tbl[] = {
  65	[board_82571]		= &e1000_82571_info,
  66	[board_82572]		= &e1000_82572_info,
  67	[board_82573]		= &e1000_82573_info,
  68	[board_82574]		= &e1000_82574_info,
  69	[board_82583]		= &e1000_82583_info,
  70	[board_80003es2lan]	= &e1000_es2_info,
  71	[board_ich8lan]		= &e1000_ich8_info,
  72	[board_ich9lan]		= &e1000_ich9_info,
  73	[board_ich10lan]	= &e1000_ich10_info,
  74	[board_pchlan]		= &e1000_pch_info,
  75	[board_pch2lan]		= &e1000_pch2_info,
  76};
  77
  78struct e1000_reg_info {
  79	u32 ofs;
  80	char *name;
  81};
  82
  83#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
  84#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
  85#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
  86#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
  87#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
  88
  89#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
  90#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
  91#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
  92#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
  93#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
  94
  95static const struct e1000_reg_info e1000_reg_info_tbl[] = {
  96
  97	/* General Registers */
  98	{E1000_CTRL, "CTRL"},
  99	{E1000_STATUS, "STATUS"},
 100	{E1000_CTRL_EXT, "CTRL_EXT"},
 101
 102	/* Interrupt Registers */
 103	{E1000_ICR, "ICR"},
 104
 105	/* Rx Registers */
 106	{E1000_RCTL, "RCTL"},
 107	{E1000_RDLEN, "RDLEN"},
 108	{E1000_RDH, "RDH"},
 109	{E1000_RDT, "RDT"},
 110	{E1000_RDTR, "RDTR"},
 111	{E1000_RXDCTL(0), "RXDCTL"},
 112	{E1000_ERT, "ERT"},
 113	{E1000_RDBAL, "RDBAL"},
 114	{E1000_RDBAH, "RDBAH"},
 115	{E1000_RDFH, "RDFH"},
 116	{E1000_RDFT, "RDFT"},
 117	{E1000_RDFHS, "RDFHS"},
 118	{E1000_RDFTS, "RDFTS"},
 119	{E1000_RDFPC, "RDFPC"},
 120
 121	/* Tx Registers */
 122	{E1000_TCTL, "TCTL"},
 123	{E1000_TDBAL, "TDBAL"},
 124	{E1000_TDBAH, "TDBAH"},
 125	{E1000_TDLEN, "TDLEN"},
 126	{E1000_TDH, "TDH"},
 127	{E1000_TDT, "TDT"},
 128	{E1000_TIDV, "TIDV"},
 129	{E1000_TXDCTL(0), "TXDCTL"},
 130	{E1000_TADV, "TADV"},
 131	{E1000_TARC(0), "TARC"},
 132	{E1000_TDFH, "TDFH"},
 133	{E1000_TDFT, "TDFT"},
 134	{E1000_TDFHS, "TDFHS"},
 135	{E1000_TDFTS, "TDFTS"},
 136	{E1000_TDFPC, "TDFPC"},
 137
 138	/* List Terminator */
 139	{}
 140};
 141
 142/*
 143 * e1000_regdump - register printout routine
 144 */
 145static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 146{
 147	int n = 0;
 148	char rname[16];
 149	u32 regs[8];
 150
 151	switch (reginfo->ofs) {
 152	case E1000_RXDCTL(0):
 153		for (n = 0; n < 2; n++)
 154			regs[n] = __er32(hw, E1000_RXDCTL(n));
 155		break;
 156	case E1000_TXDCTL(0):
 157		for (n = 0; n < 2; n++)
 158			regs[n] = __er32(hw, E1000_TXDCTL(n));
 159		break;
 160	case E1000_TARC(0):
 161		for (n = 0; n < 2; n++)
 162			regs[n] = __er32(hw, E1000_TARC(n));
 163		break;
 164	default:
 165		printk(KERN_INFO "%-15s %08x\n",
 166		       reginfo->name, __er32(hw, reginfo->ofs));
 167		return;
 168	}
 169
 170	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
 171	printk(KERN_INFO "%-15s ", rname);
 172	for (n = 0; n < 2; n++)
 173		printk(KERN_CONT "%08x ", regs[n]);
 174	printk(KERN_CONT "\n");
 175}
 176
 177/*
 178 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 179 */
 180static void e1000e_dump(struct e1000_adapter *adapter)
 181{
 182	struct net_device *netdev = adapter->netdev;
 183	struct e1000_hw *hw = &adapter->hw;
 184	struct e1000_reg_info *reginfo;
 185	struct e1000_ring *tx_ring = adapter->tx_ring;
 186	struct e1000_tx_desc *tx_desc;
 187	struct my_u0 {
 188		u64 a;
 189		u64 b;
 190	} *u0;
 191	struct e1000_buffer *buffer_info;
 192	struct e1000_ring *rx_ring = adapter->rx_ring;
 193	union e1000_rx_desc_packet_split *rx_desc_ps;
 194	struct e1000_rx_desc *rx_desc;
 195	struct my_u1 {
 196		u64 a;
 197		u64 b;
 198		u64 c;
 199		u64 d;
 200	} *u1;
 201	u32 staterr;
 202	int i = 0;
 203
 204	if (!netif_msg_hw(adapter))
 205		return;
 206
 207	/* Print netdevice Info */
 208	if (netdev) {
 209		dev_info(&adapter->pdev->dev, "Net device Info\n");
 210		printk(KERN_INFO "Device Name     state            "
 211		       "trans_start      last_rx\n");
 212		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
 213		       netdev->name, netdev->state, netdev->trans_start,
 214		       netdev->last_rx);
 215	}
 216
 217	/* Print Registers */
 218	dev_info(&adapter->pdev->dev, "Register Dump\n");
 219	printk(KERN_INFO " Register Name   Value\n");
 220	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
 221	     reginfo->name; reginfo++) {
 222		e1000_regdump(hw, reginfo);
 223	}
 224
 225	/* Print Tx Ring Summary */
 226	if (!netdev || !netif_running(netdev))
 227		goto exit;
 228
 229	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 230	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
 231	       " leng ntw timestamp\n");
 232	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 233	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
 234	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
 235	       (unsigned long long)buffer_info->dma,
 236	       buffer_info->length,
 237	       buffer_info->next_to_watch,
 238	       (unsigned long long)buffer_info->time_stamp);
 239
 240	/* Print Tx Ring */
 241	if (!netif_msg_tx_done(adapter))
 242		goto rx_ring_summary;
 243
 244	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
 245
 246	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 247	 *
 248	 * Legacy Transmit Descriptor
 249	 *   +--------------------------------------------------------------+
 250	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
 251	 *   +--------------------------------------------------------------+
 252	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
 253	 *   +--------------------------------------------------------------+
 254	 *   63       48 47        36 35    32 31     24 23    16 15        0
 255	 *
 256	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
 257	 *   63      48 47    40 39       32 31             16 15    8 7      0
 258	 *   +----------------------------------------------------------------+
 259	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
 260	 *   +----------------------------------------------------------------+
 261	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
 262	 *   +----------------------------------------------------------------+
 263	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
 264	 *
 265	 * Extended Data Descriptor (DTYP=0x1)
 266	 *   +----------------------------------------------------------------+
 267	 * 0 |                     Buffer Address [63:0]                      |
 268	 *   +----------------------------------------------------------------+
 269	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
 270	 *   +----------------------------------------------------------------+
 271	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 272	 */
 273	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
 274	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
 275	       "<-- Legacy format\n");
 276	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
 277	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
 278	       "<-- Ext Context format\n");
 279	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
 280	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
 281	       "<-- Ext Data format\n");
 282	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 283		tx_desc = E1000_TX_DESC(*tx_ring, i);
 284		buffer_info = &tx_ring->buffer_info[i];
 285		u0 = (struct my_u0 *)tx_desc;
 286		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
 287		       "%04X  %3X %016llX %p",
 288		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
 289			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
 290		       (unsigned long long)le64_to_cpu(u0->a),
 291		       (unsigned long long)le64_to_cpu(u0->b),
 292		       (unsigned long long)buffer_info->dma,
 293		       buffer_info->length, buffer_info->next_to_watch,
 294		       (unsigned long long)buffer_info->time_stamp,
 295		       buffer_info->skb);
 296		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
 297			printk(KERN_CONT " NTC/U\n");
 298		else if (i == tx_ring->next_to_use)
 299			printk(KERN_CONT " NTU\n");
 300		else if (i == tx_ring->next_to_clean)
 301			printk(KERN_CONT " NTC\n");
 302		else
 303			printk(KERN_CONT "\n");
 304
 305		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 306			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
 307				       16, 1, phys_to_virt(buffer_info->dma),
 308				       buffer_info->length, true);
 309	}
 310
 311	/* Print Rx Ring Summary */
 312rx_ring_summary:
 313	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 314	printk(KERN_INFO "Queue [NTU] [NTC]\n");
 315	printk(KERN_INFO " %5d %5X %5X\n", 0,
 316	       rx_ring->next_to_use, rx_ring->next_to_clean);
 317
 318	/* Print Rx Ring */
 319	if (!netif_msg_rx_status(adapter))
 320		goto exit;
 321
 322	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 323	switch (adapter->rx_ps_pages) {
 324	case 1:
 325	case 2:
 326	case 3:
 327		/* [Extended] Packet Split Receive Descriptor Format
 328		 *
 329		 *    +-----------------------------------------------------+
 330		 *  0 |                Buffer Address 0 [63:0]              |
 331		 *    +-----------------------------------------------------+
 332		 *  8 |                Buffer Address 1 [63:0]              |
 333		 *    +-----------------------------------------------------+
 334		 * 16 |                Buffer Address 2 [63:0]              |
 335		 *    +-----------------------------------------------------+
 336		 * 24 |                Buffer Address 3 [63:0]              |
 337		 *    +-----------------------------------------------------+
 338		 */
 339		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
 340		       "[buffer 1 63:0 ] "
 341		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
 342		       "[bi->skb] <-- Ext Pkt Split format\n");
 343		/* [Extended] Receive Descriptor (Write-Back) Format
 344		 *
 345		 *   63       48 47    32 31     13 12    8 7    4 3        0
 346		 *   +------------------------------------------------------+
 347		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
 348		 *   | Checksum | Ident  |         | Queue |      |  Type   |
 349		 *   +------------------------------------------------------+
 350		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 351		 *   +------------------------------------------------------+
 352		 *   63       48 47    32 31            20 19               0
 353		 */
 354		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
 355		       "[vl   l0 ee  es] "
 356		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
 357		       "[bi->skb] <-- Ext Rx Write-Back format\n");
 358		for (i = 0; i < rx_ring->count; i++) {
 359			buffer_info = &rx_ring->buffer_info[i];
 360			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 361			u1 = (struct my_u1 *)rx_desc_ps;
 362			staterr =
 363			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 364			if (staterr & E1000_RXD_STAT_DD) {
 365				/* Descriptor Done */
 366				printk(KERN_INFO "RWB[0x%03X]     %016llX "
 367				       "%016llX %016llX %016llX "
 368				       "---------------- %p", i,
 369				       (unsigned long long)le64_to_cpu(u1->a),
 370				       (unsigned long long)le64_to_cpu(u1->b),
 371				       (unsigned long long)le64_to_cpu(u1->c),
 372				       (unsigned long long)le64_to_cpu(u1->d),
 373				       buffer_info->skb);
 374			} else {
 375				printk(KERN_INFO "R  [0x%03X]     %016llX "
 376				       "%016llX %016llX %016llX %016llX %p", i,
 377				       (unsigned long long)le64_to_cpu(u1->a),
 378				       (unsigned long long)le64_to_cpu(u1->b),
 379				       (unsigned long long)le64_to_cpu(u1->c),
 380				       (unsigned long long)le64_to_cpu(u1->d),
 381				       (unsigned long long)buffer_info->dma,
 382				       buffer_info->skb);
 383
 384				if (netif_msg_pktdata(adapter))
 385					print_hex_dump(KERN_INFO, "",
 386						DUMP_PREFIX_ADDRESS, 16, 1,
 387						phys_to_virt(buffer_info->dma),
 388						adapter->rx_ps_bsize0, true);
 389			}
 390
 391			if (i == rx_ring->next_to_use)
 392				printk(KERN_CONT " NTU\n");
 393			else if (i == rx_ring->next_to_clean)
 394				printk(KERN_CONT " NTC\n");
 395			else
 396				printk(KERN_CONT "\n");
 397		}
 398		break;
 399	default:
 400	case 0:
 401		/* Legacy Receive Descriptor Format
 402		 *
 403		 * +-----------------------------------------------------+
 404		 * |                Buffer Address [63:0]                |
 405		 * +-----------------------------------------------------+
 406		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
 407		 * +-----------------------------------------------------+
 408		 * 63       48 47    40 39      32 31         16 15      0
 409		 */
 410		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
 411		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
 412		       "<-- Legacy format\n");
 413		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 414			rx_desc = E1000_RX_DESC(*rx_ring, i);
 415			buffer_info = &rx_ring->buffer_info[i];
 416			u0 = (struct my_u0 *)rx_desc;
 417			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
 418			       "%016llX %p", i,
 419			       (unsigned long long)le64_to_cpu(u0->a),
 420			       (unsigned long long)le64_to_cpu(u0->b),
 421			       (unsigned long long)buffer_info->dma,
 422			       buffer_info->skb);
 423			if (i == rx_ring->next_to_use)
 424				printk(KERN_CONT " NTU\n");
 425			else if (i == rx_ring->next_to_clean)
 426				printk(KERN_CONT " NTC\n");
 427			else
 428				printk(KERN_CONT "\n");
 429
 430			if (netif_msg_pktdata(adapter))
 431				print_hex_dump(KERN_INFO, "",
 432					       DUMP_PREFIX_ADDRESS,
 433					       16, 1,
 434					       phys_to_virt(buffer_info->dma),
 435					       adapter->rx_buffer_len, true);
 436		}
 437	}
 438
 439exit:
 440	return;
 441}
 442
 443/**
  444 * e1000_desc_unused - calculate the number of unused descriptors in a ring
 445 **/
 446static int e1000_desc_unused(struct e1000_ring *ring)
 447{
 448	if (ring->next_to_clean > ring->next_to_use)
 449		return ring->next_to_clean - ring->next_to_use - 1;
 450
 451	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
 452}
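     /*
      * Worked example: with ring->count = 256, next_to_clean = 10 and
      * next_to_use = 250, the ring has wrapped, so 256 + 10 - 250 - 1 = 15
      * descriptors remain unused.  One slot is always held back so that
      * next_to_use == next_to_clean unambiguously means "empty" rather
      * than "full".
      */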
 453
 454/**
 455 * e1000_receive_skb - helper function to handle Rx indications
  456 * @adapter: board private structure
      * @netdev: pointer to the net device structure
  457 * @skb: pointer to sk_buff to be indicated to stack
  458 * @status: descriptor status field as written by hardware
  459 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 460 **/
 461static void e1000_receive_skb(struct e1000_adapter *adapter,
 462			      struct net_device *netdev, struct sk_buff *skb,
 463			      u8 status, __le16 vlan)
 464{
 465	u16 tag = le16_to_cpu(vlan);
 466	skb->protocol = eth_type_trans(skb, netdev);
 467
 468	if (status & E1000_RXD_STAT_VP)
 469		__vlan_hwaccel_put_tag(skb, tag);
 470
 471	napi_gro_receive(&adapter->napi, skb);
 472}
 473
 474/**
 475 * e1000_rx_checksum - Receive Checksum Offload
 476 * @adapter:     board private structure
 477 * @status_err:  receive descriptor status and error fields
 478 * @csum:	receive descriptor csum field
 479 * @sk_buff:     socket buffer with received data
 480 **/
 481static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 482			      u32 csum, struct sk_buff *skb)
 483{
 484	u16 status = (u16)status_err;
 485	u8 errors = (u8)(status_err >> 24);
 486
 487	skb_checksum_none_assert(skb);
 488
  489	/* the Ignore Checksum bit is set; skip checksum validation */
 490	if (status & E1000_RXD_STAT_IXSM)
 491		return;
 492	/* TCP/UDP checksum error bit is set */
 493	if (errors & E1000_RXD_ERR_TCPE) {
 494		/* let the stack verify checksum errors */
 495		adapter->hw_csum_err++;
 496		return;
 497	}
 498
 499	/* TCP/UDP Checksum has not been calculated */
 500	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
 501		return;
 502
 503	/* It must be a TCP or UDP packet with a valid checksum */
 504	if (status & E1000_RXD_STAT_TCPCS) {
 505		/* TCP checksum is good */
 506		skb->ip_summed = CHECKSUM_UNNECESSARY;
 507	} else {
 508		/*
 509		 * IP fragment with UDP payload
 510		 * Hardware complements the payload checksum, so we undo it
 511		 * and then put the value in host order for further stack use.
 512		 */
 513		__sum16 sum = (__force __sum16)htons(csum);
 514		skb->csum = csum_unfold(~sum);
 515		skb->ip_summed = CHECKSUM_COMPLETE;
 516	}
 517	adapter->hw_csum_good++;
 518}
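     /*
      * Note on the CHECKSUM_COMPLETE path above: for IP fragments the
      * hardware reports the one's complement of the UDP payload checksum.
      * Undoing the complement with ~ and folding via csum_unfold() hands
      * the stack a partial checksum it can extend across the reassembled
      * datagram instead of recomputing the whole thing in software.
      */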
 519
 520/**
 521 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  522 * @adapter: address of board private structure
      * @cleaned_count: number of buffers to allocate this pass
  523 **/
 524static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 525				   int cleaned_count)
 526{
 527	struct net_device *netdev = adapter->netdev;
 528	struct pci_dev *pdev = adapter->pdev;
 529	struct e1000_ring *rx_ring = adapter->rx_ring;
 530	struct e1000_rx_desc *rx_desc;
 531	struct e1000_buffer *buffer_info;
 532	struct sk_buff *skb;
 533	unsigned int i;
 534	unsigned int bufsz = adapter->rx_buffer_len;
 535
 536	i = rx_ring->next_to_use;
 537	buffer_info = &rx_ring->buffer_info[i];
 538
 539	while (cleaned_count--) {
 540		skb = buffer_info->skb;
 541		if (skb) {
 542			skb_trim(skb, 0);
 543			goto map_skb;
 544		}
 545
 546		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 547		if (!skb) {
 548			/* Better luck next round */
 549			adapter->alloc_rx_buff_failed++;
 550			break;
 551		}
 552
 553		buffer_info->skb = skb;
 554map_skb:
 555		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 556						  adapter->rx_buffer_len,
 557						  DMA_FROM_DEVICE);
 558		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 559			dev_err(&pdev->dev, "Rx DMA map failed\n");
 560			adapter->rx_dma_failed++;
 561			break;
 562		}
 563
 564		rx_desc = E1000_RX_DESC(*rx_ring, i);
 565		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 566
 567		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
 568			/*
 569			 * Force memory writes to complete before letting h/w
 570			 * know there are new descriptors to fetch.  (Only
 571			 * applicable for weak-ordered memory model archs,
 572			 * such as IA-64).
 573			 */
 574			wmb();
 575			writel(i, adapter->hw.hw_addr + rx_ring->tail);
 576		}
 577		i++;
 578		if (i == rx_ring->count)
 579			i = 0;
 580		buffer_info = &rx_ring->buffer_info[i];
 581	}
 582
 583	rx_ring->next_to_use = i;
 584}
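     /*
      * Note: the tail register is written only when the index crosses a
      * multiple of E1000_RX_BUFFER_WRITE, batching the posted MMIO write
      * instead of poking the hardware once per descriptor; the wmb()
      * before it orders the descriptor stores ahead of the doorbell.
      */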
 585
 586/**
 587 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  588 * @adapter: address of board private structure
      * @cleaned_count: number of buffers to allocate this pass
  589 **/
 590static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 591				      int cleaned_count)
 592{
 593	struct net_device *netdev = adapter->netdev;
 594	struct pci_dev *pdev = adapter->pdev;
 595	union e1000_rx_desc_packet_split *rx_desc;
 596	struct e1000_ring *rx_ring = adapter->rx_ring;
 597	struct e1000_buffer *buffer_info;
 598	struct e1000_ps_page *ps_page;
 599	struct sk_buff *skb;
 600	unsigned int i, j;
 601
 602	i = rx_ring->next_to_use;
 603	buffer_info = &rx_ring->buffer_info[i];
 604
 605	while (cleaned_count--) {
 606		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 607
 608		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
 609			ps_page = &buffer_info->ps_pages[j];
 610			if (j >= adapter->rx_ps_pages) {
 611				/* all unused desc entries get hw null ptr */
 612				rx_desc->read.buffer_addr[j + 1] =
 613				    ~cpu_to_le64(0);
 614				continue;
 615			}
 616			if (!ps_page->page) {
 617				ps_page->page = alloc_page(GFP_ATOMIC);
 618				if (!ps_page->page) {
 619					adapter->alloc_rx_buff_failed++;
 620					goto no_buffers;
 621				}
 622				ps_page->dma = dma_map_page(&pdev->dev,
 623							    ps_page->page,
 624							    0, PAGE_SIZE,
 625							    DMA_FROM_DEVICE);
 626				if (dma_mapping_error(&pdev->dev,
 627						      ps_page->dma)) {
 628					dev_err(&adapter->pdev->dev,
 629						"Rx DMA page map failed\n");
 630					adapter->rx_dma_failed++;
 631					goto no_buffers;
 632				}
 633			}
 634			/*
 635			 * Refresh the desc even if buffer_addrs
 636			 * didn't change because each write-back
 637			 * erases this info.
 638			 */
 639			rx_desc->read.buffer_addr[j + 1] =
 640			    cpu_to_le64(ps_page->dma);
 641		}
 642
 643		skb = netdev_alloc_skb_ip_align(netdev,
 644						adapter->rx_ps_bsize0);
 645
 646		if (!skb) {
 647			adapter->alloc_rx_buff_failed++;
 648			break;
 649		}
 650
 651		buffer_info->skb = skb;
 652		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 653						  adapter->rx_ps_bsize0,
 654						  DMA_FROM_DEVICE);
 655		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 656			dev_err(&pdev->dev, "Rx DMA map failed\n");
 657			adapter->rx_dma_failed++;
 658			/* cleanup skb */
 659			dev_kfree_skb_any(skb);
 660			buffer_info->skb = NULL;
 661			break;
 662		}
 663
 664		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 665
 666		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
 667			/*
 668			 * Force memory writes to complete before letting h/w
 669			 * know there are new descriptors to fetch.  (Only
 670			 * applicable for weak-ordered memory model archs,
 671			 * such as IA-64).
 672			 */
 673			wmb();
 674			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
 675		}
 676
 677		i++;
 678		if (i == rx_ring->count)
 679			i = 0;
 680		buffer_info = &rx_ring->buffer_info[i];
 681	}
 682
 683no_buffers:
 684	rx_ring->next_to_use = i;
 685}
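     /*
      * Note (assumption worth checking against the datasheet): the tail
      * write uses (i << 1) because packet-split descriptors are 32 bytes,
      * twice the size of legacy 16-byte descriptors, so the hardware tail
      * pointer advances two units per packet-split entry.
      */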
 686
 687/**
 688 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 689 * @adapter: address of board private structure
 690 * @cleaned_count: number of buffers to allocate this pass
 691 **/
 692
 693static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 694                                         int cleaned_count)
 695{
 696	struct net_device *netdev = adapter->netdev;
 697	struct pci_dev *pdev = adapter->pdev;
 698	struct e1000_rx_desc *rx_desc;
 699	struct e1000_ring *rx_ring = adapter->rx_ring;
 700	struct e1000_buffer *buffer_info;
 701	struct sk_buff *skb;
 702	unsigned int i;
 703	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 704
 705	i = rx_ring->next_to_use;
 706	buffer_info = &rx_ring->buffer_info[i];
 707
 708	while (cleaned_count--) {
 709		skb = buffer_info->skb;
 710		if (skb) {
 711			skb_trim(skb, 0);
 712			goto check_page;
 713		}
 714
 715		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 716		if (unlikely(!skb)) {
 717			/* Better luck next round */
 718			adapter->alloc_rx_buff_failed++;
 719			break;
 720		}
 721
 722		buffer_info->skb = skb;
 723check_page:
 724		/* allocate a new page if necessary */
 725		if (!buffer_info->page) {
 726			buffer_info->page = alloc_page(GFP_ATOMIC);
 727			if (unlikely(!buffer_info->page)) {
 728				adapter->alloc_rx_buff_failed++;
 729				break;
 730			}
 731		}
 732
 733		if (!buffer_info->dma)
 734			buffer_info->dma = dma_map_page(&pdev->dev,
 735			                                buffer_info->page, 0,
 736			                                PAGE_SIZE,
 737							DMA_FROM_DEVICE);
 738
 739		rx_desc = E1000_RX_DESC(*rx_ring, i);
 740		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 741
 742		if (unlikely(++i == rx_ring->count))
 743			i = 0;
 744		buffer_info = &rx_ring->buffer_info[i];
 745	}
 746
 747	if (likely(rx_ring->next_to_use != i)) {
 748		rx_ring->next_to_use = i;
 749		if (unlikely(i-- == 0))
 750			i = (rx_ring->count - 1);
 751
 752		/* Force memory writes to complete before letting h/w
 753		 * know there are new descriptors to fetch.  (Only
 754		 * applicable for weak-ordered memory model archs,
 755		 * such as IA-64). */
 756		wmb();
 757		writel(i, adapter->hw.hw_addr + rx_ring->tail);
 758	}
 759}
 760
 761/**
 762 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 763 * @adapter: board private structure
 764 *
  765 * The return value indicates whether actual cleaning was done; there
  766 * is no guarantee that everything was cleaned.
 767 **/
 768static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 769			       int *work_done, int work_to_do)
 770{
 771	struct net_device *netdev = adapter->netdev;
 772	struct pci_dev *pdev = adapter->pdev;
 773	struct e1000_hw *hw = &adapter->hw;
 774	struct e1000_ring *rx_ring = adapter->rx_ring;
 775	struct e1000_rx_desc *rx_desc, *next_rxd;
 776	struct e1000_buffer *buffer_info, *next_buffer;
 777	u32 length;
 778	unsigned int i;
 779	int cleaned_count = 0;
  780	bool cleaned = false;
 781	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 782
 783	i = rx_ring->next_to_clean;
 784	rx_desc = E1000_RX_DESC(*rx_ring, i);
 785	buffer_info = &rx_ring->buffer_info[i];
 786
 787	while (rx_desc->status & E1000_RXD_STAT_DD) {
 788		struct sk_buff *skb;
 789		u8 status;
 790
 791		if (*work_done >= work_to_do)
 792			break;
 793		(*work_done)++;
 794		rmb();	/* read descriptor and rx_buffer_info after status DD */
 795
 796		status = rx_desc->status;
 797		skb = buffer_info->skb;
 798		buffer_info->skb = NULL;
 799
 800		prefetch(skb->data - NET_IP_ALIGN);
 801
 802		i++;
 803		if (i == rx_ring->count)
 804			i = 0;
 805		next_rxd = E1000_RX_DESC(*rx_ring, i);
 806		prefetch(next_rxd);
 807
 808		next_buffer = &rx_ring->buffer_info[i];
 809
  810		cleaned = true;
 811		cleaned_count++;
 812		dma_unmap_single(&pdev->dev,
 813				 buffer_info->dma,
 814				 adapter->rx_buffer_len,
 815				 DMA_FROM_DEVICE);
 816		buffer_info->dma = 0;
 817
 818		length = le16_to_cpu(rx_desc->length);
 819
 820		/*
  821		 * !EOP means multiple descriptors were used to store a single
  822		 * packet; if that's the case, we need to toss it.  In fact, we
  823		 * need to toss every packet with the EOP bit clear and the
  824		 * next frame that _does_ have the EOP bit set, as it is by
  825		 * definition only a frame fragment.
 826		 */
 827		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
 828			adapter->flags2 |= FLAG2_IS_DISCARDING;
 829
 830		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 831			/* All receives must fit into a single buffer */
 832			e_dbg("Receive packet consumed multiple buffers\n");
 833			/* recycle */
 834			buffer_info->skb = skb;
 835			if (status & E1000_RXD_STAT_EOP)
 836				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 837			goto next_desc;
 838		}
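     		/*
     		 * Illustrative scenario: if a frame larger than the
     		 * configured buffer size arrives, its first descriptor
     		 * completes without EOP, FLAG2_IS_DISCARDING is latched,
     		 * and that buffer plus the eventual EOP fragment are all
     		 * recycled without being passed up the stack.
     		 */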
 839
 840		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
 841			/* recycle */
 842			buffer_info->skb = skb;
 843			goto next_desc;
 844		}
 845
 846		/* adjust length to remove Ethernet CRC */
 847		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
 848			length -= 4;
 849
 850		total_rx_bytes += length;
 851		total_rx_packets++;
 852
 853		/*
 854		 * code added for copybreak, this should improve
 855		 * performance for small packets with large amounts
 856		 * of reassembly being done in the stack
 857		 */
 858		if (length < copybreak) {
 859			struct sk_buff *new_skb =
 860			    netdev_alloc_skb_ip_align(netdev, length);
 861			if (new_skb) {
 862				skb_copy_to_linear_data_offset(new_skb,
 863							       -NET_IP_ALIGN,
 864							       (skb->data -
 865								NET_IP_ALIGN),
 866							       (length +
 867								NET_IP_ALIGN));
 868				/* save the skb in buffer_info as good */
 869				buffer_info->skb = skb;
 870				skb = new_skb;
 871			}
 872			/* else just continue with the old one */
 873		}
 874		/* end copybreak code */
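     		/*
     		 * Note: copybreak is a tunable threshold (a module
     		 * parameter in this driver); frames below it are copied
     		 * into a fresh, right-sized skb so the full-size receive
     		 * buffer can be recycled in place, trading a small memcpy
     		 * for a buffer reallocation on the hot path.
     		 */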
 875		skb_put(skb, length);
 876
 877		/* Receive Checksum Offload */
 878		e1000_rx_checksum(adapter,
 879				  (u32)(status) |
 880				  ((u32)(rx_desc->errors) << 24),
 881				  le16_to_cpu(rx_desc->csum), skb);
 882
  883		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
 884
 885next_desc:
 886		rx_desc->status = 0;
 887
 888		/* return some buffers to hardware, one at a time is too slow */
 889		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
 890			adapter->alloc_rx_buf(adapter, cleaned_count);
 891			cleaned_count = 0;
 892		}
 893
 894		/* use prefetched values */
 895		rx_desc = next_rxd;
 896		buffer_info = next_buffer;
 897	}
 898	rx_ring->next_to_clean = i;
 899
 900	cleaned_count = e1000_desc_unused(rx_ring);
 901	if (cleaned_count)
 902		adapter->alloc_rx_buf(adapter, cleaned_count);
 903
 904	adapter->total_rx_bytes += total_rx_bytes;
 905	adapter->total_rx_packets += total_rx_packets;
 906	return cleaned;
 907}
 908
 909static void e1000_put_txbuf(struct e1000_adapter *adapter,
 910			     struct e1000_buffer *buffer_info)
 911{
 912	if (buffer_info->dma) {
 913		if (buffer_info->mapped_as_page)
 914			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
 915				       buffer_info->length, DMA_TO_DEVICE);
 916		else
 917			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 918					 buffer_info->length, DMA_TO_DEVICE);
 919		buffer_info->dma = 0;
 920	}
 921	if (buffer_info->skb) {
 922		dev_kfree_skb_any(buffer_info->skb);
 923		buffer_info->skb = NULL;
 924	}
 925	buffer_info->time_stamp = 0;
 926}
 927
 928static void e1000_print_hw_hang(struct work_struct *work)
 929{
 930	struct e1000_adapter *adapter = container_of(work,
 931	                                             struct e1000_adapter,
 932	                                             print_hang_task);
 933	struct net_device *netdev = adapter->netdev;
 934	struct e1000_ring *tx_ring = adapter->tx_ring;
 935	unsigned int i = tx_ring->next_to_clean;
 936	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
 937	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
 938	struct e1000_hw *hw = &adapter->hw;
 939	u16 phy_status, phy_1000t_status, phy_ext_status;
 940	u16 pci_status;
 941
 942	if (test_bit(__E1000_DOWN, &adapter->state))
 943		return;
 944
 945	if (!adapter->tx_hang_recheck &&
 946	    (adapter->flags2 & FLAG2_DMA_BURST)) {
  947		/* May be blocked on write-back: flush pending descriptor
  948		 * write-backs to memory and detect again.
 949		 */
 950		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 951		/* execute the writes immediately */
 952		e1e_flush();
 953		adapter->tx_hang_recheck = true;
 954		return;
 955	}
 956	/* Real hang detected */
 957	adapter->tx_hang_recheck = false;
 958	netif_stop_queue(netdev);
 959
 960	e1e_rphy(hw, PHY_STATUS, &phy_status);
 961	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 962	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
 963
 964	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
 965
 966	/* detected Hardware unit hang */
 967	e_err("Detected Hardware Unit Hang:\n"
 968	      "  TDH                  <%x>\n"
 969	      "  TDT                  <%x>\n"
 970	      "  next_to_use          <%x>\n"
 971	      "  next_to_clean        <%x>\n"
 972	      "buffer_info[next_to_clean]:\n"
 973	      "  time_stamp           <%lx>\n"
 974	      "  next_to_watch        <%x>\n"
 975	      "  jiffies              <%lx>\n"
 976	      "  next_to_watch.status <%x>\n"
 977	      "MAC Status             <%x>\n"
 978	      "PHY Status             <%x>\n"
 979	      "PHY 1000BASE-T Status  <%x>\n"
 980	      "PHY Extended Status    <%x>\n"
 981	      "PCI Status             <%x>\n",
 982	      readl(adapter->hw.hw_addr + tx_ring->head),
 983	      readl(adapter->hw.hw_addr + tx_ring->tail),
 984	      tx_ring->next_to_use,
 985	      tx_ring->next_to_clean,
 986	      tx_ring->buffer_info[eop].time_stamp,
 987	      eop,
 988	      jiffies,
 989	      eop_desc->upper.fields.status,
 990	      er32(STATUS),
 991	      phy_status,
 992	      phy_1000t_status,
 993	      phy_ext_status,
 994	      pci_status);
 995}
 996
 997/**
 998 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 999 * @adapter: board private structure
1000 *
 1001 * The return value indicates whether actual cleaning was done; there
 1002 * is no guarantee that everything was cleaned.
1003 **/
1004static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1005{
1006	struct net_device *netdev = adapter->netdev;
1007	struct e1000_hw *hw = &adapter->hw;
1008	struct e1000_ring *tx_ring = adapter->tx_ring;
1009	struct e1000_tx_desc *tx_desc, *eop_desc;
1010	struct e1000_buffer *buffer_info;
1011	unsigned int i, eop;
1012	unsigned int count = 0;
1013	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1014
1015	i = tx_ring->next_to_clean;
1016	eop = tx_ring->buffer_info[i].next_to_watch;
1017	eop_desc = E1000_TX_DESC(*tx_ring, eop);
1018
1019	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1020	       (count < tx_ring->count)) {
1021		bool cleaned = false;
1022		rmb(); /* read buffer_info after eop_desc */
1023		for (; !cleaned; count++) {
1024			tx_desc = E1000_TX_DESC(*tx_ring, i);
1025			buffer_info = &tx_ring->buffer_info[i];
1026			cleaned = (i == eop);
1027
1028			if (cleaned) {
1029				total_tx_packets += buffer_info->segs;
1030				total_tx_bytes += buffer_info->bytecount;
1031			}
1032
1033			e1000_put_txbuf(adapter, buffer_info);
1034			tx_desc->upper.data = 0;
1035
1036			i++;
1037			if (i == tx_ring->count)
1038				i = 0;
1039		}
1040
1041		if (i == tx_ring->next_to_use)
1042			break;
1043		eop = tx_ring->buffer_info[i].next_to_watch;
1044		eop_desc = E1000_TX_DESC(*tx_ring, eop);
1045	}
1046
1047	tx_ring->next_to_clean = i;
1048
1049#define TX_WAKE_THRESHOLD 32
1050	if (count && netif_carrier_ok(netdev) &&
1051	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1052		/* Make sure that anybody stopping the queue after this
1053		 * sees the new next_to_clean.
1054		 */
1055		smp_mb();
1056
1057		if (netif_queue_stopped(netdev) &&
1058		    !(test_bit(__E1000_DOWN, &adapter->state))) {
1059			netif_wake_queue(netdev);
1060			++adapter->restart_queue;
1061		}
1062	}
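     	/*
     	 * Note: the smp_mb() above pairs with the queue-stop check on
     	 * the transmit side.  Without the barrier, one CPU could stop
     	 * the queue without seeing the updated next_to_clean while this
     	 * CPU misses the stopped state, leaving the queue stopped even
     	 * though descriptors have been freed.
     	 */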
1063
1064	if (adapter->detect_tx_hung) {
1065		/*
1066		 * Detect a transmit hang in hardware, this serializes the
1067		 * check with the clearing of time_stamp and movement of i
1068		 */
1069		adapter->detect_tx_hung = 0;
1070		if (tx_ring->buffer_info[i].time_stamp &&
1071		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1072			       + (adapter->tx_timeout_factor * HZ)) &&
1073		    !(er32(STATUS) & E1000_STATUS_TXOFF))
1074			schedule_work(&adapter->print_hang_task);
1075		else
1076			adapter->tx_hang_recheck = false;
1077	}
1078	adapter->total_tx_bytes += total_tx_bytes;
1079	adapter->total_tx_packets += total_tx_packets;
1080	return count < tx_ring->count;
1081}
1082
1083/**
1084 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1085 * @adapter: board private structure
1086 *
 1087 * The return value indicates whether actual cleaning was done; there
 1088 * is no guarantee that everything was cleaned.
1089 **/
1090static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
1091				  int *work_done, int work_to_do)
1092{
1093	struct e1000_hw *hw = &adapter->hw;
1094	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1095	struct net_device *netdev = adapter->netdev;
1096	struct pci_dev *pdev = adapter->pdev;
1097	struct e1000_ring *rx_ring = adapter->rx_ring;
1098	struct e1000_buffer *buffer_info, *next_buffer;
1099	struct e1000_ps_page *ps_page;
1100	struct sk_buff *skb;
1101	unsigned int i, j;
1102	u32 length, staterr;
1103	int cleaned_count = 0;
 1104	bool cleaned = false;
1105	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1106
1107	i = rx_ring->next_to_clean;
1108	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1109	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1110	buffer_info = &rx_ring->buffer_info[i];
1111
1112	while (staterr & E1000_RXD_STAT_DD) {
1113		if (*work_done >= work_to_do)
1114			break;
1115		(*work_done)++;
1116		skb = buffer_info->skb;
1117		rmb();	/* read descriptor and rx_buffer_info after status DD */
1118
1119		/* in the packet split case this is header only */
1120		prefetch(skb->data - NET_IP_ALIGN);
1121
1122		i++;
1123		if (i == rx_ring->count)
1124			i = 0;
1125		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1126		prefetch(next_rxd);
1127
1128		next_buffer = &rx_ring->buffer_info[i];
1129
 1130		cleaned = true;
1131		cleaned_count++;
1132		dma_unmap_single(&pdev->dev, buffer_info->dma,
1133				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1134		buffer_info->dma = 0;
1135
1136		/* see !EOP comment in other Rx routine */
1137		if (!(staterr & E1000_RXD_STAT_EOP))
1138			adapter->flags2 |= FLAG2_IS_DISCARDING;
1139
1140		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1141			e_dbg("Packet Split buffers didn't pick up the full "
1142			      "packet\n");
1143			dev_kfree_skb_irq(skb);
1144			if (staterr & E1000_RXD_STAT_EOP)
1145				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1146			goto next_desc;
1147		}
1148
1149		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
1150			dev_kfree_skb_irq(skb);
1151			goto next_desc;
1152		}
1153
1154		length = le16_to_cpu(rx_desc->wb.middle.length0);
1155
1156		if (!length) {
1157			e_dbg("Last part of the packet spanning multiple "
1158			      "descriptors\n");
1159			dev_kfree_skb_irq(skb);
1160			goto next_desc;
1161		}
1162
1163		/* Good Receive */
1164		skb_put(skb, length);
1165
1166		{
1167		/*
1168		 * this looks ugly, but it seems compiler issues make it
1169		 * more efficient than reusing j
1170		 */
1171		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1172
1173		/*
 1174		 * page alloc/put takes too long and affects small packet
 1175		 * throughput, so unsplit small packets and save the alloc/put;
 1176		 * kmap_* is only valid to call in softirq (napi) context
1177		 */
1178		if (l1 && (l1 <= copybreak) &&
1179		    ((length + l1) <= adapter->rx_ps_bsize0)) {
1180			u8 *vaddr;
1181
1182			ps_page = &buffer_info->ps_pages[0];
1183
1184			/*
1185			 * there is no documentation about how to call
1186			 * kmap_atomic, so we can't hold the mapping
1187			 * very long
1188			 */
1189			dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
1190						PAGE_SIZE, DMA_FROM_DEVICE);
1191			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
1192			memcpy(skb_tail_pointer(skb), vaddr, l1);
1193			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
1194			dma_sync_single_for_device(&pdev->dev, ps_page->dma,
1195						   PAGE_SIZE, DMA_FROM_DEVICE);
1196
1197			/* remove the CRC */
1198			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
1199				l1 -= 4;
1200
1201			skb_put(skb, l1);
1202			goto copydone;
1203		} /* if */
1204		}
1205
1206		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1207			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1208			if (!length)
1209				break;
1210
1211			ps_page = &buffer_info->ps_pages[j];
1212			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1213				       DMA_FROM_DEVICE);
1214			ps_page->dma = 0;
1215			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1216			ps_page->page = NULL;
1217			skb->len += length;
1218			skb->data_len += length;
1219			skb->truesize += length;
1220		}
1221
 1222		/* strip the ethernet crc; the problem is we're using pages now,
 1223		 * so this whole operation can get a little cpu intensive
1224		 */
1225		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
1226			pskb_trim(skb, skb->len - 4);
1227
1228copydone:
1229		total_rx_bytes += skb->len;
1230		total_rx_packets++;
1231
1232		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
1233			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
1234
1235		if (rx_desc->wb.upper.header_status &
1236			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1237			adapter->rx_hdr_split++;
1238
1239		e1000_receive_skb(adapter, netdev, skb,
1240				  staterr, rx_desc->wb.middle.vlan);
1241
1242next_desc:
1243		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1244		buffer_info->skb = NULL;
1245
1246		/* return some buffers to hardware, one at a time is too slow */
1247		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1248			adapter->alloc_rx_buf(adapter, cleaned_count);
1249			cleaned_count = 0;
1250		}
1251
1252		/* use prefetched values */
1253		rx_desc = next_rxd;
1254		buffer_info = next_buffer;
1255
1256		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1257	}
1258	rx_ring->next_to_clean = i;
1259
1260	cleaned_count = e1000_desc_unused(rx_ring);
1261	if (cleaned_count)
1262		adapter->alloc_rx_buf(adapter, cleaned_count);
1263
1264	adapter->total_rx_bytes += total_rx_bytes;
1265	adapter->total_rx_packets += total_rx_packets;
1266	return cleaned;
1267}
1268
1269/**
1270 * e1000_consume_page - helper function
1271 **/
1272static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1273                               u16 length)
1274{
1275	bi->page = NULL;
1276	skb->len += length;
1277	skb->data_len += length;
1278	skb->truesize += length;
1279}
1280
1281/**
1282 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1283 * @adapter: board private structure
1284 *
 1285 * The return value indicates whether actual cleaning was done; there
 1286 * is no guarantee that everything was cleaned.
1287 **/
1288
1289static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1290                                     int *work_done, int work_to_do)
1291{
1292	struct net_device *netdev = adapter->netdev;
1293	struct pci_dev *pdev = adapter->pdev;
1294	struct e1000_ring *rx_ring = adapter->rx_ring;
1295	struct e1000_rx_desc *rx_desc, *next_rxd;
1296	struct e1000_buffer *buffer_info, *next_buffer;
1297	u32 length;
1298	unsigned int i;
1299	int cleaned_count = 0;
1300	bool cleaned = false;
 1301	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1302
1303	i = rx_ring->next_to_clean;
1304	rx_desc = E1000_RX_DESC(*rx_ring, i);
1305	buffer_info = &rx_ring->buffer_info[i];
1306
1307	while (rx_desc->status & E1000_RXD_STAT_DD) {
1308		struct sk_buff *skb;
1309		u8 status;
1310
1311		if (*work_done >= work_to_do)
1312			break;
1313		(*work_done)++;
1314		rmb();	/* read descriptor and rx_buffer_info after status DD */
1315
1316		status = rx_desc->status;
1317		skb = buffer_info->skb;
1318		buffer_info->skb = NULL;
1319
1320		++i;
1321		if (i == rx_ring->count)
1322			i = 0;
1323		next_rxd = E1000_RX_DESC(*rx_ring, i);
1324		prefetch(next_rxd);
1325
1326		next_buffer = &rx_ring->buffer_info[i];
1327
1328		cleaned = true;
1329		cleaned_count++;
1330		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1331			       DMA_FROM_DEVICE);
1332		buffer_info->dma = 0;
1333
1334		length = le16_to_cpu(rx_desc->length);
1335
1336		/* errors is only valid for DD + EOP descriptors */
1337		if (unlikely((status & E1000_RXD_STAT_EOP) &&
1338		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
1339				/* recycle both page and skb */
1340				buffer_info->skb = skb;
1341				/* an error means any chain goes out the window
1342				 * too */
1343				if (rx_ring->rx_skb_top)
1344					dev_kfree_skb_irq(rx_ring->rx_skb_top);
1345				rx_ring->rx_skb_top = NULL;
1346				goto next_desc;
1347		}
1348
1349#define rxtop (rx_ring->rx_skb_top)
1350		if (!(status & E1000_RXD_STAT_EOP)) {
1351			/* this descriptor is only the beginning (or middle) */
1352			if (!rxtop) {
1353				/* this is the beginning of a chain */
1354				rxtop = skb;
1355				skb_fill_page_desc(rxtop, 0, buffer_info->page,
1356				                   0, length);
1357			} else {
1358				/* this is the middle of a chain */
1359				skb_fill_page_desc(rxtop,
1360				    skb_shinfo(rxtop)->nr_frags,
1361				    buffer_info->page, 0, length);
1362				/* re-use the skb, only consumed the page */
1363				buffer_info->skb = skb;
1364			}
1365			e1000_consume_page(buffer_info, rxtop, length);
1366			goto next_desc;
1367		} else {
1368			if (rxtop) {
1369				/* end of the chain */
1370				skb_fill_page_desc(rxtop,
1371				    skb_shinfo(rxtop)->nr_frags,
1372				    buffer_info->page, 0, length);
1373				/* re-use the current skb, we only consumed the
1374				 * page */
1375				buffer_info->skb = skb;
1376				skb = rxtop;
1377				rxtop = NULL;
1378				e1000_consume_page(buffer_info, skb, length);
1379			} else {
 1380				/* no chain, got EOP: this buf is the packet;
 1381				 * copybreak to save the put_page/alloc_page */
1382				if (length <= copybreak &&
1383				    skb_tailroom(skb) >= length) {
1384					u8 *vaddr;
1385					vaddr = kmap_atomic(buffer_info->page,
1386					                   KM_SKB_DATA_SOFTIRQ);
1387					memcpy(skb_tail_pointer(skb), vaddr,
1388					       length);
1389					kunmap_atomic(vaddr,
1390					              KM_SKB_DATA_SOFTIRQ);
1391					/* re-use the page, so don't erase
1392					 * buffer_info->page */
1393					skb_put(skb, length);
1394				} else {
1395					skb_fill_page_desc(skb, 0,
1396					                   buffer_info->page, 0,
1397				                           length);
1398					e1000_consume_page(buffer_info, skb,
1399					                   length);
1400				}
1401			}
1402		}
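     		/*
     		 * Note: at this point skb is either the lone EOP buffer or
     		 * the head of a chain whose page fragments were attached
     		 * one descriptor at a time above; either way it now
     		 * describes one complete jumbo frame.
     		 */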
1403
1404		/* Receive Checksum Offload XXX recompute due to CRC strip? */
1405		e1000_rx_checksum(adapter,
1406		                  (u32)(status) |
1407		                  ((u32)(rx_desc->errors) << 24),
1408		                  le16_to_cpu(rx_desc->csum), skb);
1409
1410		/* probably a little skewed due to removing CRC */
1411		total_rx_bytes += skb->len;
1412		total_rx_packets++;
1413
1414		/* eth type trans needs skb->data to point to something */
1415		if (!pskb_may_pull(skb, ETH_HLEN)) {
1416			e_err("pskb_may_pull failed.\n");
1417			dev_kfree_skb_irq(skb);
1418			goto next_desc;
1419		}
1420
1421		e1000_receive_skb(adapter, netdev, skb, status,
1422		                  rx_desc->special);
1423
1424next_desc:
1425		rx_desc->status = 0;
1426
1427		/* return some buffers to hardware, one at a time is too slow */
1428		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1429			adapter->alloc_rx_buf(adapter, cleaned_count);
1430			cleaned_count = 0;
1431		}
1432
1433		/* use prefetched values */
1434		rx_desc = next_rxd;
1435		buffer_info = next_buffer;
1436	}
1437	rx_ring->next_to_clean = i;
1438
1439	cleaned_count = e1000_desc_unused(rx_ring);
1440	if (cleaned_count)
1441		adapter->alloc_rx_buf(adapter, cleaned_count);
1442
1443	adapter->total_rx_bytes += total_rx_bytes;
1444	adapter->total_rx_packets += total_rx_packets;
1445	return cleaned;
1446}
1447
1448/**
1449 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1450 * @adapter: board private structure
1451 **/
1452static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1453{
1454	struct e1000_ring *rx_ring = adapter->rx_ring;
1455	struct e1000_buffer *buffer_info;
1456	struct e1000_ps_page *ps_page;
1457	struct pci_dev *pdev = adapter->pdev;
1458	unsigned int i, j;
1459
1460	/* Free all the Rx ring sk_buffs */
1461	for (i = 0; i < rx_ring->count; i++) {
1462		buffer_info = &rx_ring->buffer_info[i];
1463		if (buffer_info->dma) {
1464			if (adapter->clean_rx == e1000_clean_rx_irq)
1465				dma_unmap_single(&pdev->dev, buffer_info->dma,
1466						 adapter->rx_buffer_len,
1467						 DMA_FROM_DEVICE);
1468			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1469				dma_unmap_page(&pdev->dev, buffer_info->dma,
1470				               PAGE_SIZE,
1471					       DMA_FROM_DEVICE);
1472			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1473				dma_unmap_single(&pdev->dev, buffer_info->dma,
1474						 adapter->rx_ps_bsize0,
1475						 DMA_FROM_DEVICE);
1476			buffer_info->dma = 0;
1477		}
1478
1479		if (buffer_info->page) {
1480			put_page(buffer_info->page);
1481			buffer_info->page = NULL;
1482		}
1483
1484		if (buffer_info->skb) {
1485			dev_kfree_skb(buffer_info->skb);
1486			buffer_info->skb = NULL;
1487		}
1488
1489		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1490			ps_page = &buffer_info->ps_pages[j];
1491			if (!ps_page->page)
1492				break;
1493			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1494				       DMA_FROM_DEVICE);
1495			ps_page->dma = 0;
1496			put_page(ps_page->page);
1497			ps_page->page = NULL;
1498		}
1499	}
1500
1501	/* there also may be some cached data from a chained receive */
1502	if (rx_ring->rx_skb_top) {
1503		dev_kfree_skb(rx_ring->rx_skb_top);
1504		rx_ring->rx_skb_top = NULL;
1505	}
1506
1507	/* Zero out the descriptor ring */
1508	memset(rx_ring->desc, 0, rx_ring->size);
1509
1510	rx_ring->next_to_clean = 0;
1511	rx_ring->next_to_use = 0;
1512	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1513
1514	writel(0, adapter->hw.hw_addr + rx_ring->head);
1515	writel(0, adapter->hw.hw_addr + rx_ring->tail);
1516}
1517
1518static void e1000e_downshift_workaround(struct work_struct *work)
1519{
1520	struct e1000_adapter *adapter = container_of(work,
1521					struct e1000_adapter, downshift_task);
1522
1523	if (test_bit(__E1000_DOWN, &adapter->state))
1524		return;
1525
1526	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1527}
1528
1529/**
1530 * e1000_intr_msi - Interrupt Handler
1531 * @irq: interrupt number
1532 * @data: pointer to a network interface device structure
1533 **/
1534static irqreturn_t e1000_intr_msi(int irq, void *data)
1535{
1536	struct net_device *netdev = data;
1537	struct e1000_adapter *adapter = netdev_priv(netdev);
1538	struct e1000_hw *hw = &adapter->hw;
1539	u32 icr = er32(ICR);
1540
1541	/*
 1542	 * reading ICR disables interrupts using IAM
1543	 */
1544
1545	if (icr & E1000_ICR_LSC) {
1546		hw->mac.get_link_status = 1;
1547		/*
1548		 * ICH8 workaround-- Call gig speed drop workaround on cable
1549		 * disconnect (LSC) before accessing any PHY registers
1550		 */
1551		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1552		    (!(er32(STATUS) & E1000_STATUS_LU)))
1553			schedule_work(&adapter->downshift_task);
1554
1555		/*
1556		 * 80003ES2LAN workaround-- For packet buffer work-around on
1557		 * link down event; disable receives here in the ISR and reset
1558		 * adapter in watchdog
1559		 */
1560		if (netif_carrier_ok(netdev) &&
1561		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
1562			/* disable receives */
1563			u32 rctl = er32(RCTL);
1564			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1565			adapter->flags |= FLAG_RX_RESTART_NOW;
1566		}
1567		/* guard against interrupt when we're going down */
1568		if (!test_bit(__E1000_DOWN, &adapter->state))
1569			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1570	}
1571
1572	if (napi_schedule_prep(&adapter->napi)) {
1573		adapter->total_tx_bytes = 0;
1574		adapter->total_tx_packets = 0;
1575		adapter->total_rx_bytes = 0;
1576		adapter->total_rx_packets = 0;
1577		__napi_schedule(&adapter->napi);
1578	}
1579
1580	return IRQ_HANDLED;
1581}
1582
1583/**
1584 * e1000_intr - Interrupt Handler
1585 * @irq: interrupt number
1586 * @data: pointer to a network interface device structure
1587 **/
1588static irqreturn_t e1000_intr(int irq, void *data)
1589{
1590	struct net_device *netdev = data;
1591	struct e1000_adapter *adapter = netdev_priv(netdev);
1592	struct e1000_hw *hw = &adapter->hw;
1593	u32 rctl, icr = er32(ICR);
1594
1595	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1596		return IRQ_NONE;  /* Not our interrupt */
1597
1598	/*
1599	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1600	 * not set, then the adapter didn't send an interrupt
1601	 */
1602	if (!(icr & E1000_ICR_INT_ASSERTED))
1603		return IRQ_NONE;
1604
1605	/*
1606	 * Interrupt Auto-Mask...upon reading ICR,
1607	 * interrupts are masked.  No need for the
1608	 * IMC write
1609	 */
1610
1611	if (icr & E1000_ICR_LSC) {
1612		hw->mac.get_link_status = 1;
1613		/*
1614		 * ICH8 workaround-- Call gig speed drop workaround on cable
1615		 * disconnect (LSC) before accessing any PHY registers
1616		 */
1617		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1618		    (!(er32(STATUS) & E1000_STATUS_LU)))
1619			schedule_work(&adapter->downshift_task);
1620
1621		/*
1622		 * 80003ES2LAN workaround--
1623		 * For packet buffer work-around on link down event;
1624		 * disable receives here in the ISR and
1625		 * reset adapter in watchdog
1626		 */
1627		if (netif_carrier_ok(netdev) &&
1628		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1629			/* disable receives */
1630			rctl = er32(RCTL);
1631			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1632			adapter->flags |= FLAG_RX_RESTART_NOW;
1633		}
1634		/* guard against interrupt when we're going down */
1635		if (!test_bit(__E1000_DOWN, &adapter->state))
1636			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1637	}
1638
1639	if (napi_schedule_prep(&adapter->napi)) {
1640		adapter->total_tx_bytes = 0;
1641		adapter->total_tx_packets = 0;
1642		adapter->total_rx_bytes = 0;
1643		adapter->total_rx_packets = 0;
1644		__napi_schedule(&adapter->napi);
1645	}
1646
1647	return IRQ_HANDLED;
1648}
1649
1650static irqreturn_t e1000_msix_other(int irq, void *data)
1651{
1652	struct net_device *netdev = data;
1653	struct e1000_adapter *adapter = netdev_priv(netdev);
1654	struct e1000_hw *hw = &adapter->hw;
1655	u32 icr = er32(ICR);
1656
1657	if (!(icr & E1000_ICR_INT_ASSERTED)) {
1658		if (!test_bit(__E1000_DOWN, &adapter->state))
1659			ew32(IMS, E1000_IMS_OTHER);
1660		return IRQ_NONE;
1661	}
1662
1663	if (icr & adapter->eiac_mask)
1664		ew32(ICS, (icr & adapter->eiac_mask));
1665
1666	if (icr & E1000_ICR_OTHER) {
1667		if (!(icr & E1000_ICR_LSC))
1668			goto no_link_interrupt;
1669		hw->mac.get_link_status = 1;
1670		/* guard against interrupt when we're going down */
1671		if (!test_bit(__E1000_DOWN, &adapter->state))
1672			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1673	}
1674
1675no_link_interrupt:
1676	if (!test_bit(__E1000_DOWN, &adapter->state))
1677		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1678
1679	return IRQ_HANDLED;
1680}
1681
1683static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1684{
1685	struct net_device *netdev = data;
1686	struct e1000_adapter *adapter = netdev_priv(netdev);
1687	struct e1000_hw *hw = &adapter->hw;
1688	struct e1000_ring *tx_ring = adapter->tx_ring;
1689
1691	adapter->total_tx_bytes = 0;
1692	adapter->total_tx_packets = 0;
1693
1694	if (!e1000_clean_tx_irq(adapter))
1695		/* Ring was not completely cleaned, so fire another interrupt */
1696		ew32(ICS, tx_ring->ims_val);
1697
1698	return IRQ_HANDLED;
1699}
1700
1701static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1702{
1703	struct net_device *netdev = data;
1704	struct e1000_adapter *adapter = netdev_priv(netdev);
1705
1706	/* Write the ITR v…
