
/drivers/net/ethernet/qlogic/qlge/qlge_main.c

http://github.com/mirrors/linux
C | 5026 lines | 3842 code | 547 blank | 637 comment | 541 complexity | e21be1e547e074043d555e0c6e178a8b MD5


   1/*
   2 * QLogic qlge NIC HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 * See LICENSE.qlge for copyright and licensing details.
   5 * Author:     Linux qlge network device driver by
   6 *                      Ron Mercer <ron.mercer@qlogic.com>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/bitops.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/pagemap.h>
  16#include <linux/sched.h>
  17#include <linux/slab.h>
  18#include <linux/dmapool.h>
  19#include <linux/mempool.h>
  20#include <linux/spinlock.h>
  21#include <linux/kthread.h>
  22#include <linux/interrupt.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/in.h>
  26#include <linux/ip.h>
  27#include <linux/ipv6.h>
  28#include <net/ipv6.h>
  29#include <linux/tcp.h>
  30#include <linux/udp.h>
  31#include <linux/if_arp.h>
  32#include <linux/if_ether.h>
  33#include <linux/netdevice.h>
  34#include <linux/etherdevice.h>
  35#include <linux/ethtool.h>
  36#include <linux/if_vlan.h>
  37#include <linux/skbuff.h>
  38#include <linux/delay.h>
  39#include <linux/mm.h>
  40#include <linux/vmalloc.h>
  41#include <linux/prefetch.h>
  42#include <net/ip6_checksum.h>
  43
  44#include "qlge.h"
  45
  46char qlge_driver_name[] = DRV_NAME;
  47const char qlge_driver_version[] = DRV_VERSION;
  48
  49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  50MODULE_DESCRIPTION(DRV_STRING " ");
  51MODULE_LICENSE("GPL");
  52MODULE_VERSION(DRV_VERSION);
  53
  54static const u32 default_msg =
  55    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  56/* NETIF_MSG_TIMER |	*/
  57    NETIF_MSG_IFDOWN |
  58    NETIF_MSG_IFUP |
  59    NETIF_MSG_RX_ERR |
  60    NETIF_MSG_TX_ERR |
  61/*  NETIF_MSG_TX_QUEUED | */
  62/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
  63/* NETIF_MSG_PKTDATA | */
  64    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  65
  66static int debug = -1;	/* defaults above */
  67module_param(debug, int, 0664);
  68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  69
  70#define MSIX_IRQ 0
  71#define MSI_IRQ 1
  72#define LEG_IRQ 2
  73static int qlge_irq_type = MSIX_IRQ;
  74module_param(qlge_irq_type, int, 0664);
  75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  76
  77static int qlge_mpi_coredump;
  78module_param(qlge_mpi_coredump, int, 0);
  79MODULE_PARM_DESC(qlge_mpi_coredump,
  80		"Option to enable MPI firmware dump. "
   81		"Default is OFF - Do not allocate memory.");
  82
  83static int qlge_force_coredump;
  84module_param(qlge_force_coredump, int, 0);
  85MODULE_PARM_DESC(qlge_force_coredump,
  86		"Option to allow force of firmware core dump. "
  87		"Default is OFF - Do not allow.");
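/* Example usage (assuming the module is built as qlge.ko, per DRV_NAME):
 *   modprobe qlge qlge_irq_type=2 qlge_mpi_coredump=1
 * selects legacy interrupts and allows memory to be allocated for an MPI
 * firmware dump.
 */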
  88
  89static const struct pci_device_id qlge_pci_tbl[] = {
  90	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
  91	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
  92	/* required last entry */
  93	{0,}
  94};
  95
  96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  97
  98static int ql_wol(struct ql_adapter *);
  99static void qlge_set_multicast_list(struct net_device *);
 100static int ql_adapter_down(struct ql_adapter *);
 101static int ql_adapter_up(struct ql_adapter *);
 102
 103/* This hardware semaphore causes exclusive access to
 104 * resources shared between the NIC driver, MPI firmware,
 105 * FCOE firmware and the FC driver.
 106 */
 107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 108{
 109	u32 sem_bits = 0;
 110
 111	switch (sem_mask) {
 112	case SEM_XGMAC0_MASK:
 113		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
 114		break;
 115	case SEM_XGMAC1_MASK:
 116		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
 117		break;
 118	case SEM_ICB_MASK:
 119		sem_bits = SEM_SET << SEM_ICB_SHIFT;
 120		break;
 121	case SEM_MAC_ADDR_MASK:
 122		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
 123		break;
 124	case SEM_FLASH_MASK:
 125		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
 126		break;
 127	case SEM_PROBE_MASK:
 128		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
 129		break;
 130	case SEM_RT_IDX_MASK:
 131		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
 132		break;
 133	case SEM_PROC_REG_MASK:
 134		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 135		break;
 136	default:
 137		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
 138		return -EINVAL;
 139	}
 140
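	/* Request ownership by writing the SET code for this resource, then
	 * read back: if our bits stuck, we hold the semaphore.  Returns 0 on
	 * success, non-zero if another function currently owns it.
	 */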
 141	ql_write32(qdev, SEM, sem_bits | sem_mask);
 142	return !(ql_read32(qdev, SEM) & sem_bits);
 143}
 144
 145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 146{
 147	unsigned int wait_count = 30;
 148	do {
 149		if (!ql_sem_trylock(qdev, sem_mask))
 150			return 0;
 151		udelay(100);
 152	} while (--wait_count);
 153	return -ETIMEDOUT;
 154}
 155
 156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
 157{
 158	ql_write32(qdev, SEM, sem_mask);
 159	ql_read32(qdev, SEM);	/* flush */
 160}
 161
 162/* This function waits for a specific bit to come ready
 163 * in a given register.  It is used mostly by the initialize
 164 * process, but is also used in kernel thread API such as
 165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 166 */
 167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 168{
 169	u32 temp;
 170	int count = UDELAY_COUNT;
 171
 172	while (count) {
 173		temp = ql_read32(qdev, reg);
 174
 175		/* check for errors */
 176		if (temp & err_bit) {
 177			netif_alert(qdev, probe, qdev->ndev,
 178				    "register 0x%.08x access error, value = 0x%.08x!.\n",
 179				    reg, temp);
 180			return -EIO;
 181		} else if (temp & bit)
 182			return 0;
 183		udelay(UDELAY_DELAY);
 184		count--;
 185	}
 186	netif_alert(qdev, probe, qdev->ndev,
 187		    "Timed out waiting for reg %x to come ready.\n", reg);
 188	return -ETIMEDOUT;
 189}
 190
 191/* The CFG register is used to download TX and RX control blocks
 192 * to the chip. This function waits for an operation to complete.
 193 */
 194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 195{
 196	int count = UDELAY_COUNT;
 197	u32 temp;
 198
 199	while (count) {
 200		temp = ql_read32(qdev, CFG);
 201		if (temp & CFG_LE)
 202			return -EIO;
 203		if (!(temp & bit))
 204			return 0;
 205		udelay(UDELAY_DELAY);
 206		count--;
 207	}
 208	return -ETIMEDOUT;
 209}
 210
 211
 212/* Used to issue init control blocks to hw. Maps control block,
 213 * sets address, triggers download, waits for completion.
 214 */
 215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 216		 u16 q_id)
 217{
 218	u64 map;
 219	int status = 0;
 220	int direction;
 221	u32 mask;
 222	u32 value;
 223
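	/* Load operations (CFG_LRQ/CFG_LR/CFG_LCQ) push the control block to
	 * the chip, so map the buffer to-device; the other operations transfer
	 * data back from the chip, so map from-device.
	 */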
 224	direction =
 225	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
 226	    PCI_DMA_FROMDEVICE;
 227
 228	map = pci_map_single(qdev->pdev, ptr, size, direction);
 229	if (pci_dma_mapping_error(qdev->pdev, map)) {
 230		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 231		return -ENOMEM;
 232	}
 233
 234	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
 235	if (status)
 236		return status;
 237
 238	status = ql_wait_cfg(qdev, bit);
 239	if (status) {
 240		netif_err(qdev, ifup, qdev->ndev,
 241			  "Timed out waiting for CFG to come ready.\n");
 242		goto exit;
 243	}
 244
 245	ql_write32(qdev, ICB_L, (u32) map);
 246	ql_write32(qdev, ICB_H, (u32) (map >> 32));
 247
 248	mask = CFG_Q_MASK | (bit << 16);
 249	value = bit | (q_id << CFG_Q_SHIFT);
 250	ql_write32(qdev, CFG, (mask | value));
 251
 252	/*
 253	 * Wait for the bit to clear after signaling hw.
 254	 */
 255	status = ql_wait_cfg(qdev, bit);
 256exit:
 257	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
 258	pci_unmap_single(qdev->pdev, map, size, direction);
 259	return status;
 260}
 261
 262/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
 263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 264			u32 *value)
 265{
 266	u32 offset = 0;
 267	int status;
 268
 269	switch (type) {
 270	case MAC_ADDR_TYPE_MULTI_MAC:
 271	case MAC_ADDR_TYPE_CAM_MAC:
 272		{
 273			status =
 274			    ql_wait_reg_rdy(qdev,
 275				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 276			if (status)
 277				goto exit;
 278			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 279				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 280				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 281			status =
 282			    ql_wait_reg_rdy(qdev,
 283				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 284			if (status)
 285				goto exit;
 286			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 287			status =
 288			    ql_wait_reg_rdy(qdev,
 289				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 290			if (status)
 291				goto exit;
 292			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 293				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 294				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 295			status =
 296			    ql_wait_reg_rdy(qdev,
 297				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 298			if (status)
 299				goto exit;
 300			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 301			if (type == MAC_ADDR_TYPE_CAM_MAC) {
 302				status =
 303				    ql_wait_reg_rdy(qdev,
 304					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 305				if (status)
 306					goto exit;
 307				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 308					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 309					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 310				status =
 311				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 312						    MAC_ADDR_MR, 0);
 313				if (status)
 314					goto exit;
 315				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 316			}
 317			break;
 318		}
 319	case MAC_ADDR_TYPE_VLAN:
 320	case MAC_ADDR_TYPE_MULTI_FLTR:
 321	default:
 322		netif_crit(qdev, ifup, qdev->ndev,
 323			   "Address type %d not yet supported.\n", type);
 324		status = -EPERM;
 325	}
 326exit:
 327	return status;
 328}
 329
 330/* Set up a MAC, multicast or VLAN address for the
 331 * inbound frame matching.
 332 */
 333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 334			       u16 index)
 335{
 336	u32 offset = 0;
 337	int status = 0;
 338
 339	switch (type) {
 340	case MAC_ADDR_TYPE_MULTI_MAC:
 341		{
 342			u32 upper = (addr[0] << 8) | addr[1];
 343			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
 344					(addr[4] << 8) | (addr[5]);
 345
 346			status =
 347				ql_wait_reg_rdy(qdev,
 348				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 349			if (status)
 350				goto exit;
 351			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 352				(index << MAC_ADDR_IDX_SHIFT) |
 353				type | MAC_ADDR_E);
 354			ql_write32(qdev, MAC_ADDR_DATA, lower);
 355			status =
 356				ql_wait_reg_rdy(qdev,
 357				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 358			if (status)
 359				goto exit;
 360			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 361				(index << MAC_ADDR_IDX_SHIFT) |
 362				type | MAC_ADDR_E);
 363
 364			ql_write32(qdev, MAC_ADDR_DATA, upper);
 365			status =
 366				ql_wait_reg_rdy(qdev,
 367				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 368			if (status)
 369				goto exit;
 370			break;
 371		}
 372	case MAC_ADDR_TYPE_CAM_MAC:
 373		{
 374			u32 cam_output;
 375			u32 upper = (addr[0] << 8) | addr[1];
 376			u32 lower =
 377			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 378			    (addr[5]);
 379			status =
 380			    ql_wait_reg_rdy(qdev,
 381				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 382			if (status)
 383				goto exit;
 384			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 385				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 386				   type);	/* type */
 387			ql_write32(qdev, MAC_ADDR_DATA, lower);
 388			status =
 389			    ql_wait_reg_rdy(qdev,
 390				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 391			if (status)
 392				goto exit;
 393			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 394				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 395				   type);	/* type */
 396			ql_write32(qdev, MAC_ADDR_DATA, upper);
 397			status =
 398			    ql_wait_reg_rdy(qdev,
 399				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 400			if (status)
 401				goto exit;
 402			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
 403				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
 404				   type);	/* type */
 405			/* This field should also include the queue id
 406			   and possibly the function id.  Right now we hardcode
 407			   the route field to NIC core.
 408			 */
 409			cam_output = (CAM_OUT_ROUTE_NIC |
 410				      (qdev->
 411				       func << CAM_OUT_FUNC_SHIFT) |
 412					(0 << CAM_OUT_CQ_ID_SHIFT));
 413			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 414				cam_output |= CAM_OUT_RV;
 415			/* route to NIC core */
 416			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 417			break;
 418		}
 419	case MAC_ADDR_TYPE_VLAN:
 420		{
 421			u32 enable_bit = *((u32 *) &addr[0]);
 422			/* For VLAN, the addr actually holds a bit that
 423			 * either enables or disables the vlan id we are
 424			 * addressing. It's either MAC_ADDR_E on or off.
 425			 * That's bit-27 we're talking about.
 426			 */
 427			status =
 428			    ql_wait_reg_rdy(qdev,
 429				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 430			if (status)
 431				goto exit;
 432			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
 433				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
 434				   type |	/* type */
 435				   enable_bit);	/* enable/disable */
 436			break;
 437		}
 438	case MAC_ADDR_TYPE_MULTI_FLTR:
 439	default:
 440		netif_crit(qdev, ifup, qdev->ndev,
 441			   "Address type %d not yet supported.\n", type);
 442		status = -EPERM;
 443	}
 444exit:
 445	return status;
 446}
 447
 448/* Set or clear MAC address in hardware. We sometimes
 449 * have to clear it to prevent wrong frame routing
 450 * especially in a bonding environment.
 451 */
 452static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 453{
 454	int status;
 455	char zero_mac_addr[ETH_ALEN];
 456	char *addr;
 457
 458	if (set) {
 459		addr = &qdev->current_mac_addr[0];
 460		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 461			     "Set Mac addr %pM\n", addr);
 462	} else {
 463		eth_zero_addr(zero_mac_addr);
 464		addr = &zero_mac_addr[0];
 465		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 466			     "Clearing MAC address\n");
 467	}
 468	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 469	if (status)
 470		return status;
 471	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
 472			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
 473	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 474	if (status)
 475		netif_err(qdev, ifup, qdev->ndev,
 476			  "Failed to init mac address.\n");
 477	return status;
 478}
 479
 480void ql_link_on(struct ql_adapter *qdev)
 481{
 482	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
 483	netif_carrier_on(qdev->ndev);
 484	ql_set_mac_addr(qdev, 1);
 485}
 486
 487void ql_link_off(struct ql_adapter *qdev)
 488{
 489	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
 490	netif_carrier_off(qdev->ndev);
 491	ql_set_mac_addr(qdev, 0);
 492}
 493
 494/* Get a specific frame routing value from the CAM.
 495 * Used for debug and reg dump.
 496 */
 497int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 498{
 499	int status = 0;
 500
 501	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 502	if (status)
 503		goto exit;
 504
 505	ql_write32(qdev, RT_IDX,
 506		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 507	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
 508	if (status)
 509		goto exit;
 510	*value = ql_read32(qdev, RT_DATA);
 511exit:
 512	return status;
 513}
 514
 515/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 516 * to route different frame types to various inbound queues.  We send broadcast/
 517 * multicast/error frames to the default queue for slow handling,
 518 * and CAM hit/RSS frames to the fast handling queues.
 519 */
 520static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 521			      int enable)
 522{
 523	int status = -EINVAL; /* Return error if no mask match. */
 524	u32 value = 0;
 525
 526	switch (mask) {
 527	case RT_IDX_CAM_HIT:
 528		{
 529			value = RT_IDX_DST_CAM_Q |	/* dest */
 530			    RT_IDX_TYPE_NICQ |	/* type */
 531			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
 532			break;
 533		}
 534	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
 535		{
 536			value = RT_IDX_DST_DFLT_Q |	/* dest */
 537			    RT_IDX_TYPE_NICQ |	/* type */
 538			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
 539			break;
 540		}
 541	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
 542		{
 543			value = RT_IDX_DST_DFLT_Q |	/* dest */
 544			    RT_IDX_TYPE_NICQ |	/* type */
 545			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 546			break;
 547		}
 548	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
 549		{
 550			value = RT_IDX_DST_DFLT_Q | /* dest */
 551				RT_IDX_TYPE_NICQ | /* type */
 552				(RT_IDX_IP_CSUM_ERR_SLOT <<
 553				RT_IDX_IDX_SHIFT); /* index */
 554			break;
 555		}
 556	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
 557		{
 558			value = RT_IDX_DST_DFLT_Q | /* dest */
 559				RT_IDX_TYPE_NICQ | /* type */
 560				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
 561				RT_IDX_IDX_SHIFT); /* index */
 562			break;
 563		}
 564	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
 565		{
 566			value = RT_IDX_DST_DFLT_Q |	/* dest */
 567			    RT_IDX_TYPE_NICQ |	/* type */
 568			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
 569			break;
 570		}
 571	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
 572		{
 573			value = RT_IDX_DST_DFLT_Q |	/* dest */
 574			    RT_IDX_TYPE_NICQ |	/* type */
 575			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 576			break;
 577		}
 578	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
 579		{
 580			value = RT_IDX_DST_DFLT_Q |	/* dest */
 581			    RT_IDX_TYPE_NICQ |	/* type */
 582			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 583			break;
 584		}
 585	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
 586		{
 587			value = RT_IDX_DST_RSS |	/* dest */
 588			    RT_IDX_TYPE_NICQ |	/* type */
 589			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 590			break;
 591		}
 592	case 0:		/* Clear the E-bit on an entry. */
 593		{
 594			value = RT_IDX_DST_DFLT_Q |	/* dest */
 595			    RT_IDX_TYPE_NICQ |	/* type */
 596			    (index << RT_IDX_IDX_SHIFT);/* index */
 597			break;
 598		}
 599	default:
 600		netif_err(qdev, ifup, qdev->ndev,
 601			  "Mask type %d not yet supported.\n", mask);
 602		status = -EPERM;
 603		goto exit;
 604	}
 605
 606	if (value) {
 607		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 608		if (status)
 609			goto exit;
 610		value |= (enable ? RT_IDX_E : 0);
 611		ql_write32(qdev, RT_IDX, value);
 612		ql_write32(qdev, RT_DATA, enable ? mask : 0);
 613	}
 614exit:
 615	return status;
 616}
 617
 618static void ql_enable_interrupts(struct ql_adapter *qdev)
 619{
 620	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
 621}
 622
 623static void ql_disable_interrupts(struct ql_adapter *qdev)
 624{
 625	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 626}
 627
 628/* If we're running with multiple MSI-X vectors then we enable on the fly.
 629 * Otherwise, we may have multiple outstanding workers and don't want to
 630 * enable until the last one finishes. In this case, the irq_cnt gets
 631 * incremented every time we queue a worker and decremented every time
 632 * a worker finishes.  Once it hits zero we enable the interrupt.
 633 */
 634u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 635{
 636	u32 var = 0;
 637	unsigned long hw_flags = 0;
 638	struct intr_context *ctx = qdev->intr_context + intr;
 639
 640	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
 641		/* Always enable if we're MSIX multi interrupts and
 642		 * it's not the default (zeroeth) interrupt.
 643		 */
 644		ql_write32(qdev, INTR_EN,
 645			   ctx->intr_en_mask);
 646		var = ql_read32(qdev, STS);
 647		return var;
 648	}
 649
 650	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 651	if (atomic_dec_and_test(&ctx->irq_cnt)) {
 652		ql_write32(qdev, INTR_EN,
 653			   ctx->intr_en_mask);
 654		var = ql_read32(qdev, STS);
 655	}
 656	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 657	return var;
 658}
 659
 660static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 661{
 662	u32 var = 0;
 663	struct intr_context *ctx;
 664
 665	/* HW disables for us if we're MSIX multi interrupts and
 666	 * it's not the default (zeroeth) interrupt.
 667	 */
 668	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
 669		return 0;
 670
 671	ctx = qdev->intr_context + intr;
 672	spin_lock(&qdev->hw_lock);
 673	if (!atomic_read(&ctx->irq_cnt)) {
 674		ql_write32(qdev, INTR_EN,
 675		ctx->intr_dis_mask);
 676		var = ql_read32(qdev, STS);
 677	}
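	/* Record the disable; the enable path above only re-enables the
	 * interrupt once irq_cnt drops back to zero.
	 */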
 678	atomic_inc(&ctx->irq_cnt);
 679	spin_unlock(&qdev->hw_lock);
 680	return var;
 681}
 682
 683static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 684{
 685	int i;
 686	for (i = 0; i < qdev->intr_count; i++) {
 687		/* The enable call does a atomic_dec_and_test
 688		 * and enables only if the result is zero.
 689		 * So we precharge it here.
 690		 */
 691		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
 692			i == 0))
 693			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 694		ql_enable_completion_interrupt(qdev, i);
 695	}
 696
 697}
 698
 699static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 700{
 701	int status, i;
 702	u16 csum = 0;
 703	__le16 *flash = (__le16 *)&qdev->flash;
 704
 705	status = strncmp((char *)&qdev->flash, str, 4);
 706	if (status) {
 707		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
 708		return	status;
 709	}
 710
 711	for (i = 0; i < size; i++)
 712		csum += le16_to_cpu(*flash++);
 713
 714	if (csum)
 715		netif_err(qdev, ifup, qdev->ndev,
 716			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 717
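	/* A non-zero residual means the checksum over the flash image failed;
	 * callers treat any non-zero return from this function as an invalid
	 * flash image.
	 */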
 718	return csum;
 719}
 720
 721static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 722{
 723	int status = 0;
 724	/* wait for reg to come ready */
 725	status = ql_wait_reg_rdy(qdev,
 726			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 727	if (status)
 728		goto exit;
 729	/* set up for reg read */
 730	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 731	/* wait for reg to come ready */
 732	status = ql_wait_reg_rdy(qdev,
 733			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 734	if (status)
 735		goto exit;
  736	/* This data is stored on flash as an array of
 737	 * __le32.  Since ql_read32() returns cpu endian
 738	 * we need to swap it back.
 739	 */
 740	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
 741exit:
 742	return status;
 743}
 744
 745static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 746{
 747	u32 i, size;
 748	int status;
 749	__le32 *p = (__le32 *)&qdev->flash;
 750	u32 offset;
 751	u8 mac_addr[6];
 752
 753	/* Get flash offset for function and adjust
 754	 * for dword access.
 755	 */
 756	if (!qdev->port)
 757		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
 758	else
 759		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
 760
 761	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 762		return -ETIMEDOUT;
 763
 764	size = sizeof(struct flash_params_8000) / sizeof(u32);
 765	for (i = 0; i < size; i++, p++) {
 766		status = ql_read_flash_word(qdev, i+offset, p);
 767		if (status) {
 768			netif_err(qdev, ifup, qdev->ndev,
 769				  "Error reading flash.\n");
 770			goto exit;
 771		}
 772	}
 773
 774	status = ql_validate_flash(qdev,
 775			sizeof(struct flash_params_8000) / sizeof(u16),
 776			"8000");
 777	if (status) {
 778		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 779		status = -EINVAL;
 780		goto exit;
 781	}
 782
 783	/* Extract either manufacturer or BOFM modified
 784	 * MAC address.
 785	 */
 786	if (qdev->flash.flash_params_8000.data_type1 == 2)
 787		memcpy(mac_addr,
 788			qdev->flash.flash_params_8000.mac_addr1,
 789			qdev->ndev->addr_len);
 790	else
 791		memcpy(mac_addr,
 792			qdev->flash.flash_params_8000.mac_addr,
 793			qdev->ndev->addr_len);
 794
 795	if (!is_valid_ether_addr(mac_addr)) {
 796		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
 797		status = -EINVAL;
 798		goto exit;
 799	}
 800
 801	memcpy(qdev->ndev->dev_addr,
 802		mac_addr,
 803		qdev->ndev->addr_len);
 804
 805exit:
 806	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 807	return status;
 808}
 809
 810static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 811{
 812	int i;
 813	int status;
 814	__le32 *p = (__le32 *)&qdev->flash;
 815	u32 offset = 0;
 816	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
 817
 818	/* Second function's parameters follow the first
 819	 * function's.
 820	 */
 821	if (qdev->port)
 822		offset = size;
 823
 824	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 825		return -ETIMEDOUT;
 826
 827	for (i = 0; i < size; i++, p++) {
 828		status = ql_read_flash_word(qdev, i+offset, p);
 829		if (status) {
 830			netif_err(qdev, ifup, qdev->ndev,
 831				  "Error reading flash.\n");
 832			goto exit;
 833		}
 834
 835	}
 836
 837	status = ql_validate_flash(qdev,
 838			sizeof(struct flash_params_8012) / sizeof(u16),
 839			"8012");
 840	if (status) {
 841		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 842		status = -EINVAL;
 843		goto exit;
 844	}
 845
 846	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
 847		status = -EINVAL;
 848		goto exit;
 849	}
 850
 851	memcpy(qdev->ndev->dev_addr,
 852		qdev->flash.flash_params_8012.mac_addr,
 853		qdev->ndev->addr_len);
 854
 855exit:
 856	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 857	return status;
 858}
 859
  860/* xgmac registers are located behind the xgmac_addr and xgmac_data
 861 * register pair.  Each read/write requires us to wait for the ready
 862 * bit before reading/writing the data.
 863 */
 864static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 865{
 866	int status;
 867	/* wait for reg to come ready */
 868	status = ql_wait_reg_rdy(qdev,
 869			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 870	if (status)
 871		return status;
 872	/* write the data to the data reg */
 873	ql_write32(qdev, XGMAC_DATA, data);
 874	/* trigger the write */
 875	ql_write32(qdev, XGMAC_ADDR, reg);
 876	return status;
 877}
 878
  879/* xgmac registers are located behind the xgmac_addr and xgmac_data
 880 * register pair.  Each read/write requires us to wait for the ready
 881 * bit before reading/writing the data.
 882 */
 883int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 884{
 885	int status = 0;
 886	/* wait for reg to come ready */
 887	status = ql_wait_reg_rdy(qdev,
 888			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 889	if (status)
 890		goto exit;
 891	/* set up for reg read */
 892	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 893	/* wait for reg to come ready */
 894	status = ql_wait_reg_rdy(qdev,
 895			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 896	if (status)
 897		goto exit;
 898	/* get the data */
 899	*data = ql_read32(qdev, XGMAC_DATA);
 900exit:
 901	return status;
 902}
 903
 904/* This is used for reading the 64-bit statistics regs. */
 905int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
 906{
 907	int status = 0;
 908	u32 hi = 0;
 909	u32 lo = 0;
 910
 911	status = ql_read_xgmac_reg(qdev, reg, &lo);
 912	if (status)
 913		goto exit;
 914
 915	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
 916	if (status)
 917		goto exit;
 918
 919	*data = (u64) lo | ((u64) hi << 32);
 920
 921exit:
 922	return status;
 923}
 924
 925static int ql_8000_port_initialize(struct ql_adapter *qdev)
 926{
 927	int status;
 928	/*
 929	 * Get MPI firmware version for driver banner
  930	 * and ethtool info.
 931	 */
 932	status = ql_mb_about_fw(qdev);
 933	if (status)
 934		goto exit;
 935	status = ql_mb_get_fw_state(qdev);
 936	if (status)
 937		goto exit;
 938	/* Wake up a worker to get/set the TX/RX frame sizes. */
 939	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
 940exit:
 941	return status;
 942}
 943
 944/* Take the MAC Core out of reset.
 945 * Enable statistics counting.
 946 * Take the transmitter/receiver out of reset.
 947 * This functionality may be done in the MPI firmware at a
 948 * later date.
 949 */
 950static int ql_8012_port_initialize(struct ql_adapter *qdev)
 951{
 952	int status = 0;
 953	u32 data;
 954
 955	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
 956		/* Another function has the semaphore, so
 957		 * wait for the port init bit to come ready.
 958		 */
 959		netif_info(qdev, link, qdev->ndev,
 960			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 961		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 962		if (status) {
 963			netif_crit(qdev, link, qdev->ndev,
 964				   "Port initialize timed out.\n");
 965		}
 966		return status;
 967	}
 968
 969	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
 970	/* Set the core reset. */
 971	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 972	if (status)
 973		goto end;
 974	data |= GLOBAL_CFG_RESET;
 975	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 976	if (status)
 977		goto end;
 978
 979	/* Clear the core reset and turn on jumbo for receiver. */
 980	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
 981	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
 982	data |= GLOBAL_CFG_TX_STAT_EN;
 983	data |= GLOBAL_CFG_RX_STAT_EN;
 984	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 985	if (status)
 986		goto end;
 987
  988	/* Enable transmitter, and clear its reset. */
 989	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
 990	if (status)
 991		goto end;
 992	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
 993	data |= TX_CFG_EN;	/* Enable the transmitter. */
 994	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
 995	if (status)
 996		goto end;
 997
  998	/* Enable receiver and clear its reset. */
 999	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000	if (status)
1001		goto end;
1002	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1003	data |= RX_CFG_EN;	/* Enable the receiver. */
1004	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005	if (status)
1006		goto end;
1007
1008	/* Turn on jumbo. */
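	/* 0x2580 is 9600 decimal - presumably the maximum jumbo frame size
	 * being programmed into the TX and RX MAC parameter registers below.
	 */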
1009	status =
1010	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011	if (status)
1012		goto end;
1013	status =
1014	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015	if (status)
1016		goto end;
1017
1018	/* Signal to the world that the port is enabled.        */
1019	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020end:
1021	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022	return status;
1023}
1024
1025static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026{
1027	return PAGE_SIZE << qdev->lbq_buf_order;
1028}
1029
1030/* Get the next large buffer. */
1031static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032{
1033	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034	rx_ring->lbq_curr_idx++;
1035	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036		rx_ring->lbq_curr_idx = 0;
1037	rx_ring->lbq_free_cnt++;
1038	return lbq_desc;
1039}
1040
1041static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042		struct rx_ring *rx_ring)
1043{
1044	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046	pci_dma_sync_single_for_cpu(qdev->pdev,
1047					dma_unmap_addr(lbq_desc, mapaddr),
1048				    rx_ring->lbq_buf_size,
1049					PCI_DMA_FROMDEVICE);
1050
1051	/* If it's the last chunk of our master page then
1052	 * we unmap it.
1053	 */
1054	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055					== ql_lbq_block_size(qdev))
1056		pci_unmap_page(qdev->pdev,
1057				lbq_desc->p.pg_chunk.map,
1058				ql_lbq_block_size(qdev),
1059				PCI_DMA_FROMDEVICE);
1060	return lbq_desc;
1061}
1062
1063/* Get the next small buffer. */
1064static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065{
1066	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067	rx_ring->sbq_curr_idx++;
1068	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069		rx_ring->sbq_curr_idx = 0;
1070	rx_ring->sbq_free_cnt++;
1071	return sbq_desc;
1072}
1073
1074/* Update an rx ring index. */
1075static void ql_update_cq(struct rx_ring *rx_ring)
1076{
1077	rx_ring->cnsmr_idx++;
1078	rx_ring->curr_entry++;
1079	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080		rx_ring->cnsmr_idx = 0;
1081		rx_ring->curr_entry = rx_ring->cq_base;
1082	}
1083}
1084
1085static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086{
1087	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088}
1089
1090static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091						struct bq_desc *lbq_desc)
1092{
1093	if (!rx_ring->pg_chunk.page) {
1094		u64 map;
1095		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096						GFP_ATOMIC,
1097						qdev->lbq_buf_order);
1098		if (unlikely(!rx_ring->pg_chunk.page)) {
1099			netif_err(qdev, drv, qdev->ndev,
1100				  "page allocation failed.\n");
1101			return -ENOMEM;
1102		}
1103		rx_ring->pg_chunk.offset = 0;
1104		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105					0, ql_lbq_block_size(qdev),
1106					PCI_DMA_FROMDEVICE);
1107		if (pci_dma_mapping_error(qdev->pdev, map)) {
1108			__free_pages(rx_ring->pg_chunk.page,
1109					qdev->lbq_buf_order);
1110			rx_ring->pg_chunk.page = NULL;
1111			netif_err(qdev, drv, qdev->ndev,
1112				  "PCI mapping failed.\n");
1113			return -ENOMEM;
1114		}
1115		rx_ring->pg_chunk.map = map;
1116		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117	}
1118
1119	/* Copy the current master pg_chunk info
1120	 * to the current descriptor.
1121	 */
1122	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123
1124	/* Adjust the master page chunk for next
1125	 * buffer get.
1126	 */
1127	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
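	/* If that was the last chunk of the master page, forget the page so a
	 * fresh block is allocated on the next fill; the reference taken at
	 * allocation travels with this final chunk.  Otherwise take an extra
	 * page reference for the chunk just handed out so the block survives
	 * until every chunk has been consumed.
	 */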
1128	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129		rx_ring->pg_chunk.page = NULL;
1130		lbq_desc->p.pg_chunk.last_flag = 1;
1131	} else {
1132		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133		get_page(rx_ring->pg_chunk.page);
1134		lbq_desc->p.pg_chunk.last_flag = 0;
1135	}
1136	return 0;
1137}
1138/* Process (refill) a large buffer queue. */
1139static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140{
1141	u32 clean_idx = rx_ring->lbq_clean_idx;
1142	u32 start_idx = clean_idx;
1143	struct bq_desc *lbq_desc;
1144	u64 map;
1145	int i;
1146
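	/* Large buffers are replenished sixteen at a time; the producer index
	 * is advanced per batch and the doorbell is written once, after the
	 * loop, if anything was refilled.
	 */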
1147	while (rx_ring->lbq_free_cnt > 32) {
1148		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150				     "lbq: try cleaning clean_idx = %d.\n",
1151				     clean_idx);
1152			lbq_desc = &rx_ring->lbq[clean_idx];
1153			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154				rx_ring->lbq_clean_idx = clean_idx;
1155				netif_err(qdev, ifup, qdev->ndev,
1156						"Could not get a page chunk, i=%d, clean_idx =%d .\n",
1157						i, clean_idx);
1158				return;
1159			}
1160
 1161			map = lbq_desc->p.pg_chunk.map +
 1162				lbq_desc->p.pg_chunk.offset;
 1163			dma_unmap_addr_set(lbq_desc, mapaddr, map);
 1164			dma_unmap_len_set(lbq_desc, maplen,
 1165					rx_ring->lbq_buf_size);
 1166			*lbq_desc->addr = cpu_to_le64(map);
1167
1168			pci_dma_sync_single_for_device(qdev->pdev, map,
1169						rx_ring->lbq_buf_size,
1170						PCI_DMA_FROMDEVICE);
1171			clean_idx++;
1172			if (clean_idx == rx_ring->lbq_len)
1173				clean_idx = 0;
1174		}
1175
1176		rx_ring->lbq_clean_idx = clean_idx;
1177		rx_ring->lbq_prod_idx += 16;
1178		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179			rx_ring->lbq_prod_idx = 0;
1180		rx_ring->lbq_free_cnt -= 16;
1181	}
1182
1183	if (start_idx != clean_idx) {
1184		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185			     "lbq: updating prod idx = %d.\n",
1186			     rx_ring->lbq_prod_idx);
1187		ql_write_db_reg(rx_ring->lbq_prod_idx,
1188				rx_ring->lbq_prod_idx_db_reg);
1189	}
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
1195	u32 clean_idx = rx_ring->sbq_clean_idx;
1196	u32 start_idx = clean_idx;
1197	struct bq_desc *sbq_desc;
1198	u64 map;
1199	int i;
1200
1201	while (rx_ring->sbq_free_cnt > 16) {
1202		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203			sbq_desc = &rx_ring->sbq[clean_idx];
1204			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205				     "sbq: try cleaning clean_idx = %d.\n",
1206				     clean_idx);
1207			if (sbq_desc->p.skb == NULL) {
1208				netif_printk(qdev, rx_status, KERN_DEBUG,
1209					     qdev->ndev,
1210					     "sbq: getting new skb for index %d.\n",
1211					     sbq_desc->index);
1212				sbq_desc->p.skb =
1213				    netdev_alloc_skb(qdev->ndev,
1214						     SMALL_BUFFER_SIZE);
1215				if (sbq_desc->p.skb == NULL) {
1216					rx_ring->sbq_clean_idx = clean_idx;
1217					return;
1218				}
1219				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220				map = pci_map_single(qdev->pdev,
1221						     sbq_desc->p.skb->data,
1222						     rx_ring->sbq_buf_size,
1223						     PCI_DMA_FROMDEVICE);
1224				if (pci_dma_mapping_error(qdev->pdev, map)) {
1225					netif_err(qdev, ifup, qdev->ndev,
1226						  "PCI mapping failed.\n");
1227					rx_ring->sbq_clean_idx = clean_idx;
1228					dev_kfree_skb_any(sbq_desc->p.skb);
1229					sbq_desc->p.skb = NULL;
1230					return;
1231				}
1232				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233				dma_unmap_len_set(sbq_desc, maplen,
1234						  rx_ring->sbq_buf_size);
1235				*sbq_desc->addr = cpu_to_le64(map);
1236			}
1237
1238			clean_idx++;
1239			if (clean_idx == rx_ring->sbq_len)
1240				clean_idx = 0;
1241		}
1242		rx_ring->sbq_clean_idx = clean_idx;
1243		rx_ring->sbq_prod_idx += 16;
1244		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245			rx_ring->sbq_prod_idx = 0;
1246		rx_ring->sbq_free_cnt -= 16;
1247	}
1248
1249	if (start_idx != clean_idx) {
1250		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251			     "sbq: updating prod idx = %d.\n",
1252			     rx_ring->sbq_prod_idx);
1253		ql_write_db_reg(rx_ring->sbq_prod_idx,
1254				rx_ring->sbq_prod_idx_db_reg);
1255	}
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259				    struct rx_ring *rx_ring)
1260{
1261	ql_update_sbq(qdev, rx_ring);
1262	ql_update_lbq(qdev, rx_ring);
1263}
1264
1265/* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269			  struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271	int i;
1272	for (i = 0; i < mapped; i++) {
1273		if (i == 0 || (i == 7 && mapped > 7)) {
1274			/*
1275			 * Unmap the skb->data area, or the
1276			 * external sglist (AKA the Outbound
1277			 * Address List (OAL)).
1278			 * If its the zeroeth element, then it's
 1279			 * If it's the zeroeth element, then it's
 1280			 * the skb->data area.  If it's the 7th
 1281			 * element and there are more than 6 frags,
1282			 */
1283			if (i == 7) {
1284				netif_printk(qdev, tx_done, KERN_DEBUG,
1285					     qdev->ndev,
1286					     "unmapping OAL area.\n");
1287			}
1288			pci_unmap_single(qdev->pdev,
1289					 dma_unmap_addr(&tx_ring_desc->map[i],
1290							mapaddr),
1291					 dma_unmap_len(&tx_ring_desc->map[i],
1292						       maplen),
1293					 PCI_DMA_TODEVICE);
1294		} else {
1295			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296				     "unmapping frag %d.\n", i);
1297			pci_unmap_page(qdev->pdev,
1298				       dma_unmap_addr(&tx_ring_desc->map[i],
1299						      mapaddr),
1300				       dma_unmap_len(&tx_ring_desc->map[i],
1301						     maplen), PCI_DMA_TODEVICE);
1302		}
1303	}
1304
1305}
1306
1307/* Map the buffers for this transmit.  This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 */
1310static int ql_map_send(struct ql_adapter *qdev,
1311		       struct ob_mac_iocb_req *mac_iocb_ptr,
1312		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313{
1314	int len = skb_headlen(skb);
1315	dma_addr_t map;
1316	int frag_idx, err, map_idx = 0;
1317	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318	int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320	if (frag_cnt) {
1321		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322			     "frag_cnt = %d.\n", frag_cnt);
1323	}
1324	/*
1325	 * Map the skb buffer first.
1326	 */
1327	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329	err = pci_dma_mapping_error(qdev->pdev, map);
1330	if (err) {
1331		netif_err(qdev, tx_queued, qdev->ndev,
1332			  "PCI mapping failed with error: %d\n", err);
1333
1334		return NETDEV_TX_BUSY;
1335	}
1336
1337	tbd->len = cpu_to_le32(len);
1338	tbd->addr = cpu_to_le64(map);
1339	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341	map_idx++;
1342
1343	/*
1344	 * This loop fills the remainder of the 8 address descriptors
1345	 * in the IOCB.  If there are more than 7 fragments, then the
1346	 * eighth address desc will point to an external list (OAL).
1347	 * When this happens, the remainder of the frags will be stored
1348	 * in this list.
1349	 */
1350	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352		tbd++;
1353		if (frag_idx == 6 && frag_cnt > 7) {
1354			/* Let's tack on an sglist.
1355			 * Our control block will now
1356			 * look like this:
1357			 * iocb->seg[0] = skb->data
1358			 * iocb->seg[1] = frag[0]
1359			 * iocb->seg[2] = frag[1]
1360			 * iocb->seg[3] = frag[2]
1361			 * iocb->seg[4] = frag[3]
1362			 * iocb->seg[5] = frag[4]
1363			 * iocb->seg[6] = frag[5]
1364			 * iocb->seg[7] = ptr to OAL (external sglist)
1365			 * oal->seg[0] = frag[6]
1366			 * oal->seg[1] = frag[7]
1367			 * oal->seg[2] = frag[8]
1368			 * oal->seg[3] = frag[9]
1369			 * oal->seg[4] = frag[10]
1370			 *      etc...
1371			 */
1372			/* Tack on the OAL in the eighth segment of IOCB. */
1373			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374					     sizeof(struct oal),
1375					     PCI_DMA_TODEVICE);
1376			err = pci_dma_mapping_error(qdev->pdev, map);
1377			if (err) {
1378				netif_err(qdev, tx_queued, qdev->ndev,
1379					  "PCI mapping outbound address list with error: %d\n",
1380					  err);
1381				goto map_error;
1382			}
1383
1384			tbd->addr = cpu_to_le64(map);
1385			/*
1386			 * The length is the number of fragments
1387			 * that remain to be mapped times the length
1388			 * of our sglist (OAL).
1389			 */
1390			tbd->len =
1391			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1392					 (frag_cnt - frag_idx)) | TX_DESC_C);
1393			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394					   map);
1395			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396					  sizeof(struct oal));
1397			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398			map_idx++;
1399		}
1400
1401		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402				       DMA_TO_DEVICE);
1403
1404		err = dma_mapping_error(&qdev->pdev->dev, map);
1405		if (err) {
1406			netif_err(qdev, tx_queued, qdev->ndev,
1407				  "PCI mapping frags failed with error: %d.\n",
1408				  err);
1409			goto map_error;
1410		}
1411
1412		tbd->addr = cpu_to_le64(map);
1413		tbd->len = cpu_to_le32(skb_frag_size(frag));
1414		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416				  skb_frag_size(frag));
1417
1418	}
1419	/* Save the number of segments we've mapped. */
1420	tx_ring_desc->map_cnt = map_idx;
1421	/* Terminate the last segment. */
1422	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423	return NETDEV_TX_OK;
1424
1425map_error:
1426	/*
1427	 * If the first frag mapping failed, then i will be zero.
1428	 * This causes the unmap of the skb->data area.  Otherwise
1429	 * we pass in the number of frags that mapped successfully
1430	 * so they can be umapped.
 1431	 * so they can be unmapped.
1432	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433	return NETDEV_TX_BUSY;
1434}
1435
1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438				 struct rx_ring *rx_ring)
1439{
1440	struct nic_stats *stats = &qdev->nic_stats;
1441
1442	stats->rx_err_count++;
1443	rx_ring->rx_errors++;
1444
1445	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447		stats->rx_code_err++;
1448		break;
1449	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450		stats->rx_oversize_err++;
1451		break;
1452	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453		stats->rx_undersize_err++;
1454		break;
1455	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456		stats->rx_preamble_err++;
1457		break;
1458	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459		stats->rx_frame_len_err++;
1460		break;
1461	case IB_MAC_IOCB_RSP_ERR_CRC:
1462		stats->rx_crc_err++;
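		/* fall through */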
1463	default:
1464		break;
1465	}
1466}
1467
1468/**
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1471 */
1472static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473				  struct ib_mac_iocb_rsp *ib_mac_rsp,
1474				  void *page, size_t *len)
1475{
1476	u16 *tags;
1477
1478	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479		return;
1480	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481		tags = (u16 *)page;
1482		/* Look for stacked vlan tags in ethertype field */
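		/* tags[6] is the outer ethertype at byte offset 12 and tags[8]
		 * is the ethertype that follows the first VLAN tag (offset 16),
		 * so matching both means the frame carries two stacked tags.
		 */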
1483		if (tags[6] == ETH_P_8021Q &&
1484		    tags[8] == ETH_P_8021Q)
1485			*len += 2 * VLAN_HLEN;
1486		else
1487			*len += VLAN_HLEN;
1488	}
1489}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493					struct rx_ring *rx_ring,
1494					struct ib_mac_iocb_rsp *ib_mac_rsp,
1495					u32 length,
1496					u16 vlan_id)
1497{
1498	struct sk_buff *skb;
1499	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500	struct napi_struct *napi = &rx_ring->napi;
1501
1502	/* Frame error, so drop the packet. */
1503	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505		put_page(lbq_desc->p.pg_chunk.page);
1506		return;
1507	}
1508	napi->dev = qdev->ndev;
1509
1510	skb = napi_get_frags(napi);
1511	if (!skb) {
1512		netif_err(qdev, drv, qdev->ndev,
1513			  "Couldn't get an skb, exiting.\n");
1514		rx_ring->rx_dropped++;
1515		put_page(lbq_desc->p.pg_chunk.page);
1516		return;
1517	}
1518	prefetch(lbq_desc->p.pg_chunk.va);
1519	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520			     lbq_desc->p.pg_chunk.page,
1521			     lbq_desc->p.pg_chunk.offset,
1522			     length);
1523
1524	skb->len += length;
1525	skb->data_len += length;
1526	skb->truesize += length;
1527	skb_shinfo(skb)->nr_frags++;
1528
1529	rx_ring->rx_packets++;
1530	rx_ring->rx_bytes += length;
1531	skb->ip_summed = CHECKSUM_UNNECESSARY;
1532	skb_record_rx_queue(skb, rx_ring->cq_id);
1533	if (vlan_id != 0xffff)
1534		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535	napi_gro_frags(napi);
1536}
1537
1538/* Process an inbound completion from an rx ring. */
1539static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540					struct rx_ring *rx_ring,
1541					struct ib_mac_iocb_rsp *ib_mac_rsp,
1542					u32 length,
1543					u16 vlan_id)
1544{
1545	struct net_device *ndev = qdev->ndev;
1546	struct sk_buff *skb = NULL;
1547	void *addr;
1548	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549	struct napi_struct *napi = &rx_ring->napi;
1550	size_t hlen = ETH_HLEN;
1551
1552	skb = netdev_alloc_skb(ndev, length);
1553	if (!skb) {
1554		rx_ring->rx_dropped++;
1555		put_page(lbq_desc->p.pg_chunk.page);
1556		return;
1557	}
1558
1559	addr = lbq_desc->p.pg_chunk.va;
1560	prefetch(addr);
1561
1562	/* Frame error, so drop the packet. */
1563	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565		goto err_out;
1566	}
1567
1568	/* Update the MAC header length*/
1569	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
1571	/* The max framesize filter on this chip is set higher than
1572	 * MTU since FCoE uses 2k frames.
1573	 */
1574	if (skb->len > ndev->mtu + hlen) {
1575		netif_err(qdev, drv, qdev->ndev,
1576			  "Segment too small, dropping.\n");
1577		rx_ring->rx_dropped++;
1578		goto err_out;
1579	}
1580	memcpy(skb_put(skb, hlen), addr, hlen);
1581	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583		     length);
1584	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585				lbq_desc->p.pg_chunk.offset + hlen,
1586				length - hlen);
1587	skb->len += length - hlen;
1588	skb->data_len += length - hlen;
1589	skb->truesize += length - hlen;
1590
1591	rx_ring->rx_packets++;
1592	rx_ring->rx_bytes += skb->len;
1593	skb->protocol = eth_type_trans(skb, ndev);
1594	skb_checksum_none_assert(skb);
1595
1596	if ((ndev->features & NETIF_F_RXCSUM) &&
1597		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598		/* TCP frame. */
1599		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601				     "TCP checksum done!\n");
1602			skb->ip_summed = CHECKSUM_UNNECESSARY;
1603		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605			/* Unfragmented ipv4 UDP frame. */
1606			struct iphdr *iph =
1607				(struct iphdr *)((u8 *)addr + hlen);
1608			if (!(iph->frag_off &
1609				htons(IP_MF|IP_OFFSET))) {
1610				skb->ip_summed = CHECKSUM_UNNECESSARY;
1611				netif_printk(qdev, rx_status, KERN_DEBUG,
1612					     qdev->ndev,
1613					     "UDP checksum done!\n");
1614			}
1615		}
1616	}
1617
1618	skb_record_rx_queue(skb, rx_ring->cq_id);
1619	if (vlan_id != 0xffff)
1620		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622		napi_gro_receive(napi, skb);
1623	else
1624		netif_receive_skb(skb);
1625	return;
1626err_out:
1627	dev_kfree_skb_any(skb);
1628	put_page(lbq_desc->p.pg_chunk.page);
1629}
1630
1631/* Process an inbound completion from an rx ring. */
1632static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633					struct rx_ring *rx_ring,
1634					struct ib_mac_iocb_rsp *ib_mac_rsp,
1635					u32 length,
1636					u16 vlan_id)
1637{
1638	struct net_device *ndev = qdev->ndev;
1639	struct sk_buff *skb = NULL;
1640	struct sk_buff *new_skb = NULL;
1641	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643	skb = sbq_desc->p.skb;
1644	/* Allocate new_skb and copy */
1645	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646	if (new_skb == NULL) {
1647		rx_ring->rx_dropped++;
1648		return;
1649	}
1650	skb_reserve(new_skb, NET_IP_ALIGN);
1651
1652	pci_dma_sync_single_for_cpu(qdev->pdev,
1653				    dma_unmap_addr(sbq_desc, mapaddr),
1654				    dma_unmap_len(sbq_desc, maplen),
1655				    PCI_DMA_FROMDEVICE);
1656
1657	memcpy(skb_put(new_skb, length), skb->data, length);
1658
1659	pci_dma_sync_single_for_device(qdev->pdev,
1660				       dma_unmap_addr(sbq_desc, mapaddr),
1661				       dma_unmap_len(sbq_desc, maplen),
1662				       PCI_DMA_FROMDEVICE);
1663	skb = new_skb;
1664
1665	/* Frame error, so drop the packet. */
1666	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1667		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1668		dev_kfree_skb_any(skb);
1669		return;
1670	}
1671
1672	/* loopback self test for ethtool */
1673	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1674		ql_check_lb_frame(qdev, skb);
1675		dev_kfree_skb_any(skb);
1676		return;
1677	}
1678
1679	/* The max framesize filter on this chip is set higher than
1680	 * MTU since FCoE uses 2k frames.
1681	 */
1682	if (skb->len > ndev->mtu + ETH_HLEN) {
1683		dev_kfree_skb_any(skb);
1684		rx_ring->rx_dropped++;
1685		return;
1686	}
1687
1688	prefetch(skb->data);
1689	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1690		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1691			     "%s Multicast.\n",
1692			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1693			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1694			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1696			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1698	}
1699	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1700		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1701			     "Promiscuous Packet.\n");
1702
1703	rx_ring->rx_packets++;
1704	rx_ring->rx_bytes += skb->len;
1705	skb->protocol = eth_type_trans(skb, ndev);
1706	skb_checksum_none_assert(skb);
1707
1708	/* If rx checksum is on, and there are no
1709	 * csum or frame errors.
1710	 */
1711	if ((ndev->features & NETIF_F_RXCSUM) &&
1712		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713		/* TCP frame. */
1714		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1716				     "TCP checksum done!\n");
1717			skb->ip_summed = CHECKSUM_UNNECESSARY;
1718		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720			/* Unfragmented ipv4 UDP frame. */
1721			struct iphdr *iph = (struct iphdr *) skb->data;
1722			if (!(iph->frag_off &
1723				htons(IP_MF|IP_OFFSET))) {
1724				skb->ip_summed = CHECKSUM_UNNECESSARY;
1725				netif_printk(qdev, rx_status, KERN_DEBUG,
1726					     qdev->ndev,
1727					     "UDP checksum done!\n");
1728			}
1729		}
1730	}
1731
1732	skb_record_rx_queue(skb, rx_ring->cq_id);
1733	if (vlan_id != 0xffff)
1734		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1735	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1736		napi_gro_receive(&rx_ring->napi, skb);
1737	else
1738		netif_receive_skb(skb);
1739}
1740
1741static void ql_realign_skb(struct sk_buff *skb, int len)
1742{
1743	void *temp_addr = skb->data;
1744
1745	/* Undo the skb_reserve(skb,32) we did before
1746	 * giving to hardware, and realign data on
1747	 * a 2-byte boundary.
1748	 */
1749	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1750	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1751	skb_copy_to_linear_data(skb, temp_addr,
1752		(unsigned int)len);
1753}
1754
1755/*
1756 * This function builds an skb for the given inbound
1757 * completion.  It will be rewritten for readability in the near
 1758 * future, but for now it works well.
1759 */
1760static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1761				       struct rx_ring *rx_ring,
1762				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1763{
1764	struct bq_desc *lbq_desc;
1765	struct bq_desc *sbq_desc;
1766	struct sk_buff *skb = NULL;
1767	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1768	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1769	size_t hlen = ETH_HLEN;
1770
1771	/*
1772	 * Handle the header buffer if present.
1773	 */
1774	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1775	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1776		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1777			     "Header of %d bytes in small buffer.\n", hdr_len);
1778		/*
1779		 * Headers fit nicely into a small buffer.
1780		 */
1781		sbq_desc = ql_get_curr_sbuf(rx_ring);
1782		pci_unmap_single(qdev->pdev,
1783				dma_unmap_addr(sbq_desc, mapaddr),
1784				dma_unmap_len(sbq_desc, maplen),
1785				PCI_DMA_FROMDEVICE);
1786		skb = sbq_desc->p.skb;
1787		ql_realign_skb(skb, hdr_len);
1788		skb_put(skb, hdr_len);
1789		sbq_desc->p.skb = NULL;
1790	}
1791
1792	/*
1793	 * Handle the data buffer(s).
1794	 */
1795	if (unlikely(!length)) {	/* Is there data too? */
1796		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797			     "No Data buffer in this packet.\n");
1798		return skb;
1799	}
1800
1801	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1802		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1803			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804				     "Headers in small, data of %d bytes in small, combine them.\n",
1805				     length);
1806			/*
1807			 * Data is less than small buffer size so it's
1808			 * stuffed in a small buffer.
1809			 * For this case we append the data
1810			 * from the "data" small buffer to the "header" small
1811			 * buffer.
1812			 */
1813			sbq_desc = ql_get_curr_sbuf(rx_ring);
1814			pci_dma_sync_single_for_cpu(qdev->pdev,
1815						    dma_unmap_addr
1816						    (sbq_desc, mapaddr),
1817						    dma_unmap_len
1818						    (sbq_desc, maplen),
1819						    PCI_DMA_FROMDEVICE);
1820			memcpy(skb_put(skb, length),
1821			       sbq_desc->p.skb->data, length);
1822			pci_dma_sync_single_for_device(qdev->pdev,
1823						       dma_unmap_addr
1824						       (sbq_desc,
1825							mapaddr),
1826						    

Large files are truncated; the remainder of this file is not shown.