/drivers/net/ethernet/qlogic/qlge/qlge_main.c
- /*
- * QLogic qlge NIC HBA Driver
- * Copyright (c) 2003-2008 QLogic Corporation
- * See LICENSE.qlge for copyright and licensing details.
- * Author: Linux qlge network device driver by
- * Ron Mercer <ron.mercer@qlogic.com>
- */
- #include <linux/kernel.h>
- #include <linux/bitops.h>
- #include <linux/types.h>
- #include <linux/module.h>
- #include <linux/list.h>
- #include <linux/pci.h>
- #include <linux/dma-mapping.h>
- #include <linux/pagemap.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/dmapool.h>
- #include <linux/mempool.h>
- #include <linux/spinlock.h>
- #include <linux/kthread.h>
- #include <linux/interrupt.h>
- #include <linux/errno.h>
- #include <linux/ioport.h>
- #include <linux/in.h>
- #include <linux/ip.h>
- #include <linux/ipv6.h>
- #include <net/ipv6.h>
- #include <linux/tcp.h>
- #include <linux/udp.h>
- #include <linux/if_arp.h>
- #include <linux/if_ether.h>
- #include <linux/netdevice.h>
- #include <linux/etherdevice.h>
- #include <linux/ethtool.h>
- #include <linux/if_vlan.h>
- #include <linux/skbuff.h>
- #include <linux/delay.h>
- #include <linux/mm.h>
- #include <linux/vmalloc.h>
- #include <linux/prefetch.h>
- #include <net/ip6_checksum.h>
- #include "qlge.h"
- char qlge_driver_name[] = DRV_NAME;
- const char qlge_driver_version[] = DRV_VERSION;
- MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
- MODULE_DESCRIPTION(DRV_STRING " ");
- MODULE_LICENSE("GPL");
- MODULE_VERSION(DRV_VERSION);
- static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
- /* NETIF_MSG_TIMER | */
- NETIF_MSG_IFDOWN |
- NETIF_MSG_IFUP |
- NETIF_MSG_RX_ERR |
- NETIF_MSG_TX_ERR |
- /* NETIF_MSG_TX_QUEUED | */
- /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
- /* NETIF_MSG_PKTDATA | */
- NETIF_MSG_HW | NETIF_MSG_WOL | 0;
- static int debug = -1; /* defaults above */
- module_param(debug, int, 0664);
- MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
- #define MSIX_IRQ 0
- #define MSI_IRQ 1
- #define LEG_IRQ 2
- static int qlge_irq_type = MSIX_IRQ;
- module_param(qlge_irq_type, int, 0664);
- MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
- static int qlge_mpi_coredump;
- module_param(qlge_mpi_coredump, int, 0);
- MODULE_PARM_DESC(qlge_mpi_coredump,
- "Option to enable MPI firmware dump. "
- "Default is OFF - Do Not allocate memory. ");
- static int qlge_force_coredump;
- module_param(qlge_force_coredump, int, 0);
- MODULE_PARM_DESC(qlge_force_coredump,
- "Option to allow force of firmware core dump. "
- "Default is OFF - Do not allow.");
- static const struct pci_device_id qlge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
- /* required last entry */
- {0,}
- };
- MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
- static int ql_wol(struct ql_adapter *);
- static void qlge_set_multicast_list(struct net_device *);
- static int ql_adapter_down(struct ql_adapter *);
- static int ql_adapter_up(struct ql_adapter *);
- /* This hardware semaphore provides exclusive access to
- * resources shared between the NIC driver, MPI firmware,
- * FCoE firmware and the FC driver.
- */
- static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
- {
- u32 sem_bits = 0;
- switch (sem_mask) {
- case SEM_XGMAC0_MASK:
- sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
- break;
- case SEM_XGMAC1_MASK:
- sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
- break;
- case SEM_ICB_MASK:
- sem_bits = SEM_SET << SEM_ICB_SHIFT;
- break;
- case SEM_MAC_ADDR_MASK:
- sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
- break;
- case SEM_FLASH_MASK:
- sem_bits = SEM_SET << SEM_FLASH_SHIFT;
- break;
- case SEM_PROBE_MASK:
- sem_bits = SEM_SET << SEM_PROBE_SHIFT;
- break;
- case SEM_RT_IDX_MASK:
- sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
- break;
- case SEM_PROC_REG_MASK:
- sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
- break;
- default:
- netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
- return -EINVAL;
- }
- ql_write32(qdev, SEM, sem_bits | sem_mask);
- return !(ql_read32(qdev, SEM) & sem_bits);
- }
- int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
- {
- unsigned int wait_count = 30;
- do {
- if (!ql_sem_trylock(qdev, sem_mask))
- return 0;
- udelay(100);
- } while (--wait_count);
- return -ETIMEDOUT;
- }
- void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
- {
- ql_write32(qdev, SEM, sem_mask);
- ql_read32(qdev, SEM); /* flush */
- }
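- /* Illustrative sketch (not part of the original driver, excluded from
- * the build): the intended pairing of the semaphore helpers above.
- * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times with a
- * 100us delay, so callers see 0 (acquired) or -ETIMEDOUT after ~3ms.
- */
- #if 0
- static int ql_sem_usage_example(struct ql_adapter *qdev)
- {
- int status;
- status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
- if (status)
- return status; /* firmware or the FC driver holds the semaphore */
- /* ... exclusive access to the flash registers goes here ... */
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return 0;
- }
- #endif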
- /* This function waits for a specific bit to come ready
- * in a given register. It is used mostly by the initialization
- * process, but is also called from process-context paths such as the
- * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid
- * callbacks.
- */
- int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
- {
- u32 temp;
- int count = UDELAY_COUNT;
- while (count) {
- temp = ql_read32(qdev, reg);
- /* check for errors */
- if (temp & err_bit) {
- netif_alert(qdev, probe, qdev->ndev,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
- return -EIO;
- } else if (temp & bit)
- return 0;
- udelay(UDELAY_DELAY);
- count--;
- }
- netif_alert(qdev, probe, qdev->ndev,
- "Timed out waiting for reg %x to come ready.\n", reg);
- return -ETIMEDOUT;
- }
- /* The CFG register is used to download TX and RX control blocks
- * to the chip. This function waits for an operation to complete.
- */
- static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
- {
- int count = UDELAY_COUNT;
- u32 temp;
- while (count) {
- temp = ql_read32(qdev, CFG);
- if (temp & CFG_LE)
- return -EIO;
- if (!(temp & bit))
- return 0;
- udelay(UDELAY_DELAY);
- count--;
- }
- return -ETIMEDOUT;
- }
- /* Used to issue init control blocks to hw. Maps control block,
- * sets address, triggers download, waits for completion.
- */
- int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id)
- {
- u64 map;
- int status = 0;
- int direction;
- u32 mask;
- u32 value;
- direction =
- (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE;
- map = pci_map_single(qdev->pdev, ptr, size, direction);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
- return -ENOMEM;
- }
- status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
- if (status)
- goto lock_failed; /* must still unmap the DMA area */
- status = ql_wait_cfg(qdev, bit);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for CFG to come ready.\n");
- goto exit;
- }
- ql_write32(qdev, ICB_L, (u32) map);
- ql_write32(qdev, ICB_H, (u32) (map >> 32));
- mask = CFG_Q_MASK | (bit << 16);
- value = bit | (q_id << CFG_Q_SHIFT);
- ql_write32(qdev, CFG, (mask | value));
- /*
- * Wait for the bit to clear after signaling hw.
- */
- status = ql_wait_cfg(qdev, bit);
- exit:
- ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
- lock_failed:
- pci_unmap_single(qdev->pdev, map, size, direction);
- return status;
- }
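- /* Illustrative sketch (an assumption mirroring how ring bring-up later
- * in this driver uses ql_write_cfg(); excluded from the build):
- * downloading a completion-queue init control block for one rx ring.
- * The cqicb field name comes from qlge.h and is an assumption here.
- */
- #if 0
- static int ql_cfg_usage_example(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
- {
- /* CFG_LCQ means "load completion queue"; cq_id selects the ring. */
- return ql_write_cfg(qdev, &rx_ring->cqicb, sizeof(rx_ring->cqicb),
- CFG_LCQ, rx_ring->cq_id);
- }
- #endif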
- /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
- int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value)
- {
- u32 offset = 0;
- int status;
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- }
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
- exit:
- return status;
- }
- /* Set up a MAC, multicast or VLAN address for the
- * inbound frame matching.
- */
- static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
- u16 index)
- {
- u32 offset = 0;
- int status = 0;
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- {
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | (addr[5]);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- break;
- }
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- u32 cam_output;
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower =
- (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
- (addr[5]);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- /* This field should also include the queue id
- and possibly the function id. Right now we hardcode
- the route field to NIC core.
- */
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->
- func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- {
- u32 enable_bit = *((u32 *) &addr[0]);
- /* For VLAN, the addr actually holds a bit that
- * either enables or disables the vlan id we are
- * addressing. It's either MAC_ADDR_E on or off.
- * That's bit-27 we're talking about.
- */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type | /* type */
- enable_bit); /* enable/disable */
- break;
- }
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
- exit:
- return status;
- }
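- /* Worked example (added for clarity): for MAC 00:e0:8b:01:02:03 the
- * CAM split above yields upper = (0x00 << 8) | 0xe0 = 0x00e0 and
- * lower = (0x8b << 24) | (0x01 << 16) | (0x02 << 8) | 0x03 = 0x8b010203,
- * written via MAC_ADDR_DATA as two dwords at offsets 0 and 1.
- */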
- /* Set or clear MAC address in hardware. We sometimes
- * have to clear it to prevent wrong frame routing
- * especially in a bonding environment.
- */
- static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
- {
- int status;
- char zero_mac_addr[ETH_ALEN];
- char *addr;
- if (set) {
- addr = &qdev->current_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Set Mac addr %pM\n", addr);
- } else {
- eth_zero_addr(zero_mac_addr);
- addr = &zero_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Clearing MAC address\n");
- }
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init mac address.\n");
- return status;
- }
- void ql_link_on(struct ql_adapter *qdev)
- {
- netif_err(qdev, link, qdev->ndev, "Link is up.\n");
- netif_carrier_on(qdev->ndev);
- ql_set_mac_addr(qdev, 1);
- }
- void ql_link_off(struct ql_adapter *qdev)
- {
- netif_err(qdev, link, qdev->ndev, "Link is down.\n");
- netif_carrier_off(qdev->ndev);
- ql_set_mac_addr(qdev, 0);
- }
- /* Get a specific frame routing value from the CAM.
- * Used for debug and reg dump.
- */
- int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
- {
- int status = 0;
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, RT_IDX,
- RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
- if (status)
- goto exit;
- *value = ql_read32(qdev, RT_DATA);
- exit:
- return status;
- }
- /* The NIC function for this chip has 16 routing indexes. Each one can be used
- * to route different frame types to various inbound queues. We send broadcast/
- * multicast/error frames to the default queue for slow handling,
- * and CAM hit/RSS frames to the fast handling queues.
- */
- static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
- int enable)
- {
- int status = -EINVAL; /* Return error if no mask match. */
- u32 value = 0;
- switch (mask) {
- case RT_IDX_CAM_HIT:
- {
- value = RT_IDX_DST_CAM_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_VALID: /* Promiscuous Mode frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_IP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST: /* Pass up All Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
- {
- value = RT_IDX_DST_RSS | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case 0: /* Clear the E-bit on an entry. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (index << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- default:
- netif_err(qdev, ifup, qdev->ndev,
- "Mask type %d not yet supported.\n", mask);
- status = -EPERM;
- goto exit;
- }
- if (value) {
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
- value |= (enable ? RT_IDX_E : 0);
- ql_write32(qdev, RT_IDX, value);
- ql_write32(qdev, RT_DATA, enable ? mask : 0);
- }
- exit:
- return status;
- }
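- /* Illustrative sketch (an assumption matching the routing-init calls
- * made later in this driver; excluded from the build): enabling the
- * broadcast slot so broadcast frames land on the default queue.
- */
- #if 0
- static int ql_routing_usage_example(struct ql_adapter *qdev)
- {
- /* index RT_IDX_BCAST_SLOT, mask RT_IDX_BCAST, enable = 1 */
- return ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
- }
- #endif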
- static void ql_enable_interrupts(struct ql_adapter *qdev)
- {
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
- }
- static void ql_disable_interrupts(struct ql_adapter *qdev)
- {
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
- }
- /* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes. Once it hits zero we enable the interrupt.
- */
- u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
- {
- u32 var = 0;
- unsigned long hw_flags = 0;
- struct intr_context *ctx = qdev->intr_context + intr;
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
- /* Always enable if we're MSIX multi interrupts and
- * it's not the default (zeroth) interrupt.
- */
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- return var;
- }
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- if (atomic_dec_and_test(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- }
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return var;
- }
- static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
- {
- u32 var = 0;
- struct intr_context *ctx;
- /* HW disables for us if we're MSIX multi interrupts and
- * it's not the default (zeroth) interrupt.
- */
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
- return 0;
- ctx = qdev->intr_context + intr;
- spin_lock(&qdev->hw_lock);
- if (!atomic_read(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_dis_mask);
- var = ql_read32(qdev, STS);
- }
- atomic_inc(&ctx->irq_cnt);
- spin_unlock(&qdev->hw_lock);
- return var;
- }
- static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
- {
- int i;
- for (i = 0; i < qdev->intr_count; i++) {
- /* The enable call does an atomic_dec_and_test
- * and enables only if the result is zero.
- * So we precharge it here.
- */
- if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
- i == 0))
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
- ql_enable_completion_interrupt(qdev, i);
- }
- }
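- /* Worked sequence (added for clarity): for a non-MSIX vector, irq_cnt is
- * precharged to 1 above, so the ql_enable_completion_interrupt() call
- * does atomic_dec_and_test(1 -> 0) and writes INTR_EN exactly once.
- * Each later ql_disable_completion_interrupt() increments the count,
- * and the matching enable call re-arms only when it drops back to zero.
- */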
- static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
- {
- int status, i;
- u16 csum = 0;
- __le16 *flash = (__le16 *)&qdev->flash;
- status = strncmp((char *)&qdev->flash, str, 4);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
- return status;
- }
- for (i = 0; i < size; i++)
- csum += le16_to_cpu(*flash++);
- if (csum)
- netif_err(qdev, ifup, qdev->ndev,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
- return csum;
- }
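- /* Worked example (added for clarity): the flash image is valid when all
- * 16-bit little-endian words, including the stored checksum word, wrap
- * to zero in the u16 accumulator. E.g. two words 0x1234 and 0xedcc sum
- * to 0x10000, which truncates to 0, so the function returns 0 (valid).
- */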
- static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
- {
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* This data is stored on flash as an array of
- * __le32. Since ql_read32() returns cpu endian
- * we need to swap it back.
- */
- *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
- exit:
- return status;
- }
- static int ql_get_8000_flash_params(struct ql_adapter *qdev)
- {
- u32 i, size;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset;
- u8 mac_addr[6];
- /* Get flash offset for function and adjust
- * for dword access.
- */
- if (!qdev->port)
- offset = FUNC0_FLASH_OFFSET / sizeof(u32);
- else
- offset = FUNC1_FLASH_OFFSET / sizeof(u32);
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
- size = sizeof(struct flash_params_8000) / sizeof(u32);
- for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8000) / sizeof(u16),
- "8000");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
- /* Extract either manufacturer or BOFM modified
- * MAC address.
- */
- if (qdev->flash.flash_params_8000.data_type1 == 2)
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr1,
- qdev->ndev->addr_len);
- else
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr,
- qdev->ndev->addr_len);
- if (!is_valid_ether_addr(mac_addr)) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
- status = -EINVAL;
- goto exit;
- }
- memcpy(qdev->ndev->dev_addr,
- mac_addr,
- qdev->ndev->addr_len);
- exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
- }
- static int ql_get_8012_flash_params(struct ql_adapter *qdev)
- {
- int i;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset = 0;
- u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
- /* Second function's parameters follow the first
- * function's.
- */
- if (qdev->port)
- offset = size;
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
- for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8012) / sizeof(u16),
- "8012");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
- if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
- status = -EINVAL;
- goto exit;
- }
- memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8012.mac_addr,
- qdev->ndev->addr_len);
- exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
- }
- /* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
- static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
- {
- int status;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- return status;
- /* write the data to the data reg */
- ql_write32(qdev, XGMAC_DATA, data);
- /* trigger the write */
- ql_write32(qdev, XGMAC_ADDR, reg);
- return status;
- }
- /* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
- int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
- {
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* get the data */
- *data = ql_read32(qdev, XGMAC_DATA);
- exit:
- return status;
- }
- /* This is used for reading the 64-bit statistics regs. */
- int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
- {
- int status = 0;
- u32 hi = 0;
- u32 lo = 0;
- status = ql_read_xgmac_reg(qdev, reg, &lo);
- if (status)
- goto exit;
- status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
- if (status)
- goto exit;
- *data = (u64) lo | ((u64) hi << 32);
- exit:
- return status;
- }
- static int ql_8000_port_initialize(struct ql_adapter *qdev)
- {
- int status;
- /*
- * Get MPI firmware version for driver banner
- * and ethtool info.
- */
- status = ql_mb_about_fw(qdev);
- if (status)
- goto exit;
- status = ql_mb_get_fw_state(qdev);
- if (status)
- goto exit;
- /* Wake up a worker to get/set the TX/RX frame sizes. */
- queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
- exit:
- return status;
- }
- /* Take the MAC Core out of reset.
- * Enable statistics counting.
- * Take the transmitter/receiver out of reset.
- * This functionality may be done in the MPI firmware at a
- * later date.
- */
- static int ql_8012_port_initialize(struct ql_adapter *qdev)
- {
- int status = 0;
- u32 data;
- if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
- /* Another function has the semaphore, so
- * wait for the port init bit to come ready.
- */
- netif_info(qdev, link, qdev->ndev,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
- status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
- if (status) {
- netif_crit(qdev, link, qdev->ndev,
- "Port initialize timed out.\n");
- }
- return status;
- }
- netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
- /* Set the core reset. */
- status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
- if (status)
- goto end;
- data |= GLOBAL_CFG_RESET;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
- /* Clear the core reset and turn on jumbo for receiver. */
- data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
- data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
- data |= GLOBAL_CFG_TX_STAT_EN;
- data |= GLOBAL_CFG_RX_STAT_EN;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
- /* Enable the transmitter and clear its reset. */
- status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
- if (status)
- goto end;
- data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
- data |= TX_CFG_EN; /* Enable the transmitter. */
- status = ql_write_xgmac_reg(qdev, TX_CFG, data);
- if (status)
- goto end;
- /* Enable the receiver and clear its reset. */
- status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
- if (status)
- goto end;
- data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
- data |= RX_CFG_EN; /* Enable the receiver. */
- status = ql_write_xgmac_reg(qdev, RX_CFG, data);
- if (status)
- goto end;
- /* Turn on jumbo. */
- status =
- ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
- if (status)
- goto end;
- status =
- ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
- if (status)
- goto end;
- /* Signal to the world that the port is enabled. */
- ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
- end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
- return status;
- }
- static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
- {
- return PAGE_SIZE << qdev->lbq_buf_order;
- }
- /* Get the next large buffer. */
- static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
- {
- struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
- rx_ring->lbq_curr_idx++;
- if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_free_cnt++;
- return lbq_desc;
- }
- static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
- {
- struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(lbq_desc, mapaddr),
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- /* If it's the last chunk of our master page then
- * we unmap it.
- */
- if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
- == ql_lbq_block_size(qdev))
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- return lbq_desc;
- }
- /* Get the next small buffer. */
- static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
- {
- struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
- rx_ring->sbq_curr_idx++;
- if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_free_cnt++;
- return sbq_desc;
- }
- /* Update an rx ring index. */
- static void ql_update_cq(struct rx_ring *rx_ring)
- {
- rx_ring->cnsmr_idx++;
- rx_ring->curr_entry++;
- if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
- }
- }
- static void ql_write_cq_idx(struct rx_ring *rx_ring)
- {
- ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
- }
- static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
- struct bq_desc *lbq_desc)
- {
- if (!rx_ring->pg_chunk.page) {
- u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
- GFP_ATOMIC,
- qdev->lbq_buf_order);
- if (unlikely(!rx_ring->pg_chunk.page)) {
- netif_err(qdev, drv, qdev->ndev,
- "page allocation failed.\n");
- return -ENOMEM;
- }
- rx_ring->pg_chunk.offset = 0;
- map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
- 0, ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- __free_pages(rx_ring->pg_chunk.page,
- qdev->lbq_buf_order);
- rx_ring->pg_chunk.page = NULL;
- netif_err(qdev, drv, qdev->ndev,
- "PCI mapping failed.\n");
- return -ENOMEM;
- }
- rx_ring->pg_chunk.map = map;
- rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
- }
- /* Copy the current master pg_chunk info
- * to the current descriptor.
- */
- lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
- /* Adjust the master page chunk for next
- * buffer get.
- */
- rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
- if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
- rx_ring->pg_chunk.page = NULL;
- lbq_desc->p.pg_chunk.last_flag = 1;
- } else {
- rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
- get_page(rx_ring->pg_chunk.page);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
- return 0;
- }
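- /* Worked example (added for clarity; the page size and buffer size are
- * assumptions): with PAGE_SIZE 4096 and lbq_buf_order 1, the master
- * block is 8192 bytes, so a 2048-byte lbq_buf_size carves it into four
- * chunks at offsets 0, 2048, 4096 and 6144. get_page() is taken for
- * every chunk except the last, so each outstanding descriptor holds its
- * own page reference and the block is freed only after all four chunks
- * are consumed and released.
- */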
- /* Process (refill) a large buffer queue. */
- static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
- {
- u32 clean_idx = rx_ring->lbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *lbq_desc;
- u64 map;
- int i;
- while (rx_ring->lbq_free_cnt > 32) {
- for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- lbq_desc = &rx_ring->lbq[clean_idx];
- if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- rx_ring->lbq_clean_idx = clean_idx;
- netif_err(qdev, ifup, qdev->ndev,
- "Could not get a page chunk, i=%d, clean_idx =%d .\n",
- i, clean_idx);
- return;
- }
- map = lbq_desc->p.pg_chunk.map +
- lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
- dma_unmap_len_set(lbq_desc, maplen,
- rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
- pci_dma_sync_single_for_device(qdev->pdev, map,
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- clean_idx++;
- if (clean_idx == rx_ring->lbq_len)
- clean_idx = 0;
- }
- rx_ring->lbq_clean_idx = clean_idx;
- rx_ring->lbq_prod_idx += 16;
- if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_free_cnt -= 16;
- }
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
- }
- }
- /* Process (refill) a small buffer queue. */
- static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
- {
- u32 clean_idx = rx_ring->sbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *sbq_desc;
- u64 map;
- int i;
- while (rx_ring->sbq_free_cnt > 16) {
- for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
- sbq_desc = &rx_ring->sbq[clean_idx];
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- if (sbq_desc->p.skb == NULL) {
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
- sbq_desc->p.skb =
- netdev_alloc_skb(qdev->ndev,
- SMALL_BUFFER_SIZE);
- if (sbq_desc->p.skb == NULL) {
- rx_ring->sbq_clean_idx = clean_idx;
- return;
- }
- skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
- map = pci_map_single(qdev->pdev,
- sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev,
- "PCI mapping failed.\n");
- rx_ring->sbq_clean_idx = clean_idx;
- dev_kfree_skb_any(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- return;
- }
- dma_unmap_addr_set(sbq_desc, mapaddr, map);
- dma_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size);
- *sbq_desc->addr = cpu_to_le64(map);
- }
- clean_idx++;
- if (clean_idx == rx_ring->sbq_len)
- clean_idx = 0;
- }
- rx_ring->sbq_clean_idx = clean_idx;
- rx_ring->sbq_prod_idx += 16;
- if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_free_cnt -= 16;
- }
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
- }
- }
- static void ql_update_buffer_queues(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
- {
- ql_update_sbq(qdev, rx_ring);
- ql_update_lbq(qdev, rx_ring);
- }
- /* Unmaps tx buffers. Can be called from send() if a pci mapping
- * fails at some stage, or from the interrupt when a tx completes.
- */
- static void ql_unmap_send(struct ql_adapter *qdev,
- struct tx_ring_desc *tx_ring_desc, int mapped)
- {
- int i;
- for (i = 0; i < mapped; i++) {
- if (i == 0 || (i == 7 && mapped > 7)) {
- /*
- * Unmap the skb->data area, or the
- * external sglist (AKA the Outbound
- * Address List (OAL)).
- * If it's the zeroth element, then it's
- * the skb->data area. If it's the 7th
- * element and there are more than 6 frags,
- * then it's an OAL.
- */
- if (i == 7) {
- netif_printk(qdev, tx_done, KERN_DEBUG,
- qdev->ndev,
- "unmapping OAL area.\n");
- }
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen),
- PCI_DMA_TODEVICE);
- } else {
- netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
- "unmapping frag %d.\n", i);
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen), PCI_DMA_TODEVICE);
- }
- }
- }
- /* Map the buffers for this transmit. This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
- */
- static int ql_map_send(struct ql_adapter *qdev,
- struct ob_mac_iocb_req *mac_iocb_ptr,
- struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
- {
- int len = skb_headlen(skb);
- dma_addr_t map;
- int frag_idx, err, map_idx = 0;
- struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
- int frag_cnt = skb_shinfo(skb)->nr_frags;
- if (frag_cnt) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "frag_cnt = %d.\n", frag_cnt);
- }
- /*
- * Map the skb buffer first.
- */
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping failed with error: %d\n", err);
- return NETDEV_TX_BUSY;
- }
- tbd->len = cpu_to_le32(len);
- tbd->addr = cpu_to_le64(map);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
- map_idx++;
- /*
- * This loop fills the remainder of the 8 address descriptors
- * in the IOCB. If there are more than 7 fragments, then the
- * eighth address desc will point to an external list (OAL).
- * When this happens, the remainder of the frags will be stored
- * in this list.
- */
- for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
- tbd++;
- if (frag_idx == 6 && frag_cnt > 7) {
- /* Let's tack on an sglist.
- * Our control block will now
- * look like this:
- * iocb->seg[0] = skb->data
- * iocb->seg[1] = frag[0]
- * iocb->seg[2] = frag[1]
- * iocb->seg[3] = frag[2]
- * iocb->seg[4] = frag[3]
- * iocb->seg[5] = frag[4]
- * iocb->seg[6] = frag[5]
- * iocb->seg[7] = ptr to OAL (external sglist)
- * oal->seg[0] = frag[6]
- * oal->seg[1] = frag[7]
- * oal->seg[2] = frag[8]
- * oal->seg[3] = frag[9]
- * oal->seg[4] = frag[10]
- * etc...
- */
- /* Tack on the OAL in the eighth segment of IOCB. */
- map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
- sizeof(struct oal),
- PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping outbound address list with error: %d\n",
- err);
- goto map_error;
- }
- tbd->addr = cpu_to_le64(map);
- /*
- * The length is the number of fragments
- * that remain to be mapped times the size
- * of one OAL descriptor entry.
- */
- tbd->len =
- cpu_to_le32((sizeof(struct tx_buf_desc) *
- (frag_cnt - frag_idx)) | TX_DESC_C);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
- map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- sizeof(struct oal));
- tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
- map_idx++;
- }
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- err = dma_mapping_error(&qdev->pdev->dev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping frags failed with error: %d.\n",
- err);
- goto map_error;
- }
- tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(skb_frag_size(frag));
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- skb_frag_size(frag));
- }
- /* Save the number of segments we've mapped. */
- tx_ring_desc->map_cnt = map_idx;
- /* Terminate the last segment. */
- tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
- return NETDEV_TX_OK;
- map_error:
- /*
- * If the first frag mapping failed, then map_idx will be 1,
- * which unmaps just the skb->data area. Otherwise we pass in
- * the number of segments that mapped successfully so they
- * can be unmapped.
- */
- ql_unmap_send(qdev, tx_ring_desc, map_idx);
- return NETDEV_TX_BUSY;
- }
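- /* Worked example (added for clarity): with 10 fragments, the OAL is
- * tacked on at frag_idx 6, so its descriptor length encodes
- * sizeof(struct tx_buf_desc) * (10 - 6) -- room for frags 6..9 --
- * OR'd with TX_DESC_C (continuation). The last fragment's descriptor
- * then gets TX_DESC_E to terminate the list.
- */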
- /* Categorizing receive firmware frame errors */
- static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
- struct rx_ring *rx_ring)
- {
- struct nic_stats *stats = &qdev->nic_stats;
- stats->rx_err_count++;
- rx_ring->rx_errors++;
- switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
- case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
- stats->rx_code_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
- stats->rx_oversize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
- stats->rx_undersize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
- stats->rx_preamble_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
- stats->rx_frame_len_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_CRC:
- stats->rx_crc_err++;
- break;
- default:
- break;
- }
- }
- /**
- * ql_update_mac_hdr_len - helper routine to update the mac header length
- * based on vlan tags if present
- */
- static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- void *page, size_t *len)
- {
- u16 *tags;
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- return;
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
- tags = (u16 *)page;
- /* Look for stacked vlan tags in ethertype field */
- if (tags[6] == ETH_P_8021Q &&
- tags[8] == ETH_P_8021Q)
- *len += 2 * VLAN_HLEN;
- else
- *len += VLAN_HLEN;
- }
- }
- /* Process an inbound completion from an rx ring. */
- static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
- {
- struct sk_buff *skb;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- napi->dev = qdev->ndev;
- skb = napi_get_frags(napi);
- if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, exiting.\n");
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- prefetch(lbq_desc->p.pg_chunk.va);
- __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- skb_shinfo(skb)->nr_frags++;
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += length;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- napi_gro_frags(napi);
- }
- /* Process an inbound completion from an rx ring. */
- static void ql_process_mac_rx_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
- {
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- void *addr;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
- size_t hlen = ETH_HLEN;
- skb = netdev_alloc_skb(ndev, length);
- if (!skb) {
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- addr = lbq_desc->p.pg_chunk.va;
- prefetch(addr);
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- goto err_out;
- }
- /* Update the MAC header length*/
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + hlen) {
- netif_err(qdev, drv, qdev->ndev,
- "Segment too small, dropping.\n");
- rx_ring->rx_dropped++;
- goto err_out;
- }
- memcpy(skb_put(skb, hlen), addr, hlen);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset + hlen,
- length - hlen);
- skb->len += length - hlen;
- skb->data_len += length - hlen;
- skb->truesize += length - hlen;
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph =
- (struct iphdr *)((u8 *)addr + hlen);
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(napi, skb);
- else
- netif_receive_skb(skb);
- return;
- err_out:
- dev_kfree_skb_any(skb);
- put_page(lbq_desc->p.pg_chunk.page);
- }
- /* Process an inbound completion from an rx ring. */
- static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
- {
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- struct sk_buff *new_skb = NULL;
- struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
- skb = sbq_desc->p.skb;
- /* Allocate new_skb and copy */
- new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (new_skb == NULL) {
- rx_ring->rx_dropped++;
- return;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- memcpy(skb_put(new_skb, length), skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb = new_skb;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
- }
- static void ql_realign_skb(struct sk_buff *skb, int len)
- {
- void *temp_addr = skb->data;
- /* Undo the skb_reserve(skb,32) we did before
- * giving to hardware, and realign data on
- * a 2-byte boundary.
- */
- skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
- skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
- skb_copy_to_linear_data(skb, temp_addr,
- (unsigned int)len);
- }
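- /* Worked example (added for clarity; assumes QLGE_SB_PAD is the 32
- * mentioned in the comment above and the typical NET_IP_ALIGN of 2):
- * skb->data and skb->tail move back 32 - 2 = 30 bytes, leaving the
- * 2-byte offset that lands the IP header on a 4-byte boundary after
- * the 14-byte Ethernet header.
- */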
- /*
- * This function builds an skb for the given inbound
- * completion. It will be rewritten for readability in the near
- * future, but for now it works well.
- */
- static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
- {
- struct bq_desc *lbq_desc;
- struct bq_desc *sbq_desc;
- struct sk_buff *skb = NULL;
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
- size_t hlen = ETH_HLEN;
- /*
- * Handle the header buffer if present.
- */
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header of %d bytes in small buffer.\n", hdr_len);
- /*
- * Headers fit nicely into a small buffer.
- */
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb = sbq_desc->p.skb;
- ql_realign_skb(skb, hdr_len);
- skb_put(skb, hdr_len);
- sbq_desc->p.skb = NULL;
- }
- /*
- * Handle the data buffer(s).
- */
- if (unlikely(!length)) { /* Is there data too? */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No Data buffer in this packet.\n");
- return skb;
- }
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Headers in small, data of %d bytes in small, combine them.\n",
- length);
- /*
- * Data is less than small buffer size so it's
- * stuffed in a small buffer.
- * For this case we append the data
- * from the "data" small buffer to the "header" small
- * buffer.
- */
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr
- (sbq_desc, mapaddr),
- dma_unmap_len
- (sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- memcpy(skb_put(skb, length),
- sbq_desc->p.skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr
- (sbq_desc,
- mapaddr),…