
/drivers/net/qlcnic/qlcnic_hw.c

https://bitbucket.org/ndreys/linux-sunxi
   1/*
   2 * QLogic qlcnic NIC Driver
   3 * Copyright (c)  2009-2010 QLogic Corporation
   4 *
   5 * See LICENSE.qlcnic for copyright and licensing details.
   6 */
   7
   8#include "qlcnic.h"
   9
  10#include <linux/slab.h>
  11#include <net/ip.h>
  12#include <linux/bitops.h>
  13
  14#define MASK(n) ((1ULL<<(n))-1)
  15#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
  16
  17#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
  18
  19#define CRB_BLK(off)	((off >> 20) & 0x3f)
  20#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
  21#define CRB_WINDOW_2M	(0x130060)
  22#define CRB_HI(off)	((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
  23#define CRB_INDIRECT_2M	(0x1e0000UL)
  24
  25
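/*
 * Fallback readq/writeq for architectures that do not provide native
 * 64-bit MMIO accessors: the 64-bit access is composed from two 32-bit
 * reads/writes (low word first) and is therefore not atomic.
 */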
  26#ifndef readq
  27static inline u64 readq(void __iomem *addr)
  28{
  29	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
  30}
  31#endif
  32
  33#ifndef writeq
  34static inline void writeq(u64 val, void __iomem *addr)
  35{
  36	writel(((u32) (val)), (addr));
  37	writel(((u32) (val >> 32)), (addr + 4));
  38}
  39#endif
  40
  41static const struct crb_128M_2M_block_map
  42crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
  43    {{{0, 0,         0,         0} } },		/* 0: PCI */
  44    {{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
  45	  {1, 0x0110000, 0x0120000, 0x130000},
  46	  {1, 0x0120000, 0x0122000, 0x124000},
  47	  {1, 0x0130000, 0x0132000, 0x126000},
  48	  {1, 0x0140000, 0x0142000, 0x128000},
  49	  {1, 0x0150000, 0x0152000, 0x12a000},
  50	  {1, 0x0160000, 0x0170000, 0x110000},
  51	  {1, 0x0170000, 0x0172000, 0x12e000},
  52	  {0, 0x0000000, 0x0000000, 0x000000},
  53	  {0, 0x0000000, 0x0000000, 0x000000},
  54	  {0, 0x0000000, 0x0000000, 0x000000},
  55	  {0, 0x0000000, 0x0000000, 0x000000},
  56	  {0, 0x0000000, 0x0000000, 0x000000},
  57	  {0, 0x0000000, 0x0000000, 0x000000},
  58	  {1, 0x01e0000, 0x01e0800, 0x122000},
  59	  {0, 0x0000000, 0x0000000, 0x000000} } },
  60	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
  61    {{{0, 0,         0,         0} } },	    /* 3: */
  62    {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
  63    {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
  64    {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
  65    {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
  66    {{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
  67      {0, 0x0000000, 0x0000000, 0x000000},
  68      {0, 0x0000000, 0x0000000, 0x000000},
  69      {0, 0x0000000, 0x0000000, 0x000000},
  70      {0, 0x0000000, 0x0000000, 0x000000},
  71      {0, 0x0000000, 0x0000000, 0x000000},
  72      {0, 0x0000000, 0x0000000, 0x000000},
  73      {0, 0x0000000, 0x0000000, 0x000000},
  74      {0, 0x0000000, 0x0000000, 0x000000},
  75      {0, 0x0000000, 0x0000000, 0x000000},
  76      {0, 0x0000000, 0x0000000, 0x000000},
  77      {0, 0x0000000, 0x0000000, 0x000000},
  78      {0, 0x0000000, 0x0000000, 0x000000},
  79      {0, 0x0000000, 0x0000000, 0x000000},
  80      {0, 0x0000000, 0x0000000, 0x000000},
  81      {1, 0x08f0000, 0x08f2000, 0x172000} } },
  82    {{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
  83      {0, 0x0000000, 0x0000000, 0x000000},
  84      {0, 0x0000000, 0x0000000, 0x000000},
  85      {0, 0x0000000, 0x0000000, 0x000000},
  86      {0, 0x0000000, 0x0000000, 0x000000},
  87      {0, 0x0000000, 0x0000000, 0x000000},
  88      {0, 0x0000000, 0x0000000, 0x000000},
  89      {0, 0x0000000, 0x0000000, 0x000000},
  90      {0, 0x0000000, 0x0000000, 0x000000},
  91      {0, 0x0000000, 0x0000000, 0x000000},
  92      {0, 0x0000000, 0x0000000, 0x000000},
  93      {0, 0x0000000, 0x0000000, 0x000000},
  94      {0, 0x0000000, 0x0000000, 0x000000},
  95      {0, 0x0000000, 0x0000000, 0x000000},
  96      {0, 0x0000000, 0x0000000, 0x000000},
  97      {1, 0x09f0000, 0x09f2000, 0x176000} } },
  98    {{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
  99      {0, 0x0000000, 0x0000000, 0x000000},
 100      {0, 0x0000000, 0x0000000, 0x000000},
 101      {0, 0x0000000, 0x0000000, 0x000000},
 102      {0, 0x0000000, 0x0000000, 0x000000},
 103      {0, 0x0000000, 0x0000000, 0x000000},
 104      {0, 0x0000000, 0x0000000, 0x000000},
 105      {0, 0x0000000, 0x0000000, 0x000000},
 106      {0, 0x0000000, 0x0000000, 0x000000},
 107      {0, 0x0000000, 0x0000000, 0x000000},
 108      {0, 0x0000000, 0x0000000, 0x000000},
 109      {0, 0x0000000, 0x0000000, 0x000000},
 110      {0, 0x0000000, 0x0000000, 0x000000},
 111      {0, 0x0000000, 0x0000000, 0x000000},
 112      {0, 0x0000000, 0x0000000, 0x000000},
 113      {1, 0x0af0000, 0x0af2000, 0x17a000} } },
 114    {{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
 115      {0, 0x0000000, 0x0000000, 0x000000},
 116      {0, 0x0000000, 0x0000000, 0x000000},
 117      {0, 0x0000000, 0x0000000, 0x000000},
 118      {0, 0x0000000, 0x0000000, 0x000000},
 119      {0, 0x0000000, 0x0000000, 0x000000},
 120      {0, 0x0000000, 0x0000000, 0x000000},
 121      {0, 0x0000000, 0x0000000, 0x000000},
 122      {0, 0x0000000, 0x0000000, 0x000000},
 123      {0, 0x0000000, 0x0000000, 0x000000},
 124      {0, 0x0000000, 0x0000000, 0x000000},
 125      {0, 0x0000000, 0x0000000, 0x000000},
 126      {0, 0x0000000, 0x0000000, 0x000000},
 127      {0, 0x0000000, 0x0000000, 0x000000},
 128      {0, 0x0000000, 0x0000000, 0x000000},
 129      {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
 130	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
 131	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
 132	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
 133	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
 134	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
 135	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
 136	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
 137	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
 138	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
 139	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
 140	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
 141	{{{0, 0,         0,         0} } },	/* 23: */
 142	{{{0, 0,         0,         0} } },	/* 24: */
 143	{{{0, 0,         0,         0} } },	/* 25: */
 144	{{{0, 0,         0,         0} } },	/* 26: */
 145	{{{0, 0,         0,         0} } },	/* 27: */
 146	{{{0, 0,         0,         0} } },	/* 28: */
 147	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
 148    {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
 149    {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
 150	{{{0} } },				/* 32: PCI */
 151	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
 152	  {1, 0x2110000, 0x2120000, 0x130000},
 153	  {1, 0x2120000, 0x2122000, 0x124000},
 154	  {1, 0x2130000, 0x2132000, 0x126000},
 155	  {1, 0x2140000, 0x2142000, 0x128000},
 156	  {1, 0x2150000, 0x2152000, 0x12a000},
 157	  {1, 0x2160000, 0x2170000, 0x110000},
 158	  {1, 0x2170000, 0x2172000, 0x12e000},
 159	  {0, 0x0000000, 0x0000000, 0x000000},
 160	  {0, 0x0000000, 0x0000000, 0x000000},
 161	  {0, 0x0000000, 0x0000000, 0x000000},
 162	  {0, 0x0000000, 0x0000000, 0x000000},
 163	  {0, 0x0000000, 0x0000000, 0x000000},
 164	  {0, 0x0000000, 0x0000000, 0x000000},
 165	  {0, 0x0000000, 0x0000000, 0x000000},
 166	  {0, 0x0000000, 0x0000000, 0x000000} } },
 167	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
 168	{{{0} } },				/* 35: */
 169	{{{0} } },				/* 36: */
 170	{{{0} } },				/* 37: */
 171	{{{0} } },				/* 38: */
 172	{{{0} } },				/* 39: */
 173	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
 174	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
 175	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
 176	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
 177	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
 178	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
 179	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
 180	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
 181	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
 182	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
 183	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
 184	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
 185	{{{0} } },				/* 52: */
 186	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
 187	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
 188	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
 189	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
 190	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
 191	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
 192	{{{0} } },				/* 59: I2C0 */
 193	{{{0} } },				/* 60: I2C1 */
 194	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
 195	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
 196	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
 197};
 198
 199/*
 200 * top 12 bits of crb internal address (hub, agent)
 201 */
 202static const unsigned crb_hub_agt[64] = {
 203	0,
 204	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
 205	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
 206	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
 207	0,
 208	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
 209	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
 210	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
 211	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
 212	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
 213	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
 214	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
 215	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
 216	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
 217	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
 218	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
 219	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
 220	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
 221	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
 222	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
 223	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
 224	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
 225	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
 226	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
 227	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
 228	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
 229	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
 230	0,
 231	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
 232	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
 233	0,
 234	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
 235	0,
 236	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
 237	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
 238	0,
 239	0,
 240	0,
 241	0,
 242	0,
 243	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
 244	0,
 245	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
 246	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
 247	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
 248	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
 249	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
 250	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
 251	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
 252	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
 253	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
 254	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
 255	0,
 256	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
 257	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
 258	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
 259	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
 260	0,
 261	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
 262	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
 263	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
 264	0,
 265	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
 266	0,
 267};
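/*
 * Illustrative sketch of the 2M CRB windowing: for a CRB offset 'off'
 * (relative to QLCNIC_PCI_CRBSPACE in the 128M map), CRB_BLK(off) picks
 * the hub/agent entry above and CRB_HI(off) builds the value written to
 * the CRB_WINDOW_2M register, i.e.
 *
 *	window = (crb_hub_agt[(off >> 20) & 0x3f] << 20) | (off & 0xf0000);
 *
 * The low 16 bits of 'off' are then applied through the CRB_INDIRECT_2M
 * region (see qlcnic_pci_get_crb_addr_2M/qlcnic_pci_set_crbwindow_2M below).
 */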
 268
 269/*  PCI Windowing for DDR regions.  */
 270
 271#define QLCNIC_PCIE_SEM_TIMEOUT	10000
 272
 273int
 274qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 275{
 276	int done = 0, timeout = 0;
 277
 278	while (!done) {
 279		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
 280		if (done == 1)
 281			break;
 282		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
 283			dev_err(&adapter->pdev->dev,
 284				"Failed to acquire sem=%d lock; holdby=%d\n",
 285				sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
 286			return -EIO;
 287		}
 288		msleep(1);
 289	}
 290
 291	if (id_reg)
 292		QLCWR32(adapter, id_reg, adapter->portnum);
 293
 294	return 0;
 295}
 296
 297void
 298qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
 299{
 300	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
 301}
 302
 303static int
 304qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
 305		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
 306{
 307	u32 i, producer, consumer;
 308	struct qlcnic_cmd_buffer *pbuf;
 309	struct cmd_desc_type0 *cmd_desc;
 310	struct qlcnic_host_tx_ring *tx_ring;
 311
 312	i = 0;
 313
 314	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 315		return -EIO;
 316
 317	tx_ring = adapter->tx_ring;
 318	__netif_tx_lock_bh(tx_ring->txq);
 319
 320	producer = tx_ring->producer;
 321	consumer = tx_ring->sw_consumer;
 322
 323	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
 324		netif_tx_stop_queue(tx_ring->txq);
 325		smp_mb();
 326		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
 327			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
 328				netif_tx_wake_queue(tx_ring->txq);
 329		} else {
 330			adapter->stats.xmit_off++;
 331			__netif_tx_unlock_bh(tx_ring->txq);
 332			return -EBUSY;
 333		}
 334	}
 335
 336	do {
 337		cmd_desc = &cmd_desc_arr[i];
 338
 339		pbuf = &tx_ring->cmd_buf_arr[producer];
 340		pbuf->skb = NULL;
 341		pbuf->frag_count = 0;
 342
 343		memcpy(&tx_ring->desc_head[producer],
 344			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
 345
 346		producer = get_next_index(producer, tx_ring->num_desc);
 347		i++;
 348
 349	} while (i != nr_desc);
 350
 351	tx_ring->producer = producer;
 352
 353	qlcnic_update_cmd_producer(adapter, tx_ring);
 354
 355	__netif_tx_unlock_bh(tx_ring->txq);
 356
 357	return 0;
 358}
 359
 360static int
 361qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 362				__le16 vlan_id, unsigned op)
 363{
 364	struct qlcnic_nic_req req;
 365	struct qlcnic_mac_req *mac_req;
 366	struct qlcnic_vlan_req *vlan_req;
 367	u64 word;
 368
 369	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 370	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
 371
 372	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
 373	req.req_hdr = cpu_to_le64(word);
 374
 375	mac_req = (struct qlcnic_mac_req *)&req.words[0];
 376	mac_req->op = op;
 377	memcpy(mac_req->mac_addr, addr, 6);
 378
 379	vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
 380	vlan_req->vlan_id = vlan_id;
 381
 382	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 383}
 384
 385static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
 386{
 387	struct list_head *head;
 388	struct qlcnic_mac_list_s *cur;
 389
 390	/* look up if already exists */
 391	list_for_each(head, &adapter->mac_list) {
 392		cur = list_entry(head, struct qlcnic_mac_list_s, list);
 393		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
 394			return 0;
 395	}
 396
 397	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
 398	if (cur == NULL) {
 399		dev_err(&adapter->netdev->dev,
 400			"failed to add mac address filter\n");
 401		return -ENOMEM;
 402	}
 403	memcpy(cur->mac_addr, addr, ETH_ALEN);
 404
 405	if (qlcnic_sre_macaddr_change(adapter,
 406				cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
 407		kfree(cur);
 408		return -EIO;
 409	}
 410
 411	list_add_tail(&cur->list, &adapter->mac_list);
 412	return 0;
 413}
 414
 415void qlcnic_set_multi(struct net_device *netdev)
 416{
 417	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 418	struct netdev_hw_addr *ha;
 419	static const u8 bcast_addr[ETH_ALEN] = {
 420		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 421	};
 422	u32 mode = VPORT_MISS_MODE_DROP;
 423
 424	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 425		return;
 426
 427	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
 428	qlcnic_nic_add_mac(adapter, bcast_addr);
 429
 430	if (netdev->flags & IFF_PROMISC) {
 431		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
 432			mode = VPORT_MISS_MODE_ACCEPT_ALL;
 433		goto send_fw_cmd;
 434	}
 435
 436	if ((netdev->flags & IFF_ALLMULTI) ||
 437	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
 438		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
 439		goto send_fw_cmd;
 440	}
 441
 442	if (!netdev_mc_empty(netdev)) {
 443		netdev_for_each_mc_addr(ha, netdev) {
 444			qlcnic_nic_add_mac(adapter, ha->addr);
 445		}
 446	}
 447
 448send_fw_cmd:
 449	qlcnic_nic_set_promisc(adapter, mode);
 450}
 451
 452int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
 453{
 454	struct qlcnic_nic_req req;
 455	u64 word;
 456
 457	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 458
 459	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 460
 461	word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
 462			((u64)adapter->portnum << 16);
 463	req.req_hdr = cpu_to_le64(word);
 464
 465	req.words[0] = cpu_to_le64(mode);
 466
 467	return qlcnic_send_cmd_descs(adapter,
 468				(struct cmd_desc_type0 *)&req, 1);
 469}
 470
 471void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
 472{
 473	struct qlcnic_mac_list_s *cur;
 474	struct list_head *head = &adapter->mac_list;
 475
 476	while (!list_empty(head)) {
 477		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
 478		qlcnic_sre_macaddr_change(adapter,
 479				cur->mac_addr, 0, QLCNIC_MAC_DEL);
 480		list_del(&cur->list);
 481		kfree(cur);
 482	}
 483}
 484
 485void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 486{
 487	struct qlcnic_filter *tmp_fil;
 488	struct hlist_node *tmp_hnode, *n;
 489	struct hlist_head *head;
 490	int i;
 491
 492	for (i = 0; i < adapter->fhash.fmax; i++) {
 493		head = &(adapter->fhash.fhead[i]);
 494
 495		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
 496		{
  497			if (time_after(jiffies,
  498				tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ)) {
 499				qlcnic_sre_macaddr_change(adapter,
 500					tmp_fil->faddr, tmp_fil->vlan_id,
 501					tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
 502					QLCNIC_MAC_DEL);
 503				spin_lock_bh(&adapter->mac_learn_lock);
 504				adapter->fhash.fnum--;
 505				hlist_del(&tmp_fil->fnode);
 506				spin_unlock_bh(&adapter->mac_learn_lock);
 507				kfree(tmp_fil);
 508			}
 509		}
 510	}
 511}
 512
 513void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
 514{
 515	struct qlcnic_filter *tmp_fil;
 516	struct hlist_node *tmp_hnode, *n;
 517	struct hlist_head *head;
 518	int i;
 519
 520	for (i = 0; i < adapter->fhash.fmax; i++) {
 521		head = &(adapter->fhash.fhead[i]);
 522
 523		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
 524			qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
 525				tmp_fil->vlan_id, tmp_fil->vlan_id ?
 526				QLCNIC_MAC_VLAN_DEL :  QLCNIC_MAC_DEL);
 527			spin_lock_bh(&adapter->mac_learn_lock);
 528			adapter->fhash.fnum--;
 529			hlist_del(&tmp_fil->fnode);
 530			spin_unlock_bh(&adapter->mac_learn_lock);
 531			kfree(tmp_fil);
 532		}
 533	}
 534}
 535
 536/*
 537 * Send the interrupt coalescing parameter set by ethtool to the card.
 538 */
 539int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
 540{
 541	struct qlcnic_nic_req req;
 542	int rv;
 543
 544	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 545
 546	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 547
 548	req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
 549		((u64) adapter->portnum << 16));
 550
 551	req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
 552	req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
 553			((u64) adapter->ahw->coal.rx_time_us) << 16);
 554	req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
 555			((u64) adapter->ahw->coal.type) << 32 |
 556			((u64) adapter->ahw->coal.sts_ring_mask) << 40);
 557	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 558	if (rv != 0)
 559		dev_err(&adapter->netdev->dev,
 560			"Could not send interrupt coalescing parameters\n");
 561	return rv;
 562}
 563
 564int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
 565{
 566	struct qlcnic_nic_req req;
 567	u64 word;
 568	int rv;
 569
 570	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 571		return 0;
 572
 573	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 574
 575	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 576
 577	word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
 578	req.req_hdr = cpu_to_le64(word);
 579
 580	req.words[0] = cpu_to_le64(enable);
 581
 582	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 583	if (rv != 0)
 584		dev_err(&adapter->netdev->dev,
 585			"Could not send configure hw lro request\n");
 586
 587	return rv;
 588}
 589
 590int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 591{
 592	struct qlcnic_nic_req req;
 593	u64 word;
 594	int rv;
 595
 596	if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
 597		return 0;
 598
 599	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 600
 601	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 602
 603	word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
 604		((u64)adapter->portnum << 16);
 605	req.req_hdr = cpu_to_le64(word);
 606
 607	req.words[0] = cpu_to_le64(enable);
 608
 609	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 610	if (rv != 0)
 611		dev_err(&adapter->netdev->dev,
 612			"Could not send configure bridge mode request\n");
 613
 614	adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
 615
 616	return rv;
 617}
 618
 619
 620#define RSS_HASHTYPE_IP_TCP	0x3
 621
 622int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
 623{
 624	struct qlcnic_nic_req req;
 625	u64 word;
 626	int i, rv;
 627
 628	static const u64 key[] = {
 629		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
 630		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
 631		0x255b0ec26d5a56daULL
 632	};
 633
 634	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 635	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 636
 637	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
 638	req.req_hdr = cpu_to_le64(word);
 639
 640	/*
 641	 * RSS request:
 642	 * bits 3-0: hash_method
 643	 *      5-4: hash_type_ipv4
 644	 *	7-6: hash_type_ipv6
 645	 *	  8: enable
 646	 *        9: use indirection table
 647	 *    47-10: reserved
 648	 *    63-48: indirection table mask
 649	 */
 650	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
 651		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
 652		((u64)(enable & 0x1) << 8) |
 653		((0x7ULL) << 48);
 654	req.words[0] = cpu_to_le64(word);
 655	for (i = 0; i < 5; i++)
 656		req.words[i+1] = cpu_to_le64(key[i]);
 657
 658	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 659	if (rv != 0)
 660		dev_err(&adapter->netdev->dev, "could not configure RSS\n");
 661
 662	return rv;
 663}
 664
 665int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
 666{
 667	struct qlcnic_nic_req req;
 668	struct qlcnic_ipaddr *ipa;
 669	u64 word;
 670	int rv;
 671
 672	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 673	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 674
 675	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
 676	req.req_hdr = cpu_to_le64(word);
 677
 678	req.words[0] = cpu_to_le64(cmd);
 679	ipa = (struct qlcnic_ipaddr *)&req.words[1];
 680	ipa->ipv4 = ip;
 681
 682	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 683	if (rv != 0)
 684		dev_err(&adapter->netdev->dev,
  685				"could not notify %s IP 0x%x request\n",
 686				(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
 687
 688	return rv;
 689}
 690
 691int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
 692{
 693	struct qlcnic_nic_req req;
 694	u64 word;
 695	int rv;
 696
 697	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 698	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 699
 700	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
 701	req.req_hdr = cpu_to_le64(word);
 702	req.words[0] = cpu_to_le64(enable | (enable << 8));
 703
 704	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 705	if (rv != 0)
 706		dev_err(&adapter->netdev->dev,
 707				"could not configure link notification\n");
 708
 709	return rv;
 710}
 711
 712int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
 713{
 714	struct qlcnic_nic_req req;
 715	u64 word;
 716	int rv;
 717
 718	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 719		return 0;
 720
 721	memset(&req, 0, sizeof(struct qlcnic_nic_req));
 722	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
 723
 724	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
 725		((u64)adapter->portnum << 16) |
  726		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
 727
 728	req.req_hdr = cpu_to_le64(word);
 729
 730	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 731	if (rv != 0)
 732		dev_err(&adapter->netdev->dev,
 733				 "could not cleanup lro flows\n");
 734
 735	return rv;
 736}
 737
 738/*
 739 * qlcnic_change_mtu - Change the Maximum Transfer Unit
 740 * @returns 0 on success, negative on failure
 741 */
 742
 743int qlcnic_change_mtu(struct net_device *netdev, int mtu)
 744{
 745	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 746	int rc = 0;
 747
 748	if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
  749		dev_err(&adapter->netdev->dev, "supported MTU range is %d to"
  750			" %d bytes\n", P3P_MIN_MTU, P3P_MAX_MTU);
 751		return -EINVAL;
 752	}
 753
 754	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
 755
 756	if (!rc)
 757		netdev->mtu = mtu;
 758
 759	return rc;
 760}
 761
 762
 763u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
 764{
 765	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 766
 767	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
 768		u32 changed = features ^ netdev->features;
 769		features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
 770	}
 771
 772	if (!(features & NETIF_F_RXCSUM))
 773		features &= ~NETIF_F_LRO;
 774
 775	return features;
 776}
 777
 778
 779int qlcnic_set_features(struct net_device *netdev, u32 features)
 780{
 781	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 782	u32 changed = netdev->features ^ features;
 783	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
 784
 785	if (!(changed & NETIF_F_LRO))
 786		return 0;
 787
 788	netdev->features = features ^ NETIF_F_LRO;
 789
 790	if (qlcnic_config_hw_lro(adapter, hw_lro))
 791		return -EIO;
 792
 793	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
 794		return -EIO;
 795
 796	return 0;
 797}
 798
  799/*
  800 * Translate a CRB offset in the 128M pci map to an address in the
  801 * 2M pci map.
  802 * Returns < 0 if 'off' is not valid,
  803 *	 1 if CRB window access is needed; '*addr' points into the
  804 *	   indirect window region (CRB_INDIRECT_2M)
  805 *	 0 if no window access is needed; '*addr' is the direct 2M addr
  806 * In: 'off' is offset from base in 128M pci map
  807 */
 808static int
 809qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
 810		ulong off, void __iomem **addr)
 811{
 812	const struct crb_128M_2M_sub_block_map *m;
 813
 814	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
 815		return -EINVAL;
 816
 817	off -= QLCNIC_PCI_CRBSPACE;
 818
 819	/*
 820	 * Try direct map
 821	 */
 822	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
 823
 824	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
 825		*addr = adapter->ahw->pci_base0 + m->start_2M +
 826			(off - m->start_128M);
 827		return 0;
 828	}
 829
 830	/*
 831	 * Not in direct map, use crb window
 832	 */
 833	*addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
 834	return 1;
 835}
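/*
 * Usage note: callers treat a return of 0 as a directly mapped address
 * and access it straight away; a return of 1 means the address lies in
 * the CRB_INDIRECT_2M region, so the caller must hold crb_lock, take
 * crb_win_lock() and program the window via qlcnic_pci_set_crbwindow_2M()
 * before touching *addr (see qlcnic_hw_write_wx_2M/qlcnic_hw_read_wx_2M).
 */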
 836
 837/*
 838 * In: 'off' is offset from CRB space in 128M pci map
 839 * Out: 'off' is 2M pci map addr
 840 * side effect: lock crb window
 841 */
 842static int
 843qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
 844{
 845	u32 window;
 846	void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
 847
 848	off -= QLCNIC_PCI_CRBSPACE;
 849
 850	window = CRB_HI(off);
 851	if (window == 0) {
 852		dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
 853		return -EIO;
 854	}
 855
 856	writel(window, addr);
 857	if (readl(addr) != window) {
 858		if (printk_ratelimit())
 859			dev_warn(&adapter->pdev->dev,
 860				"failed to set CRB window to %d off 0x%lx\n",
 861				window, off);
 862		return -EIO;
 863	}
 864	return 0;
 865}
 866
 867int
 868qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
 869{
 870	unsigned long flags;
 871	int rv;
 872	void __iomem *addr = NULL;
 873
 874	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
 875
 876	if (rv == 0) {
 877		writel(data, addr);
 878		return 0;
 879	}
 880
 881	if (rv > 0) {
 882		/* indirect access */
 883		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
 884		crb_win_lock(adapter);
 885		rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
 886		if (!rv)
 887			writel(data, addr);
 888		crb_win_unlock(adapter);
 889		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
 890		return rv;
 891	}
 892
 893	dev_err(&adapter->pdev->dev,
 894			"%s: invalid offset: 0x%016lx\n", __func__, off);
 895	dump_stack();
 896	return -EIO;
 897}
 898
 899u32
 900qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
 901{
 902	unsigned long flags;
 903	int rv;
 904	u32 data = -1;
 905	void __iomem *addr = NULL;
 906
 907	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
 908
 909	if (rv == 0)
 910		return readl(addr);
 911
 912	if (rv > 0) {
 913		/* indirect access */
 914		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
 915		crb_win_lock(adapter);
 916		if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
 917			data = readl(addr);
 918		crb_win_unlock(adapter);
 919		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
 920		return data;
 921	}
 922
 923	dev_err(&adapter->pdev->dev,
 924			"%s: invalid offset: 0x%016lx\n", __func__, off);
 925	dump_stack();
 926	return -1;
 927}
 928
 929
 930void __iomem *
 931qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
 932{
 933	void __iomem *addr = NULL;
 934
 935	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
 936
 937	return addr;
 938}
 939
 940
 941static int
 942qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
 943		u64 addr, u32 *start)
 944{
 945	u32 window;
 946
 947	window = OCM_WIN_P3P(addr);
 948
 949	writel(window, adapter->ahw->ocm_win_crb);
 950	/* read back to flush */
 951	readl(adapter->ahw->ocm_win_crb);
 952
 953	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
 954	return 0;
 955}
 956
 957static int
 958qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
 959		u64 *data, int op)
 960{
 961	void __iomem *addr;
 962	int ret;
 963	u32 start;
 964
 965	mutex_lock(&adapter->ahw->mem_lock);
 966
 967	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
 968	if (ret != 0)
 969		goto unlock;
 970
 971	addr = adapter->ahw->pci_base0 + start;
 972
 973	if (op == 0)	/* read */
 974		*data = readq(addr);
 975	else		/* write */
 976		writeq(*data, addr);
 977
 978unlock:
 979	mutex_unlock(&adapter->ahw->mem_lock);
 980
 981	return ret;
 982}
 983
 984void
 985qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
 986{
 987	void __iomem *addr = adapter->ahw->pci_base0 +
 988		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
 989
 990	mutex_lock(&adapter->ahw->mem_lock);
 991	*data = readq(addr);
 992	mutex_unlock(&adapter->ahw->mem_lock);
 993}
 994
 995void
 996qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
 997{
 998	void __iomem *addr = adapter->ahw->pci_base0 +
 999		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1000
1001	mutex_lock(&adapter->ahw->mem_lock);
1002	writeq(data, addr);
1003	mutex_unlock(&adapter->ahw->mem_lock);
1004}
1005
1006#define MAX_CTL_CHECK   1000
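/*
 * MIU test-agent access pattern used below: program the 64-bit aligned
 * address into MIU_TEST_AGT_ADDR_LO/HI, kick the agent with TA_CTL_ENABLE
 * followed by TA_CTL_START | TA_CTL_ENABLE, poll TEST_AGT_CTRL until
 * TA_CTL_BUSY clears (at most MAX_CTL_CHECK iterations), then move data
 * through the MIU_TEST_AGT_RDDATA/WRDATA registers.
 */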
1007
1008int
1009qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
1010		u64 off, u64 data)
1011{
1012	int i, j, ret;
1013	u32 temp, off8;
1014	void __iomem *mem_crb;
1015
1016	/* Only 64-bit aligned access */
1017	if (off & 7)
1018		return -EIO;
1019
1020	/* P3 onward, test agent base for MIU and SIU is same */
1021	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1022				QLCNIC_ADDR_QDR_NET_MAX)) {
1023		mem_crb = qlcnic_get_ioaddr(adapter,
1024				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1025		goto correct;
1026	}
1027
1028	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1029		mem_crb = qlcnic_get_ioaddr(adapter,
1030				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1031		goto correct;
1032	}
1033
1034	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1035		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
1036
1037	return -EIO;
1038
1039correct:
1040	off8 = off & ~0xf;
1041
1042	mutex_lock(&adapter->ahw->mem_lock);
1043
1044	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1045	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1046
1047	i = 0;
1048	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1049	writel((TA_CTL_START | TA_CTL_ENABLE),
1050			(mem_crb + TEST_AGT_CTRL));
1051
1052	for (j = 0; j < MAX_CTL_CHECK; j++) {
1053		temp = readl(mem_crb + TEST_AGT_CTRL);
1054		if ((temp & TA_CTL_BUSY) == 0)
1055			break;
1056	}
1057
1058	if (j >= MAX_CTL_CHECK) {
1059		ret = -EIO;
1060		goto done;
1061	}
1062
1063	i = (off & 0xf) ? 0 : 2;
1064	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1065			mem_crb + MIU_TEST_AGT_WRDATA(i));
1066	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1067			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1068	i = (off & 0xf) ? 2 : 0;
1069
1070	writel(data & 0xffffffff,
1071			mem_crb + MIU_TEST_AGT_WRDATA(i));
1072	writel((data >> 32) & 0xffffffff,
1073			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1074
1075	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1076	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1077			(mem_crb + TEST_AGT_CTRL));
1078
1079	for (j = 0; j < MAX_CTL_CHECK; j++) {
1080		temp = readl(mem_crb + TEST_AGT_CTRL);
1081		if ((temp & TA_CTL_BUSY) == 0)
1082			break;
1083	}
1084
1085	if (j >= MAX_CTL_CHECK) {
1086		if (printk_ratelimit())
1087			dev_err(&adapter->pdev->dev,
1088					"failed to write through agent\n");
1089		ret = -EIO;
1090	} else
1091		ret = 0;
1092
1093done:
1094	mutex_unlock(&adapter->ahw->mem_lock);
1095
1096	return ret;
1097}
1098
1099int
1100qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1101		u64 off, u64 *data)
1102{
1103	int j, ret;
1104	u32 temp, off8;
1105	u64 val;
1106	void __iomem *mem_crb;
1107
1108	/* Only 64-bit aligned access */
1109	if (off & 7)
1110		return -EIO;
1111
1112	/* P3 onward, test agent base for MIU and SIU is same */
1113	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1114				QLCNIC_ADDR_QDR_NET_MAX)) {
1115		mem_crb = qlcnic_get_ioaddr(adapter,
1116				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1117		goto correct;
1118	}
1119
1120	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1121		mem_crb = qlcnic_get_ioaddr(adapter,
1122				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1123		goto correct;
1124	}
1125
1126	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1127		return qlcnic_pci_mem_access_direct(adapter,
1128				off, data, 0);
1129	}
1130
1131	return -EIO;
1132
1133correct:
1134	off8 = off & ~0xf;
1135
1136	mutex_lock(&adapter->ahw->mem_lock);
1137
1138	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1139	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1140	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1141	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1142
1143	for (j = 0; j < MAX_CTL_CHECK; j++) {
1144		temp = readl(mem_crb + TEST_AGT_CTRL);
1145		if ((temp & TA_CTL_BUSY) == 0)
1146			break;
1147	}
1148
1149	if (j >= MAX_CTL_CHECK) {
1150		if (printk_ratelimit())
1151			dev_err(&adapter->pdev->dev,
1152					"failed to read through agent\n");
1153		ret = -EIO;
1154	} else {
1155		off8 = MIU_TEST_AGT_RDDATA_LO;
1156		if (off & 0xf)
1157			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1158
1159		temp = readl(mem_crb + off8 + 4);
1160		val = (u64)temp << 32;
1161		val |= readl(mem_crb + off8);
1162		*data = val;
1163		ret = 0;
1164	}
1165
1166	mutex_unlock(&adapter->ahw->mem_lock);
1167
1168	return ret;
1169}
1170
1171int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1172{
1173	int offset, board_type, magic;
1174	struct pci_dev *pdev = adapter->pdev;
1175
1176	offset = QLCNIC_FW_MAGIC_OFFSET;
1177	if (qlcnic_rom_fast_read(adapter, offset, &magic))
1178		return -EIO;
1179
1180	if (magic != QLCNIC_BDINFO_MAGIC) {
1181		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1182			magic);
1183		return -EIO;
1184	}
1185
1186	offset = QLCNIC_BRDTYPE_OFFSET;
1187	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1188		return -EIO;
1189
1190	adapter->ahw->board_type = board_type;
1191
1192	if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1193		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1194		if ((gpio & 0x8000) == 0)
1195			board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1196	}
1197
1198	switch (board_type) {
1199	case QLCNIC_BRDTYPE_P3P_HMEZ:
1200	case QLCNIC_BRDTYPE_P3P_XG_LOM:
1201	case QLCNIC_BRDTYPE_P3P_10G_CX4:
1202	case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1203	case QLCNIC_BRDTYPE_P3P_IMEZ:
1204	case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1205	case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1206	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1207	case QLCNIC_BRDTYPE_P3P_10G_XFP:
1208	case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1209		adapter->ahw->port_type = QLCNIC_XGBE;
1210		break;
1211	case QLCNIC_BRDTYPE_P3P_REF_QG:
1212	case QLCNIC_BRDTYPE_P3P_4_GB:
1213	case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1214		adapter->ahw->port_type = QLCNIC_GBE;
1215		break;
1216	case QLCNIC_BRDTYPE_P3P_10G_TP:
1217		adapter->ahw->port_type = (adapter->portnum < 2) ?
1218			QLCNIC_XGBE : QLCNIC_GBE;
1219		break;
1220	default:
1221		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1222		adapter->ahw->port_type = QLCNIC_XGBE;
1223		break;
1224	}
1225
1226	return 0;
1227}
1228
1229int
1230qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1231{
1232	u32 wol_cfg;
1233
1234	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1235	if (wol_cfg & (1UL << adapter->portnum)) {
1236		wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1237		if (wol_cfg & (1 << adapter->portnum))
1238			return 1;
1239	}
1240
1241	return 0;
1242}
1243
1244int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1245{
1246	struct qlcnic_nic_req   req;
1247	int rv;
1248	u64 word;
1249
1250	memset(&req, 0, sizeof(struct qlcnic_nic_req));
1251	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1252
1253	word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1254	req.req_hdr = cpu_to_le64(word);
1255
1256	req.words[0] = cpu_to_le64((u64)rate << 32);
1257	req.words[1] = cpu_to_le64(state);
1258
1259	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1260	if (rv)
1261		dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1262
1263	return rv;
1264}
1265
1266/* FW dump related functions */
1267static u32
1268qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1269		u32 *buffer)
1270{
1271	int i;
1272	u32 addr, data;
1273	struct __crb *crb = &entry->region.crb;
1274	void __iomem *base = adapter->ahw->pci_base0;
1275
1276	addr = crb->addr;
1277
1278	for (i = 0; i < crb->no_ops; i++) {
1279		QLCNIC_RD_DUMP_REG(addr, base, &data);
1280		*buffer++ = cpu_to_le32(addr);
1281		*buffer++ = cpu_to_le32(data);
1282		addr += crb->stride;
1283	}
1284	return crb->no_ops * 2 * sizeof(u32);
1285}
1286
1287static u32
1288qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1289	struct qlcnic_dump_entry *entry, u32 *buffer)
1290{
1291	int i, k, timeout = 0;
1292	void __iomem *base = adapter->ahw->pci_base0;
1293	u32 addr, data;
1294	u8 opcode, no_ops;
1295	struct __ctrl *ctr = &entry->region.ctrl;
1296	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1297
1298	addr = ctr->addr;
1299	no_ops = ctr->no_ops;
1300
1301	for (i = 0; i < no_ops; i++) {
1302		k = 0;
1303		opcode = 0;
1304		for (k = 0; k < 8; k++) {
1305			if (!(ctr->opcode & (1 << k)))
1306				continue;
1307			switch (1 << k) {
1308			case QLCNIC_DUMP_WCRB:
1309				QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
1310				break;
1311			case QLCNIC_DUMP_RWCRB:
1312				QLCNIC_RD_DUMP_REG(addr, base, &data);
1313				QLCNIC_WR_DUMP_REG(addr, base, data);
1314				break;
1315			case QLCNIC_DUMP_ANDCRB:
1316				QLCNIC_RD_DUMP_REG(addr, base, &data);
1317				QLCNIC_WR_DUMP_REG(addr, base,
1318					(data & ctr->val2));
1319				break;
1320			case QLCNIC_DUMP_ORCRB:
1321				QLCNIC_RD_DUMP_REG(addr, base, &data);
1322				QLCNIC_WR_DUMP_REG(addr, base,
1323					(data | ctr->val3));
1324				break;
1325			case QLCNIC_DUMP_POLLCRB:
1326				while (timeout <= ctr->timeout) {
1327					QLCNIC_RD_DUMP_REG(addr, base, &data);
1328					if ((data & ctr->val2) == ctr->val1)
1329						break;
1330					msleep(1);
1331					timeout++;
1332				}
1333				if (timeout > ctr->timeout) {
1334					dev_info(&adapter->pdev->dev,
1335					"Timed out, aborting poll CRB\n");
1336					return -EINVAL;
1337				}
1338				break;
1339			case QLCNIC_DUMP_RD_SAVE:
1340				if (ctr->index_a)
1341					addr = t_hdr->saved_state[ctr->index_a];
1342				QLCNIC_RD_DUMP_REG(addr, base, &data);
1343				t_hdr->saved_state[ctr->index_v] = data;
1344				break;
1345			case QLCNIC_DUMP_WRT_SAVED:
1346				if (ctr->index_v)
1347					data = t_hdr->saved_state[ctr->index_v];
1348				else
1349					data = ctr->val1;
1350				if (ctr->index_a)
1351					addr = t_hdr->saved_state[ctr->index_a];
1352				QLCNIC_WR_DUMP_REG(addr, base, data);
1353				break;
1354			case QLCNIC_DUMP_MOD_SAVE_ST:
1355				data = t_hdr->saved_state[ctr->index_v];
1356				data <<= ctr->shl_val;
1357				data >>= ctr->shr_val;
1358				if (ctr->val2)
1359					data &= ctr->val2;
1360				data |= ctr->val3;
1361				data += ctr->val1;
1362				t_hdr->saved_state[ctr->index_v] = data;
1363				break;
1364			default:
1365				dev_info(&adapter->pdev->dev,
1366					"Unknown opcode\n");
1367				break;
1368			}
1369		}
1370		addr += ctr->stride;
1371	}
1372	return 0;
1373}
1374
1375static u32
1376qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1377	u32 *buffer)
1378{
1379	int loop;
1380	u32 val, data = 0;
1381	struct __mux *mux = &entry->region.mux;
1382	void __iomem *base = adapter->ahw->pci_base0;
1383
1384	val = mux->val;
1385	for (loop = 0; loop < mux->no_ops; loop++) {
1386		QLCNIC_WR_DUMP_REG(mux->addr, base, val);
1387		QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
1388		*buffer++ = cpu_to_le32(val);
1389		*buffer++ = cpu_to_le32(data);
1390		val += mux->val_stride;
1391	}
1392	return 2 * mux->no_ops * sizeof(u32);
1393}
1394
1395static u32
1396qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1397	u32 *buffer)
1398{
1399	int i, loop;
1400	u32 cnt, addr, data, que_id = 0;
1401	void __iomem *base = adapter->ahw->pci_base0;
1402	struct __queue *que = &entry->region.que;
1403
1404	addr = que->read_addr;
1405	cnt = que->read_addr_cnt;
1406
1407	for (loop = 0; loop < que->no_ops; loop++) {
1408		QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1409		addr = que->read_addr;
1410		for (i = 0; i < cnt; i++) {
1411			QLCNIC_RD_DUMP_REG(addr, base, &data);
1412			*buffer++ = cpu_to_le32(data);
1413			addr += que->read_addr_stride;
1414		}
1415		que_id += que->stride;
1416	}
1417	return que->no_ops * cnt * sizeof(u32);
1418}
1419
1420static u32
1421qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1422	u32 *buffer)
1423{
1424	int i;
1425	u32 data;
1426	void __iomem *addr;
1427	struct __ocm *ocm = &entry->region.ocm;
1428
1429	addr = adapter->ahw->pci_base0 + ocm->read_addr;
1430	for (i = 0; i < ocm->no_ops; i++) {
1431		data = readl(addr);
1432		*buffer++ = cpu_to_le32(data);
1433		addr += ocm->read_addr_stride;
1434	}
1435	return ocm->no_ops * sizeof(u32);
1436}
1437
1438static u32
1439qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1440	u32 *buffer)
1441{
1442	int i, count = 0;
1443	u32 fl_addr, size, val, lck_val, addr;
1444	struct __mem *rom = &entry->region.mem;
1445	void __iomem *base = adapter->ahw->pci_base0;
1446
1447	fl_addr = rom->addr;
1448	size = rom->size/4;
1449lock_try:
1450	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1451	if (!lck_val && count < MAX_CTL_CHECK) {
1452		msleep(10);
1453		count++;
1454		goto lock_try;
1455	}
1456	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1457	for (i = 0; i < size; i++) {
1458		addr = fl_addr & 0xFFFF0000;
1459		QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
1460		addr = LSW(fl_addr) + FLASH_ROM_DATA;
1461		QLCNIC_RD_DUMP_REG(addr, base, &val);
1462		fl_addr += 4;
1463		*buffer++ = cpu_to_le32(val);
1464	}
1465	readl(base + QLCNIC_FLASH_SEM2_ULK);
1466	return rom->size;
1467}
1468
1469static u32
1470qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1471	struct qlcnic_dump_entry *entry, u32 *buffer)
1472{
1473	int i;
1474	u32 cnt, val, data, addr;
1475	void __iomem *base = adapter->ahw->pci_base0;
1476	struct __cache *l1 = &entry->region.cache;
1477
1478	val = l1->init_tag_val;
1479
1480	for (i = 0; i < l1->no_ops; i++) {
1481		QLCNIC_WR_DUMP_REG(l1->addr, base, val);
1482		QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1483		addr = l1->read_addr;
1484		cnt = l1->read_addr_num;
1485		while (cnt) {
1486			QLCNIC_RD_DUMP_REG(addr, base, &data);
1487			*buffer++ = cpu_to_le32(data);
1488			addr += l1->read_addr_stride;
1489			cnt--;
1490		}
1491		val += l1->stride;
1492	}
1493	return l1->no_ops * l1->read_addr_num * sizeof(u32);
1494}
1495
1496static u32
1497qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1498	struct qlcnic_dump_entry *entry, u32 *buffer)
1499{
1500	int i;
1501	u32 cnt, val, data, addr;
1502	u8 poll_mask, poll_to, time_out = 0;
1503	void __iomem *base = adapter->ahw->pci_base0;
1504	struct __cache *l2 = &entry->region.cache;
1505
1506	val = l2->init_tag_val;
1507	poll_mask = LSB(MSW(l2->ctrl_val));
1508	poll_to = MSB(MSW(l2->ctrl_val));
1509
1510	for (i = 0; i < l2->no_ops; i++) {
1511		QLCNIC_WR_DUMP_REG(l2->addr, base, val);
1512		do {
1513			QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
1514				LSW(l2->ctrl_val));
1515			QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
1516			if (!(data & poll_mask))
1517				break;
1518			msleep(1);
1519			time_out++;
1520		} while (time_out <= poll_to);
1521		if (time_out > poll_to)
1522			return -EINVAL;
1523
1524		addr = l2->read_addr;
1525		cnt = l2->read_addr_num;
1526		while (cnt) {
1527			QLCNIC_RD_DUMP_REG(addr, base, &data);
1528			*buffer++ = cpu_to_le32(data);
1529			addr += l2->read_addr_stride;
1530			cnt--;
1531		}
1532		val += l2->stride;
1533	}
1534	return l2->no_ops * l2->read_addr_num * sizeof(u32);
1535}
1536
1537static u32
1538qlcnic_read_memory(struct qlcnic_adapter *adapter,
1539	struct qlcnic_dump_entry *entry, u32 *buffer)
1540{
1541	u32 addr, data, test, ret = 0;
1542	int i, reg_read;
1543	struct __mem *mem = &entry->region.mem;
1544	void __iomem *base = adapter->ahw->pci_base0;
1545
1546	reg_read = mem->size;
1547	addr = mem->addr;
1548	/* check for data size of multiple of 16 and 16 byte alignment */
1549	if ((addr & 0xf) || (reg_read%16)) {
1550		dev_info(&adapter->pdev->dev,
1551			"Unaligned memory addr:0x%x size:0x%x\n",
1552			addr, reg_read);
1553		return -EINVAL;
1554	}
1555
1556	mutex_lock(&adapter->ahw->mem_lock);
1557
1558	while (reg_read != 0) {
1559		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
1560		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
1561		QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
1562			TA_CTL_ENABLE | TA_CTL_START);
1563
1564		for (i = 0; i < MAX_CTL_CHECK; i++) {
1565			QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
1566			if (!(test & TA_CTL_BUSY))
1567				break;
1568		}
 1569		if (i == MAX_CTL_CHECK) {
 1570			if (printk_ratelimit()) {
 1571				dev_err(&adapter->pdev->dev,
 1572					"failed to read through agent\n");
 1573			}
 1574			ret = -EINVAL;
 1575			goto out;
 1576		}
1577		for (i = 0; i < 4; i++) {
1578			QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
1579			*buffer++ = cpu_to_le32(data);
1580		}
1581		addr += 16;
1582		reg_read -= 16;
1583		ret += 16;
1584	}
1585out:
1586	mutex_unlock(&adapter->ahw->mem_lock);
 1587	return ret;
1588}
1589
1590static u32
1591qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1592	struct qlcnic_dump_entry *entry, u32 *buffer)
1593{
1594	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1595	return 0;
1596}
1597
1598struct qlcnic_dump_operations fw_dump_ops[] = {
1599	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
1600	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
1601	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
1602	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
1603	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
1604	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
1605	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
1606	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
1607	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
1608	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
1609	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
1610	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
1611	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
1612	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
1613	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
1614	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
1615	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
1616	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
1617	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
1618	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
1619};
1620
1621/* Walk the template and collect dump for each entry in the dump template */
1622static int
1623qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1624	u32 size)
1625{
1626	int ret = 1;
1627	if (size != entry->hdr.cap_size) {
1628		dev_info(dev,
 1629		"Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
1630		entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1631		dev_info(dev, "Aborting further dump capture\n");
1632		ret = 0;
1633	}
1634	return ret;
1635}
1636
1637int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1638{
1639	u32 *buffer;
1640	char mesg[64];
1641	char *msg[] = {mesg, NULL};
1642	int i, k, ops_cnt, ops_index, dump_size = 0;
1643	u32 entry_offset, dump, no_entries, buf_offset = 0;
1644	struct qlcnic_dump_entry *entry;
1645	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1646	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1647
1648	if (fw_dump->clr) {
1649		dev_info(&adapter->pdev->dev,
1650			"Previous dump not cleared, not capturing dump\n");
1651		return -EIO;
1652	}
1653	/* Calculate the size for dump data area only */
1654	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1655		if (i & tmpl_hdr->drv_cap_mask)
1656			dump_size += tmpl_hdr->cap_sizes[k];
1657	if (!dump_size)
1658		return -EIO;
1659
1660	fw_dump->data = vzalloc(dump_size);
1661	if (!fw_dump->data) {
1662		dev_info(&adapter->pdev->dev,
1663			"Unable to allocate (%d KB) for fw dump\n",
1664			dump_size/1024);
1665		return -ENOMEM;
1666	}
1667	buffer = fw_dump->data;
1668	fw_dump->size = dump_size;
1669	no_entries = tmpl_hdr->num_entries;
1670	ops_cnt = ARRAY_SIZE(fw_dump_ops);
1671	entry_offset = tmpl_hdr->offset;
1672	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1673	tmpl_hdr->sys_info[1] = adapter->fw_version;
1674
1675	for (i = 0; i < no_entries; i++) {
1676		entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
1677			entry_offset);
1678		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1679			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1680			entry_offset += entry->hdr.offset;
1681			continue;
1682		}
1683		/* Find the handler for this entry */
1684		ops_index = 0;
1685		while (ops_index < ops_cnt) {
1686			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1687				break;
1688			ops_index++;
1689		}
1690		if (ops_index == ops_cnt) {
1691			dev_info(&adapter->pdev->dev,
1692				"Invalid entry type %d, exiting dump\n",
1693				entry->hdr.type);
1694			goto error;
1695		}
1696		/* Collect dump for this entry */
1697		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1698		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
1699			dump))
1700			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1701		buf_offset += entry->hdr.cap_size;
1702		entry_offset += entry->hdr.offset;
1703		buffer = fw_dump->data + buf_offset;
1704	}
1705	if (dump_size != buf_offset) {
1706		dev_info(&adapter->pdev->dev,
1707			"Captured(%d) and expected size(%d) do not match\n",
1708			buf_offset, dump_size);
1709		goto error;
1710	} else {
1711		fw_dump->clr = 1;
1712		snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
1713			adapter->pdev->devfn);
1714		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
1715			fw_dump->size);
1716		/* Send a udev event to notify availability of FW dump */
1717		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1718		return 0;
1719	}
1720error:
1721	vfree(fw_dump->data);
1722	return -EINVAL;
1723}