
/drivers/net/ethernet/brocade/bna/bnad.c

http://github.com/mirrors/linux
C | 3870 lines | 2878 code | 662 blank | 330 comment | 405 complexity | 931867b9dc29ed3d6af87762bdebce51 MD5

Large files are truncated; this listing shows only the beginning of the file.

   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Linux network driver for QLogic BR-series Converged Network Adapter.
   4 */
   5/*
   6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   7 * Copyright (c) 2014-2015 QLogic Corporation
   8 * All rights reserved
   9 * www.qlogic.com
  10 */
  11#include <linux/bitops.h>
  12#include <linux/netdevice.h>
  13#include <linux/skbuff.h>
  14#include <linux/etherdevice.h>
  15#include <linux/in.h>
  16#include <linux/ethtool.h>
  17#include <linux/if_vlan.h>
  18#include <linux/if_ether.h>
  19#include <linux/ip.h>
  20#include <linux/prefetch.h>
  21#include <linux/module.h>
  22
  23#include "bnad.h"
  24#include "bna.h"
  25#include "cna.h"
  26
  27static DEFINE_MUTEX(bnad_fwimg_mutex);
  28
  29/*
  30 * Module params
  31 */
  32static uint bnad_msix_disable;
  33module_param(bnad_msix_disable, uint, 0444);
  34MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
  35
  36static uint bnad_ioc_auto_recover = 1;
  37module_param(bnad_ioc_auto_recover, uint, 0444);
  38MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
  39
  40static uint bna_debugfs_enable = 1;
  41module_param(bna_debugfs_enable, uint, 0644);
  42MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
  43		 " Range[false:0|true:1]");
  44
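/*
 * These are ordinary module parameters. Assuming the driver is built as
 * bna.ko (module name assumed here, it is not shown in this excerpt), they
 * can be set at load time, for example:
 *
 *   modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 *
 * bna_debugfs_enable is registered with mode 0644, so it can also be
 * changed at runtime via /sys/module/<module>/parameters/bna_debugfs_enable.
 */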
  45/*
  46 * Global variables
  47 */
  48static u32 bnad_rxqs_per_cq = 2;
  49static atomic_t bna_id;
  50static const u8 bnad_bcast_addr[] __aligned(2) =
  51	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
  52
  53/*
  54 * Local MACROS
  55 */
  56#define BNAD_GET_MBOX_IRQ(_bnad)				\
  57	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
  58	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
  59	 ((_bnad)->pcidev->irq))
  60
  61#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
  62do {								\
  63	(_res_info)->res_type = BNA_RES_T_MEM;			\
  64	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
  65	(_res_info)->res_u.mem_info.num = (_num);		\
  66	(_res_info)->res_u.mem_info.len = (_size);		\
  67} while (0)
  68
  69/*
  70 * Reinitialize completions in CQ, once Rx is taken down
  71 */
  72static void
  73bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
  74{
  75	struct bna_cq_entry *cmpl;
  76	int i;
  77
  78	for (i = 0; i < ccb->q_depth; i++) {
  79		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
  80		cmpl->valid = 0;
  81	}
  82}
  83
  84/* Tx Datapath functions */
  85
  86
  87/* Caller should ensure that the entry at unmap_q[index] is valid */
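/*
 * Vector 0 is the linear skb head and is unmapped with dma_unmap_single();
 * the remaining nvecs - 1 vectors are page fragments, packed up to
 * BFI_TX_MAX_VECTORS_PER_WI per unmap entry, so the walk below may span
 * several unmap_q entries. Returns the index that follows the last entry
 * used by this skb.
 */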
  88static u32
  89bnad_tx_buff_unmap(struct bnad *bnad,
  90			      struct bnad_tx_unmap *unmap_q,
  91			      u32 q_depth, u32 index)
  92{
  93	struct bnad_tx_unmap *unmap;
  94	struct sk_buff *skb;
  95	int vector, nvecs;
  96
  97	unmap = &unmap_q[index];
  98	nvecs = unmap->nvecs;
  99
 100	skb = unmap->skb;
 101	unmap->skb = NULL;
 102	unmap->nvecs = 0;
 103	dma_unmap_single(&bnad->pcidev->dev,
 104		dma_unmap_addr(&unmap->vectors[0], dma_addr),
 105		skb_headlen(skb), DMA_TO_DEVICE);
 106	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
 107	nvecs--;
 108
 109	vector = 0;
 110	while (nvecs) {
 111		vector++;
 112		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
 113			vector = 0;
 114			BNA_QE_INDX_INC(index, q_depth);
 115			unmap = &unmap_q[index];
 116		}
 117
 118		dma_unmap_page(&bnad->pcidev->dev,
 119			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
 120			dma_unmap_len(&unmap->vectors[vector], dma_len),
 121			DMA_TO_DEVICE);
 122		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
 123		nvecs--;
 124	}
 125
 126	BNA_QE_INDX_INC(index, q_depth);
 127
 128	return index;
 129}
 130
 131/*
 132 * Frees all pending Tx Bufs
 133 * At this point no activity is expected on the Q,
 134 * so DMA unmap & freeing is fine.
 135 */
 136static void
 137bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 138{
 139	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
 140	struct sk_buff *skb;
 141	int i;
 142
 143	for (i = 0; i < tcb->q_depth; i++) {
 144		skb = unmap_q[i].skb;
 145		if (!skb)
 146			continue;
 147		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
 148
 149		dev_kfree_skb_any(skb);
 150	}
 151}
 152
 153/*
 154 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 155 * Can be called in a) Interrupt context
 156 *		    b) Sending context
 157 */
 158static u32
 159bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
 160{
 161	u32 sent_packets = 0, sent_bytes = 0;
 162	u32 wis, unmap_wis, hw_cons, cons, q_depth;
 163	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
 164	struct bnad_tx_unmap *unmap;
 165	struct sk_buff *skb;
 166
 167	/* Just return if TX is stopped */
 168	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 169		return 0;
 170
 171	hw_cons = *(tcb->hw_consumer_index);
 172	rmb();
 173	cons = tcb->consumer_index;
 174	q_depth = tcb->q_depth;
 175
 176	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
 177	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
 178
 179	while (wis) {
 180		unmap = &unmap_q[cons];
 181
 182		skb = unmap->skb;
 183
 184		sent_packets++;
 185		sent_bytes += skb->len;
 186
 187		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
 188		wis -= unmap_wis;
 189
 190		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
 191		dev_kfree_skb_any(skb);
 192	}
 193
 194	/* Update consumer pointers. */
 195	tcb->consumer_index = hw_cons;
 196
 197	tcb->txq->tx_packets += sent_packets;
 198	tcb->txq->tx_bytes += sent_bytes;
 199
 200	return sent_packets;
 201}
 202
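/*
 * Reclaim completed Tx buffers for one TxQ. The BNAD_TXQ_FREE_SENT bit
 * serializes this path with bnad_tx_cleanup(); if it is already set this
 * call is a no-op. Once enough queue entries are free the netdev queue is
 * woken, and the IB doorbell is acked with the number of completions
 * processed.
 */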
 203static u32
 204bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 205{
 206	struct net_device *netdev = bnad->netdev;
 207	u32 sent = 0;
 208
 209	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 210		return 0;
 211
 212	sent = bnad_txcmpl_process(bnad, tcb);
 213	if (sent) {
 214		if (netif_queue_stopped(netdev) &&
 215		    netif_carrier_ok(netdev) &&
 216		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 217				    BNAD_NETIF_WAKE_THRESHOLD) {
 218			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 219				netif_wake_queue(netdev);
 220				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
 221			}
 222		}
 223	}
 224
 225	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 226		bna_ib_ack(tcb->i_dbell, sent);
 227
 228	smp_mb__before_atomic();
 229	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 230
 231	return sent;
 232}
 233
 234/* MSIX Tx Completion Handler */
 235static irqreturn_t
 236bnad_msix_tx(int irq, void *data)
 237{
 238	struct bna_tcb *tcb = (struct bna_tcb *)data;
 239	struct bnad *bnad = tcb->bnad;
 240
 241	bnad_tx_complete(bnad, tcb);
 242
 243	return IRQ_HANDLED;
 244}
 245
 246static inline void
 247bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
 248{
 249	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 250
 251	unmap_q->reuse_pi = -1;
 252	unmap_q->alloc_order = -1;
 253	unmap_q->map_size = 0;
 254	unmap_q->type = BNAD_RXBUF_NONE;
 255}
 256
 257/* Default is page-based allocation. Multi-buffer support - TBD */
 258static int
 259bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 260{
 261	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 262	int order;
 263
 264	bnad_rxq_alloc_uninit(bnad, rcb);
 265
 266	order = get_order(rcb->rxq->buffer_size);
 267
 268	unmap_q->type = BNAD_RXBUF_PAGE;
 269
 270	if (bna_is_small_rxq(rcb->id)) {
 271		unmap_q->alloc_order = 0;
 272		unmap_q->map_size = rcb->rxq->buffer_size;
 273	} else {
 274		if (rcb->rxq->multi_buffer) {
 275			unmap_q->alloc_order = 0;
 276			unmap_q->map_size = rcb->rxq->buffer_size;
 277			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
 278		} else {
 279			unmap_q->alloc_order = order;
 280			unmap_q->map_size =
 281				(rcb->rxq->buffer_size > 2048) ?
 282				PAGE_SIZE << order : 2048;
 283		}
 284	}
 285
 286	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
 287
 288	return 0;
 289}
 290
 291static inline void
 292bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
 293{
 294	if (!unmap->page)
 295		return;
 296
 297	dma_unmap_page(&bnad->pcidev->dev,
 298			dma_unmap_addr(&unmap->vector, dma_addr),
 299			unmap->vector.len, DMA_FROM_DEVICE);
 300	put_page(unmap->page);
 301	unmap->page = NULL;
 302	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
 303	unmap->vector.len = 0;
 304}
 305
 306static inline void
 307bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
 308{
 309	if (!unmap->skb)
 310		return;
 311
 312	dma_unmap_single(&bnad->pcidev->dev,
 313			dma_unmap_addr(&unmap->vector, dma_addr),
 314			unmap->vector.len, DMA_FROM_DEVICE);
 315	dev_kfree_skb_any(unmap->skb);
 316	unmap->skb = NULL;
 317	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
 318	unmap->vector.len = 0;
 319}
 320
 321static void
 322bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 323{
 324	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 325	int i;
 326
 327	for (i = 0; i < rcb->q_depth; i++) {
 328		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 329
 330		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 331			bnad_rxq_cleanup_skb(bnad, unmap);
 332		else
 333			bnad_rxq_cleanup_page(bnad, unmap);
 334	}
 335	bnad_rxq_alloc_uninit(bnad, rcb);
 336}
 337
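/*
 * Refill an RxQ with page-based buffers. A page of order alloc_order is
 * carved into map_size chunks: while the page used by the previous slot
 * still has room (unmap_q->reuse_pi >= 0), the next chunk of that page is
 * taken with an extra page reference instead of allocating a new page.
 * Each chunk is DMA-mapped and its address written into the RxQ entry;
 * the producer doorbell is rung only if at least one buffer was posted
 * and the RxQ is in the POST_OK state.
 */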
 338static u32
 339bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
 340{
 341	u32 alloced, prod, q_depth;
 342	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 343	struct bnad_rx_unmap *unmap, *prev;
 344	struct bna_rxq_entry *rxent;
 345	struct page *page;
 346	u32 page_offset, alloc_size;
 347	dma_addr_t dma_addr;
 348
 349	prod = rcb->producer_index;
 350	q_depth = rcb->q_depth;
 351
 352	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
 353	alloced = 0;
 354
 355	while (nalloc--) {
 356		unmap = &unmap_q->unmap[prod];
 357
 358		if (unmap_q->reuse_pi < 0) {
 359			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
 360					unmap_q->alloc_order);
 361			page_offset = 0;
 362		} else {
 363			prev = &unmap_q->unmap[unmap_q->reuse_pi];
 364			page = prev->page;
 365			page_offset = prev->page_offset + unmap_q->map_size;
 366			get_page(page);
 367		}
 368
 369		if (unlikely(!page)) {
 370			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
 371			rcb->rxq->rxbuf_alloc_failed++;
 372			goto finishing;
 373		}
 374
 375		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
 376					unmap_q->map_size, DMA_FROM_DEVICE);
 377		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
 378			put_page(page);
 379			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
 380			rcb->rxq->rxbuf_map_failed++;
 381			goto finishing;
 382		}
 383
 384		unmap->page = page;
 385		unmap->page_offset = page_offset;
 386		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
 387		unmap->vector.len = unmap_q->map_size;
 388		page_offset += unmap_q->map_size;
 389
 390		if (page_offset < alloc_size)
 391			unmap_q->reuse_pi = prod;
 392		else
 393			unmap_q->reuse_pi = -1;
 394
 395		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
 396		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 397		BNA_QE_INDX_INC(prod, q_depth);
 398		alloced++;
 399	}
 400
 401finishing:
 402	if (likely(alloced)) {
 403		rcb->producer_index = prod;
 404		smp_mb();
 405		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
 406			bna_rxq_prod_indx_doorbell(rcb);
 407	}
 408
 409	return alloced;
 410}
 411
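/*
 * Refill an RxQ with skb-based buffers: one skb of rxq->buffer_size per
 * queue entry, DMA-mapped for the device. On an allocation or mapping
 * failure the refill stops early, and whatever was posted is made visible
 * to the hardware via the producer-index doorbell.
 */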
 412static u32
 413bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
 414{
 415	u32 alloced, prod, q_depth, buff_sz;
 416	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 417	struct bnad_rx_unmap *unmap;
 418	struct bna_rxq_entry *rxent;
 419	struct sk_buff *skb;
 420	dma_addr_t dma_addr;
 421
 422	buff_sz = rcb->rxq->buffer_size;
 423	prod = rcb->producer_index;
 424	q_depth = rcb->q_depth;
 425
 426	alloced = 0;
 427	while (nalloc--) {
 428		unmap = &unmap_q->unmap[prod];
 429
 430		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
 431
 432		if (unlikely(!skb)) {
 433			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
 434			rcb->rxq->rxbuf_alloc_failed++;
 435			goto finishing;
 436		}
 437
 438		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
 439					  buff_sz, DMA_FROM_DEVICE);
 440		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
 441			dev_kfree_skb_any(skb);
 442			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
 443			rcb->rxq->rxbuf_map_failed++;
 444			goto finishing;
 445		}
 446
 447		unmap->skb = skb;
 448		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
 449		unmap->vector.len = buff_sz;
 450
 451		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
 452		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 453		BNA_QE_INDX_INC(prod, q_depth);
 454		alloced++;
 455	}
 456
 457finishing:
 458	if (likely(alloced)) {
 459		rcb->producer_index = prod;
 460		smp_mb();
 461		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
 462			bna_rxq_prod_indx_doorbell(rcb);
 463	}
 464
 465	return alloced;
 466}
 467
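/*
 * Top up an RxQ. A refill is attempted only when the number of free
 * entries is at least (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT), so very
 * small batches are skipped; the skb or page refill routine is selected
 * by the unmap queue type.
 */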
 468static inline void
 469bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 470{
 471	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 472	u32 to_alloc;
 473
 474	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
 475	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
 476		return;
 477
 478	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 479		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
 480	else
 481		bnad_rxq_refill_page(bnad, rcb, to_alloc);
 482}
 483
 484#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 485					BNA_CQ_EF_IPV6 | \
 486					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
 487					BNA_CQ_EF_L4_CKSUM_OK)
 488
 489#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 490				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
 491#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
 492				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
 493#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 494				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 495#define flags_udp6 (BNA_CQ_EF_IPV6 | \
 496				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 497
 498static void
 499bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
 500		    u32 sop_ci, u32 nvecs)
 501{
 502	struct bnad_rx_unmap_q *unmap_q;
 503	struct bnad_rx_unmap *unmap;
 504	u32 ci, vec;
 505
 506	unmap_q = rcb->unmap_q;
 507	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
 508		unmap = &unmap_q->unmap[ci];
 509		BNA_QE_INDX_INC(ci, rcb->q_depth);
 510
 511		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 512			bnad_rxq_cleanup_skb(bnad, unmap);
 513		else
 514			bnad_rxq_cleanup_page(bnad, unmap);
 515	}
 516}
 517
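/*
 * Attach the receive buffers described by the next nvecs completion
 * entries as page fragments of an skb obtained via napi_get_frags().
 * Each buffer is DMA-unmapped and appended with skb_fill_page_desc();
 * skb->len, skb->data_len and skb->truesize are grown accordingly.
 * Used only for page/multi-buffer receive queues.
 */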
 518static void
 519bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
 520{
 521	struct bna_rcb *rcb;
 522	struct bnad *bnad;
 523	struct bnad_rx_unmap_q *unmap_q;
 524	struct bna_cq_entry *cq, *cmpl;
 525	u32 ci, pi, totlen = 0;
 526
 527	cq = ccb->sw_q;
 528	pi = ccb->producer_index;
 529	cmpl = &cq[pi];
 530
 531	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
 532	unmap_q = rcb->unmap_q;
 533	bnad = rcb->bnad;
 534	ci = rcb->consumer_index;
 535
 536	/* prefetch header */
 537	prefetch(page_address(unmap_q->unmap[ci].page) +
 538		 unmap_q->unmap[ci].page_offset);
 539
 540	while (nvecs--) {
 541		struct bnad_rx_unmap *unmap;
 542		u32 len;
 543
 544		unmap = &unmap_q->unmap[ci];
 545		BNA_QE_INDX_INC(ci, rcb->q_depth);
 546
 547		dma_unmap_page(&bnad->pcidev->dev,
 548			       dma_unmap_addr(&unmap->vector, dma_addr),
 549			       unmap->vector.len, DMA_FROM_DEVICE);
 550
 551		len = ntohs(cmpl->length);
 552		skb->truesize += unmap->vector.len;
 553		totlen += len;
 554
 555		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 556				   unmap->page, unmap->page_offset, len);
 557
 558		unmap->page = NULL;
 559		unmap->vector.len = 0;
 560
 561		BNA_QE_INDX_INC(pi, ccb->q_depth);
 562		cmpl = &cq[pi];
 563	}
 564
 565	skb->len += totlen;
 566	skb->data_len += totlen;
 567}
 568
 569static inline void
 570bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
 571		  struct bnad_rx_unmap *unmap, u32 len)
 572{
 573	prefetch(skb->data);
 574
 575	dma_unmap_single(&bnad->pcidev->dev,
 576			dma_unmap_addr(&unmap->vector, dma_addr),
 577			unmap->vector.len, DMA_FROM_DEVICE);
 578
 579	skb_put(skb, len);
 580	skb->protocol = eth_type_trans(skb, bnad->netdev);
 581
 582	unmap->skb = NULL;
 583	unmap->vector.len = 0;
 584}
 585
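/*
 * NAPI receive processing for one CQ. Completion entries are consumed
 * until the budget is exhausted or an entry with 'valid' cleared is seen.
 * For multi-buffer RxQs all completions of a frame are gathered up to the
 * EOP entry before the skb is built; frames with MAC/FCS/length errors
 * are dropped and their buffers freed. Checksum and VLAN offload results
 * are propagated to the skb, the IB is acked via bna_ib_ack_disable_irq(),
 * and the RxQs are replenished before returning.
 */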
 586static u32
 587bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 588{
 589	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
 590	struct bna_rcb *rcb = NULL;
 591	struct bnad_rx_unmap_q *unmap_q;
 592	struct bnad_rx_unmap *unmap = NULL;
 593	struct sk_buff *skb = NULL;
 594	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 595	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
 596	u32 packets = 0, len = 0, totlen = 0;
 597	u32 pi, vec, sop_ci = 0, nvecs = 0;
 598	u32 flags, masked_flags;
 599
 600	prefetch(bnad->netdev);
 601
 602	cq = ccb->sw_q;
 603
 604	while (packets < budget) {
 605		cmpl = &cq[ccb->producer_index];
 606		if (!cmpl->valid)
 607			break;
 608		/* The 'valid' field is set by the adapter, only after writing
 609		 * the other fields of completion entry. Hence, do not load
 610		 * other fields of completion entry *before* the 'valid' is
 611		 * loaded. Adding the rmb() here prevents the compiler and/or
 612		 * CPU from reordering the reads which would potentially result
 613		 * in reading stale values in completion entry.
 614		 */
 615		rmb();
 616
 617		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 618
 619		if (bna_is_small_rxq(cmpl->rxq_id))
 620			rcb = ccb->rcb[1];
 621		else
 622			rcb = ccb->rcb[0];
 623
 624		unmap_q = rcb->unmap_q;
 625
 626		/* start of packet ci */
 627		sop_ci = rcb->consumer_index;
 628
 629		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
 630			unmap = &unmap_q->unmap[sop_ci];
 631			skb = unmap->skb;
 632		} else {
 633			skb = napi_get_frags(&rx_ctrl->napi);
 634			if (unlikely(!skb))
 635				break;
 636		}
 637		prefetch(skb);
 638
 639		flags = ntohl(cmpl->flags);
 640		len = ntohs(cmpl->length);
 641		totlen = len;
 642		nvecs = 1;
 643
 644		/* Check all the completions for this frame.
 645		 * busy-wait doesn't help much, break here.
 646		 */
 647		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
 648		    (flags & BNA_CQ_EF_EOP) == 0) {
 649			pi = ccb->producer_index;
 650			do {
 651				BNA_QE_INDX_INC(pi, ccb->q_depth);
 652				next_cmpl = &cq[pi];
 653
 654				if (!next_cmpl->valid)
 655					break;
 656				/* The 'valid' field is set by the adapter, only
 657				 * after writing the other fields of completion
 658				 * entry. Hence, do not load other fields of
 659				 * completion entry *before* the 'valid' is
 660				 * loaded. Adding the rmb() here prevents the
 661				 * compiler and/or CPU from reordering the reads
 662				 * which would potentially result in reading
 663				 * stale values in completion entry.
 664				 */
 665				rmb();
 666
 667				len = ntohs(next_cmpl->length);
 668				flags = ntohl(next_cmpl->flags);
 669
 670				nvecs++;
 671				totlen += len;
 672			} while ((flags & BNA_CQ_EF_EOP) == 0);
 673
 674			if (!next_cmpl->valid)
 675				break;
 676		}
 677		packets++;
 678
 679		/* TODO: BNA_CQ_EF_LOCAL ? */
 680		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
 681						BNA_CQ_EF_FCS_ERROR |
 682						BNA_CQ_EF_TOO_LONG))) {
 683			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
 684			rcb->rxq->rx_packets_with_error++;
 685
 686			goto next;
 687		}
 688
 689		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 690			bnad_cq_setup_skb(bnad, skb, unmap, len);
 691		else
 692			bnad_cq_setup_skb_frags(ccb, skb, nvecs);
 693
 694		rcb->rxq->rx_packets++;
 695		rcb->rxq->rx_bytes += totlen;
 696		ccb->bytes_per_intr += totlen;
 697
 698		masked_flags = flags & flags_cksum_prot_mask;
 699
 700		if (likely
 701		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
 702		     ((masked_flags == flags_tcp4) ||
 703		      (masked_flags == flags_udp4) ||
 704		      (masked_flags == flags_tcp6) ||
 705		      (masked_flags == flags_udp6))))
 706			skb->ip_summed = CHECKSUM_UNNECESSARY;
 707		else
 708			skb_checksum_none_assert(skb);
 709
 710		if ((flags & BNA_CQ_EF_VLAN) &&
 711		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
 712			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 713
 714		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 715			netif_receive_skb(skb);
 716		else
 717			napi_gro_frags(&rx_ctrl->napi);
 718
 719next:
 720		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
 721		for (vec = 0; vec < nvecs; vec++) {
 722			cmpl = &cq[ccb->producer_index];
 723			cmpl->valid = 0;
 724			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
 725		}
 726	}
 727
 728	napi_gro_flush(&rx_ctrl->napi, false);
 729	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
 730		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
 731
 732	bnad_rxq_post(bnad, ccb->rcb[0]);
 733	if (ccb->rcb[1])
 734		bnad_rxq_post(bnad, ccb->rcb[1]);
 735
 736	return packets;
 737}
 738
 739static void
 740bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 741{
 742	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 743	struct napi_struct *napi = &rx_ctrl->napi;
 744
 745	if (likely(napi_schedule_prep(napi))) {
 746		__napi_schedule(napi);
 747		rx_ctrl->rx_schedule++;
 748	}
 749}
 750
 751/* MSIX Rx Path Handler */
 752static irqreturn_t
 753bnad_msix_rx(int irq, void *data)
 754{
 755	struct bna_ccb *ccb = (struct bna_ccb *)data;
 756
 757	if (ccb) {
 758		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
 759		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
 760	}
 761
 762	return IRQ_HANDLED;
 763}
 764
 765/* Interrupt handlers */
 766
 767/* Mbox Interrupt Handlers */
 768static irqreturn_t
 769bnad_msix_mbox_handler(int irq, void *data)
 770{
 771	u32 intr_status;
 772	unsigned long flags;
 773	struct bnad *bnad = (struct bnad *)data;
 774
 775	spin_lock_irqsave(&bnad->bna_lock, flags);
 776	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
 777		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 778		return IRQ_HANDLED;
 779	}
 780
 781	bna_intr_status_get(&bnad->bna, intr_status);
 782
 783	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 784		bna_mbox_handler(&bnad->bna, intr_status);
 785
 786	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 787
 788	return IRQ_HANDLED;
 789}
 790
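/*
 * INTx interrupt handler, shared with the mailbox. Mailbox events are
 * handled first under bna_lock; if the status also indicates a data
 * interrupt, Tx completions are processed inline and Rx processing is
 * handed off to NAPI.
 */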
 791static irqreturn_t
 792bnad_isr(int irq, void *data)
 793{
 794	int i, j;
 795	u32 intr_status;
 796	unsigned long flags;
 797	struct bnad *bnad = (struct bnad *)data;
 798	struct bnad_rx_info *rx_info;
 799	struct bnad_rx_ctrl *rx_ctrl;
 800	struct bna_tcb *tcb = NULL;
 801
 802	spin_lock_irqsave(&bnad->bna_lock, flags);
 803	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
 804		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 805		return IRQ_NONE;
 806	}
 807
 808	bna_intr_status_get(&bnad->bna, intr_status);
 809
 810	if (unlikely(!intr_status)) {
 811		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 812		return IRQ_NONE;
 813	}
 814
 815	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 816		bna_mbox_handler(&bnad->bna, intr_status);
 817
 818	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 819
 820	if (!BNA_IS_INTX_DATA_INTR(intr_status))
 821		return IRQ_HANDLED;
 822
 823	/* Process data interrupts */
 824	/* Tx processing */
 825	for (i = 0; i < bnad->num_tx; i++) {
 826		for (j = 0; j < bnad->num_txq_per_tx; j++) {
 827			tcb = bnad->tx_info[i].tcb[j];
 828			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 829				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
 830		}
 831	}
 832	/* Rx processing */
 833	for (i = 0; i < bnad->num_rx; i++) {
 834		rx_info = &bnad->rx_info[i];
 835		if (!rx_info->rx)
 836			continue;
 837		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
 838			rx_ctrl = &rx_info->rx_ctrl[j];
 839			if (rx_ctrl->ccb)
 840				bnad_netif_rx_schedule_poll(bnad,
 841							    rx_ctrl->ccb);
 842		}
 843	}
 844	return IRQ_HANDLED;
 845}
 846
 847/*
 848 * Called in interrupt / callback context
 849 * with bna_lock held, so cfg_flags access is OK
 850 */
 851static void
 852bnad_enable_mbox_irq(struct bnad *bnad)
 853{
 854	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 855
 856	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 857}
 858
 859/*
 860 * Called with bnad->bna_lock held b'cos of
 861 * bnad->cfg_flags access.
 862 */
 863static void
 864bnad_disable_mbox_irq(struct bnad *bnad)
 865{
 866	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 867
 868	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
 869}
 870
 871static void
 872bnad_set_netdev_perm_addr(struct bnad *bnad)
 873{
 874	struct net_device *netdev = bnad->netdev;
 875
 876	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
 877	if (is_zero_ether_addr(netdev->dev_addr))
 878		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
 879}
 880
 881/* Control Path Handlers */
 882
 883/* Callbacks */
 884void
 885bnad_cb_mbox_intr_enable(struct bnad *bnad)
 886{
 887	bnad_enable_mbox_irq(bnad);
 888}
 889
 890void
 891bnad_cb_mbox_intr_disable(struct bnad *bnad)
 892{
 893	bnad_disable_mbox_irq(bnad);
 894}
 895
 896void
 897bnad_cb_ioceth_ready(struct bnad *bnad)
 898{
 899	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 900	complete(&bnad->bnad_completions.ioc_comp);
 901}
 902
 903void
 904bnad_cb_ioceth_failed(struct bnad *bnad)
 905{
 906	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
 907	complete(&bnad->bnad_completions.ioc_comp);
 908}
 909
 910void
 911bnad_cb_ioceth_disabled(struct bnad *bnad)
 912{
 913	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 914	complete(&bnad->bnad_completions.ioc_comp);
 915}
 916
 917static void
 918bnad_cb_enet_disabled(void *arg)
 919{
 920	struct bnad *bnad = (struct bnad *)arg;
 921
 922	netif_carrier_off(bnad->netdev);
 923	complete(&bnad->bnad_completions.enet_comp);
 924}
 925
 926void
 927bnad_cb_ethport_link_status(struct bnad *bnad,
 928			enum bna_link_status link_status)
 929{
 930	bool link_up = false;
 931
 932	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 933
 934	if (link_status == BNA_CEE_UP) {
 935		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
 936			BNAD_UPDATE_CTR(bnad, cee_toggle);
 937		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
 938	} else {
 939		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
 940			BNAD_UPDATE_CTR(bnad, cee_toggle);
 941		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
 942	}
 943
 944	if (link_up) {
 945		if (!netif_carrier_ok(bnad->netdev)) {
 946			uint tx_id, tcb_id;
 947			netdev_info(bnad->netdev, "link up\n");
 948			netif_carrier_on(bnad->netdev);
 949			BNAD_UPDATE_CTR(bnad, link_toggle);
 950			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
 951				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
 952				      tcb_id++) {
 953					struct bna_tcb *tcb =
 954					bnad->tx_info[tx_id].tcb[tcb_id];
 955					u32 txq_id;
 956					if (!tcb)
 957						continue;
 958
 959					txq_id = tcb->id;
 960
 961					if (test_bit(BNAD_TXQ_TX_STARTED,
 962						     &tcb->flags)) {
 963						/*
 964						 * Force an immediate
 965						 * Transmit Schedule */
 966						netif_wake_subqueue(
 967								bnad->netdev,
 968								txq_id);
 969						BNAD_UPDATE_CTR(bnad,
 970							netif_queue_wakeup);
 971					} else {
 972						netif_stop_subqueue(
 973								bnad->netdev,
 974								txq_id);
 975						BNAD_UPDATE_CTR(bnad,
 976							netif_queue_stop);
 977					}
 978				}
 979			}
 980		}
 981	} else {
 982		if (netif_carrier_ok(bnad->netdev)) {
 983			netdev_info(bnad->netdev, "link down\n");
 984			netif_carrier_off(bnad->netdev);
 985			BNAD_UPDATE_CTR(bnad, link_toggle);
 986		}
 987	}
 988}
 989
 990static void
 991bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
 992{
 993	struct bnad *bnad = (struct bnad *)arg;
 994
 995	complete(&bnad->bnad_completions.tx_comp);
 996}
 997
 998static void
 999bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1000{
1001	struct bnad_tx_info *tx_info =
1002			(struct bnad_tx_info *)tcb->txq->tx->priv;
1003
1004	tcb->priv = tcb;
1005	tx_info->tcb[tcb->id] = tcb;
1006}
1007
1008static void
1009bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1010{
1011	struct bnad_tx_info *tx_info =
1012			(struct bnad_tx_info *)tcb->txq->tx->priv;
1013
1014	tx_info->tcb[tcb->id] = NULL;
1015	tcb->priv = NULL;
1016}
1017
1018static void
1019bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1020{
1021	struct bnad_rx_info *rx_info =
1022			(struct bnad_rx_info *)ccb->cq->rx->priv;
1023
1024	rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1026}
1027
1028static void
1029bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1030{
1031	struct bnad_rx_info *rx_info =
1032			(struct bnad_rx_info *)ccb->cq->rx->priv;
1033
1034	rx_info->rx_ctrl[ccb->id].ccb = NULL;
1035}
1036
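/*
 * Clear TX_STARTED on every TCB of this Tx object and stop the
 * corresponding netdev subqueues so the stack queues no further frames
 * to them.
 */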
1037static void
1038bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1039{
1040	struct bnad_tx_info *tx_info =
1041			(struct bnad_tx_info *)tx->priv;
1042	struct bna_tcb *tcb;
1043	u32 txq_id;
1044	int i;
1045
1046	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1047		tcb = tx_info->tcb[i];
1048		if (!tcb)
1049			continue;
1050		txq_id = tcb->id;
1051		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1052		netif_stop_subqueue(bnad->netdev, txq_id);
1053	}
1054}
1055
1056static void
1057bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1058{
1059	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1060	struct bna_tcb *tcb;
1061	u32 txq_id;
1062	int i;
1063
1064	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1065		tcb = tx_info->tcb[i];
1066		if (!tcb)
1067			continue;
1068		txq_id = tcb->id;
1069
1070		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1071		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1072		BUG_ON(*(tcb->hw_consumer_index) != 0);
1073
1074		if (netif_carrier_ok(bnad->netdev)) {
1075			netif_wake_subqueue(bnad->netdev, txq_id);
1076			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1077		}
1078	}
1079
1080	/*
1081	 * Workaround for first ioceth enable failure & we
1082	 * get a 0 MAC address. We try to get the MAC address
1083	 * again here.
1084	 */
1085	if (is_zero_ether_addr(bnad->perm_addr)) {
1086		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087		bnad_set_netdev_perm_addr(bnad);
1088	}
1089}
1090
1091/*
1092 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1093 */
1094static void
1095bnad_tx_cleanup(struct delayed_work *work)
1096{
1097	struct bnad_tx_info *tx_info =
1098		container_of(work, struct bnad_tx_info, tx_cleanup_work);
1099	struct bnad *bnad = NULL;
1100	struct bna_tcb *tcb;
1101	unsigned long flags;
1102	u32 i, pending = 0;
1103
1104	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1105		tcb = tx_info->tcb[i];
1106		if (!tcb)
1107			continue;
1108
1109		bnad = tcb->bnad;
1110
1111		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1112			pending++;
1113			continue;
1114		}
1115
1116		bnad_txq_cleanup(bnad, tcb);
1117
1118		smp_mb__before_atomic();
1119		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1120	}
1121
1122	if (pending) {
1123		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1124			msecs_to_jiffies(1));
1125		return;
1126	}
1127
1128	spin_lock_irqsave(&bnad->bna_lock, flags);
1129	bna_tx_cleanup_complete(tx_info->tx);
1130	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1131}
1132
1133static void
1134bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1135{
1136	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1137	struct bna_tcb *tcb;
1138	int i;
1139
1140	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1141		tcb = tx_info->tcb[i];
1142		if (!tcb)
1143			continue;
1144	}
1145
1146	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1147}
1148
1149static void
1150bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1151{
1152	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1153	struct bna_ccb *ccb;
1154	struct bnad_rx_ctrl *rx_ctrl;
1155	int i;
1156
1157	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1158		rx_ctrl = &rx_info->rx_ctrl[i];
1159		ccb = rx_ctrl->ccb;
1160		if (!ccb)
1161			continue;
1162
1163		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1164
1165		if (ccb->rcb[1])
1166			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1167	}
1168}
1169
1170/*
1171 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1172 */
1173static void
1174bnad_rx_cleanup(void *work)
1175{
1176	struct bnad_rx_info *rx_info =
1177		container_of(work, struct bnad_rx_info, rx_cleanup_work);
1178	struct bnad_rx_ctrl *rx_ctrl;
1179	struct bnad *bnad = NULL;
1180	unsigned long flags;
1181	u32 i;
1182
1183	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1184		rx_ctrl = &rx_info->rx_ctrl[i];
1185
1186		if (!rx_ctrl->ccb)
1187			continue;
1188
1189		bnad = rx_ctrl->ccb->bnad;
1190
1191		/*
1192		 * Wait till the poll handler has exited
1193		 * and nothing can be scheduled anymore
1194		 */
1195		napi_disable(&rx_ctrl->napi);
1196
1197		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199		if (rx_ctrl->ccb->rcb[1])
1200			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1201	}
1202
1203	spin_lock_irqsave(&bnad->bna_lock, flags);
1204	bna_rx_cleanup_complete(rx_info->rx);
1205	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1206}
1207
1208static void
1209bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1210{
1211	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1212	struct bna_ccb *ccb;
1213	struct bnad_rx_ctrl *rx_ctrl;
1214	int i;
1215
1216	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1217		rx_ctrl = &rx_info->rx_ctrl[i];
1218		ccb = rx_ctrl->ccb;
1219		if (!ccb)
1220			continue;
1221
1222		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1223
1224		if (ccb->rcb[1])
1225			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1226	}
1227
1228	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1229}
1230
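/*
 * Enable NAPI on every Rx path of this Rx object, (re)initialize the
 * unmap queues, mark the RxQs started and OK to post, and hand receive
 * buffers to the hardware.
 */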
1231static void
1232bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1233{
1234	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235	struct bna_ccb *ccb;
1236	struct bna_rcb *rcb;
1237	struct bnad_rx_ctrl *rx_ctrl;
1238	int i, j;
1239
1240	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1241		rx_ctrl = &rx_info->rx_ctrl[i];
1242		ccb = rx_ctrl->ccb;
1243		if (!ccb)
1244			continue;
1245
1246		napi_enable(&rx_ctrl->napi);
1247
1248		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1249			rcb = ccb->rcb[j];
1250			if (!rcb)
1251				continue;
1252
1253			bnad_rxq_alloc_init(bnad, rcb);
1254			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1255			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1256			bnad_rxq_post(bnad, rcb);
1257		}
1258	}
1259}
1260
1261static void
1262bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1263{
1264	struct bnad *bnad = (struct bnad *)arg;
1265
1266	complete(&bnad->bnad_completions.rx_comp);
1267}
1268
1269static void
1270bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1271{
1272	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273	complete(&bnad->bnad_completions.mcast_comp);
1274}
1275
1276void
1277bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1278		       struct bna_stats *stats)
1279{
1280	if (status == BNA_CB_SUCCESS)
1281		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1282
1283	if (!netif_running(bnad->netdev) ||
1284		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1285		return;
1286
1287	mod_timer(&bnad->stats_timer,
1288		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1289}
1290
1291static void
1292bnad_cb_enet_mtu_set(struct bnad *bnad)
1293{
1294	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295	complete(&bnad->bnad_completions.mtu_comp);
1296}
1297
1298void
1299bnad_cb_completion(void *arg, enum bfa_status status)
1300{
1301	struct bnad_iocmd_comp *iocmd_comp =
1302			(struct bnad_iocmd_comp *)arg;
1303
1304	iocmd_comp->comp_status = (u32) status;
1305	complete(&iocmd_comp->comp);
1306}
1307
1308/* Resource allocation, free functions */
1309
1310static void
1311bnad_mem_free(struct bnad *bnad,
1312	      struct bna_mem_info *mem_info)
1313{
1314	int i;
1315	dma_addr_t dma_pa;
1316
1317	if (mem_info->mdl == NULL)
1318		return;
1319
1320	for (i = 0; i < mem_info->num; i++) {
1321		if (mem_info->mdl[i].kva != NULL) {
1322			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1323				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1324						dma_pa);
1325				dma_free_coherent(&bnad->pcidev->dev,
1326						  mem_info->mdl[i].len,
1327						  mem_info->mdl[i].kva, dma_pa);
1328			} else
1329				kfree(mem_info->mdl[i].kva);
1330		}
1331	}
1332	kfree(mem_info->mdl);
1333	mem_info->mdl = NULL;
1334}
1335
1336static int
1337bnad_mem_alloc(struct bnad *bnad,
1338	       struct bna_mem_info *mem_info)
1339{
1340	int i;
1341	dma_addr_t dma_pa;
1342
1343	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1344		mem_info->mdl = NULL;
1345		return 0;
1346	}
1347
1348	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1349				GFP_KERNEL);
1350	if (mem_info->mdl == NULL)
1351		return -ENOMEM;
1352
1353	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1354		for (i = 0; i < mem_info->num; i++) {
1355			mem_info->mdl[i].len = mem_info->len;
1356			mem_info->mdl[i].kva =
1357				dma_alloc_coherent(&bnad->pcidev->dev,
1358						   mem_info->len, &dma_pa,
1359						   GFP_KERNEL);
1360			if (mem_info->mdl[i].kva == NULL)
1361				goto err_return;
1362
1363			BNA_SET_DMA_ADDR(dma_pa,
1364					 &(mem_info->mdl[i].dma));
1365		}
1366	} else {
1367		for (i = 0; i < mem_info->num; i++) {
1368			mem_info->mdl[i].len = mem_info->len;
1369			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1370							GFP_KERNEL);
1371			if (mem_info->mdl[i].kva == NULL)
1372				goto err_return;
1373		}
1374	}
1375
1376	return 0;
1377
1378err_return:
1379	bnad_mem_free(bnad, mem_info);
1380	return -ENOMEM;
1381}
1382
1383/* Free IRQ for Mailbox */
1384static void
1385bnad_mbox_irq_free(struct bnad *bnad)
1386{
1387	int irq;
1388	unsigned long flags;
1389
1390	spin_lock_irqsave(&bnad->bna_lock, flags);
1391	bnad_disable_mbox_irq(bnad);
1392	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1393
1394	irq = BNAD_GET_MBOX_IRQ(bnad);
1395	free_irq(irq, bnad);
1396}
1397
1398/*
1399 * Allocates IRQ for Mailbox, but keep it disabled
1400 * This will be enabled once we get the mbox enable callback
1401 * from bna
1402 */
1403static int
1404bnad_mbox_irq_alloc(struct bnad *bnad)
1405{
1406	int		err = 0;
1407	unsigned long	irq_flags, flags;
1408	u32	irq;
1409	irq_handler_t	irq_handler;
1410
1411	spin_lock_irqsave(&bnad->bna_lock, flags);
1412	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1414		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1415		irq_flags = 0;
1416	} else {
1417		irq_handler = (irq_handler_t)bnad_isr;
1418		irq = bnad->pcidev->irq;
1419		irq_flags = IRQF_SHARED;
1420	}
1421
1422	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1424
1425	/*
1426	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1427	 * called from request_irq() for SHARED IRQs do not execute
1428	 */
1429	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1430
1431	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1432
1433	err = request_irq(irq, irq_handler, irq_flags,
1434			  bnad->mbox_irq_name, bnad);
1435
1436	return err;
1437}
1438
1439static void
1440bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1441{
1442	kfree(intr_info->idl);
1443	intr_info->idl = NULL;
1444}
1445
1446/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
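/*
 * For MSIX the vector space is laid out as: the mailbox vector(s) first,
 * then one vector per TxQ across all Tx objects, then one vector per Rx
 * path. For INTx a single entry is filled with the Tx or Rx IB bitmask
 * instead of a vector number.
 */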
1447static int
1448bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1449		    u32 txrx_id, struct bna_intr_info *intr_info)
1450{
1451	int i, vector_start = 0;
1452	u32 cfg_flags;
1453	unsigned long flags;
1454
1455	spin_lock_irqsave(&bnad->bna_lock, flags);
1456	cfg_flags = bnad->cfg_flags;
1457	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1458
1459	if (cfg_flags & BNAD_CF_MSIX) {
1460		intr_info->intr_type = BNA_INTR_T_MSIX;
1461		intr_info->idl = kcalloc(intr_info->num,
1462					sizeof(struct bna_intr_descr),
1463					GFP_KERNEL);
1464		if (!intr_info->idl)
1465			return -ENOMEM;
1466
1467		switch (src) {
1468		case BNAD_INTR_TX:
1469			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1470			break;
1471
1472		case BNAD_INTR_RX:
1473			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1474					(bnad->num_tx * bnad->num_txq_per_tx) +
1475					txrx_id;
1476			break;
1477
1478		default:
1479			BUG();
1480		}
1481
1482		for (i = 0; i < intr_info->num; i++)
1483			intr_info->idl[i].vector = vector_start + i;
1484	} else {
1485		intr_info->intr_type = BNA_INTR_T_INTX;
1486		intr_info->num = 1;
1487		intr_info->idl = kcalloc(intr_info->num,
1488					sizeof(struct bna_intr_descr),
1489					GFP_KERNEL);
1490		if (!intr_info->idl)
1491			return -ENOMEM;
1492
1493		switch (src) {
1494		case BNAD_INTR_TX:
1495			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1496			break;
1497
1498		case BNAD_INTR_RX:
1499			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1500			break;
1501		}
1502	}
1503	return 0;
1504}
1505
1506/* NOTE: Should be called for MSIX only
1507 * Unregisters Tx MSIX vector(s) from the kernel
1508 */
1509static void
1510bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1511			int num_txqs)
1512{
1513	int i;
1514	int vector_num;
1515
1516	for (i = 0; i < num_txqs; i++) {
1517		if (tx_info->tcb[i] == NULL)
1518			continue;
1519
1520		vector_num = tx_info->tcb[i]->intr_vector;
1521		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1522	}
1523}
1524
1525/* NOTE: Should be called for MSIX only
1526 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1527 */
1528static int
1529bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1530			u32 tx_id, int num_txqs)
1531{
1532	int i;
1533	int err;
1534	int vector_num;
1535
1536	for (i = 0; i < num_txqs; i++) {
1537		vector_num = tx_info->tcb[i]->intr_vector;
1538		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539				tx_id + tx_info->tcb[i]->id);
1540		err = request_irq(bnad->msix_table[vector_num].vector,
1541				  (irq_handler_t)bnad_msix_tx, 0,
1542				  tx_info->tcb[i]->name,
1543				  tx_info->tcb[i]);
1544		if (err)
1545			goto err_return;
1546	}
1547
1548	return 0;
1549
1550err_return:
1551	if (i > 0)
1552		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1553	return -1;
1554}
1555
1556/* NOTE: Should be called for MSIX only
1557 * Unregisters Rx MSIX vector(s) from the kernel
1558 */
1559static void
1560bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1561			int num_rxps)
1562{
1563	int i;
1564	int vector_num;
1565
1566	for (i = 0; i < num_rxps; i++) {
1567		if (rx_info->rx_ctrl[i].ccb == NULL)
1568			continue;
1569
1570		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571		free_irq(bnad->msix_table[vector_num].vector,
1572			 rx_info->rx_ctrl[i].ccb);
1573	}
1574}
1575
1576/* NOTE: Should be called for MSIX only
1577 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1578 */
1579static int
1580bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581			u32 rx_id, int num_rxps)
1582{
1583	int i;
1584	int err;
1585	int vector_num;
1586
1587	for (i = 0; i < num_rxps; i++) {
1588		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1590			bnad->netdev->name,
1591			rx_id + rx_info->rx_ctrl[i].ccb->id);
1592		err = request_irq(bnad->msix_table[vector_num].vector,
1593				  (irq_handler_t)bnad_msix_rx, 0,
1594				  rx_info->rx_ctrl[i].ccb->name,
1595				  rx_info->rx_ctrl[i].ccb);
1596		if (err)
1597			goto err_return;
1598	}
1599
1600	return 0;
1601
1602err_return:
1603	if (i > 0)
1604		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1605	return -1;
1606}
1607
1608/* Free Tx object Resources */
1609static void
1610bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1611{
1612	int i;
1613
1614	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615		if (res_info[i].res_type == BNA_RES_T_MEM)
1616			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617		else if (res_info[i].res_type == BNA_RES_T_INTR)
1618			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1619	}
1620}
1621
1622/* Allocates memory and interrupt resources for Tx object */
1623static int
1624bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1625		  u32 tx_id)
1626{
1627	int i, err = 0;
1628
1629	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630		if (res_info[i].res_type == BNA_RES_T_MEM)
1631			err = bnad_mem_alloc(bnad,
1632					&res_info[i].res_u.mem_info);
1633		else if (res_info[i].res_type == BNA_RES_T_INTR)
1634			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635					&res_info[i].res_u.intr_info);
1636		if (err)
1637			goto err_return;
1638	}
1639	return 0;
1640
1641err_return:
1642	bnad_tx_res_free(bnad, res_info);
1643	return err;
1644}
1645
1646/* Free Rx object Resources */
1647static void
1648bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1649{
1650	int i;
1651
1652	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653		if (res_info[i].res_type == BNA_RES_T_MEM)
1654			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655		else if (res_info[i].res_type == BNA_RES_T_INTR)
1656			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1657	}
1658}
1659
1660/* Allocates memory and interrupt resources for Rx object */
1661static int
1662bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1663		  uint rx_id)
1664{
1665	int i, err = 0;
1666
1667	/* All memory needs to be allocated before setup_ccbs */
1668	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669		if (res_info[i].res_type == BNA_RES_T_MEM)
1670			err = bnad_mem_alloc(bnad,
1671					&res_info[i].res_u.mem_info);
1672		else if (res_info[i].res_type == BNA_RES_T_INTR)
1673			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674					&res_info[i].res_u.intr_info);
1675		if (err)
1676			goto err_return;
1677	}
1678	return 0;
1679
1680err_return:
1681	bnad_rx_res_free(bnad, res_info);
1682	return err;
1683}
1684
1685/* Timer callbacks */
1686/* a) IOC timer */
1687static void
1688bnad_ioc_timeout(struct timer_list *t)
1689{
1690	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691	unsigned long flags;
1692
1693	spin_lock_irqsave(&bnad->bna_lock, flags);
1694	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1696}
1697
1698static void
1699bnad_ioc_hb_check(struct timer_list *t)
1700{
1701	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702	unsigned long flags;
1703
1704	spin_lock_irqsave(&bnad->bna_lock, flags);
1705	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707}
1708
1709static void
1710bnad_iocpf_timeout(struct timer_list *t)
1711{
1712	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713	unsigned long flags;
1714
1715	spin_lock_irqsave(&bnad->bna_lock, flags);
1716	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1718}
1719
1720static void
1721bnad_iocpf_sem_timeout(struct timer_list *t)
1722{
1723	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724	unsigned long flags;
1725
1726	spin_lock_irqsave(&bnad->bna_lock, flags);
1727	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729}
1730
1731/*
1732 * All timer routines use bnad->bna_lock to protect against
1733 * the following race, which may occur in case of no locking:
1734 *	Time	CPU m	CPU n
1735 *	0       1 = test_bit
1736 *	1			clear_bit
1737 *	2			del_timer_sync
1738 *	3	mod_timer
1739 */
1740
1741/* b) Dynamic Interrupt Moderation Timer */
1742static void
1743bnad_dim_timeout(struct timer_list *t)
1744{
1745	struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746	struct bnad_rx_info *rx_info;
1747	struct bnad_rx_ctrl *rx_ctrl;
1748	int i, j;
1749	unsigned long flags;
1750
1751	if (!netif_carrier_ok(bnad->netdev))
1752		return;
1753
1754	spin_lock_irqsave(&bnad->bna_lock, flags);
1755	for (i = 0; i < bnad->num_rx; i++) {
1756		rx_info = &bnad->rx_info[i];
1757		if (!rx_info->rx)
1758			continue;
1759		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760			rx_ctrl = &rx_info->rx_ctrl[j];
1761			if (!rx_ctrl->ccb)
1762				continue;
1763			bna_rx_dim_update(rx_ctrl->ccb);
1764		}
1765	}
1766
1767	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1768	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769		mod_timer(&bnad->dim_timer,
1770			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1772}
1773
1774/* c)  Statistics Timer */
1775static void
1776bnad_stats_timeout(struct timer_list *t)
1777{
1778	struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779	unsigned long flags;
1780
1781	if (!netif_running(bnad->netdev) ||
1782		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1783		return;
1784
1785	spin_lock_irqsave(&bnad->bna_lock, flags);
1786	bna_hw_stats_get(&bnad->bna);
1787	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788}
1789
1790/*
1791 * Set up timer for DIM
1792 * Called with bnad->bna_lock held
1793 */
1794void
1795bnad_dim_timer_start(struct bnad *bnad)
1796{
1797	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801		mod_timer(&bnad->dim_timer,
1802			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1803	}
1804}
1805
1806/*
1807 * Set up timer for statistics
1808 * Called with mutex_lock(&bnad->conf_mutex) held
1809 */
1810static void
1811bnad_stats_timer_start(struct bnad *bnad)
1812{
1813	unsigned long flags;
1814
1815	spin_lock_irqsave(&bnad->bna_lock, flags);
1816	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818		mod_timer(&bnad->stats_timer,
1819			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1820	}
1821	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1822}
1823
1824/*
1825 * Stops the stats timer
1826 * Called with mutex_lock(&bnad->conf_mutex) held
1827 */
1828static void
1829bnad_stats_timer_stop(struct bnad *bnad)
1830{
1831	int to_del = 0;
1832	unsigned long flags;
1833
1834	spin_lock_irqsave(&bnad->bna_lock, flags);
1835	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836		to_del = 1;
1837	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838	if (to_del)
1839		del_timer_sync(&bnad->stats_timer);
1840}
1841
1842/* Utilities */
1843
1844static void
1845bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1846{
1847	int i = 1; /* Index 0 has broadcast address */
1848	struct netdev_hw_addr *mc_addr;
1849
1850	netdev_for_each_mc_addr(mc_addr, netdev) {
1851		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1852		i++;
1853	}
1854}
1855
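/*
 * NAPI poll handler: process up to 'budget' completions from this CQ.
 * When the carrier is down, or fewer than 'budget' packets were received,
 * polling is completed and the CQ interrupt is re-enabled via
 * bnad_enable_rx_irq_unsafe().
 */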
1856static int
1857bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1858{
1859	struct bnad_rx_ctrl *rx_ctrl =
1860		container_of(napi, struct bnad_rx_ctrl, napi);
1861	struct bnad *bnad = rx_ctrl->bnad;
1862	int rcvd = 0;
1863
1864	rx_ctrl->rx_poll_ctr++;
1865
1866	if (!netif_carrier_ok(bnad->netdev))
1867		goto poll_exit;
1868
1869	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1870	if (rcvd >= budget)
1871		return rcvd;
1872
1873poll_exit:
1874	napi_complete_done(napi, rcvd);
1875
1876	rx_ctrl->rx_complete++;
1877
1878	if (rx_ctrl->ccb)
1879		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1880
1881	return rcvd;
1882}
1883
1884#define BNAD_NAPI_POLL_QUOTA		64
1885static void
1886bnad_napi_add(struct bnad *bnad, u32 rx_id)
1887{
1888	struct bnad_rx_ctrl *rx_ctrl;
1889	int i;
1890
1891	/* Initialize & enable NAPI */
1892	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1893		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1894		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1895			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1896	}
1897}
1898
1899static void
1900bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1901{
1902	int i;
1903
1904	/* First disable and then clean up */
1905	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1906		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1907}
1908
1909/* Should be called with conf_lock held */
1910void
1911bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1912{
1913	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1914	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1915	unsigned long flags;
1916
1917	if (!tx_info->tx)
1918		return;
1919
1920	init_completion(&bnad->bnad_completions.tx_comp);
1921	spin_lock_irqsave(&bnad->bna_lock, flags);
1922	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1923	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924	wait_for_completion(&bnad->bnad_completions.tx_comp);
1925
1926	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1927		bnad_tx_msix_unregister(bnad, tx_info,
1928			bnad->num_txq_per_tx);
1929
1930	spin_lock_irqsave(&bnad->bna_lock, flags);
1931	bna_tx_destroy(tx_info->tx);
1932	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1933
1934	tx_info->tx = NULL;
1935	tx_info->tx_id = 0;
1936
1937	bnad_tx_res_free(bnad, res_info);
1938}
1939
1940/* Should be called with conf_lock held */
1941int
1942bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1943{
1944	int err;
1945	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1946	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1947	struct bna_intr_info *intr_info =
1948			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1949	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1950	static const struct bna_tx_event_cbfn tx_cbfn = {
1951		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1952		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1953		.tx_stall_cbfn = bnad_cb_tx_stall,
1954		.tx_resume_cbfn = bnad_cb_tx_resume,
1955		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1956	};
1957
1958	struct bna_tx *tx;
1959	unsigned long flags;
1960
1961	tx_info->tx_id = tx_id;
1962
1963	/* Initialize the Tx object configuration */
1964	tx_config->num_txq = bnad->num_txq_per_tx;
1965	tx_config->txq_depth = bnad->txq_depth;
1966	tx_config->tx_type = BNA_TX_T_REGULAR;
1967	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1968
1969	/* Get BNA's resource requirement for one tx object */
1970	spin_lock_irqsave(&bnad->bna_lock, flags);
1971	bna_tx_res_req(bnad->num_txq_per_tx,
1972		bnad->txq_depth, res_info);
1973	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1974
1975	/* Fill Unmap Q memory requirements */
1976	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1977			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1978			bnad->txq_depth));
1979
1980	/* Allocate resources */
1981	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1982	if (err)
1983		return err;
1984
1985	/* Ask BNA to create one Tx object, supplying required resources */
1986	spin_lock_irqsave(&bnad->bna_lock, flags);
1987	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1988			tx_info);
1989	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1990	if (!tx) {
1991		err = -ENOMEM;
1992		goto err_return;
1993	}
1994	tx_info->tx = tx;
1995
1996	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1997			(work_func_t)bnad_tx_cleanup);
1998
1999	/* Register ISR for the Tx object */
2000	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2001		err = bnad_tx_msix_register(bnad, tx_info,
2002			tx_id, bnad->num_txq_per_tx);
2003		if (err)
2004			goto cleanup_tx;
2005	}
2006
2007	spin_lock_irqsave(&bnad->bna_lock, flags);
2008	bna_tx_enable(tx);
2009	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2010
2011	return 0;
2012
2013cleanup_tx:
2014	spin_lock_irqsave(&bnad->bna_lock, flags);
2015	bna_tx_destroy(tx_info->tx);
2016	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2017	tx_info->tx = NULL;
2018	tx_info->tx_id = 0;
2019err_return:
2020	bnad_tx_res_free(bnad, res_info);
2021	return err;
2022}
2023
2024/* Setup the rx config for bna_rx_create */
2025/* bnad decides the configuration */
2026static void
2027bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2028{
2029	memset(rx_config, 0, sizeof(*rx_config));
2030	rx_config->rx_type = BNA_RX_T_REGULAR;
2031	rx_config->num_paths = bnad->num_rxp_per_rx;
2032	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2033
2034	if (bnad->num_rxp_per_rx > 1) {
2035		rx_config->rss_status = BNA_STATUS_T_ENABLED;
2036		rx_config->rss_config.hash_type =
2037				(BFI_ENET_RSS_IPV6 |
2038				 BFI_ENET_RSS_IPV6_TCP |
2039				 BFI_ENET_RSS_IPV4 |
2040				 BFI_ENET_RSS_IPV4_TCP);
2041		rx_config->rss_config.hash_mask =
2042				bnad->num_rxp_per_rx - 1;
2043		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2044			sizeof(rx_config->rss_config.toeplitz_hash_key));
2045	} else {
2046		rx_config->rss_status = BNA_STATUS_T_DISABLED;
2047		memset(&rx_config->rss_config, 0,
2048		       sizeof(rx_config->rss_config));
2049	}
2050
2051	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2052	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2053
2054	/* BNA_RXP_SINGLE - one data-buffer queue
2055	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2056	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2057	 */
2058	/* TODO: configurable param for queue type */
2059	rx_config->rxp_type = BNA_RXP_SLR;
2060
2061	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2062	    rx_config->frame_size > 4096) {
2063		/* though size_routing_enable is set in SLR,
2064		 * small packets may get routed to same rxq.
2065		 * set buf_size to 2048 instead of PAGE_SIZE.
2066		 */
2067		rx_config->q0_buf_size = 2048;
2068		/* this should be in multiples of 2 */
2069		rx_config->q0_num_vecs = 4;
2070		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2071		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2072	} else {
2073		rx_config->q0_buf_size = rx_config->frame_size;
2074		rx_config->q0_num_vecs = 1;
2075		rx_config->q0_depth 

(Listing truncated at this point; see the repository for the remainder of the file.)