
/drivers/net/bna/bna_txrx.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
C | 4185 lines | 3204 code | 696 blank | 285 comment | 312 complexity | c8af6c0c5ef9c147e562447b9d878faa MD5
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0


   1/*
   2 * Linux network driver for Brocade Converged Network Adapter.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License (GPL) Version 2 as
   6 * published by the Free Software Foundation
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12  */
  13/*
  14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15 * All rights reserved
  16 * www.brocade.com
  17 */
  18#include "bna.h"
  19#include "bfa_sm.h"
  20#include "bfi.h"
  21
  22/**
  23 * IB
  24 */
  25#define bna_ib_find_free_ibidx(_mask, _pos)\
  26do {\
  27	(_pos) = 0;\
  28	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
  29		((1 << (_pos)) & (_mask)))\
  30		(_pos)++;\
  31} while (0)
  32
  33#define bna_ib_count_ibidx(_mask, _count)\
  34do {\
  35	int pos = 0;\
  36	(_count) = 0;\
  37	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
  38		if ((1 << pos) & (_mask))\
  39			(_count) = pos + 1;\
  40		pos++;\
  41	} \
  42} while (0)
  43
  44#define bna_ib_select_segpool(_count, _q_idx)\
  45do {\
  46	int i;\
  47	(_q_idx) = -1;\
  48	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
  49		if ((_count <= ibidx_pool[i].pool_entry_size)) {\
  50			(_q_idx) = i;\
  51			break;\
  52		} \
  53	} \
  54} while (0)
  55
  56struct bna_ibidx_pool {
  57	int	pool_size;
  58	int	pool_entry_size;
  59};
  60init_ibidx_pool(ibidx_pool);
  61
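/*
 * Editor's sketch (illustrative, not part of the driver): how the ibidx
 * bitmask macros above behave for one sample mask. The segment-pool pick
 * depends on the entry sizes set up by init_ibidx_pool(), which is defined
 * elsewhere, so that result is only indicative.
 */
static void
bna_ibidx_macros_example(void)
{
	int pos, count, q_idx;
	int mask = 0x0b;			/* index positions 0, 1 and 3 in use */

	bna_ib_find_free_ibidx(mask, pos);	/* pos == 2, the lowest clear bit */
	bna_ib_count_ibidx(mask, count);	/* count == 4, highest set bit + 1 */
	bna_ib_select_segpool(count, q_idx);	/* first pool with entry_size >= count */
}
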
  62static struct bna_intr *
  63bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
  64		int vector)
  65{
  66	struct bna_intr *intr;
  67	struct list_head *qe;
  68
  69	list_for_each(qe, &ib_mod->intr_active_q) {
  70		intr = (struct bna_intr *)qe;
  71
  72		if ((intr->intr_type == intr_type) &&
  73			(intr->vector == vector)) {
  74			intr->ref_count++;
  75			return intr;
  76		}
  77	}
  78
  79	if (list_empty(&ib_mod->intr_free_q))
  80		return NULL;
  81
  82	bfa_q_deq(&ib_mod->intr_free_q, &intr);
  83	bfa_q_qe_init(&intr->qe);
  84
  85	intr->ref_count = 1;
  86	intr->intr_type = intr_type;
  87	intr->vector = vector;
  88
  89	list_add_tail(&intr->qe, &ib_mod->intr_active_q);
  90
  91	return intr;
  92}
  93
  94static void
  95bna_intr_put(struct bna_ib_mod *ib_mod,
  96		struct bna_intr *intr)
  97{
  98	intr->ref_count--;
  99
 100	if (intr->ref_count == 0) {
 101		intr->ib = NULL;
 102		list_del(&intr->qe);
 103		bfa_q_qe_init(&intr->qe);
 104		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
 105	}
 106}
 107
 108void
 109bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
 110		struct bna_res_info *res_info)
 111{
 112	int i;
 113	int j;
 114	int count;
 115	u8 offset;
 116	struct bna_doorbell_qset *qset;
 117	unsigned long off;
 118
 119	ib_mod->bna = bna;
 120
 121	ib_mod->ib = (struct bna_ib *)
 122		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
 123	ib_mod->intr = (struct bna_intr *)
 124		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
 125	ib_mod->idx_seg = (struct bna_ibidx_seg *)
 126		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
 127
 128	INIT_LIST_HEAD(&ib_mod->ib_free_q);
 129	INIT_LIST_HEAD(&ib_mod->intr_free_q);
 130	INIT_LIST_HEAD(&ib_mod->intr_active_q);
 131
 132	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
 133		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
 134
 135	for (i = 0; i < BFI_MAX_IB; i++) {
 136		ib_mod->ib[i].ib_id = i;
 137
 138		ib_mod->ib[i].ib_seg_host_addr_kva =
 139		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
 140		ib_mod->ib[i].ib_seg_host_addr.lsb =
 141		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
 142		ib_mod->ib[i].ib_seg_host_addr.msb =
 143		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
 144
 145		qset = (struct bna_doorbell_qset *)0;
 146		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
 147					* (0x20 >> 2)]);
 148		ib_mod->ib[i].door_bell.doorbell_addr = off +
 149			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
 150
 151		bfa_q_qe_init(&ib_mod->ib[i].qe);
 152		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
 153
 154		bfa_q_qe_init(&ib_mod->intr[i].qe);
 155		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
 156	}
 157
 158	count = 0;
 159	offset = 0;
 160	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
 161		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
 162			bfa_q_qe_init(&ib_mod->idx_seg[count]);
 163			ib_mod->idx_seg[count].ib_seg_size =
 164					ibidx_pool[i].pool_entry_size;
 165			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
 166			list_add_tail(&ib_mod->idx_seg[count].qe,
 167				&ib_mod->ibidx_seg_pool[i]);
 168			count++;
 169			offset += ibidx_pool[i].pool_entry_size;
 170		}
 171	}
 172}
 173
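/*
 * Editor's note (illustrative, not part of the driver): the
 * "qset = (struct bna_doorbell_qset *)0" pattern in bna_ib_mod_init() above
 * computes a byte offset into the doorbell register area without
 * dereferencing anything, in the spirit of offsetof(); the offset is then
 * added to the mapped doorbell base. A minimal sketch of that step:
 */
static unsigned long
bna_ib_doorbell_offset_example(int i)
{
	struct bna_doorbell_qset *qset = (struct bna_doorbell_qset *)0;

	/* Offset of IB doorbell 'i'; the caller adds
	 * BNA_GET_DOORBELL_BASE_ADDR() to form the final address.
	 */
	return (unsigned long)(&qset[i >> 1].ib0[(i & 0x1) * (0x20 >> 2)]);
}
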
 174void
 175bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
 176{
 177	int i;
 178	int j;
 179	struct list_head *qe;
 180
 181	i = 0;
 182	list_for_each(qe, &ib_mod->ib_free_q)
 183		i++;
 184
 185	i = 0;
 186	list_for_each(qe, &ib_mod->intr_free_q)
 187		i++;
 188
 189	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
 190		j = 0;
 191		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
 192			j++;
 193	}
 194
 195	ib_mod->bna = NULL;
 196}
 197
 198static struct bna_ib *
 199bna_ib_get(struct bna_ib_mod *ib_mod,
 200		enum bna_intr_type intr_type,
 201		int vector)
 202{
 203	struct bna_ib *ib;
 204	struct bna_intr *intr;
 205
 206	if (intr_type == BNA_INTR_T_INTX)
 207		vector = (1 << vector);
 208
 209	intr = bna_intr_get(ib_mod, intr_type, vector);
 210	if (intr == NULL)
 211		return NULL;
 212
 213	if (intr->ib) {
 214		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
 215			bna_intr_put(ib_mod, intr);
 216			return NULL;
 217		}
 218		intr->ib->ref_count++;
 219		return intr->ib;
 220	}
 221
 222	if (list_empty(&ib_mod->ib_free_q)) {
 223		bna_intr_put(ib_mod, intr);
 224		return NULL;
 225	}
 226
 227	bfa_q_deq(&ib_mod->ib_free_q, &ib);
 228	bfa_q_qe_init(&ib->qe);
 229
 230	ib->ref_count = 1;
 231	ib->start_count = 0;
 232	ib->idx_mask = 0;
 233
 234	ib->intr = intr;
 235	ib->idx_seg = NULL;
 236	intr->ib = ib;
 237
 238	ib->bna = ib_mod->bna;
 239
 240	return ib;
 241}
 242
 243static void
 244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
 245{
 246	bna_intr_put(ib_mod, ib->intr);
 247
 248	ib->ref_count--;
 249
 250	if (ib->ref_count == 0) {
 251		ib->intr = NULL;
 252		ib->bna = NULL;
 253		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
 254	}
 255}
 256
 257/* Returns index offset - starting from 0 */
 258static int
 259bna_ib_reserve_idx(struct bna_ib *ib)
 260{
 261	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
 262	struct bna_ibidx_seg *idx_seg;
 263	int idx;
 264	int num_idx;
 265	int q_idx;
 266
 267	/* Find the first free index position */
 268	bna_ib_find_free_ibidx(ib->idx_mask, idx);
 269	if (idx == BFI_IBIDX_MAX_SEGSIZE)
 270		return -1;
 271
 272	/*
 273	 * Calculate the total number of indexes held by this IB,
 274	 * including the index newly reserved above.
 275	 */
 276	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
 277
 278	/* See if there is a free space in the index segment held by this IB */
 279	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
 280		ib->idx_mask |= (1 << idx);
 281		return idx;
 282	}
 283
 284	if (ib->start_count)
 285		return -1;
 286
 287	/* Allocate a new segment */
 288	bna_ib_select_segpool(num_idx, q_idx);
 289	while (1) {
 290		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
 291			return -1;
 292		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
 293			break;
 294		q_idx++;
 295	}
 296	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
 297	bfa_q_qe_init(&idx_seg->qe);
 298
 299	/* Free the old segment */
 300	if (ib->idx_seg) {
 301		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
 302		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
 303	}
 304
 305	ib->idx_seg = idx_seg;
 306
 307	ib->idx_mask |= (1 << idx);
 308
 309	return idx;
 310}
 311
 312static void
 313bna_ib_release_idx(struct bna_ib *ib, int idx)
 314{
 315	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
 316	struct bna_ibidx_seg *idx_seg;
 317	int num_idx;
 318	int cur_q_idx;
 319	int new_q_idx;
 320
 321	ib->idx_mask &= ~(1 << idx);
 322
 323	if (ib->start_count)
 324		return;
 325
 326	bna_ib_count_ibidx(ib->idx_mask, num_idx);
 327
 328	/*
 329	 * Free the segment, if there are no more indexes in the segment
 330	 * held by this IB
 331	 */
 332	if (!num_idx) {
 333		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
 334		list_add_tail(&ib->idx_seg->qe,
 335			&ib_mod->ibidx_seg_pool[cur_q_idx]);
 336		ib->idx_seg = NULL;
 337		return;
 338	}
 339
 340	/* See if we can move to a smaller segment */
 341	bna_ib_select_segpool(num_idx, new_q_idx);
 342	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
 343	while (new_q_idx < cur_q_idx) {
 344		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
 345			break;
 346		new_q_idx++;
 347	}
 348	if (new_q_idx < cur_q_idx) {
 349		/* Select the new smaller segment */
 350		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
 351		bfa_q_qe_init(&idx_seg->qe);
 352		/* Free the old segment */
 353		list_add_tail(&ib->idx_seg->qe,
 354			&ib_mod->ibidx_seg_pool[cur_q_idx]);
 355		ib->idx_seg = idx_seg;
 356	}
 357}
 358
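/*
 * Editor's sketch (illustrative, not part of the driver): index reservation
 * grows and shrinks the IB's index segment on demand. Whether the second
 * reservation forces a move to a larger segment depends on the pool entry
 * sizes from init_ibidx_pool(), so the comments below are indicative only.
 */
static void
bna_ib_reserve_release_example(struct bna_ib *ib)
{
	int i0 = bna_ib_reserve_idx(ib);	/* 0: idx_mask 0x1, segment taken from a pool */
	int i1 = bna_ib_reserve_idx(ib);	/* 1: idx_mask 0x3, segment upsized if needed */

	bna_ib_release_idx(ib, i1);		/* idx_mask 0x1, may move to a smaller segment */
	bna_ib_release_idx(ib, i0);		/* idx_mask 0x0, segment returned to its pool */
}
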
 359static int
 360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
 361{
 362	if (ib->start_count)
 363		return -1;
 364
 365	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
 366	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
 367	ib->ib_config.interpkt_count = ib_config->interpkt_count;
 368	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
 369
 370	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
 371	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
 372		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
 373
 374	return 0;
 375}
 376
 377static void
 378bna_ib_start(struct bna_ib *ib)
 379{
 380	struct bna_ib_blk_mem ib_cfg;
 381	struct bna_ib_blk_mem *ib_mem;
 382	u32 pg_num;
 383	u32 intx_mask;
 384	int i;
 385	void __iomem *base_addr;
 386	unsigned long off;
 387
 388	ib->start_count++;
 389
 390	if (ib->start_count > 1)
 391		return;
 392
 393	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
 394	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
 395
 396	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
 397				     ib->ib_config.coalescing_timeo << 16) |
 398				((u32)ib->ib_config.ctrl_flags << 8) |
 399				(ib->intr->vector));
 400	ib_cfg.ipkt_n_ent_n_idxof =
 401				((u32)
 402				 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
 403				((u32)ib->idx_seg->ib_seg_size << 8) |
 404				(ib->idx_seg->ib_idx_tbl_offset);
 405	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
 406					 ib->ib_config.interpkt_count << 24);
 407
 408	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
 409				HQM_IB_RAM_BASE_OFFSET);
 410	writel(pg_num, ib->bna->regs.page_addr);
 411
 412	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
 413					HQM_IB_RAM_BASE_OFFSET);
 414
 415	ib_mem = (struct bna_ib_blk_mem *)0;
 416	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
 417	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
 418
 419	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
 420	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
 421
 422	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
 423	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
 424
 425	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
 426	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
 427
 428	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
 429	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
 430
 431	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
 432				(u32)ib->ib_config.coalescing_timeo, 0);
 433
 434	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
 435				HQM_INDX_TBL_RAM_BASE_OFFSET);
 436	writel(pg_num, ib->bna->regs.page_addr);
 437
 438	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
 439					HQM_INDX_TBL_RAM_BASE_OFFSET);
 440	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
 441		off = (unsigned long)
 442		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
 443		writel(0, base_addr + off);
 444	}
 445
 446	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
 447		bna_intx_disable(ib->bna, intx_mask);
 448		intx_mask &= ~(ib->intr->vector);
 449		bna_intx_enable(ib->bna, intx_mask);
 450	}
 451}
 452
 453static void
 454bna_ib_stop(struct bna_ib *ib)
 455{
 456	u32 intx_mask;
 457
 458	ib->start_count--;
 459
 460	if (ib->start_count == 0) {
 461		writel(BNA_DOORBELL_IB_INT_DISABLE,
 462				ib->door_bell.doorbell_addr);
 463		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
 464			bna_intx_disable(ib->bna, intx_mask);
 465			intx_mask |= (ib->intr->vector);
 466			bna_intx_enable(ib->bna, intx_mask);
 467		}
 468	}
 469}
 470
 471static void
 472bna_ib_fail(struct bna_ib *ib)
 473{
 474	ib->start_count = 0;
 475}
 476
 477/**
 478 * RXF
 479 */
 480static void rxf_enable(struct bna_rxf *rxf);
 481static void rxf_disable(struct bna_rxf *rxf);
 482static void __rxf_config_set(struct bna_rxf *rxf);
 483static void __rxf_rit_set(struct bna_rxf *rxf);
 484static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
 485static int rxf_process_packet_filter(struct bna_rxf *rxf);
 486static int rxf_clear_packet_filter(struct bna_rxf *rxf);
 487static void rxf_reset_packet_filter(struct bna_rxf *rxf);
 488static void rxf_cb_enabled(void *arg, int status);
 489static void rxf_cb_disabled(void *arg, int status);
 490static void bna_rxf_cb_stats_cleared(void *arg, int status);
 491static void __rxf_enable(struct bna_rxf *rxf);
 492static void __rxf_disable(struct bna_rxf *rxf);
 493
 494bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
 495			enum bna_rxf_event);
 496bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
 497			enum bna_rxf_event);
 498bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
 499			enum bna_rxf_event);
 500bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
 501			enum bna_rxf_event);
 502bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
 503			enum bna_rxf_event);
 504bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
 505			enum bna_rxf_event);
 506bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
 507			enum bna_rxf_event);
 508bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
 509			enum bna_rxf_event);
 510bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
 511			enum bna_rxf_event);
 512
 513static struct bfa_sm_table rxf_sm_table[] = {
 514	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
 515	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
 516	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
 517	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
 518	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
 519	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
 520	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
 521	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
 522	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
 523};
 524
 525static void
 526bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
 527{
 528	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
 529}
 530
 531static void
 532bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
 533{
 534	switch (event) {
 535	case RXF_E_START:
 536		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
 537		break;
 538
 539	case RXF_E_STOP:
 540		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 541		break;
 542
 543	case RXF_E_FAIL:
 544		/* No-op */
 545		break;
 546
 547	case RXF_E_CAM_FLTR_MOD:
 548		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
 549		break;
 550
 551	case RXF_E_STARTED:
 552	case RXF_E_STOPPED:
 553	case RXF_E_CAM_FLTR_RESP:
 554		/**
 555		 * These events are received due to flushing of mbox
 556		 * when device fails
 557		 */
 558		/* No-op */
 559		break;
 560
 561	case RXF_E_PAUSE:
 562		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
 563		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
 564		break;
 565
 566	case RXF_E_RESUME:
 567		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
 568		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
 569		break;
 570
 571	default:
 572		bfa_sm_fault(rxf->rx->bna, event);
 573	}
 574}
 575
 576static void
 577bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
 578{
 579	__rxf_config_set(rxf);
 580	__rxf_rit_set(rxf);
 581	rxf_enable(rxf);
 582}
 583
 584static void
 585bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 586{
 587	switch (event) {
 588	case RXF_E_STOP:
 589		/**
 590		 * STOP originates from bnad. When this happens,
 591		 * it cannot be waiting for a filter update
 592		 */
 593		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
 594		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
 595		break;
 596
 597	case RXF_E_FAIL:
 598		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
 599		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
 600		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 601		break;
 602
 603	case RXF_E_CAM_FLTR_MOD:
 604		/* No-op */
 605		break;
 606
 607	case RXF_E_STARTED:
 608		/**
 609		 * Force rxf_process_filter() to go through initial
 610		 * config
 611		 */
 612		if ((rxf->ucast_active_mac != NULL) &&
 613			(rxf->ucast_pending_set == 0))
 614			rxf->ucast_pending_set = 1;
 615
 616		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
 617			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
 618
 619		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
 620
 621		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
 622		break;
 623
 624	case RXF_E_PAUSE:
 625	case RXF_E_RESUME:
 626		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
 627		break;
 628
 629	default:
 630		bfa_sm_fault(rxf->rx->bna, event);
 631	}
 632}
 633
 634static void
 635bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
 636{
 637	if (!rxf_process_packet_filter(rxf)) {
 638		/* No more pending CAM entries to update */
 639		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
 640	}
 641}
 642
 643static void
 644bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 645{
 646	switch (event) {
 647	case RXF_E_STOP:
 648		/**
 649		 * STOP originates from bnad. When this happens,
 650		 * it cannot be waiting for a filter update
 651		 */
 652		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
 653		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
 654		break;
 655
 656	case RXF_E_FAIL:
 657		rxf_reset_packet_filter(rxf);
 658		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
 659		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
 660		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 661		break;
 662
 663	case RXF_E_CAM_FLTR_MOD:
 664		/* No-op */
 665		break;
 666
 667	case RXF_E_CAM_FLTR_RESP:
 668		if (!rxf_process_packet_filter(rxf)) {
 669			/* No more pending CAM entries to update */
 670			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
 671			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
 672		}
 673		break;
 674
 675	case RXF_E_PAUSE:
 676	case RXF_E_RESUME:
 677		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
 678		break;
 679
 680	default:
 681		bfa_sm_fault(rxf->rx->bna, event);
 682	}
 683}
 684
 685static void
 686bna_rxf_sm_started_entry(struct bna_rxf *rxf)
 687{
 688	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
 689
 690	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
 691		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
 692			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
 693		else
 694			bfa_fsm_send_event(rxf, RXF_E_RESUME);
 695	}
 696
 697}
 698
 699static void
 700bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
 701{
 702	switch (event) {
 703	case RXF_E_STOP:
 704		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
 705		/* Hack to get FSM start clearing CAM entries */
 706		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
 707		break;
 708
 709	case RXF_E_FAIL:
 710		rxf_reset_packet_filter(rxf);
 711		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 712		break;
 713
 714	case RXF_E_CAM_FLTR_MOD:
 715		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
 716		break;
 717
 718	case RXF_E_PAUSE:
 719		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
 720		break;
 721
 722	case RXF_E_RESUME:
 723		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
 724		break;
 725
 726	default:
 727		bfa_sm_fault(rxf->rx->bna, event);
 728	}
 729}
 730
 731static void
 732bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
 733{
 734	/**
 735	 *  Note: Do not add rxf_clear_packet_filter here.
 736	 * It will overstep mbox when this transition happens:
 737	 * 	cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
 738	 */
 739}
 740
 741static void
 742bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 743{
 744	switch (event) {
 745	case RXF_E_FAIL:
 746		/**
 747		 * FSM was in the process of stopping, initiated by
 748		 * bnad. When this happens, no one can be waiting for
 749		 * start or filter update
 750		 */
 751		rxf_reset_packet_filter(rxf);
 752		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 753		break;
 754
 755	case RXF_E_CAM_FLTR_RESP:
 756		if (!rxf_clear_packet_filter(rxf)) {
 757			/* No more pending CAM entries to clear */
 758			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
 759			rxf_disable(rxf);
 760		}
 761		break;
 762
 763	default:
 764		bfa_sm_fault(rxf->rx->bna, event);
 765	}
 766}
 767
 768static void
 769bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
 770{
 771	/**
 772	 * NOTE: Do not add  rxf_disable here.
 773	 * It will overstep mbox when this transition happens:
 774	 * 	start_wait -> stop_wait on RXF_E_STOP event
 775	 */
 776}
 777
 778static void
 779bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 780{
 781	switch (event) {
 782	case RXF_E_FAIL:
 783		/**
 784		 * FSM was in the process of stopping, initiated by
 785		 * bnad. When this happens, no one can be waiting for
 786		 * start or filter update
 787		 */
 788		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 789		break;
 790
 791	case RXF_E_STARTED:
 792		/**
 793		 * This event is received due to abrupt transition from
 794		 * bna_rxf_sm_start_wait state on receiving
 795		 * RXF_E_STOP event
 796		 */
 797		rxf_disable(rxf);
 798		break;
 799
 800	case RXF_E_STOPPED:
 801		/**
 802		 * FSM was in the process of stopping, initiated by
 803		 * bnad. When this happens, no one can be waiting for
 804		 * start or filter update
 805		 */
 806		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
 807		break;
 808
 809	case RXF_E_PAUSE:
 810		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
 811		break;
 812
 813	case RXF_E_RESUME:
 814		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
 815		break;
 816
 817	default:
 818		bfa_sm_fault(rxf->rx->bna, event);
 819	}
 820}
 821
 822static void
 823bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
 824{
 825	rxf->rxf_flags &=
 826		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
 827	__rxf_disable(rxf);
 828}
 829
 830static void
 831bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 832{
 833	switch (event) {
 834	case RXF_E_FAIL:
 835		/**
 836		 * FSM was in the process of disabling rxf, initiated by
 837		 * bnad.
 838		 */
 839		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
 840		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 841		break;
 842
 843	case RXF_E_STOPPED:
 844		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
 845		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
 846		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
 847		break;
 848
 849	/*
 850	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
 851	 * any other event during these states
 852	 */
 853	default:
 854		bfa_sm_fault(rxf->rx->bna, event);
 855	}
 856}
 857
 858static void
 859bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
 860{
 861	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
 862	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
 863	__rxf_enable(rxf);
 864}
 865
 866static void
 867bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 868{
 869	switch (event) {
 870	case RXF_E_FAIL:
 871		/**
 872		 * FSM was in the process of disabling rxf, initiated by
 873		 * bnad.
 874		 */
 875		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
 876		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 877		break;
 878
 879	case RXF_E_STARTED:
 880		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
 881		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
 882		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
 883		break;
 884
 885	/*
 886	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
 887	 * any other event during these states
 888	 */
 889	default:
 890		bfa_sm_fault(rxf->rx->bna, event);
 891	}
 892}
 893
 894static void
 895bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
 896{
 897	__bna_rxf_stat_clr(rxf);
 898}
 899
 900static void
 901bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
 902{
 903	switch (event) {
 904	case RXF_E_FAIL:
 905	case RXF_E_STAT_CLEARED:
 906		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 907		break;
 908
 909	default:
 910		bfa_sm_fault(rxf->rx->bna, event);
 911	}
 912}
 913
 914static void
 915__rxf_enable(struct bna_rxf *rxf)
 916{
 917	struct bfi_ll_rxf_multi_req ll_req;
 918	u32 bm[2] = {0, 0};
 919
 920	if (rxf->rxf_id < 32)
 921		bm[0] = 1 << rxf->rxf_id;
 922	else
 923		bm[1] = 1 << (rxf->rxf_id - 32);
 924
 925	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
 926	ll_req.rxf_id_mask[0] = htonl(bm[0]);
 927	ll_req.rxf_id_mask[1] = htonl(bm[1]);
 928	ll_req.enable = 1;
 929
 930	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
 931			rxf_cb_enabled, rxf);
 932
 933	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
 934}
 935
 936static void
 937__rxf_disable(struct bna_rxf *rxf)
 938{
 939	struct bfi_ll_rxf_multi_req ll_req;
 940	u32 bm[2] = {0, 0};
 941
 942	if (rxf->rxf_id < 32)
 943		bm[0] = 1 << rxf->rxf_id;
 944	else
 945		bm[1] = 1 << (rxf->rxf_id - 32);
 946
 947	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
 948	ll_req.rxf_id_mask[0] = htonl(bm[0]);
 949	ll_req.rxf_id_mask[1] = htonl(bm[1]);
 950	ll_req.enable = 0;
 951
 952	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
 953			rxf_cb_disabled, rxf);
 954
 955	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
 956}
 957
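/*
 * Editor's sketch (illustrative, not part of the driver): __rxf_enable() and
 * __rxf_disable() address up to 64 RxFs through two 32-bit mask words in the
 * mailbox request. For example, rxf_id 5 sets bit 5 of word 0 (0x00000020)
 * and rxf_id 35 sets bit 3 of word 1 (0x00000008).
 */
static void
bna_rxf_id_to_mask_example(int rxf_id, u32 bm[2])
{
	bm[0] = 0;
	bm[1] = 0;

	if (rxf_id < 32)
		bm[0] = 1 << rxf_id;
	else
		bm[1] = 1 << (rxf_id - 32);
}
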
 958static void
 959__rxf_config_set(struct bna_rxf *rxf)
 960{
 961	u32 i;
 962	struct bna_rss_mem *rss_mem;
 963	struct bna_rx_fndb_ram *rx_fndb_ram;
 964	struct bna *bna = rxf->rx->bna;
 965	void __iomem *base_addr;
 966	unsigned long off;
 967
 968	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
 969			RSS_TABLE_BASE_OFFSET);
 970
 971	rss_mem = (struct bna_rss_mem *)0;
 972
 973	/* Configure RSS if required */
 974	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
 975		/* configure RSS Table */
 976		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
 977			bna->port_num, RSS_TABLE_BASE_OFFSET),
 978					bna->regs.page_addr);
 979
 980		/* temporarily disable RSS, while hash value is written */
 981		off = (unsigned long)&rss_mem[0].type_n_hash;
 982		writel(0, base_addr + off);
 983
 984		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
 985			off = (unsigned long)
 986			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
 987			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
 988			base_addr + off);
 989		}
 990
 991		off = (unsigned long)&rss_mem[0].type_n_hash;
 992		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
 993			base_addr + off);
 994	}
 995
 996	/* Configure RxF */
 997	writel(BNA_GET_PAGE_NUM(
 998		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
 999		RX_FNDB_RAM_BASE_OFFSET),
1000		bna->regs.page_addr);
1001
1002	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1003		RX_FNDB_RAM_BASE_OFFSET);
1004
1005	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
1006
1007	/* We always use RSS table 0 */
1008	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
1009	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
1010		base_addr + off);
1011
1012	/* small large buffer enable/disable */
1013	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
1014	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
1015		base_addr + off);
1016
1017	/* RIT offset,  HDS forced offset, multicast RxQ Id */
1018	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
1019	writel((rxf->rit_segment->rit_offset << 16) |
1020		(rxf->forced_offset << 8) |
1021		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
1022		base_addr + off);
1023
1024	/*
1025	 * default vlan tag, default function enable, strip vlan bytes,
1026	 * HDS type, header size
1027	 */
1028
1029	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
1030	 writel(((u32)rxf->default_vlan_tag << 16) |
1031		(rxf->ctrl_flags &
1032			(BNA_RXF_CF_DEFAULT_VLAN |
1033			BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
1034			BNA_RXF_CF_VLAN_STRIP)) |
1035		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
1036		rxf->hds_cfg.header_size,
1037		base_addr + off);
1038}
1039
1040void
1041__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
1042{
1043	struct bna *bna = rxf->rx->bna;
1044	int i;
1045
1046	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
1047			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
1048			bna->regs.page_addr);
1049
1050	if (status == BNA_STATUS_T_ENABLED) {
1051		/* enable VLAN filtering on this function */
1052		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1053			writel(rxf->vlan_filter_table[i],
1054					BNA_GET_VLAN_MEM_ENTRY_ADDR
1055					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
1056						i * 32));
1057		}
1058	} else {
1059		/* disable VLAN filtering on this function */
1060		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1061			writel(0xffffffff,
1062					BNA_GET_VLAN_MEM_ENTRY_ADDR
1063					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
1064						i * 32));
1065		}
1066	}
1067}
1068
1069static void
1070__rxf_rit_set(struct bna_rxf *rxf)
1071{
1072	struct bna *bna = rxf->rx->bna;
1073	struct bna_rit_mem *rit_mem;
1074	int i;
1075	void __iomem *base_addr;
1076	unsigned long off;
1077
1078	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1079			FUNCTION_TO_RXQ_TRANSLATE);
1080
1081	rit_mem = (struct bna_rit_mem *)0;
1082
1083	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
1084		FUNCTION_TO_RXQ_TRANSLATE),
1085		bna->regs.page_addr);
1086
1087	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
1088		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
1089		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
1090			rxf->rit_segment->rit[i].small_rxq_id,
1091			base_addr + off);
1092	}
1093}
1094
1095static void
1096__bna_rxf_stat_clr(struct bna_rxf *rxf)
1097{
1098	struct bfi_ll_stats_req ll_req;
1099	u32 bm[2] = {0, 0};
1100
1101	if (rxf->rxf_id < 32)
1102		bm[0] = 1 << rxf->rxf_id;
1103	else
1104		bm[1] = 1 << (rxf->rxf_id - 32);
1105
1106	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
1107	ll_req.stats_mask = 0;
1108	ll_req.txf_id_mask[0] = 0;
1109	ll_req.txf_id_mask[1] =	0;
1110
1111	ll_req.rxf_id_mask[0] = htonl(bm[0]);
1112	ll_req.rxf_id_mask[1] = htonl(bm[1]);
1113
1114	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
1115			bna_rxf_cb_stats_cleared, rxf);
1116	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1117}
1118
1119static void
1120rxf_enable(struct bna_rxf *rxf)
1121{
1122	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1123		bfa_fsm_send_event(rxf, RXF_E_STARTED);
1124	else {
1125		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
1126		__rxf_enable(rxf);
1127	}
1128}
1129
1130static void
1131rxf_cb_enabled(void *arg, int status)
1132{
1133	struct bna_rxf *rxf = (struct bna_rxf *)arg;
1134
1135	bfa_q_qe_init(&rxf->mbox_qe.qe);
1136	bfa_fsm_send_event(rxf, RXF_E_STARTED);
1137}
1138
1139static void
1140rxf_disable(struct bna_rxf *rxf)
1141{
1142	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1143		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1144	else {
1145		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
1146		__rxf_disable(rxf);
1147	}
1148}
1148
1149static void
1150rxf_cb_disabled(void *arg, int status)
1151{
1152	struct bna_rxf *rxf = (struct bna_rxf *)arg;
1153
1154	bfa_q_qe_init(&rxf->mbox_qe.qe);
1155	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1156}
1157
1158void
1159rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
1160{
1161	struct bna_rxf *rxf = (struct bna_rxf *)arg;
1162
1163	bfa_q_qe_init(&rxf->mbox_qe.qe);
1164
1165	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
1166}
1167
1168static void
1169bna_rxf_cb_stats_cleared(void *arg, int status)
1170{
1171	struct bna_rxf *rxf = (struct bna_rxf *)arg;
1172
1173	bfa_q_qe_init(&rxf->mbox_qe.qe);
1174	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
1175}
1176
1177void
1178rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
1179		const struct bna_mac *mac_addr)
1180{
1181	struct bfi_ll_mac_addr_req req;
1182
1183	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
1184
1185	req.rxf_id = rxf->rxf_id;
1186	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
1187
1188	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
1189				rxf_cb_cam_fltr_mbox_cmd, rxf);
1190
1191	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1192}
1193
1194static int
1195rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
1196{
1197	struct bna_mac *mac = NULL;
1198	struct list_head *qe;
1199
1200	/* Add multicast entries */
1201	if (!list_empty(&rxf->mcast_pending_add_q)) {
1202		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1203		bfa_q_qe_init(qe);
1204		mac = (struct bna_mac *)qe;
1205		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
1206		list_add_tail(&mac->qe, &rxf->mcast_active_q);
1207		return 1;
1208	}
1209
1210	/* Delete multicast entries previously added */
1211	if (!list_empty(&rxf->mcast_pending_del_q)) {
1212		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1213		bfa_q_qe_init(qe);
1214		mac = (struct bna_mac *)qe;
1215		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1216		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1217		return 1;
1218	}
1219
1220	return 0;
1221}
1222
1223static int
1224rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1225{
1226	/* Apply the VLAN filter */
1227	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
1230			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1231	}
1232
1233	/* Apply RSS configuration */
1234	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
1235		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
1236		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
1237			/* RSS is being disabled */
1238			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
1239			__rxf_rit_set(rxf);
1240			__rxf_config_set(rxf);
1241		} else {
1242			/* RSS is being enabled or reconfigured */
1243			rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
1244			__rxf_rit_set(rxf);
1245			__rxf_config_set(rxf);
1246		}
1247	}
1248
1249	return 0;
1250}
1251
1252/**
1253 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
1254 * command. Also processes pending filter configuration - promiscuous mode,
1255 * default mode, allmulti mode and issues mailbox command or directly applies
1256 * to h/w
1257 */
1258static int
1259rxf_process_packet_filter(struct bna_rxf *rxf)
1260{
1261	/* Set the default MAC first */
1262	if (rxf->ucast_pending_set > 0) {
1263		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
1264				rxf->ucast_active_mac);
1265		rxf->ucast_pending_set--;
1266		return 1;
1267	}
1268
1269	if (rxf_process_packet_filter_ucast(rxf))
1270		return 1;
1271
1272	if (rxf_process_packet_filter_mcast(rxf))
1273		return 1;
1274
1275	if (rxf_process_packet_filter_promisc(rxf))
1276		return 1;
1277
1278	if (rxf_process_packet_filter_allmulti(rxf))
1279		return 1;
1280
1281	if (rxf_process_packet_filter_vlan(rxf))
1282		return 1;
1283
1284	return 0;
1285}
1286
1287static int
1288rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
1289{
1290	struct bna_mac *mac = NULL;
1291	struct list_head *qe;
1292
1293	/* 3. delete pending mcast entries */
1294	if (!list_empty(&rxf->mcast_pending_del_q)) {
1295		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1296		bfa_q_qe_init(qe);
1297		mac = (struct bna_mac *)qe;
1298		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1299		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1300		return 1;
1301	}
1302
1303	/* 4. clear active mcast entries; move them to pending_add_q */
1304	if (!list_empty(&rxf->mcast_active_q)) {
1305		bfa_q_deq(&rxf->mcast_active_q, &qe);
1306		bfa_q_qe_init(qe);
1307		mac = (struct bna_mac *)qe;
1308		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1309		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1310		return 1;
1311	}
1312
1313	return 0;
1314}
1315
1316/**
1317 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
1318 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
1319 * so that they are added to CAM again in the rxf start path. Moves the current
1320 * filter settings - promiscuous, default, allmulti - to pending filter
1321 * configuration
1322 */
1323static int
1324rxf_clear_packet_filter(struct bna_rxf *rxf)
1325{
1326	if (rxf_clear_packet_filter_ucast(rxf))
1327		return 1;
1328
1329	if (rxf_clear_packet_filter_mcast(rxf))
1330		return 1;
1331
1332	/* 5. clear active default MAC in the CAM */
1333	if (rxf->ucast_pending_set > 0)
1334		rxf->ucast_pending_set = 0;
1335
1336	if (rxf_clear_packet_filter_promisc(rxf))
1337		return 1;
1338
1339	if (rxf_clear_packet_filter_allmulti(rxf))
1340		return 1;
1341
1342	return 0;
1343}
1344
1345static void
1346rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
1347{
1348	struct list_head *qe;
1349	struct bna_mac *mac;
1350
1351	/* 3. Move active mcast entries to pending_add_q */
1352	while (!list_empty(&rxf->mcast_active_q)) {
1353		bfa_q_deq(&rxf->mcast_active_q, &qe);
1354		bfa_q_qe_init(qe);
1355		list_add_tail(qe, &rxf->mcast_pending_add_q);
1356	}
1357
1358	/* 4. Throw away delete pending mcast entries */
1359	while (!list_empty(&rxf->mcast_pending_del_q)) {
1360		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1361		bfa_q_qe_init(qe);
1362		mac = (struct bna_mac *)qe;
1363		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1364	}
1365}
1366
1367/**
1368 * In the rxf fail path, throws away the ucast/mcast entries pending for
1369 * deletion, moves all active ucast/mcast entries to pending queue so that
1370 * they are added back to CAM in the rxf start path. Also moves the current
1371 * filter configuration to pending filter configuration.
1372 */
1373static void
1374rxf_reset_packet_filter(struct bna_rxf *rxf)
1375{
1376	rxf_reset_packet_filter_ucast(rxf);
1377
1378	rxf_reset_packet_filter_mcast(rxf);
1379
1380	/* 5. Turn off ucast set flag */
1381	rxf->ucast_pending_set = 0;
1382
1383	rxf_reset_packet_filter_promisc(rxf);
1384
1385	rxf_reset_packet_filter_allmulti(rxf);
1386}
1387
1388static void
1389bna_rxf_init(struct bna_rxf *rxf,
1390		struct bna_rx *rx,
1391		struct bna_rx_config *q_config)
1392{
1393	struct list_head *qe;
1394	struct bna_rxp *rxp;
1395
1396	/* rxf_id is initialized during rx_mod init */
1397	rxf->rx = rx;
1398
1399	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
1400	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
1401	rxf->ucast_pending_set = 0;
1402	INIT_LIST_HEAD(&rxf->ucast_active_q);
1403	rxf->ucast_active_mac = NULL;
1404
1405	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
1406	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
1407	INIT_LIST_HEAD(&rxf->mcast_active_q);
1408
1409	bfa_q_qe_init(&rxf->mbox_qe.qe);
1410
1411	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
1412		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
1413
1414	rxf->rxf_oper_state = (q_config->paused) ?
1415		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
1416
1417	bna_rxf_adv_init(rxf, rx, q_config);
1418
1419	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
1420					q_config->num_paths);
1421
1422	list_for_each(qe, &rx->rxp_q) {
1423		rxp = (struct bna_rxp *)qe;
1424		if (q_config->rxp_type == BNA_RXP_SINGLE)
1425			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
1426		else
1427			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
1428		break;
1429	}
1430
1431	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
1432	memset(rxf->vlan_filter_table, 0,
1433			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1434
1435	/* Set up VLAN 0 for pure priority tagged packets */
1436	rxf->vlan_filter_table[0] |= 1;
1437
1438	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1439}
1440
1441static void
1442bna_rxf_uninit(struct bna_rxf *rxf)
1443{
1444	struct bna *bna = rxf->rx->bna;
1445	struct bna_mac *mac;
1446
1447	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
1448	rxf->rit_segment = NULL;
1449
1450	rxf->ucast_pending_set = 0;
1451
1452	while (!list_empty(&rxf->ucast_pending_add_q)) {
1453		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
1454		bfa_q_qe_init(&mac->qe);
1455		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1456	}
1457
1458	if (rxf->ucast_active_mac) {
1459		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1460		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
1461			rxf->ucast_active_mac);
1462		rxf->ucast_active_mac = NULL;
1463	}
1464
1465	while (!list_empty(&rxf->mcast_pending_add_q)) {
1466		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
1467		bfa_q_qe_init(&mac->qe);
1468		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1469	}
1470
1471	/* Turn off pending promisc mode */
1472	if (is_promisc_enable(rxf->rxmode_pending,
1473				rxf->rxmode_pending_bitmask)) {
1474		/* system promisc state should be pending */
1475		BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
1476		promisc_inactive(rxf->rxmode_pending,
1477				rxf->rxmode_pending_bitmask);
1478		 bna->rxf_promisc_id = BFI_MAX_RXF;
1479	}
1480	/* Promisc mode should not be active */
1481	BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
1482
1483	/* Turn off pending all-multi mode */
1484	if (is_allmulti_enable(rxf->rxmode_pending,
1485				rxf->rxmode_pending_bitmask)) {
1486		allmulti_inactive(rxf->rxmode_pending,
1487				rxf->rxmode_pending_bitmask);
1488	}
1489	/* Allmulti mode should not be active */
1490	BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
1491
1492	rxf->rx = NULL;
1493}
1494
1495static void
1496bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
1497{
1498	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
1499	if (rx->rxf.rxf_id < 32)
1500		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
1501	else
1502		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
1503				1 << (rx->rxf.rxf_id - 32));
1504}
1505
1506static void
1507bna_rxf_start(struct bna_rxf *rxf)
1508{
1509	rxf->start_cbfn = bna_rx_cb_rxf_started;
1510	rxf->start_cbarg = rxf->rx;
1511	rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
1512	bfa_fsm_send_event(rxf, RXF_E_START);
1513}
1514
1515static void
1516bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
1517{
1518	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
1519	if (rx->rxf.rxf_id < 32)
1520		rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
1521	else
1522		rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
1523				1 << (rx->rxf.rxf_id - 32);
1524}
1525
1526static void
1527bna_rxf_stop(struct bna_rxf *rxf)
1528{
1529	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
1530	rxf->stop_cbarg = rxf->rx;
1531	bfa_fsm_send_event(rxf, RXF_E_STOP);
1532}
1533
1534static void
1535bna_rxf_fail(struct bna_rxf *rxf)
1536{
1537	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
1538	bfa_fsm_send_event(rxf, RXF_E_FAIL);
1539}
1540
1541int
1542bna_rxf_state_get(struct bna_rxf *rxf)
1543{
1544	return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
1545}
1546
1547enum bna_cb_status
1548bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
1549		 void (*cbfn)(struct bnad *, struct bna_rx *,
1550			      enum bna_cb_status))
1551{
1552	struct bna_rxf *rxf = &rx->rxf;
1553
1554	if (rxf->ucast_active_mac == NULL) {
1555		rxf->ucast_active_mac =
1556				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
1557		if (rxf->ucast_active_mac == NULL)
1558			return BNA_CB_UCAST_CAM_FULL;
1559		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1560	}
1561
1562	memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
1563	rxf->ucast_pending_set++;
1564	rxf->cam_fltr_cbfn = cbfn;
1565	rxf->cam_fltr_cbarg = rx->bna->bnad;
1566
1567	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1568
1569	return BNA_CB_SUCCESS;
1570}
1571
1572enum bna_cb_status
1573bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1574		 void (*cbfn)(struct bnad *, struct bna_rx *,
1575			      enum bna_cb_status))
1576{
1577	struct bna_rxf *rxf = &rx->rxf;
1578	struct list_head	*qe;
1579	struct bna_mac *mac;
1580
1581	/* Check if already added */
1582	list_for_each(qe, &rxf->mcast_active_q) {
1583		mac = (struct bna_mac *)qe;
1584		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1585			if (cbfn)
1586				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1587			return BNA_CB_SUCCESS;
1588		}
1589	}
1590
1591	/* Check if pending addition */
1592	list_for_each(qe, &rxf->mcast_pending_add_q) {
1593		mac = (struct bna_mac *)qe;
1594		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1595			if (cbfn)
1596				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1597			return BNA_CB_SUCCESS;
1598		}
1599	}
1600
1601	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1602	if (mac == NULL)
1603		return BNA_CB_MCAST_LIST_FULL;
1604	bfa_q_qe_init(&mac->qe);
1605	memcpy(mac->addr, addr, ETH_ALEN);
1606	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1607
1608	rxf->cam_fltr_cbfn = cbfn;
1609	rxf->cam_fltr_cbarg = rx->bna->bnad;
1610
1611	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1612
1613	return BNA_CB_SUCCESS;
1614}
1615
1616enum bna_cb_status
1617bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1618		     void (*cbfn)(struct bnad *, struct bna_rx *,
1619				  enum bna_cb_status))
1620{
1621	struct bna_rxf *rxf = &rx->rxf;
1622	struct list_head list_head;
1623	struct list_head *qe;
1624	u8 *mcaddr;
1625	struct bna_mac *mac;
1626	struct bna_mac *mac1;
1627	int skip;
1628	int delete;
1629	int need_hw_config = 0;
1630	int i;
1631
1632	/* Allocate nodes */
1633	INIT_LIST_HEAD(&list_head);
1634	for (i = 0, mcaddr = mclist; i < count; i++) {
1635		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1636		if (mac == NULL)
1637			goto err_return;
1638		bfa_q_qe_init(&mac->qe);
1639		memcpy(mac->addr, mcaddr, ETH_ALEN);
1640		list_add_tail(&mac->qe, &list_head);
1641
1642		mcaddr += ETH_ALEN;
1643	}
1644
1645	/* Schedule for addition */
1646	while (!list_empty(&list_head)) {
1647		bfa_q_deq(&list_head, &qe);
1648		mac = (struct bna_mac *)qe;
1649		bfa_q_qe_init(&mac->qe);
1650
1651		skip = 0;
1652
1653		/* Skip if already added */
1654		list_for_each(qe, &rxf->mcast_active_q) {
1655			mac1 = (struct bna_mac *)qe;
1656			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1657				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1658							mac);
1659				skip = 1;
1660				break;
1661			}
1662		}
1663
1664		if (skip)
1665			continue;
1666
1667		/* Skip if pending addition */
1668		list_for_each(qe, &rxf->mcast_pending_add_q) {
1669			mac1 = (struct bna_mac *)qe;
1670			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1671				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1672							mac);
1673				skip = 1;
1674				break;
1675			}
1676		}
1677
1678		if (skip)
1679			continue;
1680
1681		need_hw_config = 1;
1682		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1683	}
1684
1685	/**
1686	 * Delete the entries that are in the pending_add_q but not
1687	 * in the new list
1688	 */
1689	while (!list_empty(&rxf->mcast_pending_add_q)) {
1690		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1691		mac = (struct bna_mac *)qe;
1692		bfa_q_qe_init(&mac->qe);
1693		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1694			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1695				delete = 0;
1696				break;
1697			}
1698			mcaddr += ETH_ALEN;
1699		}
1700		if (delete)
1701			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1702		else
1703			list_add_tail(&mac->qe, &list_head);
1704	}
1705	while (!list_empty(&list_head)) {
1706		bfa_q_deq(&list_head, &qe);
1707		mac = (struct bna_mac *)qe;
1708		bfa_q_qe_init(&mac->qe);
1709		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1710	}
1711
1712	/**
1713	 * Schedule entries for deletion that are in the active_q but not
1714	 * in the new list
1715	 */
1716	while (!list_empty(&rxf->mcast_active_q)) {
1717		bfa_q_deq(&rxf->mcast_active_q, &qe);
1718		mac = (struct bna_mac *)qe;
1719		bfa_q_qe_init(&mac->qe);
1720		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1721			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1722				delete = 0;
1723				break;
1724			}
1725			mcaddr += ETH_ALEN;
1726		}
1727		if (delete) {
1728			list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
1729			need_hw_config = 1;
1730		} else {
1731			list_add_tail(&mac->qe, &list_head);
1732		}
1733	}
1734	while (!list_empty(&list_head)) {
1735		bfa_q_deq(&list_head, &qe);
1736		mac = (struct bna_mac *)qe;
1737		bfa_q_qe_init(&mac->qe);
1738		list_add_tail(&mac->qe, &rxf->mcast_active_q);
1739	}
1740
1741	if (need_hw_config) {
1742		rxf->cam_fltr_cbfn = cbfn;
1743		rxf->cam_fltr_cbarg = rx->bna->bnad;
1744		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1745	} else if (cbfn)
1746		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1747
1748	return BNA_CB_SUCCESS;
1749
1750err_return:
1751	while (!list_empty(&list_head)) {
1752		bfa_q_deq(&list_head, &qe);
1753		mac = (struct bna_mac *)qe;
1754		bfa_q_qe_init(&mac->qe);
1755		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1756	}
1757
1758	return BNA_CB_MCAST_LIST_FULL;
1759}
1760
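/*
 * Editor's sketch (illustrative, not part of the driver): the delta computed
 * by bna_rx_mcast_listset() above. With {A, B} currently active and a new
 * list of {B, C}, B stays on mcast_active_q, C goes to mcast_pending_add_q,
 * A moves to mcast_pending_del_q, and RXF_E_CAM_FLTR_MOD is raised so only
 * that delta is reprogrammed in the CAM. The MAC values here are arbitrary.
 */
static enum bna_cb_status
bna_mcast_listset_example(struct bna_rx *rx)
{
	const u8 mac_b[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
	const u8 mac_c[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x02};
	u8 mclist[2 * ETH_ALEN];

	memcpy(&mclist[0], mac_b, ETH_ALEN);
	memcpy(&mclist[ETH_ALEN], mac_c, ETH_ALEN);

	return bna_rx_mcast_listset(rx, 2, mclist, NULL);
}
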
1761void
1762bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1763{
1764	struct bna_rxf *rxf = &rx->rxf;
1765	int index = (vlan_id >> 5);
1766	int bit = (1 << (vlan_id & 0x1F));
1767
1768	rxf->vlan_filter_table[index] |= bit;
1769	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1770		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1771		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1772	}
1773}
1774
1775void
1776bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1777{
1778	struct bna_rxf *rxf = &rx->rxf;
1779	int index = (vlan_id >> 5);
1780	int bit = (1 << (vlan_id & 0x1F));
1781
1782	rxf->vlan_filter_table[index] &= ~bit;
1783	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1784		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1785		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1786	}
1787}
1788
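/*
 * Editor's sketch (illustrative, not part of the driver): the VLAN filter
 * table is a bitmap with one bit per VLAN ID, held in 32-bit words, so
 * bna_rx_vlan_add()/bna_rx_vlan_del() select word (vlan_id >> 5) and bit
 * (vlan_id & 0x1F). For VLAN 100 that is word 3, bit 4.
 */
static void
bna_vlan_bit_example(struct bna_rxf *rxf)
{
	int vlan_id = 100;
	int index = vlan_id >> 5;		/* 100 / 32 == 3 */
	int bit = 1 << (vlan_id & 0x1F);	/* 100 % 32 == 4, so bit value 0x10 */

	rxf->vlan_filter_table[index] |= bit;	/* same update bna_rx_vlan_add() makes */
}
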
1789/**
1790 * RX
1791 */
1792#define	RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem)	do {	\
1793	struct bna_doorbell_qset *_qset;				\
1794	unsigned long off;						\
1795	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0;	\
1796	(q)->rcb->q_depth = (qdepth);					\
1797	(q)->rcb->unmap_q = unmapq_mem;					\
1798	(q)->rcb->rxq = (q);						\
1799	(q)->rcb->cq = &(rxp)->cq;					\
1800	(q)->rcb->bnad = (bna)->bnad;					\
1801	_qset = (struct bna_doorbell_qset *)0;			\
1802	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0];		\
1803	(q)->rcb->q_dbell = off +					\
1804		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva);	\
1805	(q)->rcb->id = _id;						\
1806} while (0)
1807
1808#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
1809	(qcfg)->num_paths : ((qcfg)->num_paths * 2))
1810
1811#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
1812	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1813
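/*
 * Editor's sketch (illustrative, not part of the driver; the page counts in
 * the comments assume PAGE_SIZE == 4096): SIZE_TO_PAGES() rounds a byte
 * count up to whole pages, and BNA_GET_RXQS() doubles the RxQ count for any
 * path type other than BNA_RXP_SINGLE.
 */
static int
bna_rx_sizing_example(struct bna_rx_config *qcfg)
{
	int pages = SIZE_TO_PAGES(10000);	/* 3 pages: two full plus a partial */
	int rxqs = BNA_GET_RXQS(qcfg);		/* num_paths, or 2 * num_paths otherwise */

	return pages + rxqs;
}
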
1814#define	call_rx_stop_callback(rx, status)				\
1815	if ((rx)->stop_cbfn) {						\
1816		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status));	\
1817		(rx)->stop_cbfn = NULL;					\
1818		(rx)->stop_cbarg = NULL;				\
1819	}
1820
1821/*
1822 * Since rx_enable is synchronous callback, there is no start_cbfn required.
1823 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
1824 * for each rxpath.
1825 */
1826
1827#define	call_rx_disable_cbfn(rx, status)				\
1828		if ((rx)->disable_cbfn)	{				\
1829			(*(rx)->disable_cbfn)((rx)->disable_cbarg,	\
1830					status);			\
1831			(rx)->disable_cbfn = NULL;			\
1832			(rx)->disable_cbarg = NULL;			\
1833		}							\
1834
1835#define	rxqs_reqd(type, num_rxqs)					\
1836	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
1837
1838#define rx_ib_fail(rx)						\
1839do {								\
1840	struct bna_rxp *rxp;					\
1841	struct list_head *qe;						\
1842	list_for_each(qe, &(rx)->rxp_q) {				\
1843		rxp = (struct bna_rxp *)qe;			\
1844		bna_ib_fail(rxp->cq.ib);			\
1845	}							\
1846} while (0)
1847
1848static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1849static void __bna_rxq_start(struct bna_rxq *rxq);
1850static void __bna_cq_start(struct bna_cq *cq);
1851static void bna_rit_create(struct bna_rx *rx);
1852static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1853static void bna_rx_cb_rxq_stopped_all(void *arg);
1854
1855bfa_fsm_state_decl(bna_rx, stopped,
1856	struct bna_rx, enum bna_rx_event);
1857bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1858	struct bna_rx, enum bna_rx_event);
1859bfa_fsm_state_decl(bna_rx, started,
1860	struct bna_rx, enum bna_rx_event);
1861bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1862	struct bna_rx, enum bna_rx_event);
1863bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1864	struct bna_rx, enum bna_rx_event);
1865
1866static const struct bfa_sm_table rx_sm_table[] = {
1867	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1868	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1869	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1870	{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1871	{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1872};
1873
1874static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1875{
1876	struct bna_rxp *rxp;
1877	struct list_head *qe_rxp;
1878
1879	list_for_each(qe_rxp, &rx->rxp_q) {
1880		rxp = (struct bna_rxp *)qe_rxp;
1881		rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1882	}
1883
1884	call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1885}
1886
1887static void bna_rx_sm_stopped(struct bna_rx *rx,
1888				enum bna_rx_event event)
1889{
1890	switch (event) {
1891	case RX_E_START:
1892		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1893		break;
1894	case RX_E_STOP:
1895		call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1896		break;
1897	case RX_E_FAIL:
1898		/* no-op */
1899		break;
1900	default:
1901		bfa_sm_fault(rx->bna, event);
1902		break;
1903	}
1904
1905}
1906
1907static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1908{
1909	struct bna_rxp *rxp;
1910	struct list_head *qe_rxp;
1911	struct bna_rxq *q0 = NULL, *q1 = NULL;
1912
1913	/* Setup the RIT */
1914	bna_rit_create(rx);
1915
1916	list_for_each(qe_rxp, &rx->rxp_q) {
1917		rxp = (struct bna_rxp *)qe_rxp;
1918		bna_ib_start(rxp->cq.ib);
1919		GET_RXQS(rxp, q0, q1);
1920		q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1921		__bna_rxq_start(q0);
1922		rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1923		if (q1)  {
1924			__bna_rxq_start(q1);
1925			rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1926		}
1927		__bna_cq_start(&rxp->cq);
1928	}
1929
1930	bna_rxf_start(&rx->rxf);
1931}
1932
1933static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1934				enum bna_rx_event event)
1935{
1936	switch (event) {
1937	case RX_E_STOP:
1938		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1939		break;
1940	case RX_E_FAIL:
1941		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1942		rx_ib_fail(rx);
1943		bna_rxf_fail(&rx->rxf);
1944		break;
1945	case RX_E_RXF_STARTED:
1946		bfa_fsm_set_state(rx, bna_rx_sm_started);
1947		break;
1948	default:
1949		bfa_sm_fault(rx->bna, event);
1950		break;
1951	}
1952}
1953
1954void
1955bna_rx_sm_started_entry(struct bna_rx *rx)
1956{
1957	struct bna_rxp *rxp;
1958	struct list_head *qe_rxp;
1959
1960	/* Start IB */
1961	list_for_each(qe_rxp, &rx->rxp_q) {
1962		rxp = (struct bna_rxp *)qe_rxp;
1963		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1964	}
1965
1966	bna_llport_rx_started(&rx->bna->port.llport);
1967}
1968
1969void
1970bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1971{
1972	switch (event) {
1973	case RX_E_FAIL:
1974		bna_llport_rx_stopped(&rx->bna->port.llport);
1975		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1976		rx_ib_fail(rx);
1977		bna_rxf_fail(&rx->rxf);
1978		break;
1979	case RX_E_STOP:
1980		bna_llport_rx_stopped(&rx->bna->port.llport);
1981		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1982		break;
1983	default:
1984		bfa_sm_fault(rx->bna, event);
1985		break;
1986	}
1987}
1988
1989void
1990bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1991{
1992	bna_rxf_stop(&rx->rxf);
1993}
1994
1995void
1996bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1997{
1998	switch (event) {
1999	case RX_E_RXF_STOPPED:
2000		bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
2001		break;
2002	case RX_E_RXF_STARTED:
2003		/**
2004		 * RxF was in the process of starting up when
2005		 * RXF_E_STOP was issued. Ignore this event
2006		 */
2007		break;
2008	case RX_E_FAIL:
2009		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2010		rx_ib_fail(rx);
2011		bna_rxf_fail(&rx->rxf);
2012		break;
2013	default:
2014		bfa_sm_fault(rx->bna, event);
2015		break;
2016	}
2017
2018}
2019
2020void
2021bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2022{
2023	struct bna_rxp *rxp = NULL;
2024	struct bna_rxq *q0 = NULL;
2025	struct bna_rxq *q1 = NULL;
2026	struct list_head	*qe;
2027	u32 rxq_mask[2] = {0, 0};
2028
2029	/* Only one call to multi-rxq-stop for all RXPs in this RX */
2030	bfa_wc_up(&rx->rxq_stop_wc);
2031	list_for_each(qe, &rx->rxp_q) {
2032		rxp = (struct bna_rxp *)qe;
2033		GET_RXQS(rxp, q0, q1);
2034		if (q0->rxq_id < 32)
2035			rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2036		else
2037			rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2038		if (q1) {
2039			if (q1->rxq_id < 32)
2040				rxq_mask[0] |= ((u32)1 << q1->rxq_id);
2041			else
2042				rxq_mask[1] |= ((u32)
2043						1 << (q1->rxq_id - 32));
2044		}
2045	}
2046
2047	__bna_multi_rxq_stop(rxp, rxq_mask);
2048}
2049
2050void
2051bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2052{
2053	struct bna_rxp *rxp = NULL;
2054	struct list_head	*q

[Listing truncated here; the full 4185-line file is available at the repository linked above.]