/drivers/net/bna/bna_txrx.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfa_sm.h"
#include "bfi.h"
/**
 * IB
 */
#define bna_ib_find_free_ibidx(_mask, _pos)\
do {\
	(_pos) = 0;\
	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
		((1 << (_pos)) & (_mask)))\
		(_pos)++;\
} while (0)

#define bna_ib_count_ibidx(_mask, _count)\
do {\
	int pos = 0;\
	(_count) = 0;\
	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
		if ((1 << pos) & (_mask))\
			(_count) = pos + 1;\
		pos++;\
	} \
} while (0)

#define bna_ib_select_segpool(_count, _q_idx)\
do {\
	int i;\
	(_q_idx) = -1;\
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
		if ((_count <= ibidx_pool[i].pool_entry_size)) {\
			(_q_idx) = i;\
			break;\
		} \
	} \
} while (0)

struct bna_ibidx_pool {
	int	pool_size;
	int	pool_entry_size;
};
init_ibidx_pool(ibidx_pool);
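
/*
 * Note: init_ibidx_pool() above is a macro (expected to be defined in bna.h)
 * that declares and fills the static ibidx_pool[] table of
 * {pool_size, pool_entry_size} entries consumed by bna_ib_select_segpool().
 */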

static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
		int vector)
{
	struct bna_intr *intr;
	struct list_head *qe;

	list_for_each(qe, &ib_mod->intr_active_q) {
		intr = (struct bna_intr *)qe;
		if ((intr->intr_type == intr_type) &&
			(intr->vector == vector)) {
			intr->ref_count++;
			return intr;
		}
	}

	if (list_empty(&ib_mod->intr_free_q))
		return NULL;

	bfa_q_deq(&ib_mod->intr_free_q, &intr);
	bfa_q_qe_init(&intr->qe);

	intr->ref_count = 1;
	intr->intr_type = intr_type;
	intr->vector = vector;

	list_add_tail(&intr->qe, &ib_mod->intr_active_q);

	return intr;
}

static void
bna_intr_put(struct bna_ib_mod *ib_mod,
		struct bna_intr *intr)
{
	intr->ref_count--;

	if (intr->ref_count == 0) {
		intr->ib = NULL;
		list_del(&intr->qe);
		bfa_q_qe_init(&intr->qe);
		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
	}
}

void
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	u8 offset;
	struct bna_doorbell_qset *qset;
	unsigned long off;

	ib_mod->bna = bna;

	ib_mod->ib = (struct bna_ib *)
		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->intr = (struct bna_intr *)
		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->idx_seg = (struct bna_ibidx_seg *)
		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ib_mod->ib_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_active_q);

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);

	for (i = 0; i < BFI_MAX_IB; i++) {
		ib_mod->ib[i].ib_id = i;

		ib_mod->ib[i].ib_seg_host_addr_kva =
			res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		ib_mod->ib[i].ib_seg_host_addr.lsb =
			res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		ib_mod->ib[i].ib_seg_host_addr.msb =
			res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
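		/*
		 * Casting a NULL bna_doorbell_qset pointer and taking the
		 * address of a member yields that member's byte offset
		 * within the register block (an offsetof()-style trick);
		 * the same idiom is used for the other register blocks below.
		 */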
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
					* (0x20 >> 2)]);
		ib_mod->ib[i].door_bell.doorbell_addr = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);

		bfa_q_qe_init(&ib_mod->ib[i].qe);
		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);

		bfa_q_qe_init(&ib_mod->intr[i].qe);
		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
	}

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
			bfa_q_qe_init(&ib_mod->idx_seg[count]);
			ib_mod->idx_seg[count].ib_seg_size =
					ibidx_pool[i].pool_entry_size;
			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
			list_add_tail(&ib_mod->idx_seg[count].qe,
				&ib_mod->ibidx_seg_pool[i]);
			count++;
			offset += ibidx_pool[i].pool_entry_size;
		}
	}
}

void
bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
{
	int i;
	int j;
	struct list_head *qe;

	i = 0;
	list_for_each(qe, &ib_mod->ib_free_q)
		i++;

	i = 0;
	list_for_each(qe, &ib_mod->intr_free_q)
		i++;

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
			j++;
	}

	ib_mod->bna = NULL;
}

static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
		enum bna_intr_type intr_type,
		int vector)
{
	struct bna_ib *ib;
	struct bna_intr *intr;

	if (intr_type == BNA_INTR_T_INTX)
		vector = (1 << vector);

	intr = bna_intr_get(ib_mod, intr_type, vector);
	if (intr == NULL)
		return NULL;

	if (intr->ib) {
		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
			bna_intr_put(ib_mod, intr);
			return NULL;
		}
		intr->ib->ref_count++;
		return intr->ib;
	}

	if (list_empty(&ib_mod->ib_free_q)) {
		bna_intr_put(ib_mod, intr);
		return NULL;
	}

	bfa_q_deq(&ib_mod->ib_free_q, &ib);
	bfa_q_qe_init(&ib->qe);

	ib->ref_count = 1;
	ib->start_count = 0;
	ib->idx_mask = 0;

	ib->intr = intr;
	ib->idx_seg = NULL;
	intr->ib = ib;

	ib->bna = ib_mod->bna;

	return ib;
}

static void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
	bna_intr_put(ib_mod, ib->intr);

	ib->ref_count--;

	if (ib->ref_count == 0) {
		ib->intr = NULL;
		ib->bna = NULL;
		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
	}
}

/* Returns index offset - starting from 0 */
static int
bna_ib_reserve_idx(struct bna_ib *ib)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int idx;
	int num_idx;
	int q_idx;

	/* Find the first free index position */
	bna_ib_find_free_ibidx(ib->idx_mask, idx);
	if (idx == BFI_IBIDX_MAX_SEGSIZE)
		return -1;

	/*
	 * Calculate the total number of indexes held by this IB,
	 * including the index newly reserved above.
	 */
	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);

	/* See if there is a free space in the index segment held by this IB */
	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
		ib->idx_mask |= (1 << idx);
		return idx;
	}

	if (ib->start_count)
		return -1;

	/* Allocate a new segment */
	bna_ib_select_segpool(num_idx, q_idx);
	while (1) {
		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
			return -1;
		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
			break;
		q_idx++;
	}
	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
	bfa_q_qe_init(&idx_seg->qe);

	/* Free the old segment */
	if (ib->idx_seg) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
	}

	ib->idx_seg = idx_seg;

	ib->idx_mask |= (1 << idx);

	return idx;
}

static void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int num_idx;
	int cur_q_idx;
	int new_q_idx;

	ib->idx_mask &= ~(1 << idx);

	if (ib->start_count)
		return;

	bna_ib_count_ibidx(ib->idx_mask, num_idx);

	/*
	 * Free the segment, if there are no more indexes in the segment
	 * held by this IB
	 */
	if (!num_idx) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = NULL;
		return;
	}

	/* See if we can move to a smaller segment */
	bna_ib_select_segpool(num_idx, new_q_idx);
	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
	while (new_q_idx < cur_q_idx) {
		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
			break;
		new_q_idx++;
	}
	if (new_q_idx < cur_q_idx) {
		/* Select the new smaller segment */
		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
		bfa_q_qe_init(&idx_seg->qe);
		/* Free the old segment */
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = idx_seg;
	}
}

static int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
	if (ib->start_count)
		return -1;

	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
	ib->ib_config.interpkt_count = ib_config->interpkt_count;
	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;

	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;

	return 0;
}

static void
bna_ib_start(struct bna_ib *ib)
{
	struct bna_ib_blk_mem ib_cfg;
	struct bna_ib_blk_mem *ib_mem;
	u32 pg_num;
	u32 intx_mask;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	ib->start_count++;

	if (ib->start_count > 1)
		return;

	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);

	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
			ib->ib_config.coalescing_timeo << 16) |
		((u32)ib->ib_config.ctrl_flags << 8) |
		(ib->intr->vector));
	ib_cfg.ipkt_n_ent_n_idxof =
		((u32)
		 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
		((u32)ib->idx_seg->ib_seg_size << 8) |
		(ib->idx_seg->ib_idx_tbl_offset);
	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
			ib->ib_config.interpkt_count << 24);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
				HQM_IB_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
					HQM_IB_RAM_BASE_OFFSET);

	ib_mem = (struct bna_ib_blk_mem *)0;
	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);

	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
				HQM_INDX_TBL_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
					HQM_INDX_TBL_RAM_BASE_OFFSET);
	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
		off = (unsigned long)
		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
		writel(0, base_addr + off);
	}

	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
		bna_intx_disable(ib->bna, intx_mask);
		intx_mask &= ~(ib->intr->vector);
		bna_intx_enable(ib->bna, intx_mask);
	}
}

static void
bna_ib_stop(struct bna_ib *ib)
{
	u32 intx_mask;

	ib->start_count--;

	if (ib->start_count == 0) {
		writel(BNA_DOORBELL_IB_INT_DISABLE,
			ib->door_bell.doorbell_addr);
		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
			bna_intx_disable(ib->bna, intx_mask);
			intx_mask |= (ib->intr->vector);
			bna_intx_enable(ib->bna, intx_mask);
		}
	}
}

static void
bna_ib_fail(struct bna_ib *ib)
{
	ib->start_count = 0;
}

/**
 * RXF
 */
static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
			enum bna_rxf_event);

static struct bfa_sm_table rxf_sm_table[] = {
	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
};

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
		break;

	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_MOD:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_STARTED:
	case RXF_E_STOPPED:
	case RXF_E_CAM_FLTR_RESP:
		/**
		 * These events are received due to flushing of mbox
		 * when device fails
		 */
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
{
	__rxf_config_set(rxf);
	__rxf_rit_set(rxf);
	rxf_enable(rxf);
}

static void
bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it can not be waiting for filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
		break;

	case RXF_E_FAIL:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_STARTED:
		/**
		 * Force rxf_process_filter() to go through initial
		 * config
		 */
		if ((rxf->ucast_active_mac != NULL) &&
			(rxf->ucast_pending_set == 0))
			rxf->ucast_pending_set = 1;

		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;

		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;

		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
{
	if (!rxf_process_packet_filter(rxf)) {
		/* No more pending CAM entries to update */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it can not be waiting for filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_process_packet_filter(rxf)) {
			/* No more pending CAM entries to update */
			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);

	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
		else
			bfa_fsm_send_event(rxf, RXF_E_RESUME);
	}
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		/* Hack to get FSM start clearing CAM entries */
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
		break;

	case RXF_E_RESUME:
		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * Note: Do not add rxf_clear_packet_filter here.
	 * It will overstep mbox when this transition happens:
	 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_clear_packet_filter(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
			rxf_disable(rxf);
		}
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * NOTE: Do not add rxf_disable here.
	 * It will overstep mbox when this transition happens:
	 * start_wait -> stop_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		/**
		 * This event is received due to abrupt transition from
		 * bna_rxf_sm_start_wait state on receiving
		 * RXF_E_STOP event
		 */
		rxf_disable(rxf);
		break;

	case RXF_E_STOPPED:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &=
		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
	__rxf_disable(rxf);
}

static void
bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STOPPED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
	__rxf_enable(rxf);
}

static void
bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
{
	__bna_rxf_stat_clr(rxf);
}

static void
bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_STAT_CLEARED:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
__rxf_enable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 1;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_enabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_disable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 0;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_disabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_config_set(struct bna_rxf *rxf)
{
	u32 i;
	struct bna_rss_mem *rss_mem;
	struct bna_rx_fndb_ram *rx_fndb_ram;
	struct bna *bna = rxf->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			RSS_TABLE_BASE_OFFSET);

	rss_mem = (struct bna_rss_mem *)0;

	/* Configure RSS if required */
	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
		/* configure RSS Table */
		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
			bna->port_num, RSS_TABLE_BASE_OFFSET),
			bna->regs.page_addr);

		/* temporarily disable RSS, while hash value is written */
		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(0, base_addr + off);

		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
			off = (unsigned long)
			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
				base_addr + off);
		}

		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
			base_addr + off);
	}

	/* Configure RxF */
	writel(BNA_GET_PAGE_NUM(
		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
		RX_FNDB_RAM_BASE_OFFSET),
		bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
		RX_FNDB_RAM_BASE_OFFSET);

	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;

	/* We always use RSS table 0 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
		base_addr + off);

	/* small large buffer enable/disable */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
		base_addr + off);

	/* RIT offset, HDS forced offset, multicast RxQ Id */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
	writel((rxf->rit_segment->rit_offset << 16) |
		(rxf->forced_offset << 8) |
		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
		base_addr + off);

	/*
	 * default vlan tag, default function enable, strip vlan bytes,
	 * HDS type, header size
	 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
	writel(((u32)rxf->default_vlan_tag << 16) |
		(rxf->ctrl_flags &
			(BNA_RXF_CF_DEFAULT_VLAN |
			BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
			BNA_RXF_CF_VLAN_STRIP)) |
		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
		rxf->hds_cfg.header_size,
		base_addr + off);
}

void
__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
{
	struct bna *bna = rxf->rx->bna;
	int i;

	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
			bna->regs.page_addr);

	if (status == BNA_STATUS_T_ENABLED) {
		/* enable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(rxf->vlan_filter_table[i],
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	} else {
		/* disable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(0xffffffff,
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	}
}

static void
__rxf_rit_set(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_rit_mem *rit_mem;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			FUNCTION_TO_RXQ_TRANSLATE);

	rit_mem = (struct bna_rit_mem *)0;

	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
		FUNCTION_TO_RXQ_TRANSLATE),
		bna->regs.page_addr);

	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
			rxf->rit_segment->rit[i].small_rxq_id,
			base_addr + off);
	}
}

static void
__bna_rxf_stat_clr(struct bna_rxf *rxf)
{
	struct bfi_ll_stats_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = 0;
	ll_req.txf_id_mask[0] = 0;
	ll_req.txf_id_mask[1] = 0;
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			bna_rxf_cb_stats_cleared, rxf);
	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
rxf_enable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STARTED);
	else {
		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
		__rxf_enable(rxf);
	}
}

static void
rxf_cb_enabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STARTED);
}

static void
rxf_disable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
	else
		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
	__rxf_disable(rxf);
}

static void
rxf_cb_disabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
}

void
rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
}

static void
bna_rxf_cb_stats_cleared(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
}

void
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		const struct bna_mac *mac_addr)
{
	struct bfi_ll_mac_addr_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
				rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static int
rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		return 1;
	}

	/* Delete multicast entries previously added */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	return 0;
}

static int
rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
{
	/* Apply the VLAN filter */
	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
	}

	/* Apply RSS configuration */
	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
			/* RSS is being disabled */
			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		} else {
			/* RSS is being enabled or reconfigured */
			rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		}
	}

	return 0;
}

/**
 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
 * command. Also processes pending filter configuration - promiscuous mode,
 * default mode, allmulti mode - and issues mailbox command or directly applies
 * to h/w
 */
static int
rxf_process_packet_filter(struct bna_rxf *rxf)
{
	/* Set the default MAC first */
	if (rxf->ucast_pending_set > 0) {
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
				rxf->ucast_active_mac);
		rxf->ucast_pending_set--;
		return 1;
	}

	if (rxf_process_packet_filter_ucast(rxf))
		return 1;

	if (rxf_process_packet_filter_mcast(rxf))
		return 1;

	if (rxf_process_packet_filter_promisc(rxf))
		return 1;

	if (rxf_process_packet_filter_allmulti(rxf))
		return 1;

	if (rxf_process_packet_filter_vlan(rxf))
		return 1;

	return 0;
}

static int
rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 3. delete pending mcast entries */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	/* 4. clear active mcast entries; move them to pending_add_q */
	if (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
		return 1;
	}

	return 0;
}

/**
 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
 * so that they are added to CAM again in the rxf start path. Moves the current
 * filter settings - promiscuous, default, allmulti - to pending filter
 * configuration
 */
static int
rxf_clear_packet_filter(struct bna_rxf *rxf)
{
	if (rxf_clear_packet_filter_ucast(rxf))
		return 1;

	if (rxf_clear_packet_filter_mcast(rxf))
		return 1;

	/* 5. clear active default MAC in the CAM */
	if (rxf->ucast_pending_set > 0)
		rxf->ucast_pending_set = 0;

	if (rxf_clear_packet_filter_promisc(rxf))
		return 1;

	if (rxf_clear_packet_filter_allmulti(rxf))
		return 1;

	return 0;
}

static void
rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 3. Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
	}

	/* 4. Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}
}

/**
 * In the rxf fail path, throws away the ucast/mcast entries pending for
 * deletion, moves all active ucast/mcast entries to pending queue so that
 * they are added back to CAM in the rxf start path. Also moves the current
 * filter configuration to pending filter configuration.
 */
static void
rxf_reset_packet_filter(struct bna_rxf *rxf)
{
	rxf_reset_packet_filter_ucast(rxf);

	rxf_reset_packet_filter_mcast(rxf);

	/* 5. Turn off ucast set flag */
	rxf->ucast_pending_set = 0;

	rxf_reset_packet_filter_promisc(rxf);

	rxf_reset_packet_filter_allmulti(rxf);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	struct list_head *qe;
	struct bna_rxp *rxp;

	/* rxf_id is initialized during rx_mod init */
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_active_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;

	rxf->rxf_oper_state = (q_config->paused) ?
		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;

	bna_rxf_adv_init(rxf, rx, q_config);

	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
					q_config->num_paths);

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (q_config->rxp_type == BNA_RXP_SINGLE)
			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
		else
			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
		break;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));

	/* Set up VLAN 0 for pure priority tagged packets */
	rxf->vlan_filter_table[0] |= 1;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_mac *mac;

	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
	rxf->rit_segment = NULL;

	rxf->ucast_pending_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_active_mac) {
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_active_mac);
		rxf->ucast_active_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Turn off pending promisc mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* system promisc state should be pending */
		BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}
	/* Promisc mode should not be active */
	BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);

	/* Turn off pending all-multi mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	}
	/* Allmulti mode should not be active */
	BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
				1 << (rx->rxf.rxf_id - 32));
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
	else
		rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
				1 << (rx->rxf.rxf_id - 32);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

int
bna_rxf_state_get(struct bna_rxf *rxf)
{
	return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_active_mac == NULL) {
		rxf->ucast_active_mac =
				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_active_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
	}

	memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set++;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->mcast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *,
				  enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	struct bna_mac *mac1;
	int skip;
	int delete;
	int need_hw_config = 0;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Schedule for addition */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		skip = 0;

		/* Skip if already added */
		list_for_each(qe, &rxf->mcast_active_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
							mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		/* Skip if pending addition */
		list_for_each(qe, &rxf->mcast_pending_add_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
							mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		need_hw_config = 1;
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Delete the entries that are in the pending_add_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete)
			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		else
			list_add_tail(&mac->qe, &list_head);
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Schedule entries for deletion that are in the active_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete) {
			list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
			need_hw_config = 1;
		} else {
			list_add_tail(&mac->qe, &list_head);
		}
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

/**
 * RX
 */
#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
	struct bna_doorbell_qset *_qset; \
	unsigned long off; \
	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
	(q)->rcb->q_depth = (qdepth); \
	(q)->rcb->unmap_q = unmapq_mem; \
	(q)->rcb->rxq = (q); \
	(q)->rcb->cq = &(rxp)->cq; \
	(q)->rcb->bnad = (bna)->bnad; \
	_qset = (struct bna_doorbell_qset *)0; \
	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
	(q)->rcb->q_dbell = off + \
		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
	(q)->rcb->id = _id; \
} while (0)

#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_callback(rx, status) \
	if ((rx)->stop_cbfn) { \
		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
		(rx)->stop_cbfn = NULL; \
		(rx)->stop_cbarg = NULL; \
	}

/*
 * Since rx_enable is synchronous callback, there is no start_cbfn required.
 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
 * for each rxpath.
 */

#define call_rx_disable_cbfn(rx, status) \
	if ((rx)->disable_cbfn) { \
		(*(rx)->disable_cbfn)((rx)->disable_cbarg, \
				status); \
		(rx)->disable_cbfn = NULL; \
		(rx)->disable_cbarg = NULL; \
	} \

#define rxqs_reqd(type, num_rxqs) \
	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))

#define rx_ib_fail(rx) \
do { \
	struct bna_rxp *rxp; \
	struct list_head *qe; \
	list_for_each(qe, &(rx)->rxp_q) { \
		rxp = (struct bna_rxp *)qe; \
		bna_ib_fail(rxp->cq.ib); \
	} \
} while (0)

static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
static void __bna_rxq_start(struct bna_rxq *rxq);
static void __bna_cq_start(struct bna_cq *cq);
static void bna_rit_create(struct bna_rx *rx);
static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
static void bna_rx_cb_rxq_stopped_all(void *arg);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
	struct bna_rx, enum bna_rx_event);

static const struct bfa_sm_table rx_sm_table[] = {
	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
	{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
	{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
};

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
	}

	call_rx_stop_callback(rx, BNA_CB_SUCCESS);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;
	case RX_E_STOP:
		call_rx_stop_callback(rx, BNA_CB_SUCCESS);
		break;
	case RX_E_FAIL:
		/* no-op */
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	struct bna_rxq *q0 = NULL, *q1 = NULL;

	/* Setup the RIT */
	bna_rit_create(rx);

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rxp->cq.ib);
		GET_RXQS(rxp, q0, q1);
		q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
		__bna_rxq_start(q0);
		rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
		if (q1) {
			__bna_rxq_start(q1);
			rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
		}
		__bna_cq_start(&rxp->cq);
	}

	bna_rxf_start(&rx->rxf);
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}

void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
	}

	bna_llport_rx_started(&rx->bna->port.llport);
}

void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bna_llport_rx_stopped(&rx->bna->port.llport);
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	case RX_E_STOP:
		bna_llport_rx_stopped(&rx->bna->port.llport);
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}

void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
	bna_rxf_stop(&rx->rxf);
}

void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
		break;
	case RX_E_RXF_STARTED:
		/**
		 * RxF was in the process of starting up when
		 * RXF_E_STOP was issued. Ignore this event
		 */
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}

void
bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct list_head *qe;
	u32 rxq_mask[2] = {0, 0};

	/* Only one call to multi-rxq-stop for all RXPs in this RX */
	bfa_wc_up(&rx->rxq_stop_wc);
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		GET_RXQS(rxp, q0, q1);
		if (q0->rxq_id < 32)
			rxq_mask[0] |= ((u32)1 << q0->rxq_id);
		else
			rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
		if (q1) {
			if (q1->rxq_id < 32)
				rxq_mask[0] |= ((u32)1 << q1->rxq_id);
			else
				rxq_mask[1] |= ((u32)
						1 << (q1->rxq_id - 32));
		}
	}

	__bna_multi_rxq_stop(rxp, rxq_mask);
}

void
bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	struct bna_rxp *rxp = NULL;
	struct list_head *q…