PageRenderTime 77ms CodeModel.GetById 14ms app.highlight 53ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/net/bna/bna_ctrl.c

https://bitbucket.org/ndreys/linux-sunxi
C | 3077 lines | 2328 code | 519 blank | 230 comment | 222 complexity | 76a1af13028ca3238daa335a5fd91915 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated; view the full file for the complete source.

   1/*
   2 * Linux network driver for Brocade Converged Network Adapter.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License (GPL) Version 2 as
   6 * published by the Free Software Foundation
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 */
  13/*
  14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15 * All rights reserved
  16 * www.brocade.com
  17 */
  18#include "bna.h"
  19#include "bfa_sm.h"
  20#include "bfa_wc.h"
  21
/* Forward declaration; the definition lives with the device code below */
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);

/**
 * bna_port_cb_link_up - handle a link-up AEN from firmware
 * @port: port whose link came up
 * @aen: link-up event message
 * @status: AEN reason code (currently unused in this handler)
 *
 * Records the new link state (CEE_UP when the AEN indicates CEE
 * negotiation succeeded, plain LINK_UP otherwise), derives the active
 * priority from the priority bitmap, then notifies the Tx module and
 * the driver's link callback.
 */
static void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
			int status)
{
	int i;
	u8 prio_map;

	port->llport.link_status = BNA_LINK_UP;
	if (aen->cee_linkup)
		port->llport.link_status = BNA_CEE_UP;

	/* Compute the priority: index of the lowest set bit in prio_map,
	 * or 0 when no priority bit is set at all */
	prio_map = aen->prio_map;
	if (prio_map) {
		for (i = 0; i < 8; i++) {
			if ((prio_map >> i) & 0x1)
				break;
		}
		port->priority = i;
	} else
		port->priority = 0;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
	port->link_cbfn(port->bna->bnad, port->llport.link_status);
}
  51
/**
 * bna_port_cb_link_down - handle a link-down AEN from firmware
 * @port: port whose link went down
 * @status: AEN reason code (currently unused in this handler)
 *
 * Marks the link down and notifies the Tx module and driver callback.
 */
static void
bna_port_cb_link_down(struct bna_port *port, int status)
{
	port->llport.link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
  61
  62static inline int
  63llport_can_be_up(struct bna_llport *llport)
  64{
  65	int ready = 0;
  66	if (llport->type == BNA_PORT_T_REGULAR)
  67		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
  68			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
  69			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
  70	else
  71		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
  72			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
  73			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
  74	return ready;
  75}
  76
/* An llport satisfying the "can be up" conditions is considered up */
#define llport_is_up llport_can_be_up

/* Events fed into the llport state machine */
enum bna_llport_event {
	LLPORT_E_START			= 1,
	LLPORT_E_STOP			= 2,
	LLPORT_E_FAIL			= 3,
	LLPORT_E_UP			= 4,
	LLPORT_E_DOWN			= 5,
	LLPORT_E_FWRESP_UP_OK		= 6,
	LLPORT_E_FWRESP_UP_FAIL		= 7,
	LLPORT_E_FWRESP_DOWN		= 8
};
  89
/* Physical-port-enable AEN: set the flag, then drive the FSM up if the
 * port now satisfies all up conditions */
static void
bna_llport_cb_port_enabled(struct bna_llport *llport)
{
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;

	if (llport_can_be_up(llport))
		bfa_fsm_send_event(llport, LLPORT_E_UP);
}

/* Physical-port-disable AEN: capture the up state BEFORE clearing the
 * flag (clearing first would make llport_is_up() always false), then
 * drive the FSM down only if the port really was up */
static void
bna_llport_cb_port_disabled(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	if (llport_up)
		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
 109
 110/**
 111 * MBOX
 112 */
 113static int
 114bna_is_aen(u8 msg_id)
 115{
 116	switch (msg_id) {
 117	case BFI_LL_I2H_LINK_DOWN_AEN:
 118	case BFI_LL_I2H_LINK_UP_AEN:
 119	case BFI_LL_I2H_PORT_ENABLE_AEN:
 120	case BFI_LL_I2H_PORT_DISABLE_AEN:
 121		return 1;
 122
 123	default:
 124		return 0;
 125	}
 126}
 127
/* Dispatch an AEN mailbox message to the matching port/llport handler;
 * unknown AEN ids are silently ignored */
static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
{
	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

	switch (aen->mh.msg_id) {
	case BFI_LL_I2H_LINK_UP_AEN:
		bna_port_cb_link_up(&bna->port, aen, aen->reason);
		break;
	case BFI_LL_I2H_LINK_DOWN_AEN:
		bna_port_cb_link_down(&bna->port, aen->reason);
		break;
	case BFI_LL_I2H_PORT_ENABLE_AEN:
		bna_llport_cb_port_enabled(&bna->port.llport);
		break;
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		bna_llport_cb_port_disabled(&bna->port.llport);
		break;
	default:
		break;
	}
}
 150
 151static void
 152bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
 153{
 154	struct bna *bna = (struct bna *)(llarg);
 155	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
 156	struct bfi_mhdr *cmd_h, *rsp_h;
 157	struct bna_mbox_qe *mb_qe = NULL;
 158	int to_post = 0;
 159	u8 aen = 0;
 160	char message[BNA_MESSAGE_SIZE];
 161
 162	aen = bna_is_aen(mb_rsp->mh.msg_id);
 163
 164	if (!aen) {
 165		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
 166		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
 167		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
 168
 169		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
 170		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
 171			/* Remove the request from posted_q, update state  */
 172			list_del(&mb_qe->qe);
 173			bna->mbox_mod.msg_pending--;
 174			if (list_empty(&bna->mbox_mod.posted_q))
 175				bna->mbox_mod.state = BNA_MBOX_FREE;
 176			else
 177				to_post = 1;
 178
 179			/* Dispatch the cbfn */
 180			if (mb_qe->cbfn)
 181				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
 182
 183			/* Post the next entry, if needed */
 184			if (to_post) {
 185				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
 186				bfa_nw_ioc_mbox_queue(&bna->device.ioc,
 187							&mb_qe->cmd);
 188			}
 189		} else {
 190			snprintf(message, BNA_MESSAGE_SIZE,
 191				       "No matching rsp for [%d:%d:%d]\n",
 192				       mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
 193				       mb_rsp->mh.mtag.i2htok);
 194		pr_info("%s", message);
 195		}
 196
 197	} else
 198		bna_mbox_aen_callback(bna, msg);
 199}
 200
/* Hardware error interrupt: if the firmware halt status bits are set,
 * clear the FW-init-halt bit in the ll_halt register (read-modify-write)
 * before notifying the IOC error ISR */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	u32 init_halt;

	if (intr_status & __HALT_STATUS_BITS) {
		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
		init_halt &= ~__FW_INIT_HALT_P;
		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
	}

	bfa_nw_ioc_error_isr(&bna->device.ioc);
}
 214
/**
 * bna_mbox_handler - top-level interrupt dispatch for mailbox interrupts
 * @bna: bna instance
 * @intr_status: raw interrupt status bits
 *
 * Error interrupts take priority and suppress mailbox processing.
 */
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(intr_status))
		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
}
 225
 226void
 227bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
 228{
 229	struct bfi_mhdr *mh;
 230
 231	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
 232
 233	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
 234	bna->mbox_mod.msg_ctr++;
 235	bna->mbox_mod.msg_pending++;
 236	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
 237		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
 238		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
 239		bna->mbox_mod.state = BNA_MBOX_POSTED;
 240	} else {
 241		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
 242	}
 243}
 244
 245static void
 246bna_mbox_flush_q(struct bna *bna, struct list_head *q)
 247{
 248	struct bna_mbox_qe *mb_qe = NULL;
 249	struct list_head			*mb_q;
 250	void 			(*cbfn)(void *arg, int status);
 251	void 			*cbarg;
 252
 253	mb_q = &bna->mbox_mod.posted_q;
 254
 255	while (!list_empty(mb_q)) {
 256		bfa_q_deq(mb_q, &mb_qe);
 257		cbfn = mb_qe->cbfn;
 258		cbarg = mb_qe->cbarg;
 259		bfa_q_qe_init(mb_qe);
 260		bna->mbox_mod.msg_pending--;
 261
 262		if (cbfn)
 263			cbfn(cbarg, BNA_CB_NOT_EXEC);
 264	}
 265
 266	bna->mbox_mod.state = BNA_MBOX_FREE;
 267}
 268
/* Mailbox module start: nothing to do; kept for start/stop symmetry */
static void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

/* Mailbox module stop: fail every command still awaiting a response */
static void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

/* Register the LL-class mailbox ISR with the IOC and reset queue state */
static void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
	mbox_mod->state = BNA_MBOX_FREE;
	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
	INIT_LIST_HEAD(&mbox_mod->posted_q);
	mbox_mod->bna = bna;
}

/* Drop the back-reference to the owning bna instance */
static void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
	mbox_mod->bna = NULL;
}
 295
 296/**
 297 * LLPORT
 298 */
 299#define call_llport_stop_cbfn(llport, status)\
 300do {\
 301	if ((llport)->stop_cbfn)\
 302		(llport)->stop_cbfn(&(llport)->bna->port, status);\
 303	(llport)->stop_cbfn = NULL;\
 304} while (0)
 305
 306static void bna_fw_llport_up(struct bna_llport *llport);
 307static void bna_fw_cb_llport_up(void *arg, int status);
 308static void bna_fw_llport_down(struct bna_llport *llport);
 309static void bna_fw_cb_llport_down(void *arg, int status);
 310static void bna_llport_start(struct bna_llport *llport);
 311static void bna_llport_stop(struct bna_llport *llport);
 312static void bna_llport_fail(struct bna_llport *llport);
 313
/* External state values reported by bna_llport_state_get() */
enum bna_llport_state {
	BNA_LLPORT_STOPPED		= 1,
	BNA_LLPORT_DOWN			= 2,
	BNA_LLPORT_UP_RESP_WAIT		= 3,
	BNA_LLPORT_DOWN_RESP_WAIT	= 4,
	BNA_LLPORT_UP			= 5,
	BNA_LLPORT_LAST_RESP_WAIT 	= 6
};

/* Declare entry functions and event handlers for each llport state */
bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
			enum bna_llport_event);

/* Maps FSM handler functions to the external state values above */
static struct bfa_sm_table llport_sm_table[] = {
	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
};
 344
/* Entry: report link down to the driver and complete any pending stop.
 * NOTE(review): uses port.link_cbfn here but bnad_cb_port_link_status
 * directly in bna_llport_sm_down_entry(); bna_port_init() wires
 * link_cbfn to that same function, so presumably equivalent -- confirm.
 */
static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
{
	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
}
 351
/* STOPPED state: only START moves the FSM; STOP completes immediately,
 * everything else is tolerated as a late/racing event */
static void
bna_llport_sm_stopped(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_START:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_STOP:
		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
		break;

	case LLPORT_E_FAIL:
		break;

	case LLPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
	case LLPORT_E_FWRESP_DOWN:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
 386
/* Entry: notify the driver that the link is down */
static void
bna_llport_sm_down_entry(struct bna_llport *llport)
{
	bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
}

/* DOWN state: UP kicks off the firmware up-request; STOP/FAIL return
 * to STOPPED */
static void
bna_llport_sm_down(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
 415
/* Entry for UP_RESP_WAIT: the up-request to firmware was (or will be)
 * issued by the transition that got us here; the BUG_ON guards the
 * invariant that this state is only entered when up is permissible */
static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
	BUG_ON(!llport_can_be_up(llport));
	/**
	 * NOTE: Do not call bna_fw_llport_up() here. That will over step
	 * mbox due to down_resp_wait -> up_resp_wait transition on event
	 * LLPORT_E_UP
	 */
}

/* UP_RESP_WAIT state: waiting for firmware to acknowledge an up-request;
 * a DOWN in the meantime flips us to waiting for the down path instead */
static void
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP_OK:
		bfa_fsm_set_state(llport, bna_llport_sm_up);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
 461
/* Entry for DOWN_RESP_WAIT: deliberately empty -- see NOTE below */
static void
bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
{
	/**
	 * NOTE: Do not call bna_fw_llport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * LLPORT_E_DOWN
	 */
}

/* DOWN_RESP_WAIT state: waiting for firmware to acknowledge a
 * down-request; an UP in the meantime flips us back to the up path */
static void
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
 503
/* Entry for UP: nothing to do; link status is reported via AENs */
static void
bna_llport_sm_up_entry(struct bna_llport *llport)
{
}

/* UP state: STOP and DOWN both require a firmware down-request before
 * the state change completes */
static void
bna_llport_sm_up(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		bna_fw_llport_down(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
 532
/* Entry for LAST_RESP_WAIT: nothing to do; the down-request was issued
 * by the transition into this state */
static void
bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
{
}

/* LAST_RESP_WAIT state: a stop was requested while a firmware exchange
 * was outstanding; absorb the final response and go to STOPPED */
static void
bna_llport_sm_last_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to llport
		 */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
 569
/* Build and send a PORT_ADMIN enable request to firmware; completion
 * is delivered to bna_fw_cb_llport_up() via the mailbox queue entry */
static void
bna_fw_llport_admin_up(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_ENABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_up, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}

/* Only regular (non-loopback) ports talk to firmware for admin-up */
static void
bna_fw_llport_up(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_up(llport);
}
 594
/* Firmware completion for the admin-up request.  On failure, clear the
 * flag that allowed the up attempt (PORT_ENABLED for regular ports,
 * ADMIN_UP otherwise) so the FSM does not immediately retry */
static void
bna_fw_cb_llport_up(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	if (status == BFI_LL_CMD_FAIL) {
		if (llport->type == BNA_PORT_T_REGULAR)
			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
		else
			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
	} else
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
}
 610
/* Build and send a PORT_ADMIN disable request to firmware; completion
 * is delivered to bna_fw_cb_llport_down() */
static void
bna_fw_llport_admin_down(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_DISABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_down, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}

/* Only regular (non-loopback) ports talk to firmware for admin-down */
static void
bna_fw_llport_down(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_down(llport);
}

/* Firmware completion for the admin-down request; always reported to
 * the FSM as FWRESP_DOWN regardless of status */
static void
bna_fw_cb_llport_down(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
}
 644
/* llport stop completion: decrement the port's child-stop wait counter;
 * when all children have stopped, the counter's resume callback fires */
static void
bna_port_cb_llport_stopped(struct bna_port *port,
				enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}
 651
/* One-time llport initialization: set initial flags, wire the back
 * pointer and park the FSM in STOPPED.
 * NOTE(review): flags are OR-ed rather than assigned -- presumably the
 * containing structure is zeroed by the caller; confirm. */
static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	llport->type = BNA_PORT_T_REGULAR;
	llport->bna = bna;

	llport->link_status = BNA_LINK_DOWN;

	llport->rx_started_count = 0;

	llport->stop_cbfn = NULL;

	bfa_q_qe_init(&llport->mbox_qe.qe);

	bfa_fsm_set_state(llport, bna_llport_sm_stopped);
}

/* Tear-down counterpart: clear the flags set in init and drop the back
 * pointer */
static void
bna_llport_uninit(struct bna_llport *llport)
{
	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	llport->bna = NULL;
}
 679
/* Drive the llport FSM with a START event */
static void
bna_llport_start(struct bna_llport *llport)
{
	bfa_fsm_send_event(llport, LLPORT_E_START);
}

/* Arm the stop-completion callback (reported to the parent port's wait
 * counter), then drive the FSM with STOP */
static void
bna_llport_stop(struct bna_llport *llport)
{
	llport->stop_cbfn = bna_port_cb_llport_stopped;

	bfa_fsm_send_event(llport, LLPORT_E_STOP);
}

/* Device-failure path: restore the enabled flag so a later re-init
 * starts from the default state, then fail the FSM */
static void
bna_llport_fail(struct bna_llport *llport)
{
	/* Reset the physical port status to enabled */
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}

/* Translate the current FSM handler into an external state value */
static int
bna_llport_state_get(struct bna_llport *llport)
{
	return bfa_sm_to_state(llport_sm_table, llport->fsm);
}
 707
/* An Rx object started: on the first one, mark RX_STARTED and bring the
 * port up if all other up conditions hold */
void
bna_llport_rx_started(struct bna_llport *llport)
{
	llport->rx_started_count++;

	if (llport->rx_started_count == 1) {

		llport->flags |= BNA_LLPORT_F_RX_STARTED;

		if (llport_can_be_up(llport))
			bfa_fsm_send_event(llport, LLPORT_E_UP);
	}
}

/* An Rx object stopped: capture the up state BEFORE clearing the flag,
 * then take the port down only when the last Rx is gone and the port
 * really was up */
void
bna_llport_rx_stopped(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->rx_started_count--;

	if (llport->rx_started_count == 0) {

		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;

		if (llport_up)
			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
	}
}
 737
 738/**
 739 * PORT
 740 */
 741#define bna_port_chld_start(port)\
 742do {\
 743	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
 744					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
 745	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
 746					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
 747	bna_llport_start(&(port)->llport);\
 748	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
 749	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
 750} while (0)
 751
 752#define bna_port_chld_stop(port)\
 753do {\
 754	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
 755					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
 756	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
 757					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
 758	bfa_wc_up(&(port)->chld_stop_wc);\
 759	bfa_wc_up(&(port)->chld_stop_wc);\
 760	bfa_wc_up(&(port)->chld_stop_wc);\
 761	bna_llport_stop(&(port)->llport);\
 762	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
 763	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
 764} while (0)
 765
 766#define bna_port_chld_fail(port)\
 767do {\
 768	bna_llport_fail(&(port)->llport);\
 769	bna_tx_mod_fail(&(port)->bna->tx_mod);\
 770	bna_rx_mod_fail(&(port)->bna->rx_mod);\
 771} while (0)
 772
 773#define bna_port_rx_start(port)\
 774do {\
 775	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
 776					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
 777	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
 778} while (0)
 779
 780#define bna_port_rx_stop(port)\
 781do {\
 782	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
 783					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
 784	bfa_wc_up(&(port)->chld_stop_wc);\
 785	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
 786} while (0)
 787
 788#define call_port_stop_cbfn(port, status)\
 789do {\
 790	if ((port)->stop_cbfn)\
 791		(port)->stop_cbfn((port)->stop_cbarg, status);\
 792	(port)->stop_cbfn = NULL;\
 793	(port)->stop_cbarg = NULL;\
 794} while (0)
 795
 796#define call_port_pause_cbfn(port, status)\
 797do {\
 798	if ((port)->pause_cbfn)\
 799		(port)->pause_cbfn((port)->bna->bnad, status);\
 800	(port)->pause_cbfn = NULL;\
 801} while (0)
 802
 803#define call_port_mtu_cbfn(port, status)\
 804do {\
 805	if ((port)->mtu_cbfn)\
 806		(port)->mtu_cbfn((port)->bna->bnad, status);\
 807	(port)->mtu_cbfn = NULL;\
 808} while (0)
 809
/* Forward declarations for the port's firmware pause/MTU helpers */
static void bna_fw_pause_set(struct bna_port *port);
static void bna_fw_cb_pause_set(void *arg, int status);
static void bna_fw_mtu_set(struct bna_port *port);
static void bna_fw_cb_mtu_set(void *arg, int status);

/* Events fed into the port state machine */
enum bna_port_event {
	PORT_E_START			= 1,
	PORT_E_STOP			= 2,
	PORT_E_FAIL			= 3,
	PORT_E_PAUSE_CFG		= 4,
	PORT_E_MTU_CFG			= 5,
	PORT_E_CHLD_STOPPED		= 6,
	PORT_E_FWRESP_PAUSE		= 7,
	PORT_E_FWRESP_MTU		= 8
};

/* External state values reported via port_sm_table */
enum bna_port_state {
	BNA_PORT_STOPPED		= 1,
	BNA_PORT_MTU_INIT_WAIT		= 2,
	BNA_PORT_PAUSE_INIT_WAIT	= 3,
	BNA_PORT_LAST_RESP_WAIT		= 4,
	BNA_PORT_STARTED		= 5,
	BNA_PORT_PAUSE_CFG_WAIT		= 6,
	BNA_PORT_RX_STOP_WAIT		= 7,
	BNA_PORT_MTU_CFG_WAIT 		= 8,
	BNA_PORT_CHLD_STOP_WAIT		= 9
};

/* Declare entry functions and event handlers for each port state */
bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, started, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
			enum bna_port_event);

/* Maps FSM handler functions to the external state values above */
static struct bfa_sm_table port_sm_table[] = {
	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
};
 868
/* Entry for STOPPED: complete any callbacks still pending from aborted
 * pause/MTU/stop operations */
static void
bna_port_sm_stopped_entry(struct bna_port *port)
{
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
	call_port_stop_cbfn(port, BNA_CB_SUCCESS);
}

/* STOPPED state: only START advances; config requests and late events
 * complete or no-op immediately */
static void
bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_START:
		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		break;

	case PORT_E_STOP:
		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_FAIL:
		/* No-op */
		break;

	case PORT_E_PAUSE_CFG:
		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_MTU_CFG:
		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_CHLD_STOPPED:
		/**
		 * This event is received due to LLPort, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
 922
/* Entry for MTU_INIT_WAIT: push the current MTU to firmware */
static void
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}

/* MTU_INIT_WAIT state: waiting for firmware to ack the initial MTU; if
 * the MTU changed again meanwhile, re-send before moving on to the
 * pause configuration step */
static void
bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		/* No-op */
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_MTU:
		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bna_fw_mtu_set(port);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
		}
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
 962
/* Entry for PAUSE_INIT_WAIT: push the pause configuration to firmware */
static void
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}

/* PAUSE_INIT_WAIT state: waiting for firmware to ack the initial pause
 * config; re-sends on intervening pause changes, loops back to the MTU
 * step on intervening MTU changes, otherwise starts the children */
static void
bna_port_sm_pause_init_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_PAUSE:
		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
			bna_fw_pause_set(port);
		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_started);
			bna_port_chld_start(port);
		}
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1007
/* Entry for LAST_RESP_WAIT: nothing to do; just absorbing the final
 * firmware response after a stop request */
static void
bna_port_sm_last_resp_wait_entry(struct bna_port *port)
{
}

/* LAST_RESP_WAIT state: any outstanding firmware response (or a device
 * failure) completes the stop */
static void
bna_port_sm_last_resp_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1028
/* Entry for STARTED: complete pending pause/MTU config callbacks */
static void
bna_port_sm_started_entry(struct bna_port *port)
{
	/**
	 * NOTE: Do not call bna_port_chld_start() here, since it will be
	 * inadvertently called during pause_cfg_wait->started transition
	 * as well
	 */
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
}

/* STARTED state: runtime reconfiguration entry points; a pause change
 * goes straight to firmware, an MTU change first stops Rx */
static void
bna_port_sm_started(struct bna_port *port,
			enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_PAUSE_CFG:
		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
		break;

	case PORT_E_MTU_CFG:
		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1067
/* Entry for PAUSE_CFG_WAIT: push the new pause config to firmware */
static void
bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}

/* PAUSE_CFG_WAIT state: a runtime pause change awaiting firmware ack */
static void
bna_port_sm_pause_cfg_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_PAUSE:
		bfa_fsm_set_state(port, bna_port_sm_started);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1092
/* Entry for RX_STOP_WAIT: quiesce the Rx path before the MTU change */
static void
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
{
	bna_port_rx_stop(port);
}

/* RX_STOP_WAIT state: once Rx reports stopped, push the new MTU */
static void
bna_port_sm_rx_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1117
/* Entry for MTU_CFG_WAIT: push the new MTU to firmware */
static void
bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}

/* MTU_CFG_WAIT state: once firmware acks the MTU, restart Rx and go
 * back to STARTED */
static void
bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_started);
		bna_port_rx_start(port);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1142
/* Entry for CHLD_STOP_WAIT: stop llport, Tx and Rx; completion arrives
 * as PORT_E_CHLD_STOPPED via the wait counter */
static void
bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
{
	bna_port_chld_stop(port);
}

/* CHLD_STOP_WAIT state: waiting for all children to report stopped */
static void
bna_port_sm_chld_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
1167
1168static void
1169bna_fw_pause_set(struct bna_port *port)
1170{
1171	struct bfi_ll_set_pause_req ll_req;
1172
1173	memset(&ll_req, 0, sizeof(ll_req));
1174	ll_req.mh.msg_class = BFI_MC_LL;
1175	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
1176	ll_req.mh.mtag.h2i.lpu_id = 0;
1177
1178	ll_req.tx_pause = port->pause_config.tx_pause;
1179	ll_req.rx_pause = port->pause_config.rx_pause;
1180
1181	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1182			bna_fw_cb_pause_set, port);
1183
1184	bna_mbox_send(port->bna, &port->mbox_qe);
1185}
1186
/*
 * Mailbox completion for the pause request: release the queue element
 * and drive the port FSM forward.
 */
static void
bna_fw_cb_pause_set(void *arg, int status)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
}
1195
/* Send the current port MTU to firmware (response: bna_fw_cb_mtu_set()) */
void
bna_fw_mtu_set(struct bna_port *port)
{
	struct bfi_ll_mtu_info_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
	ll_req.mtu = htons((u16)port->mtu);	/* firmware expects network order */

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_mtu_set, port);
	bna_mbox_send(port->bna, &port->mbox_qe);
}
1208
/*
 * Mailbox completion for the MTU request: release the queue element
 * and drive the port FSM forward.
 */
void
bna_fw_cb_mtu_set(void *arg, int status)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
}
1217
/* Wait-counter resume callback: all port children have now stopped */
static void
bna_port_cb_chld_stopped(void *arg)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
}
1225
/* One-time initialization of the port object and its link-layer port */
static void
bna_port_init(struct bna_port *port, struct bna *bna)
{
	port->bna = bna;
	port->flags = 0;
	port->mtu = 0;
	port->type = BNA_PORT_T_REGULAR;

	port->link_cbfn = bnad_cb_port_link_status;

	/* Wait counter fires once all children (TX/RX) report stopped */
	port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
	port->chld_stop_wc.wc_cbarg = port;
	port->chld_stop_wc.wc_count = 0;

	port->stop_cbfn = NULL;
	port->stop_cbarg = NULL;

	port->pause_cbfn = NULL;

	port->mtu_cbfn = NULL;

	bfa_q_qe_init(&port->mbox_qe.qe);

	bfa_fsm_set_state(port, bna_port_sm_stopped);

	bna_llport_init(&port->llport, bna);
}
1253
/* Tear down the port object; counterpart of bna_port_init() */
static void
bna_port_uninit(struct bna_port *port)
{
	bna_llport_uninit(&port->llport);

	port->flags = 0;

	port->bna = NULL;
}
1263
/* Return the port FSM state as its enum value (via port_sm_table) */
static int
bna_port_state_get(struct bna_port *port)
{
	return bfa_sm_to_state(port_sm_table, port->fsm);
}
1269
/*
 * Called when the device becomes ready; starts the port only if it is
 * also administratively enabled.
 */
static void
bna_port_start(struct bna_port *port)
{
	port->flags |= BNA_PORT_F_DEVICE_READY;
	if (port->flags & BNA_PORT_F_ENABLED)
		bfa_fsm_send_event(port, PORT_E_START);
}
1277
/*
 * Stop the port on behalf of the device; completion is reported to
 * bna_device_cb_port_stopped().
 */
static void
bna_port_stop(struct bna_port *port)
{
	port->stop_cbfn = bna_device_cb_port_stopped;
	port->stop_cbarg = &port->bna->device;

	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_STOP);
}
1287
/* Propagate a device failure into the port FSM */
static void
bna_port_fail(struct bna_port *port)
{
	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_FAIL);
}
1294
/* TX module stop completion: decrement the child-stop wait counter */
void
bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}
1300
/* RX module stop completion: decrement the child-stop wait counter */
void
bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}
1306
/* Return the currently configured port MTU */
int
bna_port_mtu_get(struct bna_port *port)
{
	return port->mtu;
}
1312
1313void
1314bna_port_enable(struct bna_port *port)
1315{
1316	if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
1317		return;
1318
1319	port->flags |= BNA_PORT_F_ENABLED;
1320
1321	if (port->flags & BNA_PORT_F_DEVICE_READY)
1322		bfa_fsm_send_event(port, PORT_E_START);
1323}
1324
1325void
1326bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
1327		 void (*cbfn)(void *, enum bna_cb_status))
1328{
1329	if (type == BNA_SOFT_CLEANUP) {
1330		(*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
1331		return;
1332	}
1333
1334	port->stop_cbfn = cbfn;
1335	port->stop_cbarg = port->bna->bnad;
1336
1337	port->flags &= ~BNA_PORT_F_ENABLED;
1338
1339	bfa_fsm_send_event(port, PORT_E_STOP);
1340}
1341
/*
 * Apply a new pause (flow control) configuration; @cbfn is invoked
 * when the firmware acknowledges the change.
 */
void
bna_port_pause_config(struct bna_port *port,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *, enum bna_cb_status))
{
	/* Stored first: the FSM path reads it in bna_fw_pause_set() */
	port->pause_config = *pause_config;

	port->pause_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
}
1353
/*
 * Set a new port MTU; @cbfn is invoked when firmware acknowledges.
 */
void
bna_port_mtu_set(struct bna_port *port, int mtu,
		 void (*cbfn)(struct bnad *, enum bna_cb_status))
{
	/* Stored first: the FSM path reads it in bna_fw_mtu_set() */
	port->mtu = mtu;

	port->mtu_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_MTU_CFG);
}
1364
/* Read the port MAC address from the IOC attributes into @mac */
void
bna_port_mac_get(struct bna_port *port, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
}
1370
1371/**
1372 * DEVICE
1373 */
/*
 * Enable mailbox interrupts for @_device. The interrupt status is read
 * first and discarded; presumably the read clears stale pending status
 * before unmasking -- TODO confirm against register semantics.
 */
#define enable_mbox_intr(_device)\
do {\
	u32 intr_status;\
	bna_intr_status_get((_device)->bna, intr_status);\
	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
	bna_mbox_intr_enable((_device)->bna);\
} while (0)
1381
/* Disable mailbox interrupts for @_device and notify the bnad layer */
#define disable_mbox_intr(_device)\
do {\
	bna_mbox_intr_disable((_device)->bna);\
	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)
1387
/*
 * Per-PCI-function chip register offsets (page number, interrupt
 * status/mask, MSI-X error index), indexed by function number.
 */
static const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
	HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
	HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
	HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
};
1398
/* Events fed into the device state machine */
enum bna_device_event {
	DEVICE_E_ENABLE			= 1,
	DEVICE_E_DISABLE		= 2,
	DEVICE_E_IOC_READY		= 3,
	DEVICE_E_IOC_FAILED		= 4,
	DEVICE_E_IOC_DISABLED		= 5,
	DEVICE_E_IOC_RESET		= 6,
	DEVICE_E_PORT_STOPPED		= 7,
};
1408
/* Reportable device FSM states (mapped via device_sm_table) */
enum bna_device_state {
	BNA_DEVICE_STOPPED		= 1,
	BNA_DEVICE_IOC_READY_WAIT 	= 2,
	BNA_DEVICE_READY		= 3,
	BNA_DEVICE_PORT_STOP_WAIT 	= 4,
	BNA_DEVICE_IOC_DISABLE_WAIT 	= 5,
	BNA_DEVICE_FAILED		= 6
};
1417
/* Forward declarations of the device FSM state handlers */
bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ready, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, failed, struct bna_device,
			enum bna_device_event);
1430
/* Maps FSM state handlers to the enum bna_device_state reported to users */
static struct bfa_sm_table device_sm_table[] = {
	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
};
1439
/* stopped entry: complete any pending stop request exactly once */
static void
bna_device_sm_stopped_entry(struct bna_device *device)
{
	if (device->stop_cbfn)
		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);

	device->stop_cbfn = NULL;
	device->stop_cbarg = NULL;
}
1449
/* Idle state: device is disabled or has finished stopping */
static void
bna_device_sm_stopped(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_ENABLE:
		if (device->intr_type == BNA_INTR_T_MSIX)
			bna_mbox_msix_idx_set(device);
		bfa_nw_ioc_enable(&device->ioc);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	case DEVICE_E_DISABLE:
		/* Re-enter stopped so the stop callback fires */
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
1478
/* Waiting for the IOC to come up after bfa_nw_ioc_enable() */
static void
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
{
	/**
	 * Do not call bfa_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
1487
/*
 * Waiting for IOC readiness. A disable here aborts the pending enable
 * (its callback is completed with BNA_CB_INTERRUPT).
 */
static void
bna_device_sm_ioc_ready_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		if (device->ready_cbfn)
			device->ready_cbfn(device->ready_cbarg,
						BNA_CB_INTERRUPT);
		device->ready_cbfn = NULL;
		device->ready_cbarg = NULL;
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_READY:
		bfa_fsm_set_state(device, bna_device_sm_ready);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
1518
/*
 * ready entry: start the mailbox module and the port, then report
 * enable completion to the caller of bna_device_enable().
 */
static void
bna_device_sm_ready_entry(struct bna_device *device)
{
	bna_mbox_mod_start(&device->bna->mbox_mod);
	bna_port_start(&device->bna->port);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_SUCCESS);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}
1531
/* Operational state */
static void
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
1548
/* port_stop_wait entry: begin stopping the port */
static void
bna_device_sm_port_stop_wait_entry(struct bna_device *device)
{
	bna_port_stop(&device->bna->port);
}
1554
/* Waiting for the port to stop as part of a device disable */
static void
bna_device_sm_port_stop_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_PORT_STOPPED:
		bna_mbox_mod_stop(&device->bna->mbox_mod);
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		/* IOC died mid-stop: quiesce and keep waiting for the port */
		disable_mbox_intr(device);
		bna_port_fail(&device->bna->port);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
1574
/* ioc_disable_wait entry: ask the IOC layer to disable itself */
static void
bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
{
	bfa_nw_ioc_disable(&device->ioc);
}
1580
/* Waiting for IOC disable completion before entering stopped */
static void
bna_device_sm_ioc_disable_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_IOC_DISABLED:
		disable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
1595
/*
 * failed entry: quiesce interrupts, port and mailbox, and complete any
 * pending enable request with failure.
 */
static void
bna_device_sm_failed_entry(struct bna_device *device)
{
	disable_mbox_intr(device);
	bna_port_fail(&device->bna->port);
	bna_mbox_mod_stop(&device->bna->mbox_mod);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_FAIL);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}
1609
/* Failed state: wait for a disable, or an IOC reset to attempt recovery */
static void
bna_device_sm_failed(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_RESET:
		/* IOC is coming back; re-arm interrupts and wait for ready */
		enable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
1628
1629/* IOC callback functions */
1630
1631static void
1632bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
1633{
1634	struct bna_device *device = (struct bna_device *)dev;
1635
1636	if (error)
1637		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1638	else
1639		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
1640}
1641
1642static void
1643bna_device_cb_iocll_disabled(void *dev)
1644{
1645	struct bna_device *device = (struct bna_device *)dev;
1646
1647	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
1648}
1649
1650static void
1651bna_device_cb_iocll_failed(void *dev)
1652{
1653	struct bna_device *device = (struct bna_device *)dev;
1654
1655	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1656}
1657
1658static void
1659bna_device_cb_iocll_reset(void *dev)
1660{
1661	struct bna_device *device = (struct bna_device *)dev;
1662
1663	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
1664}
1665
/*
 * IOC callback vector handed to bfa_nw_ioc_attach(). Initialization is
 * positional: ready, disabled, failed, reset -- keep this order.
 */
static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
	bna_device_cb_iocll_ready,
	bna_device_cb_iocll_disabled,
	bna_device_cb_iocll_failed,
	bna_device_cb_iocll_reset
};
1672
1673/* device */
1674static void
1675bna_adv_device_init(struct bna_device *device, struct bna *bna,
1676		struct bna_res_info *res_info)
1677{
1678	u8 *kva;
1679	u64 dma;
1680
1681	device->bna = bna;
1682
1683	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1684
1685	/**
1686	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1687	 * DMA memory.
1688	 */
1689	BNA_GET_DMA_ADDR(
1690		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1691	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1692
1693	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
1694	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1695	kva += bfa_nw_cee_meminfo();
1696	dma += bfa_nw_cee_meminfo();
1697
1698}
1699
/*
 * Initialize the device object: attach the IOC and claim its memory,
 * attach the common modules, then bring up the mailbox module.
 */
static void
bna_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;

	device->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	bfa_nw_ioc_mem_claim(&device->ioc,
		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
			  dma);

	bna_adv_device_init(device, bna, res_info);
	/*
	 * Initialize mbox_mod only after IOC, so that mbox handler
	 * registration goes through
	 */
	device->intr_type =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
	device->vector =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
	bna_mbox_mod_init(&bna->mbox_mod, bna);

	device->ready_cbfn = device->stop_cbfn = NULL;
	device->ready_cbarg = device->stop_cbarg = NULL;

	bfa_fsm_set_state(device, bna_device_sm_stopped);
}
1738
/* Tear down the device object; counterpart of bna_device_init() */
static void
bna_device_uninit(struct bna_device *device)
{
	bna_mbox_mod_uninit(&device->bna->mbox_mod);

	bfa_nw_ioc_detach(&device->ioc);

	device->bna = NULL;
}
1748
1749static void
1750bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
1751{
1752	struct bna_device *device = (struct bna_device *)arg;
1753
1754	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
1755}
1756
/* Nonzero when the device FSM is in the ready (operational) state */
static int
bna_device_status_get(struct bna_device *device)
{
	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
}
1762
1763void
1764bna_device_enable(struct bna_device *device)
1765{
1766	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
1767		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
1768		return;
1769	}
1770
1771	device->ready_cbfn = bnad_cb_device_enabled;
1772	device->ready_cbarg = device->bna->bnad;
1773
1774	bfa_fsm_send_event(device, DEVICE_E_ENABLE);
1775}
1776
1777void
1778bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
1779{
1780	if (type == BNA_SOFT_CLEANUP) {
1781		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
1782		return;
1783	}
1784
1785	device->stop_cbfn = bnad_cb_device_disabled;
1786	device->stop_cbarg = device->bna->bnad;
1787
1788	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
1789}
1790
/* Return the device FSM state as its enum value (via device_sm_table) */
static int
bna_device_state_get(struct bna_device *device)
{
	return bfa_sm_to_state(device_sm_table, device->fsm);
}
1796
/*
 * NAPI dynamic interrupt moderation table, indexed by [load][bias];
 * presumably the values are coalescing timeouts -- confirm at callers.
 */
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};
1807
1808/* utils */
1809
/* Populate resource requirements for the advanced (device-level) blocks */
static void
bna_adv_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				bfa_nw_cee_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc (currently unused: len 0) */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);

	/* Virtual memory for soft stats */
	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
				sizeof(struct bna_sw_stats);
}
1840
1841static void
1842bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
1843{
1844	struct bna_tx *tx;
1845	struct bna_txq *txq;
1846	struct bna_rx *rx;
1847	struct bna_rxp *rxp;
1848	struct list_head *qe;
1849	struct list_head *txq_qe;
1850	struct list_head *rxp_qe;
1851	struct list_head *mac_qe;
1852	int i;
1853
1854	sw_stats->device_state = bna_device_state_get(&bna->device);
1855	sw_stats->port_state = bna_port_state_get(&bna->port);
1856	sw_stats->port_flags = bna->port.flags;
1857	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
1858	sw_stats->priority = bna->port.priority;
1859
1860	i = 0;
1861	list_for_each(qe, &bna->tx_mod.tx_active_q) {
1862		tx = (struct bna_tx *)qe;
1863		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
1864		sw_stats->tx_stats[i].tx_flags = tx->flags;
1865
1866		sw_stats->tx_stats[i].num_txqs = 0;
1867		sw_stats->tx_stats[i].txq_bmap[0] = 0;
1868		sw_stats->tx_stats[i].txq_bmap[1] = 0;
1869		list_for_each(txq_qe, &tx->txq_q) {
1870			txq = (struct bna_txq *)txq_qe;
1871			if (txq->txq_id < 32)
1872				sw_stats->tx_stats[i].txq_bmap[0] |=
1873						((u32)1 << txq->txq_id);
1874			else
1875				sw_stats->tx_stats[i].txq_bmap[1] |=
1876						((u32)
1877						 1 << (txq->txq_id - 32));
1878			sw_stats->tx_stats[i].num_txqs++;
1879		}
1880
1881		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
1882
1883		i++;
1884	}
1885	sw_stats->num_active_tx = i;
1886
1887	i = 0;
1888	list_for_each(qe, &bna->rx_mod.rx_active_q) {
1889		rx = (struct bna_rx *)qe;
1890		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
1891		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
1892
1893		sw_stats->rx_stats[i].num_rxps = 0;
1894		sw_stats->rx_stats[i].num_rxqs = 0;
1895		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
1896		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
1897		sw_stats->rx_stats[i].cq_bmap[0] = 0;
1898		sw_stats->rx_stats[i].cq_bmap[1] = 0;
1899		list_for_each(rxp_qe, &rx->rxp_q) {
1900			rxp = (struct bna_rxp *)rxp_qe;
1901
1902			sw_stats->rx_stats[i].num_rxqs += 1;
1903
1904			if (rxp->type == BNA_RXP_SINGLE) {
1905				if (rxp->rxq.single.only->rxq_id < 32) {
1906					sw_stats->rx_stats[i].rxq_bmap[0] |=
1907					((u32)1 <<
1908					rxp->rxq.single.only->rxq_id);
1909				} else {
1910					sw_stats->rx_stats[i].rxq_bmap[1] |=
1911					((u32)1 <<
1912					(rxp->rxq.single.only->rxq_id - 32));
1913				}
1914			} else {
1915				if (rxp->rxq.slr.large->rxq_id < 32) {
1916					sw_stats->rx_stats[i].rxq_bmap[0] |=
1917					((u32)1 <<
1918					rxp->rxq.slr.large->rxq_id);
1919				} else {
1920					sw_stats->rx_stats[i].rxq_bmap[1] |=
1921					((u32)1 <<
1922					(rxp->rxq.slr.large->rxq_id - 32));
1923				}
1924
1925				if (rxp->rxq.slr.small->rxq_id < 32) {
1926					sw_stats->rx_stats[i].rxq_bmap[0] |=
1927					((u32)1 <<
1928					rxp->rxq.slr.small->rxq_id);
1929				} else {
1930					sw_stats->rx_stats[i].rxq_bmap[1] |=
1931				((u32)1 <<
1932				 (rxp->rxq.slr.small->rxq_id - 32));
1933				}
1934				sw_stats->rx_stats[i].num_rxqs += 1;
1935			}
1936
1937			if (rxp->cq.cq_id < 32)
1938				sw_stats->rx_stats[i].cq_bmap[0] |=
1939					(1 << rxp->cq.cq_id);
1940			else
1941				sw_stats->rx_stats[i].cq_bmap[1] |=
1942					(1 << (rxp->cq.cq_id - 32));
1943
1944			sw_stats->rx_stats[i].num_rxps++;
1945		}
1946
1947		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
1948		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
1949		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
1950
1951		sw_stats->rx_stats[i].num_active_ucast = 0;
1952		if (rx->rxf.ucast_active_mac)
1953			sw_stats->rx_stats[i].num_active_ucast++;
1954		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
1955			sw_stats->rx_stats[i].num_active_ucast++;
1956
1957		sw_stats->rx_stats[i].num_active_mcast = 0;
1958		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
1959			sw_stats->rx_stats[i].num_active_mcast++;
1960
1961		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
1962		sw_stats->rx_stats[i].vlan_filter_status =
1963						rx->rxf.vlan_filter_status;
1964		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
1965				rx->rxf.vlan_filter_table,
1966				sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
1967
1968		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
1969		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
1970
1971		i++;
1972	}
1973	sw_stats->num_active_rx = i;
1974}
1975
/*
 * Mailbox completion for a stats request. On success, byte-swaps the
 * DMAed stats block and scatters the densely packed per-TxF/per-RxF
 * entries out to their fixed-index slots, walking backwards from the
 * end of the packed region.
 */
static void
bna_fw_cb_stats_get(void *arg, int status)
{
	struct bna *bna = (struct bna *)arg;
	u64 *p_stats;
	int i, count;
	int rxf_count, txf_count;
	u64 rxf_bmap, txf_bmap;

	bfa_q_qe_init(&bna->mbox_qe.qe);

	if (status == 0) {
		/*
		 * Swap every 64-bit word from firmware byte order.
		 * NOTE(review): be64_to_cpu() would express the direction
		 * better; the operation itself is identical.
		 */
		p_stats = (u64 *)bna->stats.hw_stats;
		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
		for (i = 0; i < count; i++)
			p_stats[i] = cpu_to_be64(p_stats[i]);

		/* Count active RxFs from the bitmap saved at request time */
		rxf_count = 0;
		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
			((u64)bna->stats.rxf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
			if (rxf_bmap & ((u64)1 << i))
				rxf_count++;

		/* Likewise for active TxFs */
		txf_count = 0;
		txf_bmap = (u64)bna->stats.txf_bmap[0] |
			((u64)bna->stats.txf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
			if (txf_bmap & ((u64)1 << i))
				txf_count++;

		/* Point just past the packed rxf+txf stats region */
		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
				((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
				txf_count * sizeof(struct bfi_ll_stats_txf))/
				sizeof(u64));

		/* Populate the TXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
			if (txf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_txf)/
						sizeof(u64);
				memcpy(&bna->stats.hw_stats->txf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_txf));
			}

		/* Populate the RXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
			if (rxf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_rxf)/
						sizeof(u64);
				memcpy(&bna->stats.hw_stats->rxf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_rxf));
			}

		bna_sw_stats_get(bna, bna->stats.sw_stats);
		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
	} else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}
2037
/*
 * Ask the firmware to DMA all stats into the host buffer, and remember
 * which RxF/TxF functions were active so the reply can be unpacked in
 * bna_fw_cb_stats_get().
 */
static void
bna_fw_stats_get(struct bna *bna)
{
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);

	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
	ll_req.rxf_id_mask[1] =	htonl(bna->rx_mod.rxf_bmap[1]);
	ll_req.txf_id_mask[0] =	htonl(bna->tx_mod.txf_bmap[0]);
	ll_req.txf_id_mask[1] =	htonl(bna->tx_mod.txf_bmap[1]);

	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_get, bna);
	bna_mbox_send(bna, &bna->mbox_qe);

	/* Snapshot the bitmaps so the completion unpacks consistently */
	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
}
2063
2064void
2065bna_stats_get(struct bna *bna)
2066{
2067	if (bna_device_status_get(&bna->device))
2068		bna_fw_stats_get(bna);
2069	else
2070		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2071}
2072
2073/* IB */
/*
 * Update the IB coalescing timeout; if the IB is already started,
 * refresh the cached doorbell acknowledge value as well.
 */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->ib_config.coalescing_timeo = coalescing_timeo;

	if (ib->start_count)
		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);
}
2083
2084/* RxF */
2085void
2086bna_rxf_adv_init(struct bna_rxf *rxf,
2087		struct bna_rx *rx,
2088		struct bna_rx_config *q_config)
2089{
2090	switch (q_config->rxp_type) {
2091	case BNA_RXP_SINGLE:
2092		/* No-op */
2093		break;
2094	case BNA_RXP_SLR:
2095		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
2096		break;
2097	case BNA_RXP_HDS:
2098		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
2099		rxf->hds_cfg.header_size =
2100				q_config->hds_config.header_size;
2101		rxf->forced_offset = 0;
2102		break;
2103	default:
2104		break;
2105	}
2106
2107	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
2108		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
2109		rxf->rss_cfg.hash

Large files files are truncated, but you can click here to view the full file