
/drivers/net/ethernet/brocade/bna/bfa_msgq.c

http://github.com/mirrors/linux
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

/* MSGQ module source file. */

#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"

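/* Run a command entry's completion callback (if one is set) and clear it,
 * so the callback cannot be invoked a second time for the same entry.
 */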
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
{									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
}

static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

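/* Command queue (cmdq) state machine events. */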
enum cmdq_event {
	CMDQ_E_START			= 1,
	CMDQ_E_STOP			= 2,
	CMDQ_E_FAIL			= 3,
	CMDQ_E_POST			= 4,
	CMDQ_E_INIT_RESP		= 5,
	CMDQ_E_DB_READY			= 6,
};

bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
			enum cmdq_event);

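/* Stopped state: reset the queue indices and fail any commands still
 * waiting on the pending list.
 */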
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		cmdq_ent = list_first_entry(&cmdq->pending_q,
					    struct bfa_msgq_cmd_entry, qe);
		list_del(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}

static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}

static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}

static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}

static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}

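/* Ring the cmdq doorbell by posting the producer index through the IOC
 * mailbox; if the message does not have to be queued behind the mailbox,
 * the ready callback runs synchronously.
 */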
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}

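/* Copy a command into the circular command queue, one
 * BFI_MSGQ_CMD_ENTRY_SIZE slot at a time, advancing the producer index
 * with wrap-around.
 */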
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	int num_entries = 0;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
		num_entries++;
	}
}

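/* Firmware advanced the cmdq consumer index: record it, then post any
 * pending commands that now fit in the queue.
 */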
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = list_first_entry(&cmdq->pending_q,
				       struct bfa_msgq_cmd_entry, qe);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}

static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}

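/* Firmware has asked the host to copy back a region of the command queue;
 * the reply is sent in BFI_CMD_COPY_SZ-sized pieces until the requested
 * length is exhausted.
 */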
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}

static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}

static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

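/* Response queue (rspq) state machine events. */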
enum rspq_event {
	RSPQ_E_START			= 1,
	RSPQ_E_STOP			= 2,
	RSPQ_E_FAIL			= 3,
	RSPQ_E_RESP			= 4,
	RSPQ_E_INIT_RESP		= 5,
	RSPQ_E_DB_READY			= 6,
};

bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
			enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
			enum rspq_event);

static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}

static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}

static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}

static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}

static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}

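/* Acknowledge consumed responses by posting the rspq consumer index
 * through the IOC mailbox.
 */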
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}

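/* Firmware advanced the rspq producer index: walk the new entries and
 * dispatch each message to the handler registered for its message class.
 */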
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}

static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}

static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		 struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}

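/* Called once both queues have entered init_wait (init_wc reaches zero):
 * send the MSGQ INIT request carrying the cmdq/rspq DMA addresses and
 * depths to the firmware.
 */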
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}

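/* Mailbox ISR for the MSGQ class: dispatch firmware-to-host messages. */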
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}

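/* IOC event notification: start both queues on enable, stop them on
 * disable, and fail them on IOC failure.
 */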
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}

u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}

void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa  = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}

void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc    = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}

void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
}

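/* Post a command if the cmdq has room for it; otherwise park it on the
 * pending queue until the consumer index catches up.
 */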
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}

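/* Copy a response that spans multiple rspq entries into a caller-supplied
 * buffer, starting at the current consumer index.
 */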
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}