/drivers/net/ethernet/brocade/bna/bfa_msgq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

/* MSGQ module source file. */

#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"
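
/*
 * Fire a command entry's completion callback. The callback pointers are
 * cleared before the call so the entry cannot be completed twice.
 */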
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)	\
{						\
	bfa_msgq_cmdcbfn_t cbfn;		\
	void *cbarg;				\
	cbfn = (_cmdq_ent)->cbfn;		\
	cbarg = (_cmdq_ent)->cbarg;		\
	(_cmdq_ent)->cbfn = NULL;		\
	(_cmdq_ent)->cbarg = NULL;		\
	if (cbfn) {				\
		cbfn(cbarg, (_status));		\
	}					\
}

static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

enum cmdq_event {
	CMDQ_E_START = 1,
	CMDQ_E_STOP = 2,
	CMDQ_E_FAIL = 3,
	CMDQ_E_POST = 4,
	CMDQ_E_INIT_RESP = 5,
	CMDQ_E_DB_READY = 6,
};
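
/*
 * Command queue state machine: stopped -> init_wait -> ready, detouring
 * through dbell_wait while a producer-index doorbell is outstanding.
 * CMDQ_E_STOP/CMDQ_E_FAIL return the queue to stopped from any state.
 */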
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq, enum cmdq_event);
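
/* Entering stopped resets all queue state and fails every pending command. */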
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;

	while (!list_empty(&cmdq->pending_q)) {
		cmdq_ent = list_first_entry(&cmdq->pending_q,
				struct bfa_msgq_cmd_entry, qe);
		list_del(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}

static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;
	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}

static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;
	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;
	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}

static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;
	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}

static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;
	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;
	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;
	default:
		bfa_sm_fault(event);
	}
}
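
/*
 * Notify the IOC of the new producer index via a mailbox doorbell.
 * bfa_nw_ioc_mbox_queue() returns false when the message was written to
 * the mailbox immediately, in which case no deferred completion will
 * follow and readiness is signalled directly.
 */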
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}

static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
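
/*
 * Copy a command into the ring, one BFI_MSGQ_CMD_ENTRY_SIZE slot per
 * iteration, advancing the producer index with wraparound. Callers must
 * first check that BFA_MSGQ_FREE_CNT() covers the command.
 */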
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	int num_entries = 0;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
		num_entries++;
	}
}
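
/*
 * Consumer-index doorbell from the IOC: record the new CI, then post as
 * many pending commands as now fit in the ring.
 */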
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = list_first_entry(&cmdq->pending_q,
				struct bfa_msgq_cmd_entry, qe);
		if (ntohs(cmd->msg_hdr->num_entries) <=
		    BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
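
/*
 * The IOC can request a read-back of cmdq contents. The data is
 * returned in BFI_CMD_COPY_SZ chunks, each tagged with an incrementing
 * mailbox token, until bytes_to_copy is exhausted.
 */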
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}

static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}
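
/*
 * Response queue: mirrors the cmdq state machine, but here the host is
 * the consumer, so the doorbell carries the consumer index to the IOC.
 */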
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

enum rspq_event {
	RSPQ_E_START = 1,
	RSPQ_E_STOP = 2,
	RSPQ_E_FAIL = 3,
	RSPQ_E_RESP = 4,
	RSPQ_E_INIT_RESP = 5,
	RSPQ_E_DB_READY = 6,
};

bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, enum rspq_event);

static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}

static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}

static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;
	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}

static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;
	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}

static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;
	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;
	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;
	default:
		bfa_sm_fault(event);
	}
}
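
/* Acknowledge consumed responses by sending the rspq consumer index. */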
static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;

	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}

static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
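
/*
 * Producer-index doorbell from the IOC: dispatch each response to the
 * handler registered for its message class, advancing the consumer
 * index by each message's entry count. An out-of-range class or a
 * missing handler stops the drain.
 */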
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}

static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}
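
/*
 * bfa_msgq_init() runs once both queue state machines have reached
 * init_wait (the init_wc wait counter drops to zero); it sends the DMA
 * address and depth of each ring to the IOC in a single config request.
 */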
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq, struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}

static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}

static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;
	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;
	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;
	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;
	default:
		BUG_ON(1);
	}
}
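
/*
 * IOC event hook: start both queues on enable; stop or fail them on
 * disable or failure.
 */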
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;
	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;
	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;
	default:
		break;
	}
}
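
/*
 * One contiguous DMA block holds both rings, each rounded up to
 * BFA_DMA_ALIGN_SZ; bfa_msgq_memclaim() carves out the cmdq first,
 * then the rspq.
 */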
u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}

void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}

void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}

void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn = cbfn;
	msgq->rspq.rsphdlr[mc].cbarg = cbarg;
}
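
/*
 * Post a command: copy it into the ring if it fits, otherwise park it
 * on the pending queue until a consumer-index doorbell frees space.
 */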
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
	    BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
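
/*
 * Copy buf_len bytes of response data out of the ring, starting at the
 * current consumer index, without advancing the queue's own CI.
 */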
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}