
/drivers/crypto/marvell/octeontx2/otx2_cptlf.c

https://github.com/kvaneesh/linux
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
#include "rvu_reg.h"

#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32

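/*
 * Program the TIME_WAIT field of this LF's CPT_LF_DONE_WAIT register:
 * the hold-off interval after which a DONE interrupt is raised for any
 * outstanding completions.
 */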
static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
                                        int time_wait)
{
        union otx2_cptx_lf_done_wait done_wait;

        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                      OTX2_CPT_LF_DONE_WAIT);
        done_wait.s.time_wait = time_wait;
        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

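/*
 * Program the NUM_WAIT field of CPT_LF_DONE_WAIT: the number of
 * completions that must accumulate before a DONE interrupt is raised.
 */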
static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
        union otx2_cptx_lf_done_wait done_wait;

        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                      OTX2_CPT_LF_DONE_WAIT);
        done_wait.s.num_wait = num_wait;
        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
                                     int time_wait)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}

static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}

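/*
 * Set this LF's instruction queue priority bit in CPT_AF_LF(x)_CTL via
 * a read-modify-write over the AF mailbox.
 */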
static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
        struct otx2_cptlfs_info *lfs = lf->lfs;
        union otx2_cptx_af_lf_ctrl lf_ctrl;
        int ret;

        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                   CPT_AF_LFX_CTL(lf->slot),
                                   &lf_ctrl.u, lfs->blkaddr);
        if (ret)
                return ret;

        lf_ctrl.s.pri = pri ? 1 : 0;

        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                    CPT_AF_LFX_CTL(lf->slot),
                                    lf_ctrl.u, lfs->blkaddr);
        return ret;
}

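/*
 * Program the engine-group mask in CPT_AF_LF(x)_CTL, selecting which
 * engine groups may execute instructions queued on this LF.
 */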
static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
                                   int eng_grps_mask)
{
        struct otx2_cptlfs_info *lfs = lf->lfs;
        union otx2_cptx_af_lf_ctrl lf_ctrl;
        int ret;

        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                   CPT_AF_LFX_CTL(lf->slot),
                                   &lf_ctrl.u, lfs->blkaddr);
        if (ret)
                return ret;

        lf_ctrl.s.grp = eng_grps_mask;

        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                    CPT_AF_LFX_CTL(lf->slot),
                                    lf_ctrl.u, lfs->blkaddr);
        return ret;
}

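/* Apply the engine-group mask and queue priority to every attached LF. */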
static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
                                 int eng_grp_mask, int pri)
{
        int slot, ret = 0;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                ret = cptlf_set_pri(&lfs->lf[slot], pri);
                if (ret)
                        return ret;

                ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
                if (ret)
                        return ret;
        }
        return ret;
}

static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
        /* Disable instruction queues */
        otx2_cptlf_disable_iqueues(lfs);

        /* Set instruction queues base addresses */
        otx2_cptlf_set_iqueues_base_addr(lfs);

        /* Set instruction queues sizes */
        otx2_cptlf_set_iqueues_size(lfs);

        /* Set done interrupts time wait */
        cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

        /* Set done interrupts num wait */
        cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

        /* Enable instruction queues */
        otx2_cptlf_enable_iqueues(lfs);
}

static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
        /* Disable instruction queues */
        otx2_cptlf_disable_iqueues(lfs);
}

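/*
 * Enable or disable all miscellaneous (error) interrupt sources on
 * every LF by writing the W1S (set) or W1C (clear) enable register.
 */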
static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
        union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
        u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
                           OTX2_CPT_LF_MISC_INT_ENA_W1C;
        int slot;

        irq_misc.s.fault = 0x1;
        irq_misc.s.hwerr = 0x1;
        irq_misc.s.irde = 0x1;
        irq_misc.s.nqerr = 0x1;
        irq_misc.s.nwrp = 0x1;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
                                 irq_misc.u);
}

static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
{
        int slot;

        /* Enable done interrupts */
        for (slot = 0; slot < lfs->lfs_num; slot++)
                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
                                 OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
        /* Enable Misc interrupts */
        cptlf_set_misc_intrs(lfs, true);
}

static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
                                 OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
        cptlf_set_misc_intrs(lfs, false);
}

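/* Return the count of completed instructions pending in CPT_LF_DONE. */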
static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
        union otx2_cptx_lf_done irq_cnt;

        irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                    OTX2_CPT_LF_DONE);
        return irq_cnt.s.done;
}

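/*
 * Miscellaneous interrupt handler: report the first pending error
 * condition found in CPT_LF_MISC_INT and acknowledge it by writing the
 * corresponding bit back.
 */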
static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
        union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
        struct otx2_cptlf_info *lf = arg;
        struct device *dev;

        dev = &lf->lfs->pdev->dev;
        irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                     OTX2_CPT_LF_MISC_INT);
        irq_misc_ack.u = 0x0;

        if (irq_misc.s.fault) {
                dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
                        lf->slot);
                irq_misc_ack.s.fault = 0x1;

        } else if (irq_misc.s.hwerr) {
                dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
                        lf->slot);
                irq_misc_ack.s.hwerr = 0x1;

        } else if (irq_misc.s.nwrp) {
                dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
                        lf->slot);
                irq_misc_ack.s.nwrp = 0x1;

        } else if (irq_misc.s.irde) {
                dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
                irq_misc_ack.s.irde = 0x1;

        } else if (irq_misc.s.nqerr) {
                dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
                irq_misc_ack.s.nqerr = 0x1;

        } else {
                dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
                return IRQ_NONE;
        }

        /* Acknowledge interrupts */
        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                         OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
        return IRQ_HANDLED;
}

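/*
 * DONE interrupt handler: acknowledge however many requests have
 * completed, then schedule the per-LF tasklet that post-processes them.
 */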
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
        union otx2_cptx_lf_done_wait done_wait;
        struct otx2_cptlf_info *lf = arg;
        int irq_cnt;

        /* Read the number of completed requests */
        irq_cnt = cptlf_read_done_cnt(lf);
        if (irq_cnt) {
                done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
                                              lf->slot, OTX2_CPT_LF_DONE_WAIT);
                /* Acknowledge the number of completed requests */
                otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                 OTX2_CPT_LF_DONE_ACK, irq_cnt);

                otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
                if (unlikely(!lf->wqe)) {
                        dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
                                lf->slot);
                        return IRQ_NONE;
                }

                /* Schedule processing of completed requests */
                tasklet_hi_schedule(&lf->wqe->work);
        }
        return IRQ_HANDLED;
}

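/* Free every IRQ registered for the LFs, then mask all LF interrupts. */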
void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
{
        int i, offs, vector;

        for (i = 0; i < lfs->lfs_num; i++) {
                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
                        if (!lfs->lf[i].is_irq_reg[offs])
                                continue;

                        vector = pci_irq_vector(lfs->pdev,
                                                lfs->lf[i].msix_offset + offs);
                        free_irq(vector, &lfs->lf[i]);
                        lfs->lf[i].is_irq_reg[offs] = false;
                }
        }
        cptlf_disable_intrs(lfs);
}

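/* Request one of an LF's MSI-X vectors and record that it is registered. */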
static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
                                        int lf_num, int irq_offset,
                                        irq_handler_t handler)
{
        int ret, vector;

        vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
                                irq_offset);
        ret = request_irq(vector, handler, 0,
                          lfs->lf[lf_num].irq_name[irq_offset],
                          &lfs->lf[lf_num]);
        if (ret)
                return ret;

        lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
        return ret;
}

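/*
 * Register MISC and DONE interrupt handlers for every LF and unmask the
 * interrupts; on failure, unwind whatever was registered so far.
 */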
int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
{
        int irq_offs, ret, i;

        for (i = 0; i < lfs->lfs_num; i++) {
                irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
                snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
                ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
                                                   cptlf_misc_intr_handler);
                if (ret)
                        goto free_irq;

                irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
                snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
                         i);
                ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
                                                   cptlf_done_intr_handler);
                if (ret)
                        goto free_irq;
        }
        cptlf_enable_intrs(lfs);
        return 0;

free_irq:
        otx2_cptlf_unregister_interrupts(lfs);
        return ret;
}

void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
        int slot, offs;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
                        irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
                                              lfs->lf[slot].msix_offset +
                                              offs), NULL);
                free_cpumask_var(lfs->lf[slot].affinity_mask);
        }
}

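/*
 * Pin each LF's interrupt vectors to a CPU local to the device's NUMA
 * node, one CPU per LF, via affinity hints.
 */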
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
        struct otx2_cptlf_info *lf = lfs->lf;
        int slot, offs, ret;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
                        dev_err(&lfs->pdev->dev,
                                "cpumask allocation failed for LF %d\n", slot);
                        ret = -ENOMEM;
                        goto free_affinity_mask;
                }
                cpumask_set_cpu(cpumask_local_spread(slot,
                                dev_to_node(&lfs->pdev->dev)),
                                lf[slot].affinity_mask);

                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
                        ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
                                                lf[slot].msix_offset + offs),
                                                lf[slot].affinity_mask);
                        if (ret)
                                goto free_affinity_mask;
                }
        }
        return 0;

free_affinity_mask:
        otx2_cptlf_free_irqs_affinity(lfs);
        return ret;
}

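/*
 * Attach and initialize lfs_num LFs: map each slot's LMTLINE and NQ
 * doorbell, attach the LFs through the AF mailbox, allocate and enable
 * the instruction queues, and program engine groups and queue priority.
 */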
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
                    int lfs_num)
{
        int slot, ret;

        if (!lfs->pdev || !lfs->reg_base)
                return -EINVAL;

        lfs->lfs_num = lfs_num;
        for (slot = 0; slot < lfs->lfs_num; slot++) {
                lfs->lf[slot].lfs = lfs;
                lfs->lf[slot].slot = slot;
                if (lfs->lmt_base)
                        lfs->lf[slot].lmtline = lfs->lmt_base +
                                                (slot * LMTLINE_SIZE);
                else
                        lfs->lf[slot].lmtline = lfs->reg_base +
                                OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
                                                 OTX2_CPT_LMT_LF_LMTLINEX(0));

                lfs->lf[slot].ioreg = lfs->reg_base +
                        OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
                                                 OTX2_CPT_LF_NQX(0));
        }
        /* Send request to attach LFs */
        ret = otx2_cpt_attach_rscrs_msg(lfs);
        if (ret)
                goto clear_lfs_num;

        ret = otx2_cpt_alloc_instruction_queues(lfs);
        if (ret) {
                dev_err(&lfs->pdev->dev,
                        "Allocating instruction queues failed\n");
                goto detach_rsrcs;
        }
        cptlf_hw_init(lfs);
        /*
         * Allow each LF to execute requests destined to any of 8 engine
         * groups and set queue priority of each LF to high
         */
        ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
        if (ret)
                goto free_iq;

        return 0;

free_iq:
        otx2_cpt_free_instruction_queues(lfs);
        cptlf_hw_cleanup(lfs);
detach_rsrcs:
        otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
        lfs->lfs_num = 0;
        return ret;
}

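/* Disable the instruction queues and detach the LFs from this function. */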
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
        lfs->lfs_num = 0;
        /* Cleanup LFs hardware side */
        cptlf_hw_cleanup(lfs);
        /* Send request to detach LFs */
        otx2_cpt_detach_rsrcs_msg(lfs);
}
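
For context, a caller such as the PF or VF driver in this directory would consume the exported helpers above roughly as sketched below. This is a minimal, hypothetical sketch: the wrapper name cptlf_bringup and the assumption that the caller has already populated lfs (pdev, reg_base, mbox, blkaddr) are illustrative, not part of this file.

/*
 * Hypothetical bring-up sequence; unwinding on failure mirrors the
 * teardown helpers exported by otx2_cptlf.c.
 */
static int cptlf_bringup(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask,
                         int pri, int lfs_num)
{
        int ret;

        /* Attach LFs, allocate queues, program engine groups/priority */
        ret = otx2_cptlf_init(lfs, eng_grp_mask, pri, lfs_num);
        if (ret)
                return ret;

        /* Hook up MISC/DONE vectors and unmask the interrupts */
        ret = otx2_cptlf_register_interrupts(lfs);
        if (ret)
                goto shutdown;

        /* Spread each LF's vectors across NUMA-local CPUs */
        ret = otx2_cptlf_set_irqs_affinity(lfs);
        if (ret)
                goto unregister;

        return 0;

unregister:
        otx2_cptlf_unregister_interrupts(lfs);
shutdown:
        otx2_cptlf_shutdown(lfs);
        return ret;
}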