
/drivers/crypto/amlogic/amlogic-gxl-cipher.c

https://gitlab.com/pachecof/centos-stream-9

// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-gxl-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit keys in
 * CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"
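
/*
 * Flows are picked round-robin so that requests are spread across the
 * MAXFLOW DMA channels of the engine.
 */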
static int get_engine_number(struct meson_dev *mc)
{
        return atomic_inc_return(&mc->flow) % MAXFLOW;
}
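
/*
 * The offloader can only process a request whose source and destination
 * scatterlists have the same layout, whose segments are word aligned and a
 * multiple of the AES block size, and which fits in the descriptor table
 * once the 3 KEY/IV descriptors are reserved. Anything else (including
 * zero-length requests) goes to the software fallback.
 */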
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
        struct scatterlist *src_sg = areq->src;
        struct scatterlist *dst_sg = areq->dst;

        if (areq->cryptlen == 0)
                return true;

        if (sg_nents(src_sg) != sg_nents(dst_sg))
                return true;

        /* KEY/IV descriptors use 3 desc */
        if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
                return true;

        while (src_sg && dst_sg) {
                if ((src_sg->length % 16) != 0)
                        return true;
                if ((dst_sg->length % 16) != 0)
                        return true;
                if (src_sg->length != dst_sg->length)
                        return true;
                if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
                        return true;
                if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
                        return true;
                src_sg = sg_next(src_sg);
                dst_sg = sg_next(dst_sg);
        }
        return false;
}
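
/*
 * Run the request on the software fallback skcipher, preserving the
 * caller's flags and completion callback.
 */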
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct meson_alg_template *algt;

        algt = container_of(alg, struct meson_alg_template, alg.skcipher);
        algt->stat_fb++;
#endif
        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);

        if (rctx->op_dir == MESON_DECRYPT)
                err = crypto_skcipher_decrypt(&rctx->fallback_req);
        else
                err = crypto_skcipher_encrypt(&rctx->fallback_req);
        return err;
}
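
/*
 * meson_cipher() builds the descriptor chain for one request: the KEY/IV
 * descriptors first, then one descriptor per scatterlist entry, then it
 * kicks the selected flow and waits for its completion interrupt.
 */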
static int meson_cipher(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct meson_dev *mc = op->mc;
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct meson_alg_template *algt;
        int flow = rctx->flow;
        unsigned int todo, eat, len;
        struct scatterlist *src_sg = areq->src;
        struct scatterlist *dst_sg = areq->dst;
        struct meson_desc *desc;
        int nr_sgs, nr_sgd;
        int i, err = 0;
        unsigned int keyivlen, ivsize, offset, tloffset;
        dma_addr_t phykeyiv;
        void *backup_iv = NULL, *bkeyiv;
        u32 v;

        algt = container_of(alg, struct meson_alg_template, alg.skcipher);

        dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
                crypto_tfm_alg_name(areq->base.tfm),
                areq->cryptlen,
                rctx->op_dir, crypto_skcipher_ivsize(tfm),
                op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
        algt->stat_req++;
        mc->chanlist[flow].stat_req++;
#endif

        /*
         * The hardware expects a list of meson_desc structures.
         * The first two structures store the key, the third stores the IV.
         */
        bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
        if (!bkeyiv)
                return -ENOMEM;

        memcpy(bkeyiv, op->key, op->keylen);
        keyivlen = op->keylen;

        ivsize = crypto_skcipher_ivsize(tfm);
        if (areq->iv && ivsize > 0) {
                if (ivsize > areq->cryptlen) {
                        dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
                        err = -EINVAL;
                        goto theend;
                }
                memcpy(bkeyiv + 32, areq->iv, ivsize);
                keyivlen = 48;
                if (rctx->op_dir == MESON_DECRYPT) {
                        /* Save the next IV (last ciphertext block) before it can be overwritten */
                        backup_iv = kzalloc(ivsize, GFP_KERNEL);
                        if (!backup_iv) {
                                err = -ENOMEM;
                                goto theend;
                        }
                        offset = areq->cryptlen - ivsize;
                        scatterwalk_map_and_copy(backup_iv, areq->src, offset,
                                                 ivsize, 0);
                }
        }
        /* A 192-bit key is still transferred as two full 16-byte chunks */
        if (keyivlen == 24)
                keyivlen = 32;

        phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
                                  DMA_TO_DEVICE);
        err = dma_mapping_error(mc->dev, phykeyiv);
        if (err) {
                dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
                goto theend;
        }

        /* Load the key and IV into the engine, 16 bytes per descriptor */
        tloffset = 0;
        eat = 0;
        i = 0;
        while (keyivlen > eat) {
                desc = &mc->chanlist[flow].tl[tloffset];
                memset(desc, 0, sizeof(struct meson_desc));
                todo = min(keyivlen - eat, 16u);
                desc->t_src = cpu_to_le32(phykeyiv + i * 16);
                desc->t_dst = cpu_to_le32(i * 16);
                v = (MODE_KEY << 20) | DESC_OWN | 16;
                desc->t_status = cpu_to_le32(v);

                eat += todo;
                i++;
                tloffset++;
        }

        /* Map source and destination; in-place requests are mapped once, bidirectionally */
        if (areq->src == areq->dst) {
                nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
                                    DMA_BIDIRECTIONAL);
                if (nr_sgs < 0) {
                        dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
                        err = -EINVAL;
                        goto theend;
                }
                nr_sgd = nr_sgs;
        } else {
                nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
                                    DMA_TO_DEVICE);
                if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
                        dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
                        err = -EINVAL;
                        goto theend;
                }
                nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
                                    DMA_FROM_DEVICE);
                if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
                        dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
                        err = -EINVAL;
                        goto theend;
                }
        }

        /* One payload descriptor per scatterlist entry, the last one flagged DESC_LAST */
        src_sg = areq->src;
        dst_sg = areq->dst;
        len = areq->cryptlen;
        while (src_sg) {
                desc = &mc->chanlist[flow].tl[tloffset];
                memset(desc, 0, sizeof(struct meson_desc));

                desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
                desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
                todo = min(len, sg_dma_len(src_sg));
                v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
                if (rctx->op_dir)
                        v |= DESC_ENCRYPTION;
                len -= todo;

                if (!sg_next(src_sg))
                        v |= DESC_LAST;
                desc->t_status = cpu_to_le32(v);
                tloffset++;
                src_sg = sg_next(src_sg);
                dst_sg = sg_next(dst_sg);
        }

        /* Kick the flow and wait (up to 500ms) for its completion interrupt */
        reinit_completion(&mc->chanlist[flow].complete);
        mc->chanlist[flow].status = 0;
        writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
        wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
                                                  msecs_to_jiffies(500));
        if (mc->chanlist[flow].status == 0) {
                dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
                err = -EINVAL;
        }

        dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

        if (areq->src == areq->dst) {
                dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
                dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
        }

        /* Return the next IV (the last ciphertext block) to the caller */
        if (areq->iv && ivsize > 0) {
                if (rctx->op_dir == MESON_DECRYPT) {
                        memcpy(areq->iv, backup_iv, ivsize);
                } else {
                        scatterwalk_map_and_copy(areq->iv, areq->dst,
                                                 areq->cryptlen - ivsize,
                                                 ivsize, 0);
                }
        }
theend:
        kfree_sensitive(bkeyiv);
        kfree_sensitive(backup_iv);

        return err;
}
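
/*
 * crypto_engine callback: run the request on the hardware and hand the
 * result back to the engine.
 */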
static int meson_handle_cipher_request(struct crypto_engine *engine,
                                       void *areq)
{
        int err;
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

        err = meson_cipher(breq);
        crypto_finalize_skcipher_request(engine, breq, err);

        return 0;
}
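
/*
 * Decrypt/encrypt entry points: either take the software fallback path
 * directly or queue the request on one of the engine flows.
 */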
int meson_skdecrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct crypto_engine *engine;
        int e;

        rctx->op_dir = MESON_DECRYPT;
        if (meson_cipher_need_fallback(areq))
                return meson_cipher_do_fallback(areq);
        e = get_engine_number(op->mc);
        engine = op->mc->chanlist[e].engine;
        rctx->flow = e;

        return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct crypto_engine *engine;
        int e;

        rctx->op_dir = MESON_ENCRYPT;
        if (meson_cipher_need_fallback(areq))
                return meson_cipher_do_fallback(areq);
        e = get_engine_number(op->mc);
        engine = op->mc->chanlist[e].engine;
        rctx->flow = e;

        return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
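
/*
 * Allocate the software fallback transform and size the request context so
 * that it can also hold the fallback request.
 */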
int meson_cipher_init(struct crypto_tfm *tfm)
{
        struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct meson_alg_template *algt;
        const char *name = crypto_tfm_alg_name(tfm);
        struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

        memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

        algt = container_of(alg, struct meson_alg_template, alg.skcipher);
        op->mc = algt->mc;

        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(op->fallback_tfm));
                return PTR_ERR(op->fallback_tfm);
        }

        sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
                         crypto_skcipher_reqsize(op->fallback_tfm);

        op->enginectx.op.do_one_request = meson_handle_cipher_request;
        op->enginectx.op.prepare_request = NULL;
        op->enginectx.op.unprepare_request = NULL;

        return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
        struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

        kfree_sensitive(op->key);
        crypto_free_skcipher(op->fallback_tfm);
}
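
/*
 * Store a DMA-able copy of the AES key and keep the fallback transform's
 * key in sync with it.
 */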
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                     unsigned int keylen)
{
        struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct meson_dev *mc = op->mc;

        switch (keylen) {
        case 128 / 8:
                op->keymode = MODE_AES_128;
                break;
        case 192 / 8:
                op->keymode = MODE_AES_192;
                break;
        case 256 / 8:
                op->keymode = MODE_AES_256;
                break;
        default:
                dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
                return -EINVAL;
        }
        kfree_sensitive(op->key);
        op->keylen = keylen;
        op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
        if (!op->key)
                return -ENOMEM;

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}