PageRenderTime 27ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/crypto/scompress.c

https://github.com/tklauser/linux-nios2
C | 386 lines | 303 code | 70 blank | 13 comment | 34 complexity | 4facd109b19b12742499d2f1569dbac1 MD5 | raw file
  1. /*
  2. * Synchronous Compression operations
  3. *
  4. * Copyright 2015 LG Electronics Inc.
  5. * Copyright (c) 2016, Intel Corporation
  6. * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 2 of the License, or (at your option)
  11. * any later version.
  12. *
  13. */
  14. #include <linux/errno.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/seq_file.h>
  18. #include <linux/slab.h>
  19. #include <linux/string.h>
  20. #include <linux/crypto.h>
  21. #include <linux/compiler.h>
  22. #include <linux/vmalloc.h>
  23. #include <crypto/algapi.h>
  24. #include <linux/cryptouser.h>
  25. #include <net/netlink.h>
  26. #include <linux/scatterlist.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <crypto/internal/acompress.h>
  29. #include <crypto/internal/scompress.h>
  30. #include "internal.h"
/* Type object registered with the crypto core; defined at the bottom of
 * this file. */
static const struct crypto_type crypto_scomp_type;
/* Per-CPU linear scratch buffers (SCOMP_SCRATCH_SIZE each) shared by all
 * scomp algorithms; they let synchronous compressors, which need flat
 * buffers, service scatterlist-based acomp requests. */
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
/* Number of registered scomp algorithms currently sharing the scratch
 * buffers; both it and the scratch pointers are protected by scomp_lock. */
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
  36. #ifdef CONFIG_NET
  37. static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
  38. {
  39. struct crypto_report_comp rscomp;
  40. strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
  41. if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
  42. sizeof(struct crypto_report_comp), &rscomp))
  43. goto nla_put_failure;
  44. return 0;
  45. nla_put_failure:
  46. return -EMSGSIZE;
  47. }
  48. #else
  49. static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
  50. {
  51. return -ENOSYS;
  52. }
  53. #endif
  54. static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
  55. __maybe_unused;
  56. static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
  57. {
  58. seq_puts(m, "type : scomp\n");
  59. }
/*
 * A bare scomp transform carries no extra per-tfm state, so
 * initialization always succeeds.
 */
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}
  64. static void crypto_scomp_free_scratches(void * __percpu *scratches)
  65. {
  66. int i;
  67. if (!scratches)
  68. return;
  69. for_each_possible_cpu(i)
  70. vfree(*per_cpu_ptr(scratches, i));
  71. free_percpu(scratches);
  72. }
  73. static void * __percpu *crypto_scomp_alloc_scratches(void)
  74. {
  75. void * __percpu *scratches;
  76. int i;
  77. scratches = alloc_percpu(void *);
  78. if (!scratches)
  79. return NULL;
  80. for_each_possible_cpu(i) {
  81. void *scratch;
  82. scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
  83. if (!scratch)
  84. goto error;
  85. *per_cpu_ptr(scratches, i) = scratch;
  86. }
  87. return scratches;
  88. error:
  89. crypto_scomp_free_scratches(scratches);
  90. return NULL;
  91. }
  92. static void crypto_scomp_free_all_scratches(void)
  93. {
  94. if (!--scomp_scratch_users) {
  95. crypto_scomp_free_scratches(scomp_src_scratches);
  96. crypto_scomp_free_scratches(scomp_dst_scratches);
  97. scomp_src_scratches = NULL;
  98. scomp_dst_scratches = NULL;
  99. }
  100. }
  101. static int crypto_scomp_alloc_all_scratches(void)
  102. {
  103. if (!scomp_scratch_users++) {
  104. scomp_src_scratches = crypto_scomp_alloc_scratches();
  105. if (!scomp_src_scratches)
  106. return -ENOMEM;
  107. scomp_dst_scratches = crypto_scomp_alloc_scratches();
  108. if (!scomp_dst_scratches)
  109. return -ENOMEM;
  110. }
  111. return 0;
  112. }
  113. static void crypto_scomp_sg_free(struct scatterlist *sgl)
  114. {
  115. int i, n;
  116. struct page *page;
  117. if (!sgl)
  118. return;
  119. n = sg_nents(sgl);
  120. for_each_sg(sgl, sgl, n, i) {
  121. page = sg_page(sgl);
  122. if (page)
  123. __free_page(page);
  124. }
  125. kfree(sgl);
  126. }
  127. static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
  128. {
  129. struct scatterlist *sgl;
  130. struct page *page;
  131. int i, n;
  132. n = ((size - 1) >> PAGE_SHIFT) + 1;
  133. sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
  134. if (!sgl)
  135. return NULL;
  136. sg_init_table(sgl, n);
  137. for (i = 0; i < n; i++) {
  138. page = alloc_page(gfp);
  139. if (!page)
  140. goto err;
  141. sg_set_page(sgl + i, page, PAGE_SIZE, 0);
  142. }
  143. return sgl;
  144. err:
  145. sg_mark_end(sgl + i);
  146. crypto_scomp_sg_free(sgl);
  147. return NULL;
  148. }
  149. static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
  150. {
  151. struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
  152. void **tfm_ctx = acomp_tfm_ctx(tfm);
  153. struct crypto_scomp *scomp = *tfm_ctx;
  154. void **ctx = acomp_request_ctx(req);
  155. const int cpu = get_cpu();
  156. u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
  157. u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
  158. int ret;
  159. if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
  160. ret = -EINVAL;
  161. goto out;
  162. }
  163. if (req->dst && !req->dlen) {
  164. ret = -EINVAL;
  165. goto out;
  166. }
  167. if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
  168. req->dlen = SCOMP_SCRATCH_SIZE;
  169. scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
  170. if (dir)
  171. ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
  172. scratch_dst, &req->dlen, *ctx);
  173. else
  174. ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
  175. scratch_dst, &req->dlen, *ctx);
  176. if (!ret) {
  177. if (!req->dst) {
  178. req->dst = crypto_scomp_sg_alloc(req->dlen,
  179. req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  180. GFP_KERNEL : GFP_ATOMIC);
  181. if (!req->dst)
  182. goto out;
  183. }
  184. scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
  185. 1);
  186. }
  187. out:
  188. put_cpu();
  189. return ret;
  190. }
/* acomp ->compress hook: run the backing scomp algorithm (dir = 1). */
static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}
/* acomp ->decompress hook: run the backing scomp algorithm (dir = 0). */
static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}
  199. static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
  200. {
  201. struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
  202. crypto_free_scomp(*ctx);
  203. }
  204. int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
  205. {
  206. struct crypto_alg *calg = tfm->__crt_alg;
  207. struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
  208. struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
  209. struct crypto_scomp *scomp;
  210. if (!crypto_mod_get(calg))
  211. return -EAGAIN;
  212. scomp = crypto_create_tfm(calg, &crypto_scomp_type);
  213. if (IS_ERR(scomp)) {
  214. crypto_mod_put(calg);
  215. return PTR_ERR(scomp);
  216. }
  217. *ctx = scomp;
  218. tfm->exit = crypto_exit_scomp_ops_async;
  219. crt->compress = scomp_acomp_compress;
  220. crt->decompress = scomp_acomp_decompress;
  221. crt->dst_free = crypto_scomp_sg_free;
  222. crt->reqsize = sizeof(void *);
  223. return 0;
  224. }
  225. struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
  226. {
  227. struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
  228. struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
  229. struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
  230. struct crypto_scomp *scomp = *tfm_ctx;
  231. void *ctx;
  232. ctx = crypto_scomp_alloc_ctx(scomp);
  233. if (IS_ERR(ctx)) {
  234. kfree(req);
  235. return NULL;
  236. }
  237. *req->__ctx = ctx;
  238. return req;
  239. }
  240. void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
  241. {
  242. struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
  243. struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
  244. struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
  245. struct crypto_scomp *scomp = *tfm_ctx;
  246. void *ctx = *req->__ctx;
  247. if (ctx)
  248. crypto_scomp_free_ctx(scomp, ctx);
  249. }
/*
 * Type glue that plugs scomp algorithms into the generic crypto core.
 * ->show is only wired up under CONFIG_PROC_FS; ->report is always set
 * and returns -ENOSYS when CONFIG_NET is disabled (see above).
 */
static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};
  262. int crypto_register_scomp(struct scomp_alg *alg)
  263. {
  264. struct crypto_alg *base = &alg->base;
  265. int ret = -ENOMEM;
  266. mutex_lock(&scomp_lock);
  267. if (crypto_scomp_alloc_all_scratches())
  268. goto error;
  269. base->cra_type = &crypto_scomp_type;
  270. base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
  271. base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
  272. ret = crypto_register_alg(base);
  273. if (ret)
  274. goto error;
  275. mutex_unlock(&scomp_lock);
  276. return ret;
  277. error:
  278. crypto_scomp_free_all_scratches();
  279. mutex_unlock(&scomp_lock);
  280. return ret;
  281. }
  282. EXPORT_SYMBOL_GPL(crypto_register_scomp);
/*
 * Unregister @alg from the crypto core and drop its reference on the
 * shared per-CPU scratch buffers (freed when the last scomp algorithm
 * goes away).  Returns the result of crypto_unregister_alg().
 */
int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
  293. int crypto_register_scomps(struct scomp_alg *algs, int count)
  294. {
  295. int i, ret;
  296. for (i = 0; i < count; i++) {
  297. ret = crypto_register_scomp(&algs[i]);
  298. if (ret)
  299. goto err;
  300. }
  301. return 0;
  302. err:
  303. for (--i; i >= 0; --i)
  304. crypto_unregister_scomp(&algs[i]);
  305. return ret;
  306. }
  307. EXPORT_SYMBOL_GPL(crypto_register_scomps);
  308. void crypto_unregister_scomps(struct scomp_alg *algs, int count)
  309. {
  310. int i;
  311. for (i = count - 1; i >= 0; --i)
  312. crypto_unregister_scomp(&algs[i]);
  313. }
  314. EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
/* Module metadata for the synchronous compression type glue. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");