
/net/ipv6/esp6_offload.c

https://bitbucket.org/sjohann81/linux-4.13.9
Possible License(s): GPL-2.0
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */
#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>
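
/* Walk the IPv6 extension header chain and return the offset of the
 * nexthdr byte that announces ESP, or 0 if no ESP header is found
 * within the first nhlen bytes.
 */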
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
        int off = sizeof(struct ipv6hdr);
        struct ipv6_opt_hdr *exthdr;

        if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
                return offsetof(struct ipv6hdr, nexthdr);

        while (off < nhlen) {
                exthdr = (void *)ipv6_hdr + off;
                if (exthdr->nexthdr == NEXTHDR_ESP)
                        return off;

                off += ipv6_optlen(exthdr);
        }

        return 0;
}
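
/* GRO receive handler for ESP: parse the SPI and sequence number and,
 * unless the NIC already decrypted the packet (CRYPTO_DONE), attach a
 * secpath and look up the xfrm state for this flow. The packet is then
 * handed to xfrm_input() (encap_type -2 marks the GRO code path), so
 * -EINPROGRESS is returned instead of aggregating the skb here.
 */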
static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int nhoff;
        int err;

        skb_pull(skb, offset);

        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                err = secpath_set(skb);
                if (err)
                        goto out;

                if (skb->sp->len == XFRM_MAX_DEPTH)
                        goto out;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET6);
                if (!x)
                        goto out;

                skb->sp->xvec[skb->sp->len++] = x;
                skb->sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
                        goto out;
                }
        }

        xo->flags |= XFRM_GRO;

        nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
        if (!nhoff)
                goto out;

        IP6CB(skb)->nhoff = nhoff;
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error.
         */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}
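
/* Called before segmentation to prepend the outer ESP header: save the
 * inner protocol in the offload context and fill in the SPI and the
 * low-order output sequence number from the xfrm state.
 */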
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->nexthdr;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}
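
/* GSO segmentation for ESP: pull the ESP header and IV, let the outer
 * mode split the payload into segments, then stamp each segment with
 * its sequence number and run it through the type-offload xmit hook.
 * Without NETIF_F_HW_ESP, scatter-gather and checksum offloads are
 * masked out and CRYPTO_FALLBACK requests software encryption for
 * each segment.
 */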
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        __u32 seq;
        int err = 0;
        struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                goto out;

        seq = xo->seq.low;

        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                goto out;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                goto out;

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        segs = x->outer_mode->gso_segment(x, skb, esp_features);
        if (IS_ERR_OR_NULL(segs))
                goto out;

        __skb_pull(skb, skb->data - skb_mac_header(skb));

        skb2 = segs;
        do {
                struct sk_buff *nskb = skb2->next;

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_GSO_SEGMENT;
                xo->seq.low = seq;
                xo->seq.hi = xfrm_replay_seqhi(x, seq);

                if (!(features & NETIF_F_HW_ESP))
                        xo->flags |= CRYPTO_FALLBACK;

                x->outer_mode->xmit(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (err) {
                        kfree_skb_list(segs);
                        return ERR_PTR(err);
                }

                if (!skb_is_gso(skb2))
                        seq++;
                else
                        seq += skb_shinfo(skb2)->gso_segs;

                skb_push(skb2, skb2->mac_len);
                skb2 = nskb;
        } while (skb2);

out:
        return segs;
}
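
/* Input tail for hardware-decrypted packets: the device already did
 * the crypto, so just make sure the ESP header and IV are present and
 * let esp6_input_done2() strip the padding and trailer.
 */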
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        skb->ip_summed = CHECKSUM_NONE;

        return esp6_input_done2(skb, 0);
}
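
/* Transmit side of the offload: compute the ESP padding and trailer
 * sizes, build the header around the payload and, unless the device
 * bound to this state can encrypt the packet itself, fall back to
 * software encryption via esp6_output_tail().
 */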
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;

        esp.inplace = true;

        xo = xfrm_offload(skb);
        if (!xo)
                return -EINVAL;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev)) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
                esp.nfrags = esp6_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        esph = ip_esp_hdr(skb);
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(xo->seq.low);
        } else {
                int len = skb->len - sizeof(struct ipv6hdr);

                if (len > IPV6_MAXPLEN)
                        len = 0;

                ipv6_hdr(skb)->payload_len = htons(len);
        }

        if (hw_offload)
                return 0;

        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

        err = esp6_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}
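
/* Offload hooks registered with the IPv6 stack (GRO/GSO callbacks) and
 * with the xfrm layer (ESP type offload).
 */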
static const struct net_offload esp6_offload = {
        .callbacks = {
                .gro_receive = esp6_gro_receive,
                .gso_segment = esp6_gso_segment,
        },
};

static const struct xfrm_type_offload esp6_type_offload = {
        .description = "ESP6 OFFLOAD",
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp6_input_tail,
        .xmit = esp6_xmit,
        .encap = esp6_gso_encap,
};
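
/* Register the ESP type offload with xfrm and the GRO/GSO callbacks
 * with the IPv6 protocol offload table.
 */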
static int __init esp6_offload_init(void)
{
        if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
        if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type offload\n", __func__);

        inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");