
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c

https://gitlab.com/Skylake/linux-kernel

/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>

#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "en.h"

enum {
        MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
        MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
        MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};

struct mlx5e_ipsec_rx_metadata {
        unsigned char   nexthdr;
        __be32          sa_handle;
} __packed;

enum {
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

struct mlx5e_ipsec_tx_metadata {
        __be16 mss_inv;         /* 1/MSS in 16bit fixed point, only for LSO */
        __be16 seq;             /* LSBs of the first TCP seq, only for LSO */
        u8     esp_next_proto;  /* Next protocol of ESP */
} __packed;

struct mlx5e_ipsec_metadata {
        unsigned char syndrome;
        union {
                unsigned char raw[5];
                /* from FPGA to host, on successful decrypt */
                struct mlx5e_ipsec_rx_metadata rx;
                /* from host to FPGA */
                struct mlx5e_ipsec_tx_metadata tx;
        } __packed content;
        /* packet type ID field */
        __be16 ethertype;
} __packed;
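
/*
 * Between driver and device this metadata rides as an extra 8 bytes spliced
 * in right after the destination/source MAC addresses: first the
 * MLX5E_METADATA_ETHER_TYPE packet type, then the syndrome and the 5-byte
 * content.  The struct's trailing ethertype field ends up overlaying the
 * packet's original ethertype, so the original packet type is preserved
 * immediately after the metadata (see mlx5e_ipsec_add_metadata() and
 * mlx5e_ipsec_handle_rx_skb() below).
 */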

#define MAX_LSO_MSS 2048

/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
        return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}
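
/*
 * Worked example for the Q0.16 entries (the values are filled in by
 * mlx5e_ipsec_build_inverse_table() at the bottom of this file): for an MSS
 * of 1400, 1/1400 ~= 0.000714 and 0.000714 * 2^16 ~= 46.8, so the table
 * stores htons(46) == htons(0x002e).  MSS == 1 is special-cased to the
 * saturated value 0xffff, since 1.0 itself is not representable in Q0.16.
 */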

static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct ethhdr *eth;

        if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
                return ERR_PTR(-ENOMEM);

        eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
        skb->mac_header -= sizeof(*mdata);
        mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

        memmove(skb->data, skb->data + sizeof(*mdata),
                2 * ETH_ALEN);

        eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

        memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
        return mdata;
}
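
/*
 * The ESP trailer built by the xfrm stack consists of pad bytes, a
 * pad-length byte, a next-header byte and an alen-byte ICV.  For offloaded
 * non-GSO transmit the driver strips that trailer here (presumably the
 * device appends its own trailer and ICV while encrypting): plen is read
 * from the pad-length byte that precedes the next-header byte and the ICV,
 * and the IPv4/IPv6 length fields (plus the IPv4 checksum) are fixed up to
 * match the shorter packet.
 */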

static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
        unsigned int alen = crypto_aead_authsize(x->data);
        struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
        struct iphdr *ipv4hdr = ip_hdr(skb);
        unsigned int trailer_len;
        u8 plen;
        int ret;

        ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
        if (unlikely(ret))
                return ret;

        trailer_len = alen + plen + 2;

        pskb_trim(skb, skb->len - trailer_len);
        if (skb->protocol == htons(ETH_P_IP)) {
                ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
                ip_send_check(ipv4hdr);
        } else {
                ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
                                             trailer_len);
        }
        return 0;
}
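
/*
 * The SWP (software parser) offsets in the WQE Ethernet segment tell the
 * hardware where the outer/inner L3 and inner L4 headers begin, since it
 * cannot parse past the ESP header on its own.  The header layouts covered
 * are described in the comment inside mlx5e_ipsec_set_swp() below.
 */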

static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                struct xfrm_offload *xo)
{
        u8 proto;

        /* Tunnel Mode:
         * SWP:      OutL3       InL3  InL4
         * Pkt: MAC  IP     ESP  IP    L4
         *
         * Transport Mode:
         * SWP:      OutL3       InL4
         *           InL3
         * Pkt: MAC  IP     ESP  L4
         *
         * Offsets are in 2-byte words, counting from start of frame
         */
        eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
        if (skb->protocol == htons(ETH_P_IPV6))
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

        if (mode == XFRM_MODE_TUNNEL) {
                eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                if (xo->proto == IPPROTO_IPV6) {
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                        proto = inner_ipv6_hdr(skb)->nexthdr;
                } else {
                        proto = inner_ip_hdr(skb)->protocol;
                }
        } else {
                eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
                if (skb->protocol == htons(ETH_P_IPV6))
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                proto = xo->proto;
        }

        switch (proto) {
        case IPPROTO_UDP:
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
                /* Fall through */
        case IPPROTO_TCP:
                eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
                break;
        }
}
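
/*
 * mlx5e_ipsec_set_iv_esn() places the full 64-bit extended sequence number
 * (ESN) in the ESP IV field for the device.  For a GSO skb the stack has
 * already advanced oseq past all of the segments; if the low 32 bits
 * wrapped somewhere inside this burst (oseq is below the
 * MLX5E_IPSEC_ESN_SCOPE_MID threshold while oseq - gso_segs is still above
 * it), the first segments belong to the previous seq_hi epoch, so
 * seq_hi - 1 is written instead.
 */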

void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
                            struct xfrm_offload *xo)
{
        struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
        __u32 oseq = replay_esn->oseq;
        int iv_offset;
        __be64 seqno;
        u32 seq_hi;

        if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
                     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
                seq_hi = xo->seq.hi - 1;
        } else {
                seq_hi = xo->seq.hi;
        }

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
                        struct xfrm_offload *xo)
{
        int iv_offset;
        __be64 seqno;

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
                                     struct mlx5e_ipsec_metadata *mdata,
                                     struct xfrm_offload *xo)
{
        struct ip_esp_hdr *esph;
        struct tcphdr *tcph;

        if (skb_is_gso(skb)) {
                /* Add LSO metadata indication */
                esph = ip_esp_hdr(skb);
                tcph = inner_tcp_hdr(skb);
                netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
                           skb->network_header,
                           skb->transport_header,
                           skb->inner_network_header,
                           skb->inner_transport_header);
                netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
                           skb->len, skb_shinfo(skb)->gso_size,
                           ntohs(tcph->source), ntohs(tcph->dest),
                           ntohl(tcph->seq), ntohl(esph->seq_no));
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
                mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
                mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
        } else {
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
        }
        mdata->content.tx.esp_next_proto = xo->proto;

        netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
                   mdata->syndrome, mdata->content.tx.esp_next_proto,
                   ntohs(mdata->content.tx.mss_inv),
                   ntohs(mdata->content.tx.seq));
}
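
/*
 * TX entry point, called while the send WQE is being prepared.  The
 * sequence is: validate that the skb carries exactly one offloaded xfrm
 * state, strip the software-built ESP trailer (non-GSO only), push the
 * metadata pseudo-header, program the SWP offsets into the WQE, write the
 * IV via the SA's set_iv_op callback, then fill in the TX metadata.  On
 * any failure the skb is dropped and the matching ipsec_tx_drop_* counter
 * is incremented.
 */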

struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
                                          struct mlx5e_tx_wqe *wqe,
                                          struct sk_buff *skb)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct mlx5e_ipsec_metadata *mdata;
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_state *x;

        if (!xo)
                return skb;

        if (unlikely(skb->sp->len != 1)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
                goto drop;
        }

        x = xfrm_input_state(skb);
        if (unlikely(!x)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
                goto drop;
        }

        if (unlikely(!x->xso.offload_handle ||
                     (skb->protocol != htons(ETH_P_IP) &&
                      skb->protocol != htons(ETH_P_IPV6)))) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
                goto drop;
        }

        if (!skb_is_gso(skb))
                if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
                        atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
                        goto drop;
                }

        mdata = mlx5e_ipsec_add_metadata(skb);
        if (IS_ERR(mdata)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
                goto drop;
        }

        mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        sa_entry->set_iv_op(skb, x, xo);
        mlx5e_ipsec_set_metadata(skb, mdata, xo);

        return skb;

drop:
        kfree_skb(skb);
        return NULL;
}
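
/*
 * RX side: the device inserts the same metadata pseudo-header after the MAC
 * addresses of frames it has handled.  mlx5e_ipsec_build_sp() allocates a
 * secpath entry, looks up the xfrm state by the SA handle carried in the
 * metadata and translates the RX syndrome into the xfrm_offload crypto
 * status; mlx5e_ipsec_handle_rx_skb() then strips the metadata so the stack
 * receives an ordinary frame.
 */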

static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
                     struct mlx5e_ipsec_metadata *mdata)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
        u32 sa_handle;

        skb->sp = secpath_dup(skb->sp);
        if (unlikely(!skb->sp)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return NULL;
        }

        sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
        if (unlikely(!xs)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return NULL;
        }

        skb->sp->xvec[skb->sp->len++] = xs;
        skb->sp->olen++;

        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;

        switch (mdata->syndrome) {
        case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (likely(priv->ipsec->no_trailer)) {
                        xo->flags |= XFRM_ESP_NO_TRAILER;
                        xo->proto = mdata->content.rx.nexthdr;
                }
                break;
        case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
                break;
        case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
                xo->status = CRYPTO_INVALID_PROTOCOL;
                break;
        default:
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
                return NULL;
        }
        return xs;
}

struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                                          struct sk_buff *skb)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct ethhdr *old_eth;
        struct ethhdr *new_eth;
        struct xfrm_state *xs;
        __be16 *ethtype;

        /* Detect inline metadata */
        if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
                return skb;
        ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
        if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
                return skb;

        /* Use the metadata */
        mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
        xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
        if (unlikely(!xs)) {
                kfree_skb(skb);
                return NULL;
        }

        /* Remove the metadata from the buffer */
        old_eth = (struct ethhdr *)skb->data;
        new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
        memmove(new_eth, old_eth, 2 * ETH_ALEN);
        /* Ethertype is already in its new place */
        skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);

        return skb;
}

bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
                               netdev_features_t features)
{
        struct xfrm_state *x;

        if (skb->sp && skb->sp->len) {
                x = skb->sp->xvec[0];
                if (x && x->xso.offload_handle)
                        return true;
        }
        return false;
}

void mlx5e_ipsec_build_inverse_table(void)
{
        u16 mss_inv;
        u32 mss;

        /* Calculate 1/x inverse table for use in GSO data path.
         * Using this table, we provide the IPSec accelerator with the value of
         * 1/gso_size so that it can infer the position of each segment inside
         * the GSO, and increment the ESP sequence number, and generate the IV.
         * The HW needs this value in Q0.16 fixed-point number format
         */
        mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
        for (mss = 2; mss < MAX_LSO_MSS; mss++) {
                mss_inv = div_u64(1ULL << 32, mss) >> 16;
                mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
        }
}