PageRenderTime 28ms CodeModel.GetById 20ms RepoModel.GetById 0ms app.codeStats 0ms

/arch/um/drivers/vector_transports.c

https://github.com/kvaneesh/linux
C | 495 lines | 422 code | 63 blank | 10 comment | 93 complexity | f6483938b89d0f23ed5b4f47c4b7adbb MD5 | raw file
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2017 - Cambridge Greys Limited
  4. * Copyright (C) 2011 - 2014 Cisco Systems Inc
  5. */
  6. #include <linux/etherdevice.h>
  7. #include <linux/netdevice.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/slab.h>
  10. #include <asm/byteorder.h>
  11. #include <uapi/linux/ip.h>
  12. #include <uapi/linux/virtio_net.h>
  13. #include <linux/virtio_net.h>
  14. #include <linux/virtio_byteorder.h>
  15. #include <linux/netdev_features.h>
  16. #include "vector_user.h"
  17. #include "vector_kern.h"
  18. #define GOOD_LINEAR 512
  19. #define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"
/* Minimal GRE header as used by this transport: the flags/version
 * word followed by the payload protocol type (set to GRE_IRB below,
 * i.e. transparent ethernet bridging).
 */
struct gre_minimal_header {
	uint16_t header;	/* GRE flags + version word, wire order */
	uint16_t arptype;	/* payload protocol type */
};
/* Per-device private state for the GRE transport
 * (hangs off vector_private->transport_data).
 */
struct uml_gre_data {
	uint32_t rx_key;	/* expected key on receive, stored in wire order */
	uint32_t tx_key;	/* key written on transmit, stored in wire order */
	uint32_t sequence;	/* last sequence number sent */

	bool ipv6;		/* raw IPv6 socket - no leading iphdr to skip on rx */
	bool has_sequence;	/* GRE sequence option enabled */
	bool pin_sequence;	/* always transmit sequence number 0 */
	bool checksum;		/* NOTE(review): set up but never read in this file */
	bool key;		/* GRE key option enabled */

	/* First 32 bits of a valid header, prebuilt once: copied out on
	 * transmit and compared against on receive.
	 */
	struct gre_minimal_header expected_header;

	/* Byte offsets of the optional fields within the GRE header;
	 * adjusted as options are enabled during setup.
	 */
	uint32_t checksum_offset;
	uint32_t key_offset;
	uint32_t sequence_offset;
};
/* Per-device private state for the L2TPv3 transport
 * (hangs off vector_private->transport_data).
 */
struct uml_l2tpv3_data {
	uint64_t rx_cookie;	/* expected cookie on receive, wire order */
	uint64_t tx_cookie;	/* cookie written on transmit, wire order */
	uint64_t rx_session;	/* expected session id on receive, wire order */
	uint64_t tx_session;	/* session id written on transmit, wire order */
	uint32_t counter;	/* last counter value sent */

	bool udp;		/* UDP encapsulation (adds the data-packet marker) */
	bool ipv6;		/* raw IPv6 socket - no leading iphdr to skip on rx */
	bool has_counter;	/* counter field present in the header */
	bool pin_counter;	/* always transmit counter value 0 */
	bool cookie;		/* cookie field present in the header */
	bool cookie_is_64;	/* cookie is 64 bits wide rather than 32 */

	/* Byte offsets of the fields within the header; adjusted as
	 * options are enabled during setup.
	 */
	uint32_t cookie_offset;
	uint32_t session_offset;
	uint32_t counter_offset;
};
  54. static int l2tpv3_form_header(uint8_t *header,
  55. struct sk_buff *skb, struct vector_private *vp)
  56. {
  57. struct uml_l2tpv3_data *td = vp->transport_data;
  58. uint32_t *counter;
  59. if (td->udp)
  60. *(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
  61. (*(uint32_t *) (header + td->session_offset)) = td->tx_session;
  62. if (td->cookie) {
  63. if (td->cookie_is_64)
  64. (*(uint64_t *)(header + td->cookie_offset)) =
  65. td->tx_cookie;
  66. else
  67. (*(uint32_t *)(header + td->cookie_offset)) =
  68. td->tx_cookie;
  69. }
  70. if (td->has_counter) {
  71. counter = (uint32_t *)(header + td->counter_offset);
  72. if (td->pin_counter) {
  73. *counter = 0;
  74. } else {
  75. td->counter++;
  76. *counter = cpu_to_be32(td->counter);
  77. }
  78. }
  79. return 0;
  80. }
  81. static int gre_form_header(uint8_t *header,
  82. struct sk_buff *skb, struct vector_private *vp)
  83. {
  84. struct uml_gre_data *td = vp->transport_data;
  85. uint32_t *sequence;
  86. *((uint32_t *) header) = *((uint32_t *) &td->expected_header);
  87. if (td->key)
  88. (*(uint32_t *) (header + td->key_offset)) = td->tx_key;
  89. if (td->has_sequence) {
  90. sequence = (uint32_t *)(header + td->sequence_offset);
  91. if (td->pin_sequence)
  92. *sequence = 0;
  93. else
  94. *sequence = cpu_to_be32(++td->sequence);
  95. }
  96. return 0;
  97. }
  98. static int raw_form_header(uint8_t *header,
  99. struct sk_buff *skb, struct vector_private *vp)
  100. {
  101. struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
  102. virtio_net_hdr_from_skb(
  103. skb,
  104. vheader,
  105. virtio_legacy_is_little_endian(),
  106. false,
  107. 0
  108. );
  109. return 0;
  110. }
  111. static int l2tpv3_verify_header(
  112. uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
  113. {
  114. struct uml_l2tpv3_data *td = vp->transport_data;
  115. uint32_t *session;
  116. uint64_t cookie;
  117. if ((!td->udp) && (!td->ipv6))
  118. header += sizeof(struct iphdr) /* fix for ipv4 raw */;
  119. /* we do not do a strict check for "data" packets as per
  120. * the RFC spec because the pure IP spec does not have
  121. * that anyway.
  122. */
  123. if (td->cookie) {
  124. if (td->cookie_is_64)
  125. cookie = *(uint64_t *)(header + td->cookie_offset);
  126. else
  127. cookie = *(uint32_t *)(header + td->cookie_offset);
  128. if (cookie != td->rx_cookie) {
  129. if (net_ratelimit())
  130. netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
  131. return -1;
  132. }
  133. }
  134. session = (uint32_t *) (header + td->session_offset);
  135. if (*session != td->rx_session) {
  136. if (net_ratelimit())
  137. netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
  138. return -1;
  139. }
  140. return 0;
  141. }
  142. static int gre_verify_header(
  143. uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
  144. {
  145. uint32_t key;
  146. struct uml_gre_data *td = vp->transport_data;
  147. if (!td->ipv6)
  148. header += sizeof(struct iphdr) /* fix for ipv4 raw */;
  149. if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
  150. if (net_ratelimit())
  151. netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
  152. *((uint32_t *) &td->expected_header),
  153. *((uint32_t *) header)
  154. );
  155. return -1;
  156. }
  157. if (td->key) {
  158. key = (*(uint32_t *)(header + td->key_offset));
  159. if (key != td->rx_key) {
  160. if (net_ratelimit())
  161. netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
  162. key, td->rx_key);
  163. return -1;
  164. }
  165. }
  166. return 0;
  167. }
  168. static int raw_verify_header(
  169. uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
  170. {
  171. struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
  172. if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
  173. (vp->req_size != 65536)) {
  174. if (net_ratelimit())
  175. netdev_err(
  176. vp->dev,
  177. GSO_ERROR
  178. );
  179. }
  180. if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
  181. return 1;
  182. virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
  183. return 0;
  184. }
  185. static bool get_uint_param(
  186. struct arglist *def, char *param, unsigned int *result)
  187. {
  188. char *arg = uml_vector_fetch_arg(def, param);
  189. if (arg != NULL) {
  190. if (kstrtoint(arg, 0, result) == 0)
  191. return true;
  192. }
  193. return false;
  194. }
  195. static bool get_ulong_param(
  196. struct arglist *def, char *param, unsigned long *result)
  197. {
  198. char *arg = uml_vector_fetch_arg(def, param);
  199. if (arg != NULL) {
  200. if (kstrtoul(arg, 0, result) == 0)
  201. return true;
  202. return true;
  203. }
  204. return false;
  205. }
  206. static int build_gre_transport_data(struct vector_private *vp)
  207. {
  208. struct uml_gre_data *td;
  209. int temp_int;
  210. int temp_rx;
  211. int temp_tx;
  212. vp->transport_data = kmalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
  213. if (vp->transport_data == NULL)
  214. return -ENOMEM;
  215. td = vp->transport_data;
  216. td->sequence = 0;
  217. td->expected_header.arptype = GRE_IRB;
  218. td->expected_header.header = 0;
  219. vp->form_header = &gre_form_header;
  220. vp->verify_header = &gre_verify_header;
  221. vp->header_size = 4;
  222. td->key_offset = 4;
  223. td->sequence_offset = 4;
  224. td->checksum_offset = 4;
  225. td->ipv6 = false;
  226. if (get_uint_param(vp->parsed, "v6", &temp_int)) {
  227. if (temp_int > 0)
  228. td->ipv6 = true;
  229. }
  230. td->key = false;
  231. if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
  232. if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
  233. td->key = true;
  234. td->expected_header.header |= GRE_MODE_KEY;
  235. td->rx_key = cpu_to_be32(temp_rx);
  236. td->tx_key = cpu_to_be32(temp_tx);
  237. vp->header_size += 4;
  238. td->sequence_offset += 4;
  239. } else {
  240. return -EINVAL;
  241. }
  242. }
  243. td->sequence = false;
  244. if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
  245. if (temp_int > 0) {
  246. vp->header_size += 4;
  247. td->has_sequence = true;
  248. td->expected_header.header |= GRE_MODE_SEQUENCE;
  249. if (get_uint_param(
  250. vp->parsed, "pin_sequence", &temp_int)) {
  251. if (temp_int > 0)
  252. td->pin_sequence = true;
  253. }
  254. }
  255. }
  256. vp->rx_header_size = vp->header_size;
  257. if (!td->ipv6)
  258. vp->rx_header_size += sizeof(struct iphdr);
  259. return 0;
  260. }
  261. static int build_l2tpv3_transport_data(struct vector_private *vp)
  262. {
  263. struct uml_l2tpv3_data *td;
  264. int temp_int, temp_rxs, temp_txs;
  265. unsigned long temp_rx;
  266. unsigned long temp_tx;
  267. vp->transport_data = kmalloc(
  268. sizeof(struct uml_l2tpv3_data), GFP_KERNEL);
  269. if (vp->transport_data == NULL)
  270. return -ENOMEM;
  271. td = vp->transport_data;
  272. vp->form_header = &l2tpv3_form_header;
  273. vp->verify_header = &l2tpv3_verify_header;
  274. td->counter = 0;
  275. vp->header_size = 4;
  276. td->session_offset = 0;
  277. td->cookie_offset = 4;
  278. td->counter_offset = 4;
  279. td->ipv6 = false;
  280. if (get_uint_param(vp->parsed, "v6", &temp_int)) {
  281. if (temp_int > 0)
  282. td->ipv6 = true;
  283. }
  284. if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
  285. if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
  286. td->tx_session = cpu_to_be32(temp_txs);
  287. td->rx_session = cpu_to_be32(temp_rxs);
  288. } else {
  289. return -EINVAL;
  290. }
  291. } else {
  292. return -EINVAL;
  293. }
  294. td->cookie_is_64 = false;
  295. if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
  296. if (temp_int > 0)
  297. td->cookie_is_64 = true;
  298. }
  299. td->cookie = false;
  300. if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
  301. if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
  302. td->cookie = true;
  303. if (td->cookie_is_64) {
  304. td->rx_cookie = cpu_to_be64(temp_rx);
  305. td->tx_cookie = cpu_to_be64(temp_tx);
  306. vp->header_size += 8;
  307. td->counter_offset += 8;
  308. } else {
  309. td->rx_cookie = cpu_to_be32(temp_rx);
  310. td->tx_cookie = cpu_to_be32(temp_tx);
  311. vp->header_size += 4;
  312. td->counter_offset += 4;
  313. }
  314. } else {
  315. return -EINVAL;
  316. }
  317. }
  318. td->has_counter = false;
  319. if (get_uint_param(vp->parsed, "counter", &temp_int)) {
  320. if (temp_int > 0) {
  321. td->has_counter = true;
  322. vp->header_size += 4;
  323. if (get_uint_param(
  324. vp->parsed, "pin_counter", &temp_int)) {
  325. if (temp_int > 0)
  326. td->pin_counter = true;
  327. }
  328. }
  329. }
  330. if (get_uint_param(vp->parsed, "udp", &temp_int)) {
  331. if (temp_int > 0) {
  332. td->udp = true;
  333. vp->header_size += 4;
  334. td->counter_offset += 4;
  335. td->session_offset += 4;
  336. td->cookie_offset += 4;
  337. }
  338. }
  339. vp->rx_header_size = vp->header_size;
  340. if ((!td->ipv6) && (!td->udp))
  341. vp->rx_header_size += sizeof(struct iphdr);
  342. return 0;
  343. }
  344. static int build_raw_transport_data(struct vector_private *vp)
  345. {
  346. if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
  347. if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
  348. return -1;
  349. vp->form_header = &raw_form_header;
  350. vp->verify_header = &raw_verify_header;
  351. vp->header_size = sizeof(struct virtio_net_hdr);
  352. vp->rx_header_size = sizeof(struct virtio_net_hdr);
  353. vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
  354. vp->dev->features |=
  355. (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
  356. NETIF_F_TSO | NETIF_F_GRO);
  357. netdev_info(
  358. vp->dev,
  359. "raw: using vnet headers for tso and tx/rx checksum"
  360. );
  361. }
  362. return 0;
  363. }
  364. static int build_hybrid_transport_data(struct vector_private *vp)
  365. {
  366. if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
  367. vp->form_header = &raw_form_header;
  368. vp->verify_header = &raw_verify_header;
  369. vp->header_size = sizeof(struct virtio_net_hdr);
  370. vp->rx_header_size = sizeof(struct virtio_net_hdr);
  371. vp->dev->hw_features |=
  372. (NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
  373. vp->dev->features |=
  374. (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
  375. NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
  376. netdev_info(
  377. vp->dev,
  378. "tap/raw hybrid: using vnet headers for tso and tx/rx checksum"
  379. );
  380. } else {
  381. return 0; /* do not try to enable tap too if raw failed */
  382. }
  383. if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
  384. return 0;
  385. return -1;
  386. }
  387. static int build_tap_transport_data(struct vector_private *vp)
  388. {
  389. /* "Pure" tap uses the same fd for rx and tx */
  390. if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) {
  391. vp->form_header = &raw_form_header;
  392. vp->verify_header = &raw_verify_header;
  393. vp->header_size = sizeof(struct virtio_net_hdr);
  394. vp->rx_header_size = sizeof(struct virtio_net_hdr);
  395. vp->dev->hw_features |=
  396. (NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
  397. vp->dev->features |=
  398. (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
  399. NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
  400. netdev_info(
  401. vp->dev,
  402. "tap: using vnet headers for tso and tx/rx checksum"
  403. );
  404. return 0;
  405. }
  406. return -1;
  407. }
  408. static int build_bess_transport_data(struct vector_private *vp)
  409. {
  410. vp->form_header = NULL;
  411. vp->verify_header = NULL;
  412. vp->header_size = 0;
  413. vp->rx_header_size = 0;
  414. return 0;
  415. }
  416. int build_transport_data(struct vector_private *vp)
  417. {
  418. char *transport = uml_vector_fetch_arg(vp->parsed, "transport");
  419. if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
  420. return build_gre_transport_data(vp);
  421. if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
  422. return build_l2tpv3_transport_data(vp);
  423. if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
  424. return build_raw_transport_data(vp);
  425. if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
  426. return build_tap_transport_data(vp);
  427. if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
  428. return build_hybrid_transport_data(vp);
  429. if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
  430. return build_bess_transport_data(vp);
  431. return 0;
  432. }