
/tools/lib/bpf/netlink.c

https://github.com/tiwai/sound
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <errno.h>
#include <time.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "nlattr.h"

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);

typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
                              void *cookie);

struct xdp_id_md {
        int ifindex;
        __u32 flags;
        struct xdp_link_info info;
};
static int libbpf_netlink_open(__u32 *nl_pid)
{
        struct sockaddr_nl sa;
        socklen_t addrlen;
        int one = 1, ret;
        int sock;

        memset(&sa, 0, sizeof(sa));
        sa.nl_family = AF_NETLINK;

        sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
        if (sock < 0)
                return -errno;

        if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
                       &one, sizeof(one)) < 0) {
                pr_warn("Netlink error reporting not supported\n");
        }

        if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                ret = -errno;
                goto cleanup;
        }

        addrlen = sizeof(sa);
        if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
                ret = -errno;
                goto cleanup;
        }
        if (addrlen != sizeof(sa)) {
                ret = -LIBBPF_ERRNO__INTERNAL;
                goto cleanup;
        }

        *nl_pid = sa.nl_pid;
        return sock;

cleanup:
        close(sock);
        return ret;
}

static void libbpf_netlink_close(int sock)
{
        close(sock);
}
enum {
        NL_CONT,
        NL_NEXT,
        NL_DONE,
};

static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
{
        int len;

        do {
                len = recvmsg(sock, mhdr, flags);
        } while (len < 0 && (errno == EINTR || errno == EAGAIN));

        if (len < 0)
                return -errno;
        return len;
}

static int alloc_iov(struct iovec *iov, int len)
{
        void *nbuf;

        nbuf = realloc(iov->iov_base, len);
        if (!nbuf)
                return -ENOMEM;

        iov->iov_base = nbuf;
        iov->iov_len = len;
        return 0;
}
static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
                               __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
                               void *cookie)
{
        struct iovec iov = {};
        struct msghdr mhdr = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
        };
        bool multipart = true;
        struct nlmsgerr *err;
        struct nlmsghdr *nh;
        int len, ret;

        ret = alloc_iov(&iov, 4096);
        if (ret)
                goto done;

        while (multipart) {
start:
                multipart = false;
                len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
                if (len < 0) {
                        ret = len;
                        goto done;
                }

                if (len > iov.iov_len) {
                        ret = alloc_iov(&iov, len);
                        if (ret)
                                goto done;
                }

                len = netlink_recvmsg(sock, &mhdr, 0);
                if (len < 0) {
                        ret = len;
                        goto done;
                }

                if (len == 0)
                        break;

                for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
                     nh = NLMSG_NEXT(nh, len)) {
                        if (nh->nlmsg_pid != nl_pid) {
                                ret = -LIBBPF_ERRNO__WRNGPID;
                                goto done;
                        }
                        if (nh->nlmsg_seq != seq) {
                                ret = -LIBBPF_ERRNO__INVSEQ;
                                goto done;
                        }
                        if (nh->nlmsg_flags & NLM_F_MULTI)
                                multipart = true;
                        switch (nh->nlmsg_type) {
                        case NLMSG_ERROR:
                                err = (struct nlmsgerr *)NLMSG_DATA(nh);
                                if (!err->error)
                                        continue;
                                ret = err->error;
                                libbpf_nla_dump_errormsg(nh);
                                goto done;
                        case NLMSG_DONE:
                                ret = 0;
                                goto done;
                        default:
                                break;
                        }
                        if (_fn) {
                                ret = _fn(nh, fn, cookie);
                                switch (ret) {
                                case NL_CONT:
                                        break;
                                case NL_NEXT:
                                        goto start;
                                case NL_DONE:
                                        ret = 0;
                                        goto done;
                                default:
                                        goto done;
                                }
                        }
                }
        }
        ret = 0;
done:
        free(iov.iov_base);
        return ret;
}
static int libbpf_netlink_send_recv(struct libbpf_nla_req *req,
                                    __dump_nlmsg_t parse_msg,
                                    libbpf_dump_nlmsg_t parse_attr,
                                    void *cookie)
{
        __u32 nl_pid = 0;
        int sock, ret;

        sock = libbpf_netlink_open(&nl_pid);
        if (sock < 0)
                return sock;

        req->nh.nlmsg_pid = 0;
        req->nh.nlmsg_seq = time(NULL);

        if (send(sock, req, req->nh.nlmsg_len, 0) < 0) {
                ret = -errno;
                goto out;
        }

        ret = libbpf_netlink_recv(sock, nl_pid, req->nh.nlmsg_seq,
                                  parse_msg, parse_attr, cookie);
out:
        libbpf_netlink_close(sock);
        return ret;
}
static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
                                         __u32 flags)
{
        struct nlattr *nla;
        int ret;
        struct libbpf_nla_req req;

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len      = NLMSG_LENGTH(sizeof(struct ifinfomsg));
        req.nh.nlmsg_flags    = NLM_F_REQUEST | NLM_F_ACK;
        req.nh.nlmsg_type     = RTM_SETLINK;
        req.ifinfo.ifi_family = AF_UNSPEC;
        req.ifinfo.ifi_index  = ifindex;

        nla = nlattr_begin_nested(&req, IFLA_XDP);
        if (!nla)
                return -EMSGSIZE;
        ret = nlattr_add(&req, IFLA_XDP_FD, &fd, sizeof(fd));
        if (ret < 0)
                return ret;
        if (flags) {
                ret = nlattr_add(&req, IFLA_XDP_FLAGS, &flags, sizeof(flags));
                if (ret < 0)
                        return ret;
        }
        if (flags & XDP_FLAGS_REPLACE) {
                ret = nlattr_add(&req, IFLA_XDP_EXPECTED_FD, &old_fd,
                                 sizeof(old_fd));
                if (ret < 0)
                        return ret;
        }
        nlattr_end_nested(&req, nla);

        return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
}
int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts)
{
        int old_prog_fd, err;

        if (!OPTS_VALID(opts, bpf_xdp_attach_opts))
                return libbpf_err(-EINVAL);

        old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
        if (old_prog_fd)
                flags |= XDP_FLAGS_REPLACE;
        else
                old_prog_fd = -1;

        err = __bpf_set_link_xdp_fd_replace(ifindex, prog_fd, old_prog_fd, flags);
        return libbpf_err(err);
}

int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *opts)
{
        return bpf_xdp_attach(ifindex, -1, flags, opts);
}

int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
                             const struct bpf_xdp_set_link_opts *opts)
{
        int old_fd = -1, ret;

        if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
                return libbpf_err(-EINVAL);

        if (OPTS_HAS(opts, old_fd)) {
                old_fd = OPTS_GET(opts, old_fd, -1);
                flags |= XDP_FLAGS_REPLACE;
        }

        ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
        return libbpf_err(ret);
}

int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
        int ret;

        ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
        return libbpf_err(ret);
}
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
                             libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
        struct nlattr *tb[IFLA_MAX + 1], *attr;
        struct ifinfomsg *ifi = NLMSG_DATA(nlh);
        int len;

        len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
        attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));

        if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
                return -LIBBPF_ERRNO__NLPARSE;

        return dump_link_nlmsg(cookie, ifi, tb);
}

static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
{
        struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
        struct xdp_id_md *xdp_id = cookie;
        struct ifinfomsg *ifinfo = msg;
        int ret;

        if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index)
                return 0;

        if (!tb[IFLA_XDP])
                return 0;

        ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL);
        if (ret)
                return ret;

        if (!xdp_tb[IFLA_XDP_ATTACHED])
                return 0;

        xdp_id->info.attach_mode = libbpf_nla_getattr_u8(
                xdp_tb[IFLA_XDP_ATTACHED]);

        if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE)
                return 0;

        if (xdp_tb[IFLA_XDP_PROG_ID])
                xdp_id->info.prog_id = libbpf_nla_getattr_u32(
                        xdp_tb[IFLA_XDP_PROG_ID]);

        if (xdp_tb[IFLA_XDP_SKB_PROG_ID])
                xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32(
                        xdp_tb[IFLA_XDP_SKB_PROG_ID]);

        if (xdp_tb[IFLA_XDP_DRV_PROG_ID])
                xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32(
                        xdp_tb[IFLA_XDP_DRV_PROG_ID]);

        if (xdp_tb[IFLA_XDP_HW_PROG_ID])
                xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32(
                        xdp_tb[IFLA_XDP_HW_PROG_ID]);

        return 0;
}
int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
{
        struct libbpf_nla_req req = {
                .nh.nlmsg_len      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
                .nh.nlmsg_type     = RTM_GETLINK,
                .nh.nlmsg_flags    = NLM_F_DUMP | NLM_F_REQUEST,
                .ifinfo.ifi_family = AF_PACKET,
        };
        struct xdp_id_md xdp_id = {};
        int err;

        if (!OPTS_VALID(opts, bpf_xdp_query_opts))
                return libbpf_err(-EINVAL);

        if (xdp_flags & ~XDP_FLAGS_MASK)
                return libbpf_err(-EINVAL);

        /* Check whether the single {HW,DRV,SKB} mode is set */
        xdp_flags &= XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE;
        if (xdp_flags & (xdp_flags - 1))
                return libbpf_err(-EINVAL);

        xdp_id.ifindex = ifindex;
        xdp_id.flags = xdp_flags;

        err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg,
                                       get_xdp_info, &xdp_id);
        if (err)
                return libbpf_err(err);

        OPTS_SET(opts, prog_id, xdp_id.info.prog_id);
        OPTS_SET(opts, drv_prog_id, xdp_id.info.drv_prog_id);
        OPTS_SET(opts, hw_prog_id, xdp_id.info.hw_prog_id);
        OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id);
        OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode);

        return 0;
}

int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
                          size_t info_size, __u32 flags)
{
        LIBBPF_OPTS(bpf_xdp_query_opts, opts);
        size_t sz;
        int err;

        if (!info_size)
                return libbpf_err(-EINVAL);

        err = bpf_xdp_query(ifindex, flags, &opts);
        if (err)
                return libbpf_err(err);

        /* struct xdp_link_info field layout matches struct bpf_xdp_query_opts
         * layout after sz field
         */
        sz = min(info_size, offsetofend(struct xdp_link_info, attach_mode));
        memcpy(info, &opts.prog_id, sz);
        memset((void *)info + sz, 0, info_size - sz);

        return 0;
}

int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
{
        LIBBPF_OPTS(bpf_xdp_query_opts, opts);
        int ret;

        ret = bpf_xdp_query(ifindex, flags, &opts);
        if (ret)
                return libbpf_err(ret);

        flags &= XDP_FLAGS_MODES;

        if (opts.attach_mode != XDP_ATTACHED_MULTI && !flags)
                *prog_id = opts.prog_id;
        else if (flags & XDP_FLAGS_DRV_MODE)
                *prog_id = opts.drv_prog_id;
        else if (flags & XDP_FLAGS_HW_MODE)
                *prog_id = opts.hw_prog_id;
        else if (flags & XDP_FLAGS_SKB_MODE)
                *prog_id = opts.skb_prog_id;
        else
                *prog_id = 0;

        return 0;
}

int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
{
        return bpf_xdp_query_id(ifindex, flags, prog_id);
}
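
/*
 * A minimal usage sketch for the XDP query API above, kept as a comment so the
 * listing still reads as plain C. The interface name "eth0" is a hypothetical
 * example; ifindex comes from if_nametoindex() in <net/if.h>, and error
 * handling is abbreviated.
 *
 *	#include <errno.h>
 *	#include <net/if.h>
 *	#include <stdio.h>
 *	#include <bpf/libbpf.h>
 *
 *	static int example_print_xdp_prog_id(void)
 *	{
 *		__u32 prog_id = 0;
 *		int ifindex = if_nametoindex("eth0");
 *		int err;
 *
 *		if (!ifindex)
 *			return -errno;
 *		err = bpf_xdp_query_id(ifindex, 0, &prog_id);
 *		if (err)
 *			return err;
 *		printf("XDP prog id on eth0: %u\n", prog_id);
 *		return 0;
 *	}
 */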
typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);

static int clsact_config(struct libbpf_nla_req *req)
{
        req->tc.tcm_parent = TC_H_CLSACT;
        req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);

        return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact"));
}

static int attach_point_to_config(struct bpf_tc_hook *hook,
                                  qdisc_config_t *config)
{
        switch (OPTS_GET(hook, attach_point, 0)) {
        case BPF_TC_INGRESS:
        case BPF_TC_EGRESS:
        case BPF_TC_INGRESS | BPF_TC_EGRESS:
                if (OPTS_GET(hook, parent, 0))
                        return -EINVAL;
                *config = &clsact_config;
                return 0;
        case BPF_TC_CUSTOM:
                return -EOPNOTSUPP;
        default:
                return -EINVAL;
        }
}

static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point,
                             __u32 *parent)
{
        switch (attach_point) {
        case BPF_TC_INGRESS:
        case BPF_TC_EGRESS:
                if (*parent)
                        return -EINVAL;
                *parent = TC_H_MAKE(TC_H_CLSACT,
                                    attach_point == BPF_TC_INGRESS ?
                                    TC_H_MIN_INGRESS : TC_H_MIN_EGRESS);
                break;
        case BPF_TC_CUSTOM:
                if (!*parent)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
{
        qdisc_config_t config;
        int ret;
        struct libbpf_nla_req req;

        ret = attach_point_to_config(hook, &config);
        if (ret < 0)
                return ret;

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct tcmsg));
        req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
        req.nh.nlmsg_type  = cmd;
        req.tc.tcm_family  = AF_UNSPEC;
        req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);

        ret = config(&req);
        if (ret < 0)
                return ret;

        return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
}

static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
{
        return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE | NLM_F_EXCL);
}

static int tc_qdisc_delete(struct bpf_tc_hook *hook)
{
        return tc_qdisc_modify(hook, RTM_DELQDISC, 0);
}

int bpf_tc_hook_create(struct bpf_tc_hook *hook)
{
        int ret;

        if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
            OPTS_GET(hook, ifindex, 0) <= 0)
                return libbpf_err(-EINVAL);

        ret = tc_qdisc_create_excl(hook);
        return libbpf_err(ret);
}

static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
                           const struct bpf_tc_opts *opts,
                           const bool flush);

int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
{
        if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
            OPTS_GET(hook, ifindex, 0) <= 0)
                return libbpf_err(-EINVAL);

        switch (OPTS_GET(hook, attach_point, 0)) {
        case BPF_TC_INGRESS:
        case BPF_TC_EGRESS:
                return libbpf_err(__bpf_tc_detach(hook, NULL, true));
        case BPF_TC_INGRESS | BPF_TC_EGRESS:
                return libbpf_err(tc_qdisc_delete(hook));
        case BPF_TC_CUSTOM:
                return libbpf_err(-EOPNOTSUPP);
        default:
                return libbpf_err(-EINVAL);
        }
}
struct bpf_cb_ctx {
        struct bpf_tc_opts *opts;
        bool processed;
};

static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb,
                         bool unicast)
{
        struct nlattr *tbb[TCA_BPF_MAX + 1];
        struct bpf_cb_ctx *info = cookie;

        if (!info || !info->opts)
                return -EINVAL;
        if (unicast && info->processed)
                return -EINVAL;
        if (!tb[TCA_OPTIONS])
                return NL_CONT;

        libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL);
        if (!tbb[TCA_BPF_ID])
                return -EINVAL;

        OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID]));
        OPTS_SET(info->opts, handle, tc->tcm_handle);
        OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16);

        info->processed = true;
        return unicast ? NL_NEXT : NL_DONE;
}

static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
                       void *cookie)
{
        struct tcmsg *tc = NLMSG_DATA(nh);
        struct nlattr *tb[TCA_MAX + 1];

        libbpf_nla_parse(tb, TCA_MAX,
                         (struct nlattr *)((void *)tc + NLMSG_ALIGN(sizeof(*tc))),
                         NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL);
        if (!tb[TCA_KIND])
                return NL_CONT;
        return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO);
}

static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
{
        struct bpf_prog_info info = {};
        __u32 info_len = sizeof(info);
        char name[256];
        int len, ret;

        ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
        if (ret < 0)
                return ret;

        ret = nlattr_add(req, TCA_BPF_FD, &fd, sizeof(fd));
        if (ret < 0)
                return ret;
        len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id);
        if (len < 0)
                return -errno;
        if (len >= sizeof(name))
                return -ENAMETOOLONG;
        return nlattr_add(req, TCA_BPF_NAME, name, len + 1);
}
int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
{
        __u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags;
        int ret, ifindex, attach_point, prog_fd;
        struct bpf_cb_ctx info = {};
        struct libbpf_nla_req req;
        struct nlattr *nla;

        if (!hook || !opts ||
            !OPTS_VALID(hook, bpf_tc_hook) ||
            !OPTS_VALID(opts, bpf_tc_opts))
                return libbpf_err(-EINVAL);

        ifindex      = OPTS_GET(hook, ifindex, 0);
        parent       = OPTS_GET(hook, parent, 0);
        attach_point = OPTS_GET(hook, attach_point, 0);

        handle       = OPTS_GET(opts, handle, 0);
        priority     = OPTS_GET(opts, priority, 0);
        prog_fd      = OPTS_GET(opts, prog_fd, 0);
        prog_id      = OPTS_GET(opts, prog_id, 0);
        flags        = OPTS_GET(opts, flags, 0);

        if (ifindex <= 0 || !prog_fd || prog_id)
                return libbpf_err(-EINVAL);
        if (priority > UINT16_MAX)
                return libbpf_err(-EINVAL);
        if (flags & ~BPF_TC_F_REPLACE)
                return libbpf_err(-EINVAL);

        flags = (flags & BPF_TC_F_REPLACE) ? NLM_F_REPLACE : NLM_F_EXCL;
        protocol = ETH_P_ALL;

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct tcmsg));
        req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE |
                             NLM_F_ECHO | flags;
        req.nh.nlmsg_type  = RTM_NEWTFILTER;
        req.tc.tcm_family  = AF_UNSPEC;
        req.tc.tcm_ifindex = ifindex;
        req.tc.tcm_handle  = handle;
        req.tc.tcm_info    = TC_H_MAKE(priority << 16, htons(protocol));

        ret = tc_get_tcm_parent(attach_point, &parent);
        if (ret < 0)
                return libbpf_err(ret);
        req.tc.tcm_parent = parent;

        ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
        if (ret < 0)
                return libbpf_err(ret);
        nla = nlattr_begin_nested(&req, TCA_OPTIONS);
        if (!nla)
                return libbpf_err(-EMSGSIZE);
        ret = tc_add_fd_and_name(&req, prog_fd);
        if (ret < 0)
                return libbpf_err(ret);
        bpf_flags = TCA_BPF_FLAG_ACT_DIRECT;
        ret = nlattr_add(&req, TCA_BPF_FLAGS, &bpf_flags, sizeof(bpf_flags));
        if (ret < 0)
                return libbpf_err(ret);
        nlattr_end_nested(&req, nla);

        info.opts = opts;

        ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
        if (ret < 0)
                return libbpf_err(ret);
        if (!info.processed)
                return libbpf_err(-ENOENT);
        return ret;
}
static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
                           const struct bpf_tc_opts *opts,
                           const bool flush)
{
        __u32 protocol = 0, handle, priority, parent, prog_id, flags;
        int ret, ifindex, attach_point, prog_fd;
        struct libbpf_nla_req req;

        if (!hook ||
            !OPTS_VALID(hook, bpf_tc_hook) ||
            !OPTS_VALID(opts, bpf_tc_opts))
                return -EINVAL;

        ifindex      = OPTS_GET(hook, ifindex, 0);
        parent       = OPTS_GET(hook, parent, 0);
        attach_point = OPTS_GET(hook, attach_point, 0);

        handle       = OPTS_GET(opts, handle, 0);
        priority     = OPTS_GET(opts, priority, 0);
        prog_fd      = OPTS_GET(opts, prog_fd, 0);
        prog_id      = OPTS_GET(opts, prog_id, 0);
        flags        = OPTS_GET(opts, flags, 0);

        if (ifindex <= 0 || flags || prog_fd || prog_id)
                return -EINVAL;
        if (priority > UINT16_MAX)
                return -EINVAL;
        if (!flush) {
                if (!handle || !priority)
                        return -EINVAL;
                protocol = ETH_P_ALL;
        } else {
                if (handle || priority)
                        return -EINVAL;
        }

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct tcmsg));
        req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        req.nh.nlmsg_type  = RTM_DELTFILTER;
        req.tc.tcm_family  = AF_UNSPEC;
        req.tc.tcm_ifindex = ifindex;
        if (!flush) {
                req.tc.tcm_handle = handle;
                req.tc.tcm_info   = TC_H_MAKE(priority << 16, htons(protocol));
        }

        ret = tc_get_tcm_parent(attach_point, &parent);
        if (ret < 0)
                return ret;
        req.tc.tcm_parent = parent;

        if (!flush) {
                ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
                if (ret < 0)
                        return ret;
        }

        return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
}

int bpf_tc_detach(const struct bpf_tc_hook *hook,
                  const struct bpf_tc_opts *opts)
{
        int ret;

        if (!opts)
                return libbpf_err(-EINVAL);

        ret = __bpf_tc_detach(hook, opts, false);
        return libbpf_err(ret);
}
int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
{
        __u32 protocol, handle, priority, parent, prog_id, flags;
        int ret, ifindex, attach_point, prog_fd;
        struct bpf_cb_ctx info = {};
        struct libbpf_nla_req req;

        if (!hook || !opts ||
            !OPTS_VALID(hook, bpf_tc_hook) ||
            !OPTS_VALID(opts, bpf_tc_opts))
                return libbpf_err(-EINVAL);

        ifindex      = OPTS_GET(hook, ifindex, 0);
        parent       = OPTS_GET(hook, parent, 0);
        attach_point = OPTS_GET(hook, attach_point, 0);

        handle       = OPTS_GET(opts, handle, 0);
        priority     = OPTS_GET(opts, priority, 0);
        prog_fd      = OPTS_GET(opts, prog_fd, 0);
        prog_id      = OPTS_GET(opts, prog_id, 0);
        flags        = OPTS_GET(opts, flags, 0);

        if (ifindex <= 0 || flags || prog_fd || prog_id ||
            !handle || !priority)
                return libbpf_err(-EINVAL);
        if (priority > UINT16_MAX)
                return libbpf_err(-EINVAL);

        protocol = ETH_P_ALL;

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct tcmsg));
        req.nh.nlmsg_flags = NLM_F_REQUEST;
        req.nh.nlmsg_type  = RTM_GETTFILTER;
        req.tc.tcm_family  = AF_UNSPEC;
        req.tc.tcm_ifindex = ifindex;
        req.tc.tcm_handle  = handle;
        req.tc.tcm_info    = TC_H_MAKE(priority << 16, htons(protocol));

        ret = tc_get_tcm_parent(attach_point, &parent);
        if (ret < 0)
                return libbpf_err(ret);
        req.tc.tcm_parent = parent;

        ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
        if (ret < 0)
                return libbpf_err(ret);

        info.opts = opts;

        ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
        if (ret < 0)
                return libbpf_err(ret);
        if (!info.processed)
                return libbpf_err(-ENOENT);
        return ret;
}
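
/*
 * A minimal usage sketch for the TC hook/attach API above, kept as a comment
 * so the listing still reads as plain C. It assumes a hypothetical interface
 * name "eth0" and an already-loaded program fd in prog_fd; error handling is
 * abbreviated. bpf_tc_hook_create() returning -EEXIST simply means a clsact
 * qdisc is already present, which is fine for attaching.
 *
 *	#include <errno.h>
 *	#include <net/if.h>
 *	#include <bpf/libbpf.h>
 *
 *	static int example_tc_attach(int prog_fd)
 *	{
 *		LIBBPF_OPTS(bpf_tc_hook, hook,
 *			    .ifindex = if_nametoindex("eth0"),
 *			    .attach_point = BPF_TC_INGRESS);
 *		LIBBPF_OPTS(bpf_tc_opts, opts,
 *			    .handle = 1, .priority = 1, .prog_fd = prog_fd);
 *		int err;
 *
 *		err = bpf_tc_hook_create(&hook);
 *		if (err && err != -EEXIST)
 *			return err;
 *		return bpf_tc_attach(&hook, &opts);
 *	}
 */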