
/net/ipv4/tcp_bpf.c

https://gitlab.com/kush/linux
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>
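
/* Ingress data queued by a BPF verdict is ready to read when the psock
 * ingress_msg list is non-empty; installed below as the
 * ->stream_memory_read hook.
 */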
static bool tcp_bpf_stream_read(const struct sock *sk)
{
        struct sk_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock))
                empty = list_empty(&psock->ingress_msg);
        rcu_read_unlock();
        return !empty;
}
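
/* Sleep on the socket wait queue until either the psock ingress list or the
 * regular receive queue has data, or the receive timeout expires.
 */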
static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
                             int flags, long timeo, int *err)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int ret;

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        ret = sk_wait_event(sk, &timeo,
                            !list_empty(&psock->ingress_msg) ||
                            !skb_queue_empty(&sk->sk_receive_queue), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
        return ret;
}
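
/* Copy up to @len bytes of queued ingress sk_msg data into the user iov.
 * MSG_PEEK leaves the queue untouched; otherwise consumed bytes are
 * uncharged and fully drained messages are freed.
 */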
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
                      struct msghdr *msg, int len, int flags)
{
        struct iov_iter *iter = &msg->msg_iter;
        int peek = flags & MSG_PEEK;
        int i, ret, copied = 0;
        struct sk_msg *msg_rx;

        msg_rx = list_first_entry_or_null(&psock->ingress_msg,
                                          struct sk_msg, list);

        while (copied != len) {
                struct scatterlist *sge;

                if (unlikely(!msg_rx))
                        break;

                i = msg_rx->sg.start;
                do {
                        struct page *page;
                        int copy;

                        sge = sk_msg_elem(msg_rx, i);
                        copy = sge->length;
                        page = sg_page(sge);
                        if (copied + copy > len)
                                copy = len - copied;
                        ret = copy_page_to_iter(page, sge->offset, copy, iter);
                        if (ret != copy) {
                                msg_rx->sg.start = i;
                                return -EFAULT;
                        }

                        copied += copy;
                        if (likely(!peek)) {
                                sge->offset += copy;
                                sge->length -= copy;
                                sk_mem_uncharge(sk, copy);
                                msg_rx->sg.size -= copy;

                                if (!sge->length) {
                                        sk_msg_iter_var_next(i);
                                        if (!msg_rx->skb)
                                                put_page(page);
                                }
                        } else {
                                sk_msg_iter_var_next(i);
                        }

                        if (copied == len)
                                break;
                } while (i != msg_rx->sg.end);

                if (unlikely(peek)) {
                        msg_rx = list_next_entry(msg_rx, list);
                        continue;
                }

                msg_rx->sg.start = i;
                if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
                        list_del(&msg_rx->list);
                        if (msg_rx->skb)
                                consume_skb(msg_rx->skb);
                        kfree(msg_rx);
                }
                msg_rx = list_first_entry_or_null(&psock->ingress_msg,
                                                  struct sk_msg, list);
        }

        return copied;
}
EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);
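
/* ->recvmsg replacement: fall back to tcp_recvmsg() when no psock is
 * attached or data is sitting on the regular receive queue, otherwise serve
 * data from the psock ingress queue, waiting if necessary.
 */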
int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                    int nonblock, int flags, int *addr_len)
{
        struct sk_psock *psock;
        int copied, ret;

        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
        lock_sock(sk);
msg_bytes_ready:
        copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
        if (!copied) {
                int data, err = 0;
                long timeo;

                timeo = sock_rcvtimeo(sk, nonblock);
                data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
                if (data) {
                        if (skb_queue_empty(&sk->sk_receive_queue))
                                goto msg_bytes_ready;
                        release_sock(sk);
                        sk_psock_put(sk, psock);
                        return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
                }
                if (err) {
                        ret = err;
                        goto out;
                }
                copied = -EAGAIN;
        }
        ret = copied;
out:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return ret;
}
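
/* Queue up to @apply_bytes of @msg on @sk's own ingress list: charge the
 * memory to @sk, transfer the scatterlist entries into a new sk_msg and wake
 * the receiver.
 */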
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
                           struct sk_msg *msg, u32 apply_bytes, int flags)
{
        bool apply = apply_bytes;
        struct scatterlist *sge;
        u32 size, copied = 0;
        struct sk_msg *tmp;
        int i, ret = 0;

        tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
        if (unlikely(!tmp))
                return -ENOMEM;

        lock_sock(sk);
        tmp->sg.start = msg->sg.start;
        i = msg->sg.start;
        do {
                sge = sk_msg_elem(msg, i);
                size = (apply && apply_bytes < sge->length) ?
                        apply_bytes : sge->length;
                if (!sk_wmem_schedule(sk, size)) {
                        if (!copied)
                                ret = -ENOMEM;
                        break;
                }

                sk_mem_charge(sk, size);
                sk_msg_xfer(tmp, msg, i, size);
                copied += size;
                if (sge->length)
                        get_page(sk_msg_page(tmp, i));
                sk_msg_iter_var_next(i);
                tmp->sg.end = i;
                if (apply) {
                        apply_bytes -= size;
                        if (!apply_bytes)
                                break;
                }
        } while (i != msg->sg.end);

        if (!ret) {
                msg->sg.start = i;
                msg->sg.size -= apply_bytes;
                sk_psock_queue_msg(psock, tmp);
                sk_psock_data_ready(sk, psock);
        } else {
                sk_msg_free(sk, tmp);
                kfree(tmp);
        }

        release_sock(sk);
        return ret;
}
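
/* Transmit up to @apply_bytes of @msg on @sk via do_tcp_sendpages(), or via
 * kernel_sendpage_locked() with MSG_SENDPAGE_NOPOLICY when a TLS TX ULP is
 * active. Called with the socket lock held.
 */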
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
                        int flags, bool uncharge)
{
        bool apply = apply_bytes;
        struct scatterlist *sge;
        struct page *page;
        int size, ret = 0;
        u32 off;

        while (1) {
                bool has_tx_ulp;

                sge = sk_msg_elem(msg, msg->sg.start);
                size = (apply && apply_bytes < sge->length) ?
                        apply_bytes : sge->length;
                off = sge->offset;
                page = sg_page(sge);

                tcp_rate_check_app_limited(sk);
retry:
                has_tx_ulp = tls_sw_has_ctx_tx(sk);
                if (has_tx_ulp) {
                        flags |= MSG_SENDPAGE_NOPOLICY;
                        ret = kernel_sendpage_locked(sk,
                                                     page, off, size, flags);
                } else {
                        ret = do_tcp_sendpages(sk, page, off, size, flags);
                }

                if (ret <= 0)
                        return ret;
                if (apply)
                        apply_bytes -= ret;
                msg->sg.size -= ret;
                sge->offset += ret;
                sge->length -= ret;
                if (uncharge)
                        sk_mem_uncharge(sk, ret);
                if (ret != size) {
                        size -= ret;
                        off += ret;
                        goto retry;
                }
                if (!sge->length) {
                        put_page(page);
                        sk_msg_iter_next(msg, start);
                        sg_init_table(sge, 1);
                        if (msg->sg.start == msg->sg.end)
                                break;
                }

                if (apply && !apply_bytes)
                        break;
        }

        return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
                               u32 apply_bytes, int flags, bool uncharge)
{
        int ret;

        lock_sock(sk);
        ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
        release_sock(sk);
        return ret;
}
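
/* Send path for SK_REDIRECT verdicts: queue @msg on the target socket's
 * ingress list or push it out on the wire, depending on whether the msg is
 * marked for ingress.
 */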
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
                          u32 bytes, int flags)
{
        bool ingress = sk_msg_to_ingress(msg);
        struct sk_psock *psock = sk_psock_get(sk);
        int ret;

        if (unlikely(!psock)) {
                sk_msg_free(sk, msg);
                return 0;
        }

        ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
                        tcp_bpf_push_locked(sk, msg, bytes, flags, false);
        sk_psock_put(sk, psock);
        return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
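
/* Run the msg verdict program (unless a prior verdict still applies) and act
 * on the result: __SK_PASS transmits on @sk, __SK_REDIRECT hands the data to
 * tcp_bpf_sendmsg_redir(), __SK_DROP frees it. Also handles cork_bytes and
 * apply_bytes bookkeeping.
 */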
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
                                struct sk_msg *msg, int *copied, int flags)
{
        bool cork = false, enospc = msg->sg.start == msg->sg.end;
        struct sock *sk_redir;
        u32 tosend, delta = 0;
        int ret;

more_data:
        if (psock->eval == __SK_NONE) {
                /* Track delta in msg size to add/subtract it on SK_DROP from
                 * returned to user copied size. This ensures user doesn't
                 * get a positive return code with msg_cut_data and SK_DROP
                 * verdict.
                 */
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
                if (msg->sg.size < delta)
                        delta -= msg->sg.size;
                else
                        delta = 0;
        }

        if (msg->cork_bytes &&
            msg->cork_bytes > msg->sg.size && !enospc) {
                psock->cork_bytes = msg->cork_bytes - msg->sg.size;
                if (!psock->cork) {
                        psock->cork = kzalloc(sizeof(*psock->cork),
                                              GFP_ATOMIC | __GFP_NOWARN);
                        if (!psock->cork)
                                return -ENOMEM;
                }
                memcpy(psock->cork, msg, sizeof(*msg));
                return 0;
        }

        tosend = msg->sg.size;
        if (psock->apply_bytes && psock->apply_bytes < tosend)
                tosend = psock->apply_bytes;

        switch (psock->eval) {
        case __SK_PASS:
                ret = tcp_bpf_push(sk, msg, tosend, flags, true);
                if (unlikely(ret)) {
                        *copied -= sk_msg_free(sk, msg);
                        break;
                }
                sk_msg_apply_bytes(psock, tosend);
                break;
        case __SK_REDIRECT:
                sk_redir = psock->sk_redir;
                sk_msg_apply_bytes(psock, tosend);
                if (psock->cork) {
                        cork = true;
                        psock->cork = NULL;
                }
                sk_msg_return(sk, msg, tosend);
                release_sock(sk);
                ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
                lock_sock(sk);
                if (unlikely(ret < 0)) {
                        int free = sk_msg_free_nocharge(sk, msg);

                        if (!cork)
                                *copied -= free;
                }
                if (cork) {
                        sk_msg_free(sk, msg);
                        kfree(msg);
                        msg = NULL;
                        ret = 0;
                }
                break;
        case __SK_DROP:
        default:
                sk_msg_free_partial(sk, msg, tosend);
                sk_msg_apply_bytes(psock, tosend);
                *copied -= (tosend + delta);
                return -EACCES;
        }

        if (likely(!ret)) {
                if (!psock->apply_bytes) {
                        psock->eval = __SK_NONE;
                        if (psock->sk_redir) {
                                sock_put(psock->sk_redir);
                                psock->sk_redir = NULL;
                        }
                }
                if (msg &&
                    msg->sg.data[msg->sg.start].page_link &&
                    msg->sg.data[msg->sg.start].length)
                        goto more_data;
        }
        return ret;
}
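
/* ->sendmsg replacement: copy user data into an sk_msg (or the pending cork
 * buffer) and run it through tcp_bpf_send_verdict().
 */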
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        struct sk_msg tmp, *msg_tx = NULL;
        int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
        int copied = 0, err = 0;
        struct sk_psock *psock;
        long timeo;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_sendmsg(sk, msg, size);

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        while (msg_data_left(msg)) {
                bool enospc = false;
                u32 copy, osize;

                if (sk->sk_err) {
                        err = -sk->sk_err;
                        goto out_err;
                }

                copy = msg_data_left(msg);
                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
                if (psock->cork) {
                        msg_tx = psock->cork;
                } else {
                        msg_tx = &tmp;
                        sk_msg_init(msg_tx);
                }

                osize = msg_tx->sg.size;
                err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
                if (err) {
                        if (err != -ENOSPC)
                                goto wait_for_memory;
                        enospc = true;
                        copy = msg_tx->sg.size - osize;
                }

                err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
                                               copy);
                if (err < 0) {
                        sk_msg_trim(sk, msg_tx, osize);
                        goto out_err;
                }

                copied += copy;
                if (psock->cork_bytes) {
                        if (size > psock->cork_bytes)
                                psock->cork_bytes = 0;
                        else
                                psock->cork_bytes -= size;
                        if (psock->cork_bytes && !enospc)
                                goto out_err;
                        /* All cork bytes are accounted, rerun the prog. */
                        psock->eval = __SK_NONE;
                        psock->cork_bytes = 0;
                }

                err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
                if (unlikely(err < 0))
                        goto out_err;
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (msg_tx && msg_tx != psock->cork)
                                sk_msg_free(sk, msg_tx);
                        goto out_err;
                }
        }
out_err:
        if (err < 0)
                err = sk_stream_error(sk, msg->msg_flags, err);
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied ? copied : err;
}
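
/* ->sendpage replacement: add the page to an sk_msg (or the pending cork
 * buffer) and run the verdict path.
 */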
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
                            size_t size, int flags)
{
        struct sk_msg tmp, *msg = NULL;
        int err = 0, copied = 0;
        struct sk_psock *psock;
        bool enospc = false;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_sendpage(sk, page, offset, size, flags);

        lock_sock(sk);
        if (psock->cork) {
                msg = psock->cork;
        } else {
                msg = &tmp;
                sk_msg_init(msg);
        }

        /* Catch case where ring is full and sendpage is stalled. */
        if (unlikely(sk_msg_full(msg)))
                goto out_err;

        sk_msg_page_add(msg, page, size, offset);
        sk_mem_charge(sk, size);
        copied = size;
        if (sk_msg_full(msg))
                enospc = true;
        if (psock->cork_bytes) {
                if (size > psock->cork_bytes)
                        psock->cork_bytes = 0;
                else
                        psock->cork_bytes -= size;
                if (psock->cork_bytes && !enospc)
                        goto out_err;
                /* All cork bytes are accounted, rerun the prog. */
                psock->eval = __SK_NONE;
                psock->cork_bytes = 0;
        }

        err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied ? copied : err;
}
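
/* Tear down BPF state on the socket: free any cork buffer, purge pending
 * ingress msgs and drop all psock links.
 */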
static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_link *link;

        sk_psock_cork_free(psock);
        __sk_psock_purge_ingress_msg(psock);
        while ((link = sk_psock_link_pop(psock))) {
                sk_psock_unlink(sk, link);
                sk_psock_free_link(link);
        }
}
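
/* ->unhash replacement: clean up psock state, then call the saved unhash. */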
static void tcp_bpf_unhash(struct sock *sk)
{
        void (*saved_unhash)(struct sock *sk);
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                if (sk->sk_prot->unhash)
                        sk->sk_prot->unhash(sk);
                return;
        }

        saved_unhash = psock->saved_unhash;
        tcp_bpf_remove(sk, psock);
        rcu_read_unlock();
        saved_unhash(sk);
}
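
/* ->close replacement: same cleanup as unhash, done under the socket lock
 * before the saved close runs.
 */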
static void tcp_bpf_close(struct sock *sk, long timeout)
{
        void (*saved_close)(struct sock *sk, long timeout);
        struct sk_psock *psock;

        lock_sock(sk);
        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                release_sock(sk);
                return sk->sk_prot->close(sk, timeout);
        }

        saved_close = psock->saved_close;
        tcp_bpf_remove(sk, psock);
        rcu_read_unlock();
        release_sock(sk);
        saved_close(sk, timeout);
}
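
/* One proto variant per address family and configuration: TCP_BPF_BASE
 * overrides only the receive side, TCP_BPF_TX also overrides
 * sendmsg/sendpage.
 */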
enum {
        TCP_BPF_IPV4,
        TCP_BPF_IPV6,
        TCP_BPF_NUM_PROTS,
};

enum {
        TCP_BPF_BASE,
        TCP_BPF_TX,
        TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];
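
/* Clone @base and override the hooks the BPF data path needs. */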
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
                                   struct proto *base)
{
        prot[TCP_BPF_BASE] = *base;
        prot[TCP_BPF_BASE].unhash = tcp_bpf_unhash;
        prot[TCP_BPF_BASE].close = tcp_bpf_close;
        prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
        prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;

        prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
        prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
        prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
}
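
/* The IPv6 proto table is built lazily, the first time an IPv6 socket is
 * seen, since tcpv6_prot is not available at initcall time when IPv6 is a
 * module.
 */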
static void tcp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
{
        if (sk->sk_family == AF_INET6 &&
            unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
                spin_lock_bh(&tcpv6_prot_lock);
                if (likely(ops != tcpv6_prot_saved)) {
                        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
                        smp_store_release(&tcpv6_prot_saved, ops);
                }
                spin_unlock_bh(&tcpv6_prot_lock);
        }
}

static int __init tcp_bpf_v4_build_proto(void)
{
        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
        return 0;
}
core_initcall(tcp_bpf_v4_build_proto);
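
/* Pick the proto variant from the address family and whether a msg parser
 * program is attached, then install it, saving the original ops.
 */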
static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
{
        int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
        int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

        sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]);
}

static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock)
{
        int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
        int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

        /* Reinit occurs when program types change e.g. TCP_BPF_TX is removed
         * or added requiring sk_prot hook updates. We keep original saved
         * hooks in this case.
         */
        sk->sk_prot = &tcp_bpf_prots[family][config];
}

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
        /* In order to avoid retpoline, we make assumptions when we call
         * into ops if e.g. a psock is not present. Make sure they are
         * indeed valid assumptions.
         */
        return ops->recvmsg == tcp_recvmsg &&
               ops->sendmsg == tcp_sendmsg &&
               ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}
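
/* Called when the set of attached programs changes on an already converted
 * socket; only re-selects the proto variant.
 */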
void tcp_bpf_reinit(struct sock *sk)
{
        struct sk_psock *psock;

        sock_owned_by_me(sk);

        rcu_read_lock();
        psock = sk_psock(sk);
        tcp_bpf_reinit_sk_prot(sk, psock);
        rcu_read_unlock();
}
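
/* Convert @sk to the BPF proto ops. Fails if no psock is attached, the
 * socket was already converted, or it is not using the stock TCP ops.
 */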
int tcp_bpf_init(struct sock *sk)
{
        struct proto *ops = READ_ONCE(sk->sk_prot);
        struct sk_psock *psock;

        sock_owned_by_me(sk);

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock || psock->sk_proto ||
                     tcp_bpf_assert_proto_ops(ops))) {
                rcu_read_unlock();
                return -EINVAL;
        }
        tcp_bpf_check_v6_needs_rebuild(sk, ops);
        tcp_bpf_update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
}