
/net/rxrpc/recvmsg.c

https://bitbucket.org/mirror/linux

// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Post a call for attention by the socket or kernel service.  Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%d", call->debug_id);

        if (!list_empty(&call->recvmsg_link))
                return;

        rcu_read_lock();

        rx = rcu_dereference(call->socket);
        sk = &rx->sk;
        if (rx && sk->sk_state < RXRPC_CLOSE) {
                if (call->notify_rx) {
                        spin_lock_bh(&call->notify_lock);
                        call->notify_rx(sk, call, call->user_call_ID);
                        spin_unlock_bh(&call->notify_lock);
                } else {
                        write_lock_bh(&rx->recvmsg_lock);
                        if (list_empty(&call->recvmsg_link)) {
                                rxrpc_get_call(call, rxrpc_call_got);
                                list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
                        }
                        write_unlock_bh(&rx->recvmsg_lock);

                        if (!sock_flag(sk, SOCK_DEAD)) {
                                _debug("call %ps", sk->sk_data_ready);
                                sk->sk_data_ready(sk);
                        }
                }
        }

        rcu_read_unlock();
        _leave("");
}

/*
 * Transition a call to the complete state.
 */
bool __rxrpc_set_call_completion(struct rxrpc_call *call,
                                 enum rxrpc_call_completion compl,
                                 u32 abort_code,
                                 int error)
{
        if (call->state < RXRPC_CALL_COMPLETE) {
                call->abort_code = abort_code;
                call->error = error;
                call->completion = compl;
                call->state = RXRPC_CALL_COMPLETE;
                trace_rxrpc_call_complete(call);
                wake_up(&call->waitq);
                rxrpc_notify_socket(call);
                return true;
        }
        return false;
}

bool rxrpc_set_call_completion(struct rxrpc_call *call,
                               enum rxrpc_call_completion compl,
                               u32 abort_code,
                               int error)
{
        bool ret = false;

        if (call->state < RXRPC_CALL_COMPLETE) {
                write_lock_bh(&call->state_lock);
                ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
                write_unlock_bh(&call->state_lock);
        }
        return ret;
}

/*
 * Record that a call successfully completed.
 */
bool __rxrpc_call_completed(struct rxrpc_call *call)
{
        return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

bool rxrpc_call_completed(struct rxrpc_call *call)
{
        bool ret = false;

        if (call->state < RXRPC_CALL_COMPLETE) {
                write_lock_bh(&call->state_lock);
                ret = __rxrpc_call_completed(call);
                write_unlock_bh(&call->state_lock);
        }
        return ret;
}

/*
 * Record that a call is locally aborted.
 */
bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
                        rxrpc_seq_t seq, u32 abort_code, int error)
{
        trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
                          abort_code, error);
        return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
                                           abort_code, error);
}

bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
                      rxrpc_seq_t seq, u32 abort_code, int error)
{
        bool ret;

        write_lock_bh(&call->state_lock);
        ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
        write_unlock_bh(&call->state_lock);
        return ret;
}

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
        u32 tmp = 0;
        int ret;

        switch (call->completion) {
        case RXRPC_CALL_SUCCEEDED:
                ret = 0;
                if (rxrpc_is_service_call(call))
                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
                break;
        case RXRPC_CALL_REMOTELY_ABORTED:
                tmp = call->abort_code;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_LOCALLY_ABORTED:
                tmp = call->abort_code;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_NETWORK_ERROR:
                tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
                break;
        case RXRPC_CALL_LOCAL_ERROR:
                tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
                break;
        default:
                pr_err("Invalid terminal call state %u\n", call->state);
                BUG();
                break;
        }

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
                            call->rx_pkt_offset, call->rx_pkt_len, ret);
        return ret;
}
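
/*
 * For illustration only: a userspace caller can pick up the terminal control
 * messages emitted above with an ordinary CMSG walk.  This is a minimal
 * sketch, not part of the kernel build; "fd" and "control" are assumed to be
 * an AF_RXRPC socket and a caller-supplied buffer, and error handling plus
 * the data iovec are omitted:
 *
 *	char control[128];
 *	struct msghdr msg = { .msg_control = control,
 *			      .msg_controllen = sizeof(control) };
 *	struct cmsghdr *cmsg;
 *
 *	if (recvmsg(fd, &msg, 0) >= 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level != SOL_RXRPC)
 *				continue;
 *			if (cmsg->cmsg_type == RXRPC_ABORT) {
 *				uint32_t code;
 *
 *				memcpy(&code, CMSG_DATA(cmsg), sizeof(code));
 *				// local or remote abort code, as put_cmsg()'d
 *				// by rxrpc_recvmsg_term() above
 *			}
 *		}
 *	}
 */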

/*
 * Pass back notification of a new call.  The call is added to the
 * to-be-accepted list.  This means that the next call to be accepted might not
 * be the last call seen awaiting acceptance, but unless we leave this on the
 * front of the queue and block all other messages until someone gives us a
 * user_ID for it, there's not a lot we can do.
 */
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
                                  struct rxrpc_call *call,
                                  struct msghdr *msg, int flags)
{
        int tmp = 0, ret;

        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
        if (ret == 0 && !(flags & MSG_PEEK)) {
                _debug("to be accepted");
                write_lock_bh(&rx->recvmsg_lock);
                list_del_init(&call->recvmsg_link);
                write_unlock_bh(&rx->recvmsg_lock);

                rxrpc_get_call(call, rxrpc_call_got);
                write_lock(&rx->call_lock);
                list_add_tail(&call->accept_link, &rx->to_be_accepted);
                write_unlock(&rx->call_lock);
        }

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
        return ret;
}

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
        _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

        trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
        ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

        if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
                rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
                                  rxrpc_propose_ack_terminal_ack);
                //rxrpc_send_ack_packet(call, false, NULL);
        }

        write_lock_bh(&call->state_lock);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
                __rxrpc_call_completed(call);
                write_unlock_bh(&call->state_lock);
                break;

        case RXRPC_CALL_SERVER_RECV_REQUEST:
                call->tx_phase = true;
                call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
                call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
                write_unlock_bh(&call->state_lock);
                rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
                                  rxrpc_propose_ack_processing_op);
                break;
        default:
                write_unlock_bh(&call->state_lock);
                break;
        }
}

/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
        bool last = false;
        u8 subpacket;
        int ix;

        _enter("%d", call->debug_id);

        hard_ack = call->rx_hard_ack;
        top = smp_load_acquire(&call->rx_top);
        ASSERT(before(hard_ack, top));

        hard_ack++;
        ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
        skb = call->rxtx_buffer[ix];
        rxrpc_see_skb(skb, rxrpc_skb_rotated);
        sp = rxrpc_skb(skb);

        subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
        serial = sp->hdr.serial + subpacket;

        if (subpacket == sp->nr_subpackets - 1 &&
            sp->rx_flags & RXRPC_SKB_INCL_LAST)
                last = true;

        call->rxtx_buffer[ix] = NULL;
        call->rxtx_annotations[ix] = 0;
        /* Barrier against rxrpc_input_data(). */
        smp_store_release(&call->rx_hard_ack, hard_ack);

        rxrpc_free_skb(skb, rxrpc_skb_freed);

        trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
        if (last) {
                rxrpc_end_rx_phase(call, serial);
        } else {
                /* Check to see if there's an ACK that needs sending. */
                if (after_eq(hard_ack, call->ackr_consumed + 2) ||
                    after_eq(top, call->ackr_seen + 2) ||
                    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
                                          true, true,
                                          rxrpc_propose_ack_rotate_rx);
                if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
                        rxrpc_send_ack_packet(call, false, NULL);
        }
}

/*
 * Decrypt and verify a (sub)packet.  The packet's length may be changed due to
 * padding, but if this is the case, the packet length will be resident in the
 * socket buffer.  Note that we can't modify the master skb info as the skb may
 * be the home to multiple subpackets.
 */
static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
                               u8 annotation,
                               unsigned int offset, unsigned int len)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        rxrpc_seq_t seq = sp->hdr.seq;
        u16 cksum = sp->hdr.cksum;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;

        _enter("");

        /* For all but the head jumbo subpacket, the security checksum is in a
         * jumbo header immediately prior to the data.
         */
        if (subpacket > 0) {
                __be16 tmp;
                if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
                        BUG();
                cksum = ntohs(tmp);
                seq += subpacket;
        }

        return call->security->verify_packet(call, skb, offset, len,
                                             seq, cksum);
}

/*
 * Locate the data within a packet.  This is complicated by:
 *
 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
 *     subpacket.
 *
 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
 *     contains an extra header which includes the true length of the data,
 *     excluding any encrypted padding.
 */
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
                             unsigned int *_offset, unsigned int *_len,
                             bool *_last)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
        bool last = false;
        int ret;
        u8 annotation = *_annotation;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;

        /* Locate the subpacket */
        offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
        len = skb->len - offset;
        if (subpacket < sp->nr_subpackets - 1)
                len = RXRPC_JUMBO_DATALEN;
        else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
                last = true;

        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
                if (ret < 0)
                        return ret;
                *_annotation |= RXRPC_RX_ANNO_VERIFIED;
        }

        *_offset = offset;
        *_len = len;
        *_last = last;
        call->security->locate_data(call, skb, _offset, _len);
        return 0;
}
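
/*
 * Worked example of the subpacket arithmetic above, assuming the usual jumbo
 * layout in which each non-final subpacket carries RXRPC_JUMBO_DATALEN (1412)
 * bytes of data and is followed by a 4-byte jumbo secondary header, giving
 * RXRPC_JUMBO_SUBPKTLEN of 1416 bytes:
 *
 *	subpacket 0: offset = sizeof(struct rxrpc_wire_header)
 *	subpacket 1: offset = sizeof(struct rxrpc_wire_header) + 1416
 *	subpacket 2: offset = sizeof(struct rxrpc_wire_header) + 2832
 *
 * Only the final subpacket derives its length from skb->len; every earlier
 * one is exactly RXRPC_JUMBO_DATALEN bytes.
 */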

/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
                              size_t len, int flags, size_t *_offset)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
        bool rx_pkt_last;
        unsigned int rx_pkt_offset, rx_pkt_len;
        int ix, copy, ret = -EAGAIN, ret2;

        if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
            call->ackr_reason)
                rxrpc_send_ack_packet(call, false, NULL);

        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
        rx_pkt_last = call->rx_pkt_last;

        if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
                seq = call->rx_hard_ack;
                ret = 1;
                goto done;
        }

        /* Barriers against rxrpc_input_data(). */
        hard_ack = call->rx_hard_ack;
        seq = hard_ack + 1;

        while (top = smp_load_acquire(&call->rx_top),
               before_eq(seq, top)
               ) {
                ix = seq & RXRPC_RXTX_BUFF_MASK;
                skb = call->rxtx_buffer[ix];
                if (!skb) {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                        break;
                }
                smp_rmb();
                rxrpc_see_skb(skb, rxrpc_skb_seen);
                sp = rxrpc_skb(skb);

                if (!(flags & MSG_PEEK)) {
                        serial = sp->hdr.serial;
                        serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
                        trace_rxrpc_receive(call, rxrpc_receive_front,
                                            serial, seq);
                }

                if (msg)
                        sock_recv_timestamp(msg, sock->sk, skb);

                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_locate_data(call, skb,
                                                 &call->rxtx_annotations[ix],
                                                 &rx_pkt_offset, &rx_pkt_len,
                                                 &rx_pkt_last);
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                            rx_pkt_offset, rx_pkt_len, ret2);
                        if (ret2 < 0) {
                                ret = ret2;
                                goto out;
                        }
                } else {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                }

                /* We have to handle short, empty and used-up DATA packets. */
                remain = len - *_offset;
                copy = rx_pkt_len;
                if (copy > remain)
                        copy = remain;
                if (copy > 0) {
                        ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
                                                      copy);
                        if (ret2 < 0) {
                                ret = ret2;
                                goto out;
                        }

                        /* handle piecemeal consumption of data packets */
                        rx_pkt_offset += copy;
                        rx_pkt_len -= copy;
                        *_offset += copy;
                }

                if (rx_pkt_len > 0) {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                        ASSERTCMP(*_offset, ==, len);
                        ret = 0;
                        break;
                }

                /* The whole packet has been transferred. */
                if (!(flags & MSG_PEEK))
                        rxrpc_rotate_rx_window(call);
                rx_pkt_offset = 0;
                rx_pkt_len = 0;

                if (rx_pkt_last) {
                        ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
                        ret = 1;
                        goto out;
                }

                seq++;
        }

out:
        if (!(flags & MSG_PEEK)) {
                call->rx_pkt_offset = rx_pkt_offset;
                call->rx_pkt_len = rx_pkt_len;
                call->rx_pkt_last = rx_pkt_last;
        }
done:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
                            rx_pkt_offset, rx_pkt_len, ret);
        if (ret == -EAGAIN)
                set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
        return ret;
}

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                  int flags)
{
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct list_head *l;
        size_t copied = 0;
        long timeo;
        int ret;

        DEFINE_WAIT(wait);

        trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

        if (flags & (MSG_OOB | MSG_TRUNC))
                return -EOPNOTSUPP;

        timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
        lock_sock(&rx->sk);

        /* Return immediately if a client socket has no outstanding calls */
        if (RB_EMPTY_ROOT(&rx->calls) &&
            list_empty(&rx->recvmsg_q) &&
            rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
                release_sock(&rx->sk);
                return -ENODATA;
        }

        if (list_empty(&rx->recvmsg_q)) {
                ret = -EWOULDBLOCK;
                if (timeo == 0) {
                        call = NULL;
                        goto error_no_call;
                }

                release_sock(&rx->sk);

                /* Wait for something to happen */
                prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                          TASK_INTERRUPTIBLE);
                ret = sock_error(&rx->sk);
                if (ret)
                        goto wait_error;

                if (list_empty(&rx->recvmsg_q)) {
                        if (signal_pending(current))
                                goto wait_interrupted;
                        trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
                                            0, 0, 0, 0);
                        timeo = schedule_timeout(timeo);
                }
                finish_wait(sk_sleep(&rx->sk), &wait);
                goto try_again;
        }

        /* Find the next call and dequeue it if we're not just peeking.  If we
         * do dequeue it, that comes with a ref that we will need to release.
         */
        write_lock_bh(&rx->recvmsg_lock);
        l = rx->recvmsg_q.next;
        call = list_entry(l, struct rxrpc_call, recvmsg_link);
        if (!(flags & MSG_PEEK))
                list_del_init(&call->recvmsg_link);
        else
                rxrpc_get_call(call, rxrpc_call_got);
        write_unlock_bh(&rx->recvmsg_lock);

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

        /* We're going to drop the socket lock, so we need to lock the call
         * against interference by sendmsg.
         */
        if (!mutex_trylock(&call->user_mutex)) {
                ret = -EWOULDBLOCK;
                if (flags & MSG_DONTWAIT)
                        goto error_requeue_call;
                ret = -ERESTARTSYS;
                if (mutex_lock_interruptible(&call->user_mutex) < 0)
                        goto error_requeue_call;
        }

        release_sock(&rx->sk);

        if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();

        if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                if (flags & MSG_CMSG_COMPAT) {
                        unsigned int id32 = call->user_call_ID;

                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned int), &id32);
                } else {
                        unsigned long idl = call->user_call_ID;

                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned long), &idl);
                }
                if (ret < 0)
                        goto error_unlock_call;
        }

        if (msg->msg_name) {
                struct sockaddr_rxrpc *srx = msg->msg_name;
                size_t len = sizeof(call->peer->srx);

                memcpy(msg->msg_name, &call->peer->srx, len);
                srx->srx_service = call->service_id;
                msg->msg_namelen = len;
        }

        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
                break;
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
                                         flags, &copied);
                if (ret == -EAGAIN)
                        ret = 0;

                if (after(call->rx_top, call->rx_hard_ack) &&
                    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
                        rxrpc_notify_socket(call);
                break;
        default:
                ret = 0;
                break;
        }

        if (ret < 0)
                goto error_unlock_call;

        if (call->state == RXRPC_CALL_COMPLETE) {
                ret = rxrpc_recvmsg_term(call, msg);
                if (ret < 0)
                        goto error_unlock_call;
                if (!(flags & MSG_PEEK))
                        rxrpc_release_call(rx, call);
                msg->msg_flags |= MSG_EOR;
                ret = 1;
        }

        if (ret == 0)
                msg->msg_flags |= MSG_MORE;
        else
                msg->msg_flags &= ~MSG_MORE;
        ret = copied;

error_unlock_call:
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;

error_requeue_call:
        if (!(flags & MSG_PEEK)) {
                write_lock_bh(&rx->recvmsg_lock);
                list_add(&call->recvmsg_link, &rx->recvmsg_q);
                write_unlock_bh(&rx->recvmsg_lock);
                trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
        } else {
                rxrpc_put_call(call, rxrpc_call_put);
        }
error_no_call:
        release_sock(&rx->sk);
error_trace:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;

wait_interrupted:
        ret = sock_intr_errno(timeo);
wait_error:
        finish_wait(sk_sleep(&rx->sk), &wait);
        call = NULL;
        goto error_trace;
}
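
/*
 * For reference, a minimal userspace read loop driving the entry point above
 * might look like this.  It is a sketch only: "fd" is assumed to be an
 * AF_RXRPC socket with a call already in progress, error handling is
 * omitted, and control-message parsing is left to the example shown after
 * rxrpc_recvmsg_term():
 *
 *	char buf[4096], control[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg;
 *	ssize_t ret;
 *
 *	do {
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = control;
 *		msg.msg_controllen = sizeof(control);
 *		ret = recvmsg(fd, &msg, 0);
 *		// RXRPC_USER_CALL_ID and, at the end of the call, the
 *		// terminal messages (RXRPC_ACK, RXRPC_ABORT, ...) arrive as
 *		// control messages; MSG_EOR marks the end of the call.
 *	} while (ret >= 0 && !(msg.msg_flags & MSG_EOR));
 */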

/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to receive data from
 * @iter: The buffer to receive into
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 * @_service: Where to store the actual service ID (may be upgraded)
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call.  Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
                           struct iov_iter *iter,
                           bool want_more, u32 *_abort, u16 *_service)
{
        size_t offset = 0;
        int ret;

        _enter("{%d,%s},%zu,%d",
               call->debug_id, rxrpc_call_states[call->state],
               iov_iter_count(iter), want_more);

        ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);

        mutex_lock(&call->user_mutex);

        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
                                         iov_iter_count(iter), 0,
                                         &offset);
                if (ret < 0)
                        goto out;

                /* We can only reach here with a partially full buffer if we
                 * have reached the end of the data.  We must otherwise have a
                 * full buffer or have been given -EAGAIN.
                 */
                if (ret == 1) {
                        if (iov_iter_count(iter) > 0)
                                goto short_data;
                        if (!want_more)
                                goto read_phase_complete;
                        ret = 0;
                        goto out;
                }

                if (!want_more)
                        goto excess_data;
                goto out;

        case RXRPC_CALL_COMPLETE:
                goto call_complete;

        default:
                ret = -EINPROGRESS;
                goto out;
        }

read_phase_complete:
        ret = 1;
out:
        switch (call->ackr_reason) {
        case RXRPC_ACK_IDLE:
                break;
        case RXRPC_ACK_DELAY:
                if (ret != -EAGAIN)
                        break;
                /* Fall through */
        default:
                rxrpc_send_ack_packet(call, false, NULL);
        }

        if (_service)
                *_service = call->service_id;
        mutex_unlock(&call->user_mutex);
        _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
        return ret;

short_data:
        trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
        ret = -EBADMSG;
        goto out;
excess_data:
        trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
        ret = -EMSGSIZE;
        goto out;
call_complete:
        *_abort = call->abort_code;
        ret = call->error;
        if (call->completion == RXRPC_CALL_SUCCEEDED) {
                ret = 1;
                if (iov_iter_count(iter) > 0)
                        ret = -ECONNRESET;
        }
        goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
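
/*
 * Usage sketch for the export above.  This is illustrative only: "rx_socket",
 * "rxcall", "buffer" and "buffer_size" are assumed to belong to a
 * hypothetical kernel-service caller and are not part of this file:
 *
 *	struct kvec kv = { .iov_base = buffer, .iov_len = buffer_size };
 *	struct iov_iter iter;
 *	u32 abort_code = 0;
 *	int ret;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, buffer_size);
 *	ret = rxrpc_kernel_recv_data(rx_socket, rxcall, &iter, true,
 *				     &abort_code, NULL);
 *	// ret == 0:  buffer filled and more data is available
 *	// ret == 1:  got what was asked for and the data is complete
 *	// ret == -EAGAIN: more packets are needed; call again later
 *	// ret == -ECONNABORTED: abort_code now holds the abort code
 */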

/**
 * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
 * @sock: The socket that the call exists on
 * @call: The call to query
 * @_ts: Where to put the timestamp
 *
 * Retrieve the timestamp from the first DATA packet of the reply if it is
 * in the ring.  Returns true if successful, false if not.
 */
bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
                                 ktime_t *_ts)
{
        struct sk_buff *skb;
        rxrpc_seq_t hard_ack, top, seq;
        bool success = false;

        mutex_lock(&call->user_mutex);

        if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
                goto out;

        hard_ack = call->rx_hard_ack;
        if (hard_ack != 0)
                goto out;

        seq = hard_ack + 1;
        top = smp_load_acquire(&call->rx_top);
        if (after(seq, top))
                goto out;

        skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
        if (!skb)
                goto out;

        *_ts = skb_get_ktime(skb);
        success = true;

out:
        mutex_unlock(&call->user_mutex);
        return success;
}
EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);
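
/*
 * Usage sketch for rxrpc_kernel_get_reply_time().  Illustrative only:
 * "rx_socket", "rxcall" and "record_reply_timestamp" are hypothetical
 * caller-side names, not part of this file:
 *
 *	ktime_t reply_time;
 *
 *	if (rxrpc_kernel_get_reply_time(rx_socket, rxcall, &reply_time))
 *		record_reply_timestamp(reply_time);	// caller-defined
 */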