
/net/rxrpc/recvmsg.c

http://github.com/torvalds/linux

// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
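
/* Reader's overview (an annotated summary, not from the original source):
 * incoming DATA packets are parked by rxrpc_input_data() in the ring
 * call->rxtx_buffer[], indexed by sequence number masked with
 * RXRPC_RXTX_BUFF_MASK.  call->rx_hard_ack is the last sequence number
 * consumed here and call->rx_top the highest one received; the functions
 * below walk the window (rx_hard_ack, rx_top] and rotate packets out of the
 * ring as they are used up.
 */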

/*
 * Post a call for attention by the socket or kernel service.  Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%d", call->debug_id);

        if (!list_empty(&call->recvmsg_link))
                return;

        rcu_read_lock();

        rx = rcu_dereference(call->socket);
        sk = &rx->sk;
        if (rx && sk->sk_state < RXRPC_CLOSE) {
                if (call->notify_rx) {
                        spin_lock_bh(&call->notify_lock);
                        call->notify_rx(sk, call, call->user_call_ID);
                        spin_unlock_bh(&call->notify_lock);
                } else {
                        write_lock_bh(&rx->recvmsg_lock);
                        if (list_empty(&call->recvmsg_link)) {
                                rxrpc_get_call(call, rxrpc_call_got);
                                list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
                        }
                        write_unlock_bh(&rx->recvmsg_lock);

                        if (!sock_flag(sk, SOCK_DEAD)) {
                                _debug("call %ps", sk->sk_data_ready);
                                sk->sk_data_ready(sk);
                        }
                }
        }

        rcu_read_unlock();
        _leave("");
}

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
        u32 tmp = 0;
        int ret;

        switch (call->completion) {
        case RXRPC_CALL_SUCCEEDED:
                ret = 0;
                if (rxrpc_is_service_call(call))
                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
                break;
        case RXRPC_CALL_REMOTELY_ABORTED:
                tmp = call->abort_code;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_LOCALLY_ABORTED:
                tmp = call->abort_code;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_NETWORK_ERROR:
                tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
                break;
        case RXRPC_CALL_LOCAL_ERROR:
                tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
                break;
        default:
                pr_err("Invalid terminal call state %u\n", call->state);
                BUG();
                break;
        }

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
                            call->rx_pkt_offset, call->rx_pkt_len, ret);
        return ret;
}

/*
 * Pass back notification of a new call.  The call is added to the
 * to-be-accepted list.  This means that the next call to be accepted might not
 * be the last call seen awaiting acceptance, but unless we leave this on the
 * front of the queue and block all other messages until someone gives us a
 * user_ID for it, there's not a lot we can do.
 */
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
                                  struct rxrpc_call *call,
                                  struct msghdr *msg, int flags)
{
        int tmp = 0, ret;

        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);

        if (ret == 0 && !(flags & MSG_PEEK)) {
                _debug("to be accepted");
                write_lock_bh(&rx->recvmsg_lock);
                list_del_init(&call->recvmsg_link);
                write_unlock_bh(&rx->recvmsg_lock);

                rxrpc_get_call(call, rxrpc_call_got);
                write_lock(&rx->call_lock);
                list_add_tail(&call->accept_link, &rx->to_be_accepted);
                write_unlock(&rx->call_lock);
        }

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
        return ret;
}
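
/* Reader's note (not in the original source): after RXRPC_NEW_CALL has been
 * reported, the call sits on rx->to_be_accepted until userspace accepts it -
 * with this generation of the API that is done by a sendmsg() carrying an
 * RXRPC_ACCEPT control message and a new user call ID.
 */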

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
        _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

        trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
        ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

        if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
                rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
                                  rxrpc_propose_ack_terminal_ack);
                //rxrpc_send_ack_packet(call, false, NULL);
        }

        write_lock_bh(&call->state_lock);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
                __rxrpc_call_completed(call);
                write_unlock_bh(&call->state_lock);
                break;

        case RXRPC_CALL_SERVER_RECV_REQUEST:
                call->tx_phase = true;
                call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
                call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
                write_unlock_bh(&call->state_lock);
                rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
                                  rxrpc_propose_ack_processing_op);
                break;
        default:
                write_unlock_bh(&call->state_lock);
                break;
        }
}

/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
        bool last = false;
        u8 subpacket;
        int ix;

        _enter("%d", call->debug_id);

        hard_ack = call->rx_hard_ack;
        top = smp_load_acquire(&call->rx_top);
        ASSERT(before(hard_ack, top));

        hard_ack++;
        ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
        skb = call->rxtx_buffer[ix];
        rxrpc_see_skb(skb, rxrpc_skb_rotated);
        sp = rxrpc_skb(skb);

        subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
        serial = sp->hdr.serial + subpacket;

        if (subpacket == sp->nr_subpackets - 1 &&
            sp->rx_flags & RXRPC_SKB_INCL_LAST)
                last = true;

        call->rxtx_buffer[ix] = NULL;
        call->rxtx_annotations[ix] = 0;
        /* Barrier against rxrpc_input_data(). */
        smp_store_release(&call->rx_hard_ack, hard_ack);

        rxrpc_free_skb(skb, rxrpc_skb_freed);

        trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
        if (last) {
                rxrpc_end_rx_phase(call, serial);
        } else {
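                /* Reader's note (not in the original source): the heuristic
                 * below proposes an ACK roughly every other packet consumed
                 * (ackr_consumed) or newly seen (ackr_seen), and forces one
                 * out immediately when the window has just been drained.
                 */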
                /* Check to see if there's an ACK that needs sending. */
                if (after_eq(hard_ack, call->ackr_consumed + 2) ||
                    after_eq(top, call->ackr_seen + 2) ||
                    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
                                          true, true,
                                          rxrpc_propose_ack_rotate_rx);
                if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
                        rxrpc_send_ack_packet(call, false, NULL);
        }
}

/*
 * Decrypt and verify a (sub)packet.  The packet's length may be changed due to
 * padding, but if this is the case, the packet length will be resident in the
 * socket buffer.  Note that we can't modify the master skb info as the skb may
 * be the home to multiple subpackets.
 */
static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
                               u8 annotation,
                               unsigned int offset, unsigned int len)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        rxrpc_seq_t seq = sp->hdr.seq;
        u16 cksum = sp->hdr.cksum;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;

        _enter("");

        /* For all but the head jumbo subpacket, the security checksum is in a
         * jumbo header immediately prior to the data.
         */
        if (subpacket > 0) {
                __be16 tmp;
                if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
                        BUG();
                cksum = ntohs(tmp);
                seq += subpacket;
        }

        return call->security->verify_packet(call, skb, offset, len,
                                             seq, cksum);
}
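
/* Sketch of the jumbo packet layout assumed by the offset arithmetic above
 * and in rxrpc_locate_data() below (a reader's note, not from the original
 * source):
 *
 *      +-------------+--------+--------+--------+--------+-----
 *      | wire header | data 0 | jumbo  | data 1 | jumbo  | ...
 *      |             |        | header |        | header |
 *      +-------------+--------+--------+--------+--------+-----
 *
 * Each data segment bar the last is RXRPC_JUMBO_DATALEN bytes long and each
 * segment-plus-following-header stride is RXRPC_JUMBO_SUBPKTLEN; the last
 * two bytes of a jumbo header carry the security checksum for the segment
 * that follows it, which is why the checksum is read from offset - 2.
 */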

/*
 * Locate the data within a packet.  This is complicated by:
 *
 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
 *     subpacket.
 *
 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
 *     contains an extra header which includes the true length of the data,
 *     excluding any encrypted padding.
 */
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
                             unsigned int *_offset, unsigned int *_len,
                             bool *_last)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
        bool last = false;
        int ret;
        u8 annotation = *_annotation;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;

        /* Locate the subpacket */
        offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
        len = skb->len - offset;
        if (subpacket < sp->nr_subpackets - 1)
                len = RXRPC_JUMBO_DATALEN;
        else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
                last = true;

        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
                if (ret < 0)
                        return ret;
                *_annotation |= RXRPC_RX_ANNO_VERIFIED;
        }

        *_offset = offset;
        *_len = len;
        *_last = last;
        call->security->locate_data(call, skb, _offset, _len);
        return 0;
}

/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
                              size_t len, int flags, size_t *_offset)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
        bool rx_pkt_last;
        unsigned int rx_pkt_offset, rx_pkt_len;
        int ix, copy, ret = -EAGAIN, ret2;

        if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
            call->ackr_reason)
                rxrpc_send_ack_packet(call, false, NULL);

        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
        rx_pkt_last = call->rx_pkt_last;

        if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
                seq = call->rx_hard_ack;
                ret = 1;
                goto done;
        }

        /* Barriers against rxrpc_input_data(). */
        hard_ack = call->rx_hard_ack;
        seq = hard_ack + 1;

        while (top = smp_load_acquire(&call->rx_top),
               before_eq(seq, top)
               ) {
                ix = seq & RXRPC_RXTX_BUFF_MASK;
                skb = call->rxtx_buffer[ix];
                if (!skb) {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                        break;
                }
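                /* Reader's note (not in the original source): the read
                 * barrier below pairs with the barrier in rxrpc_input_data()
                 * so that, once the skb pointer is seen, the packet contents
                 * and annotations written before it was published are seen
                 * too.
                 */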
                smp_rmb();
                rxrpc_see_skb(skb, rxrpc_skb_seen);
                sp = rxrpc_skb(skb);

                if (!(flags & MSG_PEEK)) {
                        serial = sp->hdr.serial;
                        serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
                        trace_rxrpc_receive(call, rxrpc_receive_front,
                                            serial, seq);
                }

                if (msg)
                        sock_recv_timestamp(msg, sock->sk, skb);

                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_locate_data(call, skb,
                                                 &call->rxtx_annotations[ix],
                                                 &rx_pkt_offset, &rx_pkt_len,
                                                 &rx_pkt_last);
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                            rx_pkt_offset, rx_pkt_len, ret2);
                        if (ret2 < 0) {
                                ret = ret2;
                                goto out;
                        }
                } else {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                }

                /* We have to handle short, empty and used-up DATA packets. */
                remain = len - *_offset;
                copy = rx_pkt_len;
                if (copy > remain)
                        copy = remain;
                if (copy > 0) {
                        ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
                                                      copy);
                        if (ret2 < 0) {
                                ret = ret2;
                                goto out;
                        }

                        /* handle piecemeal consumption of data packets */
                        rx_pkt_offset += copy;
                        rx_pkt_len -= copy;
                        *_offset += copy;
                }

                if (rx_pkt_len > 0) {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                        ASSERTCMP(*_offset, ==, len);
                        ret = 0;
                        break;
                }

                /* The whole packet has been transferred. */
                if (!(flags & MSG_PEEK))
                        rxrpc_rotate_rx_window(call);
                rx_pkt_offset = 0;
                rx_pkt_len = 0;

                if (rx_pkt_last) {
                        ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
                        ret = 1;
                        goto out;
                }

                seq++;
        }

out:
        if (!(flags & MSG_PEEK)) {
                call->rx_pkt_offset = rx_pkt_offset;
                call->rx_pkt_len = rx_pkt_len;
                call->rx_pkt_last = rx_pkt_last;
        }
done:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
                            rx_pkt_offset, rx_pkt_len, ret);
        if (ret == -EAGAIN)
                set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
        return ret;
}
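
/* Reader's note (not in the original source): rx_pkt_offset/rx_pkt_len are
 * written back to the call at "out:" above so that a partially consumed DATA
 * packet resumes where it left off on the next call; MSG_PEEK deliberately
 * skips that write-back, leaving the window untouched.
 */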

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                  int flags)
{
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct list_head *l;
        size_t copied = 0;
        long timeo;
        int ret;

        DEFINE_WAIT(wait);

        trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

        if (flags & (MSG_OOB | MSG_TRUNC))
                return -EOPNOTSUPP;

        timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
        lock_sock(&rx->sk);

        /* Return immediately if a client socket has no outstanding calls */
        if (RB_EMPTY_ROOT(&rx->calls) &&
            list_empty(&rx->recvmsg_q) &&
            rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
                release_sock(&rx->sk);
                return -ENODATA;
        }

        if (list_empty(&rx->recvmsg_q)) {
                ret = -EWOULDBLOCK;
                if (timeo == 0) {
                        call = NULL;
                        goto error_no_call;
                }

                release_sock(&rx->sk);

                /* Wait for something to happen */
                prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                          TASK_INTERRUPTIBLE);
                ret = sock_error(&rx->sk);
                if (ret)
                        goto wait_error;

                if (list_empty(&rx->recvmsg_q)) {
                        if (signal_pending(current))
                                goto wait_interrupted;
                        trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
                                            0, 0, 0, 0);
                        timeo = schedule_timeout(timeo);
                }
                finish_wait(sk_sleep(&rx->sk), &wait);
                goto try_again;
        }

        /* Find the next call and dequeue it if we're not just peeking.  If we
         * do dequeue it, that comes with a ref that we will need to release.
         */
        write_lock_bh(&rx->recvmsg_lock);
        l = rx->recvmsg_q.next;
        call = list_entry(l, struct rxrpc_call, recvmsg_link);
        if (!(flags & MSG_PEEK))
                list_del_init(&call->recvmsg_link);
        else
                rxrpc_get_call(call, rxrpc_call_got);
        write_unlock_bh(&rx->recvmsg_lock);

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

        /* We're going to drop the socket lock, so we need to lock the call
         * against interference by sendmsg.
         */
        if (!mutex_trylock(&call->user_mutex)) {
                ret = -EWOULDBLOCK;
                if (flags & MSG_DONTWAIT)
                        goto error_requeue_call;
                ret = -ERESTARTSYS;
                if (mutex_lock_interruptible(&call->user_mutex) < 0)
                        goto error_requeue_call;
        }

        release_sock(&rx->sk);

        if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();

        if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                if (flags & MSG_CMSG_COMPAT) {
                        unsigned int id32 = call->user_call_ID;

                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned int), &id32);
                } else {
                        unsigned long idl = call->user_call_ID;

                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned long), &idl);
                }
                if (ret < 0)
                        goto error_unlock_call;
        }

        if (msg->msg_name) {
                struct sockaddr_rxrpc *srx = msg->msg_name;
                size_t len = sizeof(call->peer->srx);

                memcpy(msg->msg_name, &call->peer->srx, len);
                srx->srx_service = call->service_id;
                msg->msg_namelen = len;
        }

        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
                break;
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
                                         flags, &copied);
                if (ret == -EAGAIN)
                        ret = 0;

                if (after(call->rx_top, call->rx_hard_ack) &&
                    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
                        rxrpc_notify_socket(call);
                break;
        default:
                ret = 0;
                break;
        }

        if (ret < 0)
                goto error_unlock_call;

        if (call->state == RXRPC_CALL_COMPLETE) {
                ret = rxrpc_recvmsg_term(call, msg);
                if (ret < 0)
                        goto error_unlock_call;
                if (!(flags & MSG_PEEK))
                        rxrpc_release_call(rx, call);
                msg->msg_flags |= MSG_EOR;
                ret = 1;
        }

        if (ret == 0)
                msg->msg_flags |= MSG_MORE;
        else
                msg->msg_flags &= ~MSG_MORE;
        ret = copied;

error_unlock_call:
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;

error_requeue_call:
        if (!(flags & MSG_PEEK)) {
                write_lock_bh(&rx->recvmsg_lock);
                list_add(&call->recvmsg_link, &rx->recvmsg_q);
                write_unlock_bh(&rx->recvmsg_lock);
                trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
        } else {
                rxrpc_put_call(call, rxrpc_call_put);
        }
error_no_call:
        release_sock(&rx->sk);
error_trace:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;

wait_interrupted:
        ret = sock_intr_errno(timeo);
wait_error:
        finish_wait(sk_sleep(&rx->sk), &wait);
        call = NULL;
        goto error_trace;
}
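
/* Minimal userspace sketch (an illustration, not from the original source)
 * of draining one message from an AF_RXRPC socket and pulling the user call
 * ID and any abort code out of the control messages posted above.  Error
 * handling is elided and "fd" is assumed to be an rxrpc socket with a call
 * in progress.
 *
 *      char buf[4096], control[128];
 *      unsigned long call_id = 0;
 *      uint32_t abort_code = 0;
 *      struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *      struct msghdr msg = {
 *              .msg_iov        = &iov,
 *              .msg_iovlen     = 1,
 *              .msg_control    = control,
 *              .msg_controllen = sizeof(control),
 *      };
 *      ssize_t n = recvmsg(fd, &msg, 0);
 *      for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *           c = CMSG_NXTHDR(&msg, c)) {
 *              if (c->cmsg_level != SOL_RXRPC)
 *                      continue;
 *              if (c->cmsg_type == RXRPC_USER_CALL_ID)
 *                      memcpy(&call_id, CMSG_DATA(c), sizeof(call_id));
 *              else if (c->cmsg_type == RXRPC_ABORT)
 *                      memcpy(&abort_code, CMSG_DATA(c), sizeof(abort_code));
 *      }
 *      if (msg.msg_flags & MSG_EOR)
 *              ;  // call is complete; call_id may now be reused
 */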

/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to receive data from
 * @iter: The buffer to receive into
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 * @_service: Where to store the actual service ID (may be upgraded)
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call.  Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
                           struct iov_iter *iter,
                           bool want_more, u32 *_abort, u16 *_service)
{
        size_t offset = 0;
        int ret;

        _enter("{%d,%s},%zu,%d",
               call->debug_id, rxrpc_call_states[call->state],
               iov_iter_count(iter), want_more);

        ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);

        mutex_lock(&call->user_mutex);

        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
                                         iov_iter_count(iter), 0,
                                         &offset);
                if (ret < 0)
                        goto out;

                /* We can only reach here with a partially full buffer if we
                 * have reached the end of the data.  We must otherwise have a
                 * full buffer or have been given -EAGAIN.
                 */
                if (ret == 1) {
                        if (iov_iter_count(iter) > 0)
                                goto short_data;
                        if (!want_more)
                                goto read_phase_complete;
                        ret = 0;
                        goto out;
                }

                if (!want_more)
                        goto excess_data;
                goto out;

        case RXRPC_CALL_COMPLETE:
                goto call_complete;

        default:
                ret = -EINPROGRESS;
                goto out;
        }

read_phase_complete:
        ret = 1;
out:
        switch (call->ackr_reason) {
        case RXRPC_ACK_IDLE:
                break;
        case RXRPC_ACK_DELAY:
                if (ret != -EAGAIN)
                        break;
                /* Fall through */
        default:
                rxrpc_send_ack_packet(call, false, NULL);
        }

        if (_service)
                *_service = call->service_id;
        mutex_unlock(&call->user_mutex);
        _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
        return ret;

short_data:
        trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
        ret = -EBADMSG;
        goto out;
excess_data:
        trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
        ret = -EMSGSIZE;
        goto out;
call_complete:
        *_abort = call->abort_code;
        ret = call->error;
        if (call->completion == RXRPC_CALL_SUCCEEDED) {
                ret = 1;
                if (iov_iter_count(iter) > 0)
                        ret = -ECONNRESET;
        }
        goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
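
/* Minimal in-kernel usage sketch (an illustration, not from the original
 * source): a kernel service would typically pull a reply into a kvec-backed
 * iterator; "buffer", "msg_len", "sock" and "rxcall" are placeholders, error
 * handling is elided, and a real caller would sleep until woken via its
 * notify_rx hook rather than spin on -EAGAIN.
 *
 *      struct kvec kv = { .iov_base = buffer, .iov_len = msg_len };
 *      struct iov_iter iter;
 *      u32 abort_code = 0;
 *      int ret;
 *
 *      iov_iter_kvec(&iter, READ, &kv, 1, msg_len);
 *      do {
 *              ret = rxrpc_kernel_recv_data(sock, rxcall, &iter,
 *                                           false, &abort_code, NULL);
 *      } while (ret == -EAGAIN);
 *      if (ret == -ECONNABORTED)
 *              ;  // the peer aborted the call with abort_code
 */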

/**
 * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
 * @sock: The socket that the call exists on
 * @call: The call to query
 * @_ts: Where to put the timestamp
 *
 * Retrieve the timestamp from the first DATA packet of the reply if it is
 * in the ring.  Returns true if successful, false if not.
 */
bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
                                 ktime_t *_ts)
{
        struct sk_buff *skb;
        rxrpc_seq_t hard_ack, top, seq;
        bool success = false;

        mutex_lock(&call->user_mutex);

        if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
                goto out;

        hard_ack = call->rx_hard_ack;
        if (hard_ack != 0)
                goto out;

        seq = hard_ack + 1;
        top = smp_load_acquire(&call->rx_top);
        if (after(seq, top))
                goto out;

        skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
        if (!skb)
                goto out;

        *_ts = skb_get_ktime(skb);
        success = true;

out:
        mutex_unlock(&call->user_mutex);
        return success;
}
EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);