
/net/rxrpc/recvmsg.c

https://gitlab.com/kush/linux
/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Post a call for attention by the socket or kernel service. Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%d", call->debug_id);

        if (!list_empty(&call->recvmsg_link))
                return;

        rcu_read_lock();

        rx = rcu_dereference(call->socket);
        sk = &rx->sk;
        if (rx && sk->sk_state < RXRPC_CLOSE) {
                if (call->notify_rx) {
                        spin_lock_bh(&call->notify_lock);
                        call->notify_rx(sk, call, call->user_call_ID);
                        spin_unlock_bh(&call->notify_lock);
                } else {
                        write_lock_bh(&rx->recvmsg_lock);
                        if (list_empty(&call->recvmsg_link)) {
                                rxrpc_get_call(call, rxrpc_call_got);
                                list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
                        }
                        write_unlock_bh(&rx->recvmsg_lock);

                        if (!sock_flag(sk, SOCK_DEAD)) {
                                _debug("call %ps", sk->sk_data_ready);
                                sk->sk_data_ready(sk);
                        }
                }
        }

        rcu_read_unlock();
        _leave("");
}
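
/*
 * Example: a minimal sketch of the notify_rx hook invoked above. A kernel
 * service registers such a callback when it begins a call (for instance via
 * rxrpc_kernel_begin_call()); it runs with notify_lock held in a BH-disabled
 * section, so it should do little more than wake something up. The names
 * my_call and my_wq below are hypothetical.
 *
 *      static void my_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
 *                               unsigned long user_call_ID)
 *      {
 *              struct my_call *call = (struct my_call *)user_call_ID;
 *
 *              queue_work(my_wq, &call->work); // defer to process context
 *      }
 */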

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
        u32 tmp = 0;
        int ret;

        switch (call->completion) {
        case RXRPC_CALL_SUCCEEDED:
                ret = 0;
                if (rxrpc_is_service_call(call))
                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
                break;
        case RXRPC_CALL_REMOTELY_ABORTED:
                tmp = call->abort_code;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_LOCALLY_ABORTED:
                tmp = call->abort_code;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_NETWORK_ERROR:
                tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
                break;
        case RXRPC_CALL_LOCAL_ERROR:
                tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
                break;
        default:
                pr_err("Invalid terminal call state %u\n", call->state);
                BUG();
                break;
        }

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
                            call->rx_pkt_offset, call->rx_pkt_len, ret);
        return ret;
}
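
/*
 * Example: a hypothetical userspace fragment showing how the terminal
 * control messages built above might be parsed after recvmsg(2); msghdr
 * setup and error handling are abbreviated.
 *
 *      struct cmsghdr *cmsg;
 *      u32 code;
 *
 *      for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *              if (cmsg->cmsg_level != SOL_RXRPC)
 *                      continue;
 *              switch (cmsg->cmsg_type) {
 *              case RXRPC_ABORT:
 *              case RXRPC_NET_ERROR:
 *              case RXRPC_LOCAL_ERROR:
 *                      memcpy(&code, CMSG_DATA(cmsg), sizeof(code));
 *                      break;
 *              }
 *      }
 */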

/*
 * Pass back notification of a new call. The call is added to the
 * to-be-accepted list. This means that the next call to be accepted might not
 * be the last call seen awaiting acceptance, but unless we leave this on the
 * front of the queue and block all other messages until someone gives us a
 * user_ID for it, there's not a lot we can do.
 */
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
                                  struct rxrpc_call *call,
                                  struct msghdr *msg, int flags)
{
        int tmp = 0, ret;

        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);

        if (ret == 0 && !(flags & MSG_PEEK)) {
                _debug("to be accepted");
                write_lock_bh(&rx->recvmsg_lock);
                list_del_init(&call->recvmsg_link);
                write_unlock_bh(&rx->recvmsg_lock);

                rxrpc_get_call(call, rxrpc_call_got);
                write_lock(&rx->call_lock);
                list_add_tail(&call->accept_link, &rx->to_be_accepted);
                write_unlock(&rx->call_lock);
        }

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
        return ret;
}
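
/*
 * Example: on seeing RXRPC_NEW_CALL, userspace would typically accept the
 * queued call by replying with a sendmsg(2) carrying an RXRPC_ACCEPT control
 * message together with the RXRPC_USER_CALL_ID it wants to tag the call
 * with; a hypothetical sketch, with cmsg assembly elided:
 *
 *      unsigned long user_id = choose_call_id();       // hypothetical helper
 *
 *      // sendmsg(fd, &msg, 0) where msg carries:
 *      //   SOL_RXRPC / RXRPC_ACCEPT        (no payload)
 *      //   SOL_RXRPC / RXRPC_USER_CALL_ID  (user_id)
 */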

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
        _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

        trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
        ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

        if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
                rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true,
                                  rxrpc_propose_ack_terminal_ack);
                //rxrpc_send_ack_packet(call, false, NULL);
        }

        write_lock_bh(&call->state_lock);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
                __rxrpc_call_completed(call);
                write_unlock_bh(&call->state_lock);
                break;

        case RXRPC_CALL_SERVER_RECV_REQUEST:
                call->tx_phase = true;
                call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
                call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
                write_unlock_bh(&call->state_lock);
                rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
                                  rxrpc_propose_ack_processing_op);
                break;
        default:
                write_unlock_bh(&call->state_lock);
                break;
        }
}

/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
        u8 flags;
        int ix;

        _enter("%d", call->debug_id);

        hard_ack = call->rx_hard_ack;
        top = smp_load_acquire(&call->rx_top);
        ASSERT(before(hard_ack, top));

        hard_ack++;
        ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
        skb = call->rxtx_buffer[ix];
        rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
        sp = rxrpc_skb(skb);
        flags = sp->hdr.flags;
        serial = sp->hdr.serial;
        if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
                serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

        call->rxtx_buffer[ix] = NULL;
        call->rxtx_annotations[ix] = 0;
        /* Barrier against rxrpc_input_data(). */
        smp_store_release(&call->rx_hard_ack, hard_ack);

        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

        _debug("%u,%u,%02x", hard_ack, top, flags);
        trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
        if (flags & RXRPC_LAST_PACKET) {
                rxrpc_end_rx_phase(call, serial);
        } else {
                /* Check to see if there's an ACK that needs sending. */
                if (after_eq(hard_ack, call->ackr_consumed + 2) ||
                    after_eq(top, call->ackr_seen + 2) ||
                    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
                                          true, true,
                                          rxrpc_propose_ack_rotate_rx);
                if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
                        rxrpc_send_ack_packet(call, false, NULL);
        }
}

/*
 * Decrypt and verify a (sub)packet. The packet's length may be changed due to
 * padding, but if this is the case, the packet length will be resident in the
 * socket buffer. Note that we can't modify the master skb info as the skb may
 * be the home to multiple subpackets.
 */
static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
                               u8 annotation,
                               unsigned int offset, unsigned int len)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        rxrpc_seq_t seq = sp->hdr.seq;
        u16 cksum = sp->hdr.cksum;

        _enter("");

        /* For all but the head jumbo subpacket, the security checksum is in a
         * jumbo header immediately prior to the data.
         */
        if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
                __be16 tmp;
                if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
                        BUG();
                cksum = ntohs(tmp);
                seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
        }

        return call->conn->security->verify_packet(call, skb, offset, len,
                                                   seq, cksum);
}

/*
 * Locate the data within a packet. This is complicated by:
 *
 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
 *     subpacket.
 *
 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
 *     contains an extra header which includes the true length of the data,
 *     excluding any encrypted padding.
 */
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
                             unsigned int *_offset, unsigned int *_len)
{
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
        int ret;
        u8 annotation = *_annotation;

        /* Locate the subpacket */
        len = skb->len - offset;
        if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
                offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
                           RXRPC_JUMBO_SUBPKTLEN);
                len = (annotation & RXRPC_RX_ANNO_JLAST) ?
                        skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
        }

        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
                if (ret < 0)
                        return ret;
                *_annotation |= RXRPC_RX_ANNO_VERIFIED;
        }

        *_offset = offset;
        *_len = len;
        call->conn->security->locate_data(call, skb, _offset, _len);
        return 0;
}
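
/*
 * Example: a worked instance of the jumbo arithmetic above. For the third
 * subpacket of a jumbo packet, the annotation's RXRPC_RX_ANNO_JUMBO field
 * holds 3, so:
 *
 *      offset = sizeof(struct rxrpc_wire_header)      // the 28-byte wire header
 *      offset += (3 - 1) * RXRPC_JUMBO_SUBPKTLEN      // skip subpackets 1 and 2
 *      len     = RXRPC_JUMBO_SUBPKTLEN                // not the last subpacket
 *
 * Only if RXRPC_RX_ANNO_JLAST is set does len instead run from offset to the
 * end of the skb; the security layer's locate_data() may then trim len to
 * exclude encrypted padding.
 */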

/*
 * Deliver messages to a call. This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1). If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
                              size_t len, int flags, size_t *_offset)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
        bool last;
        unsigned int rx_pkt_offset, rx_pkt_len;
        int ix, copy, ret = -EAGAIN, ret2;

        if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
            call->ackr_reason)
                rxrpc_send_ack_packet(call, false, NULL);

        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;

        if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
                seq = call->rx_hard_ack;
                ret = 1;
                goto done;
        }

        /* Barriers against rxrpc_input_data(). */
        hard_ack = call->rx_hard_ack;
        seq = hard_ack + 1;
        while (top = smp_load_acquire(&call->rx_top),
               before_eq(seq, top)
               ) {
                ix = seq & RXRPC_RXTX_BUFF_MASK;
                skb = call->rxtx_buffer[ix];
                if (!skb) {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                        break;
                }
                smp_rmb();
                rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
                sp = rxrpc_skb(skb);

                if (!(flags & MSG_PEEK))
                        trace_rxrpc_receive(call, rxrpc_receive_front,
                                            sp->hdr.serial, seq);

                if (msg)
                        sock_recv_timestamp(msg, sock->sk, skb);

                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_locate_data(call, skb,
                                                 &call->rxtx_annotations[ix],
                                                 &rx_pkt_offset, &rx_pkt_len);
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                            rx_pkt_offset, rx_pkt_len, ret2);
                        if (ret2 < 0) {
                                ret = ret2;
                                goto out;
                        }
                } else {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                }

                /* We have to handle short, empty and used-up DATA packets. */
                remain = len - *_offset;
                copy = rx_pkt_len;
                if (copy > remain)
                        copy = remain;
                if (copy > 0) {
                        ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
                                                      copy);
                        if (ret2 < 0) {
                                ret = ret2;
                                goto out;
                        }

                        /* handle piecemeal consumption of data packets */
                        rx_pkt_offset += copy;
                        rx_pkt_len -= copy;
                        *_offset += copy;
                }

                if (rx_pkt_len > 0) {
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
                                            rx_pkt_offset, rx_pkt_len, 0);
                        ASSERTCMP(*_offset, ==, len);
                        ret = 0;
                        break;
                }

                /* The whole packet has been transferred. */
                last = sp->hdr.flags & RXRPC_LAST_PACKET;
                if (!(flags & MSG_PEEK))
                        rxrpc_rotate_rx_window(call);
                rx_pkt_offset = 0;
                rx_pkt_len = 0;

                if (last) {
                        ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
                        ret = 1;
                        goto out;
                }

                seq++;
        }

out:
        if (!(flags & MSG_PEEK)) {
                call->rx_pkt_offset = rx_pkt_offset;
                call->rx_pkt_len = rx_pkt_len;
        }
done:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
                            rx_pkt_offset, rx_pkt_len, ret);
        if (ret == -EAGAIN)
                set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
        return ret;
}

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                  int flags)
{
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct list_head *l;
        size_t copied = 0;
        long timeo;
        int ret;

        DEFINE_WAIT(wait);

        trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

        if (flags & (MSG_OOB | MSG_TRUNC))
                return -EOPNOTSUPP;

        timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
        lock_sock(&rx->sk);

        /* Return immediately if a client socket has no outstanding calls */
        if (RB_EMPTY_ROOT(&rx->calls) &&
            list_empty(&rx->recvmsg_q) &&
            rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
                release_sock(&rx->sk);
                return -ENODATA;
        }

        if (list_empty(&rx->recvmsg_q)) {
                ret = -EWOULDBLOCK;
                if (timeo == 0) {
                        call = NULL;
                        goto error_no_call;
                }

                release_sock(&rx->sk);

                /* Wait for something to happen */
                prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                          TASK_INTERRUPTIBLE);
                ret = sock_error(&rx->sk);
                if (ret)
                        goto wait_error;

                if (list_empty(&rx->recvmsg_q)) {
                        if (signal_pending(current))
                                goto wait_interrupted;
                        trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
                                            0, 0, 0, 0);
                        timeo = schedule_timeout(timeo);
                }
                finish_wait(sk_sleep(&rx->sk), &wait);
                goto try_again;
        }

        /* Find the next call and dequeue it if we're not just peeking. If we
         * do dequeue it, that comes with a ref that we will need to release.
         */
        write_lock_bh(&rx->recvmsg_lock);
        l = rx->recvmsg_q.next;
        call = list_entry(l, struct rxrpc_call, recvmsg_link);
        if (!(flags & MSG_PEEK))
                list_del_init(&call->recvmsg_link);
        else
                rxrpc_get_call(call, rxrpc_call_got);
        write_unlock_bh(&rx->recvmsg_lock);

        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

        /* We're going to drop the socket lock, so we need to lock the call
         * against interference by sendmsg.
         */
        if (!mutex_trylock(&call->user_mutex)) {
                ret = -EWOULDBLOCK;
                if (flags & MSG_DONTWAIT)
                        goto error_requeue_call;
                ret = -ERESTARTSYS;
                if (mutex_lock_interruptible(&call->user_mutex) < 0)
                        goto error_requeue_call;
        }

        release_sock(&rx->sk);

        if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();

        if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                if (flags & MSG_CMSG_COMPAT) {
                        unsigned int id32 = call->user_call_ID;

                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned int), &id32);
                } else {
                        unsigned long idl = call->user_call_ID;

                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned long), &idl);
                }
                if (ret < 0)
                        goto error_unlock_call;
        }

        if (msg->msg_name) {
                struct sockaddr_rxrpc *srx = msg->msg_name;
                size_t len = sizeof(call->peer->srx);

                memcpy(msg->msg_name, &call->peer->srx, len);
                srx->srx_service = call->service_id;
                msg->msg_namelen = len;
        }

        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
                break;
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
                                         flags, &copied);
                if (ret == -EAGAIN)
                        ret = 0;

                if (after(call->rx_top, call->rx_hard_ack) &&
                    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
                        rxrpc_notify_socket(call);
                break;
        default:
                ret = 0;
                break;
        }

        if (ret < 0)
                goto error_unlock_call;

        if (call->state == RXRPC_CALL_COMPLETE) {
                ret = rxrpc_recvmsg_term(call, msg);
                if (ret < 0)
                        goto error_unlock_call;
                if (!(flags & MSG_PEEK))
                        rxrpc_release_call(rx, call);
                msg->msg_flags |= MSG_EOR;
                ret = 1;
        }

        if (ret == 0)
                msg->msg_flags |= MSG_MORE;
        else
                msg->msg_flags &= ~MSG_MORE;
        ret = copied;

error_unlock_call:
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;

error_requeue_call:
        if (!(flags & MSG_PEEK)) {
                write_lock_bh(&rx->recvmsg_lock);
                list_add(&call->recvmsg_link, &rx->recvmsg_q);
                write_unlock_bh(&rx->recvmsg_lock);
                trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
        } else {
                rxrpc_put_call(call, rxrpc_call_put);
        }
error_no_call:
        release_sock(&rx->sk);
error_trace:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;

wait_interrupted:
        ret = sock_intr_errno(timeo);
wait_error:
        finish_wait(sk_sleep(&rx->sk), &wait);
        call = NULL;
        goto error_trace;
}
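
/*
 * Example: a hypothetical userspace receive loop over an AF_RXRPC socket,
 * pairing the data path above with the MSG_EOR/MSG_MORE flags it sets.
 * Abbreviated; no error handling.
 *
 *      char buf[4096], control[128];
 *      struct iovec iov = { buf, sizeof(buf) };
 *      struct msghdr msg = {
 *              .msg_iov        = &iov,
 *              .msg_iovlen     = 1,
 *              .msg_control    = control,
 *              .msg_controllen = sizeof(control),
 *      };
 *      ssize_t n;
 *
 *      n = recvmsg(fd, &msg, 0);
 *      if (n >= 0 && (msg.msg_flags & MSG_EOR))
 *              ;       // call complete: terminal cmsg (ACK/ABORT/...) attached
 *      else if (n >= 0 && (msg.msg_flags & MSG_MORE))
 *              ;       // more data to come for this call
 */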

/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to receive data from
 * @iter: The buffer to receive into
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 * @_service: Where to store the actual service ID (may be upgraded)
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call. Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
                           struct iov_iter *iter,
                           bool want_more, u32 *_abort, u16 *_service)
{
        size_t offset = 0;
        int ret;

        _enter("{%d,%s},%zu,%d",
               call->debug_id, rxrpc_call_states[call->state],
               iov_iter_count(iter), want_more);

        ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);

        mutex_lock(&call->user_mutex);

        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
                                         iov_iter_count(iter), 0,
                                         &offset);
                if (ret < 0)
                        goto out;

                /* We can only reach here with a partially full buffer if we
                 * have reached the end of the data. We must otherwise have a
                 * full buffer or have been given -EAGAIN.
                 */
                if (ret == 1) {
                        if (iov_iter_count(iter) > 0)
                                goto short_data;
                        if (!want_more)
                                goto read_phase_complete;
                        ret = 0;
                        goto out;
                }

                if (!want_more)
                        goto excess_data;
                goto out;

        case RXRPC_CALL_COMPLETE:
                goto call_complete;

        default:
                ret = -EINPROGRESS;
                goto out;
        }

read_phase_complete:
        ret = 1;
out:
        switch (call->ackr_reason) {
        case RXRPC_ACK_IDLE:
                break;
        case RXRPC_ACK_DELAY:
                if (ret != -EAGAIN)
                        break;
                /* Fall through */
        default:
                rxrpc_send_ack_packet(call, false, NULL);
        }

        if (_service)
                *_service = call->service_id;
        mutex_unlock(&call->user_mutex);
        _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
        return ret;

short_data:
        trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
        ret = -EBADMSG;
        goto out;
excess_data:
        trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
        ret = -EMSGSIZE;
        goto out;
call_complete:
        *_abort = call->abort_code;
        ret = call->error;
        if (call->completion == RXRPC_CALL_SUCCEEDED) {
                ret = 1;
                if (iov_iter_count(iter) > 0)
                        ret = -ECONNRESET;
        }
        goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
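
/*
 * Example: a minimal sketch of how a kernel service might drain a reply with
 * rxrpc_kernel_recv_data(). The buffer, socket and call pointers are
 * hypothetical, and the iov_iter setup follows the kvec form used by such
 * callers in this kernel era.
 *
 *      struct kvec iov = { .iov_base = buf, .iov_len = buflen };
 *      struct iov_iter iter;
 *      u32 abort_code = 0;
 *      int ret;
 *
 *      iov_iter_kvec(&iter, READ, &iov, 1, buflen);
 *      ret = rxrpc_kernel_recv_data(sock, rxcall, &iter, false,
 *                                   &abort_code, NULL);
 *      if (ret == -EAGAIN)
 *              ;       // wait for a notify_rx notification, then retry
 *      else if (ret == -ECONNABORTED)
 *              pr_warn("call aborted: %u\n", abort_code);
 */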

/**
 * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
 * @sock: The socket that the call exists on
 * @call: The call to query
 * @_ts: Where to put the timestamp
 *
 * Retrieve the timestamp from the first DATA packet of the reply if it is
 * in the ring. Returns true if successful, false if not.
 */
bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
                                 ktime_t *_ts)
{
        struct sk_buff *skb;
        rxrpc_seq_t hard_ack, top, seq;
        bool success = false;

        mutex_lock(&call->user_mutex);

        if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
                goto out;

        hard_ack = call->rx_hard_ack;
        if (hard_ack != 0)
                goto out;

        seq = hard_ack + 1;
        top = smp_load_acquire(&call->rx_top);
        if (after(seq, top))
                goto out;

        skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
        if (!skb)
                goto out;

        *_ts = skb_get_ktime(skb);
        success = true;

out:
        mutex_unlock(&call->user_mutex);
        return success;
}
EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);
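
/*
 * Example: a hypothetical caller using the reply timestamp to sample RTT;
 * issue_time would have been recorded when the request was transmitted.
 *
 *      ktime_t ts;
 *
 *      if (rxrpc_kernel_get_reply_time(sock, rxcall, &ts))
 *              rtt = ktime_sub(ts, issue_time);
 */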