
/net/ipv4/tcp.c

https://bitbucket.org/DutchDanny/bindroid-xtc-onex
C | 3457 lines | 2295 code | 480 blank | 682 comment | 557 complexity | 0bd1ec321967f686afd03a8d815463bc MD5
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated; the listing below shows only the first part of the file.

  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Implementation of the Transmission Control Protocol(TCP).
  7. *
  8. * Authors: Ross Biro
  9. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10. * Mark Evans, <evansmp@uhura.aston.ac.uk>
  11. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  12. * Florian La Roche, <flla@stud.uni-sb.de>
  13. * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14. * Linus Torvalds, <torvalds@cs.helsinki.fi>
  15. * Alan Cox, <gw4pts@gw4pts.ampr.org>
  16. * Matthew Dillon, <dillon@apollo.west.oic.com>
  17. * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18. * Jorge Cwik, <jorge@laser.satlink.net>
  19. *
  20. * Fixes:
  21. * Alan Cox : Numerous verify_area() calls
  22. * Alan Cox : Set the ACK bit on a reset
  23. * Alan Cox : Stopped it crashing if it closed while
  24. * sk->inuse=1 and was trying to connect
  25. * (tcp_err()).
  26. * Alan Cox : All icmp error handling was broken
  27. * pointers passed where wrong and the
  28. * socket was looked up backwards. Nobody
  29. * tested any icmp error code obviously.
  30. * Alan Cox : tcp_err() now handled properly. It
  31. * wakes people on errors. poll
  32. * behaves and the icmp error race
  33. * has gone by moving it into sock.c
  34. * Alan Cox : tcp_send_reset() fixed to work for
  35. * everything not just packets for
  36. * unknown sockets.
  37. * Alan Cox : tcp option processing.
  38. * Alan Cox : Reset tweaked (still not 100%) [Had
  39. * syn rule wrong]
  40. * Herp Rosmanith : More reset fixes
  41. * Alan Cox : No longer acks invalid rst frames.
  42. * Acking any kind of RST is right out.
  43. * Alan Cox : Sets an ignore me flag on an rst
  44. * receive otherwise odd bits of prattle
  45. * escape still
  46. * Alan Cox : Fixed another acking RST frame bug.
  47. * Should stop LAN workplace lockups.
  48. * Alan Cox : Some tidyups using the new skb list
  49. * facilities
  50. * Alan Cox : sk->keepopen now seems to work
  51. * Alan Cox : Pulls options out correctly on accepts
  52. * Alan Cox : Fixed assorted sk->rqueue->next errors
  53. * Alan Cox : PSH doesn't end a TCP read. Switched a
  54. * bit to skb ops.
  55. * Alan Cox : Tidied tcp_data to avoid a potential
  56. * nasty.
  57. * Alan Cox : Added some better commenting, as the
  58. * tcp is hard to follow
  59. * Alan Cox : Removed incorrect check for 20 * psh
  60. * Michael O'Reilly : ack < copied bug fix.
  61. * Johannes Stille : Misc tcp fixes (not all in yet).
  62. * Alan Cox : FIN with no memory -> CRASH
  63. * Alan Cox : Added socket option proto entries.
  64. * Also added awareness of them to accept.
  65. * Alan Cox : Added TCP options (SOL_TCP)
  66. * Alan Cox : Switched wakeup calls to callbacks,
  67. * so the kernel can layer network
  68. * sockets.
  69. * Alan Cox : Use ip_tos/ip_ttl settings.
  70. * Alan Cox : Handle FIN (more) properly (we hope).
  71. * Alan Cox : RST frames sent on unsynchronised
  72. * state ack error.
  73. * Alan Cox : Put in missing check for SYN bit.
  74. * Alan Cox : Added tcp_select_window() aka NET2E
  75. * window non shrink trick.
  76. * Alan Cox : Added a couple of small NET2E timer
  77. * fixes
  78. * Charles Hedrick : TCP fixes
  79. * Toomas Tamm : TCP window fixes
  80. * Alan Cox : Small URG fix to rlogin ^C ack fight
  81. * Charles Hedrick : Rewrote most of it to actually work
  82. * Linus : Rewrote tcp_read() and URG handling
  83. * completely
  84. * Gerhard Koerting: Fixed some missing timer handling
  85. * Matthew Dillon : Reworked TCP machine states as per RFC
  86. * Gerhard Koerting: PC/TCP workarounds
  87. * Adam Caldwell : Assorted timer/timing errors
  88. * Matthew Dillon : Fixed another RST bug
  89. * Alan Cox : Move to kernel side addressing changes.
  90. * Alan Cox : Beginning work on TCP fastpathing
  91. * (not yet usable)
  92. * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
  93. * Alan Cox : TCP fast path debugging
  94. * Alan Cox : Window clamping
  95. * Michael Riepe : Bug in tcp_check()
  96. * Matt Dillon : More TCP improvements and RST bug fixes
  97. * Matt Dillon : Yet more small nasties remove from the
  98. * TCP code (Be very nice to this man if
  99. * tcp finally works 100%) 8)
  100. * Alan Cox : BSD accept semantics.
  101. * Alan Cox : Reset on closedown bug.
  102. * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
  103. * Michael Pall : Handle poll() after URG properly in
  104. * all cases.
  105. * Michael Pall : Undo the last fix in tcp_read_urg()
  106. * (multi URG PUSH broke rlogin).
  107. * Michael Pall : Fix the multi URG PUSH problem in
  108. * tcp_readable(), poll() after URG
  109. * works now.
  110. * Michael Pall : recv(...,MSG_OOB) never blocks in the
  111. * BSD api.
  112. * Alan Cox : Changed the semantics of sk->socket to
  113. * fix a race and a signal problem with
  114. * accept() and async I/O.
  115. * Alan Cox : Relaxed the rules on tcp_sendto().
  116. * Yury Shevchuk : Really fixed accept() blocking problem.
  117. * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
  118. * clients/servers which listen in on
  119. * fixed ports.
  120. * Alan Cox : Cleaned the above up and shrank it to
  121. * a sensible code size.
  122. * Alan Cox : Self connect lockup fix.
  123. * Alan Cox : No connect to multicast.
  124. * Ross Biro : Close unaccepted children on master
  125. * socket close.
  126. * Alan Cox : Reset tracing code.
  127. * Alan Cox : Spurious resets on shutdown.
  128. * Alan Cox : Giant 15 minute/60 second timer error
  129. * Alan Cox : Small whoops in polling before an
  130. * accept.
  131. * Alan Cox : Kept the state trace facility since
  132. * it's handy for debugging.
  133. * Alan Cox : More reset handler fixes.
  134. * Alan Cox : Started rewriting the code based on
  135. * the RFC's for other useful protocol
  136. * references see: Comer, KA9Q NOS, and
  137. * for a reference on the difference
  138. * between specifications and how BSD
  139. * works see the 4.4lite source.
  140. * A.N.Kuznetsov : Don't time wait on completion of tidy
  141. * close.
  142. * Linus Torvalds : Fin/Shutdown & copied_seq changes.
  143. * Linus Torvalds : Fixed BSD port reuse to work first syn
  144. * Alan Cox : Reimplemented timers as per the RFC
  145. * and using multiple timers for sanity.
  146. * Alan Cox : Small bug fixes, and a lot of new
  147. * comments.
  148. * Alan Cox : Fixed dual reader crash by locking
  149. * the buffers (much like datagram.c)
  150. * Alan Cox : Fixed stuck sockets in probe. A probe
  151. * now gets fed up of retrying without
  152. * (even a no space) answer.
  153. * Alan Cox : Extracted closing code better
  154. * Alan Cox : Fixed the closing state machine to
  155. * resemble the RFC.
  156. * Alan Cox : More 'per spec' fixes.
  157. * Jorge Cwik : Even faster checksumming.
  158. * Alan Cox : tcp_data() doesn't ack illegal PSH
  159. * only frames. At least one pc tcp stack
  160. * generates them.
  161. * Alan Cox : Cache last socket.
  162. * Alan Cox : Per route irtt.
  163. * Matt Day : poll()->select() match BSD precisely on error
  164. * Alan Cox : New buffers
  165. * Marc Tamsky : Various sk->prot->retransmits and
  166. * sk->retransmits misupdating fixed.
  167. * Fixed tcp_write_timeout: stuck close,
  168. * and TCP syn retries gets used now.
  169. * Mark Yarvis : In tcp_read_wakeup(), don't send an
  170. * ack if state is TCP_CLOSED.
  171. * Alan Cox : Look up device on a retransmit - routes may
  172. * change. Doesn't yet cope with MSS shrink right
  173. * but it's a start!
  174. * Marc Tamsky : Closing in closing fixes.
  175. * Mike Shaver : RFC1122 verifications.
  176. * Alan Cox : rcv_saddr errors.
  177. * Alan Cox : Block double connect().
  178. * Alan Cox : Small hooks for enSKIP.
  179. * Alexey Kuznetsov: Path MTU discovery.
  180. * Alan Cox : Support soft errors.
  181. * Alan Cox : Fix MTU discovery pathological case
  182. * when the remote claims no mtu!
  183. * Marc Tamsky : TCP_CLOSE fix.
  184. * Colin (G3TNE) : Send a reset on syn ack replies in
  185. * window but wrong (fixes NT lpd problems)
  186. * Pedro Roque : Better TCP window handling, delayed ack.
  187. * Joerg Reuter : No modification of locked buffers in
  188. * tcp_do_retransmit()
  189. * Eric Schenk : Changed receiver side silly window
  190. * avoidance algorithm to BSD style
  191. * algorithm. This doubles throughput
  192. * against machines running Solaris,
  193. * and seems to result in general
  194. * improvement.
  195. * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
  196. * Willy Konynenberg : Transparent proxying support.
  197. * Mike McLagan : Routing by source
  198. * Keith Owens : Do proper merging with partial SKB's in
  199. * tcp_do_sendmsg to avoid burstiness.
  200. * Eric Schenk : Fix fast close down bug with
  201. * shutdown() followed by close().
  202. * Andi Kleen : Make poll agree with SIGIO
  203. * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
  204. * lingertime == 0 (RFC 793 ABORT Call)
  205. * Hirokazu Takahashi : Use copy_from_user() instead of
  206. * csum_and_copy_from_user() if possible.
  207. *
  208. * This program is free software; you can redistribute it and/or
  209. * modify it under the terms of the GNU General Public License
  210. * as published by the Free Software Foundation; either version
  211. * 2 of the License, or(at your option) any later version.
  212. *
  213. * Description of States:
  214. *
  215. * TCP_SYN_SENT sent a connection request, waiting for ack
  216. *
  217. * TCP_SYN_RECV received a connection request, sent ack,
  218. * waiting for final ack in three-way handshake.
  219. *
  220. * TCP_ESTABLISHED connection established
  221. *
  222. * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
  223. * transmission of remaining buffered data
  224. *
  225. * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
  226. * to shutdown
  227. *
  228. * TCP_CLOSING both sides have shutdown but we still have
  229. * data we have to finish sending
  230. *
  231. * TCP_TIME_WAIT timeout to catch resent junk before entering
  232. * closed, can only be entered from FIN_WAIT2
  233. * or CLOSING. Required because the other end
  234. * may not have gotten our last ACK causing it
  235. * to retransmit the data packet (which we ignore)
  236. *
  237. * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
  238. * us to finish writing our data and to shutdown
  239. * (we have to close() to move on to LAST_ACK)
  240. *
  241. * TCP_LAST_ACK out side has shutdown after remote has
  242. * shutdown. There may still be data in our
  243. * buffer that we have to finish sending
  244. *
  245. * TCP_CLOSE socket is finished
  246. */
  247. #include <linux/kernel.h>
  248. #include <linux/module.h>
  249. #include <linux/types.h>
  250. #include <linux/fcntl.h>
  251. #include <linux/poll.h>
  252. #include <linux/init.h>
  253. #include <linux/fs.h>
  254. #include <linux/skbuff.h>
  255. #include <linux/scatterlist.h>
  256. #include <linux/splice.h>
  257. #include <linux/net.h>
  258. #include <linux/socket.h>
  259. #include <linux/random.h>
  260. #include <linux/bootmem.h>
  261. #include <linux/highmem.h>
  262. #include <linux/swap.h>
  263. #include <linux/cache.h>
  264. #include <linux/err.h>
  265. #include <linux/crypto.h>
  266. #include <linux/time.h>
  267. #include <linux/slab.h>
  268. #include <linux/uid_stat.h>
  269. #include <net/icmp.h>
  270. #include <net/tcp.h>
  271. #include <net/xfrm.h>
  272. #include <net/ip.h>
  273. #include <net/ip6_route.h>
  274. #include <net/ipv6.h>
  275. #include <net/transp_v6.h>
  276. #include <net/netdma.h>
  277. #include <net/sock.h>
  278. #include <asm/uaccess.h>
  279. #include <asm/ioctls.h>
  280. int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
  281. struct percpu_counter tcp_orphan_count;
  282. EXPORT_SYMBOL_GPL(tcp_orphan_count);
  283. long sysctl_tcp_mem[3] __read_mostly;
  284. int sysctl_tcp_wmem[3] __read_mostly;
  285. int sysctl_tcp_rmem[3] __read_mostly;
  286. EXPORT_SYMBOL(sysctl_tcp_mem);
  287. EXPORT_SYMBOL(sysctl_tcp_rmem);
  288. EXPORT_SYMBOL(sysctl_tcp_wmem);
  289. atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
  290. EXPORT_SYMBOL(tcp_memory_allocated);
  291. /*
  292. * Current number of TCP sockets.
  293. */
  294. struct percpu_counter tcp_sockets_allocated;
  295. EXPORT_SYMBOL(tcp_sockets_allocated);
  296. /*
  297. * TCP splice context
  298. */
  299. struct tcp_splice_state {
  300. struct pipe_inode_info *pipe;
  301. size_t len;
  302. unsigned int flags;
  303. };
  304. /*
  305. * Pressure flag: try to collapse.
  306. * Technical note: it is used by multiple contexts non atomically.
  307. * All the __sk_mem_schedule() is of this nature: accounting
  308. * is strict, actions are advisory and have some latency.
  309. */
  310. int tcp_memory_pressure __read_mostly;
  311. EXPORT_SYMBOL(tcp_memory_pressure);
  312. void tcp_enter_memory_pressure(struct sock *sk)
  313. {
  314. if (!tcp_memory_pressure) {
  315. NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
  316. tcp_memory_pressure = 1;
  317. }
  318. }
  319. EXPORT_SYMBOL(tcp_enter_memory_pressure);
  320. /* Convert seconds to retransmits based on initial and max timeout */
  321. static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
  322. {
  323. u8 res = 0;
  324. if (seconds > 0) {
  325. int period = timeout;
  326. res = 1;
  327. while (seconds > period && res < 255) {
  328. res++;
  329. timeout <<= 1;
  330. if (timeout > rto_max)
  331. timeout = rto_max;
  332. period += timeout;
  333. }
  334. }
  335. return res;
  336. }
  337. /* Convert retransmits to seconds based on initial and max timeout */
  338. static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
  339. {
  340. int period = 0;
  341. if (retrans > 0) {
  342. period = timeout;
  343. while (--retrans) {
  344. timeout <<= 1;
  345. if (timeout > rto_max)
  346. timeout = rto_max;
  347. period += timeout;
  348. }
  349. }
  350. return period;
  351. }
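
For illustration only: a small stand-alone user-space program that copies the two helpers above (renamed with a demo_ prefix) and prints the seconds-to-retransmits round trip. The initial timeout of 1 s and the 120 s cap are made-up values chosen for the demo, not values taken from the kernel.

#include <stdio.h>

typedef unsigned char u8;

static u8 demo_secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

static int demo_retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

int main(void)
{
	int secs;

	/* Doubling, capped backoff: 1 + 2 + 4 + ... up to the 120 s ceiling. */
	for (secs = 1; secs <= 256; secs <<= 1) {
		u8 r = demo_secs_to_retrans(secs, 1, 120);

		printf("%3d s -> %d retransmits -> covers %d s\n",
		       secs, r, demo_retrans_to_secs(r, 1, 120));
	}
	return 0;
}
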
  352. /*
  353. * Wait for a TCP event.
  354. *
  355. * Note that we don't need to lock the socket, as the upper poll layers
  356. * take care of normal races (between the test and the event) and we don't
  357. * go look at any of the socket buffers directly.
  358. */
  359. unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
  360. {
  361. unsigned int mask;
  362. struct sock *sk = sock->sk;
  363. struct tcp_sock *tp = tcp_sk(sk);
  364. sock_poll_wait(file, sk_sleep(sk), wait);
  365. if (sk->sk_state == TCP_LISTEN)
  366. return inet_csk_listen_poll(sk);
  367. /* Socket is not locked. We are protected from async events
  368. * by poll logic and correct handling of state changes
  369. * made by other threads is impossible in any case.
  370. */
  371. mask = 0;
  372. /*
  373. * POLLHUP is certainly not done right. But poll() doesn't
  374. * have a notion of HUP in just one direction, and for a
  375. * socket the read side is more interesting.
  376. *
  377. * Some poll() documentation says that POLLHUP is incompatible
  378. * with the POLLOUT/POLLWR flags, so somebody should check this
  379. * all. But careful, it tends to be safer to return too many
  380. * bits than too few, and you can easily break real applications
  381. * if you don't tell them that something has hung up!
  382. *
  383. * Check-me.
  384. *
  385. * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
  386. * our fs/select.c). It means that after we received EOF,
  387. * poll always returns immediately, making impossible poll() on write()
  388. * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
  389. * if and only if shutdown has been made in both directions.
  390. * Actually, it is interesting to look how Solaris and DUX
  391. * solve this dilemma. I would prefer, if POLLHUP were maskable,
  392. * then we could set it on SND_SHUTDOWN. BTW examples given
  393. * in Stevens' books assume exactly this behaviour, it explains
  394. * why POLLHUP is incompatible with POLLOUT. --ANK
  395. *
  396. * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
  397. * blocking on fresh not-connected or disconnected socket. --ANK
  398. */
  399. if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
  400. mask |= POLLHUP;
  401. if (sk->sk_shutdown & RCV_SHUTDOWN)
  402. mask |= POLLIN | POLLRDNORM | POLLRDHUP;
  403. /* Connected? */
  404. if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
  405. int target = sock_rcvlowat(sk, 0, INT_MAX);
  406. if (tp->urg_seq == tp->copied_seq &&
  407. !sock_flag(sk, SOCK_URGINLINE) &&
  408. tp->urg_data)
  409. target++;
  410. /* Potential race condition. If read of tp below will
  411. * escape above sk->sk_state, we can be illegally awaken
  412. * in SYN_* states. */
  413. if (tp->rcv_nxt - tp->copied_seq >= target)
  414. mask |= POLLIN | POLLRDNORM;
  415. if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
  416. if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
  417. mask |= POLLOUT | POLLWRNORM;
  418. } else { /* send SIGIO later */
  419. set_bit(SOCK_ASYNC_NOSPACE,
  420. &sk->sk_socket->flags);
  421. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  422. /* Race breaker. If space is freed after
  423. * wspace test but before the flags are set,
  424. * IO signal will be lost.
  425. */
  426. if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
  427. mask |= POLLOUT | POLLWRNORM;
  428. }
  429. } else
  430. mask |= POLLOUT | POLLWRNORM;
  431. if (tp->urg_data & TCP_URG_VALID)
  432. mask |= POLLPRI;
  433. }
  434. /* This barrier is coupled with smp_wmb() in tcp_reset() */
  435. smp_rmb();
  436. if (sk->sk_err)
  437. mask |= POLLERR;
  438. return mask;
  439. }
  440. EXPORT_SYMBOL(tcp_poll);
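
For illustration, a user-space sketch of how the mask computed above surfaces through poll(2); sockfd is a hypothetical, already-connected TCP socket.

#define _GNU_SOURCE		/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>

static void wait_for_tcp_event(int sockfd)
{
	struct pollfd pfd = {
		.fd     = sockfd,
		.events = POLLIN | POLLOUT | POLLRDHUP,
	};

	if (poll(&pfd, 1, 5000 /* ms */) <= 0)
		return;				/* timeout or error */

	if (pfd.revents & POLLIN)		/* enough data (or a FIN) queued to read */
		printf("readable\n");
	if (pfd.revents & POLLOUT)		/* write space available */
		printf("writable\n");
	if (pfd.revents & POLLRDHUP)		/* peer shut down its send side */
		printf("peer sent FIN\n");
	if (pfd.revents & (POLLHUP | POLLERR))	/* both directions shut, or pending error */
		printf("hangup or error\n");
}
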
  441. int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  442. {
  443. struct tcp_sock *tp = tcp_sk(sk);
  444. int answ;
  445. switch (cmd) {
  446. case SIOCINQ:
  447. if (sk->sk_state == TCP_LISTEN)
  448. return -EINVAL;
  449. lock_sock(sk);
  450. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  451. answ = 0;
  452. else if (sock_flag(sk, SOCK_URGINLINE) ||
  453. !tp->urg_data ||
  454. before(tp->urg_seq, tp->copied_seq) ||
  455. !before(tp->urg_seq, tp->rcv_nxt)) {
  456. struct sk_buff *skb;
  457. answ = tp->rcv_nxt - tp->copied_seq;
  458. /* Subtract 1, if FIN is in queue. */
  459. skb = skb_peek_tail(&sk->sk_receive_queue);
  460. if (answ && skb)
  461. answ -= tcp_hdr(skb)->fin;
  462. } else
  463. answ = tp->urg_seq - tp->copied_seq;
  464. release_sock(sk);
  465. break;
  466. case SIOCATMARK:
  467. answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
  468. break;
  469. case SIOCOUTQ:
  470. if (sk->sk_state == TCP_LISTEN)
  471. return -EINVAL;
  472. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  473. answ = 0;
  474. else
  475. answ = tp->write_seq - tp->snd_una;
  476. break;
  477. case SIOCOUTQNSD:
  478. if (sk->sk_state == TCP_LISTEN)
  479. return -EINVAL;
  480. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  481. answ = 0;
  482. else
  483. answ = tp->write_seq - tp->snd_nxt;
  484. break;
  485. default:
  486. return -ENOIOCTLCMD;
  487. }
  488. return put_user(answ, (int __user *)arg);
  489. }
  490. EXPORT_SYMBOL(tcp_ioctl);
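
A user-space counterpart to the SIOCINQ/SIOCOUTQ branches above, shown for illustration; sockfd is again a hypothetical connected TCP socket.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ, SIOCOUTQNSD */

static void show_tcp_queue_depths(int sockfd)
{
	int inq = 0, outq = 0;

	/* Bytes queued for reading: roughly rcv_nxt - copied_seq above. */
	if (ioctl(sockfd, SIOCINQ, &inq) == 0)
		printf("unread: %d bytes\n", inq);

	/* Bytes written but not yet acknowledged: write_seq - snd_una above. */
	if (ioctl(sockfd, SIOCOUTQ, &outq) == 0)
		printf("unacked: %d bytes\n", outq);
}
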
  491. static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
  492. {
  493. TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
  494. tp->pushed_seq = tp->write_seq;
  495. }
  496. static inline int forced_push(struct tcp_sock *tp)
  497. {
  498. return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
  499. }
  500. static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
  501. {
  502. struct tcp_sock *tp = tcp_sk(sk);
  503. struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
  504. skb->csum = 0;
  505. tcb->seq = tcb->end_seq = tp->write_seq;
  506. tcb->flags = TCPHDR_ACK;
  507. tcb->sacked = 0;
  508. skb_header_release(skb);
  509. tcp_add_write_queue_tail(sk, skb);
  510. sk->sk_wmem_queued += skb->truesize;
  511. sk_mem_charge(sk, skb->truesize);
  512. if (tp->nonagle & TCP_NAGLE_PUSH)
  513. tp->nonagle &= ~TCP_NAGLE_PUSH;
  514. }
  515. static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
  516. {
  517. if (flags & MSG_OOB)
  518. tp->snd_up = tp->write_seq;
  519. }
  520. static inline void tcp_push(struct sock *sk, int flags, int mss_now,
  521. int nonagle)
  522. {
  523. if (tcp_send_head(sk)) {
  524. struct tcp_sock *tp = tcp_sk(sk);
  525. if (!(flags & MSG_MORE) || forced_push(tp))
  526. tcp_mark_push(tp, tcp_write_queue_tail(sk));
  527. tcp_mark_urg(tp, flags);
  528. __tcp_push_pending_frames(sk, mss_now,
  529. (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
  530. }
  531. }
  532. static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  533. unsigned int offset, size_t len)
  534. {
  535. struct tcp_splice_state *tss = rd_desc->arg.data;
  536. int ret;
  537. ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
  538. tss->flags);
  539. if (ret > 0)
  540. rd_desc->count -= ret;
  541. return ret;
  542. }
  543. static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
  544. {
  545. /* Store TCP splice context information in read_descriptor_t. */
  546. read_descriptor_t rd_desc = {
  547. .arg.data = tss,
  548. .count = tss->len,
  549. };
  550. return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
  551. }
  552. /**
  553. * tcp_splice_read - splice data from TCP socket to a pipe
  554. * @sock: socket to splice from
  555. * @ppos: position (not valid)
  556. * @pipe: pipe to splice to
  557. * @len: number of bytes to splice
  558. * @flags: splice modifier flags
  559. *
  560. * Description:
  561. * Will read pages from given socket and fill them into a pipe.
  562. *
  563. **/
  564. ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
  565. struct pipe_inode_info *pipe, size_t len,
  566. unsigned int flags)
  567. {
  568. struct sock *sk = sock->sk;
  569. struct tcp_splice_state tss = {
  570. .pipe = pipe,
  571. .len = len,
  572. .flags = flags,
  573. };
  574. long timeo;
  575. ssize_t spliced;
  576. int ret;
  577. sock_rps_record_flow(sk);
  578. /*
  579. * We can't seek on a socket input
  580. */
  581. if (unlikely(*ppos))
  582. return -ESPIPE;
  583. ret = spliced = 0;
  584. lock_sock(sk);
  585. timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
  586. while (tss.len) {
  587. ret = __tcp_splice_read(sk, &tss);
  588. if (ret < 0)
  589. break;
  590. else if (!ret) {
  591. if (spliced)
  592. break;
  593. if (sock_flag(sk, SOCK_DONE))
  594. break;
  595. if (sk->sk_err) {
  596. ret = sock_error(sk);
  597. break;
  598. }
  599. if (sk->sk_shutdown & RCV_SHUTDOWN)
  600. break;
  601. if (sk->sk_state == TCP_CLOSE) {
  602. /*
  603. * This occurs when user tries to read
  604. * from never connected socket.
  605. */
  606. if (!sock_flag(sk, SOCK_DONE))
  607. ret = -ENOTCONN;
  608. break;
  609. }
  610. if (!timeo) {
  611. ret = -EAGAIN;
  612. break;
  613. }
  614. sk_wait_data(sk, &timeo);
  615. if (signal_pending(current)) {
  616. ret = sock_intr_errno(timeo);
  617. break;
  618. }
  619. continue;
  620. }
  621. tss.len -= ret;
  622. spliced += ret;
  623. if (!timeo)
  624. break;
  625. release_sock(sk);
  626. lock_sock(sk);
  627. if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
  628. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  629. signal_pending(current))
  630. break;
  631. }
  632. release_sock(sk);
  633. if (spliced)
  634. return spliced;
  635. return ret;
  636. }
  637. EXPORT_SYMBOL(tcp_splice_read);
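
For illustration, a rough user-space sketch of the splice path this function serves: bytes flow socket -> pipe -> file without passing through user memory. sockfd and filefd are hypothetical descriptors and error handling is trimmed.

#define _GNU_SOURCE		/* for splice() and SPLICE_F_MOVE */
#include <fcntl.h>
#include <unistd.h>

static ssize_t tcp_to_file(int sockfd, int filefd, size_t len)
{
	int pipefd[2];
	ssize_t in, out = -1;

	if (pipe(pipefd) < 0)
		return -1;

	/* Socket -> pipe: this call ends up in tcp_splice_read() above. */
	in = splice(sockfd, NULL, pipefd[1], NULL, len, SPLICE_F_MOVE);
	if (in > 0)
		/* Pipe -> file: completes the transfer kernel-side. */
		out = splice(pipefd[0], NULL, filefd, NULL, in, SPLICE_F_MOVE);

	close(pipefd[0]);
	close(pipefd[1]);
	return in > 0 ? out : in;
}
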
  638. struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
  639. {
  640. struct sk_buff *skb;
  641. /* The TCP header must be at least 32-bit aligned. */
  642. size = ALIGN(size, 4);
  643. skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
  644. if (skb) {
  645. if (sk_wmem_schedule(sk, skb->truesize)) {
  646. /*
  647. * Make sure that we have exactly size bytes
  648. * available to the caller, no more, no less.
  649. */
  650. skb_reserve(skb, skb_tailroom(skb) - size);
  651. return skb;
  652. }
  653. __kfree_skb(skb);
  654. } else {
  655. sk->sk_prot->enter_memory_pressure(sk);
  656. sk_stream_moderate_sndbuf(sk);
  657. }
  658. return NULL;
  659. }
  660. static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
  661. int large_allowed)
  662. {
  663. struct tcp_sock *tp = tcp_sk(sk);
  664. u32 xmit_size_goal, old_size_goal;
  665. xmit_size_goal = mss_now;
  666. if (large_allowed && sk_can_gso(sk)) {
  667. xmit_size_goal = ((sk->sk_gso_max_size - 1) -
  668. inet_csk(sk)->icsk_af_ops->net_header_len -
  669. inet_csk(sk)->icsk_ext_hdr_len -
  670. tp->tcp_header_len);
  671. xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
  672. /* We try hard to avoid divides here */
  673. old_size_goal = tp->xmit_size_goal_segs * mss_now;
  674. if (likely(old_size_goal <= xmit_size_goal &&
  675. old_size_goal + mss_now > xmit_size_goal)) {
  676. xmit_size_goal = old_size_goal;
  677. } else {
  678. tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
  679. xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
  680. }
  681. }
  682. return max(xmit_size_goal, mss_now);
  683. }
  684. static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
  685. {
  686. int mss_now;
  687. mss_now = tcp_current_mss(sk);
  688. *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
  689. return mss_now;
  690. }
  691. static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
  692. size_t psize, int flags)
  693. {
  694. struct tcp_sock *tp = tcp_sk(sk);
  695. int mss_now, size_goal;
  696. int err;
  697. ssize_t copied;
  698. long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  699. /* Wait for a connection to finish. */
  700. if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
  701. if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
  702. goto out_err;
  703. clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  704. mss_now = tcp_send_mss(sk, &size_goal, flags);
  705. copied = 0;
  706. err = -EPIPE;
  707. if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
  708. goto out_err;
  709. while (psize > 0) {
  710. struct sk_buff *skb = tcp_write_queue_tail(sk);
  711. struct page *page = pages[poffset / PAGE_SIZE];
  712. int copy, i, can_coalesce;
  713. int offset = poffset % PAGE_SIZE;
  714. int size = min_t(size_t, psize, PAGE_SIZE - offset);
  715. if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
  716. new_segment:
  717. if (!sk_stream_memory_free(sk))
  718. goto wait_for_sndbuf;
  719. skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
  720. if (!skb)
  721. goto wait_for_memory;
  722. skb_entail(sk, skb);
  723. copy = size_goal;
  724. }
  725. if (copy > size)
  726. copy = size;
  727. i = skb_shinfo(skb)->nr_frags;
  728. can_coalesce = skb_can_coalesce(skb, i, page, offset);
  729. if (!can_coalesce && i >= MAX_SKB_FRAGS) {
  730. tcp_mark_push(tp, skb);
  731. goto new_segment;
  732. }
  733. if (!sk_wmem_schedule(sk, copy))
  734. goto wait_for_memory;
  735. if (can_coalesce) {
  736. skb_shinfo(skb)->frags[i - 1].size += copy;
  737. } else {
  738. get_page(page);
  739. skb_fill_page_desc(skb, i, page, offset, copy);
  740. }
  741. skb->len += copy;
  742. skb->data_len += copy;
  743. skb->truesize += copy;
  744. sk->sk_wmem_queued += copy;
  745. sk_mem_charge(sk, copy);
  746. skb->ip_summed = CHECKSUM_PARTIAL;
  747. tp->write_seq += copy;
  748. TCP_SKB_CB(skb)->end_seq += copy;
  749. skb_shinfo(skb)->gso_segs = 0;
  750. if (!copied)
  751. TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
  752. copied += copy;
  753. poffset += copy;
  754. if (!(psize -= copy))
  755. goto out;
  756. if (skb->len < size_goal || (flags & MSG_OOB))
  757. continue;
  758. if (forced_push(tp)) {
  759. tcp_mark_push(tp, skb);
  760. __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
  761. } else if (skb == tcp_send_head(sk))
  762. tcp_push_one(sk, mss_now);
  763. continue;
  764. wait_for_sndbuf:
  765. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  766. wait_for_memory:
  767. if (copied)
  768. tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
  769. if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
  770. goto do_error;
  771. mss_now = tcp_send_mss(sk, &size_goal, flags);
  772. }
  773. out:
  774. if (copied)
  775. tcp_push(sk, flags, mss_now, tp->nonagle);
  776. return copied;
  777. do_error:
  778. if (copied)
  779. goto out;
  780. out_err:
  781. return sk_stream_error(sk, flags, err);
  782. }
  783. int tcp_sendpage(struct sock *sk, struct page *page, int offset,
  784. size_t size, int flags)
  785. {
  786. ssize_t res;
  787. if (!(sk->sk_route_caps & NETIF_F_SG) ||
  788. !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
  789. return sock_no_sendpage(sk->sk_socket, page, offset, size,
  790. flags);
  791. lock_sock(sk);
  792. res = do_tcp_sendpages(sk, &page, offset, size, flags);
  793. release_sock(sk);
  794. return res;
  795. }
  796. EXPORT_SYMBOL(tcp_sendpage);
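
An illustrative user-space sketch: sendfile(2) from a regular file to a TCP socket is the sort of zero-copy transmit that typically reaches tcp_sendpage(). sockfd and filefd are hypothetical descriptors.

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sendfile.h>

static int send_file_over_tcp(int sockfd, int filefd)
{
	struct stat st;
	off_t off = 0;

	if (fstat(filefd, &st) < 0)
		return -1;

	/* Loop until the whole file has been handed to the socket. */
	while (off < st.st_size) {
		ssize_t n = sendfile(sockfd, filefd, &off, st.st_size - off);

		if (n <= 0)
			return -1;
	}
	return 0;
}
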
  797. #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
  798. #define TCP_OFF(sk) (sk->sk_sndmsg_off)
  799. static inline int select_size(struct sock *sk, int sg)
  800. {
  801. struct tcp_sock *tp = tcp_sk(sk);
  802. int tmp = tp->mss_cache;
  803. if (sg) {
  804. if (sk_can_gso(sk))
  805. tmp = 0;
  806. else {
  807. int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
  808. if (tmp >= pgbreak &&
  809. tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
  810. tmp = pgbreak;
  811. }
  812. }
  813. return tmp;
  814. }
  815. int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  816. size_t size)
  817. {
  818. struct iovec *iov;
  819. struct tcp_sock *tp = tcp_sk(sk);
  820. struct sk_buff *skb;
  821. int iovlen, flags;
  822. int mss_now, size_goal;
  823. int sg, err, copied;
  824. long timeo;
  825. lock_sock(sk);
  826. flags = msg->msg_flags;
  827. timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  828. /* Wait for a connection to finish. */
  829. if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
  830. if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
  831. goto out_err;
  832. /* This should be in poll */
  833. clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  834. mss_now = tcp_send_mss(sk, &size_goal, flags);
  835. /* Ok commence sending. */
  836. iovlen = msg->msg_iovlen;
  837. iov = msg->msg_iov;
  838. copied = 0;
  839. err = -EPIPE;
  840. if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
  841. goto out_err;
  842. sg = sk->sk_route_caps & NETIF_F_SG;
  843. while (--iovlen >= 0) {
  844. size_t seglen = iov->iov_len;
  845. unsigned char __user *from = iov->iov_base;
  846. iov++;
  847. while (seglen > 0) {
  848. int copy = 0;
  849. int max = size_goal;
  850. skb = tcp_write_queue_tail(sk);
  851. if (tcp_send_head(sk)) {
  852. if (skb->ip_summed == CHECKSUM_NONE)
  853. max = mss_now;
  854. copy = max - skb->len;
  855. }
  856. if (copy <= 0) {
  857. new_segment:
  858. /* Allocate new segment. If the interface is SG,
  859. * allocate skb fitting to single page.
  860. */
  861. if (!sk_stream_memory_free(sk))
  862. goto wait_for_sndbuf;
  863. skb = sk_stream_alloc_skb(sk,
  864. select_size(sk, sg),
  865. sk->sk_allocation);
  866. if (!skb)
  867. goto wait_for_memory;
  868. /*
  869. * Check whether we can use HW checksum.
  870. */
  871. if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
  872. skb->ip_summed = CHECKSUM_PARTIAL;
  873. skb_entail(sk, skb);
  874. copy = size_goal;
  875. max = size_goal;
  876. }
  877. /* Try to append data to the end of skb. */
  878. if (copy > seglen)
  879. copy = seglen;
  880. /* Where to copy to? */
  881. if (skb_tailroom(skb) > 0) {
  882. /* We have some space in skb head. Superb! */
  883. if (copy > skb_tailroom(skb))
  884. copy = skb_tailroom(skb);
  885. err = skb_add_data_nocache(sk, skb, from, copy);
  886. if (err)
  887. goto do_fault;
  888. } else {
  889. int merge = 0;
  890. int i = skb_shinfo(skb)->nr_frags;
  891. struct page *page = TCP_PAGE(sk);
  892. int off = TCP_OFF(sk);
  893. if (skb_can_coalesce(skb, i, page, off) &&
  894. off != PAGE_SIZE) {
  895. /* We can extend the last page
  896. * fragment. */
  897. merge = 1;
  898. } else if (i == MAX_SKB_FRAGS || !sg) {
  899. /* Need to add new fragment and cannot
  900. * do this because interface is non-SG,
  901. * or because all the page slots are
  902. * busy. */
  903. tcp_mark_push(tp, skb);
  904. goto new_segment;
  905. } else if (page) {
  906. if (off == PAGE_SIZE) {
  907. put_page(page);
  908. TCP_PAGE(sk) = page = NULL;
  909. off = 0;
  910. }
  911. } else
  912. off = 0;
  913. if (copy > PAGE_SIZE - off)
  914. copy = PAGE_SIZE - off;
  915. if (!sk_wmem_schedule(sk, copy))
  916. goto wait_for_memory;
  917. if (!page) {
  918. /* Allocate new cache page. */
  919. if (!(page = sk_stream_alloc_page(sk)))
  920. goto wait_for_memory;
  921. }
  922. /* Time to copy data. We are close to
  923. * the end! */
  924. err = skb_copy_to_page_nocache(sk, from, skb,
  925. page, off, copy);
  926. if (err) {
  927. /* If this page was new, give it to the
  928. * socket so it does not get leaked.
  929. */
  930. if (!TCP_PAGE(sk)) {
  931. TCP_PAGE(sk) = page;
  932. TCP_OFF(sk) = 0;
  933. }
  934. goto do_error;
  935. }
  936. /* Update the skb. */
  937. if (merge) {
  938. skb_shinfo(skb)->frags[i - 1].size +=
  939. copy;
  940. } else {
  941. skb_fill_page_desc(skb, i, page, off, copy);
  942. if (TCP_PAGE(sk)) {
  943. get_page(page);
  944. } else if (off + copy < PAGE_SIZE) {
  945. get_page(page);
  946. TCP_PAGE(sk) = page;
  947. }
  948. }
  949. TCP_OFF(sk) = off + copy;
  950. }
  951. if (!copied)
  952. TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
  953. tp->write_seq += copy;
  954. TCP_SKB_CB(skb)->end_seq += copy;
  955. skb_shinfo(skb)->gso_segs = 0;
  956. from += copy;
  957. copied += copy;
  958. if ((seglen -= copy) == 0 && iovlen == 0)
  959. goto out;
  960. if (skb->len < max || (flags & MSG_OOB))
  961. continue;
  962. if (forced_push(tp)) {
  963. tcp_mark_push(tp, skb);
  964. __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
  965. } else if (skb == tcp_send_head(sk))
  966. tcp_push_one(sk, mss_now);
  967. continue;
  968. wait_for_sndbuf:
  969. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  970. wait_for_memory:
  971. if (copied)
  972. tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
  973. if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
  974. goto do_error;
  975. mss_now = tcp_send_mss(sk, &size_goal, flags);
  976. }
  977. }
  978. out:
  979. if (copied)
  980. tcp_push(sk, flags, mss_now, tp->nonagle);
  981. release_sock(sk);
  982. if (copied > 0)
  983. uid_stat_tcp_snd(current_uid(), copied);
  984. return copied;
  985. do_fault:
  986. if (!skb->len) {
  987. tcp_unlink_write_queue(skb, sk);
  988. /* It is the one place in all of TCP, except connection
  989. * reset, where we can be unlinking the send_head.
  990. */
  991. tcp_check_send_head(sk, skb);
  992. sk_wmem_free_skb(sk, skb);
  993. }
  994. do_error:
  995. if (copied)
  996. goto out;
  997. out_err:
  998. err = sk_stream_error(sk, flags, err);
  999. release_sock(sk);
  1000. return err;
  1001. }
  1002. EXPORT_SYMBOL(tcp_sendmsg);
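
For illustration, how MSG_MORE looks from user space: the first send(2) is held back (tcp_push() above passes TCP_NAGLE_CORK) and coalesced with the final piece into full-sized segments. sockfd is a hypothetical connected TCP socket.

#include <stddef.h>
#include <sys/socket.h>

static int send_header_then_body(int sockfd,
				 const void *hdr, size_t hdr_len,
				 const void *body, size_t body_len)
{
	/* MSG_MORE: hold the data back so header and body can be merged. */
	if (send(sockfd, hdr, hdr_len, MSG_MORE) < 0)
		return -1;

	/* Final piece sent without MSG_MORE: pending frames are pushed out. */
	if (send(sockfd, body, body_len, 0) < 0)
		return -1;

	return 0;
}
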
  1003. /*
  1004. * Handle reading urgent data. BSD has very simple semantics for
  1005. * this, no blocking and very strange errors 8)
  1006. */
  1007. static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
  1008. {
  1009. struct tcp_sock *tp = tcp_sk(sk);
  1010. /* No URG data to read. */
  1011. if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
  1012. tp->urg_data == TCP_URG_READ)
  1013. return -EINVAL; /* Yes this is right ! */
  1014. if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
  1015. return -ENOTCONN;
  1016. if (tp->urg_data & TCP_URG_VALID) {
  1017. int err = 0;
  1018. char c = tp->urg_data;
  1019. if (!(flags & MSG_PEEK))
  1020. tp->urg_data = TCP_URG_READ;
  1021. /* Read urgent data. */
  1022. msg->msg_flags |= MSG_OOB;
  1023. if (len > 0) {
  1024. if (!(flags & MSG_TRUNC))
  1025. err = memcpy_toiovec(msg->msg_iov, &c, 1);
  1026. len = 1;
  1027. } else
  1028. msg->msg_flags |= MSG_TRUNC;
  1029. return err ? -EFAULT : len;
  1030. }
  1031. if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
  1032. return 0;
  1033. /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
  1034. * the available implementations agree in this case:
  1035. * this call should never block, independent of the
  1036. * blocking state of the socket.
  1037. * Mike <pall@rz.uni-karlsruhe.de>
  1038. */
  1039. return -EAGAIN;
  1040. }
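
A user-space sketch of the BSD-style urgent-data semantics implemented above; it assumes the peer has actually sent urgent data, that SO_OOBINLINE is off, and that sockfd is a connected TCP socket.

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static void drain_to_urgent_byte(int sockfd)
{
	char buf[512], oob;
	int at_mark = 0;

	/* Normal reads stop at the urgent mark, so loop until we reach it. */
	while (!at_mark) {
		if (ioctl(sockfd, SIOCATMARK, &at_mark) < 0)
			return;
		if (at_mark)
			break;
		if (recv(sockfd, buf, sizeof(buf), 0) <= 0)
			return;
	}

	/* Per the comment above, this never blocks: it fails instead. */
	if (recv(sockfd, &oob, 1, MSG_OOB) == 1)
		printf("urgent byte: 0x%02x\n", (unsigned char)oob);
}
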
  1041. /* Clean up the receive buffer for full frames taken by the user,
  1042. * then send an ACK if necessary. COPIED is the number of bytes
  1043. * tcp_recvmsg has given to the user so far, it speeds up the
  1044. * calculation of whether or not we must ACK for the sake of
  1045. * a window update.
  1046. */
  1047. void tcp_cleanup_rbuf(struct sock *sk, int copied)
  1048. {
  1049. struct tcp_sock *tp = tcp_sk(sk);
  1050. int time_to_ack = 0;
  1051. #if TCP_DEBUG
  1052. struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
  1053. WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
  1054. "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
  1055. tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
  1056. #endif
  1057. if (inet_csk_ack_scheduled(sk)) {
  1058. const struct inet_connection_sock *icsk = inet_csk(sk);
  1059. /* Delayed ACKs frequently hit locked sockets during bulk
  1060. * receive. */
  1061. if (icsk->icsk_ack.blocked ||
  1062. /* Once-per-two-segments ACK was not sent by tcp_input.c */
  1063. tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
  1064. /*
  1065. * If this read emptied read buffer, we send ACK, if
  1066. * connection is not bidirectional, user drained
  1067. * receive buffer and there was a small segment
  1068. * in queue.
  1069. */
  1070. (copied > 0 &&
  1071. ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
  1072. ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
  1073. !icsk->icsk_ack.pingpong)) &&
  1074. !atomic_read(&sk->sk_rmem_alloc)))
  1075. time_to_ack = 1;
  1076. }
  1077. /* We send an ACK if we can now advertise a non-zero window
  1078. * which has been raised "significantly".
  1079. *
  1080. * Even if window raised up to infinity, do not send window open ACK
  1081. * in states, where we will not receive more. It is useless.
  1082. */
  1083. if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
  1084. __u32 rcv_window_now = tcp_receive_window(tp);
  1085. /* Optimize, __tcp_select_window() is not cheap. */
  1086. if (2*rcv_window_now <= tp->window_clamp) {
  1087. __u32 new_window = __tcp_select_window(sk);
  1088. /* Send ACK now, if this read freed lots of space
  1089. * in our buffer. Certainly, new_window is new window.
  1090. * We can advertise it now, if it is not less than current one.
  1091. * "Lots" means "at least twice" here.
  1092. */
  1093. if (new_window && new_window >= 2 * rcv_window_now)
  1094. time_to_ack = 1;
  1095. }
  1096. }
  1097. if (time_to_ack)
  1098. tcp_send_ack(sk);
  1099. }
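
A related user-space knob, shown for illustration: TCP_QUICKACK temporarily disables the delayed-ACK (pingpong) behaviour that the function above takes into account when deciding whether to ACK now.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void force_quick_acks(int sockfd)
{
	int one = 1;

	/* Ask the stack to ACK immediately for a while rather than delaying. */
	setsockopt(sockfd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}
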
  1100. static void tcp_prequeue_process(struct sock *sk)
  1101. {
  1102. struct sk_buff *skb;
  1103. struct tcp_sock *tp = tcp_sk(sk);
  1104. NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
  1105. /* RX process wants to run with disabled BHs, though it is not
  1106. * necessary */
  1107. local_bh_disable();
  1108. while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
  1109. sk_backlog_rcv(sk, skb);
  1110. local_bh_enable();
  1111. /* Clear memory counter. */
  1112. tp->ucopy.memory = 0;
  1113. }
  1114. #ifdef CONFIG_NET_DMA
  1115. static void tcp_service_net_dma(struct sock *sk, bool wait)
  1116. {
  1117. dma_cookie_t done, used;
  1118. dma_cookie_t last_issued;
  1119. struct tcp_sock *tp = tcp_sk(sk);
  1120. if (!tp->ucopy.dma_chan)
  1121. return;
  1122. last_issued = tp->ucopy.dma_cookie;
  1123. dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
  1124. do {
  1125. if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
  1126. last_issued, &done,
  1127. &used) == DMA_SUCCESS) {
  1128. /* Safe to free early-copied skbs now */
  1129. __skb_queue_purge(&sk->sk_async_wait_queue);
  1130. break;
  1131. } else {
  1132. struct sk_buff *skb;
  1133. while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
  1134. (dma_async_is_complete(skb->dma_cookie, done,
  1135. used) == DMA_SUCCESS)) {
  1136. __skb_dequeue(&sk->sk_async_wait_queue);
  1137. kfree_skb(skb);
  1138. }
  1139. }
  1140. } while (wait);
  1141. }
  1142. #endif
  1143. static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
  1144. {
  1145. struct sk_buff *skb;
  1146. u32 offset;
  1147. skb_queue_walk(&sk->sk_receive_queue, skb) {
  1148. offset = seq - TCP_SKB_CB(skb)->seq;
  1149. if (tcp_hdr(skb)->syn)
  1150. offset--;
  1151. if (offset < skb->len || tcp_hdr(skb)->fin) {
  1152. *off = offset;
  1153. return skb;
  1154. }
  1155. }
  1156. return NULL;
  1157. }
  1158. /*
  1159. * This routine provides an alternative to tcp_recvmsg() for routines
  1160. * that would like to handle copying from skbuffs directly in 'sendfile'
  1161. * fashion.
  1162. * Note:
  1163. * - It is assumed that the socket was locked by the caller.
  1164. * - The routine does not block.
  1165. * - At present, there is no support for reading OOB data
  1166. * or for 'peeking' the socket using this routine
  1167. * (although both would be easy to implement).
  1168. */
  1169. int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
  1170. sk_read_actor_t recv_actor)
  1171. {
  1172. struct sk_buff *skb;
  1173. struct tcp_sock *tp = tcp_sk(sk);
  1174. u32 seq = tp->copied_seq;
  1175. u32 offset;
  1176. int copied = 0;
  1177. if (sk->sk_state == TCP_LISTEN)
  1178. return -ENOTCONN;
  1179. while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
  1180. if (offset < skb->len) {
  1181. int used;
  1182. size_t len;
  1183. len = skb->len - offset;
  1184. /* Stop reading if we hit a patch of urgent data */
  1185. if (tp->urg_data) {
  1186. u32 urg_offset = tp->urg_seq - seq;
  1187. if (urg_offset < len)
  1188. len = urg_offset;
  1189. if (!len)
  1190. break;
  1191. }
  1192. used = recv_actor(desc, skb, offset, len);
  1193. if (used < 0) {
  1194. if (!copied)
  1195. copied = used;
  1196. break;
  1197. } else if (used <= len) {
  1198. seq += used;
  1199. copied += used;
  1200. offset += used;
  1201. }
  1202. /*
  1203. * If recv_actor drops the lock (e.g. TCP splice
  1204. * receive) the skb pointer might be invalid when
  1205. * getting here: tcp_collapse might have deleted it
  1206. * while aggregating skbs from the socket queue.
  1207. */
  1208. skb = tcp_recv_skb(sk, seq-1, &offset);
  1209. if (!skb || (offset+1 != skb->len))
  1210. break;
  1211. }
  1212. if (tcp_hdr(skb)->fin) {
  1213. sk_eat_skb(sk, skb, 0);
  1214. ++seq;
  1215. break;
  1216. }
  1217. sk_eat_skb(sk, skb, 0);
  1218. if (!desc->count)
  1219. break;
  1220. tp->copied_seq = seq;
  1221. }
  1222. tp->copied_seq = seq;
  1223. tcp_rcv_space_adjust(sk);
  1224. /* Clean up data we have read: This will do ACK frames. */
  1225. if (copied > 0) {
  1226. tcp_cleanup_rbuf(sk, copied);
  1227. uid_stat_tcp_rcv(current_uid(), copied);
  1228. }
  1229. return copied;
  1230. }
  1231. EXPORT_SYMBOL(tcp_read_sock);
  1232. /*
  1233. * This routine copies from a sock struct into the user buffer.
  1234. *
  1235. * Technical note: in 2.3 we work on _locked_ socket, so that
  1236. * tricks with *seq access order and skb->users are not required.
  1237. * Probably, code can be easily improved even more.
  1238. */
  1239. int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  1240. size_t len, int nonblock, int flags, int *addr_len)
  1241. {
  1242. struct tcp_sock *tp = tcp_sk(sk);
  1243. int copied = 0;
  1244. u32 peek_seq;
  1245. u32 *seq;
  1246. unsigned long used;
  1247. int err;
  1248. int target; /* Read at least this many bytes */
  1249. long timeo;
  1250. struct task_struct *user_recv = NULL;
  1251. int copied_early = 0;
  1252. struct sk_buff *skb;
  1253. u32 urg_hole = 0;
  1254. lock_sock(sk);
  1255. err = -ENOTCONN;
  1256. if (sk->sk_state == TCP_LISTEN)
  1257. goto out;
  1258. timeo = sock_rcvtimeo(sk, nonblock);
  1259. /* Urgent data needs to be handled specially. */
  1260. if (flags & MSG_OOB)
  1261. goto recv_urg;
  1262. seq = &tp->copied_seq;
  1263. if (flags & MSG_PEEK) {
  1264. peek_seq = tp->copied_seq;
  1265. seq = &peek_seq;
  1266. }
  1267. target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
  1268. #ifdef CONFIG_NET_DMA
  1269. tp->ucopy.dma_chan = NULL;
  1270. preempt_disable();
  1271. skb = skb_peek_tail(&sk->sk_receive_queue);
  1272. {
  1273. int available = 0;
  1274. if (skb)
  1275. available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
  1276. if ((available < target) &&
  1277. (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
  1278. !sysctl_tcp_low_latency &&
  1279. dma_find_channel(DMA_MEMCPY)) {
  1280. preempt_enable_no_resched();
  1281. tp->ucopy.pinned_list =
  1282. dma_pin_iovec_pages(msg->msg_iov, len);
  1283. } else {
  1284. preempt_enable_no_resched();
  1285. }
  1286. }
  1287. #endif
  1288. do {
  1289. u32 offset;
  1290. /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
  1291. if (tp->urg_data && tp->urg_seq == *seq) {
  1292. if (copied)
  1293. break;
  1294. if (signal_pending(current)) {
  1295. copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
  1296. break;
  1297. }
  1298. }
  1299. /* Next get a buffer. */
  1300. skb_queue_walk(&sk->sk_receive_queue, skb) {
  1301. /* Now that we have two receive queues this
  1302. * shouldn't happen.
  1303. */
  1304. if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
  1305. "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
  1306. *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
  1307. flags))
  1308. break;
  1309. offset = *seq - TCP_SKB_CB(skb)->seq;
  1310. if (tcp_hdr(skb)->syn)
  1311. offset--;
  1312. if (offset < skb->len)
  1313. goto found_ok_skb;
  1314. if (tcp_hdr(skb)->fin)
  1315. goto found_fin_ok;
  1316. WARN(!(flags & MSG_PEEK),
  1317. "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
  1318. *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
  1319. }
  1320. /* Well, if we have backlog, try to process it now yet. */
  1321. if (copied >= target && !sk->sk_backlog.tail)
  1322. break;
  1323. if (copied) {
  1324. if (sk->sk_err ||
  1325. sk->sk_state == TCP_CLOSE ||
  1326. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  1327. !timeo ||
  1328. signal_pending(current))
  1329. break;
  1330. } else {
  1331. if (sock_flag(sk, SOCK_DONE))
  1332. break;
  1333. if (sk->sk_err) {
  1334. copied = sock_error(sk);
  1335. break;
  1336. }
  1337. if (sk->sk_shutdown & RCV_SHUTDOWN)
  1338. break;
  1339. if (sk->sk_state == TCP_CLOSE) {
  1340. if (!sock_flag(sk, SOCK_DONE)) {
  1341. /* This occurs when user tries to read
  1342. * from never connected socket.
  1343. */
  1344. copied = -ENOTCONN;
  1345. break;
  1346. }
  1347. break;
  1348. }
  1349. if (!timeo) {
  1350. copied = -EAGAIN;
  1351. break;
  1352. }
  1353. if (signal_pending(current)) {
  1354. copied = sock_intr_errno(timeo);
  1355. break;
  1356. }
  1357. }
  1358. tcp_cleanup_rbuf(sk, copied);
  1359. if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
  1360. /* Install new reader */
  1361. if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
  1362. user_recv = current;
  1363. tp->ucopy.task = user_recv;
  1364. tp->ucopy.iov = msg->msg_iov;
  1365. }
  1366. tp->ucopy.len = len;
  1367. WARN_ON(tp->copied_seq != tp->rcv_nxt &&
  1368. !(flags & (MSG_PEEK | MSG_TRUNC)));
  1369. /* Ugly... If prequeue is not empty, we have to
  1370. * process it before releasing socket, otherwise
  1371. * order will be broken at second iteration.
  1372. * More elegant solution is required!!!
  1373. *
  1374. * Look: we have the following (pseudo)queues:
  1375. *
  1376. * 1. packets in flight
  1377. * 2. backlog
  1378. * 3. prequeue
  1379. * 4. receive_queue
  1380. *
  1381. * Each queue can be processed only if the next ones
  1382. * are empty. At this point we have empty receive_queue.
  1383. * But prequeue _can_ be not empty after 2nd iteration,
  1384. * when we jumped to start of loop because backlog
  1385. * processing added something to receive_queue.
  1386. * We cannot release_sock(), because backlog contains
  1387. * packets arrived _after_ prequeued ones.
  1388. *
  1389. * Shortly, algorithm is clear --- to process all
  1390. * the queues in order. We could make it more directly,
  1391. * requeueing packets from backlog to prequeue, if
  1392. * is not empty. It is more elegant, but eats cycles,
  1393. * unfortunately.
  1394. */
  1395. if (!skb_queue_empty(&tp->ucopy.prequeue))
  1396. goto do_prequeue;
  1397. /* __ Set realtime policy in scheduler __ */
  1398. }
  1399. #ifdef CONFIG_NET_DMA
  1400. if (tp->ucopy.dma_chan)
  1401. dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
  1402. #endif
  1403. if (copied >= target) {
  1404. /* Do not sleep, just process backlog. */
  1405. release_sock(sk);
  1406. lock_sock(sk);
  1407. } else
  1408. sk_wait_data(sk, &timeo);
  1409. #ifdef CONFIG_NET_DMA
  1410. tcp_service_net_dma(sk, false); /* Don't block */
  1411. tp->ucopy.wakeup = 0;
  1412. #endif
  1413. if (user_recv) {
  1414. int chunk;
  1415. /* __ Restore normal policy in scheduler __ */
  1416. if ((chunk = len - tp->ucopy.len) != 0) {
  1417. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
  1418. len -= chunk;
  1419. copied += chunk;
  1420. }
  1421. if (tp->rcv_nxt == tp->copied_seq &&
  1422. !skb_queue_empty(&tp->ucopy.prequeue)) {
  1423. do_prequeue:
  1424. tcp_prequeue_process(sk);
  1425. if ((chunk = len - tp->ucopy.len) != 0) {
  1426. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
  1427. len -= chunk;
  1428. copied += chunk;
  1429. }
  1430. }
  1431. }
  1432. if ((flags & MSG_PEEK) &&
  1433. (peek_seq - copied - urg_hole != tp->copied_seq)) {
  1434. if (net_ratelimit())
  1435. printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
  1436. current->comm, task_pid_nr(current));
  1437. peek_seq = tp->copied_seq;
  1438. }
  1439. continue;
  1440. found_ok_skb:
  1441. /* Ok so how much can we use? */
  1442. used = skb->len - offset;
  1443. if (len < used)
  1444. used = len;
  1445. /* Do we have urgent data here? */
  1446. if (tp->urg_data) {
  1447. u32 urg_offset = tp->urg_seq - *seq;
  1448. if (urg_offset < used) {
  1449. if (!urg_offset) {
  1450. if (!sock_flag(sk, SOCK_URGINLINE)) {
  1451. ++*seq;
  1452. urg_hole++;
  1453. offset++;
  1454. used--;
  1455. if (!used)
  1456. goto skip_copy;
  1457. }
  1458. } else
  1459. used = urg_offset;
  1460. }
  1461. }
  1462. if (!(flags & MSG_TRUNC)) {
  1463. #ifdef CONFIG_NET_DMA
  1464. if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
  1465. tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
  1466. if (tp->ucopy.dma_chan) {
  1467. tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
  1468. tp->ucopy.dma_chan, skb, offset,
  1469. msg->msg_iov, used,
  1470. tp->ucopy.pinned_list);
  1471. if (tp->ucopy.dma_cookie < 0) {
  1472. printk(KERN_ALERT "dma_cookie < 0\n");
  1473. /* Exception. Bailout! */
  1474. if (!copied)
  1475. copied = -EFAULT;
  1476. break;
  1477. }
  1478. dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
  1479. if ((offset + used) == skb->len)
  1480. copied_early = 1;
  1481. } else
  1482. #endif
  1483. {
  1484. err = skb_copy_datagram_iovec(skb, offset,
  1485. msg->msg_iov, used);
  1486. if (err) {
  1487. /* Exception. Bailout! */
  1488. if (!copied)
  1489. copied = -EFAULT;
  1490. break;
  1491. }
  1492. }
  1493. }
  1494. *seq += used;
  1495. copied += used;
  1496. len -= used;
  1497. tcp_rcv_space_adjust(sk);
  1498. skip_copy:
  1499. if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
  1500. tp->urg_data = 0;
  1501. tcp_fast_path_check(sk);
  1502. }
  1503. if (used + offset < skb->len)
  1504. continue;
  1505. if (tcp_hdr(skb)->fin)
  1506. goto found_fin_ok;
  1507. if (!(flags & MSG_PEEK)) {
  1508. sk_eat_skb(sk, skb, copied_early);
  1509. copied_early = 0;
  1510. }
  1511. continue;
  1512. found_fin_ok:
  1513. /* Process the FIN. */
  1514. ++*seq;
  1515. if (!(flags & MSG_PEEK)) {
  1516. sk_eat_skb(sk, skb, copied_early);
  1517. copied_early = 0;
  1518. }
  1519. break;
  1520. } while (len > 0);
  1521. if (user_recv) {
  1522. if (!skb_queue_empty(&tp->ucopy.prequeue)) {
  1523. int chunk;
  1524. tp->ucopy.len = copied > 0 ? len : 0;
  1525. tcp_prequeue_process(sk);
  1526. if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
  1527. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
  1528. len -= chunk;
  1529. copied += chunk;
  1530. }
  1531. }
  1532. tp->ucopy.task = NULL;
  1533. tp->ucopy.len = 0;
  1534. }
  1535. #ifdef CONFIG_NET_DMA
  1536. tcp_service_net_dma(sk, true); /* Wait for queue to drain */
  1537. tp->ucopy.dma_chan = NULL;
  1538. if (tp->ucopy.pinned_list) {
  1539. dma_unpin_iovec_pages(tp->ucopy.pinned_list);
  1540. tp->ucopy.pinned_list = NULL;
  1541. }
  1542. #endif
  1543. /* According to UNIX98, msg_name/msg_namelen are ignored
  1544. * on connected socket. I was just happy when found this 8) --ANK
  1545. */
  1546. /* Clean up data we have read: This will do ACK frames. */
  1547. tcp_cleanup_rbuf(sk, copied);
  1548. release_sock(sk);
  1549. if (copied > 0)
  1550. uid_stat_tcp_rcv(current_uid(), copied);
  1551. return copied;
  1552. out:
  1553. release_sock(sk);
  1554. return err;
  1555. recv_urg:
  1556. err = tcp_recv_urg(sk, msg, len, flags);
  1557. if (err > 0)
  1558. uid_stat_tcp_rcv(current_uid(), err);
  1559. goto out;
  1560. }
  1561. EXPORT_SYMBOL(tcp_recvmsg);
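
For illustration, two tcp_recvmsg() behaviours as seen from user space: MSG_PEEK copies data without advancing copied_seq, and MSG_WAITALL raises the read target so the call waits for the full buffer. sockfd is a hypothetical connected TCP socket.

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static void peek_then_read(int sockfd)
{
	char peek[4], msg[128];
	ssize_t n;

	/* MSG_PEEK: copy without consuming; the data stays queued. */
	n = recv(sockfd, peek, sizeof(peek), MSG_PEEK);
	if (n > 0)
		printf("peeked %zd bytes\n", n);

	/* MSG_WAITALL: wait for the full buffer (or EOF, error, or signal). */
	n = recv(sockfd, msg, sizeof(msg), MSG_WAITALL);
	if (n > 0)
		printf("read %zd bytes\n", n);
}
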
  1562. #ifdef CONFIG_HTC_TCP_SYN_FAIL
  1563. EXPORT_SYMBOL(sysctl_tcp_syn_fail);
  1564. __be32 sysctl_tcp_syn_fail=0;
  1565. #endif /* CONFIG_HTC_TCP_SYN_FAIL */
  1566. void tcp_set_state(struct sock *sk, int state)
  1567. {
  1568. int oldstate = sk->sk_state;
  1569. #ifdef CONFIG_HTC_TCP_SYN_FAIL
  1570. struct inet_sock *inet;
  1571. struct inet_connection_sock *icsk = inet_csk(sk);
  1572. __be32 dst = icsk->icsk_inet.inet_daddr;
  1573. #endif /* CONFIG_HTC_TCP_SYN_FAIL */
  1574. switch (state) {
  1575. case TCP_ESTABLISHED:
  1576. if (oldstate != TCP_ESTABLISHED)
  1577. TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
  1578. break;
  1579. case TCP_CLOSE:
  1580. if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
  1581. TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
  1582. sk->sk_prot->unhash(sk);
  1583. if (inet_csk(sk)->icsk_bind_hash &&
  1584. !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
  1585. inet_put_port(sk);
  1586. #ifdef CONFIG_HTC_TCP_SYN_FAIL
  1587. if (sk!=NULL) {
  1588. inet = inet_sk(sk);
  1589. if (inet !=NULL && ntohs(inet->inet_sport) != 0 ) {
  1590. if ( dst != 0x0100007F && sk->sk_state== TCP_SYN_SENT && icsk->icsk_retransmits >= 2 ) {
  1591. printk(KERN_INFO "[NET][SMD] TCP SYN SENT fail, dst=%x, retransmit=%d \n",dst,icsk->icsk_retransmits);
  1592. sysctl_tcp_syn_fail = dst;
  1593. }
  1594. }
  1595. }
  1596. #endif /* CONFIG_HTC_TCP_SYN_FAIL */
  1597. /* fall through */
  1598. default:
  1599. if (oldstate == TCP_ESTABLISHED)
  1600. TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
  1601. }
  1602. /* Change state AFTER socket is unhashed to avoid closed
  1603. * socket sitting in hash tables.
  1604. */
  1605. sk->sk_state = state;
  1606. #ifdef STATE_TRACE
  1607. SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
  1608. #endif
  1609. }
  1610. EXPORT_SYMBOL_GPL(tcp_set_state);
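
For illustration, the state set by tcp_set_state() can be observed from user space through the TCP_INFO socket option; this sketch just prints the numeric tcpi_state of a hypothetical socket.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void print_tcp_state(int sockfd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(sockfd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("tcpi_state = %d\n", (int)ti.tcpi_state);
}
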
  1611. /*
  1612. * State processing on a close. This implements the state shift for
  1613. * sending our FIN frame. Note that we only send a FIN for some
  1614. * states. A shutdown() may have already sent the FIN, or we may be
  1615. * closed.
  1616. */
  1617. static const unsigned char new_state[16] = {
  1618. /* current state: new state: action: */
  1619. /* (Invalid) */ TCP_CLOSE,
  1620. /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  1621. /* TCP_SYN_SENT */ TCP_CLOSE,
  1622. /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  1623. /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
  1624. /* TCP_…

The listing is truncated here; the remainder of the file is not shown.