
/net/ipv4/tcp.c

https://bitbucket.org/zarboz/villez-htc-2.38-linux-3.0.51
C | 3449 lines | 2289 code | 478 blank | 682 comment | 553 complexity
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0


  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Implementation of the Transmission Control Protocol(TCP).
  7. *
  8. * Authors: Ross Biro
  9. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10. * Mark Evans, <evansmp@uhura.aston.ac.uk>
  11. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  12. * Florian La Roche, <flla@stud.uni-sb.de>
  13. * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14. * Linus Torvalds, <torvalds@cs.helsinki.fi>
  15. * Alan Cox, <gw4pts@gw4pts.ampr.org>
  16. * Matthew Dillon, <dillon@apollo.west.oic.com>
  17. * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18. * Jorge Cwik, <jorge@laser.satlink.net>
  19. *
  20. * Fixes:
  21. * Alan Cox : Numerous verify_area() calls
  22. * Alan Cox : Set the ACK bit on a reset
  23. * Alan Cox : Stopped it crashing if it closed while
  24. * sk->inuse=1 and was trying to connect
  25. * (tcp_err()).
  26. * Alan Cox : All icmp error handling was broken
  27. * pointers passed where wrong and the
  28. * socket was looked up backwards. Nobody
  29. * tested any icmp error code obviously.
  30. * Alan Cox : tcp_err() now handled properly. It
  31. * wakes people on errors. poll
  32. * behaves and the icmp error race
  33. * has gone by moving it into sock.c
  34. * Alan Cox : tcp_send_reset() fixed to work for
  35. * everything not just packets for
  36. * unknown sockets.
  37. * Alan Cox : tcp option processing.
  38. * Alan Cox : Reset tweaked (still not 100%) [Had
  39. * syn rule wrong]
  40. * Herp Rosmanith : More reset fixes
  41. * Alan Cox : No longer acks invalid rst frames.
  42. * Acking any kind of RST is right out.
  43. * Alan Cox : Sets an ignore me flag on an rst
  44. * receive otherwise odd bits of prattle
  45. * escape still
  46. * Alan Cox : Fixed another acking RST frame bug.
  47. * Should stop LAN workplace lockups.
  48. * Alan Cox : Some tidyups using the new skb list
  49. * facilities
  50. * Alan Cox : sk->keepopen now seems to work
  51. * Alan Cox : Pulls options out correctly on accepts
  52. * Alan Cox : Fixed assorted sk->rqueue->next errors
  53. * Alan Cox : PSH doesn't end a TCP read. Switched a
  54. * bit to skb ops.
  55. * Alan Cox : Tidied tcp_data to avoid a potential
  56. * nasty.
  57. * Alan Cox : Added some better commenting, as the
  58. * tcp is hard to follow
  59. * Alan Cox : Removed incorrect check for 20 * psh
  60. * Michael O'Reilly : ack < copied bug fix.
  61. * Johannes Stille : Misc tcp fixes (not all in yet).
  62. * Alan Cox : FIN with no memory -> CRASH
  63. * Alan Cox : Added socket option proto entries.
  64. * Also added awareness of them to accept.
  65. * Alan Cox : Added TCP options (SOL_TCP)
  66. * Alan Cox : Switched wakeup calls to callbacks,
  67. * so the kernel can layer network
  68. * sockets.
  69. * Alan Cox : Use ip_tos/ip_ttl settings.
  70. * Alan Cox : Handle FIN (more) properly (we hope).
  71. * Alan Cox : RST frames sent on unsynchronised
  72. * state ack error.
  73. * Alan Cox : Put in missing check for SYN bit.
  74. * Alan Cox : Added tcp_select_window() aka NET2E
  75. * window non shrink trick.
  76. * Alan Cox : Added a couple of small NET2E timer
  77. * fixes
  78. * Charles Hedrick : TCP fixes
  79. * Toomas Tamm : TCP window fixes
  80. * Alan Cox : Small URG fix to rlogin ^C ack fight
  81. * Charles Hedrick : Rewrote most of it to actually work
  82. * Linus : Rewrote tcp_read() and URG handling
  83. * completely
  84. * Gerhard Koerting: Fixed some missing timer handling
  85. * Matthew Dillon : Reworked TCP machine states as per RFC
  86. * Gerhard Koerting: PC/TCP workarounds
  87. * Adam Caldwell : Assorted timer/timing errors
  88. * Matthew Dillon : Fixed another RST bug
  89. * Alan Cox : Move to kernel side addressing changes.
  90. * Alan Cox : Beginning work on TCP fastpathing
  91. * (not yet usable)
  92. * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
  93. * Alan Cox : TCP fast path debugging
  94. * Alan Cox : Window clamping
  95. * Michael Riepe : Bug in tcp_check()
  96. * Matt Dillon : More TCP improvements and RST bug fixes
  97. * Matt Dillon : Yet more small nasties remove from the
  98. * TCP code (Be very nice to this man if
  99. * tcp finally works 100%) 8)
  100. * Alan Cox : BSD accept semantics.
  101. * Alan Cox : Reset on closedown bug.
  102. * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
  103. * Michael Pall : Handle poll() after URG properly in
  104. * all cases.
  105. * Michael Pall : Undo the last fix in tcp_read_urg()
  106. * (multi URG PUSH broke rlogin).
  107. * Michael Pall : Fix the multi URG PUSH problem in
  108. * tcp_readable(), poll() after URG
  109. * works now.
  110. * Michael Pall : recv(...,MSG_OOB) never blocks in the
  111. * BSD api.
  112. * Alan Cox : Changed the semantics of sk->socket to
  113. * fix a race and a signal problem with
  114. * accept() and async I/O.
  115. * Alan Cox : Relaxed the rules on tcp_sendto().
  116. * Yury Shevchuk : Really fixed accept() blocking problem.
  117. * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
  118. * clients/servers which listen in on
  119. * fixed ports.
  120. * Alan Cox : Cleaned the above up and shrank it to
  121. * a sensible code size.
  122. * Alan Cox : Self connect lockup fix.
  123. * Alan Cox : No connect to multicast.
  124. * Ross Biro : Close unaccepted children on master
  125. * socket close.
  126. * Alan Cox : Reset tracing code.
  127. * Alan Cox : Spurious resets on shutdown.
  128. * Alan Cox : Giant 15 minute/60 second timer error
  129. * Alan Cox : Small whoops in polling before an
  130. * accept.
  131. * Alan Cox : Kept the state trace facility since
  132. * it's handy for debugging.
  133. * Alan Cox : More reset handler fixes.
  134. * Alan Cox : Started rewriting the code based on
  135. * the RFC's for other useful protocol
  136. * references see: Comer, KA9Q NOS, and
  137. * for a reference on the difference
  138. * between specifications and how BSD
  139. * works see the 4.4lite source.
  140. * A.N.Kuznetsov : Don't time wait on completion of tidy
  141. * close.
  142. * Linus Torvalds : Fin/Shutdown & copied_seq changes.
  143. * Linus Torvalds : Fixed BSD port reuse to work first syn
  144. * Alan Cox : Reimplemented timers as per the RFC
  145. * and using multiple timers for sanity.
  146. * Alan Cox : Small bug fixes, and a lot of new
  147. * comments.
  148. * Alan Cox : Fixed dual reader crash by locking
  149. * the buffers (much like datagram.c)
  150. * Alan Cox : Fixed stuck sockets in probe. A probe
  151. * now gets fed up of retrying without
  152. * (even a no space) answer.
  153. * Alan Cox : Extracted closing code better
  154. * Alan Cox : Fixed the closing state machine to
  155. * resemble the RFC.
  156. * Alan Cox : More 'per spec' fixes.
  157. * Jorge Cwik : Even faster checksumming.
  158. * Alan Cox : tcp_data() doesn't ack illegal PSH
  159. * only frames. At least one pc tcp stack
  160. * generates them.
  161. * Alan Cox : Cache last socket.
  162. * Alan Cox : Per route irtt.
  163. * Matt Day : poll()->select() match BSD precisely on error
  164. * Alan Cox : New buffers
  165. * Marc Tamsky : Various sk->prot->retransmits and
  166. * sk->retransmits misupdating fixed.
  167. * Fixed tcp_write_timeout: stuck close,
  168. * and TCP syn retries gets used now.
  169. * Mark Yarvis : In tcp_read_wakeup(), don't send an
  170. * ack if state is TCP_CLOSED.
  171. * Alan Cox : Look up device on a retransmit - routes may
  172. * change. Doesn't yet cope with MSS shrink right
  173. * but it's a start!
  174. * Marc Tamsky : Closing in closing fixes.
  175. * Mike Shaver : RFC1122 verifications.
  176. * Alan Cox : rcv_saddr errors.
  177. * Alan Cox : Block double connect().
  178. * Alan Cox : Small hooks for enSKIP.
  179. * Alexey Kuznetsov: Path MTU discovery.
  180. * Alan Cox : Support soft errors.
  181. * Alan Cox : Fix MTU discovery pathological case
  182. * when the remote claims no mtu!
  183. * Marc Tamsky : TCP_CLOSE fix.
  184. * Colin (G3TNE) : Send a reset on syn ack replies in
  185. * window but wrong (fixes NT lpd problems)
  186. * Pedro Roque : Better TCP window handling, delayed ack.
  187. * Joerg Reuter : No modification of locked buffers in
  188. * tcp_do_retransmit()
  189. * Eric Schenk : Changed receiver side silly window
  190. * avoidance algorithm to BSD style
  191. * algorithm. This doubles throughput
  192. * against machines running Solaris,
  193. * and seems to result in general
  194. * improvement.
  195. * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
  196. * Willy Konynenberg : Transparent proxying support.
  197. * Mike McLagan : Routing by source
  198. * Keith Owens : Do proper merging with partial SKB's in
  199. * tcp_do_sendmsg to avoid burstiness.
  200. * Eric Schenk : Fix fast close down bug with
  201. * shutdown() followed by close().
  202. * Andi Kleen : Make poll agree with SIGIO
  203. * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
  204. * lingertime == 0 (RFC 793 ABORT Call)
  205. * Hirokazu Takahashi : Use copy_from_user() instead of
  206. * csum_and_copy_from_user() if possible.
  207. *
  208. * This program is free software; you can redistribute it and/or
  209. * modify it under the terms of the GNU General Public License
  210. * as published by the Free Software Foundation; either version
  211. * 2 of the License, or(at your option) any later version.
  212. *
  213. * Description of States:
  214. *
  215. * TCP_SYN_SENT sent a connection request, waiting for ack
  216. *
  217. * TCP_SYN_RECV received a connection request, sent ack,
  218. * waiting for final ack in three-way handshake.
  219. *
  220. * TCP_ESTABLISHED connection established
  221. *
  222. * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
  223. * transmission of remaining buffered data
  224. *
  225. * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
  226. * to shutdown
  227. *
  228. * TCP_CLOSING both sides have shutdown but we still have
  229. * data we have to finish sending
  230. *
  231. * TCP_TIME_WAIT timeout to catch resent junk before entering
  232. * closed, can only be entered from FIN_WAIT2
  233. * or CLOSING. Required because the other end
  234. * may not have gotten our last ACK causing it
  235. * to retransmit the data packet (which we ignore)
  236. *
  237. * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
  238. * us to finish writing our data and to shutdown
  239. * (we have to close() to move on to LAST_ACK)
  240. *
  241. * TCP_LAST_ACK out side has shutdown after remote has
  242. * shutdown. There may still be data in our
  243. * buffer that we have to finish sending
  244. *
  245. * TCP_CLOSE socket is finished
  246. */
  247. #include <linux/kernel.h>
  248. #include <linux/module.h>
  249. #include <linux/types.h>
  250. #include <linux/fcntl.h>
  251. #include <linux/poll.h>
  252. #include <linux/init.h>
  253. #include <linux/fs.h>
  254. #include <linux/skbuff.h>
  255. #include <linux/scatterlist.h>
  256. #include <linux/splice.h>
  257. #include <linux/net.h>
  258. #include <linux/socket.h>
  259. #include <linux/random.h>
  260. #include <linux/bootmem.h>
  261. #include <linux/highmem.h>
  262. #include <linux/swap.h>
  263. #include <linux/cache.h>
  264. #include <linux/err.h>
  265. #include <linux/crypto.h>
  266. #include <linux/time.h>
  267. #include <linux/slab.h>
  268. #include <linux/uid_stat.h>
  269. #include <net/icmp.h>
  270. #include <net/tcp.h>
  271. #include <net/xfrm.h>
  272. #include <net/ip.h>
  273. #include <net/ip6_route.h>
  274. #include <net/ipv6.h>
  275. #include <net/transp_v6.h>
  276. #include <net/netdma.h>
  277. #include <net/sock.h>
  278. #include <asm/uaccess.h>
  279. #include <asm/ioctls.h>
  280. int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
  281. struct percpu_counter tcp_orphan_count;
  282. EXPORT_SYMBOL_GPL(tcp_orphan_count);
  283. long sysctl_tcp_mem[3] __read_mostly;
  284. int sysctl_tcp_wmem[3] __read_mostly;
  285. int sysctl_tcp_rmem[3] __read_mostly;
  286. EXPORT_SYMBOL(sysctl_tcp_mem);
  287. EXPORT_SYMBOL(sysctl_tcp_rmem);
  288. EXPORT_SYMBOL(sysctl_tcp_wmem);
  289. atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
  290. EXPORT_SYMBOL(tcp_memory_allocated);
  291. /*
  292. * Current number of TCP sockets.
  293. */
  294. struct percpu_counter tcp_sockets_allocated;
  295. EXPORT_SYMBOL(tcp_sockets_allocated);
  296. /*
  297. * TCP splice context
  298. */
  299. struct tcp_splice_state {
  300. struct pipe_inode_info *pipe;
  301. size_t len;
  302. unsigned int flags;
  303. };
  304. /*
  305. * Pressure flag: try to collapse.
  306. * Technical note: it is used by multiple contexts non atomically.
  307. * All the __sk_mem_schedule() is of this nature: accounting
  308. * is strict, actions are advisory and have some latency.
  309. */
  310. int tcp_memory_pressure __read_mostly;
  311. EXPORT_SYMBOL(tcp_memory_pressure);
  312. void tcp_enter_memory_pressure(struct sock *sk)
  313. {
  314. if (!tcp_memory_pressure) {
  315. NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
  316. tcp_memory_pressure = 1;
  317. }
  318. }
  319. EXPORT_SYMBOL(tcp_enter_memory_pressure);
  320. /* Convert seconds to retransmits based on initial and max timeout */
  321. static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
  322. {
  323. u8 res = 0;
  324. if (seconds > 0) {
  325. int period = timeout;
  326. res = 1;
  327. while (seconds > period && res < 255) {
  328. res++;
  329. timeout <<= 1;
  330. if (timeout > rto_max)
  331. timeout = rto_max;
  332. period += timeout;
  333. }
  334. }
  335. return res;
  336. }
  337. /* Convert retransmits to seconds based on initial and max timeout */
  338. static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
  339. {
  340. int period = 0;
  341. if (retrans > 0) {
  342. period = timeout;
  343. while (--retrans) {
  344. timeout <<= 1;
  345. if (timeout > rto_max)
  346. timeout = rto_max;
  347. period += timeout;
  348. }
  349. }
  350. return period;
  351. }
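/*
 * Editor's illustration (not part of the original source): the two helpers
 * above are approximate inverses of each other.  With an initial timeout of
 * 1 and an rto_max of 120 (units are whatever the caller passes in), the
 * doubled periods accumulate as 1 + 2 + 4 + 8 = 15, so:
 *
 *      secs_to_retrans(15, 1, 120) == 4    // four backed-off retransmits fit in 15
 *      retrans_to_secs(4, 1, 120)  == 15   // and four retransmits span 15 again
 *
 * Each doubled timeout is clamped at rto_max, and secs_to_retrans()
 * saturates at 255 because the count is returned as a u8.
 */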
  352. /*
  353. * Wait for a TCP event.
  354. *
  355. * Note that we don't need to lock the socket, as the upper poll layers
  356. * take care of normal races (between the test and the event) and we don't
  357. * go look at any of the socket buffers directly.
  358. */
  359. unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
  360. {
  361. unsigned int mask;
  362. struct sock *sk = sock->sk;
  363. struct tcp_sock *tp = tcp_sk(sk);
  364. sock_poll_wait(file, sk_sleep(sk), wait);
  365. if (sk->sk_state == TCP_LISTEN)
  366. return inet_csk_listen_poll(sk);
  367. /* Socket is not locked. We are protected from async events
  368. * by poll logic and correct handling of state changes
  369. * made by other threads is impossible in any case.
  370. */
  371. mask = 0;
  372. /*
  373. * POLLHUP is certainly not done right. But poll() doesn't
  374. * have a notion of HUP in just one direction, and for a
  375. * socket the read side is more interesting.
  376. *
  377. * Some poll() documentation says that POLLHUP is incompatible
  378. * with the POLLOUT/POLLWR flags, so somebody should check this
  379. * all. But careful, it tends to be safer to return too many
  380. * bits than too few, and you can easily break real applications
  381. * if you don't tell them that something has hung up!
  382. *
  383. * Check-me.
  384. *
  385. * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
  386. * our fs/select.c). It means that after we received EOF,
  387. * poll always returns immediately, making impossible poll() on write()
  388. * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
  389. * if and only if shutdown has been made in both directions.
  390. * Actually, it is interesting to look how Solaris and DUX
  391. * solve this dilemma. I would prefer, if POLLHUP were maskable,
  392. * then we could set it on SND_SHUTDOWN. BTW examples given
  393. * in Stevens' books assume exactly this behaviour, it explains
  394. * why POLLHUP is incompatible with POLLOUT. --ANK
  395. *
  396. * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
  397. * blocking on fresh not-connected or disconnected socket. --ANK
  398. */
  399. if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
  400. mask |= POLLHUP;
  401. if (sk->sk_shutdown & RCV_SHUTDOWN)
  402. mask |= POLLIN | POLLRDNORM | POLLRDHUP;
  403. /* Connected? */
  404. if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
  405. int target = sock_rcvlowat(sk, 0, INT_MAX);
  406. if (tp->urg_seq == tp->copied_seq &&
  407. !sock_flag(sk, SOCK_URGINLINE) &&
  408. tp->urg_data)
  409. target++;
  410. /* Potential race condition. If read of tp below will
  411. * escape above sk->sk_state, we can be illegally awaken
  412. * in SYN_* states. */
  413. if (tp->rcv_nxt - tp->copied_seq >= target)
  414. mask |= POLLIN | POLLRDNORM;
  415. if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
  416. if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
  417. mask |= POLLOUT | POLLWRNORM;
  418. } else { /* send SIGIO later */
  419. set_bit(SOCK_ASYNC_NOSPACE,
  420. &sk->sk_socket->flags);
  421. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  422. /* Race breaker. If space is freed after
  423. * wspace test but before the flags are set,
  424. * IO signal will be lost.
  425. */
  426. if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
  427. mask |= POLLOUT | POLLWRNORM;
  428. }
  429. } else
  430. mask |= POLLOUT | POLLWRNORM;
  431. if (tp->urg_data & TCP_URG_VALID)
  432. mask |= POLLPRI;
  433. }
  434. /* This barrier is coupled with smp_wmb() in tcp_reset() */
  435. smp_rmb();
  436. if (sk->sk_err)
  437. mask |= POLLERR;
  438. return mask;
  439. }
  440. EXPORT_SYMBOL(tcp_poll);
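/*
 * Editor's sketch (userspace view; illustrative, not part of the original
 * file): the mask built above is what an application receives from poll(2).
 * A minimal reader distinguishing ordinary data, urgent data and peer
 * shutdown might look like this, where sock_fd and handle_peer_close() are
 * assumed:
 *
 *      struct pollfd pfd = { .fd = sock_fd,
 *                            .events = POLLIN | POLLPRI | POLLRDHUP };
 *      char buf[4096], oob;
 *
 *      if (poll(&pfd, 1, -1) > 0) {
 *              if (pfd.revents & POLLPRI)              // TCP_URG_VALID case above
 *                      recv(sock_fd, &oob, 1, MSG_OOB);
 *              if (pfd.revents & POLLIN)               // rcv_nxt - copied_seq >= target
 *                      read(sock_fd, buf, sizeof(buf));
 *              if (pfd.revents & (POLLRDHUP | POLLHUP))
 *                      handle_peer_close(sock_fd);     // RCV_SHUTDOWN / full shutdown
 *      }
 */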
  441. int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  442. {
  443. struct tcp_sock *tp = tcp_sk(sk);
  444. int answ;
  445. switch (cmd) {
  446. case SIOCINQ:
  447. if (sk->sk_state == TCP_LISTEN)
  448. return -EINVAL;
  449. lock_sock(sk);
  450. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  451. answ = 0;
  452. else if (sock_flag(sk, SOCK_URGINLINE) ||
  453. !tp->urg_data ||
  454. before(tp->urg_seq, tp->copied_seq) ||
  455. !before(tp->urg_seq, tp->rcv_nxt)) {
  456. answ = tp->rcv_nxt - tp->copied_seq;
  457. /* Subtract 1, if FIN was received */
  458. if (answ && sock_flag(sk, SOCK_DONE))
  459. answ--;
  460. } else
  461. answ = tp->urg_seq - tp->copied_seq;
  462. release_sock(sk);
  463. break;
  464. case SIOCATMARK:
  465. answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
  466. break;
  467. case SIOCOUTQ:
  468. if (sk->sk_state == TCP_LISTEN)
  469. return -EINVAL;
  470. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  471. answ = 0;
  472. else
  473. answ = tp->write_seq - tp->snd_una;
  474. break;
  475. case SIOCOUTQNSD:
  476. if (sk->sk_state == TCP_LISTEN)
  477. return -EINVAL;
  478. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  479. answ = 0;
  480. else
  481. answ = tp->write_seq - tp->snd_nxt;
  482. break;
  483. default:
  484. return -ENOIOCTLCMD;
  485. }
  486. return put_user(answ, (int __user *)arg);
  487. }
  488. EXPORT_SYMBOL(tcp_ioctl);
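/*
 * Editor's sketch (userspace view; illustrative, not part of the original
 * file): these commands are reached through ioctl(2) on the socket fd.
 * SIOCINQ (an alias of FIONREAD) reports bytes readable without blocking,
 * SIOCOUTQ the bytes written but not yet acknowledged, and SIOCOUTQNSD the
 * bytes not yet sent at all:
 *
 *      int inq = 0, outq = 0;
 *
 *      if (ioctl(sock_fd, SIOCINQ, &inq) == 0 &&
 *          ioctl(sock_fd, SIOCOUTQ, &outq) == 0)
 *              fprintf(stderr, "readable=%d unacked=%d\n", inq, outq);
 */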
  489. static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
  490. {
  491. TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
  492. tp->pushed_seq = tp->write_seq;
  493. }
  494. static inline int forced_push(struct tcp_sock *tp)
  495. {
  496. return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
  497. }
  498. static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
  499. {
  500. struct tcp_sock *tp = tcp_sk(sk);
  501. struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
  502. skb->csum = 0;
  503. tcb->seq = tcb->end_seq = tp->write_seq;
  504. tcb->flags = TCPHDR_ACK;
  505. tcb->sacked = 0;
  506. skb_header_release(skb);
  507. tcp_add_write_queue_tail(sk, skb);
  508. sk->sk_wmem_queued += skb->truesize;
  509. sk_mem_charge(sk, skb->truesize);
  510. if (tp->nonagle & TCP_NAGLE_PUSH)
  511. tp->nonagle &= ~TCP_NAGLE_PUSH;
  512. }
  513. static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
  514. {
  515. if (flags & MSG_OOB)
  516. tp->snd_up = tp->write_seq;
  517. }
  518. static inline void tcp_push(struct sock *sk, int flags, int mss_now,
  519. int nonagle)
  520. {
  521. if (tcp_send_head(sk)) {
  522. struct tcp_sock *tp = tcp_sk(sk);
  523. if (!(flags & MSG_MORE) || forced_push(tp))
  524. tcp_mark_push(tp, tcp_write_queue_tail(sk));
  525. tcp_mark_urg(tp, flags);
  526. __tcp_push_pending_frames(sk, mss_now,
  527. (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
  528. }
  529. }
  530. static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  531. unsigned int offset, size_t len)
  532. {
  533. struct tcp_splice_state *tss = rd_desc->arg.data;
  534. int ret;
  535. ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
  536. tss->flags);
  537. if (ret > 0)
  538. rd_desc->count -= ret;
  539. return ret;
  540. }
  541. static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
  542. {
  543. /* Store TCP splice context information in read_descriptor_t. */
  544. read_descriptor_t rd_desc = {
  545. .arg.data = tss,
  546. .count = tss->len,
  547. };
  548. return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
  549. }
  550. /**
  551. * tcp_splice_read - splice data from TCP socket to a pipe
  552. * @sock: socket to splice from
  553. * @ppos: position (not valid)
  554. * @pipe: pipe to splice to
  555. * @len: number of bytes to splice
  556. * @flags: splice modifier flags
  557. *
  558. * Description:
  559. * Will read pages from given socket and fill them into a pipe.
  560. *
  561. **/
  562. ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
  563. struct pipe_inode_info *pipe, size_t len,
  564. unsigned int flags)
  565. {
  566. struct sock *sk = sock->sk;
  567. struct tcp_splice_state tss = {
  568. .pipe = pipe,
  569. .len = len,
  570. .flags = flags,
  571. };
  572. long timeo;
  573. ssize_t spliced;
  574. int ret;
  575. sock_rps_record_flow(sk);
  576. /*
  577. * We can't seek on a socket input
  578. */
  579. if (unlikely(*ppos))
  580. return -ESPIPE;
  581. ret = spliced = 0;
  582. lock_sock(sk);
  583. timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
  584. while (tss.len) {
  585. ret = __tcp_splice_read(sk, &tss);
  586. if (ret < 0)
  587. break;
  588. else if (!ret) {
  589. if (spliced)
  590. break;
  591. if (sock_flag(sk, SOCK_DONE))
  592. break;
  593. if (sk->sk_err) {
  594. ret = sock_error(sk);
  595. break;
  596. }
  597. if (sk->sk_shutdown & RCV_SHUTDOWN)
  598. break;
  599. if (sk->sk_state == TCP_CLOSE) {
  600. /*
  601. * This occurs when user tries to read
  602. * from never connected socket.
  603. */
  604. if (!sock_flag(sk, SOCK_DONE))
  605. ret = -ENOTCONN;
  606. break;
  607. }
  608. if (!timeo) {
  609. ret = -EAGAIN;
  610. break;
  611. }
  612. sk_wait_data(sk, &timeo);
  613. if (signal_pending(current)) {
  614. ret = sock_intr_errno(timeo);
  615. break;
  616. }
  617. continue;
  618. }
  619. tss.len -= ret;
  620. spliced += ret;
  621. if (!timeo)
  622. break;
  623. release_sock(sk);
  624. lock_sock(sk);
  625. if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
  626. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  627. signal_pending(current))
  628. break;
  629. }
  630. release_sock(sk);
  631. if (spliced)
  632. return spliced;
  633. return ret;
  634. }
  635. EXPORT_SYMBOL(tcp_splice_read);
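/*
 * Editor's sketch (userspace view; illustrative, not part of the original
 * file): this routine backs splice(2) when the input fd is a TCP socket, so
 * a zero-copy relay typically moves socket data into a pipe and on to
 * another descriptor (out_fd is assumed):
 *
 *      int pfd[2];
 *      ssize_t n;
 *
 *      pipe(pfd);
 *      n = splice(sock_fd, NULL, pfd[1], NULL, 65536,
 *                 SPLICE_F_MOVE | SPLICE_F_MORE);
 *      if (n > 0)
 *              splice(pfd[0], NULL, out_fd, NULL, n,
 *                     SPLICE_F_MOVE | SPLICE_F_MORE);
 */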
  636. struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
  637. {
  638. struct sk_buff *skb;
  639. /* The TCP header must be at least 32-bit aligned. */
  640. size = ALIGN(size, 4);
  641. skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
  642. if (skb) {
  643. if (sk_wmem_schedule(sk, skb->truesize)) {
  644. /*
  645. * Make sure that we have exactly size bytes
  646. * available to the caller, no more, no less.
  647. */
  648. skb_reserve(skb, skb_tailroom(skb) - size);
  649. return skb;
  650. }
  651. #ifdef CONFIG_HTC_NETWORK_MODIFY
  652. if (IS_ERR(skb) || (!skb)) {
  653. printk(KERN_ERR "[NET] skb is NULL in %s!\n", __func__);
  654. }
  655. else {
  656. __kfree_skb(skb);
  657. }
  658. #else
  659. __kfree_skb(skb);
  660. #endif
  661. } else {
  662. sk->sk_prot->enter_memory_pressure(sk);
  663. sk_stream_moderate_sndbuf(sk);
  664. }
  665. return NULL;
  666. }
  667. static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
  668. int large_allowed)
  669. {
  670. struct tcp_sock *tp = tcp_sk(sk);
  671. u32 xmit_size_goal, old_size_goal;
  672. xmit_size_goal = mss_now;
  673. if (large_allowed && sk_can_gso(sk)) {
  674. xmit_size_goal = ((sk->sk_gso_max_size - 1) -
  675. inet_csk(sk)->icsk_af_ops->net_header_len -
  676. inet_csk(sk)->icsk_ext_hdr_len -
  677. tp->tcp_header_len);
  678. xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
  679. /* We try hard to avoid divides here */
  680. old_size_goal = tp->xmit_size_goal_segs * mss_now;
  681. if (likely(old_size_goal <= xmit_size_goal &&
  682. old_size_goal + mss_now > xmit_size_goal)) {
  683. xmit_size_goal = old_size_goal;
  684. } else {
  685. tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
  686. xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
  687. }
  688. }
  689. return max(xmit_size_goal, mss_now);
  690. }
  691. static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
  692. {
  693. int mss_now;
  694. mss_now = tcp_current_mss(sk);
  695. *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
  696. return mss_now;
  697. }
  698. static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
  699. size_t psize, int flags)
  700. {
  701. struct tcp_sock *tp = tcp_sk(sk);
  702. int mss_now, size_goal;
  703. int err;
  704. ssize_t copied;
  705. long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  706. /* Wait for a connection to finish. */
  707. if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
  708. if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
  709. goto out_err;
  710. clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  711. mss_now = tcp_send_mss(sk, &size_goal, flags);
  712. copied = 0;
  713. err = -EPIPE;
  714. if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
  715. goto out_err;
  716. while (psize > 0) {
  717. struct sk_buff *skb = tcp_write_queue_tail(sk);
  718. struct page *page = pages[poffset / PAGE_SIZE];
  719. int copy, i, can_coalesce;
  720. int offset = poffset % PAGE_SIZE;
  721. int size = min_t(size_t, psize, PAGE_SIZE - offset);
  722. if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
  723. new_segment:
  724. if (!sk_stream_memory_free(sk))
  725. goto wait_for_sndbuf;
  726. skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
  727. if (!skb)
  728. goto wait_for_memory;
  729. skb_entail(sk, skb);
  730. copy = size_goal;
  731. }
  732. if (copy > size)
  733. copy = size;
  734. i = skb_shinfo(skb)->nr_frags;
  735. can_coalesce = skb_can_coalesce(skb, i, page, offset);
  736. if (!can_coalesce && i >= MAX_SKB_FRAGS) {
  737. tcp_mark_push(tp, skb);
  738. goto new_segment;
  739. }
  740. if (!sk_wmem_schedule(sk, copy))
  741. goto wait_for_memory;
  742. if (can_coalesce) {
  743. skb_shinfo(skb)->frags[i - 1].size += copy;
  744. } else {
  745. get_page(page);
  746. skb_fill_page_desc(skb, i, page, offset, copy);
  747. }
  748. skb->len += copy;
  749. skb->data_len += copy;
  750. skb->truesize += copy;
  751. sk->sk_wmem_queued += copy;
  752. sk_mem_charge(sk, copy);
  753. skb->ip_summed = CHECKSUM_PARTIAL;
  754. tp->write_seq += copy;
  755. TCP_SKB_CB(skb)->end_seq += copy;
  756. skb_shinfo(skb)->gso_segs = 0;
  757. if (!copied)
  758. TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
  759. copied += copy;
  760. poffset += copy;
  761. if (!(psize -= copy))
  762. goto out;
  763. if (skb->len < size_goal || (flags & MSG_OOB))
  764. continue;
  765. if (forced_push(tp)) {
  766. tcp_mark_push(tp, skb);
  767. __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
  768. } else if (skb == tcp_send_head(sk))
  769. tcp_push_one(sk, mss_now);
  770. continue;
  771. wait_for_sndbuf:
  772. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  773. wait_for_memory:
  774. tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
  775. if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
  776. goto do_error;
  777. mss_now = tcp_send_mss(sk, &size_goal, flags);
  778. }
  779. out:
  780. if (copied)
  781. tcp_push(sk, flags, mss_now, tp->nonagle);
  782. return copied;
  783. do_error:
  784. if (copied)
  785. goto out;
  786. out_err:
  787. return sk_stream_error(sk, flags, err);
  788. }
  789. int tcp_sendpage(struct sock *sk, struct page *page, int offset,
  790. size_t size, int flags)
  791. {
  792. ssize_t res;
  793. if (!(sk->sk_route_caps & NETIF_F_SG) ||
  794. !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
  795. return sock_no_sendpage(sk->sk_socket, page, offset, size,
  796. flags);
  797. lock_sock(sk);
  798. res = do_tcp_sendpages(sk, &page, offset, size, flags);
  799. release_sock(sk);
  800. return res;
  801. }
  802. EXPORT_SYMBOL(tcp_sendpage);
  803. #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
  804. #define TCP_OFF(sk) (sk->sk_sndmsg_off)
  805. static inline int select_size(struct sock *sk, int sg)
  806. {
  807. struct tcp_sock *tp = tcp_sk(sk);
  808. int tmp = tp->mss_cache;
  809. if (sg) {
  810. if (sk_can_gso(sk))
  811. tmp = 0;
  812. else {
  813. int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
  814. if (tmp >= pgbreak &&
  815. tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
  816. tmp = pgbreak;
  817. }
  818. }
  819. return tmp;
  820. }
  821. int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  822. size_t size)
  823. {
  824. struct iovec *iov;
  825. struct tcp_sock *tp = tcp_sk(sk);
  826. struct sk_buff *skb;
  827. int iovlen, flags;
  828. int mss_now, size_goal;
  829. int sg, err, copied;
  830. long timeo;
  831. lock_sock(sk);
  832. flags = msg->msg_flags;
  833. timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  834. /* Wait for a connection to finish. */
  835. if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
  836. if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
  837. goto out_err;
  838. /* This should be in poll */
  839. clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  840. mss_now = tcp_send_mss(sk, &size_goal, flags);
  841. /* Ok commence sending. */
  842. iovlen = msg->msg_iovlen;
  843. iov = msg->msg_iov;
  844. copied = 0;
  845. err = -EPIPE;
  846. if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
  847. goto out_err;
  848. sg = sk->sk_route_caps & NETIF_F_SG;
  849. while (--iovlen >= 0) {
  850. size_t seglen = iov->iov_len;
  851. unsigned char __user *from = iov->iov_base;
  852. iov++;
  853. while (seglen > 0) {
  854. int copy = 0;
  855. int max = size_goal;
  856. skb = tcp_write_queue_tail(sk);
  857. if (tcp_send_head(sk)) {
  858. if (skb->ip_summed == CHECKSUM_NONE)
  859. max = mss_now;
  860. copy = max - skb->len;
  861. }
  862. if (copy <= 0) {
  863. new_segment:
  864. /* Allocate new segment. If the interface is SG,
  865. * allocate skb fitting to single page.
  866. */
  867. if (!sk_stream_memory_free(sk))
  868. goto wait_for_sndbuf;
  869. skb = sk_stream_alloc_skb(sk,
  870. select_size(sk, sg),
  871. sk->sk_allocation);
  872. if (!skb)
  873. goto wait_for_memory;
  874. /*
  875. * Check whether we can use HW checksum.
  876. */
  877. if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
  878. skb->ip_summed = CHECKSUM_PARTIAL;
  879. skb_entail(sk, skb);
  880. copy = size_goal;
  881. max = size_goal;
  882. }
  883. /* Try to append data to the end of skb. */
  884. if (copy > seglen)
  885. copy = seglen;
  886. /* Where to copy to? */
  887. if (skb_tailroom(skb) > 0) {
  888. /* We have some space in skb head. Superb! */
  889. if (copy > skb_tailroom(skb))
  890. copy = skb_tailroom(skb);
  891. err = skb_add_data_nocache(sk, skb, from, copy);
  892. if (err)
  893. goto do_fault;
  894. } else {
  895. int merge = 0;
  896. int i = skb_shinfo(skb)->nr_frags;
  897. struct page *page = TCP_PAGE(sk);
  898. int off = TCP_OFF(sk);
  899. if (skb_can_coalesce(skb, i, page, off) &&
  900. off != PAGE_SIZE) {
  901. /* We can extend the last page
  902. * fragment. */
  903. merge = 1;
  904. } else if (i == MAX_SKB_FRAGS || !sg) {
  905. /* Need to add new fragment and cannot
  906. * do this because interface is non-SG,
  907. * or because all the page slots are
  908. * busy. */
  909. tcp_mark_push(tp, skb);
  910. goto new_segment;
  911. } else if (page) {
  912. if (off == PAGE_SIZE) {
  913. put_page(page);
  914. TCP_PAGE(sk) = page = NULL;
  915. off = 0;
  916. }
  917. } else
  918. off = 0;
  919. if (copy > PAGE_SIZE - off)
  920. copy = PAGE_SIZE - off;
  921. if (!sk_wmem_schedule(sk, copy))
  922. goto wait_for_memory;
  923. if (!page) {
  924. /* Allocate new cache page. */
  925. if (!(page = sk_stream_alloc_page(sk)))
  926. goto wait_for_memory;
  927. }
  928. /* Time to copy data. We are close to
  929. * the end! */
  930. err = skb_copy_to_page_nocache(sk, from, skb,
  931. page, off, copy);
  932. if (err) {
  933. /* If this page was new, give it to the
  934. * socket so it does not get leaked.
  935. */
  936. if (!TCP_PAGE(sk)) {
  937. TCP_PAGE(sk) = page;
  938. TCP_OFF(sk) = 0;
  939. }
  940. goto do_error;
  941. }
  942. /* Update the skb. */
  943. if (merge) {
  944. skb_shinfo(skb)->frags[i - 1].size +=
  945. copy;
  946. } else {
  947. skb_fill_page_desc(skb, i, page, off, copy);
  948. if (TCP_PAGE(sk)) {
  949. get_page(page);
  950. } else if (off + copy < PAGE_SIZE) {
  951. get_page(page);
  952. TCP_PAGE(sk) = page;
  953. }
  954. }
  955. TCP_OFF(sk) = off + copy;
  956. }
  957. if (!copied)
  958. TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
  959. tp->write_seq += copy;
  960. TCP_SKB_CB(skb)->end_seq += copy;
  961. skb_shinfo(skb)->gso_segs = 0;
  962. from += copy;
  963. copied += copy;
  964. if ((seglen -= copy) == 0 && iovlen == 0)
  965. goto out;
  966. if (skb->len < max || (flags & MSG_OOB))
  967. continue;
  968. if (forced_push(tp)) {
  969. tcp_mark_push(tp, skb);
  970. __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
  971. } else if (skb == tcp_send_head(sk))
  972. tcp_push_one(sk, mss_now);
  973. continue;
  974. wait_for_sndbuf:
  975. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  976. wait_for_memory:
  977. if (copied)
  978. tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
  979. if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
  980. goto do_error;
  981. mss_now = tcp_send_mss(sk, &size_goal, flags);
  982. }
  983. }
  984. out:
  985. if (copied)
  986. tcp_push(sk, flags, mss_now, tp->nonagle);
  987. release_sock(sk);
  988. if (copied > 0)
  989. uid_stat_tcp_snd(current_uid(), copied);
  990. return copied;
  991. do_fault:
  992. if (!skb->len) {
  993. tcp_unlink_write_queue(skb, sk);
  994. /* It is the one place in all of TCP, except connection
  995. * reset, where we can be unlinking the send_head.
  996. */
  997. tcp_check_send_head(sk, skb);
  998. sk_wmem_free_skb(sk, skb);
  999. }
  1000. do_error:
  1001. if (copied)
  1002. goto out;
  1003. out_err:
  1004. err = sk_stream_error(sk, flags, err);
  1005. release_sock(sk);
  1006. return err;
  1007. }
  1008. EXPORT_SYMBOL(tcp_sendmsg);
  1009. /*
  1010. * Handle reading urgent data. BSD has very simple semantics for
  1011. * this, no blocking and very strange errors 8)
  1012. */
  1013. static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
  1014. {
  1015. struct tcp_sock *tp = tcp_sk(sk);
  1016. /* No URG data to read. */
  1017. if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
  1018. tp->urg_data == TCP_URG_READ)
  1019. return -EINVAL; /* Yes this is right ! */
  1020. if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
  1021. return -ENOTCONN;
  1022. if (tp->urg_data & TCP_URG_VALID) {
  1023. int err = 0;
  1024. char c = tp->urg_data;
  1025. if (!(flags & MSG_PEEK))
  1026. tp->urg_data = TCP_URG_READ;
  1027. /* Read urgent data. */
  1028. msg->msg_flags |= MSG_OOB;
  1029. if (len > 0) {
  1030. if (!(flags & MSG_TRUNC))
  1031. err = memcpy_toiovec(msg->msg_iov, &c, 1);
  1032. len = 1;
  1033. } else
  1034. msg->msg_flags |= MSG_TRUNC;
  1035. return err ? -EFAULT : len;
  1036. }
  1037. if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
  1038. return 0;
  1039. /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
  1040. * the available implementations agree in this case:
  1041. * this call should never block, independent of the
  1042. * blocking state of the socket.
  1043. * Mike <pall@rz.uni-karlsruhe.de>
  1044. */
  1045. return -EAGAIN;
  1046. }
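/*
 * Editor's sketch (userspace view; illustrative, not part of the original
 * file): this path serves recv(2) with MSG_OOB when SO_OOBINLINE is off.
 * As the comment notes it never blocks: with no urgent data it fails with
 * EINVAL, and with urgent data signalled but the byte not yet arrived it
 * fails with EAGAIN:
 *
 *      char oob;
 *      ssize_t n = recv(sock_fd, &oob, 1, MSG_OOB);
 *
 *      if (n == 1)
 *              handle_urgent_byte(oob);        // hypothetical handler
 *      else if (n < 0 && errno == EAGAIN)
 *              ;                               // signalled but not yet received
 */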
  1047. /* Clean up the receive buffer for full frames taken by the user,
  1048. * then send an ACK if necessary. COPIED is the number of bytes
  1049. * tcp_recvmsg has given to the user so far, it speeds up the
  1050. * calculation of whether or not we must ACK for the sake of
  1051. * a window update.
  1052. */
  1053. void tcp_cleanup_rbuf(struct sock *sk, int copied)
  1054. {
  1055. struct tcp_sock *tp = tcp_sk(sk);
  1056. int time_to_ack = 0;
  1057. #if TCP_DEBUG
  1058. struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
  1059. WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
  1060. "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
  1061. tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
  1062. #endif
  1063. if (inet_csk_ack_scheduled(sk)) {
  1064. const struct inet_connection_sock *icsk = inet_csk(sk);
  1065. /* Delayed ACKs frequently hit locked sockets during bulk
  1066. * receive. */
  1067. if (icsk->icsk_ack.blocked ||
  1068. /* Once-per-two-segments ACK was not sent by tcp_input.c */
  1069. tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
  1070. /*
  1071. * If this read emptied read buffer, we send ACK, if
  1072. * connection is not bidirectional, user drained
  1073. * receive buffer and there was a small segment
  1074. * in queue.
  1075. */
  1076. (copied > 0 &&
  1077. ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
  1078. ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
  1079. !icsk->icsk_ack.pingpong)) &&
  1080. !atomic_read(&sk->sk_rmem_alloc)))
  1081. time_to_ack = 1;
  1082. }
  1083. /* We send an ACK if we can now advertise a non-zero window
  1084. * which has been raised "significantly".
  1085. *
  1086. * Even if window raised up to infinity, do not send window open ACK
  1087. * in states, where we will not receive more. It is useless.
  1088. */
  1089. if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
  1090. __u32 rcv_window_now = tcp_receive_window(tp);
  1091. /* Optimize, __tcp_select_window() is not cheap. */
  1092. if (2*rcv_window_now <= tp->window_clamp) {
  1093. __u32 new_window = __tcp_select_window(sk);
  1094. /* Send ACK now, if this read freed lots of space
  1095. * in our buffer. Certainly, new_window is new window.
  1096. * We can advertise it now, if it is not less than current one.
  1097. * "Lots" means "at least twice" here.
  1098. */
  1099. if (new_window && new_window >= 2 * rcv_window_now)
  1100. time_to_ack = 1;
  1101. }
  1102. }
  1103. if (time_to_ack)
  1104. tcp_send_ack(sk);
  1105. }
  1106. static void tcp_prequeue_process(struct sock *sk)
  1107. {
  1108. struct sk_buff *skb;
  1109. struct tcp_sock *tp = tcp_sk(sk);
  1110. NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
  1111. /* RX process wants to run with disabled BHs, though it is not
  1112. * necessary */
  1113. local_bh_disable();
  1114. while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
  1115. sk_backlog_rcv(sk, skb);
  1116. local_bh_enable();
  1117. /* Clear memory counter. */
  1118. tp->ucopy.memory = 0;
  1119. }
  1120. #ifdef CONFIG_NET_DMA
  1121. static void tcp_service_net_dma(struct sock *sk, bool wait)
  1122. {
  1123. dma_cookie_t done, used;
  1124. dma_cookie_t last_issued;
  1125. struct tcp_sock *tp = tcp_sk(sk);
  1126. if (!tp->ucopy.dma_chan)
  1127. return;
  1128. last_issued = tp->ucopy.dma_cookie;
  1129. dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
  1130. do {
  1131. if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
  1132. last_issued, &done,
  1133. &used) == DMA_SUCCESS) {
  1134. /* Safe to free early-copied skbs now */
  1135. __skb_queue_purge(&sk->sk_async_wait_queue);
  1136. break;
  1137. } else {
  1138. struct sk_buff *skb;
  1139. while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
  1140. (dma_async_is_complete(skb->dma_cookie, done,
  1141. used) == DMA_SUCCESS)) {
  1142. __skb_dequeue(&sk->sk_async_wait_queue);
  1143. kfree_skb(skb);
  1144. }
  1145. }
  1146. } while (wait);
  1147. }
  1148. #endif
  1149. static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
  1150. {
  1151. struct sk_buff *skb;
  1152. u32 offset;
  1153. skb_queue_walk(&sk->sk_receive_queue, skb) {
  1154. offset = seq - TCP_SKB_CB(skb)->seq;
  1155. if (tcp_hdr(skb)->syn)
  1156. offset--;
  1157. if (offset < skb->len || tcp_hdr(skb)->fin) {
  1158. *off = offset;
  1159. return skb;
  1160. }
  1161. }
  1162. return NULL;
  1163. }
  1164. /*
  1165. * This routine provides an alternative to tcp_recvmsg() for routines
  1166. * that would like to handle copying from skbuffs directly in 'sendfile'
  1167. * fashion.
  1168. * Note:
  1169. * - It is assumed that the socket was locked by the caller.
  1170. * - The routine does not block.
  1171. * - At present, there is no support for reading OOB data
  1172. * or for 'peeking' the socket using this routine
  1173. * (although both would be easy to implement).
  1174. */
  1175. int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
  1176. sk_read_actor_t recv_actor)
  1177. {
  1178. struct sk_buff *skb;
  1179. struct tcp_sock *tp = tcp_sk(sk);
  1180. u32 seq = tp->copied_seq;
  1181. u32 offset;
  1182. int copied = 0;
  1183. if (sk->sk_state == TCP_LISTEN)
  1184. return -ENOTCONN;
  1185. while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
  1186. if (offset < skb->len) {
  1187. int used;
  1188. size_t len;
  1189. len = skb->len - offset;
  1190. /* Stop reading if we hit a patch of urgent data */
  1191. if (tp->urg_data) {
  1192. u32 urg_offset = tp->urg_seq - seq;
  1193. if (urg_offset < len)
  1194. len = urg_offset;
  1195. if (!len)
  1196. break;
  1197. }
  1198. used = recv_actor(desc, skb, offset, len);
  1199. if (used < 0) {
  1200. if (!copied)
  1201. copied = used;
  1202. break;
  1203. } else if (used <= len) {
  1204. seq += used;
  1205. copied += used;
  1206. offset += used;
  1207. }
  1208. /*
  1209. * If recv_actor drops the lock (e.g. TCP splice
  1210. * receive) the skb pointer might be invalid when
  1211. * getting here: tcp_collapse might have deleted it
  1212. * while aggregating skbs from the socket queue.
  1213. */
  1214. skb = tcp_recv_skb(sk, seq-1, &offset);
  1215. if (!skb || (offset+1 != skb->len))
  1216. break;
  1217. }
  1218. if (tcp_hdr(skb)->fin) {
  1219. sk_eat_skb(sk, skb, 0);
  1220. ++seq;
  1221. break;
  1222. }
  1223. sk_eat_skb(sk, skb, 0);
  1224. if (!desc->count)
  1225. break;
  1226. tp->copied_seq = seq;
  1227. }
  1228. tp->copied_seq = seq;
  1229. tcp_rcv_space_adjust(sk);
  1230. /* Clean up data we have read: This will do ACK frames. */
  1231. if (copied > 0) {
  1232. tcp_cleanup_rbuf(sk, copied);
  1233. uid_stat_tcp_rcv(current_uid(), copied);
  1234. }
  1235. return copied;
  1236. }
  1237. EXPORT_SYMBOL(tcp_read_sock);
  1238. /*
  1239. * This routine copies from a sock struct into the user buffer.
  1240. *
  1241. * Technical note: in 2.3 we work on _locked_ socket, so that
  1242. * tricks with *seq access order and skb->users are not required.
  1243. * Probably, code can be easily improved even more.
  1244. */
  1245. int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  1246. size_t len, int nonblock, int flags, int *addr_len)
  1247. {
  1248. struct tcp_sock *tp = tcp_sk(sk);
  1249. int copied = 0;
  1250. u32 peek_seq;
  1251. u32 *seq;
  1252. unsigned long used;
  1253. int err;
  1254. int target; /* Read at least this many bytes */
  1255. long timeo;
  1256. struct task_struct *user_recv = NULL;
  1257. int copied_early = 0;
  1258. struct sk_buff *skb;
  1259. u32 urg_hole = 0;
  1260. lock_sock(sk);
  1261. err = -ENOTCONN;
  1262. if (sk->sk_state == TCP_LISTEN)
  1263. goto out;
  1264. timeo = sock_rcvtimeo(sk, nonblock);
  1265. /* Urgent data needs to be handled specially. */
  1266. if (flags & MSG_OOB)
  1267. goto recv_urg;
  1268. seq = &tp->copied_seq;
  1269. if (flags & MSG_PEEK) {
  1270. peek_seq = tp->copied_seq;
  1271. seq = &peek_seq;
  1272. }
  1273. target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
  1274. #ifdef CONFIG_NET_DMA
  1275. tp->ucopy.dma_chan = NULL;
  1276. preempt_disable();
  1277. skb = skb_peek_tail(&sk->sk_receive_queue);
  1278. {
  1279. int available = 0;
  1280. if (skb)
  1281. available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
  1282. if ((available < target) &&
  1283. (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
  1284. !sysctl_tcp_low_latency &&
  1285. dma_find_channel(DMA_MEMCPY)) {
  1286. preempt_enable_no_resched();
  1287. tp->ucopy.pinned_list =
  1288. dma_pin_iovec_pages(msg->msg_iov, len);
  1289. } else {
  1290. preempt_enable_no_resched();
  1291. }
  1292. }
  1293. #endif
  1294. do {
  1295. u32 offset;
  1296. /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
  1297. if (tp->urg_data && tp->urg_seq == *seq) {
  1298. if (copied)
  1299. break;
  1300. if (signal_pending(current)) {
  1301. copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
  1302. break;
  1303. }
  1304. }
  1305. /* Next get a buffer. */
  1306. skb_queue_walk(&sk->sk_receive_queue, skb) {
  1307. /* Now that we have two receive queues this
  1308. * shouldn't happen.
  1309. */
  1310. if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
  1311. "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
  1312. *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
  1313. flags))
  1314. break;
  1315. offset = *seq - TCP_SKB_CB(skb)->seq;
  1316. if (tcp_hdr(skb)->syn)
  1317. offset--;
  1318. if (offset < skb->len)
  1319. goto found_ok_skb;
  1320. if (tcp_hdr(skb)->fin)
  1321. goto found_fin_ok;
  1322. WARN(!(flags & MSG_PEEK),
  1323. "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
  1324. *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
  1325. }
  1326. /* Well, if we have backlog, try to process it now yet. */
  1327. if (copied >= target && !sk->sk_backlog.tail)
  1328. break;
  1329. if (copied) {
  1330. if (sk->sk_err ||
  1331. sk->sk_state == TCP_CLOSE ||
  1332. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  1333. !timeo ||
  1334. signal_pending(current))
  1335. break;
  1336. } else {
  1337. if (sock_flag(sk, SOCK_DONE))
  1338. break;
  1339. if (sk->sk_err) {
  1340. copied = sock_error(sk);
  1341. break;
  1342. }
  1343. if (sk->sk_shutdown & RCV_SHUTDOWN)
  1344. break;
  1345. if (sk->sk_state == TCP_CLOSE) {
  1346. if (!sock_flag(sk, SOCK_DONE)) {
  1347. /* This occurs when user tries to read
  1348. * from never connected socket.
  1349. */
  1350. copied = -ENOTCONN;
  1351. break;
  1352. }
  1353. break;
  1354. }
  1355. if (!timeo) {
  1356. copied = -EAGAIN;
  1357. break;
  1358. }
  1359. if (signal_pending(current)) {
  1360. copied = sock_intr_errno(timeo);
  1361. break;
  1362. }
  1363. }
  1364. tcp_cleanup_rbuf(sk, copied);
  1365. if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
  1366. /* Install new reader */
  1367. if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
  1368. user_recv = current;
  1369. tp->ucopy.task = user_recv;
  1370. tp->ucopy.iov = msg->msg_iov;
  1371. }
  1372. tp->ucopy.len = len;
  1373. WARN_ON(tp->copied_seq != tp->rcv_nxt &&
  1374. !(flags & (MSG_PEEK | MSG_TRUNC)));
  1375. /* Ugly... If prequeue is not empty, we have to
  1376. * process it before releasing socket, otherwise
  1377. * order will be broken at second iteration.
  1378. * More elegant solution is required!!!
  1379. *
  1380. * Look: we have the following (pseudo)queues:
  1381. *
  1382. * 1. packets in flight
  1383. * 2. backlog
  1384. * 3. prequeue
  1385. * 4. receive_queue
  1386. *
  1387. * Each queue can be processed only if the next ones
  1388. * are empty. At this point we have empty receive_queue.
  1389. * But prequeue _can_ be not empty after 2nd iteration,
  1390. * when we jumped to start of loop because backlog
  1391. * processing added something to receive_queue.
  1392. * We cannot release_sock(), because backlog contains
  1393. * packets arrived _after_ prequeued ones.
  1394. *
  1395. * Shortly, algorithm is clear --- to process all
  1396. * the queues in order. We could make it more directly,
  1397. * requeueing packets from backlog to prequeue, if
  1398. * is not empty. It is more elegant, but eats cycles,
  1399. * unfortunately.
  1400. */
  1401. if (!skb_queue_empty(&tp->ucopy.prequeue))
  1402. goto do_prequeue;
  1403. /* __ Set realtime policy in scheduler __ */
  1404. }
  1405. #ifdef CONFIG_NET_DMA
  1406. if (tp->ucopy.dma_chan)
  1407. dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
  1408. #endif
  1409. if (copied >= target) {
  1410. /* Do not sleep, just process backlog. */
  1411. release_sock(sk);
  1412. lock_sock(sk);
  1413. } else
  1414. sk_wait_data(sk, &timeo);
  1415. #ifdef CONFIG_NET_DMA
  1416. tcp_service_net_dma(sk, false); /* Don't block */
  1417. tp->ucopy.wakeup = 0;
  1418. #endif
  1419. if (user_recv) {
  1420. int chunk;
  1421. /* __ Restore normal policy in scheduler __ */
  1422. if ((chunk = len - tp->ucopy.len) != 0) {
  1423. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
  1424. len -= chunk;
  1425. copied += chunk;
  1426. }
  1427. if (tp->rcv_nxt == tp->copied_seq &&
  1428. !skb_queue_empty(&tp->ucopy.prequeue)) {
  1429. do_prequeue:
  1430. tcp_prequeue_process(sk);
  1431. if ((chunk = len - tp->ucopy.len) != 0) {
  1432. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
  1433. len -= chunk;
  1434. copied += chunk;
  1435. }
  1436. }
  1437. }
  1438. if ((flags & MSG_PEEK) &&
  1439. (peek_seq - copied - urg_hole != tp->copied_seq)) {
  1440. if (net_ratelimit())
  1441. printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
  1442. current->comm, task_pid_nr(current));
  1443. peek_seq = tp->copied_seq;
  1444. }
  1445. continue;
  1446. found_ok_skb:
  1447. /* Ok so how much can we use? */
  1448. used = skb->len - offset;
  1449. if (len < used)
  1450. used = len;
  1451. /* Do we have urgent data here? */
  1452. if (tp->urg_data) {
  1453. u32 urg_offset = tp->urg_seq - *seq;
  1454. if (urg_offset < used) {
  1455. if (!urg_offset) {
  1456. if (!sock_flag(sk, SOCK_URGINLINE)) {
  1457. ++*seq;
  1458. urg_hole++;
  1459. offset++;
  1460. used--;
  1461. if (!used)
  1462. goto skip_copy;
  1463. }
  1464. } else
  1465. used = urg_offset;
  1466. }
  1467. }
  1468. if (!(flags & MSG_TRUNC)) {
  1469. #ifdef CONFIG_NET_DMA
  1470. if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
  1471. tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
  1472. if (tp->ucopy.dma_chan) {
  1473. tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
  1474. tp->ucopy.dma_chan, skb, offset,
  1475. msg->msg_iov, used,
  1476. tp->ucopy.pinned_list);
  1477. if (tp->ucopy.dma_cookie < 0) {
  1478. printk(KERN_ALERT "dma_cookie < 0\n");
  1479. /* Exception. Bailout! */
  1480. if (!copied)
  1481. copied = -EFAULT;
  1482. break;
  1483. }
  1484. dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
  1485. if ((offset + used) == skb->len)
  1486. copied_early = 1;
  1487. } else
  1488. #endif
  1489. {
  1490. err = skb_copy_datagram_iovec(skb, offset,
  1491. msg->msg_iov, used);
  1492. if (err) {
  1493. /* Exception. Bailout! */
  1494. if (!copied)
  1495. copied = -EFAULT;
  1496. break;
  1497. }
  1498. }
  1499. }
  1500. *seq += used;
  1501. copied += used;
  1502. len -= used;
  1503. tcp_rcv_space_adjust(sk);
  1504. skip_copy:
  1505. if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
  1506. tp->urg_data = 0;
  1507. tcp_fast_path_check(sk);
  1508. }
  1509. if (used + offset < skb->len)
  1510. continue;
  1511. if (tcp_hdr(skb)->fin)
  1512. goto found_fin_ok;
  1513. if (!(flags & MSG_PEEK)) {
  1514. sk_eat_skb(sk, skb, copied_early);
  1515. copied_early = 0;
  1516. }
  1517. continue;
  1518. found_fin_ok:
  1519. /* Process the FIN. */
  1520. ++*seq;
  1521. if (!(flags & MSG_PEEK)) {
  1522. sk_eat_skb(sk, skb, copied_early);
  1523. copied_early = 0;
  1524. }
  1525. break;
  1526. } while (len > 0);
  1527. if (user_recv) {
  1528. if (!skb_queue_empty(&tp->ucopy.prequeue)) {
  1529. int chunk;
  1530. tp->ucopy.len = copied > 0 ? len : 0;
  1531. tcp_prequeue_process(sk);
  1532. if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
  1533. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
  1534. len -= chunk;
  1535. copied += chunk;
  1536. }
  1537. }
  1538. tp->ucopy.task = NULL;
  1539. tp->ucopy.len = 0;
  1540. }
  1541. #ifdef CONFIG_NET_DMA
  1542. tcp_service_net_dma(sk, true); /* Wait for queue to drain */
  1543. tp->ucopy.dma_chan = NULL;
  1544. if (tp->ucopy.pinned_list) {
  1545. dma_unpin_iovec_pages(tp->ucopy.pinned_list);
  1546. tp->ucopy.pinned_list = NULL;
  1547. }
  1548. #endif
  1549. /* According to UNIX98, msg_name/msg_namelen are ignored
  1550. * on connected socket. I was just happy when found this 8) --ANK
  1551. */
  1552. /* Clean up data we have read: This will do ACK frames. */
  1553. tcp_cleanup_rbuf(sk, copied);
  1554. release_sock(sk);
  1555. if (copied > 0)
  1556. uid_stat_tcp_rcv(current_uid(), copied);
  1557. return copied;
  1558. out:
  1559. release_sock(sk);
  1560. return err;
  1561. recv_urg:
  1562. err = tcp_recv_urg(sk, msg, len, flags);
  1563. if (err > 0)
  1564. uid_stat_tcp_rcv(current_uid(), err);
  1565. goto out;
  1566. }
  1567. EXPORT_SYMBOL(tcp_recvmsg);
  1568. void tcp_set_state(struct sock *sk, int state)
  1569. {
  1570. int oldstate = sk->sk_state;
  1571. switch (state) {
  1572. case TCP_ESTABLISHED:
  1573. if (oldstate != TCP_ESTABLISHED)
  1574. TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
  1575. break;
  1576. case TCP_CLOSE:
  1577. if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
  1578. TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
  1579. sk->sk_prot->unhash(sk);
  1580. if (inet_csk(sk)->icsk_bind_hash &&
  1581. !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
  1582. inet_put_port(sk);
  1583. /* fall through */
  1584. default:
  1585. if (oldstate == TCP_ESTABLISHED)
  1586. TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
  1587. }
  1588. /* Change state AFTER socket is unhashed to avoid closed
  1589. * socket sitting in hash tables.
  1590. */
  1591. sk->sk_state = state;
  1592. #ifdef STATE_TRACE
  1593. SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
  1594. #endif
  1595. }
  1596. EXPORT_SYMBOL_GPL(tcp_set_state);
  1597. /*
  1598. * State processing on a close. This implements the state shift for
  1599. * sending our FIN frame. Note that we only send a FIN for some
  1600. * states. A shutdown() may have already sent the FIN, or we may be
  1601. * closed.
  1602. */
  1603. static const unsigned char new_state[16] = {
  1604. /* current state: new state: action: */
  1605. /* (Invalid) */ TCP_CLOSE,
  1606. /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  1607. /* TCP_SYN_SENT */ TCP_CLOSE,
  1608. /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  1609. /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
  1610. /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
  1611. /* TCP_TIME_WAIT */ TCP_CLOSE,
  1612. /* TCP_CLOSE */ TCP_CLOSE,
  1613. /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
  1614. /* TCP_LAST_ACK */ TCP_LAST_ACK,
  1615. /* TCP_LISTEN */ TCP_CLOSE,
  1616. /* TCP_CLOSING */ TCP_CLOSING,
  1617. };
  1618. static int tcp_close_state(struct sock *sk)
  1619. {
  1620. int next = (int)new_state[sk->sk_state];
  1621. int ns = next & TCP_STATE_MASK;
  1622. tcp_set_state(sk, ns);
  1623. return next & TCP_ACTION_FIN;
  1624. }
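/*
 * Editor's note (reading of the table above, not in the original source):
 * tcp_close_state() on an ESTABLISHED socket returns
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, i.e. enter FIN_WAIT1 and queue a FIN;
 * in CLOSE_WAIT it yields LAST_ACK plus a FIN; in SYN_SENT, LISTEN or
 * TIME_WAIT it simply drops to TCP_CLOSE with nothing left to send.
 */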
  1625. /*
  1626. * Shutdown the sending side of a connection. Much like close except
  1627. * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
  1628. */
  1629. void tcp_shutdown(struct sock *sk, int how)
  1630. {
  1631. /* We need to grab some memory, and put together a FIN,
  1632. * and then put it into the queue to be sent.
  1633. * Tim…

The listing is truncated here; the remainder of net/ipv4/tcp.c is not shown.