PageRenderTime 92ms CodeModel.GetById 11ms RepoModel.GetById 0ms app.codeStats 1ms

/src/VBox/Devices/Network/slirp/socket.c

https://bitbucket.org/diagiman/vbox-trunk
C | 1697 lines | 1302 code | 133 blank | 262 comment | 283 complexity | 57f04f15b30a46a3d8a1a171ce730429 MD5 | raw file
Possible License(s): BSD-3-Clause, MIT, GPL-3.0, GPL-2.0, MPL-2.0-no-copyleft-exception, LGPL-3.0, LGPL-2.1

Large files are truncated, but you can click here to view the full file

  1. /* $Id$ */
  2. /** @file
  3. * NAT - socket handling.
  4. */
  5. /*
  6. * Copyright (C) 2006-2012 Oracle Corporation
  7. *
  8. * This file is part of VirtualBox Open Source Edition (OSE), as
  9. * available from http://www.virtualbox.org. This file is free software;
  10. * you can redistribute it and/or modify it under the terms of the GNU
  11. * General Public License (GPL) as published by the Free Software
  12. * Foundation, in version 2 as it comes in the "COPYING" file of the
  13. * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
  14. * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
  15. */
  16. /*
  17. * This code is based on:
  18. *
  19. * Copyright (c) 1995 Danny Gasparovski.
  20. *
  21. * Please read the file COPYRIGHT for the
  22. * terms and conditions of the copyright.
  23. */
  24. #include <slirp.h>
  25. #include "ip_icmp.h"
  26. #include "main.h"
  27. #ifdef __sun__
  28. #include <sys/filio.h>
  29. #endif
  30. #include <VBox/vmm/pdmdrv.h>
  31. #if defined (RT_OS_WINDOWS)
  32. #include <iphlpapi.h>
  33. #include <icmpapi.h>
  34. #endif
  35. #ifdef VBOX_WITH_NAT_UDP_SOCKET_CLONE
  36. /**
  37. *
  38. */
  39. struct socket * soCloneUDPSocketWithForegnAddr(PNATState pData, bool fBindSocket, struct socket *pSo, uint32_t u32ForeignAddr)
  40. {
  41. struct socket *pNewSocket = NULL;
  42. LogFlowFunc(("Enter: fBindSocket:%RTbool, so:%R[natsock], u32ForeignAddr:%RTnaipv4\n", fBindSocket, pSo, u32ForeignAddr));
  43. pNewSocket = socreate();
  44. if (!pNewSocket)
  45. {
  46. LogFunc(("Can't create socket\n"));
  47. LogFlowFunc(("Leave: NULL\n"));
  48. return NULL;
  49. }
  50. if (fBindSocket)
  51. {
  52. if (udp_attach(pData, pNewSocket, 0) <= 0)
  53. {
  54. sofree(pData, pNewSocket);
  55. LogFunc(("Can't attach fresh created socket\n"));
  56. return NULL;
  57. }
  58. }
  59. else
  60. {
  61. pNewSocket->so_cloneOf = (struct socket *)pSo;
  62. pNewSocket->s = pSo->s;
  63. insque(pData, pNewSocket, &udb);
  64. }
  65. pNewSocket->so_laddr = pSo->so_laddr;
  66. pNewSocket->so_lport = pSo->so_lport;
  67. pNewSocket->so_faddr.s_addr = u32ForeignAddr;
  68. pNewSocket->so_fport = pSo->so_fport;
  69. pSo->so_cCloneCounter++;
  70. LogFlowFunc(("Leave: %R[natsock]\n", pNewSocket));
  71. return pNewSocket;
  72. }
  73. struct socket *soLookUpClonedUDPSocket(PNATState pData, const struct socket *pcSo, uint32_t u32ForeignAddress)
  74. {
  75. struct socket *pSoClone = NULL;
  76. LogFlowFunc(("Enter: pcSo:%R[natsock], u32ForeignAddress:%RTnaipv4\n", pcSo, u32ForeignAddress));
  77. for (pSoClone = udb.so_next; pSoClone != &udb; pSoClone = pSoClone->so_next)
  78. {
  79. if ( pSoClone->so_cloneOf
  80. && pSoClone->so_cloneOf == pcSo
  81. && pSoClone->so_lport == pcSo->so_lport
  82. && pSoClone->so_fport == pcSo->so_fport
  83. && pSoClone->so_laddr.s_addr == pcSo->so_laddr.s_addr
  84. && pSoClone->so_faddr.s_addr == u32ForeignAddress)
  85. goto done;
  86. }
  87. pSoClone = NULL;
  88. done:
  89. LogFlowFunc(("Leave: pSoClone: %R[natsock]\n", pSoClone));
  90. return pSoClone;
  91. }
  92. #endif
  93. #ifdef VBOX_WITH_NAT_SEND2HOME
  94. DECLINLINE(bool) slirpSend2Home(PNATState pData, struct socket *pSo, const void *pvBuf, uint32_t cbBuf, int iFlags)
  95. {
  96. int idxAddr;
  97. int ret = 0;
  98. bool fSendDone = false;
  99. LogFlowFunc(("Enter pSo:%R[natsock] pvBuf: %p, cbBuf: %d, iFlags: %d\n", pSo, pvBuf, cbBuf, iFlags));
  100. for (idxAddr = 0; idxAddr < pData->cInHomeAddressSize; ++idxAddr)
  101. {
  102. struct socket *pNewSocket = soCloneUDPSocketWithForegnAddr(pData, pSo, pData->pInSockAddrHomeAddress[idxAddr].sin_addr);
  103. AssertReturn((pNewSocket, false));
  104. pData->pInSockAddrHomeAddress[idxAddr].sin_port = pSo->so_fport;
  105. /* @todo: more verbose on errors,
  106. * @note: we shouldn't care if this send fail or not (we're in broadcast).
  107. */
  108. LogFunc(("send %d bytes to %RTnaipv4 from %R[natsock]\n", cbBuf, pData->pInSockAddrHomeAddress[idxAddr].sin_addr.s_addr, pNewSocket));
  109. ret = sendto(pNewSocket->s, pvBuf, cbBuf, iFlags, (struct sockaddr *)&pData->pInSockAddrHomeAddress[idxAddr], sizeof(struct sockaddr_in));
  110. if (ret < 0)
  111. LogFunc(("Failed to send %d bytes to %RTnaipv4\n", cbBuf, pData->pInSockAddrHomeAddress[idxAddr].sin_addr.s_addr));
  112. fSendDone |= ret > 0;
  113. }
  114. LogFlowFunc(("Leave %RTbool\n", fSendDone));
  115. return fSendDone;
  116. }
  117. #endif /* !VBOX_WITH_NAT_SEND2HOME */
  118. static void send_icmp_to_guest(PNATState, char *, size_t, const struct sockaddr_in *);
  119. #ifdef RT_OS_WINDOWS
  120. static void sorecvfrom_icmp_win(PNATState, struct socket *);
  121. #else /* RT_OS_WINDOWS */
  122. static void sorecvfrom_icmp_unix(PNATState, struct socket *);
  123. #endif /* !RT_OS_WINDOWS */
  124. void
  125. so_init()
  126. {
  127. }
  128. struct socket *
  129. solookup(struct socket *head, struct in_addr laddr,
  130. u_int lport, struct in_addr faddr, u_int fport)
  131. {
  132. struct socket *so;
  133. for (so = head->so_next; so != head; so = so->so_next)
  134. {
  135. if ( so->so_lport == lport
  136. && so->so_laddr.s_addr == laddr.s_addr
  137. && so->so_faddr.s_addr == faddr.s_addr
  138. && so->so_fport == fport)
  139. return so;
  140. }
  141. return (struct socket *)NULL;
  142. }
  143. /*
  144. * Create a new socket, initialise the fields
  145. * It is the responsibility of the caller to
  146. * insque() it into the correct linked-list
  147. */
  148. struct socket *
  149. socreate()
  150. {
  151. struct socket *so;
  152. so = (struct socket *)RTMemAllocZ(sizeof(struct socket));
  153. if (so)
  154. {
  155. so->so_state = SS_NOFDREF;
  156. so->s = -1;
  157. #if !defined(RT_OS_WINDOWS)
  158. so->so_poll_index = -1;
  159. #endif
  160. }
  161. return so;
  162. }
/*
 * remque and free a socket, clobber cache
 */
void
sofree(PNATState pData, struct socket *so)
{
    LogFlowFunc(("ENTER:%R[natsock]\n", so));
    /*
     * We should not remove socket when polling routine do the polling
     * instead we mark it for deletion.
     */
    if (so->fUnderPolling)
    {
        so->fShouldBeRemoved = 1;
        LogFlowFunc(("LEAVE:%R[natsock] postponed deletion\n", so));
        return;
    }
    /*
     * Check that we aren't freeing a socket that still has a TCP control
     * block attached — the tcpcb must have been disposed of first.
     */
    Assert(!sototcpcb(so));
    /* Invalidate the last-used-socket caches if they point at us. */
    if (so == tcp_last_so)
        tcp_last_so = &tcb;
    else if (so == udp_last_so)
        udp_last_so = &udb;
    /* libalias notification */
    if (so->so_pvLnk)
        slirpDeleteLinkSocket(so->so_pvLnk);
    /* check if mbuf haven't been already freed */
    if (so->so_m != NULL)
    {
        m_freem(pData, so->so_m);
        so->so_m = NULL;
    }
    /* Only unlink when the socket is actually on a queue (both links set). */
    if (so->so_next && so->so_prev)
    {
        remque(pData, so); /* crashes if so is not in a queue */
        NSOCK_DEC();
    }
    RTMemFree(so);
    LogFlowFuncLeave();
}
/*
 * Read from so's socket into sb_snd, updating all relevant sbuf fields
 * NOTE: This will only be called if it is select()ed for reading, so
 * a read() of 0 (or less) means it's disconnected
 */
#ifndef VBOX_WITH_SLIRP_BSD_SBUF
int
soread(PNATState pData, struct socket *so)
{
    int n, nn, lss, total;
    struct sbuf *sb = &so->so_snd;
    size_t len = sb->sb_datalen - sb->sb_cc;    /* free space in the circular sbuf */
    struct iovec iov[2];
    int mss = so->so_tcpcb->t_maxseg;
    STAM_PROFILE_START(&pData->StatIOread, a);
    STAM_COUNTER_RESET(&pData->StatIORead_in_1);
    STAM_COUNTER_RESET(&pData->StatIORead_in_2);
    QSOCKET_LOCK(tcb);
    SOCKET_LOCK(so);
    QSOCKET_UNLOCK(tcb);
    LogFlow(("soread: so = %R[natsock]\n", so));
    Log2(("%s: so = %R[natsock] so->so_snd = %R[sbuf]\n", __PRETTY_FUNCTION__, so, sb));
    /*
     * No need to check if there's enough room to read.
     * soread wouldn't have been called if there weren't
     */
    len = sb->sb_datalen - sb->sb_cc;
    /*
     * Build up to two iovecs describing the free space of the circular
     * buffer: iov[0] runs from the write pointer to the wrap point (or to
     * the read pointer), iov[1] covers the wrapped-around region at the
     * start of the buffer.
     */
    iov[0].iov_base = sb->sb_wptr;
    iov[1].iov_base = 0;
    iov[1].iov_len = 0;
    if (sb->sb_wptr < sb->sb_rptr)
    {
        /* Free space is a single contiguous region between wptr and rptr. */
        iov[0].iov_len = sb->sb_rptr - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        /* Trim to a whole number of segments so reads stay MSS-aligned. */
        if (iov[0].iov_len > mss)
            iov[0].iov_len -= iov[0].iov_len%mss;
        n = 1;
    }
    else
    {
        /* Free space wraps: first region runs to the end of the buffer. */
        iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        len -= iov[0].iov_len;
        if (len)
        {
            iov[1].iov_base = sb->sb_data;
            iov[1].iov_len = sb->sb_rptr - sb->sb_data;
            if (iov[1].iov_len > len)
                iov[1].iov_len = len;
            total = iov[0].iov_len + iov[1].iov_len;
            if (total > mss)
            {
                /* Shave the remainder (total % mss) so the combined read is
                 * MSS-aligned, preferring to shrink the second region. */
                lss = total % mss;
                if (iov[1].iov_len > lss)
                {
                    iov[1].iov_len -= lss;
                    n = 2;
                }
                else
                {
                    lss -= iov[1].iov_len;
                    iov[0].iov_len -= lss;
                    n = 1;
                }
            }
            else
                n = 2;
        }
        else
        {
            if (iov[0].iov_len > mss)
                iov[0].iov_len -= iov[0].iov_len%mss;
            n = 1;
        }
    }
#ifdef HAVE_READV
    nn = readv(so->s, (struct iovec *)iov, n);
#else
    /* No readv(): read only the first region here; a second recv() below
     * picks up the wrapped part when n == 2. */
    nn = recv(so->s, iov[0].iov_base, iov[0].iov_len, (so->so_tcpcb->t_force? MSG_OOB:0));
#endif
    Log2(("%s: read(1) nn = %d bytes\n", __PRETTY_FUNCTION__, nn));
    Log2(("%s: so = %R[natsock] so->so_snd = %R[sbuf]\n", __PRETTY_FUNCTION__, so, sb));
    if (nn <= 0)
    {
        /*
         * Special case for WSAEnumNetworkEvents: If we receive 0 bytes that
         * _could_ mean that the connection is closed. But we will receive an
         * FD_CLOSE event later if the connection was _really_ closed. With
         * www.youtube.com I see this very often. Closing the socket too early
         * would be dangerous.
         */
        int status;
        unsigned long pending = 0;
        status = ioctlsocket(so->s, FIONREAD, &pending);
        if (status < 0)
            Log(("NAT:%s: error in WSAIoctl: %d\n", __PRETTY_FUNCTION__, errno));
        if (nn == 0 && (pending != 0))
        {
            /* Zero-byte read but data still pending: not a real close. */
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return 0;
        }
        if (   nn < 0
            && soIgnorableErrorCode(errno))
        {
            /* Transient error (e.g. would-block); try again later. */
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return 0;
        }
        else
        {
            /* Detect a tcpcb whose header template was never filled in, so
             * we drop instead of running the normal close state machine. */
            int fUninitiolizedTemplate = 0;
            fUninitiolizedTemplate = RT_BOOL((   sototcpcb(so)
                                              && (  sototcpcb(so)->t_template.ti_src.s_addr == INADDR_ANY
                                                 || sototcpcb(so)->t_template.ti_dst.s_addr == INADDR_ANY)));
            /* nn == 0 means peer has performed an orderly shutdown */
            Log2(("%s: disconnected, nn = %d, errno = %d (%s)\n",
                  __PRETTY_FUNCTION__, nn, errno, strerror(errno)));
            sofcantrcvmore(so);
            if (!fUninitiolizedTemplate)
                tcp_sockclosed(pData, sototcpcb(so));
            else
                tcp_drop(pData, sototcpcb(so), errno);
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return -1;
        }
    }
    STAM_STATS(
        if (n == 1)
        {
            STAM_COUNTER_INC(&pData->StatIORead_in_1);
            STAM_COUNTER_ADD(&pData->StatIORead_in_1_bytes, nn);
        }
        else
        {
            STAM_COUNTER_INC(&pData->StatIORead_in_2);
            STAM_COUNTER_ADD(&pData->StatIORead_in_2_1st_bytes, nn);
        }
    );
#ifndef HAVE_READV
    /*
     * If there was no error, try and read the second time round
     * We read again if n = 2 (ie, there's another part of the buffer)
     * and we read as much as we could in the first read
     * We don't test for <= 0 this time, because there legitimately
     * might not be any more data (since the socket is non-blocking),
     * a close will be detected on next iteration.
     * A return of -1 wont (shouldn't) happen, since it didn't happen above
     */
    if (n == 2 && nn == iov[0].iov_len)
    {
        int ret;
        ret = recv(so->s, iov[1].iov_base, iov[1].iov_len, 0);
        if (ret > 0)
            nn += ret;
        STAM_STATS(
            if (ret > 0)
            {
                STAM_COUNTER_INC(&pData->StatIORead_in_2);
                STAM_COUNTER_ADD(&pData->StatIORead_in_2_2nd_bytes, ret);
            }
        );
    }
    Log2(("%s: read(2) nn = %d bytes\n", __PRETTY_FUNCTION__, nn));
#endif
    /* Update fields */
    sb->sb_cc += nn;
    sb->sb_wptr += nn;
    Log2(("%s: update so_snd (readed nn = %d) %R[sbuf]\n", __PRETTY_FUNCTION__, nn, sb));
    /* Wrap the write pointer when it runs off the end of the buffer. */
    if (sb->sb_wptr >= (sb->sb_data + sb->sb_datalen))
    {
        sb->sb_wptr -= sb->sb_datalen;
        Log2(("%s: alter sb_wptr so_snd = %R[sbuf]\n", __PRETTY_FUNCTION__, sb));
    }
    STAM_PROFILE_STOP(&pData->StatIOread, a);
    SOCKET_UNLOCK(so);
    return nn;
}
  388. #else /* VBOX_WITH_SLIRP_BSD_SBUF */
  389. int
  390. soread(PNATState pData, struct socket *so)
  391. {
  392. int n;
  393. char *buf;
  394. struct sbuf *sb = &so->so_snd;
  395. size_t len = sbspace(sb);
  396. int mss = so->so_tcpcb->t_maxseg;
  397. STAM_PROFILE_START(&pData->StatIOread, a);
  398. STAM_COUNTER_RESET(&pData->StatIORead_in_1);
  399. STAM_COUNTER_RESET(&pData->StatIORead_in_2);
  400. QSOCKET_LOCK(tcb);
  401. SOCKET_LOCK(so);
  402. QSOCKET_UNLOCK(tcb);
  403. LogFlowFunc(("soread: so = %lx\n", (long)so));
  404. if (len > mss)
  405. len -= len % mss;
  406. buf = RTMemAlloc(len);
  407. if (buf == NULL)
  408. {
  409. Log(("NAT: can't alloc enough memory\n"));
  410. return -1;
  411. }
  412. n = recv(so->s, buf, len, (so->so_tcpcb->t_force? MSG_OOB:0));
  413. if (n <= 0)
  414. {
  415. /*
  416. * Special case for WSAEnumNetworkEvents: If we receive 0 bytes that
  417. * _could_ mean that the connection is closed. But we will receive an
  418. * FD_CLOSE event later if the connection was _really_ closed. With
  419. * www.youtube.com I see this very often. Closing the socket too early
  420. * would be dangerous.
  421. */
  422. int status;
  423. unsigned long pending = 0;
  424. status = ioctlsocket(so->s, FIONREAD, &pending);
  425. if (status < 0)
  426. Log(("NAT:error in WSAIoctl: %d\n", errno));
  427. if (n == 0 && (pending != 0))
  428. {
  429. SOCKET_UNLOCK(so);
  430. STAM_PROFILE_STOP(&pData->StatIOread, a);
  431. RTMemFree(buf);
  432. return 0;
  433. }
  434. if ( n < 0
  435. && soIgnorableErrorCode(errno))
  436. {
  437. SOCKET_UNLOCK(so);
  438. STAM_PROFILE_STOP(&pData->StatIOread, a);
  439. RTMemFree(buf);
  440. return 0;
  441. }
  442. else
  443. {
  444. Log2((" --- soread() disconnected, n = %d, errno = %d (%s)\n",
  445. n, errno, strerror(errno)));
  446. sofcantrcvmore(so);
  447. tcp_sockclosed(pData, sototcpcb(so));
  448. SOCKET_UNLOCK(so);
  449. STAM_PROFILE_STOP(&pData->StatIOread, a);
  450. RTMemFree(buf);
  451. return -1;
  452. }
  453. }
  454. sbuf_bcat(sb, buf, n);
  455. RTMemFree(buf);
  456. return n;
  457. }
  458. #endif
  459. /*
  460. * Get urgent data
  461. *
  462. * When the socket is created, we set it SO_OOBINLINE,
  463. * so when OOB data arrives, we soread() it and everything
  464. * in the send buffer is sent as urgent data
  465. */
  466. void
  467. sorecvoob(PNATState pData, struct socket *so)
  468. {
  469. struct tcpcb *tp = sototcpcb(so);
  470. ssize_t ret;
  471. LogFlowFunc(("sorecvoob: so = %R[natsock]\n", so));
  472. /*
  473. * We take a guess at how much urgent data has arrived.
  474. * In most situations, when urgent data arrives, the next
  475. * read() should get all the urgent data. This guess will
  476. * be wrong however if more data arrives just after the
  477. * urgent data, or the read() doesn't return all the
  478. * urgent data.
  479. */
  480. ret = soread(pData, so);
  481. if (RT_LIKELY(ret > 0))
  482. {
  483. tp->snd_up = tp->snd_una + SBUF_LEN(&so->so_snd);
  484. tp->t_force = 1;
  485. tcp_output(pData, tp);
  486. tp->t_force = 0;
  487. }
  488. }
  489. #ifndef VBOX_WITH_SLIRP_BSD_SBUF
  490. /*
  491. * Send urgent data
  492. * There's a lot duplicated code here, but...
  493. */
  494. int
  495. sosendoob(struct socket *so)
  496. {
  497. struct sbuf *sb = &so->so_rcv;
  498. char buff[2048]; /* XXX Shouldn't be sending more oob data than this */
  499. int n, len;
  500. LogFlowFunc(("sosendoob so = %R[natsock]\n", so));
  501. if (so->so_urgc > sizeof(buff))
  502. so->so_urgc = sizeof(buff); /* XXX */
  503. if (sb->sb_rptr < sb->sb_wptr)
  504. {
  505. /* We can send it directly */
  506. n = send(so->s, sb->sb_rptr, so->so_urgc, (MSG_OOB)); /* |MSG_DONTWAIT)); */
  507. so->so_urgc -= n;
  508. Log2((" --- sent %d bytes urgent data, %d urgent bytes left\n",
  509. n, so->so_urgc));
  510. }
  511. else
  512. {
  513. /*
  514. * Since there's no sendv or sendtov like writev,
  515. * we must copy all data to a linear buffer then
  516. * send it all
  517. */
  518. len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr;
  519. if (len > so->so_urgc)
  520. len = so->so_urgc;
  521. memcpy(buff, sb->sb_rptr, len);
  522. so->so_urgc -= len;
  523. if (so->so_urgc)
  524. {
  525. n = sb->sb_wptr - sb->sb_data;
  526. if (n > so->so_urgc)
  527. n = so->so_urgc;
  528. memcpy(buff + len, sb->sb_data, n);
  529. so->so_urgc -= n;
  530. len += n;
  531. }
  532. n = send(so->s, buff, len, (MSG_OOB)); /* |MSG_DONTWAIT)); */
  533. #ifdef DEBUG
  534. if (n != len)
  535. Log(("Didn't send all data urgently XXXXX\n"));
  536. #endif
  537. Log2((" ---2 sent %d bytes urgent data, %d urgent bytes left\n",
  538. n, so->so_urgc));
  539. }
  540. sb->sb_cc -= n;
  541. sb->sb_rptr += n;
  542. if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen))
  543. sb->sb_rptr -= sb->sb_datalen;
  544. return n;
  545. }
/*
 * Write data from so_rcv to so's socket,
 * updating all sbuf field as necessary
 */
int
sowrite(PNATState pData, struct socket *so)
{
    int n, nn;
    struct sbuf *sb = &so->so_rcv;
    size_t len = sb->sb_cc;     /* bytes pending in the circular buffer */
    struct iovec iov[2];
    STAM_PROFILE_START(&pData->StatIOwrite, a);
    STAM_COUNTER_RESET(&pData->StatIOWrite_in_1);
    STAM_COUNTER_RESET(&pData->StatIOWrite_in_1_bytes);
    STAM_COUNTER_RESET(&pData->StatIOWrite_in_2);
    STAM_COUNTER_RESET(&pData->StatIOWrite_in_2_1st_bytes);
    STAM_COUNTER_RESET(&pData->StatIOWrite_in_2_2nd_bytes);
    STAM_COUNTER_RESET(&pData->StatIOWrite_no_w);
    STAM_COUNTER_RESET(&pData->StatIOWrite_rest);
    STAM_COUNTER_RESET(&pData->StatIOWrite_rest_bytes);
    LogFlowFunc(("so = %R[natsock]\n", so));
    Log2(("%s: so = %R[natsock] so->so_rcv = %R[sbuf]\n", __PRETTY_FUNCTION__, so, sb));
    QSOCKET_LOCK(tcb);
    SOCKET_LOCK(so);
    QSOCKET_UNLOCK(tcb);
    /* Flush any pending urgent data first; if that drained the buffer
     * completely we're done. */
    if (so->so_urgc)
    {
        sosendoob(so);
        if (sb->sb_cc == 0)
        {
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOwrite, a);
            return 0;
        }
    }
    /*
     * No need to check if there's something to write,
     * sowrite wouldn't have been called otherwise
     */
    len = sb->sb_cc;
    /*
     * Build up to two iovecs over the occupied part of the circular buffer:
     * iov[0] from the read pointer to the wrap point (or write pointer),
     * iov[1] for the wrapped-around region at the start of the buffer.
     */
    iov[0].iov_base = sb->sb_rptr;
    iov[1].iov_base = 0;
    iov[1].iov_len = 0;
    if (sb->sb_rptr < sb->sb_wptr)
    {
        iov[0].iov_len = sb->sb_wptr - sb->sb_rptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        n = 1;
    }
    else
    {
        iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr;
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        len -= iov[0].iov_len;
        if (len)
        {
            iov[1].iov_base = sb->sb_data;
            iov[1].iov_len = sb->sb_wptr - sb->sb_data;
            if (iov[1].iov_len > len)
                iov[1].iov_len = len;
            n = 2;
        }
        else
            n = 1;
    }
    STAM_STATS({
        if (n == 1)
        {
            STAM_COUNTER_INC(&pData->StatIOWrite_in_1);
            STAM_COUNTER_ADD(&pData->StatIOWrite_in_1_bytes, iov[0].iov_len);
        }
        else
        {
            STAM_COUNTER_INC(&pData->StatIOWrite_in_2);
            STAM_COUNTER_ADD(&pData->StatIOWrite_in_2_1st_bytes, iov[0].iov_len);
            STAM_COUNTER_ADD(&pData->StatIOWrite_in_2_2nd_bytes, iov[1].iov_len);
        }
    });
    /* Check if there's urgent data to send, and if so, send it */
#ifdef HAVE_READV
    nn = writev(so->s, (const struct iovec *)iov, n);
#else
    /* No writev(): send only the first region here; a second send() below
     * picks up the wrapped part when n == 2. */
    nn = send(so->s, iov[0].iov_base, iov[0].iov_len, 0);
#endif
    Log2(("%s: wrote(1) nn = %d bytes\n", __PRETTY_FUNCTION__, nn));
    /* This should never happen, but people tell me it does *shrug* */
    if (   nn < 0
        && soIgnorableErrorCode(errno))
    {
        /* Transient error (e.g. would-block); try again later. */
        SOCKET_UNLOCK(so);
        STAM_PROFILE_STOP(&pData->StatIOwrite, a);
        return 0;
    }
    if (nn < 0 || (nn == 0 && iov[0].iov_len > 0))
    {
        /* Hard error, or zero bytes accepted for a non-empty write:
         * treat the connection as gone. */
        Log2(("%s: disconnected, so->so_state = %x, errno = %d\n",
              __PRETTY_FUNCTION__, so->so_state, errno));
        sofcantsendmore(so);
        tcp_sockclosed(pData, sototcpcb(so));
        SOCKET_UNLOCK(so);
        STAM_PROFILE_STOP(&pData->StatIOwrite, a);
        return -1;
    }
#ifndef HAVE_READV
    if (n == 2 && nn == iov[0].iov_len)
    {
        int ret;
        ret = send(so->s, iov[1].iov_base, iov[1].iov_len, 0);
        if (ret > 0)
            nn += ret;
        STAM_STATS({
            if (ret > 0 && ret != iov[1].iov_len)
            {
                STAM_COUNTER_INC(&pData->StatIOWrite_rest);
                STAM_COUNTER_ADD(&pData->StatIOWrite_rest_bytes, (iov[1].iov_len - ret));
            }
        });
    }
    Log2(("%s: wrote(2) nn = %d bytes\n", __PRETTY_FUNCTION__, nn));
#endif
    /* Update sbuf */
    sb->sb_cc -= nn;
    sb->sb_rptr += nn;
    Log2(("%s: update so_rcv (written nn = %d) %R[sbuf]\n", __PRETTY_FUNCTION__, nn, sb));
    /* Wrap the read pointer when it runs off the end of the buffer. */
    if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen))
    {
        sb->sb_rptr -= sb->sb_datalen;
        Log2(("%s: alter sb_rptr of so_rcv %R[sbuf]\n", __PRETTY_FUNCTION__, sb));
    }
    /*
     * If in DRAIN mode, and there's no more data, set
     * it CANTSENDMORE
     */
    if ((so->so_state & SS_FWDRAIN) && sb->sb_cc == 0)
        sofcantsendmore(so);
    SOCKET_UNLOCK(so);
    STAM_PROFILE_STOP(&pData->StatIOwrite, a);
    return nn;
}
  688. #else /* VBOX_WITH_SLIRP_BSD_SBUF */
  689. static int
  690. do_sosend(struct socket *so, int fUrg)
  691. {
  692. struct sbuf *sb = &so->so_rcv;
  693. int n, len;
  694. LogFlowFunc(("sosendoob: so = %R[natsock]\n", so));
  695. len = sbuf_len(sb);
  696. n = send(so->s, sbuf_data(sb), len, (fUrg ? MSG_OOB : 0));
  697. if (n < 0)
  698. Log(("NAT: Can't sent sbuf via socket.\n"));
  699. if (fUrg)
  700. so->so_urgc -= n;
  701. if (n > 0 && n < len)
  702. {
  703. char *ptr;
  704. char *buff;
  705. buff = RTMemAlloc(len);
  706. if (buff == NULL)
  707. {
  708. Log(("NAT: No space to allocate temporal buffer\n"));
  709. return -1;
  710. }
  711. ptr = sbuf_data(sb);
  712. memcpy(buff, &ptr[n], len - n);
  713. sbuf_bcpy(sb, buff, len - n);
  714. RTMemFree(buff);
  715. return n;
  716. }
  717. sbuf_clear(sb);
  718. return n;
  719. }
/** Send pending urgent (OOB) data from so_rcv to the host socket. */
int
sosendoob(struct socket *so)
{
    return do_sosend(so, 1);
}
/*
 * Write data from so_rcv to so's socket,
 * updating all sbuf field as necessary
 */
int
sowrite(PNATState pData, struct socket *so)
{
    /* pData is unused in the sbuf(9) variant; kept for interface parity
     * with the non-sbuf implementation. */
    return do_sosend(so, 0);
}
  734. #endif
/*
 * recvfrom() a UDP socket
 */
void
sorecvfrom(PNATState pData, struct socket *so)
{
    ssize_t ret = 0;
    struct sockaddr_in addr;
    socklen_t addrlen = sizeof(struct sockaddr_in);
    LogFlowFunc(("sorecvfrom: so = %lx\n", (long)so));
    if (so->so_type == IPPROTO_ICMP)
    {
        /* This is a "ping" reply */
#ifdef RT_OS_WINDOWS
        sorecvfrom_icmp_win(pData, so);
#else /* RT_OS_WINDOWS */
        sorecvfrom_icmp_unix(pData, so);
#endif /* !RT_OS_WINDOWS */
        udp_detach(pData, so);
    }
    else
    {
        /* A "normal" UDP packet */
        struct mbuf *m;
        ssize_t len;
        u_long n = 0;
        int rc = 0;
        static int signalled = 0;
        char *pchBuffer = NULL;
        bool fWithTemporalBuffer = false;
        QSOCKET_LOCK(udb);
        SOCKET_LOCK(so);
        QSOCKET_UNLOCK(udb);
        /*How many data has been received ?*/
        /*
         * 1. calculate how much we can read
         * 2. read as much as possible
         * 3. attach buffer to allocated header mbuf
         */
        rc = ioctlsocket(so->s, FIONREAD, &n);
        if (rc == -1)
        {
            /* NOTE(review): these returns (and the m == NULL / ret < 0 paths
             * below) leave SOCKET_LOCK(so) held, unlike the success path which
             * ends with SOCKET_UNLOCK(so) — looks like a lock leak; verify
             * against the locking discipline of the build configuration. */
            if (   soIgnorableErrorCode(errno)
                || errno == ENOTCONN)
                return;
            else if (signalled == 0)
            {
                /* Only nag once per process about the truncation hazard. */
                LogRel(("NAT: can't fetch amount of bytes on socket %R[natsock], so message will be truncated.\n", so));
                signalled = 1;
            }
            return;
        }
        len = sizeof(struct udpiphdr);
        m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, slirp_size(pData));
        if (m == NULL)
            return;
        len += n;
        /* Reserve room in front of the payload for the Ethernet and
         * UDP/IP headers that will be prepended later. */
        m->m_data += ETH_HLEN;
        m->m_pkthdr.header = mtod(m, void *);
        m->m_data += sizeof(struct udpiphdr);
        pchBuffer = mtod(m, char *);
        fWithTemporalBuffer = false;
        /*
         * Even if amounts of bytes on socket is greater than MTU value
         * Slirp will able fragment it, but we won't create temporal location
         * here.
         */
        if (n > (slirp_size(pData) - sizeof(struct udpiphdr)))
        {
            /* Datagram too big for the mbuf cluster: stage it in a heap
             * buffer and copy it into the (chained) mbuf afterwards. */
            pchBuffer = RTMemAlloc((n) * sizeof(char));
            if (!pchBuffer)
            {
                m_freem(pData, m);
                return;
            }
            fWithTemporalBuffer = true;
        }
        ret = recvfrom(so->s, pchBuffer, n, 0,
                       (struct sockaddr *)&addr, &addrlen);
        if (fWithTemporalBuffer)
        {
            if (ret > 0)
            {
                m_copyback(pData, m, 0, ret, pchBuffer);
                /*
                 * If we've met comporison below our size prediction was failed
                 * it's not fatal just we've allocated for nothing. (@todo add counter here
                 * to calculate how rare we here)
                 */
                if(ret < slirp_size(pData) && !m->m_next)
                    Log(("NAT:udp: Expected size(%d) lesser than real(%d) and less minimal mbuf size(%d)\n",
                         n, ret, slirp_size(pData)));
            }
            /* we're freeing buffer anyway */
            RTMemFree(pchBuffer);
        }
        else
            /* NOTE(review): when ret is -1 this stores a negative length in
             * m_len before the mbuf is freed below — harmless if m_freem
             * ignores m_len, but worth confirming. */
            m->m_len = ret;
        if (ret < 0)
        {
            /* Map the receive error to the appropriate ICMP unreachable code. */
            u_char code = ICMP_UNREACH_PORT;
            if (errno == EHOSTUNREACH)
                code = ICMP_UNREACH_HOST;
            else if (errno == ENETUNREACH)
                code = ICMP_UNREACH_NET;
            m_freem(pData, m);
            if (   soIgnorableErrorCode(errno)
                || errno == ENOTCONN)
            {
                return;
            }
            Log2((" rx error, tx icmp ICMP_UNREACH:%i\n", code));
            icmp_error(pData, so->so_m, ICMP_UNREACH, code, 0, strerror(errno));
            so->so_m = NULL;
        }
        else
        {
            Assert((m_length(m,NULL) == ret));
            /*
             * Hack: domain name lookup will be used the most for UDP,
             * and since they'll only be used once there's no need
             * for the 4 minute (or whatever) timeout... So we time them
             * out much quicker (10 seconds for now...)
             */
            if (so->so_expire)
            {
                if (so->so_fport != RT_H2N_U16_C(53))
                    so->so_expire = curtime + SO_EXPIRE;
            }
            /*
             * last argument should be changed if Slirp will inject IP attributes
             * Note: Here we can't check if dnsproxy's sent initial request
             */
            if (   pData->fUseDnsProxy
                && so->so_fport == RT_H2N_U16_C(53))
                dnsproxy_answer(pData, so, m);
#if 0
            if (m->m_len == len)
            {
                m_inc(m, MINCSIZE);
                m->m_len = 0;
            }
#endif
            /* packets definetly will be fragmented, could confuse receiver peer. */
            if (m_length(m, NULL) > if_mtu)
                m->m_flags |= M_SKIP_FIREWALL;
            /*
             * If this packet was destined for CTL_ADDR,
             * make it look like that's where it came from, done by udp_output
             */
            udp_output(pData, so, m, &addr);
            SOCKET_UNLOCK(so);
        } /* rx error */
    } /* if ping packet */
}
/*
 * sendto() a socket
 */
int
sosendto(PNATState pData, struct socket *so, struct mbuf *m)
{
    int ret;
    struct sockaddr_in *paddr;
    struct sockaddr addr;
#if 0
    struct sockaddr_in host_addr;
#endif
    caddr_t buf = 0;
    int mlen;
    LogFlowFunc(("sosendto: so = %R[natsock], m = %lx\n", so, (long)m));
    memset(&addr, 0, sizeof(struct sockaddr));
#ifdef RT_OS_DARWIN
    addr.sa_len = sizeof(struct sockaddr_in);
#endif
    paddr = (struct sockaddr_in *)&addr;
    paddr->sin_family = AF_INET;
    /* Destination inside the NAT network? Rewrite it to a host-reachable
     * address (loopback for service aliases, broadcast for the bcast addr). */
    if ((so->so_faddr.s_addr & RT_H2N_U32(pData->netmask)) == pData->special_addr.s_addr)
    {
        /* It's an alias */
        uint32_t last_byte = RT_N2H_U32(so->so_faddr.s_addr) & ~pData->netmask;
        switch(last_byte)
        {
#if 0
            /* handle this case at 'default:' */
            case CTL_BROADCAST:
                addr.sin_addr.s_addr = INADDR_BROADCAST;
                /* Send the packet to host to fully emulate broadcast */
                /** @todo r=klaus: on Linux host this causes the host to receive
                 * the packet twice for some reason. And I cannot find any place
                 * in the man pages which states that sending a broadcast does not
                 * reach the host itself. */
                host_addr.sin_family = AF_INET;
                host_addr.sin_port = so->so_fport;
                host_addr.sin_addr = our_addr;
                sendto(so->s, m->m_data, m->m_len, 0,
                       (struct sockaddr *)&host_addr, sizeof (struct sockaddr));
                break;
#endif
            case CTL_DNS:
            case CTL_ALIAS:
            default:
                if (last_byte == ~pData->netmask)
                    paddr->sin_addr.s_addr = INADDR_BROADCAST;
                else
                    paddr->sin_addr = loopback_addr;
                break;
        }
    }
    else
        paddr->sin_addr = so->so_faddr;
    paddr->sin_port = so->so_fport;
    Log2((" sendto()ing, addr.sin_port=%d, addr.sin_addr.s_addr=%.16s\n",
          RT_N2H_U16(paddr->sin_port), inet_ntoa(paddr->sin_addr)));
    /* Don't care what port we get */
    /*
     * > nmap -sV -T4 -O -A -v -PU3483 255.255.255.255
     * generates bodyless messages, annoying memmory management system.
     */
    /* Linearise the mbuf chain into a temporary buffer; an empty datagram
     * (mlen == 0) is sent with buf == NULL. */
    mlen = m_length(m, NULL);
    if (mlen > 0)
    {
        buf = RTMemAlloc(mlen);
        if (buf == NULL)
        {
            return -1;
        }
        m_copydata(m, 0, mlen, buf);
    }
    ret = sendto(so->s, buf, mlen, 0,
                 (struct sockaddr *)&addr, sizeof (struct sockaddr));
#ifdef VBOX_WITH_NAT_SEND2HOME
    if (slirpIsWideCasting(pData, so->so_faddr.s_addr))
    {
        slirpSend2Home(pData, so, buf, mlen, 0);
    }
#endif
    if (buf)
        RTMemFree(buf);
    if (ret < 0)
    {
        Log2(("UDP: sendto fails (%s)\n", strerror(errno)));
        return -1;
    }
    /*
     * Kill the socket if there's no reply in 4 minutes,
     * but only if it's an expirable socket
     */
    if (so->so_expire)
        so->so_expire = curtime + SO_EXPIRE;
    so->so_state = SS_ISFCONNECTED; /* So that it gets select()ed */
    return 0;
}
/*
 * Create a listening host TCP socket bound to bind_addr:port and register a
 * NAT socket for it, so that inbound host connections can be forwarded to
 * the guest at laddr:lport (both kept in network byte order).
 *
 * XXX This should really be tcp_listen
 *
 * Returns the new (unlocked) struct socket, or NULL on failure.
 */
struct socket *
solisten(PNATState pData, u_int32_t bind_addr, u_int port, u_int32_t laddr, u_int lport, int flags)
{
    struct sockaddr_in addr;
    struct socket *so;
    socklen_t addrlen = sizeof(addr);
    int s, opt = 1;
    int status;

    LogFlowFunc(("solisten: port = %d, laddr = %x, lport = %d, flags = %x\n", port, laddr, lport, flags));

    if ((so = socreate()) == NULL)
    {
        /* RTMemFree(so); Not sofree() ??? free(NULL) == NOP */
        return NULL;
    }

    /* Don't tcp_attach... we don't need so_snd nor so_rcv */
    if ((so->so_tcpcb = tcp_newtcpcb(pData, so)) == NULL)
    {
        RTMemFree(so);
        return NULL;
    }

    /* Link the new socket into the global TCP socket queue under the queue lock;
     * keep the per-socket lock held until setup is complete. */
    SOCKET_LOCK_CREATE(so);
    SOCKET_LOCK(so);
    QSOCKET_LOCK(tcb);
    insque(pData, so, &tcb);
    NSOCK_INC();
    QSOCKET_UNLOCK(tcb);

    /*
     * SS_FACCEPTONCE sockets must time out.
     */
    if (flags & SS_FACCEPTONCE)
        so->so_tcpcb->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT*2;

    so->so_state = (SS_FACCEPTCONN|flags);
    so->so_lport = lport; /* Kept in network format */
    so->so_laddr.s_addr = laddr; /* Ditto */

    memset(&addr, 0, sizeof(addr));
#ifdef RT_OS_DARWIN
    addr.sin_len = sizeof(addr);
#endif
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = bind_addr;
    addr.sin_port = port;

    /**
     * changing listen(,1->SOMAXCONN) shouldn't be harmful for NAT's TCP/IP stack,
     * kernel will choose the optimal value for requests queue length.
     * @note: MSDN recommends low (2-4) values for bluetooth networking devices.
     */
    if (   ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0)
        || (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&opt, sizeof(int)) < 0)
        || (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
        || (listen(s, pData->soMaxConn) < 0))
    {
#ifdef RT_OS_WINDOWS
        int tmperrno = WSAGetLastError(); /* Don't clobber the real reason we failed */
        closesocket(s);
        QSOCKET_LOCK(tcb);
        sofree(pData, so);
        QSOCKET_UNLOCK(tcb);
        /* Restore the real errno */
        WSASetLastError(tmperrno);
#else
        int tmperrno = errno; /* Don't clobber the real reason we failed */
        close(s);
        /* tcp_close() unlinks and frees the socket together with its tcpcb. */
        if (sototcpcb(so))
            tcp_close(pData, sototcpcb(so));
        else
            sofree(pData, so);
        /* Restore the real errno */
        errno = tmperrno;
#endif
        return NULL;
    }
    fd_nonblock(s);
    setsockopt(s, SOL_SOCKET, SO_OOBINLINE, (char *)&opt, sizeof(int));

    /* Remember the host port the kernel actually assigned to us. */
    getsockname(s, (struct sockaddr *)&addr, &addrlen);
    so->so_fport = addr.sin_port;
    /* set socket buffers; failures are logged but deliberately non-fatal */
    opt = pData->socket_rcv;
    status = setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&opt, sizeof(int));
    if (status < 0)
    {
        LogRel(("NAT: Error(%d) while setting RCV capacity to (%d)\n", errno, opt));
        goto no_sockopt;
    }
    opt = pData->socket_snd;
    status = setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&opt, sizeof(int));
    if (status < 0)
    {
        LogRel(("NAT: Error(%d) while setting SND capacity to (%d)\n", errno, opt));
        goto no_sockopt;
    }
no_sockopt:
    /* A wildcard or loopback bind is reported to the guest as the alias address. */
    if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr)
        so->so_faddr = alias_addr;
    else
        so->so_faddr = addr.sin_addr;
    so->s = s;
    SOCKET_UNLOCK(so);
    return so;
}
  1089. /*
  1090. * Data is available in so_rcv
  1091. * Just write() the data to the socket
  1092. * XXX not yet...
  1093. * @todo do we really need this function, what it's intended to do?
  1094. */
  1095. void
  1096. sorwakeup(struct socket *so)
  1097. {
  1098. NOREF(so);
  1099. #if 0
  1100. sowrite(so);
  1101. FD_CLR(so->s,&writefds);
  1102. #endif
  1103. }
  1104. /*
  1105. * Data has been freed in so_snd
  1106. * We have room for a read() if we want to
  1107. * For now, don't read, it'll be done in the main loop
  1108. */
  1109. void
  1110. sowwakeup(struct socket *so)
  1111. {
  1112. NOREF(so);
  1113. }
  1114. /*
  1115. * Various session state calls
  1116. * XXX Should be #define's
  1117. * The socket state stuff needs work, these often get call 2 or 3
  1118. * times each when only 1 was needed
  1119. */
  1120. void
  1121. soisfconnecting(struct socket *so)
  1122. {
  1123. so->so_state &= ~(SS_NOFDREF|SS_ISFCONNECTED|SS_FCANTRCVMORE|
  1124. SS_FCANTSENDMORE|SS_FWDRAIN);
  1125. so->so_state |= SS_ISFCONNECTING; /* Clobber other states */
  1126. }
  1127. void
  1128. soisfconnected(struct socket *so)
  1129. {
  1130. LogFlowFunc(("ENTER: so:%R[natsock]\n", so));
  1131. so->so_state &= ~(SS_ISFCONNECTING|SS_FWDRAIN|SS_NOFDREF);
  1132. so->so_state |= SS_ISFCONNECTED; /* Clobber other states */
  1133. LogFlowFunc(("LEAVE: so:%R[natsock]\n", so));
  1134. }
  1135. void
  1136. sofcantrcvmore(struct socket *so)
  1137. {
  1138. LogFlowFunc(("ENTER: so:%R[natsock]\n", so));
  1139. if ((so->so_state & SS_NOFDREF) == 0)
  1140. {
  1141. shutdown(so->s, 0);
  1142. }
  1143. so->so_state &= ~(SS_ISFCONNECTING);
  1144. if (so->so_state & SS_FCANTSENDMORE)
  1145. so->so_state = SS_NOFDREF; /* Don't select it */
  1146. /* XXX close() here as well? */
  1147. else
  1148. so->so_state |= SS_FCANTRCVMORE;
  1149. LogFlowFuncLeave();
  1150. }
  1151. void
  1152. sofcantsendmore(struct socket *so)
  1153. {
  1154. LogFlowFunc(("ENTER: so:%R[natsock]\n", so));
  1155. if ((so->so_state & SS_NOFDREF) == 0)
  1156. shutdown(so->s, 1); /* send FIN to fhost */
  1157. so->so_state &= ~(SS_ISFCONNECTING);
  1158. if (so->so_state & SS_FCANTRCVMORE)
  1159. so->so_state = SS_NOFDREF; /* as above */
  1160. else
  1161. so->so_state |= SS_FCANTSENDMORE;
  1162. LogFlowFuncLeave();
  1163. }
  1164. void
  1165. soisfdisconnected(struct socket *so)
  1166. {
  1167. NOREF(so);
  1168. #if 0
  1169. so->so_state &= ~(SS_ISFCONNECTING|SS_ISFCONNECTED);
  1170. close(so->s);
  1171. so->so_state = SS_ISFDISCONNECTED;
  1172. /*
  1173. * XXX Do nothing ... ?
  1174. */
  1175. #endif
  1176. }
  1177. /*
  1178. * Set write drain mode
  1179. * Set CANTSENDMORE once all data has been write()n
  1180. */
  1181. void
  1182. sofwdrain(struct socket *so)
  1183. {
  1184. if (SBUF_LEN(&so->so_rcv))
  1185. so->so_state |= SS_FWDRAIN;
  1186. else
  1187. sofcantsendmore(so);
  1188. }
/**
 * Translate an ICMP datagram received on the host back into the guest's
 * address space and inject it via icmp_reflect().
 *
 * Handles ICMP_ECHOREPLY (matched against the guest's original echo request)
 * and ICMP_TIMXCEED / ICMP_UNREACH errors, which must carry the offending IP
 * header plus 64 bits of the original datagram (RFC 792).
 *
 * @param pData NAT state.
 * @param buff  Raw packet as read from the host socket, IP header included.
 * @param len   Number of valid bytes in buff.
 * @param addr  Host-side source address of the received datagram.
 */
static void
send_icmp_to_guest(PNATState pData, char *buff, size_t len, const struct sockaddr_in *addr)
{
    struct ip *ip;
    uint32_t dst, src;
    char ip_copy[256]; /* assumes IP header + options + 64 bits fit in 256 bytes — TODO confirm */
    struct icmp *icp;
    int old_ip_len = 0;
    int hlen, original_hlen = 0;
    struct mbuf *m;
    struct icmp_msg *icm;
    uint8_t proto;
    int type = 0;

    ip = (struct ip *)buff;
    /* Fix ip->ip_len to contain the total packet length including the header
     * in _host_ byte order for all OSes. On Darwin, that value already is in
     * host byte order. Solaris and Darwin report only the payload. */
#ifndef RT_OS_DARWIN
    ip->ip_len = RT_N2H_U16(ip->ip_len);
#endif
    hlen = (ip->ip_hl << 2);
#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
    ip->ip_len += hlen;
#endif
    if (ip->ip_len < hlen + ICMP_MINLEN)
    {
        Log(("send_icmp_to_guest: ICMP header is too small to understand which type/subtype of the datagram\n"));
        return;
    }
    icp = (struct icmp *)((char *)ip + hlen);

    Log(("ICMP:received msg(t:%d, c:%d)\n", icp->icmp_type, icp->icmp_code));
    /* Only these three types are forwarded to the guest; everything else is dropped. */
    if (   icp->icmp_type != ICMP_ECHOREPLY
        && icp->icmp_type != ICMP_TIMXCEED
        && icp->icmp_type != ICMP_UNREACH)
    {
        return;
    }

    /*
     * ICMP_ECHOREPLY, ICMP_TIMXCEED, ICMP_UNREACH minimal header size is
     * ICMP_ECHOREPLY assuming data 0
     * icmp_{type(8), code(8), cksum(16), identifier(16), seqnum(16)}
     */
    if (ip->ip_len < hlen + 8)
    {
        Log(("send_icmp_to_guest: NAT accept ICMP_{ECHOREPLY, TIMXCEED, UNREACH} the minimum size is 64 (see rfc792)\n"));
        return;
    }

    type = icp->icmp_type;
    if (   type == ICMP_TIMXCEED
        || type == ICMP_UNREACH)
    {
        /*
         * ICMP_TIMXCEED, ICMP_UNREACH minimal header size is
         * icmp_{type(8), code(8), cksum(16), unused(32)} + IP header + 64 bit of original datagram
         */
        if (ip->ip_len < hlen + 2*8 + sizeof(struct ip))
        {
            Log(("send_icmp_to_guest: NAT accept ICMP_{TIMXCEED, UNREACH} the minimum size of ipheader + 64 bit of data (see rfc792)\n"));
            return;
        }
        /* For error messages the lookup key is the embedded (offending) IP header. */
        ip = &icp->icmp_ip;
    }

    icm = icmp_find_original_mbuf(pData, ip);
    if (icm == NULL)
    {
        Log(("NAT: Can't find the corresponding packet for the received ICMP\n"));
        return;
    }

    m = icm->im_m;
    if (!m)
    {
        LogFunc(("%R[natsock] hasn't stored it's mbuf on sent\n", icm->im_so));
        LIST_REMOVE(icm, im_list);
        RTMemFree(icm);
        return;
    }

    src = addr->sin_addr.s_addr;
    if (type == ICMP_ECHOREPLY)
    {
        struct ip *ip0 = mtod(m, struct ip *);
        struct icmp *icp0 = (struct icmp *)((char *)ip0 + (ip0->ip_hl << 2));
        if (icp0->icmp_type != ICMP_ECHO)
        {
            Log(("NAT: we haven't found echo for this reply\n"));
            return;
        }
        /*
         * While combining the buffer to send (see ip_icmp.c) we control the ICMP
         * header only; the IP header is assembled by the OS network stack. Our
         * local copy of the IP header contains values in host byte order, so no
         * byte order conversion is required here — IP header fields are converted
         * in the ip_output0 routine only.
         */
        if (   (ip->ip_len - hlen)
            != (ip0->ip_len - (ip0->ip_hl << 2)))
        {
            Log(("NAT: ECHO(%d) lenght doesn't match ECHOREPLY(%d)\n",
                 (ip->ip_len - hlen), (ip0->ip_len - (ip0->ip_hl << 2))));
            return;
        }
    }

    /* ip points at the original (guest-sent) ip header from now on */
    ip = mtod(m, struct ip *);
    proto = ip->ip_p;
    /* Now ip is pointing on header we've sent from guest */
    if (   icp->icmp_type == ICMP_TIMXCEED
        || icp->icmp_type == ICMP_UNREACH)
    {
        /* Save the guest's original header (+ options) and 64 bits of payload so
         * it can be embedded into the error message rebuilt below. */
        old_ip_len = (ip->ip_hl << 2) + 64;
        if (old_ip_len > sizeof(ip_copy))
            old_ip_len = sizeof(ip_copy);
        memcpy(ip_copy, ip, old_ip_len);
    }

    /* source address from original IP packet */
    dst = ip->ip_src.s_addr;

    /* override the tail of the old packet with the received ICMP payload */
    ip = mtod(m, struct ip *); /* ip is from the mbuf we've overridden */
    original_hlen = ip->ip_hl << 2;
    /* keeps the original ip header and options in place, appends received data */
    m_copyback(pData, m, original_hlen, len - hlen, buff + hlen);
    ip->ip_len = m_length(m, NULL);
    ip->ip_p = IPPROTO_ICMP; /* the original package could be whatever, but we're response via ICMP */

    icp = (struct icmp *)((char *)ip + (ip->ip_hl << 2));
    type = icp->icmp_type;
    if (   type == ICMP_TIMXCEED
        || type == ICMP_UNREACH)
    {
        /* according RFC 793 error messages required copy of initial IP header + 64 bit */
        memcpy(&icp->icmp_ip, ip_copy, old_ip_len);
        ip->ip_tos = ((ip->ip_tos & 0x1E) | 0xC0); /* high priority for errors */
    }

    ip->ip_src.s_addr = src;
    ip->ip_dst.s_addr = dst;
    icmp_reflect(pData, m);
    /* The cache entry is consumed either way: unlink it before deciding how to
     * release the associated socket. */
    LIST_REMOVE(icm, im_list);
    pData->cIcmpCacheSize--;
    /* Don't call m_free here */

    if (   type == ICMP_TIMXCEED
        || type == ICMP_UNREACH)
    {
        icm->im_so->so_m = NULL;
        switch (proto)
        {
            case IPPROTO_UDP:
                /* XXX: so->so_m already freed so we shouldn't call sofree */
                udp_detach(pData, icm->im_so);
                break;
            case IPPROTO_TCP:
                /* close tcp should be here */
                break;
            default:
                /* do nothing */
                break;
        }
    }
    RTMemFree(icm);
}
  1345. #ifdef RT_OS_WINDOWS
  1346. static void
  1347. sorecvfrom_icmp_win(PNATState pData, struct socket *so)
  1348. {
  1349. int len;
  1350. int i;
  1351. struct ip *ip;
  1352. struct mbuf *m;
  1353. struct icmp *icp;
  1354. struct icmp_msg *icm;
  1355. struct ip *ip_broken; /* ICMP returns header + 64 bit of packet */
  1356. uint32_t src;
  1357. ICMP_ECHO_REPLY *icr;
  1358. int hlen = 0;
  1359. int nbytes = 0;
  1360. u_char code = ~0;
  1361. int out_len;
  1362. int size;
  1363. len = pData->pfIcmpParseReplies(pData->pvIcmpBuffer, pData->szIcmpBuffer);
  1364. if (len < 0)
  1365. {
  1366. LogRel(("NAT: Error (%d) occurred on ICMP receiving\n", GetLastError()));
  1367. return;
  1368. }
  1369. if (len == 0)
  1370. return; /* no error */
  1371. icr = (ICMP_ECHO_REPLY *)pData->pvIcmpBuffer;
  1372. for (i = 0; i < len; ++i)
  1373. {
  1374. LogFunc(("icr[%d] Data:%p, DataSize:%d\n",
  1375. i, icr[i].Data, icr[i].DataSize));
  1376. switch(icr[i].Status)
  1377. {
  1378. case IP_DEST_HOST_UNREACHABLE:
  1379. code = (code != ~0 ? code : ICMP_UNREACH_HOST);
  1380. case IP_DEST_NET_UNREACHABLE:
  1381. code = (code != ~0 ? code : ICMP_UNREACH_NET);
  1382. case IP_DEST_PROT_UNREACHABLE:
  1383. code = (code != ~0 ? code : ICMP_UNREACH_PROTOCOL);
  1384. /* UNREACH error inject here */
  1385. case IP_DEST_PORT_UNREACHABLE:
  1386. code = (code != ~0 ? code : ICMP_UNREACH_PORT);
  1387. icmp_error(pData, so->so_m, ICMP_UNREACH, code, 0, "Error occurred!!!");
  1388. so->so_m = NULL;
  1389. break;
  1390. case IP_SUCCESS: /* echo replied */
  1391. out_len = ETH_HLEN + sizeof(struct ip) + 8;
  1392. size;
  1393. size = MCLBYTES;
  1394. if (out_len < MSIZE)
  1395. size = MCLBYTES;
  1396. else if (out_len < MCLBYTES)
  1397. size = MCLBYTES;
  1398. else if (out_len < MJUM9BYTES)
  1399. size = MJUM9BYTES;
  1400. else if (out_len < MJUM16BYTES)
  1401. size = MJUM16BYTES;
  1402. else
  1403. AssertMsgFailed(("Unsupported size"));
  1404. m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, size);
  1405. LogFunc(("m_getjcl returns m: %p\n", m));
  1406. if (m == NULL)
  1407. return;
  1408. m->m_len = 0;
  1409. m->m_data += if_maxlinkhdr;
  1410. m->m_pkthdr.header = mtod(m, void *);
  1411. ip = mtod(m, struct ip *);
  1412. ip->ip_src.s_addr = icr[i].Address;
  1413. ip->ip_p = IPPROTO_ICMP;
  1414. ip->ip_dst.s_addr = so->so_laddr.s_addr; /*XXX: still the hack*/
  1415. ip->ip_hl = sizeof(struct ip) >> 2; /* requiered for icmp_reflect, no IP options */
  1416. ip->ip_ttl = icr[i].Options.Ttl;
  1417. icp = (struct icmp *)&ip[1]; /* no options */
  1418. icp->icmp_type = ICMP_ECHOREPLY;
  1419. icp->icmp_code = 0;
  1420. icp->icmp_id = so->so_icmp_id;
  1421. icp->icmp_seq = so->so_icmp_seq;
  1422. icm = icmp_find_original_mbuf(pData, ip);
  1423. if (icm)
  1424. {
  1425. /* on this branch we don't need stored variant */
  1426. m_freem(pData, icm->im_m);
  1427. LIST_REMOVE(icm, im_list);
  1428. pData->cIcmpCacheSize--;
  1429. RTMemFree(icm);
  1430. }
  1431. hlen = (ip->ip_hl << 2);
  1432. Assert((hlen >= sizeof(struct ip)));
  1433. m->m_data += hlen + ICMP_MINLEN;
  1434. if (!RT_VALID_PTR(icr[i].Data))
  1435. {
  1436. m_freem(pData, m);
  1437. break;
  1438. }
  1439. m_copyback(pData, m, 0, icr[i].DataSize, icr[i].Data);
  1440. m->m_data -= hlen + ICMP_MINLEN;
  1441. m->m_len += hlen + ICMP_MINLEN;
  1442. ip->ip_len = m_length(m, NULL);
  1443. Assert((ip->ip_len == hlen + ICMP_MINLEN + icr[i].DataSize));
  1444. icmp_reflect(pData, m);
  1445. break;
  1446. case IP_TTL_EXPIRED_TRANSIT: /* TTL expired */
  1447. ip_broken = icr[i].Data;
  1448. icm = icmp_find_original_mbuf(pData, ip_broken);
  1449. if (icm == NULL) {
  1450. Log(("ICMP: can't find original package (first double word %x)\n", *(uint32_t *)ip_broken));
  1451. return;
  1452. }
  1453. m = icm->im_m;
  1454. ip = mtod(m, struct ip *);
  1455. Assert(((ip_broken->ip_hl >> 2) >= sizeof(struct ip)));
  1456. ip->ip_ttl = icr[i].Options.Ttl;
  1457. src = ip->ip_src.s_addr;
  1458. ip->ip_dst.s_addr = src;
  1459. ip->ip_dst.s_addr = icr[i].Address;
  1460. hlen = (ip->ip_hl << 2);
  1461. icp = (struct icmp *)((char *)ip + hlen);
  1462. ip_broken->ip_src.s_addr = src; /*it packet sent from host not from guest*/
  1463. m->m_len = (ip_broken->ip_hl << 2) + 64;
  1464. m->m_pkthdr.header = mtod(m, void *);
  1465. m_copyback(pData, m, ip->ip_hl >> 2, icr[i].DataSize, icr[i].Data);
  1466. icmp_reflect(pData, m);
  1467. /* Here is different situation from Unix world, where we can receive icmp in response on TCP/UDP */
  1468. LIST_REMOVE(icm, im_list);
  1469. pData->cIcmpCacheSize--;
  1470. RTMemFree(icm);
  1471. break;
  1472. default:
  1473. Log(("ICMP(default): message with Status: %x was received from %x\n", icr[i].Status, icr[i].Address));
  1474. break;
  1475. }
  1476. }
  1477. }
  1478. #else /* !RT_OS_WINDOWS */
  1479. static void sorecvfrom_icmp_unix(PNATState pData, struct socket *so)
  1480. {
  1481. struct sockaddr_in addr;
  1482. socklen_t addrlen = sizeof(struct sockaddr_in);
  1483. struct ip ip;
  1484. char *buff;
  1485. int len = 0;
  1486. /* 1- step: read the ip header */
  1487. len = recvfrom(so->s, &ip, sizeof(struct ip), MSG_PEEK,
  1488. (struct sockaddr *)&addr, &addrlen);
  1489. if ( len < 0
  1490. && ( soIgnorableErrorCode(errno)
  1491. || errno == ENOTCONN))
  1492. {
  1493. Log(("sorecvfrom_icmp_unix: 1 - step can't read IP datagramm (would block)\n"));
  1494. return;
  1495. }
  1496. if ( len < sizeof(struct ip)
  1497. || len < 0
  1498. || len == 0)
  1499. {
  1500. u_char code;
  1501. code = ICMP_UNREACH_PORT;
  1502. if (errno == EHOSTUNREACH)
  1503. code = ICMP_UNREACH_HOST;
  1504. else if (errno == ENETUNREACH)
  1505. code = ICMP_UNREACH_NET;
  1506. LogRel((" udp icmp rx errno = %d (%s)\n", errno, strerror(errno)));
  1507. icmp_error(pData, so->so_m, ICMP_UNREACH, code, 0, strerror(errno));
  1508. so->so_m = NULL;
  1509. Log(("sorecvfrom_icmp_unix: 1 - step can't read IP datagramm\n"));
  1510. return;
  1511. }
  1512. /* basic check of IP header */
  1513. if (

Large files files are truncated, but you can click here to view the full file