PageRenderTime 51ms CodeModel.GetById 23ms RepoModel.GetById 0ms app.codeStats 0ms

/deps/uv/src/win/udp.c

https://gitlab.com/GeekSir/node
C | 890 lines | 656 code | 150 blank | 84 comment | 173 complexity | 8ed6989440fe366fb5be97562ed2c96e MD5 | raw file
Possible License(s): 0BSD, Apache-2.0, MPL-2.0-no-copyleft-exception, JSON, WTFPL, CC-BY-SA-3.0, Unlicense, ISC, BSD-3-Clause, MIT, AGPL-3.0
  1. /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
  2. *
  3. * Permission is hereby granted, free of charge, to any person obtaining a copy
  4. * of this software and associated documentation files (the "Software"), to
  5. * deal in the Software without restriction, including without limitation the
  6. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  7. * sell copies of the Software, and to permit persons to whom the Software is
  8. * furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice shall be included in
  11. * all copies or substantial portions of the Software.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  18. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  19. * IN THE SOFTWARE.
  20. */
  21. #include <assert.h>
  22. #include <stdlib.h>
  23. #include "uv.h"
  24. #include "internal.h"
  25. #include "handle-inl.h"
  26. #include "stream-inl.h"
  27. #include "req-inl.h"
  28. /*
  29. * Threshold of active udp streams for which to preallocate udp read buffers.
  30. */
  31. const unsigned int uv_active_udp_streams_threshold = 0;
  32. /* A zero-size buffer for use by uv_udp_read */
  33. static char uv_zero_[] = "";
  34. int uv_udp_getsockname(const uv_udp_t* handle,
  35. struct sockaddr* name,
  36. int* namelen) {
  37. int result;
  38. if (!(handle->flags & UV_HANDLE_BOUND)) {
  39. return UV_EINVAL;
  40. }
  41. result = getsockname(handle->socket, name, namelen);
  42. if (result != 0) {
  43. return uv_translate_sys_error(WSAGetLastError());
  44. }
  45. return 0;
  46. }
/* Attach an existing winsock SOCKET to the handle: make it nonblocking and
 * non-inheritable, associate it with the loop's completion port and, when
 * safe, enable completion-port bypass for synchronously completing
 * operations.  Returns 0 on success or a GetLastError/WSAGetLastError code;
 * the caller is responsible for closing the socket on failure. */
static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
    int family) {
  DWORD yes = 1;
  WSAPROTOCOL_INFOW info;
  int opt_len;

  assert(handle->socket == INVALID_SOCKET);

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  /* Make the socket non-inheritable */
  if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
    return GetLastError();
  }

  /* Associate it with the I/O completion port. */
  /* Use uv_handle_t pointer as completion key. */
  if (CreateIoCompletionPort((HANDLE)socket,
                             loop->iocp,
                             (ULONG_PTR)socket,
                             0) == NULL) {
    return GetLastError();
  }

  if (pSetFileCompletionNotificationModes) {
    /* All known Windows that support SetFileCompletionNotificationModes */
    /* have a bug that makes it impossible to use this function in */
    /* conjunction with datagram sockets. We can work around that but only */
    /* if the user is using the default UDP driver (AFD) and has no other */
    /* LSPs stacked on top. Here we check whether that is the case. */
    opt_len = (int) sizeof info;
    if (getsockopt(socket,
                   SOL_SOCKET,
                   SO_PROTOCOL_INFOW,
                   (char*) &info,
                   &opt_len) == SOCKET_ERROR) {
      return GetLastError();
    }

    /* ChainLen == 1 means the plain AFD base provider, no LSPs stacked. */
    if (info.ProtocolChain.ChainLen == 1) {
      if (pSetFileCompletionNotificationModes((HANDLE)socket,
          FILE_SKIP_SET_EVENT_ON_HANDLE |
          FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
        handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
        /* Bypass is on: swap in workaround wrappers for the datagram bug. */
        handle->func_wsarecv = uv_wsarecv_workaround;
        handle->func_wsarecvfrom = uv_wsarecvfrom_workaround;
      } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
        /* ERROR_INVALID_FUNCTION just means the OS lacks support; anything
         * else is a real failure. */
        return GetLastError();
      }
    }
  }

  handle->socket = socket;

  if (family == AF_INET6) {
    handle->flags |= UV_HANDLE_IPV6;
  } else {
    assert(!(handle->flags & UV_HANDLE_IPV6));
  }

  return 0;
}
  103. int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  104. uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
  105. handle->socket = INVALID_SOCKET;
  106. handle->reqs_pending = 0;
  107. handle->activecnt = 0;
  108. handle->func_wsarecv = WSARecv;
  109. handle->func_wsarecvfrom = WSARecvFrom;
  110. handle->send_queue_size = 0;
  111. handle->send_queue_count = 0;
  112. uv_req_init(loop, (uv_req_t*) &(handle->recv_req));
  113. handle->recv_req.type = UV_UDP_RECV;
  114. handle->recv_req.data = handle;
  115. return 0;
  116. }
  117. void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
  118. uv_udp_recv_stop(handle);
  119. closesocket(handle->socket);
  120. handle->socket = INVALID_SOCKET;
  121. uv__handle_closing(handle);
  122. if (handle->reqs_pending == 0) {
  123. uv_want_endgame(loop, (uv_handle_t*) handle);
  124. }
  125. }
  126. void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
  127. if (handle->flags & UV__HANDLE_CLOSING &&
  128. handle->reqs_pending == 0) {
  129. assert(!(handle->flags & UV_HANDLE_CLOSED));
  130. uv__handle_close(handle);
  131. }
  132. }
/* Ensure the handle has a bound socket, creating and binding one on demand.
 * No-op when already bound.  `flags` accepts UV_UDP_IPV6ONLY and
 * UV_UDP_REUSEADDR.  Returns 0 or a system error code (callers translate
 * to libuv errors at their egress points). */
static int uv_udp_maybe_bind(uv_udp_t* handle,
                             const struct sockaddr* addr,
                             unsigned int addrlen,
                             unsigned int flags) {
  int r;
  int err;
  DWORD no = 0;

  if (handle->flags & UV_HANDLE_BOUND)
    return 0;

  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) {
    /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */
    return ERROR_INVALID_PARAMETER;
  }

  /* Create a socket if the handle doesn't own one yet (it may have been
   * supplied via uv_udp_open). */
  if (handle->socket == INVALID_SOCKET) {
    SOCKET sock = socket(addr->sa_family, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET) {
      return WSAGetLastError();
    }

    err = uv_udp_set_socket(handle->loop, handle, sock, addr->sa_family);
    if (err) {
      /* set_socket failed before taking ownership; close here. */
      closesocket(sock);
      return err;
    }

    if (flags & UV_UDP_REUSEADDR) {
      DWORD yes = 1;
      /* Set SO_REUSEADDR on the socket. */
      if (setsockopt(sock,
                     SOL_SOCKET,
                     SO_REUSEADDR,
                     (char*) &yes,
                     sizeof yes) == SOCKET_ERROR) {
        err = WSAGetLastError();
        closesocket(sock);
        return err;
      }
    }

    if (addr->sa_family == AF_INET6)
      handle->flags |= UV_HANDLE_IPV6;
  }

  if (addr->sa_family == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) {
    /* On windows IPV6ONLY is on by default. */
    /* If the user doesn't specify it libuv turns it off. */
    /* TODO: how to handle errors? This may fail if there is no ipv4 stack */
    /* available, or when run on XP/2003 which have no support for dualstack */
    /* sockets. For now we're silently ignoring the error. */
    setsockopt(handle->socket,
               IPPROTO_IPV6,
               IPV6_V6ONLY,
               (char*) &no,
               sizeof no);
  }

  r = bind(handle->socket, addr, addrlen);
  if (r == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  handle->flags |= UV_HANDLE_BOUND;

  return 0;
}
  191. static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
  192. uv_req_t* req;
  193. uv_buf_t buf;
  194. DWORD bytes, flags;
  195. int result;
  196. assert(handle->flags & UV_HANDLE_READING);
  197. assert(!(handle->flags & UV_HANDLE_READ_PENDING));
  198. req = &handle->recv_req;
  199. memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
  200. /*
  201. * Preallocate a read buffer if the number of active streams is below
  202. * the threshold.
  203. */
  204. if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
  205. handle->flags &= ~UV_HANDLE_ZERO_READ;
  206. handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->recv_buffer);
  207. if (handle->recv_buffer.len == 0) {
  208. handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
  209. return;
  210. }
  211. assert(handle->recv_buffer.base != NULL);
  212. buf = handle->recv_buffer;
  213. memset(&handle->recv_from, 0, sizeof handle->recv_from);
  214. handle->recv_from_len = sizeof handle->recv_from;
  215. flags = 0;
  216. result = handle->func_wsarecvfrom(handle->socket,
  217. (WSABUF*) &buf,
  218. 1,
  219. &bytes,
  220. &flags,
  221. (struct sockaddr*) &handle->recv_from,
  222. &handle->recv_from_len,
  223. &req->u.io.overlapped,
  224. NULL);
  225. if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
  226. /* Process the req without IOCP. */
  227. handle->flags |= UV_HANDLE_READ_PENDING;
  228. req->u.io.overlapped.InternalHigh = bytes;
  229. handle->reqs_pending++;
  230. uv_insert_pending_req(loop, req);
  231. } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
  232. /* The req will be processed with IOCP. */
  233. handle->flags |= UV_HANDLE_READ_PENDING;
  234. handle->reqs_pending++;
  235. } else {
  236. /* Make this req pending reporting an error. */
  237. SET_REQ_ERROR(req, WSAGetLastError());
  238. uv_insert_pending_req(loop, req);
  239. handle->reqs_pending++;
  240. }
  241. } else {
  242. handle->flags |= UV_HANDLE_ZERO_READ;
  243. buf.base = (char*) uv_zero_;
  244. buf.len = 0;
  245. flags = MSG_PEEK;
  246. result = handle->func_wsarecv(handle->socket,
  247. (WSABUF*) &buf,
  248. 1,
  249. &bytes,
  250. &flags,
  251. &req->u.io.overlapped,
  252. NULL);
  253. if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
  254. /* Process the req without IOCP. */
  255. handle->flags |= UV_HANDLE_READ_PENDING;
  256. req->u.io.overlapped.InternalHigh = bytes;
  257. handle->reqs_pending++;
  258. uv_insert_pending_req(loop, req);
  259. } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
  260. /* The req will be processed with IOCP. */
  261. handle->flags |= UV_HANDLE_READ_PENDING;
  262. handle->reqs_pending++;
  263. } else {
  264. /* Make this req pending reporting an error. */
  265. SET_REQ_ERROR(req, WSAGetLastError());
  266. uv_insert_pending_req(loop, req);
  267. handle->reqs_pending++;
  268. }
  269. }
  270. }
  271. int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
  272. uv_udp_recv_cb recv_cb) {
  273. uv_loop_t* loop = handle->loop;
  274. int err;
  275. if (handle->flags & UV_HANDLE_READING) {
  276. return WSAEALREADY;
  277. }
  278. err = uv_udp_maybe_bind(handle,
  279. (const struct sockaddr*) &uv_addr_ip4_any_,
  280. sizeof(uv_addr_ip4_any_),
  281. 0);
  282. if (err)
  283. return err;
  284. handle->flags |= UV_HANDLE_READING;
  285. INCREASE_ACTIVE_COUNT(loop, handle);
  286. loop->active_udp_streams++;
  287. handle->recv_cb = recv_cb;
  288. handle->alloc_cb = alloc_cb;
  289. /* If reading was stopped and then started again, there could still be a */
  290. /* recv request pending. */
  291. if (!(handle->flags & UV_HANDLE_READ_PENDING))
  292. uv_udp_queue_recv(loop, handle);
  293. return 0;
  294. }
  295. int uv__udp_recv_stop(uv_udp_t* handle) {
  296. if (handle->flags & UV_HANDLE_READING) {
  297. handle->flags &= ~UV_HANDLE_READING;
  298. handle->loop->active_udp_streams--;
  299. DECREASE_ACTIVE_COUNT(loop, handle);
  300. }
  301. return 0;
  302. }
/* Issue an overlapped WSASendTo for `req`.  On acceptance (return 0) the req
 * is registered with the handle and its queued bytes are counted in
 * send_queue_size/send_queue_count until uv_process_udp_send_req runs.
 * On failure returns a system error code and the req is NOT registered. */
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_udp_send_cb cb) {
  uv_loop_t* loop = handle->loop;
  DWORD result, bytes;

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_UDP_SEND;
  req->handle = handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  result = WSASendTo(handle->socket,
                     (WSABUF*)bufs,
                     nbufs,
                     &bytes,
                     0,
                     addr,
                     addrlen,
                     &req->u.io.overlapped,
                     NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    /* Nothing remains queued in the kernel, hence queued_bytes is zero;
     * the req is inserted by hand since no IOCP completion will arrive. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    /* Send failed due to an error. */
    return WSAGetLastError();
  }

  return 0;
}
/* IOCP completion handler for the handle's receive request.  Dispatches the
 * result to recv_cb and, if the handle is still reading afterwards, queues
 * the next receive.  Handles both strategies set up by uv_udp_queue_recv:
 * a buffered read (data already in handle->recv_buffer) and a zero-length
 * MSG_PEEK read (data fetched here with a nonblocking WSARecvFrom). */
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
    uv_req_t* req) {
  uv_buf_t buf;
  int partial;

  assert(handle->type == UV_UDP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req)) {
    DWORD err = GET_REQ_SOCK_ERROR(req);
    if (err == WSAEMSGSIZE) {
      /* Not a real error, it just indicates that the received packet */
      /* was bigger than the receive buffer. */
    } else if (err == WSAECONNRESET || err == WSAENETRESET) {
      /* A previous sendto operation failed; ignore this error. If */
      /* zero-reading we need to call WSARecv/WSARecvFrom _without_ the */
      /* MSG_PEEK flag to clear out the error queue. For nonzero reads, */
      /* immediately queue a new receive. */
      if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
        goto done;
      }
    } else {
      /* A real error occurred. Report the error to the user only if we're */
      /* currently reading. */
      if (handle->flags & UV_HANDLE_READING) {
        uv_udp_recv_stop(handle);
        /* In zero-read mode no user buffer was ever filled. */
        buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
              uv_buf_init(NULL, 0) : handle->recv_buffer;
        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
      }
      goto done;
    }
  }

  if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
    /* Successful read */
    /* `partial` is set when the req "failed" with WSAEMSGSIZE, i.e. the
     * datagram was truncated to fit the buffer. */
    partial = !REQ_SUCCESS(req);
    handle->recv_cb(handle,
                    req->u.io.overlapped.InternalHigh,
                    &handle->recv_buffer,
                    (const struct sockaddr*) &handle->recv_from,
                    partial ? UV_UDP_PARTIAL : 0);
  } else if (handle->flags & UV_HANDLE_READING) {
    DWORD bytes, err, flags;
    struct sockaddr_storage from;
    int from_len;

    /* Do a nonblocking receive */
    /* TODO: try to read multiple datagrams at once. FIONREAD maybe? */
    handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
    if (buf.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      goto done;
    }
    assert(buf.base != NULL);

    memset(&from, 0, sizeof from);
    from_len = sizeof from;
    flags = 0;

    /* No overlapped struct: this call completes (or fails) immediately. */
    if (WSARecvFrom(handle->socket,
                    (WSABUF*)&buf,
                    1,
                    &bytes,
                    &flags,
                    (struct sockaddr*) &from,
                    &from_len,
                    NULL,
                    NULL) != SOCKET_ERROR) {
      /* Message received */
      handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
    } else {
      err = WSAGetLastError();
      if (err == WSAEMSGSIZE) {
        /* Message truncated */
        handle->recv_cb(handle,
                        bytes,
                        &buf,
                        (const struct sockaddr*) &from,
                        UV_UDP_PARTIAL);
      } else if (err == WSAEWOULDBLOCK) {
        /* Kernel buffer empty */
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      } else if (err == WSAECONNRESET || err == WSAENETRESET) {
        /* WSAECONNRESET/WSANETRESET is ignored because this just indicates
         * that a previous sendto operation failed.
         */
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      } else {
        /* Any other error that we want to report back to the user. */
        uv_udp_recv_stop(handle);
        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
      }
    }
  }

done:
  /* Post another read if still reading and not closing. */
  if ((handle->flags & UV_HANDLE_READING) &&
      !(handle->flags & UV_HANDLE_READ_PENDING)) {
    uv_udp_queue_recv(loop, handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
  444. void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
  445. uv_udp_send_t* req) {
  446. int err;
  447. assert(handle->type == UV_UDP);
  448. assert(handle->send_queue_size >= req->u.io.queued_bytes);
  449. assert(handle->send_queue_count >= 1);
  450. handle->send_queue_size -= req->u.io.queued_bytes;
  451. handle->send_queue_count--;
  452. UNREGISTER_HANDLE_REQ(loop, handle, req);
  453. if (req->cb) {
  454. err = 0;
  455. if (!REQ_SUCCESS(req)) {
  456. err = GET_REQ_SOCK_ERROR(req);
  457. }
  458. req->cb(req, uv_translate_sys_error(err));
  459. }
  460. DECREASE_PENDING_REQ_COUNT(handle);
  461. }
  462. static int uv__udp_set_membership4(uv_udp_t* handle,
  463. const struct sockaddr_in* multicast_addr,
  464. const char* interface_addr,
  465. uv_membership membership) {
  466. int err;
  467. int optname;
  468. struct ip_mreq mreq;
  469. if (handle->flags & UV_HANDLE_IPV6)
  470. return UV_EINVAL;
  471. /* If the socket is unbound, bind to inaddr_any. */
  472. err = uv_udp_maybe_bind(handle,
  473. (const struct sockaddr*) &uv_addr_ip4_any_,
  474. sizeof(uv_addr_ip4_any_),
  475. UV_UDP_REUSEADDR);
  476. if (err)
  477. return uv_translate_sys_error(err);
  478. memset(&mreq, 0, sizeof mreq);
  479. if (interface_addr) {
  480. err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
  481. if (err)
  482. return err;
  483. } else {
  484. mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  485. }
  486. mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
  487. switch (membership) {
  488. case UV_JOIN_GROUP:
  489. optname = IP_ADD_MEMBERSHIP;
  490. break;
  491. case UV_LEAVE_GROUP:
  492. optname = IP_DROP_MEMBERSHIP;
  493. break;
  494. default:
  495. return UV_EINVAL;
  496. }
  497. if (setsockopt(handle->socket,
  498. IPPROTO_IP,
  499. optname,
  500. (char*) &mreq,
  501. sizeof mreq) == SOCKET_ERROR) {
  502. return uv_translate_sys_error(WSAGetLastError());
  503. }
  504. return 0;
  505. }
  506. int uv__udp_set_membership6(uv_udp_t* handle,
  507. const struct sockaddr_in6* multicast_addr,
  508. const char* interface_addr,
  509. uv_membership membership) {
  510. int optname;
  511. int err;
  512. struct ipv6_mreq mreq;
  513. struct sockaddr_in6 addr6;
  514. if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
  515. return UV_EINVAL;
  516. err = uv_udp_maybe_bind(handle,
  517. (const struct sockaddr*) &uv_addr_ip6_any_,
  518. sizeof(uv_addr_ip6_any_),
  519. UV_UDP_REUSEADDR);
  520. if (err)
  521. return uv_translate_sys_error(err);
  522. memset(&mreq, 0, sizeof(mreq));
  523. if (interface_addr) {
  524. if (uv_ip6_addr(interface_addr, 0, &addr6))
  525. return UV_EINVAL;
  526. mreq.ipv6mr_interface = addr6.sin6_scope_id;
  527. } else {
  528. mreq.ipv6mr_interface = 0;
  529. }
  530. mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
  531. switch (membership) {
  532. case UV_JOIN_GROUP:
  533. optname = IPV6_ADD_MEMBERSHIP;
  534. break;
  535. case UV_LEAVE_GROUP:
  536. optname = IPV6_DROP_MEMBERSHIP;
  537. break;
  538. default:
  539. return UV_EINVAL;
  540. }
  541. if (setsockopt(handle->socket,
  542. IPPROTO_IPV6,
  543. optname,
  544. (char*) &mreq,
  545. sizeof mreq) == SOCKET_ERROR) {
  546. return uv_translate_sys_error(WSAGetLastError());
  547. }
  548. return 0;
  549. }
  550. int uv_udp_set_membership(uv_udp_t* handle,
  551. const char* multicast_addr,
  552. const char* interface_addr,
  553. uv_membership membership) {
  554. struct sockaddr_in addr4;
  555. struct sockaddr_in6 addr6;
  556. if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0)
  557. return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
  558. else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0)
  559. return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
  560. else
  561. return UV_EINVAL;
  562. }
/* Select the outgoing interface for multicast packets.  interface_addr may
 * be NULL (use the default/any interface), an IPv4 dotted quad, or an IPv6
 * address whose scope id names the interface.  The handle must already be
 * bound.  addr4 and addr6 deliberately alias the same sockaddr_storage;
 * whichever parse succeeds determines ss_family. */
int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
  struct sockaddr_storage addr_st;
  struct sockaddr_in* addr4;
  struct sockaddr_in6* addr6;

  addr4 = (struct sockaddr_in*) &addr_st;
  addr6 = (struct sockaddr_in6*) &addr_st;

  if (!interface_addr) {
    memset(&addr_st, 0, sizeof addr_st);
    if (handle->flags & UV_HANDLE_IPV6) {
      addr_st.ss_family = AF_INET6;
      addr6->sin6_scope_id = 0;
    } else {
      addr_st.ss_family = AF_INET;
      addr4->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
    /* nothing, address was parsed */
  } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
    /* nothing, address was parsed */
  } else {
    return UV_EINVAL;
  }

  if (!(handle->flags & UV_HANDLE_BOUND))
    return UV_EBADF;

  if (addr_st.ss_family == AF_INET) {
    /* IP_MULTICAST_IF takes the interface's IPv4 address. */
    if (setsockopt(handle->socket,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (char*) &addr4->sin_addr,
                   sizeof(addr4->sin_addr)) == SOCKET_ERROR) {
      return uv_translate_sys_error(WSAGetLastError());
    }
  } else if (addr_st.ss_family == AF_INET6) {
    /* IPV6_MULTICAST_IF takes an interface index (the scope id). */
    if (setsockopt(handle->socket,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   (char*) &addr6->sin6_scope_id,
                   sizeof(addr6->sin6_scope_id)) == SOCKET_ERROR) {
      return uv_translate_sys_error(WSAGetLastError());
    }
  } else {
    assert(0 && "unexpected address family");
    abort();
  }

  return 0;
}
  609. int uv_udp_set_broadcast(uv_udp_t* handle, int value) {
  610. BOOL optval = (BOOL) value;
  611. if (!(handle->flags & UV_HANDLE_BOUND))
  612. return UV_EBADF;
  613. if (setsockopt(handle->socket,
  614. SOL_SOCKET,
  615. SO_BROADCAST,
  616. (char*) &optval,
  617. sizeof optval)) {
  618. return uv_translate_sys_error(WSAGetLastError());
  619. }
  620. return 0;
  621. }
  622. int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
  623. WSAPROTOCOL_INFOW protocol_info;
  624. int opt_len;
  625. int err;
  626. /* Detect the address family of the socket. */
  627. opt_len = (int) sizeof protocol_info;
  628. if (getsockopt(sock,
  629. SOL_SOCKET,
  630. SO_PROTOCOL_INFOW,
  631. (char*) &protocol_info,
  632. &opt_len) == SOCKET_ERROR) {
  633. return uv_translate_sys_error(GetLastError());
  634. }
  635. err = uv_udp_set_socket(handle->loop,
  636. handle,
  637. sock,
  638. protocol_info.iAddressFamily);
  639. return uv_translate_sys_error(err);
  640. }
/* Generate uv_udp_set_<name>(handle, value): validate `value` with the given
 * predicate, require a bound handle, then apply the IPv4 or IPv6 variant of
 * the socket option depending on the handle's address family. */
#define SOCKOPT_SETTER(name, option4, option6, validate)                      \
  int uv_udp_set_##name(uv_udp_t* handle, int value) {                        \
    DWORD optval = (DWORD) value;                                             \
                                                                              \
    if (!(validate(value))) {                                                 \
      return UV_EINVAL;                                                       \
    }                                                                         \
                                                                              \
    if (!(handle->flags & UV_HANDLE_BOUND))                                   \
      return UV_EBADF;                                                        \
                                                                              \
    if (!(handle->flags & UV_HANDLE_IPV6)) {                                  \
      /* Set IPv4 socket option */                                            \
      if (setsockopt(handle->socket,                                          \
                     IPPROTO_IP,                                              \
                     option4,                                                 \
                     (char*) &optval,                                         \
                     sizeof optval)) {                                        \
        return uv_translate_sys_error(WSAGetLastError());                     \
      }                                                                       \
    } else {                                                                  \
      /* Set IPv6 socket option */                                            \
      if (setsockopt(handle->socket,                                          \
                     IPPROTO_IPV6,                                            \
                     option6,                                                 \
                     (char*) &optval,                                         \
                     sizeof optval)) {                                        \
        return uv_translate_sys_error(WSAGetLastError());                     \
      }                                                                       \
    }                                                                         \
    return 0;                                                                 \
  }

/* Valid value ranges for the generated setters. */
#define VALIDATE_TTL(value) ((value) >= 1 && (value) <= 255)
#define VALIDATE_MULTICAST_TTL(value) ((value) >= -1 && (value) <= 255)
#define VALIDATE_MULTICAST_LOOP(value) (1)

/* Instantiate uv_udp_set_ttl, uv_udp_set_multicast_ttl and
 * uv_udp_set_multicast_loop. */
SOCKOPT_SETTER(ttl,
               IP_TTL,
               IPV6_HOPLIMIT,
               VALIDATE_TTL)
SOCKOPT_SETTER(multicast_ttl,
               IP_MULTICAST_TTL,
               IPV6_MULTICAST_HOPS,
               VALIDATE_MULTICAST_TTL)
SOCKOPT_SETTER(multicast_loop,
               IP_MULTICAST_LOOP,
               IPV6_MULTICAST_LOOP,
               VALIDATE_MULTICAST_LOOP)

/* The helper macros are local to this file; don't leak them. */
#undef SOCKOPT_SETTER
#undef VALIDATE_TTL
#undef VALIDATE_MULTICAST_TTL
#undef VALIDATE_MULTICAST_LOOP
  692. /* This function is an egress point, i.e. it returns libuv errors rather than
  693. * system errors.
  694. */
  695. int uv__udp_bind(uv_udp_t* handle,
  696. const struct sockaddr* addr,
  697. unsigned int addrlen,
  698. unsigned int flags) {
  699. int err;
  700. err = uv_udp_maybe_bind(handle, addr, addrlen, flags);
  701. if (err)
  702. return uv_translate_sys_error(err);
  703. return 0;
  704. }
  705. /* This function is an egress point, i.e. it returns libuv errors rather than
  706. * system errors.
  707. */
  708. int uv__udp_send(uv_udp_send_t* req,
  709. uv_udp_t* handle,
  710. const uv_buf_t bufs[],
  711. unsigned int nbufs,
  712. const struct sockaddr* addr,
  713. unsigned int addrlen,
  714. uv_udp_send_cb send_cb) {
  715. const struct sockaddr* bind_addr;
  716. int err;
  717. if (!(handle->flags & UV_HANDLE_BOUND)) {
  718. if (addrlen == sizeof(uv_addr_ip4_any_)) {
  719. bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
  720. } else if (addrlen == sizeof(uv_addr_ip6_any_)) {
  721. bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
  722. } else {
  723. abort();
  724. }
  725. err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
  726. if (err)
  727. return uv_translate_sys_error(err);
  728. }
  729. err = uv__send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
  730. if (err)
  731. return uv_translate_sys_error(err);
  732. return 0;
  733. }
  734. int uv__udp_try_send(uv_udp_t* handle,
  735. const uv_buf_t bufs[],
  736. unsigned int nbufs,
  737. const struct sockaddr* addr,
  738. unsigned int addrlen) {
  739. return UV_ENOSYS;
  740. }