PageRenderTime 54ms CodeModel.GetById 23ms RepoModel.GetById 0ms app.codeStats 0ms

/deps/uv/src/win/poll.c

https://gitlab.com/GeekSir/node
C | 635 lines | 459 code | 125 blank | 51 comment | 144 complexity | ef400fd7e7cb28d572b1690060d05845 MD5 | raw file
Possible License(s): 0BSD, Apache-2.0, MPL-2.0-no-copyleft-exception, JSON, WTFPL, CC-BY-SA-3.0, Unlicense, ISC, BSD-3-Clause, MIT, AGPL-3.0
  1. /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
  2. *
  3. * Permission is hereby granted, free of charge, to any person obtaining a copy
  4. * of this software and associated documentation files (the "Software"), to
  5. * deal in the Software without restriction, including without limitation the
  6. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  7. * sell copies of the Software, and to permit persons to whom the Software is
  8. * furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice shall be included in
  11. * all copies or substantial portions of the Software.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  18. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  19. * IN THE SOFTWARE.
  20. */
  21. #include <assert.h>
  22. #include <io.h>
  23. #include "uv.h"
  24. #include "internal.h"
  25. #include "handle-inl.h"
  26. #include "req-inl.h"
/* Winsock provider GUIDs for the Microsoft AFD (Ancillary Function Driver)
 * based providers. Only sockets created by one of these providers can use
 * the fast (AFD poll based) path; everything else falls back to slow mode.
 * NOTE(review): presumably the MSAFD Tcpip TCP/UDP/RAW catalog entries --
 * confirm against the registered winsock provider catalog. */
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
  {0xe70f1aa0, 0xab8b, 0x11cf,
      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
  {0xf9eab0c0, 0x26d4, 0x11d0,
      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
  {0x9fc48064, 0x7298, 0x43e4,
      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
};
/* An fd_set with room for exactly one socket, used with select() in the
 * slow poll path. Must stay layout-compatible with the head of a winsock
 * fd_set (fd_count followed by fd_array), since it is cast to fd_set*. */
typedef struct uv_single_fd_set_s {
  unsigned int fd_count;
  SOCKET fd_array[1];
} uv_single_fd_set_t;
/* Shared OVERLAPPED (plus its one-time init guard) for fire-and-forget AFD
 * poll requests whose completion we never want to see in the event loop,
 * and a scratch AFD_POLL_INFO whose output is simply discarded. */
static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;
static AFD_POLL_INFO afd_poll_info_dummy_;
/* One-time initializer for overlapped_dummy_ (run through uv_once).
 * Creates a manual-reset, initially-signaled event and stores it with the
 * low-order bit set: per the documented GetQueuedCompletionStatus hEvent
 * convention, a tagged event suppresses posting a completion packet to the
 * IOCP, so operations using this OVERLAPPED never surface in the loop.
 * Fatal (process abort) if the event cannot be created. */
static void uv__init_overlapped_dummy(void) {
  HANDLE event;

  event = CreateEvent(NULL, TRUE, TRUE, NULL);
  if (event == NULL)
    uv_fatal_error(GetLastError(), "CreateEvent");

  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
  /* Low bit set: do not queue a completion notification for this request. */
  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
}
  50. static OVERLAPPED* uv__get_overlapped_dummy() {
  51. uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
  52. return &overlapped_dummy_;
  53. }
  54. static AFD_POLL_INFO* uv__get_afd_poll_info_dummy() {
  55. return &afd_poll_info_dummy_;
  56. }
/* Submits an AFD poll request (via the peer socket) for the events in
 * handle->events, using whichever of the two per-handle reqs is idle.
 * When a second req is submitted while the other is still in flight, the
 * mask_events_* fields are arranged so that events this new req covers are
 * filtered out of the older req's completion, avoiding duplicate reports.
 * Submission errors are reported asynchronously by queueing the req with
 * an error status. */
static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;
  AFD_POLL_INFO* afd_poll_info;
  DWORD result;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Both reqs are already in flight; callers must not reach this. */
    assert(0);
    return;
  }

  /* Setting Exclusive to TRUE makes the other poll request return if there */
  /* is any. */
  afd_poll_info->Exclusive = TRUE;
  afd_poll_info->NumberOfHandles = 1;
  afd_poll_info->Timeout.QuadPart = INT64_MAX;  /* Effectively infinite. */
  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info->Handles[0].Status = 0;
  afd_poll_info->Handles[0].Events = 0;

  /* Translate uv interest bits into AFD event bits. */
  if (handle->events & UV_READABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
  }
  if (handle->events & UV_WRITABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
  }

  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);

  result = uv_msafd_poll((SOCKET) handle->peer_socket,
                         afd_poll_info,
                         afd_poll_info,
                         &req->u.io.overlapped);
  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, req);
  }
}
/* Forces any outstanding exclusive AFD poll requests on handle->socket to
 * return by submitting one more exclusive poll request for all events.
 * The cancel request's own result is discarded: it writes into the shared
 * dummy AFD_POLL_INFO and uses the dummy OVERLAPPED whose completion is
 * never delivered to the loop. Returns 0 on success (including a pending
 * submission) or a winsock error code. */
static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO afd_poll_info;
  DWORD result;

  afd_poll_info.Exclusive = TRUE;
  afd_poll_info.NumberOfHandles = 1;
  afd_poll_info.Timeout.QuadPart = INT64_MAX;
  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info.Handles[0].Status = 0;
  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;

  result = uv_msafd_poll(handle->socket,
                         &afd_poll_info,
                         uv__get_afd_poll_info_dummy(),
                         uv__get_overlapped_dummy());

  if (result == SOCKET_ERROR) {
    DWORD error = WSAGetLastError();
    if (error != WSA_IO_PENDING)
      return error;
  }

  return 0;
}
/* Completion handler for a fast-mode poll req. Identifies which of the two
 * per-handle reqs completed, frees its slot, reports an error or the
 * observed events (minus those the other in-flight req already covers),
 * then either re-submits polling for still-wanted events or lets a closing
 * handle proceed to its endgame once both reqs are idle. */
static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;

  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    /* Completion for a req this handle doesn't own. */
    assert(0);
    return;
  }

  /* Report an error unless the select was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
    }
  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;

    /* Translate AFD event bits back into uv readable/writable bits. */
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }

    /* Drop events no longer wanted, or already reported via the other req. */
    events &= handle->events & ~mask_events;

    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      if (uv__is_active(handle))
        uv__handle_stop(handle);
    }

    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
      handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
  177. static int uv__fast_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
  178. assert(handle->type == UV_POLL);
  179. assert(!(handle->flags & UV__HANDLE_CLOSING));
  180. assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
  181. handle->events = events;
  182. if (handle->events != 0) {
  183. uv__handle_start(handle);
  184. } else {
  185. uv__handle_stop(handle);
  186. }
  187. if ((handle->events & ~(handle->submitted_events_1 |
  188. handle->submitted_events_2)) != 0) {
  189. uv__fast_poll_submit_poll_req(handle->loop, handle);
  190. }
  191. return 0;
  192. }
  193. static int uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  194. handle->events = 0;
  195. uv__handle_closing(handle);
  196. if (handle->submitted_events_1 == 0 &&
  197. handle->submitted_events_2 == 0) {
  198. uv_want_endgame(loop, (uv_handle_t*) handle);
  199. return 0;
  200. } else {
  201. /* Cancel outstanding poll requests by executing another, unique poll */
  202. /* request that forces the outstanding ones to return. */
  203. return uv__fast_poll_cancel_poll_req(loop, handle);
  204. }
  205. }
  206. static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
  207. WSAPROTOCOL_INFOW* protocol_info) {
  208. SOCKET sock = 0;
  209. sock = WSASocketW(protocol_info->iAddressFamily,
  210. protocol_info->iSocketType,
  211. protocol_info->iProtocol,
  212. protocol_info,
  213. 0,
  214. WSA_FLAG_OVERLAPPED);
  215. if (sock == INVALID_SOCKET) {
  216. return INVALID_SOCKET;
  217. }
  218. if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
  219. goto error;
  220. };
  221. if (CreateIoCompletionPort((HANDLE) sock,
  222. iocp,
  223. (ULONG_PTR) sock,
  224. 0) == NULL) {
  225. goto error;
  226. }
  227. return sock;
  228. error:
  229. closesocket(sock);
  230. return INVALID_SOCKET;
  231. }
  232. static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
  233. WSAPROTOCOL_INFOW* protocol_info) {
  234. int index, i;
  235. SOCKET peer_socket;
  236. index = -1;
  237. for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
  238. if (memcmp((void*) &protocol_info->ProviderId,
  239. (void*) &uv_msafd_provider_ids[i],
  240. sizeof protocol_info->ProviderId) == 0) {
  241. index = i;
  242. }
  243. }
  244. /* Check if the protocol uses an msafd socket. */
  245. if (index < 0) {
  246. return INVALID_SOCKET;
  247. }
  248. /* If we didn't (try) to create a peer socket yet, try to make one. Don't */
  249. /* try again if the peer socket creation failed earlier for the same */
  250. /* protocol. */
  251. peer_socket = loop->poll_peer_sockets[index];
  252. if (peer_socket == 0) {
  253. peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
  254. loop->poll_peer_sockets[index] = peer_socket;
  255. }
  256. return peer_socket;
  257. }
  258. static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
  259. uv_req_t* req = (uv_req_t*) arg;
  260. uv_poll_t* handle = (uv_poll_t*) req->data;
  261. unsigned char reported_events;
  262. int r;
  263. uv_single_fd_set_t rfds, wfds, efds;
  264. struct timeval timeout;
  265. assert(handle->type == UV_POLL);
  266. assert(req->type == UV_POLL_REQ);
  267. if (handle->events & UV_READABLE) {
  268. rfds.fd_count = 1;
  269. rfds.fd_array[0] = handle->socket;
  270. } else {
  271. rfds.fd_count = 0;
  272. }
  273. if (handle->events & UV_WRITABLE) {
  274. wfds.fd_count = 1;
  275. wfds.fd_array[0] = handle->socket;
  276. efds.fd_count = 1;
  277. efds.fd_array[0] = handle->socket;
  278. } else {
  279. wfds.fd_count = 0;
  280. efds.fd_count = 0;
  281. }
  282. /* Make the select() time out after 3 minutes. If select() hangs because */
  283. /* the user closed the socket, we will at least not hang indefinitely. */
  284. timeout.tv_sec = 3 * 60;
  285. timeout.tv_usec = 0;
  286. r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
  287. if (r == SOCKET_ERROR) {
  288. /* Queue this req, reporting an error. */
  289. SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
  290. POST_COMPLETION_FOR_REQ(handle->loop, req);
  291. return 0;
  292. }
  293. reported_events = 0;
  294. if (r > 0) {
  295. if (rfds.fd_count > 0) {
  296. assert(rfds.fd_count == 1);
  297. assert(rfds.fd_array[0] == handle->socket);
  298. reported_events |= UV_READABLE;
  299. }
  300. if (wfds.fd_count > 0) {
  301. assert(wfds.fd_count == 1);
  302. assert(wfds.fd_array[0] == handle->socket);
  303. reported_events |= UV_WRITABLE;
  304. } else if (efds.fd_count > 0) {
  305. assert(efds.fd_count == 1);
  306. assert(efds.fd_array[0] == handle->socket);
  307. reported_events |= UV_WRITABLE;
  308. }
  309. }
  310. SET_REQ_SUCCESS(req);
  311. req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
  312. POST_COMPLETION_FOR_REQ(handle->loop, req);
  313. return 0;
  314. }
/* Slow-mode counterpart of uv__fast_poll_submit_poll_req: claims an idle
 * req, records which events it covers (masking them from the other req's
 * future completion), and queues a thread-pool work item that blocks in
 * select(). Failure to queue is reported asynchronously through the
 * pending-req queue. */
static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Both reqs are already in flight; callers must not reach this. */
    assert(0);
    return;
  }

  /* WT_EXECUTELONGFUNCTION: the work item may block in select() for up to
   * 3 minutes, so hint the thread pool accordingly. */
  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
                         (void*) req,
                         WT_EXECUTELONGFUNCTION)) {
    /* Make this req pending, reporting an error. */
    SET_REQ_ERROR(req, GetLastError());
    uv_insert_pending_req(loop, req);
  }
}
/* Completion handler for a slow-mode poll req. Frees the req slot, reports
 * an error or the events the worker stored in overlapped.InternalHigh
 * (minus events masked because the other in-flight req covers them), then
 * either re-submits polling for still-wanted events or lets a closing
 * handle reach its endgame once both reqs are idle. */
static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  int err;

  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    /* Completion for a req this handle doesn't own. */
    assert(0);
    return;
  }

  if (!REQ_SUCCESS(req)) {
    /* Error. */
    if (handle->events != 0) {
      err = GET_REQ_ERROR(req);
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
    }
  } else {
    /* Got some events. The worker thread deposited the reported event bits
     * in overlapped.InternalHigh. */
    int events = req->u.io.overlapped.InternalHigh & handle->events & ~mask_events;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
      handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
  377. static int uv__slow_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
  378. assert(handle->type == UV_POLL);
  379. assert(!(handle->flags & UV__HANDLE_CLOSING));
  380. assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
  381. handle->events = events;
  382. if (handle->events != 0) {
  383. uv__handle_start(handle);
  384. } else {
  385. uv__handle_stop(handle);
  386. }
  387. if ((handle->events &
  388. ~(handle->submitted_events_1 | handle->submitted_events_2)) != 0) {
  389. uv__slow_poll_submit_poll_req(handle->loop, handle);
  390. }
  391. return 0;
  392. }
  393. static int uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  394. handle->events = 0;
  395. uv__handle_closing(handle);
  396. if (handle->submitted_events_1 == 0 &&
  397. handle->submitted_events_2 == 0) {
  398. uv_want_endgame(loop, (uv_handle_t*) handle);
  399. }
  400. return 0;
  401. }
  402. int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  403. return uv_poll_init_socket(loop, handle, (SOCKET) uv__get_osfhandle(fd));
  404. }
/* Initializes a poll handle for an OS socket.
 * Steps:
 *  - put the socket in non-blocking mode;
 *  - try SIO_BASE_HANDLE to unwrap layered service providers and reach the
 *    base (AFD) handle, improving the odds the fast path works (the ioctl
 *    is absent on XP/2k3, where the call simply fails and the original
 *    socket is kept);
 *  - query the socket's protocol info and pick fast (MSAFD peer socket) or
 *    slow (select worker thread) mode;
 *  - prepare the two poll reqs.
 * Returns 0 on success or a translated system error. */
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  WSAPROTOCOL_INFOW protocol_info;
  int len;
  SOCKET peer_socket, base_socket;
  DWORD bytes;
  DWORD yes = 1;

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

  /* Try to obtain a base handle for the socket. This increases this chances */
  /* that we find an AFD handle and are able to use the fast poll mechanism. */
  /* This will always fail on windows XP/2k3, since they don't support the */
  /* SIO_BASE_HANDLE ioctl. */
#ifndef NDEBUG
  /* Poison the out-parameter so the success-path assert below is
   * meaningful in debug builds. */
  base_socket = INVALID_SOCKET;
#endif

  if (WSAIoctl(socket,
               SIO_BASE_HANDLE,
               NULL,
               0,
               &base_socket,
               sizeof base_socket,
               &bytes,
               NULL,
               NULL) == 0) {
    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
    socket = base_socket;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
  handle->socket = socket;
  handle->events = 0;

  /* Obtain protocol information about the socket. */
  len = sizeof protocol_info;
  if (getsockopt(socket,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &len) != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  /* Get the peer socket that is needed to enable fast poll. If the returned */
  /* value is NULL, the protocol is not implemented by MSAFD and we'll have */
  /* to use slow mode. */
  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);

  if (peer_socket != INVALID_SOCKET) {
    /* Initialize fast poll specific fields. */
    handle->peer_socket = peer_socket;
  } else {
    /* Initialize slow poll specific fields. */
    handle->flags |= UV_HANDLE_POLL_SLOW;
  }

  /* Initialize 2 poll reqs. */
  handle->submitted_events_1 = 0;
  uv_req_init(loop, (uv_req_t*) &(handle->poll_req_1));
  handle->poll_req_1.type = UV_POLL_REQ;
  handle->poll_req_1.data = handle;

  handle->submitted_events_2 = 0;
  uv_req_init(loop, (uv_req_t*) &(handle->poll_req_2));
  handle->poll_req_2.type = UV_POLL_REQ;
  handle->poll_req_2.data = handle;

  return 0;
}
  468. int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
  469. int err;
  470. if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
  471. err = uv__fast_poll_set(handle->loop, handle, events);
  472. } else {
  473. err = uv__slow_poll_set(handle->loop, handle, events);
  474. }
  475. if (err) {
  476. return uv_translate_sys_error(err);
  477. }
  478. handle->poll_cb = cb;
  479. return 0;
  480. }
  481. int uv_poll_stop(uv_poll_t* handle) {
  482. int err;
  483. if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
  484. err = uv__fast_poll_set(handle->loop, handle, 0);
  485. } else {
  486. err = uv__slow_poll_set(handle->loop, handle, 0);
  487. }
  488. return uv_translate_sys_error(err);
  489. }
  490. void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
  491. if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
  492. uv__fast_poll_process_poll_req(loop, handle, req);
  493. } else {
  494. uv__slow_poll_process_poll_req(loop, handle, req);
  495. }
  496. }
  497. int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  498. if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
  499. return uv__fast_poll_close(loop, handle);
  500. } else {
  501. return uv__slow_poll_close(loop, handle);
  502. }
  503. }
/* Final step of closing a poll handle, run by the loop once closing was
 * requested and both poll reqs have returned (the asserts enforce this);
 * marks the handle fully closed via uv__handle_close. */
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
  assert(handle->flags & UV__HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));

  /* Neither req may still be in flight when the handle memory is released. */
  assert(handle->submitted_events_1 == 0);
  assert(handle->submitted_events_2 == 0);

  uv__handle_close(handle);
}