PageRenderTime 53ms CodeModel.GetById 22ms RepoModel.GetById 0ms app.codeStats 1ms

/deps/uv/test/benchmark-multi-accept.c

https://github.com/isaacs/node
C | 451 lines | 312 code | 90 blank | 49 comment | 71 complexity | c83f51dd9f7b025e82abb830fda5cc00 MD5 | raw file
  1. /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
  2. *
  3. * Permission is hereby granted, free of charge, to any person obtaining a copy
  4. * of this software and associated documentation files (the "Software"), to
  5. * deal in the Software without restriction, including without limitation the
  6. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  7. * sell copies of the Software, and to permit persons to whom the Software is
  8. * furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice shall be included in
  11. * all copies or substantial portions of the Software.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  18. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  19. * IN THE SOFTWARE.
  20. */
  21. #include "task.h"
  22. #include "uv.h"
  23. #define IPC_PIPE_NAME TEST_PIPENAME
  24. #define NUM_CONNECTS (250 * 1000)
/* Either kind of stream the benchmark may listen on; the union exists only
 * to size the raw storage below to the larger of the two handle types.
 */
union stream_handle {
  uv_pipe_t pipe;
  uv_tcp_t tcp;
};

/* Use as (uv_stream_t *) &handle_storage -- it's kind of clunky but it
 * avoids aliasing warnings.
 */
typedef unsigned char handle_storage_t[sizeof(union stream_handle)];
/* Used for passing around the listen handle, not part of the benchmark proper.
 * We have an overabundance of server types here. It works like this:
 *
 * 1. The main thread starts an IPC pipe server.
 * 2. The worker threads connect to the IPC server and obtain a listen handle.
 * 3. The worker threads start accepting requests on the listen handle.
 * 4. The main thread starts connecting repeatedly.
 *
 * Step #4 should perhaps be farmed out over several threads.
 */
struct ipc_server_ctx {
  handle_storage_t server_handle;  /* The listen handle being handed out. */
  unsigned int num_connects;       /* Workers left to serve before closing. */
  uv_pipe_t ipc_pipe;              /* Pipe the workers connect to. */
};

/* Per-accepted-worker state on the IPC server side; freed when the
 * connected pipe is closed after the handle has been written.
 */
struct ipc_peer_ctx {
  handle_storage_t peer_handle;
  uv_write_t write_req;
};

/* Worker-side state used while fetching the listen handle over IPC. */
struct ipc_client_ctx {
  uv_connect_t connect_req;
  uv_stream_t* server_handle;  /* Out: receives the accepted listen handle. */
  uv_pipe_t ipc_pipe;
  char scratch[16];            /* Read buffer for the 4-byte "PING". */
};

/* Used in the actual benchmark. */
struct server_ctx {
  handle_storage_t server_handle;  /* Listen handle for this worker thread. */
  unsigned int num_connects;       /* Connections this thread accepted. */
  uv_async_t async_handle;         /* Signaled by main to shut the thread down. */
  uv_thread_t thread_id;
  uv_sem_t semaphore;              /* Start/ready handshake with main thread. */
};

/* One benchmark client; reconnects until num_connects reaches zero. */
struct client_ctx {
  handle_storage_t client_handle;
  unsigned int num_connects;   /* Remaining connections to make. */
  uv_connect_t connect_req;
  uv_idle_t idle_handle;       /* Defers the close to the next loop tick. */
};
/* IPC plumbing (handle hand-off between main thread and workers). */
static void ipc_connection_cb(uv_stream_t* ipc_pipe, int status);
static void ipc_write_cb(uv_write_t* req, int status);
static void ipc_close_cb(uv_handle_t* handle);
static void ipc_connect_cb(uv_connect_t* req, int status);
static void ipc_read_cb(uv_stream_t* handle,
                        ssize_t nread,
                        const uv_buf_t* buf);
static void ipc_alloc_cb(uv_handle_t* handle,
                         size_t suggested_size,
                         uv_buf_t* buf);

/* Server (accepting) side of the benchmark. */
static void sv_async_cb(uv_async_t* handle);
static void sv_connection_cb(uv_stream_t* server_handle, int status);
static void sv_read_cb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf);
static void sv_alloc_cb(uv_handle_t* handle,
                        size_t suggested_size,
                        uv_buf_t* buf);

/* Client (connecting) side of the benchmark. */
static void cl_connect_cb(uv_connect_t* req, int status);
static void cl_idle_cb(uv_idle_t* handle);
static void cl_close_cb(uv_handle_t* handle);

/* Address every client connects to; filled in by test_tcp(). */
static struct sockaddr_in listen_addr;
  92. static void ipc_connection_cb(uv_stream_t* ipc_pipe, int status) {
  93. struct ipc_server_ctx* sc;
  94. struct ipc_peer_ctx* pc;
  95. uv_loop_t* loop;
  96. uv_buf_t buf;
  97. loop = ipc_pipe->loop;
  98. buf = uv_buf_init("PING", 4);
  99. sc = container_of(ipc_pipe, struct ipc_server_ctx, ipc_pipe);
  100. pc = calloc(1, sizeof(*pc));
  101. ASSERT(pc != NULL);
  102. if (ipc_pipe->type == UV_TCP)
  103. ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) &pc->peer_handle));
  104. else if (ipc_pipe->type == UV_NAMED_PIPE)
  105. ASSERT(0 == uv_pipe_init(loop, (uv_pipe_t*) &pc->peer_handle, 1));
  106. else
  107. ASSERT(0);
  108. ASSERT(0 == uv_accept(ipc_pipe, (uv_stream_t*) &pc->peer_handle));
  109. ASSERT(0 == uv_write2(&pc->write_req,
  110. (uv_stream_t*) &pc->peer_handle,
  111. &buf,
  112. 1,
  113. (uv_stream_t*) &sc->server_handle,
  114. ipc_write_cb));
  115. if (--sc->num_connects == 0)
  116. uv_close((uv_handle_t*) ipc_pipe, NULL);
  117. }
  118. static void ipc_write_cb(uv_write_t* req, int status) {
  119. struct ipc_peer_ctx* ctx;
  120. ctx = container_of(req, struct ipc_peer_ctx, write_req);
  121. uv_close((uv_handle_t*) &ctx->peer_handle, ipc_close_cb);
  122. }
  123. static void ipc_close_cb(uv_handle_t* handle) {
  124. struct ipc_peer_ctx* ctx;
  125. ctx = container_of(handle, struct ipc_peer_ctx, peer_handle);
  126. free(ctx);
  127. }
/* Worker side: connected to the IPC server; start reading so we receive
 * the "PING" payload and the listen handle travelling with it.
 */
static void ipc_connect_cb(uv_connect_t* req, int status) {
  struct ipc_client_ctx* ctx;

  ctx = container_of(req, struct ipc_client_ctx, connect_req);
  ASSERT(0 == status);
  ASSERT(0 == uv_read_start((uv_stream_t*) &ctx->ipc_pipe,
                            ipc_alloc_cb,
                            ipc_read_cb));
}
  136. static void ipc_alloc_cb(uv_handle_t* handle,
  137. size_t suggested_size,
  138. uv_buf_t* buf) {
  139. struct ipc_client_ctx* ctx;
  140. ctx = container_of(handle, struct ipc_client_ctx, ipc_pipe);
  141. buf->base = ctx->scratch;
  142. buf->len = sizeof(ctx->scratch);
  143. }
  144. static void ipc_read_cb(uv_stream_t* handle,
  145. ssize_t nread,
  146. const uv_buf_t* buf) {
  147. struct ipc_client_ctx* ctx;
  148. uv_loop_t* loop;
  149. uv_handle_type type;
  150. uv_pipe_t* ipc_pipe;
  151. ipc_pipe = (uv_pipe_t*) handle;
  152. ctx = container_of(ipc_pipe, struct ipc_client_ctx, ipc_pipe);
  153. loop = ipc_pipe->loop;
  154. ASSERT(1 == uv_pipe_pending_count(ipc_pipe));
  155. type = uv_pipe_pending_type(ipc_pipe);
  156. if (type == UV_TCP)
  157. ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) ctx->server_handle));
  158. else if (type == UV_NAMED_PIPE)
  159. ASSERT(0 == uv_pipe_init(loop, (uv_pipe_t*) ctx->server_handle, 0));
  160. else
  161. ASSERT(0);
  162. ASSERT(0 == uv_accept(handle, ctx->server_handle));
  163. uv_close((uv_handle_t*) &ctx->ipc_pipe, NULL);
  164. }
  165. /* Set up an IPC pipe server that hands out listen sockets to the worker
  166. * threads. It's kind of cumbersome for such a simple operation, maybe we
  167. * should revive uv_import() and uv_export().
  168. */
/* Set up an IPC pipe server that hands out listen sockets to the worker
 * threads. It's kind of cumbersome for such a simple operation, maybe we
 * should revive uv_import() and uv_export().
 *
 * Runs on the main thread. Binds the benchmark listen handle, serves it to
 * num_servers workers over the IPC pipe, and synchronizes with the workers
 * through their per-server semaphores.
 */
static void send_listen_handles(uv_handle_type type,
                                unsigned int num_servers,
                                struct server_ctx* servers) {
  struct ipc_server_ctx ctx;
  uv_loop_t* loop;
  unsigned int i;

  loop = uv_default_loop();
  ctx.num_connects = num_servers;  /* Close the IPC pipe after this many. */

  if (type == UV_TCP) {
    ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) &ctx.server_handle));
    ASSERT(0 == uv_tcp_bind((uv_tcp_t*) &ctx.server_handle,
                            (const struct sockaddr*) &listen_addr,
                            0));
  }
  else
    ASSERT(0);  /* Only TCP is exercised by the current benchmarks. */

  /* We need to initialize this pipe with ipc=0 - this is not a uv_pipe we'll
   * be sending handles over, it's just for listening for new connections.
   * If we accept a connection then the connected pipe must be initialized
   * with ipc=1.
   */
  ASSERT(0 == uv_pipe_init(loop, &ctx.ipc_pipe, 0));
  ASSERT(0 == uv_pipe_bind(&ctx.ipc_pipe, IPC_PIPE_NAME));
  ASSERT(0 == uv_listen((uv_stream_t*) &ctx.ipc_pipe, 128, ipc_connection_cb));

  /* Release the workers; each will connect and fetch the listen handle. */
  for (i = 0; i < num_servers; i++)
    uv_sem_post(&servers[i].semaphore);

  /* Runs until ipc_connection_cb closes the IPC pipe after serving all
   * workers; the second uv_run drains the close of the listen handle.
   */
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  uv_close((uv_handle_t*) &ctx.server_handle, NULL);
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));

  /* Wait until every worker has received its handle (each posts back in
   * server_cb) before the benchmark's connect storm starts.
   */
  for (i = 0; i < num_servers; i++)
    uv_sem_wait(&servers[i].semaphore);
}
  201. static void get_listen_handle(uv_loop_t* loop, uv_stream_t* server_handle) {
  202. struct ipc_client_ctx ctx;
  203. ctx.server_handle = server_handle;
  204. ctx.server_handle->data = "server handle";
  205. ASSERT(0 == uv_pipe_init(loop, &ctx.ipc_pipe, 1));
  206. uv_pipe_connect(&ctx.connect_req,
  207. &ctx.ipc_pipe,
  208. IPC_PIPE_NAME,
  209. ipc_connect_cb);
  210. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  211. }
/* Worker thread entry point: create a private loop, fetch the shared
 * listen handle from the main thread, then accept connections until the
 * main thread signals shutdown via the async handle (see sv_async_cb).
 */
static void server_cb(void *arg) {
  struct server_ctx *ctx;
  uv_loop_t loop;

  ctx = arg;
  ASSERT(0 == uv_loop_init(&loop));
  ASSERT(0 == uv_async_init(&loop, &ctx->async_handle, sv_async_cb));
  /* Unref so the async handle alone doesn't keep the loop alive. */
  uv_unref((uv_handle_t*) &ctx->async_handle);

  /* Wait until the main thread is ready. */
  uv_sem_wait(&ctx->semaphore);
  get_listen_handle(&loop, (uv_stream_t*) &ctx->server_handle);
  /* Tell the main thread we have the handle. */
  uv_sem_post(&ctx->semaphore);

  /* Now start the actual benchmark. */
  ASSERT(0 == uv_listen((uv_stream_t*) &ctx->server_handle,
                        128,
                        sv_connection_cb));
  ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT));

  uv_loop_close(&loop);
}
  230. static void sv_async_cb(uv_async_t* handle) {
  231. struct server_ctx* ctx;
  232. ctx = container_of(handle, struct server_ctx, async_handle);
  233. uv_close((uv_handle_t*) &ctx->server_handle, NULL);
  234. uv_close((uv_handle_t*) &ctx->async_handle, NULL);
  235. }
  236. static void sv_connection_cb(uv_stream_t* server_handle, int status) {
  237. handle_storage_t* storage;
  238. struct server_ctx* ctx;
  239. ctx = container_of(server_handle, struct server_ctx, server_handle);
  240. ASSERT(status == 0);
  241. storage = malloc(sizeof(*storage));
  242. ASSERT(storage != NULL);
  243. if (server_handle->type == UV_TCP)
  244. ASSERT(0 == uv_tcp_init(server_handle->loop, (uv_tcp_t*) storage));
  245. else if (server_handle->type == UV_NAMED_PIPE)
  246. ASSERT(0 == uv_pipe_init(server_handle->loop, (uv_pipe_t*) storage, 0));
  247. else
  248. ASSERT(0);
  249. ASSERT(0 == uv_accept(server_handle, (uv_stream_t*) storage));
  250. ASSERT(0 == uv_read_start((uv_stream_t*) storage, sv_alloc_cb, sv_read_cb));
  251. ctx->num_connects++;
  252. }
  253. static void sv_alloc_cb(uv_handle_t* handle,
  254. size_t suggested_size,
  255. uv_buf_t* buf) {
  256. static char slab[32];
  257. buf->base = slab;
  258. buf->len = sizeof(slab);
  259. }
  260. static void sv_read_cb(uv_stream_t* handle,
  261. ssize_t nread,
  262. const uv_buf_t* buf) {
  263. ASSERT(nread == UV_EOF);
  264. uv_close((uv_handle_t*) handle, (uv_close_cb) free);
  265. }
  266. static void cl_connect_cb(uv_connect_t* req, int status) {
  267. struct client_ctx* ctx = container_of(req, struct client_ctx, connect_req);
  268. uv_idle_start(&ctx->idle_handle, cl_idle_cb);
  269. ASSERT(0 == status);
  270. }
  271. static void cl_idle_cb(uv_idle_t* handle) {
  272. struct client_ctx* ctx = container_of(handle, struct client_ctx, idle_handle);
  273. uv_close((uv_handle_t*) &ctx->client_handle, cl_close_cb);
  274. uv_idle_stop(&ctx->idle_handle);
  275. }
/* Connection fully closed: either this client is done (close its idle
 * handle so the loop can drain) or start the next connect, reusing the
 * same handle storage and connect request.
 */
static void cl_close_cb(uv_handle_t* handle) {
  struct client_ctx* ctx;

  ctx = container_of(handle, struct client_ctx, client_handle);

  if (--ctx->num_connects == 0) {
    uv_close((uv_handle_t*) &ctx->idle_handle, NULL);
    return;
  }

  /* Re-initialize: a closed handle cannot be reused without init. */
  ASSERT(0 == uv_tcp_init(handle->loop, (uv_tcp_t*) &ctx->client_handle));
  ASSERT(0 == uv_tcp_connect(&ctx->connect_req,
                             (uv_tcp_t*) &ctx->client_handle,
                             (const struct sockaddr*) &listen_addr,
                             cl_connect_cb));
}
/* Benchmark driver: spin up num_servers accept threads sharing one listen
 * socket, hammer it with num_clients reconnecting clients for NUM_CONNECTS
 * total connections, and report aggregate and per-thread accept rates.
 */
static int test_tcp(unsigned int num_servers, unsigned int num_clients) {
  struct server_ctx* servers;
  struct client_ctx* clients;
  uv_loop_t* loop;
  uv_tcp_t* handle;
  unsigned int i;
  double time;

  ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &listen_addr));
  loop = uv_default_loop();

  servers = calloc(num_servers, sizeof(servers[0]));
  clients = calloc(num_clients, sizeof(clients[0]));
  ASSERT(servers != NULL);
  ASSERT(clients != NULL);

  /* We're making the assumption here that from the perspective of the
   * OS scheduler, threads are functionally equivalent to and interchangeable
   * with full-blown processes.
   */
  for (i = 0; i < num_servers; i++) {
    struct server_ctx* ctx = servers + i;
    ASSERT(0 == uv_sem_init(&ctx->semaphore, 0));
    ASSERT(0 == uv_thread_create(&ctx->thread_id, server_cb, ctx));
  }

  /* Hand the shared listen handle to every worker before connecting. */
  send_listen_handles(UV_TCP, num_servers, servers);

  for (i = 0; i < num_clients; i++) {
    struct client_ctx* ctx = clients + i;
    /* NOTE: NUM_CONNECTS is assumed divisible by num_clients; any
     * remainder is silently dropped from the total.
     */
    ctx->num_connects = NUM_CONNECTS / num_clients;
    handle = (uv_tcp_t*) &ctx->client_handle;
    handle->data = "client handle";
    ASSERT(0 == uv_tcp_init(loop, handle));
    ASSERT(0 == uv_tcp_connect(&ctx->connect_req,
                               handle,
                               (const struct sockaddr*) &listen_addr,
                               cl_connect_cb));
    ASSERT(0 == uv_idle_init(loop, &ctx->idle_handle));
  }

  /* Time the whole connect/accept storm; the loop drains when every
   * client has finished its quota (see cl_close_cb).
   */
  {
    uint64_t t = uv_hrtime();
    ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
    t = uv_hrtime() - t;
    time = t / 1e9;  /* uv_hrtime() is in nanoseconds. */
  }

  /* Signal each worker to shut down, then reap it. */
  for (i = 0; i < num_servers; i++) {
    struct server_ctx* ctx = servers + i;
    uv_async_send(&ctx->async_handle);
    ASSERT(0 == uv_thread_join(&ctx->thread_id));
    uv_sem_destroy(&ctx->semaphore);
  }

  printf("accept%u: %.0f accepts/sec (%u total)\n",
         num_servers,
         NUM_CONNECTS / time,
         NUM_CONNECTS);

  /* Per-thread breakdown shows how evenly the kernel spread the load. */
  for (i = 0; i < num_servers; i++) {
    struct server_ctx* ctx = servers + i;
    printf("  thread #%u: %.0f accepts/sec (%u total, %.1f%%)\n",
           i,
           ctx->num_connects / time,
           ctx->num_connects,
           ctx->num_connects * 100.0 / NUM_CONNECTS);
  }

  free(clients);
  free(servers);

  MAKE_VALGRIND_HAPPY();
  return 0;
}
/* Benchmark entry points: same 40-client workload against 2, 4 and 8
 * accept threads to measure multi-accept scalability.
 */
BENCHMARK_IMPL(tcp_multi_accept2) {
  return test_tcp(2, 40);
}

BENCHMARK_IMPL(tcp_multi_accept4) {
  return test_tcp(4, 40);
}

BENCHMARK_IMPL(tcp_multi_accept8) {
  return test_tcp(8, 40);
}