PageRenderTime 73ms CodeModel.GetById 25ms RepoModel.GetById 0ms app.codeStats 1ms

/c_src/erlzmq_nif.c

https://github.com/betawaffle/erlzmq2
C | 1283 lines | 1118 code | 115 blank | 50 comment | 155 complexity | 82b242fe104cab7952b74c8ddbeb6207 MD5 | raw file
  1. // -*- coding:utf-8;Mode:C;tab-width:2;c-basic-offset:2;indent-tabs-mode:nil -*-
  2. // ex: set softtabstop=2 tabstop=2 shiftwidth=2 expandtab fileencoding=utf-8:
  3. //
  4. // Copyright (c) 2011 Yurii Rashkovskii, Evax Software and Michael Truog
  5. //
  6. // Permission is hereby granted, free of charge, to any person obtaining a copy
  7. // of this software and associated documentation files (the "Software"), to deal
  8. // in the Software without restriction, including without limitation the rights
  9. // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. // copies of the Software, and to permit persons to whom the Software is
  11. // furnished to do so, subject to the following conditions:
  12. //
  13. // The above copyright notice and this permission notice shall be included in
  14. // all copies or substantial portions of the Software.
  15. //
  16. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  19. // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. // THE SOFTWARE.
#include "zmq.h"
#include "erl_nif.h"
#include "vector.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define ERLZMQ_MAX_CONCURRENT_REQUESTS 16384
// NIF resource types wrapping erlzmq_context_t / erlzmq_socket_t.
// Registered at load time (the on_load callback is outside this chunk).
static ErlNifResourceType* erlzmq_nif_resource_context;
static ErlNifResourceType* erlzmq_nif_resource_socket;

// One 0MQ context plus the machinery used to talk to its polling thread.
typedef struct erlzmq_context {
  void * context_zmq;          // result of zmq_init()
  void * thread_socket;        // inproc PUSH socket; requests go to the polling thread
  char * thread_socket_name;   // strdup'd inproc endpoint; set to NULL on term to
                               // flag that the context is shutting down
  int64_t socket_index;        // next index handed out to a newly created socket
  ErlNifTid polling_tid;       // polling_thread() thread id
  ErlNifMutex * mutex;         // guards thread_socket / thread_socket_name
} erlzmq_context_t;

// Values for erlzmq_socket_t.active (see add_active_req / polling_thread).
#define ERLZMQ_SOCKET_ACTIVE_OFF 0
#define ERLZMQ_SOCKET_ACTIVE_PENDING 1
#define ERLZMQ_SOCKET_ACTIVE_ON 2

// One 0MQ socket owned by a context.
typedef struct erlzmq_socket {
  erlzmq_context_t * context;  // owning context (not reference-counted here)
  int64_t socket_index;        // identifier returned to Erlang alongside the resource
  void * socket_zmq;           // result of zmq_socket()
  int active;                  // one of ERLZMQ_SOCKET_ACTIVE_*
  ErlNifMutex * mutex;         // serializes libzmq calls on socket_zmq
} erlzmq_socket_t;

// Request codes carried to the polling thread inside a zmq message.
#define ERLZMQ_THREAD_REQUEST_SEND 1
#define ERLZMQ_THREAD_REQUEST_RECV 2
#define ERLZMQ_THREAD_REQUEST_CLOSE 3
#define ERLZMQ_THREAD_REQUEST_TERM 4

// A request handed to the polling thread.  `type` selects the union arm;
// each arm carries a private env + ref used to reply to `pid` via enif_send.
typedef struct {
  int type;
  union {
    struct {
      erlzmq_socket_t * socket;
      ErlNifEnv * env;   // process-independent env owning `ref`
      ERL_NIF_TERM ref;  // reference returned to the caller, echoed in the reply
      int flags;         // zmq send flags
      zmq_msg_t msg;     // message body to send; closed by the polling thread
      ErlNifPid pid;     // caller to notify
    } send;
    struct {
      erlzmq_socket_t * socket;
      ErlNifEnv * env;
      ERL_NIF_TERM ref;
      int flags;         // zmq recv flags
      ErlNifPid pid;
    } recv;
    struct {
      erlzmq_socket_t * socket;
      ErlNifEnv * env;
      ERL_NIF_TERM ref;
      ErlNifPid pid;
    } close;
    struct {
      ErlNifEnv * env;
      ERL_NIF_TERM ref;
      ErlNifPid pid;
    } term;
  } data;
} erlzmq_thread_request_t;
// Prototypes
// Expands to the standard NIF entry-point signature.
#define NIF(name) \
ERL_NIF_TERM name(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
NIF(erlzmq_nif_context);
NIF(erlzmq_nif_socket);
NIF(erlzmq_nif_bind);
NIF(erlzmq_nif_connect);
NIF(erlzmq_nif_setsockopt);
NIF(erlzmq_nif_getsockopt);
NIF(erlzmq_nif_send);
NIF(erlzmq_nif_recv);
NIF(erlzmq_nif_close);
NIF(erlzmq_nif_term);
NIF(erlzmq_nif_version);
static void * polling_thread(void * handle);
static ERL_NIF_TERM add_active_req(ErlNifEnv* env, erlzmq_socket_t * socket);
static ERL_NIF_TERM return_zmq_errno(ErlNifEnv* env, int const value);

// Erlang-visible function table: {name, arity, C implementation}.
static ErlNifFunc nif_funcs[] =
{
  {"context", 1, erlzmq_nif_context},
  {"socket", 3, erlzmq_nif_socket},
  {"bind", 2, erlzmq_nif_bind},
  {"connect", 2, erlzmq_nif_connect},
  {"setsockopt", 3, erlzmq_nif_setsockopt},
  {"getsockopt", 2, erlzmq_nif_getsockopt},
  {"send", 3, erlzmq_nif_send},
  {"recv", 2, erlzmq_nif_recv},
  {"close", 1, erlzmq_nif_close},
  {"term", 1, erlzmq_nif_term},
  {"version", 0, erlzmq_nif_version}
};
  116. NIF(erlzmq_nif_context)
  117. {
  118. int thread_count;
  119. if (! enif_get_int(env, argv[0], &thread_count)) {
  120. return enif_make_badarg(env);
  121. }
  122. erlzmq_context_t * context = enif_alloc_resource(erlzmq_nif_resource_context,
  123. sizeof(erlzmq_context_t));
  124. assert(context);
  125. context->context_zmq = zmq_init(thread_count);
  126. if (!context->context_zmq) {
  127. return return_zmq_errno(env, zmq_errno());
  128. }
  129. char thread_socket_id[64];
  130. sprintf(thread_socket_id, "inproc://erlzmq-%ld", (long int) context);
  131. context->thread_socket = zmq_socket(context->context_zmq, ZMQ_PUSH);
  132. assert(context->thread_socket);
  133. context->mutex = enif_mutex_create("erlzmq_context_t_mutex");
  134. assert(context->mutex);
  135. if (zmq_bind(context->thread_socket, thread_socket_id)) {
  136. zmq_close(context->thread_socket);
  137. enif_mutex_destroy(context->mutex);
  138. zmq_term(context->context_zmq);
  139. enif_release_resource(context);
  140. return return_zmq_errno(env, zmq_errno());
  141. }
  142. context->thread_socket_name = strdup(thread_socket_id);
  143. assert(context->thread_socket_name);
  144. context->socket_index = 1;
  145. int const value_errno = enif_thread_create("erlzmq_polling_thread",
  146. &context->polling_tid,
  147. polling_thread, context, NULL);
  148. if (value_errno) {
  149. free(context->thread_socket_name);
  150. zmq_close(context->thread_socket);
  151. zmq_term(context->context_zmq);
  152. enif_release_resource(context);
  153. return return_zmq_errno(env, value_errno);
  154. }
  155. return enif_make_tuple2(env, enif_make_atom(env, "ok"),
  156. enif_make_resource(env, context));
  157. }
  158. NIF(erlzmq_nif_socket)
  159. {
  160. erlzmq_context_t * context;
  161. int socket_type;
  162. int active;
  163. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_context,
  164. (void **) &context)) {
  165. return enif_make_badarg(env);
  166. }
  167. if (! enif_get_int(env, argv[1], &socket_type)) {
  168. return enif_make_badarg(env);
  169. }
  170. if (! enif_get_int(env, argv[2], &active)) {
  171. return enif_make_badarg(env);
  172. }
  173. erlzmq_socket_t * socket = enif_alloc_resource(erlzmq_nif_resource_socket,
  174. sizeof(erlzmq_socket_t));
  175. assert(socket);
  176. socket->context = context;
  177. socket->socket_index = context->socket_index++;
  178. socket->socket_zmq = zmq_socket(context->context_zmq, socket_type);
  179. if (!socket->socket_zmq) {
  180. return return_zmq_errno(env, zmq_errno());
  181. }
  182. socket->active = active;
  183. socket->mutex = enif_mutex_create("erlzmq_socket_t_mutex");
  184. assert(socket->mutex);
  185. return enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_tuple2(env,
  186. enif_make_uint64(env, socket->socket_index),
  187. enif_make_resource(env, socket)));
  188. }
  189. NIF(erlzmq_nif_bind)
  190. {
  191. erlzmq_socket_t * socket;
  192. unsigned endpoint_length;
  193. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  194. (void **) &socket)) {
  195. return enif_make_badarg(env);
  196. }
  197. if (! enif_get_list_length(env, argv[1], &endpoint_length)) {
  198. return enif_make_badarg(env);
  199. }
  200. char * endpoint = (char *) malloc(endpoint_length + 1);
  201. if (! enif_get_string(env, argv[1], endpoint, endpoint_length + 1,
  202. ERL_NIF_LATIN1)) {
  203. return enif_make_badarg(env);
  204. }
  205. enif_mutex_lock(socket->mutex);
  206. if (zmq_bind(socket->socket_zmq, endpoint)) {
  207. enif_mutex_unlock(socket->mutex);
  208. free(endpoint);
  209. return return_zmq_errno(env, zmq_errno());
  210. }
  211. else {
  212. enif_mutex_unlock(socket->mutex);
  213. free(endpoint);
  214. if (socket->active == ERLZMQ_SOCKET_ACTIVE_PENDING) {
  215. return add_active_req(env, socket);
  216. }
  217. else {
  218. return enif_make_atom(env, "ok");
  219. }
  220. }
  221. }
  222. NIF(erlzmq_nif_connect)
  223. {
  224. erlzmq_socket_t * socket;
  225. unsigned endpoint_length;
  226. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  227. (void **) &socket)) {
  228. return enif_make_badarg(env);
  229. }
  230. if (! enif_get_list_length(env, argv[1], &endpoint_length)) {
  231. return enif_make_badarg(env);
  232. }
  233. char * endpoint = (char *) malloc(endpoint_length + 1);
  234. if (! enif_get_string(env, argv[1], endpoint, endpoint_length + 1,
  235. ERL_NIF_LATIN1)) {
  236. return enif_make_badarg(env);
  237. }
  238. enif_mutex_lock(socket->mutex);
  239. if (zmq_connect(socket->socket_zmq, endpoint)) {
  240. enif_mutex_unlock(socket->mutex);
  241. free(endpoint);
  242. return return_zmq_errno(env, zmq_errno());
  243. }
  244. else {
  245. enif_mutex_unlock(socket->mutex);
  246. free(endpoint);
  247. if (socket->active == ERLZMQ_SOCKET_ACTIVE_PENDING) {
  248. return add_active_req(env, socket);
  249. }
  250. else {
  251. return enif_make_atom(env, "ok");
  252. }
  253. }
  254. }
  255. NIF(erlzmq_nif_setsockopt)
  256. {
  257. erlzmq_socket_t * socket;
  258. int option_name;
  259. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  260. (void **) &socket)) {
  261. return enif_make_badarg(env);
  262. }
  263. if (! enif_get_int(env, argv[1], &option_name)) {
  264. return enif_make_badarg(env);
  265. }
  266. ErlNifUInt64 value_uint64;
  267. ErlNifSInt64 value_int64;
  268. ErlNifBinary value_binary;
  269. int value_int;
  270. void *option_value;
  271. size_t option_len;
  272. switch (option_name) {
  273. // uint64_t
  274. case ZMQ_HWM:
  275. case ZMQ_AFFINITY:
  276. case ZMQ_SNDBUF:
  277. case ZMQ_RCVBUF:
  278. if (! enif_get_uint64(env, argv[2], &value_uint64)) {
  279. return enif_make_badarg(env);
  280. }
  281. option_value = &value_uint64;
  282. option_len = sizeof(int64_t);
  283. break;
  284. // int64_t
  285. case ZMQ_SWAP:
  286. case ZMQ_RATE:
  287. case ZMQ_RECOVERY_IVL:
  288. case ZMQ_MCAST_LOOP:
  289. if (! enif_get_int64(env, argv[2], &value_int64)) {
  290. return enif_make_badarg(env);
  291. }
  292. option_value = &value_int64;
  293. option_len = sizeof(int64_t);
  294. break;
  295. // binary
  296. case ZMQ_IDENTITY:
  297. case ZMQ_SUBSCRIBE:
  298. case ZMQ_UNSUBSCRIBE:
  299. if (! enif_inspect_iolist_as_binary(env, argv[2], &value_binary)) {
  300. return enif_make_badarg(env);
  301. }
  302. option_value = value_binary.data;
  303. option_len = value_binary.size;
  304. break;
  305. // int
  306. case ZMQ_LINGER:
  307. case ZMQ_RECONNECT_IVL:
  308. case ZMQ_BACKLOG:
  309. if (! enif_get_int(env, argv[2], &value_int)) {
  310. return enif_make_badarg(env);
  311. }
  312. option_value = &value_int;
  313. option_len = sizeof(int);
  314. break;
  315. default:
  316. return enif_make_badarg(env);
  317. }
  318. enif_mutex_lock(socket->mutex);
  319. if (zmq_setsockopt(socket->socket_zmq, option_name,
  320. option_value, option_len)) {
  321. enif_mutex_unlock(socket->mutex);
  322. return return_zmq_errno(env, zmq_errno());
  323. }
  324. else {
  325. enif_mutex_unlock(socket->mutex);
  326. return enif_make_atom(env, "ok");
  327. }
  328. }
  329. NIF(erlzmq_nif_getsockopt)
  330. {
  331. erlzmq_socket_t * socket;
  332. int option_name;
  333. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  334. (void **) &socket)) {
  335. return enif_make_badarg(env);
  336. }
  337. if (! enif_get_int(env, argv[1], &option_name)) {
  338. return enif_make_badarg(env);
  339. }
  340. ErlNifBinary value_binary;
  341. int64_t value_int64;
  342. int64_t value_uint64;
  343. char option_value[256];
  344. int value_int;
  345. size_t option_len;
  346. switch(option_name) {
  347. // int64_t
  348. case ZMQ_RCVMORE:
  349. case ZMQ_SWAP:
  350. case ZMQ_RATE:
  351. case ZMQ_RECOVERY_IVL:
  352. case ZMQ_RECOVERY_IVL_MSEC:
  353. case ZMQ_MCAST_LOOP:
  354. option_len = sizeof(value_int64);
  355. enif_mutex_lock(socket->mutex);
  356. if (zmq_getsockopt(socket->socket_zmq, option_name,
  357. &value_int64, &option_len)) {
  358. enif_mutex_unlock(socket->mutex);
  359. return return_zmq_errno(env, zmq_errno());
  360. }
  361. enif_mutex_unlock(socket->mutex);
  362. return enif_make_tuple2(env, enif_make_atom(env, "ok"),
  363. enif_make_int64(env, value_int64));
  364. // uint64_t
  365. case ZMQ_HWM:
  366. case ZMQ_AFFINITY:
  367. case ZMQ_SNDBUF:
  368. case ZMQ_RCVBUF:
  369. option_len = sizeof(value_uint64);
  370. enif_mutex_lock(socket->mutex);
  371. if (zmq_getsockopt(socket->socket_zmq, option_name,
  372. &value_uint64, &option_len)) {
  373. enif_mutex_unlock(socket->mutex);
  374. return return_zmq_errno(env, zmq_errno());
  375. }
  376. enif_mutex_unlock(socket->mutex);
  377. return enif_make_tuple2(env, enif_make_atom(env, "ok"),
  378. enif_make_uint64(env, value_uint64));
  379. // binary
  380. case ZMQ_IDENTITY:
  381. option_len = sizeof(option_value);
  382. enif_mutex_lock(socket->mutex);
  383. if (zmq_getsockopt(socket->socket_zmq, option_name,
  384. option_value, &option_len)) {
  385. enif_mutex_unlock(socket->mutex);
  386. return return_zmq_errno(env, zmq_errno());
  387. }
  388. enif_mutex_unlock(socket->mutex);
  389. enif_alloc_binary(option_len, &value_binary);
  390. memcpy(value_binary.data, option_value, option_len);
  391. return enif_make_tuple2(env, enif_make_atom(env, "ok"),
  392. enif_make_binary(env, &value_binary));
  393. // int
  394. case ZMQ_TYPE:
  395. case ZMQ_LINGER:
  396. case ZMQ_RECONNECT_IVL:
  397. case ZMQ_RECONNECT_IVL_MAX:
  398. case ZMQ_BACKLOG:
  399. case ZMQ_FD: // FIXME: ZMQ_FD returns SOCKET on Windows
  400. option_len = sizeof(value_int);
  401. enif_mutex_lock(socket->mutex);
  402. if (zmq_getsockopt(socket->socket_zmq, option_name,
  403. &value_int, &option_len)) {
  404. enif_mutex_unlock(socket->mutex);
  405. return return_zmq_errno(env, zmq_errno());
  406. }
  407. enif_mutex_unlock(socket->mutex);
  408. return enif_make_tuple2(env, enif_make_atom(env, "ok"),
  409. enif_make_int(env, value_int));
  410. default:
  411. return enif_make_badarg(env);
  412. }
  413. }
  414. NIF(erlzmq_nif_send)
  415. {
  416. erlzmq_thread_request_t req;
  417. erlzmq_socket_t * socket;
  418. ErlNifBinary binary;
  419. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  420. (void **) &socket)) {
  421. return enif_make_badarg(env);
  422. }
  423. if (! enif_inspect_iolist_as_binary(env, argv[1], &binary)) {
  424. return enif_make_badarg(env);
  425. }
  426. if (! enif_get_int(env, argv[2], &req.data.send.flags)) {
  427. return enif_make_badarg(env);
  428. }
  429. if (zmq_msg_init_size(&req.data.send.msg, binary.size)) {
  430. return return_zmq_errno(env, zmq_errno());
  431. }
  432. memcpy(zmq_msg_data(&req.data.send.msg), binary.data, binary.size);
  433. int polling_thread_send = 1;
  434. if (! socket->active) {
  435. enif_mutex_lock(socket->mutex);
  436. if (zmq_send(socket->socket_zmq, &req.data.send.msg,
  437. req.data.send.flags | ZMQ_NOBLOCK)) {
  438. enif_mutex_unlock(socket->mutex);
  439. int const error = zmq_errno();
  440. if (error != EAGAIN ||
  441. (error == EAGAIN && (req.data.send.flags & ZMQ_NOBLOCK))) {
  442. zmq_msg_close(&req.data.send.msg);
  443. return return_zmq_errno(env, error);
  444. }
  445. }
  446. else {
  447. enif_mutex_unlock(socket->mutex);
  448. polling_thread_send = 0;
  449. }
  450. }
  451. if (polling_thread_send) {
  452. req.type = ERLZMQ_THREAD_REQUEST_SEND;
  453. req.data.send.env = enif_alloc_env();
  454. req.data.send.ref = enif_make_ref(req.data.send.env);
  455. enif_self(env, &req.data.send.pid);
  456. req.data.send.socket = socket;
  457. zmq_msg_t msg;
  458. if (zmq_msg_init_size(&msg, sizeof(erlzmq_thread_request_t))) {
  459. zmq_msg_close(&req.data.send.msg);
  460. enif_free_env(req.data.send.env);
  461. return return_zmq_errno(env, zmq_errno());
  462. }
  463. memcpy(zmq_msg_data(&msg), &req, sizeof(erlzmq_thread_request_t));
  464. enif_mutex_lock(socket->context->mutex);
  465. if (socket->context->thread_socket_name == NULL) {
  466. enif_mutex_unlock(socket->context->mutex);
  467. return return_zmq_errno(env, ETERM);
  468. }
  469. if (zmq_send(socket->context->thread_socket, &msg, 0)) {
  470. enif_mutex_unlock(socket->context->mutex);
  471. zmq_msg_close(&msg);
  472. zmq_msg_close(&req.data.send.msg);
  473. enif_free_env(req.data.send.env);
  474. return return_zmq_errno(env, zmq_errno());
  475. }
  476. else {
  477. enif_mutex_unlock(socket->context->mutex);
  478. zmq_msg_close(&msg);
  479. // each pointer to the socket in a request increments the reference
  480. enif_keep_resource(socket);
  481. return enif_make_copy(env, req.data.send.ref);
  482. }
  483. }
  484. else {
  485. zmq_msg_close(&req.data.send.msg);
  486. return enif_make_atom(env, "ok");
  487. }
  488. }
  489. NIF(erlzmq_nif_recv)
  490. {
  491. erlzmq_thread_request_t req;
  492. erlzmq_socket_t * socket;
  493. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  494. (void **) &socket)) {
  495. return enif_make_badarg(env);
  496. }
  497. if (! enif_get_int(env, argv[1], &req.data.recv.flags)) {
  498. return enif_make_badarg(env);
  499. }
  500. if (socket->active) {
  501. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  502. enif_make_atom(env, "active"));
  503. }
  504. zmq_msg_t msg;
  505. if (zmq_msg_init(&msg)) {
  506. return return_zmq_errno(env, zmq_errno());
  507. }
  508. // try recv with noblock
  509. enif_mutex_lock(socket->mutex);
  510. if (zmq_recv(socket->socket_zmq, &msg, ZMQ_NOBLOCK)) {
  511. enif_mutex_unlock(socket->mutex);
  512. zmq_msg_close(&msg);
  513. int const error = zmq_errno();
  514. if (error != EAGAIN ||
  515. (error == EAGAIN && (req.data.recv.flags & ZMQ_NOBLOCK))) {
  516. return return_zmq_errno(env, error);
  517. }
  518. req.type = ERLZMQ_THREAD_REQUEST_RECV;
  519. req.data.recv.env = enif_alloc_env();
  520. req.data.recv.ref = enif_make_ref(req.data.recv.env);
  521. enif_self(env, &req.data.recv.pid);
  522. req.data.recv.socket = socket;
  523. if (zmq_msg_init_size(&msg, sizeof(erlzmq_thread_request_t))) {
  524. enif_free_env(req.data.recv.env);
  525. return return_zmq_errno(env, zmq_errno());
  526. }
  527. memcpy(zmq_msg_data(&msg), &req, sizeof(erlzmq_thread_request_t));
  528. enif_mutex_lock(socket->context->mutex);
  529. if (socket->context->thread_socket_name == NULL) {
  530. enif_mutex_unlock(socket->context->mutex);
  531. return return_zmq_errno(env, ETERM);
  532. }
  533. if (zmq_send(socket->context->thread_socket, &msg, 0)) {
  534. enif_mutex_unlock(socket->context->mutex);
  535. zmq_msg_close(&msg);
  536. enif_free_env(req.data.recv.env);
  537. return return_zmq_errno(env, zmq_errno());
  538. }
  539. else {
  540. enif_mutex_unlock(socket->context->mutex);
  541. zmq_msg_close(&msg);
  542. // each pointer to the socket in a request increments the reference
  543. enif_keep_resource(socket);
  544. return enif_make_copy(env, req.data.recv.ref);
  545. }
  546. }
  547. else {
  548. enif_mutex_unlock(socket->mutex);
  549. ErlNifBinary binary;
  550. enif_alloc_binary(zmq_msg_size(&msg), &binary);
  551. memcpy(binary.data, zmq_msg_data(&msg), zmq_msg_size(&msg));
  552. zmq_msg_close(&msg);
  553. return enif_make_tuple2(env, enif_make_atom(env, "ok"),
  554. enif_make_binary(env, &binary));
  555. }
  556. }
  557. NIF(erlzmq_nif_close)
  558. {
  559. erlzmq_socket_t * socket;
  560. if (! enif_get_resource(env, argv[0], erlzmq_nif_resource_socket,
  561. (void **) &socket)) {
  562. return enif_make_badarg(env);
  563. }
  564. erlzmq_thread_request_t req;
  565. req.type = ERLZMQ_THREAD_REQUEST_CLOSE;
  566. req.data.close.env = enif_alloc_env();
  567. req.data.close.ref = enif_make_ref(req.data.close.env);
  568. enif_self(env, &req.data.close.pid);
  569. req.data.close.socket = socket;
  570. zmq_msg_t msg;
  571. if (zmq_msg_init_size(&msg, sizeof(erlzmq_thread_request_t))) {
  572. enif_free_env(req.data.close.env);
  573. return return_zmq_errno(env, zmq_errno());
  574. }
  575. memcpy(zmq_msg_data(&msg), &req, sizeof(erlzmq_thread_request_t));
  576. enif_mutex_lock(socket->context->mutex);
  577. if (socket->context->thread_socket_name == NULL) {
  578. // context is gone
  579. enif_mutex_lock(socket->mutex);
  580. zmq_msg_close(&msg);
  581. zmq_close(socket->socket_zmq);
  582. enif_mutex_unlock(socket->mutex);
  583. enif_mutex_destroy(socket->mutex);
  584. enif_release_resource(socket);
  585. enif_mutex_unlock(socket->context->mutex);
  586. return enif_make_atom(env, "ok");
  587. }
  588. if (zmq_send(socket->context->thread_socket, &msg, 0)) {
  589. enif_mutex_unlock(socket->context->mutex);
  590. zmq_msg_close(&msg);
  591. enif_free_env(req.data.close.env);
  592. return return_zmq_errno(env, zmq_errno());
  593. }
  594. else {
  595. enif_mutex_unlock(socket->context->mutex);
  596. zmq_msg_close(&msg);
  597. // each pointer to the socket in a request increments the reference
  598. enif_keep_resource(socket);
  599. return enif_make_copy(env, req.data.close.ref);
  600. }
  601. }
  602. NIF(erlzmq_nif_term)
  603. {
  604. erlzmq_context_t * context;
  605. if (!enif_get_resource(env, argv[0], erlzmq_nif_resource_context,
  606. (void **) &context)) {
  607. return enif_make_badarg(env);
  608. }
  609. erlzmq_thread_request_t req;
  610. req.type = ERLZMQ_THREAD_REQUEST_TERM;
  611. req.data.term.env = enif_alloc_env();
  612. req.data.term.ref = enif_make_ref(req.data.term.env);
  613. enif_self(env, &req.data.term.pid);
  614. zmq_msg_t msg;
  615. if (zmq_msg_init_size(&msg, sizeof(erlzmq_thread_request_t))) {
  616. enif_free_env(req.data.term.env);
  617. return return_zmq_errno(env, zmq_errno());
  618. }
  619. memcpy(zmq_msg_data(&msg), &req, sizeof(erlzmq_thread_request_t));
  620. enif_mutex_lock(context->mutex);
  621. if (zmq_send(context->thread_socket, &msg, 0)) {
  622. enif_mutex_unlock(context->mutex);
  623. zmq_msg_close(&msg);
  624. enif_free_env(req.data.term.env);
  625. return return_zmq_errno(env, zmq_errno());
  626. }
  627. else {
  628. enif_mutex_unlock(context->mutex);
  629. zmq_msg_close(&msg);
  630. // thread has a reference to the context, decrement here
  631. enif_release_resource(context);
  632. return enif_make_copy(env, req.data.term.ref);
  633. }
  634. }
  635. NIF(erlzmq_nif_version)
  636. {
  637. int major, minor, patch;
  638. zmq_version(&major, &minor, &patch);
  639. return enif_make_tuple3(env, enif_make_int(env, major),
  640. enif_make_int(env, minor),
  641. enif_make_int(env, patch));
  642. }
// Polling thread: owns one inproc PULL socket (the peer of
// context->thread_socket) and services the SEND/RECV/CLOSE/TERM requests
// queued by the NIF entry points.  Invariant: items_zmq[0] is the request
// socket, and for i >= 1, items_zmq[i] is the poll item for requests[i].
// Returns NULL after handling a TERM request.
static void * polling_thread(void * handle)
{
  erlzmq_context_t * context = (erlzmq_context_t *) handle;
  // the thread holds its own reference on the context until TERM
  enif_keep_resource(context);
  void * thread_socket = zmq_socket(context->context_zmq, ZMQ_PULL);
  assert(thread_socket);
  int status = zmq_connect(thread_socket, context->thread_socket_name);
  assert(status == 0);
  vector_t items_zmq;
  status = vector_initialize_pow2(zmq_pollitem_t, &items_zmq, 1,
                                  ERLZMQ_MAX_CONCURRENT_REQUESTS);
  assert(status == 0);
  // slot 0: the request socket itself
  zmq_pollitem_t thread_socket_poll_zmq = {thread_socket, 0, ZMQ_POLLIN, 0};
  status = vector_append(zmq_pollitem_t, &items_zmq, &thread_socket_poll_zmq);
  assert(status == 0);
  vector_t requests;
  status = vector_initialize_pow2(erlzmq_thread_request_t, &requests, 1,
                                  ERLZMQ_MAX_CONCURRENT_REQUESTS);
  assert(status == 0);
  // slot 0 placeholder so requests[i] stays aligned with items_zmq[i]
  erlzmq_thread_request_t request_empty;
  memset(&request_empty, 0, sizeof(erlzmq_thread_request_t));
  status = vector_append(erlzmq_thread_request_t, &requests, &request_empty);
  assert(status == 0);
  int i;
  for (;;) {
    // block until any pending request's socket (or slot 0) is ready
    int count = zmq_poll(vector_p(zmq_pollitem_t, &items_zmq),
                         vector_count(&items_zmq), -1);
    assert(count != -1);
    if (vector_get(zmq_pollitem_t, &items_zmq, 0)->revents & ZMQ_POLLIN) {
      --count;
    }
    // first service outstanding recv/send requests (slots >= 1)
    for (i = 1; i < vector_count(&items_zmq); ++i) {
      zmq_pollitem_t * item = vector_get(zmq_pollitem_t, &items_zmq, i);
      erlzmq_thread_request_t * r = vector_get(erlzmq_thread_request_t,
                                               &requests, i);
      if (item->revents & ZMQ_POLLIN) {
        size_t value_len = sizeof(int64_t);
        int64_t flag_value = 0;  // ZMQ_RCVMORE: multipart continuation flag
        assert(r->type == ERLZMQ_THREAD_REQUEST_RECV);
        --count;
        zmq_msg_t msg;
        zmq_msg_init(&msg);
        enif_mutex_lock(r->data.recv.socket->mutex);
        // receive; for active sockets also query ZMQ_RCVMORE under the lock
        if (zmq_recv(r->data.recv.socket->socket_zmq, &msg,
                     r->data.recv.flags) ||
            (r->data.recv.socket->active == ERLZMQ_SOCKET_ACTIVE_ON &&
             zmq_getsockopt(r->data.recv.socket->socket_zmq,
                            ZMQ_RCVMORE, &flag_value, &value_len)) )
        {
          enif_mutex_unlock(r->data.recv.socket->mutex);
          if (r->data.recv.socket->active == ERLZMQ_SOCKET_ACTIVE_ON) {
            // active socket: deliver {zmq, {Index, Socket}, Error} and
            // keep this request installed for the next message
            enif_send(NULL, &r->data.recv.pid, r->data.recv.env,
                      enif_make_tuple3(r->data.recv.env,
                        enif_make_atom(r->data.recv.env, "zmq"),
                        enif_make_tuple2(r->data.recv.env,
                          enif_make_uint64(r->data.recv.env,
                            r->data.recv.socket->socket_index),
                          enif_make_resource(r->data.recv.env, r->data.recv.socket)),
                        return_zmq_errno(r->data.recv.env, zmq_errno())));
            enif_free_env(r->data.recv.env);
            r->data.recv.env = enif_alloc_env();
            item->revents = 0;
          }
          else {
            // passive recv after POLLIN should never fail
            assert(0);
          }
        }
        else {
          enif_mutex_unlock(r->data.recv.socket->mutex);
        }
        ErlNifBinary binary;
        enif_alloc_binary(zmq_msg_size(&msg), &binary);
        memcpy(binary.data, zmq_msg_data(&msg), zmq_msg_size(&msg));
        zmq_msg_close(&msg);
        if (r->data.recv.socket->active == ERLZMQ_SOCKET_ACTIVE_ON) {
          ERL_NIF_TERM flags_list;
          // Should we send the multipart flag
          if(flag_value == 1) {
            flags_list = enif_make_list1(r->data.recv.env, enif_make_atom(r->data.recv.env, "rcvmore"));
          } else {
            flags_list = enif_make_list(r->data.recv.env, 0);
          }
          // active socket: {zmq, {Index, Socket}, Binary, Flags};
          // request stays installed, env is recycled
          enif_send(NULL, &r->data.recv.pid, r->data.recv.env,
                    enif_make_tuple4(r->data.recv.env,
                      enif_make_atom(r->data.recv.env, "zmq"),
                      enif_make_tuple2(r->data.recv.env,
                        enif_make_uint64(r->data.recv.env,
                          r->data.recv.socket->socket_index),
                        enif_make_resource(r->data.recv.env, r->data.recv.socket)),
                      enif_make_binary(r->data.recv.env, &binary),
                      flags_list));
          enif_free_env(r->data.recv.env);
          r->data.recv.env = enif_alloc_env();
          item->revents = 0;
        }
        else {
          // passive recv: one-shot reply {Ref, Binary}, then retire the
          // request (drop socket ref, remove slot, re-visit this index)
          enif_send(NULL, &r->data.recv.pid, r->data.recv.env,
                    enif_make_tuple2(r->data.recv.env,
                      enif_make_copy(r->data.recv.env, r->data.recv.ref),
                      enif_make_binary(r->data.recv.env, &binary)));
          enif_free_env(r->data.recv.env);
          enif_release_resource(r->data.recv.socket);
          status = vector_remove(&items_zmq, i);
          assert(status == 0);
          status = vector_remove(&requests, i);
          assert(status == 0);
          --i;
        }
      }
      else if (item->revents & ZMQ_POLLOUT) {
        assert(r->type == ERLZMQ_THREAD_REQUEST_SEND);
        --count;
        // deferred send is now possible; reply {Ref, ok | Error}
        enif_mutex_lock(r->data.send.socket->mutex);
        if (zmq_send(r->data.send.socket->socket_zmq,
                     &r->data.send.msg, r->data.send.flags)) {
          enif_mutex_unlock(r->data.send.socket->mutex);
          enif_send(NULL, &r->data.send.pid, r->data.send.env,
                    enif_make_tuple2(r->data.send.env,
                      enif_make_copy(r->data.send.env, r->data.send.ref),
                      return_zmq_errno(r->data.send.env, zmq_errno())));
        } else {
          enif_mutex_unlock(r->data.send.socket->mutex);
          enif_send(NULL, &r->data.send.pid, r->data.send.env,
                    enif_make_tuple2(r->data.send.env,
                      enif_make_copy(r->data.send.env, r->data.send.ref),
                      enif_make_atom(r->data.send.env, "ok")));
        }
        // send requests are always one-shot: release everything
        zmq_msg_close(&r->data.send.msg);
        enif_free_env(r->data.send.env);
        enif_release_resource(r->data.send.socket);
        status = vector_remove(&items_zmq, i);
        assert(status == 0);
        status = vector_remove(&requests, i);
        assert(status == 0);
        --i;
      }
    }
    // then pull one incoming request off slot 0, if any
    if (vector_get(zmq_pollitem_t, &items_zmq, 0)->revents & ZMQ_POLLIN) {
      vector_get(zmq_pollitem_t, &items_zmq, 0)->revents = 0;
      zmq_msg_t msg;
      zmq_msg_init(&msg);
      enif_mutex_lock(context->mutex);
      status = zmq_recv(thread_socket, &msg, 0);
      enif_mutex_unlock(context->mutex);
      assert(status == 0);
      assert(zmq_msg_size(&msg) == sizeof(erlzmq_thread_request_t));
      erlzmq_thread_request_t * r =
        (erlzmq_thread_request_t *) zmq_msg_data(&msg);
      if (r->type == ERLZMQ_THREAD_REQUEST_SEND) {
        // watch the socket for writability
        zmq_pollitem_t item_zmq = {r->data.send.socket->socket_zmq,
                                   0, ZMQ_POLLOUT, 0};
        status = vector_append(zmq_pollitem_t, &items_zmq, &item_zmq);
        assert(status == 0);
        status = vector_append(erlzmq_thread_request_t, &requests, r);
        assert(status == 0);
        zmq_msg_close(&msg);
      }
      else if (r->type == ERLZMQ_THREAD_REQUEST_RECV) {
        // watch the socket for readability
        zmq_pollitem_t item_zmq = {r->data.recv.socket->socket_zmq,
                                   0, ZMQ_POLLIN, 0};
        status = vector_append(zmq_pollitem_t, &items_zmq, &item_zmq);
        assert(status == 0);
        status = vector_append(erlzmq_thread_request_t, &requests, r);
        assert(status == 0);
        zmq_msg_close(&msg);
      }
      else if (r->type == ERLZMQ_THREAD_REQUEST_CLOSE) {
        // remove all entries with this socket
        for (i = vector_count(&items_zmq) - 1; i > 0; --i) {
          zmq_pollitem_t * item = vector_get(zmq_pollitem_t, &items_zmq, i);
          if (item->socket == r->data.close.socket->socket_zmq) {
            erlzmq_thread_request_t * r_old =
              vector_get(erlzmq_thread_request_t, &requests, i);
            if (r_old->type == ERLZMQ_THREAD_REQUEST_RECV) {
              enif_clear_env(r_old->data.recv.env);
              // FIXME
              // causes crash on R14B01, works fine on R14B02
              // (repeated enif_send with active receive broken on R14B01)
              //enif_free_env(r_old->data.recv.env);
              enif_release_resource(r_old->data.recv.socket);
            }
            else if (r_old->type == ERLZMQ_THREAD_REQUEST_SEND) {
              zmq_msg_close(&(r_old->data.send.msg));
              enif_free_env(r_old->data.send.env);
              enif_release_resource(r_old->data.send.socket);
            }
            else {
              assert(0);
            }
            status = vector_remove(&items_zmq, i);
            assert(status == 0);
            status = vector_remove(&requests, i);
            assert(status == 0);
          }
        }
        // close the socket
        enif_mutex_lock(r->data.close.socket->mutex);
        zmq_close(r->data.close.socket->socket_zmq);
        enif_mutex_unlock(r->data.close.socket->mutex);
        enif_mutex_destroy(r->data.close.socket->mutex);
        enif_release_resource(r->data.close.socket);
        // notify the waiting request
        enif_send(NULL, &r->data.close.pid, r->data.close.env,
                  enif_make_tuple2(r->data.close.env,
                    enif_make_copy(r->data.close.env, r->data.close.ref),
                    enif_make_atom(r->data.close.env, "ok")));
        enif_free_env(r->data.close.env);
        zmq_msg_close(&msg);
      }
      else if (r->type == ERLZMQ_THREAD_REQUEST_TERM) {
        enif_mutex_lock(context->mutex);
        free(context->thread_socket_name);
        // use this to flag context is over
        context->thread_socket_name = NULL;
        enif_mutex_unlock(context->mutex);
        // cleanup pending requests
        for (i = 1; i < vector_count(&requests); ++i) {
          erlzmq_thread_request_t * r_old = vector_get(erlzmq_thread_request_t,
                                                       &requests, i);
          if (r_old->type == ERLZMQ_THREAD_REQUEST_RECV) {
            enif_free_env(r_old->data.recv.env);
            enif_release_resource(r_old->data.recv.socket);
            zmq_close(r_old->data.recv.socket->socket_zmq);
          }
          else if (r_old->type == ERLZMQ_THREAD_REQUEST_SEND) {
            zmq_msg_close(&r_old->data.send.msg);
            enif_free_env(r_old->data.send.env);
            enif_release_resource(r_old->data.send.socket);
            zmq_close(r_old->data.send.socket->socket_zmq);
          }
        }
        // terminate the context
        enif_mutex_lock(context->mutex);
        zmq_close(thread_socket);
        zmq_close(context->thread_socket);
        enif_mutex_unlock(context->mutex);
        // blocks until all context sockets are closed
        zmq_term(context->context_zmq);
        // lock/unlock pair drains any NIF still holding the mutex before
        // it is destroyed
        enif_mutex_lock(context->mutex);
        enif_mutex_unlock(context->mutex);
        enif_mutex_destroy(context->mutex);
        enif_release_resource(context);
        // notify the waiting request
        enif_send(NULL, &r->data.term.pid, r->data.term.env,
                  enif_make_tuple2(r->data.term.env,
                    enif_make_copy(r->data.term.env, r->data.term.ref),
                    enif_make_atom(r->data.term.env, "ok")));
        enif_free_env(r->data.term.env);
        zmq_msg_close(&msg);
        vector_destroy(&items_zmq);
        vector_destroy(&requests);
        return NULL;
      }
      else {
        assert(0);
      }
    }
  }
  return NULL;
}
  902. static ERL_NIF_TERM add_active_req(ErlNifEnv* env, erlzmq_socket_t * socket)
  903. {
  904. socket->active = ERLZMQ_SOCKET_ACTIVE_ON;
  905. erlzmq_thread_request_t req;
  906. req.type = ERLZMQ_THREAD_REQUEST_RECV;
  907. req.data.recv.env = enif_alloc_env();
  908. req.data.recv.flags = 0;
  909. enif_self(env, &req.data.recv.pid);
  910. req.data.recv.socket = socket;
  911. zmq_msg_t msg;
  912. if (zmq_msg_init_size(&msg, sizeof(erlzmq_thread_request_t))) {
  913. enif_free_env(req.data.recv.env);
  914. return return_zmq_errno(env, zmq_errno());
  915. }
  916. memcpy(zmq_msg_data(&msg), &req, sizeof(erlzmq_thread_request_t));
  917. if (zmq_send(socket->context->thread_socket, &msg, 0)) {
  918. zmq_msg_close(&msg);
  919. enif_free_env(req.data.recv.env);
  920. return return_zmq_errno(env, zmq_errno());
  921. }
  922. else {
  923. zmq_msg_close(&msg);
  924. // each pointer to the socket in a request increments the reference
  925. enif_keep_resource(socket);
  926. return enif_make_atom(env, "ok");
  927. }
  928. }
  929. static ERL_NIF_TERM return_zmq_errno(ErlNifEnv* env, int const value)
  930. {
  931. switch (value) {
  932. case EPERM:
  933. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  934. enif_make_atom(env, "eperm"));
  935. case ENOENT:
  936. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  937. enif_make_atom(env, "enoent"));
  938. case ESRCH:
  939. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  940. enif_make_atom(env, "esrch"));
  941. case EINTR:
  942. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  943. enif_make_atom(env, "eintr"));
  944. case EIO:
  945. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  946. enif_make_atom(env, "eio"));
  947. case ENXIO:
  948. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  949. enif_make_atom(env, "enxio"));
  950. case ENOEXEC:
  951. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  952. enif_make_atom(env, "enoexec"));
  953. case EBADF:
  954. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  955. enif_make_atom(env, "ebadf"));
  956. case ECHILD:
  957. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  958. enif_make_atom(env, "echild"));
  959. case EDEADLK:
  960. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  961. enif_make_atom(env, "edeadlk"));
  962. case ENOMEM:
  963. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  964. enif_make_atom(env, "enomem"));
  965. case EACCES:
  966. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  967. enif_make_atom(env, "eacces"));
  968. case EFAULT:
  969. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  970. enif_make_atom(env, "efault"));
  971. case ENOTBLK:
  972. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  973. enif_make_atom(env, "enotblk"));
  974. case EBUSY:
  975. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  976. enif_make_atom(env, "ebusy"));
  977. case EEXIST:
  978. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  979. enif_make_atom(env, "eexist"));
  980. case EXDEV:
  981. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  982. enif_make_atom(env, "exdev"));
  983. case ENODEV:
  984. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  985. enif_make_atom(env, "enodev"));
  986. case ENOTDIR:
  987. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  988. enif_make_atom(env, "enotdir"));
  989. case EISDIR:
  990. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  991. enif_make_atom(env, "eisdir"));
  992. case EINVAL:
  993. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  994. enif_make_atom(env, "einval"));
  995. case ENFILE:
  996. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  997. enif_make_atom(env, "enfile"));
  998. case EMFILE:
  999. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1000. enif_make_atom(env, "emfile"));
  1001. case ETXTBSY:
  1002. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1003. enif_make_atom(env, "etxtbsy"));
  1004. case EFBIG:
  1005. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1006. enif_make_atom(env, "efbig"));
  1007. case ENOSPC:
  1008. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1009. enif_make_atom(env, "enospc"));
  1010. case ESPIPE:
  1011. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1012. enif_make_atom(env, "espipe"));
  1013. case EROFS:
  1014. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1015. enif_make_atom(env, "erofs"));
  1016. case EMLINK:
  1017. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1018. enif_make_atom(env, "emlink"));
  1019. case EPIPE:
  1020. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1021. enif_make_atom(env, "epipe"));
  1022. case EAGAIN:
  1023. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1024. enif_make_atom(env, "eagain"));
  1025. case EINPROGRESS:
  1026. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1027. enif_make_atom(env, "einprogress"));
  1028. case EALREADY:
  1029. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1030. enif_make_atom(env, "ealready"));
  1031. case ENOTSOCK:
  1032. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1033. enif_make_atom(env, "enotsock"));
  1034. case EDESTADDRREQ:
  1035. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1036. enif_make_atom(env, "edestaddrreq"));
  1037. case EMSGSIZE:
  1038. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1039. enif_make_atom(env, "emsgsize"));
  1040. case EPROTOTYPE:
  1041. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1042. enif_make_atom(env, "eprototype"));
  1043. case ENOPROTOOPT:
  1044. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1045. enif_make_atom(env, "eprotoopt"));
  1046. case EPROTONOSUPPORT:
  1047. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1048. enif_make_atom(env, "eprotonosupport"));
  1049. case ESOCKTNOSUPPORT:
  1050. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1051. enif_make_atom(env, "esocktnosupport"));
  1052. case ENOTSUP:
  1053. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1054. enif_make_atom(env, "enotsup"));
  1055. case EPFNOSUPPORT:
  1056. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1057. enif_make_atom(env, "epfnosupport"));
  1058. case EAFNOSUPPORT:
  1059. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1060. enif_make_atom(env, "eafnosupport"));
  1061. case EADDRINUSE:
  1062. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1063. enif_make_atom(env, "eaddrinuse"));
  1064. case EADDRNOTAVAIL:
  1065. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1066. enif_make_atom(env, "eaddrnotavail"));
  1067. case ENETDOWN:
  1068. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1069. enif_make_atom(env, "enetdown"));
  1070. case ENETUNREACH:
  1071. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1072. enif_make_atom(env, "enetunreach"));
  1073. case ENETRESET:
  1074. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1075. enif_make_atom(env, "enetreset"));
  1076. case ECONNABORTED:
  1077. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1078. enif_make_atom(env, "econnaborted"));
  1079. case ECONNRESET:
  1080. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1081. enif_make_atom(env, "econnreset"));
  1082. case ENOBUFS:
  1083. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1084. enif_make_atom(env, "enobufs"));
  1085. case EISCONN:
  1086. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1087. enif_make_atom(env, "eisconn"));
  1088. case ENOTCONN:
  1089. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1090. enif_make_atom(env, "enotconn"));
  1091. case ESHUTDOWN:
  1092. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1093. enif_make_atom(env, "eshutdown"));
  1094. case ETOOMANYREFS:
  1095. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1096. enif_make_atom(env, "etoomanyrefs"));
  1097. case ETIMEDOUT:
  1098. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1099. enif_make_atom(env, "etimedout"));
  1100. case ECONNREFUSED:
  1101. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1102. enif_make_atom(env, "econnrefused"));
  1103. case ELOOP:
  1104. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1105. enif_make_atom(env, "eloop"));
  1106. case ENAMETOOLONG:
  1107. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1108. enif_make_atom(env, "enametoolong"));
  1109. case (ZMQ_HAUSNUMERO + 1):
  1110. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1111. enif_make_atom(env, "enotsup"));
  1112. case (ZMQ_HAUSNUMERO + 2):
  1113. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1114. enif_make_atom(env, "eprotonosupport"));
  1115. case (ZMQ_HAUSNUMERO + 3):
  1116. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1117. enif_make_atom(env, "enobufs"));
  1118. case (ZMQ_HAUSNUMERO + 4):
  1119. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1120. enif_make_atom(env, "enetdown"));
  1121. case (ZMQ_HAUSNUMERO + 5):
  1122. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1123. enif_make_atom(env, "eaddrinuse"));
  1124. case (ZMQ_HAUSNUMERO + 6):
  1125. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1126. enif_make_atom(env, "eaddrnotavail"));
  1127. case (ZMQ_HAUSNUMERO + 7):
  1128. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1129. enif_make_atom(env, "econnrefused"));
  1130. case (ZMQ_HAUSNUMERO + 8):
  1131. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1132. enif_make_atom(env, "einprogress"));
  1133. case (ZMQ_HAUSNUMERO + 51):
  1134. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1135. enif_make_atom(env, "efsm"));
  1136. case (ZMQ_HAUSNUMERO + 52):
  1137. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1138. enif_make_atom(env, "enocompatproto"));
  1139. case (ZMQ_HAUSNUMERO + 53):
  1140. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1141. enif_make_atom(env, "eterm"));
  1142. case (ZMQ_HAUSNUMERO + 54):
  1143. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1144. enif_make_atom(env, "emthread"));
  1145. default:
  1146. return enif_make_tuple2(env, enif_make_atom(env, "error"),
  1147. enif_make_int(env, value));
  1148. }
  1149. }
  1150. static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
  1151. {
  1152. erlzmq_nif_resource_context =
  1153. enif_open_resource_type(env, "erlzmq_nif",
  1154. "erlzmq_nif_resource_context",
  1155. NULL,
  1156. ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER,
  1157. 0);
  1158. erlzmq_nif_resource_socket =
  1159. enif_open_resource_type(env, "erlzmq_nif",
  1160. "erlzmq_nif_resource_socket",
  1161. NULL,
  1162. ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER,
  1163. 0);
  1164. return 0;
  1165. }
// NIF unload callback: intentionally empty.  The resource types opened in
// on_load are owned by the VM, and per-context/per-socket cleanup happens in
// the termination paths of the polling thread, so nothing remains to free.
static void on_unload(ErlNifEnv* env, void* priv_data) {
}
// Register the NIF entry points (the nif_funcs table is defined earlier in
// this file) under the Erlang module name 'erlzmq_nif'.
ERL_NIF_INIT(erlzmq_nif, nif_funcs, &on_load, NULL, NULL, &on_unload);