
/bigstring_unix/src/bigstring_unix_stubs.c

https://github.com/janestreet/core
Possible License(s): MIT
#include "config.h"
#define _FILE_OFFSET_BITS 64

#ifdef JSC_RECVMMSG
#define _GNU_SOURCE /* recvmmsg */
#endif

/* Defining _XOPEN_SOURCE on FreeBSD results in some
   other definitions (MSG_NOSIGNAL) being hidden. */
#ifdef __linux__
/* For pread/pwrite >= 500 */
/* For ipv6 >= 600 */
#define _XOPEN_SOURCE 600
#endif

#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <netinet/in.h>
#include <assert.h>
#include <stdint.h>

#ifdef __APPLE__
#include <libkern/OSByteOrder.h>
#define bswap_16 OSSwapInt16
#define bswap_32 OSSwapInt32
#define bswap_64 OSSwapInt64
#elif __GLIBC__
#include <byteswap.h>
#include <malloc.h>
#else
#include <sys/types.h>
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/endian.h>
#else
#include <endian.h>
#endif
#define __BYTE_ORDER    _BYTE_ORDER
#define __LITTLE_ENDIAN _LITTLE_ENDIAN
#define __BIG_ENDIAN    _BIG_ENDIAN
#define bswap_16 bswap16
#define bswap_32 bswap32
#define bswap_64 bswap64
#endif

#include "ocaml_utils.h"
#include "unix_utils.h"
#include "socketaddr.h"
#include <core_params.h>
#include "recvmmsg.h"

/* Initialisation */
static const value *bigstring_exc_IOError = NULL;
static const value *bigstring_exc_End_of_file = NULL;
static const value *unix_error_exn = NULL;

CAMLprim value bigstring_init_stub(value __unused v_unit)
{
  bigstring_exc_IOError = caml_named_value("Bigstring.IOError");
  bigstring_exc_End_of_file = caml_named_value("Bigstring.End_of_file");
  unix_error_exn = caml_named_value("Unix.Unix_error");
#ifdef __GLIBC__
  /* GLIBC uses a threshold internally as a cutoff between brk and mmap.
     Sadly, it nowadays employs a heuristic that may change this value
     dynamically, which made it hard to prevent C-heap fragmentation
     (e.g. in the writer).  The call to mallopt suppresses this behavior. */
  mallopt(M_MMAP_THRESHOLD, 131072);
#endif
  if (unix_error_exn == NULL)
    caml_invalid_argument(
      "Exception Unix.Unix_error not initialized, please link unix.cma");
  return Val_unit;
}
/* Exceptions */

static inline void raise_io_error(value v_n_good, value v_exc)
{
  raise_with_two_args(*bigstring_exc_IOError, v_n_good, v_exc);
}

static inline value mk_unix_error_exn(int errcode, char *cmdname, value cmdarg)
{
  CAMLparam0();
  CAMLlocal3(name, err, arg);
  value res;
  arg = cmdarg == Nothing ? caml_copy_string("") : cmdarg;
  name = caml_copy_string(cmdname);
  err = unix_error_of_code(errcode);
  res = caml_alloc_small(4, 0);
  Field(res, 0) = *unix_error_exn;
  Field(res, 1) = err;
  Field(res, 2) = name;
  Field(res, 3) = arg;
  CAMLreturn(res);
}

static inline value mk_uerror_exn(char *cmdname, value cmdarg)
{
  return mk_unix_error_exn(errno, cmdname, cmdarg);
}

static inline void raise_unix_io_error(
  value v_n_good, char *cmdname, value cmdarg)
{
  value v_uerror = mk_uerror_exn(cmdname, cmdarg);
  raise_io_error(v_n_good, v_uerror);
}

static inline void raise_eof_io_error(value v_n_good)
{
  value v_eof_exn = *bigstring_exc_End_of_file;
  raise_io_error(v_n_good, v_eof_exn);
}

/* Input of bigstrings from file descriptors */
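/* Reads at least [min_len] bytes (unless EOF or an error occurs) into the
   bigstring at [pos], retrying on EINTR and releasing the OCaml runtime lock
   around the read(2) calls.  Returns the number of bytes read; on failure the
   number of bytes already read is reported with the raised exception. */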
CAMLprim value bigstring_read_stub(
  value v_min_len, value v_fd, value v_pos, value v_len, value v_bstr)
{
  CAMLparam1(v_bstr);
  size_t min_len = Long_val(v_min_len);
  int fd = Int_val(v_fd);
  size_t init_len = Long_val(v_len);
  size_t len = init_len;
  ssize_t n_read;
  char *bstr_start = get_bstr(v_bstr, v_pos);
  char *bstr = bstr_start;
  char *bstr_min = bstr_start + min_len;
  caml_enter_blocking_section();
  do {
    do {
      n_read = read(fd, bstr, len);
    } while (n_read == -1 && errno == EINTR);
    if (n_read <= 0) {
      value v_n_good = Val_long(bstr - bstr_start);
      caml_leave_blocking_section();
      if (n_read == 0) {
        if (init_len == 0) CAMLreturn(Val_long(0));
        else raise_eof_io_error(v_n_good);
      }
      else raise_unix_io_error(v_n_good, "read", Nothing);
    } else {
      bstr += n_read;
      len -= n_read;
    }
  } while (bstr < bstr_min);
  caml_leave_blocking_section();
  CAMLreturn(Val_long(bstr - bstr_start));
}
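/* The *_assume_fd_is_nonblocking stubs below perform a single system call,
   normally without releasing the OCaml runtime lock; the read/write/recvfrom
   variants release it only when the transfer is large (> THREAD_IO_CUTOFF) or
   the bigstring is backed by a memory-mapped file. */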
CAMLprim value bigstring_read_assume_fd_is_nonblocking_stub(
  value v_fd, value v_pos, value v_len, value v_bstr)
{
  struct caml_ba_array *ba = Caml_ba_array_val(v_bstr);
  char *bstr = (char *) ba->data + Long_val(v_pos);
  size_t len = Long_val(v_len);
  ssize_t n_read;
  if ((len > THREAD_IO_CUTOFF) || (ba->flags & CAML_BA_MAPPED_FILE)) {
    Begin_roots1(v_bstr);
    caml_enter_blocking_section();
    n_read = read(Int_val(v_fd), bstr, len);
    caml_leave_blocking_section();
    End_roots();
  }
  else n_read = read(Int_val(v_fd), bstr, len);
  if (n_read == -1) n_read = -errno;
  return Val_long(n_read);
}

CAMLprim value bigstring_pread_assume_fd_is_nonblocking_stub(
  value v_fd, value v_offset, value v_pos, value v_len, value v_bstr)
{
  char *bstr = get_bstr(v_bstr, v_pos);
  size_t len = Long_val(v_len);
  ssize_t n_read;
  n_read = pread(Int_val(v_fd), bstr, len, Long_val(v_offset));
  if (n_read == -1) uerror("bigstring_pread_assume_fd_is_nonblocking_stub", Nothing);
  return Val_long(n_read);
}

/* Input of bigstrings from sockets */
CAMLprim value bigstring_recv_peek_assume_fd_is_nonblocking_stub(
  value v_sock, value v_pos, value v_len, value v_bstr)
{
  CAMLparam4(v_sock, v_pos, v_len, v_bstr);
  size_t len = Long_val(v_len);
  if (len == 0) CAMLreturn(Val_long(0));
  else {
    char *bstr = get_bstr(v_bstr, v_pos);
    int sock = Int_val(v_sock);
    ssize_t n_read;
    n_read = recv(sock, bstr, len, MSG_PEEK);
    if (n_read == -1) uerror("bigstring_recv_peek_assume_fd_is_nonblocking", Nothing);
    CAMLreturn(Val_long(n_read));
  }
}
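/* Loops with MSG_WAITALL until exactly [len] bytes have been received,
   raising End_of_file on a zero-length read and IOError on other failures;
   the number of bytes already received is reported with the exception. */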
CAMLprim value bigstring_really_recv_stub(
  value v_sock, value v_pos, value v_len, value v_bstr)
{
  size_t len = Long_val(v_len);
  if (len == 0) return Val_long(0);
  else {
    CAMLparam1(v_bstr);
    char *bstr = get_bstr(v_bstr, v_pos);
    int sock = Int_val(v_sock);
    ssize_t n_read;
    size_t n_total = 0;
    caml_enter_blocking_section();
    while (len > 0) {
      n_read = recv(sock, bstr, len, MSG_WAITALL);
      if (n_read <= 0) {
        if (n_read != -1 || errno != EINTR) {
          value v_n_total = Val_long(n_total);
          caml_leave_blocking_section();
          if (n_read == 0) raise_eof_io_error(v_n_total);
          else raise_unix_io_error(v_n_total, "really_recv", Nothing);
        }
      } else {
        len -= n_read;
        bstr += n_read;
        n_total += n_read;
      }
    }
    caml_leave_blocking_section();
    CAMLreturn(Val_unit);
  }
}
CAMLprim value bigstring_recvfrom_assume_fd_is_nonblocking_stub(
  value v_sock, value v_pos, value v_len, value v_bstr)
{
  CAMLparam1(v_bstr);
  CAMLlocal1(v_addr);
  struct caml_ba_array *ba = Caml_ba_array_val(v_bstr);
  char *bstr = (char *) ba->data + Long_val(v_pos);
  size_t len = Long_val(v_len);
  ssize_t n_read;
  union sock_addr_union addr;
  socklen_param_type addr_len = sizeof(addr);
  value v_res;
  if (len > THREAD_IO_CUTOFF) {
    caml_enter_blocking_section();
    n_read = recvfrom(Int_val(v_sock), bstr, len, 0, &addr.s_gen, &addr_len);
    caml_leave_blocking_section();
  }
  else n_read = recvfrom(Int_val(v_sock), bstr, len, 0, &addr.s_gen, &addr_len);
  if (n_read == -1)
    uerror("bigstring_recvfrom_assume_fd_is_nonblocking", Nothing);
  v_addr = alloc_sockaddr(&addr, addr_len, -1);
  v_res = caml_alloc_small(2, 0);
  Field(v_res, 0) = Val_long(n_read);
  Field(v_res, 1) = v_addr;
  CAMLreturn(v_res);
}

/* I/O of bigstrings from channels */
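/* The declarations below mirror the internal [struct channel] representation
   of the OCaml runtime (caml/io.h), so that these stubs can read from and
   write to the channel buffer directly.  They must be kept in sync with the
   runtime. */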
typedef off_t file_offset;

#define IO_BUFFER_SIZE 65536

struct channel {
  int fd;                        /* Unix file descriptor */
  file_offset offset;            /* Absolute position of fd in the file */
  char *end;                     /* Physical end of the buffer */
  char *curr;                    /* Current position in the buffer */
  char *max;                     /* Logical end of the buffer (for input) */
  void *mutex;                   /* Placeholder for mutex (for systhreads) */
  struct channel *next, *prev;   /* Double chaining of channels (flush_all) */
  int revealed;                  /* For Cash only */
  int old_revealed;              /* For Cash only */
  int refcount;                  /* For flush_all and for Cash */
  int flags;                     /* Bitfield */
  char buff[IO_BUFFER_SIZE];     /* The buffer itself */
};

CAMLextern void (*caml_channel_mutex_lock) (struct channel *);
CAMLextern void (*caml_channel_mutex_unlock) (struct channel *);

#define Channel(v) (*((struct channel **) (Data_custom_val(v))))

#define Lock(channel) \
  if (caml_channel_mutex_lock != NULL) (*caml_channel_mutex_lock)(channel)
#define Unlock(channel) \
  if (caml_channel_mutex_unlock != NULL) (*caml_channel_mutex_unlock)(channel)
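/* Reads from a channel into a bigstring.  Data already buffered in the
   channel is copied first; if more is needed, a single readv(2) fills the
   destination bigstring and the channel buffer at the same time, so any
   surplus bytes remain buffered in the channel for later reads. */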
CAMLprim value bigstring_input_stub(
  value v_min_len, value v_chan, value v_pos, value v_len, value v_bstr)
{
  CAMLparam2(v_chan, v_bstr);
  struct channel *chan = Channel(v_chan);
  char *bstr_start = get_bstr(v_bstr, v_pos);
  char *bstr = bstr_start;
  size_t init_bstr_len = Long_val(v_len);
  size_t bstr_len = init_bstr_len;
  size_t min_len = Long_val(v_min_len);
  size_t avail = chan->max - chan->curr;
  Lock(chan);
  if (avail) {
    int got_all = bstr_len <= avail;
    size_t to_write = got_all ? bstr_len : avail;
    memcpy(bstr, chan->curr, to_write);
    if (got_all) {
      chan->curr += to_write;
      Unlock(chan);
      CAMLreturn(v_len);
    }
    else {
      bstr += to_write;
      bstr_len -= to_write;
      min_len -= to_write;
    }
  }
  /* Buffer empty now */
  {
    int fd = chan->fd;
    char *bstr_min = bstr + min_len;
    char *bstr_max = bstr + bstr_len;
    struct iovec iovecs[2];
    struct iovec *bstr_iov = &iovecs[0];
    struct iovec *buff_iov = &iovecs[1];
    ssize_t n_read;
    bstr_iov->iov_base = bstr;
    bstr_iov->iov_len = bstr_len;
    buff_iov->iov_base = chan->buff;
    buff_iov->iov_len = chan->end - chan->buff;
    caml_enter_blocking_section();
    while (1) {
      n_read = readv(fd, iovecs, 2);
      if (n_read <= 0) {
        if (n_read == -1) {
          /* Interrupt and error handling */
          if (errno == EINTR) continue;
          else {
            value v_n_good =
              Val_long((char *) bstr_iov->iov_base - bstr_start);
            /* Set buffer to empty as required */
            chan->curr = chan->max;
            caml_leave_blocking_section();
            Unlock(chan);
            raise_unix_io_error(v_n_good, "input", Nothing);
          }
        }
        else {
          /* Zero-read: set buffer to empty as required */
          assert(n_read == 0);
          chan->curr = chan->max;
          if (init_bstr_len == 0) {
            caml_leave_blocking_section();
            Unlock(chan);
            CAMLreturn(Val_long(0));
          } else {
            /* EOF handling */
            value v_n_good =
              Val_long((char *) bstr_iov->iov_base - bstr_start);
            caml_leave_blocking_section();
            Unlock(chan);
            raise_eof_io_error(v_n_good);
          }
        }
      } else {
        /* Successful read */
        chan->offset += n_read;
        bstr = (char *) bstr_iov->iov_base + n_read;
        if (bstr >= bstr_min) {
          /* Sufficient data read */
          if (bstr > bstr_max) {
            /* Buffer contains extra data */
            chan->max = &chan->buff[bstr - bstr_max];
            chan->curr = chan->buff;
            caml_leave_blocking_section();
            Unlock(chan);
            CAMLreturn(v_len);
          } else {
            /* Buffer empty; set it so */
            chan->curr = chan->max;
            caml_leave_blocking_section();
            Unlock(chan);
            CAMLreturn(Val_long(bstr - bstr_start));
          }
        } else {
          /* Insufficient data */
          bstr_iov->iov_base = bstr;
          bstr_iov->iov_len -= n_read;
        }
      }
    }
  }
}
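/* Writes a bigstring to a channel.  If the channel buffer has room, the data
   is simply appended to it; otherwise buffered data and the bigstring are
   flushed together with one writev(2).  When the call would block and more
   than one byte is pending, it retries with a single byte before giving up. */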
CAMLprim value bigstring_output_stub(
  value v_min_len, value v_chan, value v_pos, value v_len, value v_bstr)
{
  CAMLparam2(v_chan, v_bstr);
  struct channel *chan = Channel(v_chan);
  char *bstr = get_bstr(v_bstr, v_pos);
  size_t bstr_len = Long_val(v_len);
  Lock(chan);
  if (bstr_len <= (size_t) (chan->end - chan->curr)) {
    /* Buffer can store all data */
    memcpy(chan->curr, bstr, bstr_len);
    chan->curr += bstr_len;
    Unlock(chan);
    CAMLreturn(v_len);
  } else {
    /* Buffer cannot store all data */
    int fd = chan->fd;
    size_t buff_len = chan->curr - chan->buff;
    char *bstr_min = bstr + Long_val(v_min_len);
    struct iovec iovecs[2];
    struct iovec *buff_iov = &iovecs[0];
    struct iovec *bstr_iov = &iovecs[1];
    ssize_t written;
    buff_iov->iov_base = chan->buff;
    buff_iov->iov_len = buff_len;
    bstr_iov->iov_base = bstr;
    bstr_iov->iov_len = bstr_len;
    caml_enter_blocking_section();
    while (1) {
      written = jane_writev(fd, iovecs, 2);
      if (written == -1) {
        /* Interrupt and error handling */
        if (errno == EINTR) continue;
        if ((errno == EAGAIN || errno == EWOULDBLOCK) &&
            buff_iov->iov_len + bstr_iov->iov_len > 1) {
          /* Call might have blocked, try writing a single byte */
          if (buff_len) {
            buff_iov->iov_len = 1;
            bstr_iov->iov_len = 0;
          } else bstr_iov->iov_len = 1;
          continue;
        } else {
          /* Write (maybe of even one byte only) failed */
          value v_n_good = Val_long((char *) bstr_iov->iov_base - bstr);
          chan->curr = chan->buff + buff_len;
          if (buff_len) memmove(chan->buff, buff_iov->iov_base, buff_len);
          caml_leave_blocking_section();
          Unlock(chan);
          raise_unix_io_error(v_n_good, "output", Nothing);
        }
      } else {
        /* Write successful */
        chan->offset += written;
        if (buff_len > (size_t) written) {
          /* Buffer was partially written only; continue */
          buff_iov->iov_base = (char *) buff_iov->iov_base + written;
          buff_len -= written;
          buff_iov->iov_len = buff_len;
        } else {
          /* Buffer is empty now */
          size_t bstr_written = written - buff_len;
          char *new_bstr = (char *) bstr_iov->iov_base + bstr_written;
          if (new_bstr >= bstr_min) {
            /* Sufficient data was sent */
            chan->curr = chan->buff;
            caml_leave_blocking_section();
            Unlock(chan);
            CAMLreturn(Val_long(new_bstr - bstr));
          } else {
            /* Not yet done */
            bstr_iov->iov_base = new_bstr;
            buff_len = 0;
            buff_iov->iov_len = buff_len;
            bstr_len -= bstr_written;
            bstr_iov->iov_len = bstr_len;
          }
        }
      }
    }
  }
}

/* Output macros and functions */
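/* Generates a bigstring_really_<NAME>_stub function (see the uses of this
   macro below) that loops until the whole range has been written, retrying on
   EINTR and raising IOError with the number of bytes already written when the
   underlying call fails. */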
#define MakeReallyOutputFun(NAME, CALL_WRITE)                           \
  CAMLprim value bigstring_really_##NAME##_stub(                       \
    value v_fd, value v_pos, value v_len, value v_bstr)                \
  {                                                                    \
    CAMLparam1(v_bstr);                                                \
    int fd = Int_val(v_fd);                                            \
    size_t len = Long_val(v_len);                                      \
    ssize_t written;                                                   \
    char *bstr_start = get_bstr(v_bstr, v_pos);                        \
    char *bstr = bstr_start;                                           \
    char *bstr_max = bstr + len;                                       \
    caml_enter_blocking_section();                                     \
    do {                                                               \
      CALL_WRITE;                                                      \
      if (written == -1) {                                             \
        if (errno == EINTR) continue;                                  \
        {                                                              \
          value v_n_good = Val_long(bstr - bstr_start);                \
          caml_leave_blocking_section();                               \
          raise_unix_io_error(v_n_good, STR(really_##NAME), Nothing);  \
        }                                                              \
      };                                                               \
      len -= written;                                                  \
      bstr += written;                                                 \
    } while (bstr < bstr_max);                                         \
    caml_leave_blocking_section();                                     \
    CAMLreturn(Val_unit);                                              \
  }

MakeReallyOutputFun(write, written = write(fd, bstr, len))
CAMLprim value bigstring_write_stub(
  value v_fd, value v_pos, value v_len, value v_bstr)
{
  CAMLparam1(v_bstr);
  char *bstr = get_bstr(v_bstr, v_pos);
  size_t len = Long_val(v_len);
  ssize_t written;
  caml_enter_blocking_section();
  written = write(Int_val(v_fd), bstr, len);
  caml_leave_blocking_section();
  if (written == -1) uerror("write", Nothing);
  CAMLreturn(Val_long(written));
}

CAMLprim value bigstring_pwrite_assume_fd_is_nonblocking_stub(
  value v_fd, value v_offset, value v_pos, value v_len, value v_bstr)
{
  char *bstr = get_bstr(v_bstr, v_pos);
  size_t len = Long_val(v_len);
  ssize_t written;
  written = pwrite(Int_val(v_fd), bstr, len, Long_val(v_offset));
  if (written == -1) uerror("bigstring_pwrite_assume_fd_is_nonblocking_stub", Nothing);
  return Val_long(written);
}
CAMLprim value bigstring_write_assume_fd_is_nonblocking_stub(
  value v_fd, value v_pos, value v_len, value v_bstr)
{
  struct caml_ba_array *ba = Caml_ba_array_val(v_bstr);
  char *bstr = (char *) ba->data + Long_val(v_pos);
  size_t len = Long_val(v_len);
  ssize_t written;
  if ((len > THREAD_IO_CUTOFF) || (ba->flags & CAML_BA_MAPPED_FILE)) {
    Begin_roots1(v_bstr);
    caml_enter_blocking_section();
    written = write(Int_val(v_fd), bstr, len);
    caml_leave_blocking_section();
    End_roots();
  }
  else written = write(Int_val(v_fd), bstr, len);
  if (written == -1) uerror("write_assume_fd_is_nonblocking", Nothing);
  return Val_long(written);
}
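/* Helper for the writev stubs: registers [v_iovecs] as a GC root so the
   underlying bigstrings stay alive while the OCaml runtime lock is released,
   performs the writev, and frees the temporary C iovec array. */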
static inline ssize_t writev_in_blocking_section(
  value v_fd, value v_iovecs, struct iovec *iovecs, int count)
{
  ssize_t ret;
  CAMLparam1(v_iovecs);  /* To protect bigstrings outside of OCaml lock */
  caml_enter_blocking_section();
  ret = jane_writev(Int_val(v_fd), iovecs, count);
  caml_stat_free(iovecs);
  caml_leave_blocking_section();
  CAMLreturnT(ssize_t, ret);
}

CAMLprim value bigstring_writev_stub(value v_fd, value v_iovecs, value v_count)
{
  int count = Int_val(v_count);
  size_t total_len = 0;
  struct iovec *iovecs = copy_iovecs(&total_len, v_iovecs, count);
  ssize_t ret = writev_in_blocking_section(v_fd, v_iovecs, iovecs, count);
  if (ret == -1) uerror("writev", Nothing);
  return Val_long(ret);
}
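/* Returns nonzero if any of the iovecs is backed by a bigstring that maps a
   file; such transfers are routed through the blocking section, since
   touching mapped pages may itself block on disk I/O. */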
__pure static inline int contains_mmapped(value v_iovecs, int n)
{
  for (--n; n >= 0; --n) {
    value v_iovec = Field(v_iovecs, n);
    int flags = Caml_ba_array_val(Field(v_iovec, 0))->flags;
    if (unlikely(flags & CAML_BA_MAPPED_FILE)) return 1;
  }
  return 0;
}

CAMLprim value bigstring_writev_assume_fd_is_nonblocking_stub(
  value v_fd, value v_iovecs, value v_count)
{
  int count = Int_val(v_count);
  size_t total_len = 0;
  struct iovec *iovecs = copy_iovecs(&total_len, v_iovecs, count);
  ssize_t ret;
  if (total_len > THREAD_IO_CUTOFF || contains_mmapped(v_iovecs, count))
    /* NOTE: writev_in_blocking_section frees iovecs */
    ret = writev_in_blocking_section(v_fd, v_iovecs, iovecs, count);
  else {
    ret = jane_writev(Int_val(v_fd), iovecs, count);
    caml_stat_free(iovecs);
  }
  if (ret == -1) uerror("writev_assume_fd_is_nonblocking", Nothing);
  return Val_long(ret);
}

#ifdef JSC_RECVMMSG
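/* Receives up to [count] datagrams with a single recvmmsg(2) call; the length
   of each received message is stored in the corresponding slot of [v_lens],
   and the number of messages received is returned. */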
CAMLprim value bigstring_recvmmsg_assume_fd_is_nonblocking_stub(
  value v_fd, value v_iovecs, value v_count, value v_srcs, value v_lens)
{
  CAMLparam5(v_fd, v_iovecs, v_count, v_srcs, v_lens);
  CAMLlocal4(v_iovec, v_buf, v_pos, v_len);
  unsigned i;
  int n_read;
  unsigned count;
  count = (unsigned) Long_val(v_count);
  /* On 32-bit platforms, sizeof(unsigned) == sizeof(intnat); it thus suffices
     to check that [v_count] is not negative.
     On 64-bit platforms with unsigned being 32 bit and intnat being 64 bit, we
     need the second check to ensure there is no truncation.  Note that
     "(intnat) count" zero-extends to 64-bit width; this check actually
     subsumes the check that [v_count] is not negative.
     If this code were built on a platform where both unsigned and intnat were
     64 bit, it would still work, by analogy with the all-32-bit case. */
  if (Long_val(v_count) < 0 || (intnat) count != Long_val(v_count)) {
    caml_invalid_argument("bigstring_recvmmsg_assume_fd_is_nonblocking_stub: "
                          "v_count exceeds unsigned int");
  }
  if (!Is_block(v_lens)) {
    caml_invalid_argument("bigstring_recvmmsg_assume_fd_is_nonblocking_stub: "
                          "v_lens is not an array");
  }
  if (Wosize_val(v_lens) < count) {
    caml_invalid_argument("bigstring_recvmmsg_assume_fd_is_nonblocking_stub: "
                          "length v_lens < count");
  }
  if (count > RECVMMSG_MAX_COUNT) {
    caml_invalid_argument("bigstring_recvmmsg_assume_fd_is_nonblocking_stub: "
                          "v_count exceeds RECVMMSG_MAX_COUNT");
  }
  {
    /* For a big count (~100), a mostly idle system spent a substantial amount
       of time (~10%) copying the iovec fields back and forth.  This was
       greatly improved by passing a small (~4) number of buffers. */
    struct mmsghdr hdrs[RECVMMSG_MAX_COUNT];
    struct iovec iovecs[RECVMMSG_MAX_COUNT];
    for (i = 0; i < count; i++) {
      v_iovec = Field(v_iovecs, i);
      v_buf = Field(v_iovec, 0);
      v_pos = Field(v_iovec, 1);
      v_len = Field(v_iovec, 2);
      iovecs[i].iov_base = get_bstr(v_buf, v_pos);
      iovecs[i].iov_len = Long_val(v_len);
    }
    n_read = recvmmsg_assume_fd_is_nonblocking(v_fd, iovecs, count, v_srcs, hdrs);
    for (i = 0; (int) i < n_read; i++) {
      Field(v_lens, i) = Val_long(hdrs[i].msg_len);
    }
  }
  CAMLreturn(Val_int(n_read));
}
#endif /* JSC_RECVMMSG */
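/* Send without SIGPIPE: where MSG_NOSIGNAL is available (e.g. Linux) it is
   passed as a flag to send/sendto/sendmsg; otherwise SO_NOSIGPIPE is used.
   If neither exists, these stubs are not provided. */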
#if defined(JSC_MSG_NOSIGNAL) || defined(JSC_SO_NOSIGPIPE)

#if defined(JSC_MSG_NOSIGNAL)
MakeReallyOutputFun(send_no_sigpipe,
                    written = send(fd, bstr, len, MSG_NOSIGNAL))

static int nonblocking_no_sigpipe_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
#elif defined(JSC_SO_NOSIGPIPE)
MakeReallyOutputFun(send_no_sigpipe,
                    written = send(fd, bstr, len, SO_NOSIGPIPE))

static int nonblocking_no_sigpipe_flag = MSG_DONTWAIT | SO_NOSIGPIPE;
#endif

CAMLprim value bigstring_send_nonblocking_no_sigpipe_stub(
  value v_fd, value v_pos, value v_len, value v_bstr)
{
  char *bstr = get_bstr(v_bstr, v_pos);
  ssize_t ret =
    send(Int_val(v_fd), bstr, Long_val(v_len), nonblocking_no_sigpipe_flag);
  if (ret == -1)
    ret = -errno;
  return Val_long(ret);
}
CAMLprim value bigstring_sendto_nonblocking_no_sigpipe_stub(
  value v_fd, value v_pos, value v_len, value v_bstr, value v_addr)
{
  char *bstr = get_bstr(v_bstr, v_pos);
  union sock_addr_union addr;
  socklen_param_type addr_len = sizeof(addr);
  ssize_t ret;
  get_sockaddr(v_addr, &addr, &addr_len);
  ret =
    sendto(
      Int_val(v_fd), bstr, Long_val(v_len),
      nonblocking_no_sigpipe_flag, &addr.s_gen, addr_len);
  if (ret == -1)
    ret = -errno;
  return Val_long(ret);
}
CAMLprim value bigstring_sendmsg_nonblocking_no_sigpipe_stub(
  value v_fd, value v_iovecs, value v_count)
{
  int count = Int_val(v_count);
  size_t total_len = 0;
  struct iovec *iovecs = copy_iovecs(&total_len, v_iovecs, count);
  struct msghdr msghdr = { NULL, 0, NULL, 0, NULL, 0, 0 };
  ssize_t ret;
  if (total_len > THREAD_IO_CUTOFF || contains_mmapped(v_iovecs, count)) {
    Begin_roots1(v_iovecs);
    caml_enter_blocking_section();
    msghdr.msg_iov = iovecs;
    msghdr.msg_iovlen = count;
    ret = sendmsg(Int_val(v_fd), &msghdr, nonblocking_no_sigpipe_flag);
    caml_stat_free(iovecs);
    caml_leave_blocking_section();
    End_roots();
  } else {
    msghdr.msg_iov = iovecs;
    msghdr.msg_iovlen = count;
    ret = sendmsg(Int_val(v_fd), &msghdr, nonblocking_no_sigpipe_flag);
    caml_stat_free(iovecs);
  }
  if (ret == -1 && errno != EAGAIN && errno != EWOULDBLOCK)
    uerror("sendmsg_nonblocking_no_sigpipe", Nothing);
  return Val_long(ret);
}
#else
#warning "Neither MSG_NOSIGNAL nor SO_NOSIGPIPE defined; bigstring_send{,msg}_nonblocking_no_sigpipe not implemented"
#warning "Platform not supported. Please report this."
#endif /* JSC_MSG_NOSIGNAL || JSC_SO_NOSIGPIPE */