PageRenderTime 45ms CodeModel.GetById 10ms RepoModel.GetById 0ms app.codeStats 1ms

/nsock/src/engine_kqueue.c

https://gitlab.com/g10h4ck/nmap-gsoc2015
C | 370 lines | 213 code | 78 blank | 79 comment | 42 complexity | 41d69c3ea16607377dc39043d9130fa7 MD5 | raw file
Possible License(s): BSD-3-Clause, GPL-2.0, Apache-2.0, LGPL-2.0, LGPL-2.1, MIT
  1. /***************************************************************************
  2. * engine_kqueue.c -- BSD kqueue(2) based IO engine. *
  3. * *
  4. ***********************IMPORTANT NSOCK LICENSE TERMS***********************
  5. * *
  6. * The nsock parallel socket event library is (C) 1999-2015 Insecure.Com *
  7. * LLC This library is free software; you may redistribute and/or *
  8. * modify it under the terms of the GNU General Public License as *
  9. * published by the Free Software Foundation; Version 2. This guarantees *
  10. * your right to use, modify, and redistribute this software under certain *
  11. * conditions. If this license is unacceptable to you, Insecure.Com LLC *
  12. * may be willing to sell alternative licenses (contact *
  13. * sales@insecure.com ). *
  14. * *
  15. * As a special exception to the GPL terms, Insecure.Com LLC grants *
  16. * permission to link the code of this program with any version of the *
  17. * OpenSSL library which is distributed under a license identical to that *
  18. * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
  19. * linked combinations including the two. You must obey the GNU GPL in all *
  20. * respects for all of the code used other than OpenSSL. If you modify *
  21. * this file, you may extend this exception to your version of the file, *
  22. * but you are not obligated to do so. *
  23. * *
  24. * If you received these files with a written license agreement stating *
  25. * terms other than the (GPL) terms above, then that alternative license *
  26. * agreement takes precedence over this comment. *
  27. * *
  28. * Source is provided to this software because we believe users have a *
  29. * right to know exactly what a program is going to do before they run it. *
  30. * This also allows you to audit the software for security holes. *
  31. * *
  32. * Source code also allows you to port Nmap to new platforms, fix bugs, *
  33. * and add new features. You are highly encouraged to send your changes *
  34. * to the dev@nmap.org mailing list for possible incorporation into the *
  35. * main distribution. By sending these changes to Fyodor or one of the *
  36. * Insecure.Org development mailing lists, or checking them into the Nmap *
  37. * source code repository, it is understood (unless you specify otherwise) *
  38. * that you are offering the Nmap Project (Insecure.Com LLC) the *
  39. * unlimited, non-exclusive right to reuse, modify, and relicense the *
  40. * code. Nmap will always be available Open Source, but this is important *
  41. * because the inability to relicense code has caused devastating problems *
  42. * for other Free Software projects (such as KDE and NASM). We also *
  43. * occasionally relicense the code to third parties as discussed above. *
  44. * If you wish to specify special license conditions of your *
  45. * contributions, just say so when you send them. *
  46. * *
  47. * This program is distributed in the hope that it will be useful, but *
  48. * WITHOUT ANY WARRANTY; without even the implied warranty of *
  49. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
  50. * General Public License v2.0 for more details *
  51. * (http://www.gnu.org/licenses/gpl-2.0.html). *
  52. * *
  53. ***************************************************************************/
  54. /* $Id$ */
  55. #ifdef HAVE_CONFIG_H
  56. #include "nsock_config.h"
  57. #endif
  58. #if HAVE_KQUEUE
  59. #include <sys/types.h>
  60. #include <sys/event.h>
  61. #include <sys/time.h>
  62. #include <errno.h>
  63. #include "nsock_internal.h"
  64. #include "nsock_log.h"
  65. #if HAVE_PCAP
  66. #include "nsock_pcap.h"
  67. #endif
  68. #define INITIAL_EV_COUNT 128
  69. /* --- ENGINE INTERFACE PROTOTYPES --- */
  70. static int kqueue_init(struct npool *nsp);
  71. static void kqueue_destroy(struct npool *nsp);
  72. static int kqueue_iod_register(struct npool *nsp, struct niod *iod, int ev);
  73. static int kqueue_iod_unregister(struct npool *nsp, struct niod *iod);
  74. static int kqueue_iod_modify(struct npool *nsp, struct niod *iod, int ev_set, int ev_clr);
  75. static int kqueue_loop(struct npool *nsp, int msec_timeout);
  76. /* ---- ENGINE DEFINITION ---- */
/* The kqueue-backed IO engine exported to nsock's engine selection logic.
 * Each member is one of the engine interface entry points prototyped above. */
struct io_engine engine_kqueue = {
  "kqueue",              /* engine name */
  kqueue_init,           /* pool initialization */
  kqueue_destroy,        /* pool teardown */
  kqueue_iod_register,   /* start watching an IOD */
  kqueue_iod_unregister, /* stop watching an IOD */
  kqueue_iod_modify,     /* change the watched event set */
  kqueue_loop            /* wait for and dispatch events */
};
  86. /* --- INTERNAL PROTOTYPES --- */
  87. static void iterate_through_event_lists(struct npool *nsp, int evcount);
  88. /* defined in nsock_core.c */
  89. void process_iod_events(struct npool *nsp, struct niod *nsi, int ev);
  90. void process_event(struct npool *nsp, gh_list_t *evlist, struct nevent *nse, int ev);
  91. void process_expired_events(struct npool *nsp);
  92. #if HAVE_PCAP
  93. #ifndef PCAP_CAN_DO_SELECT
  94. int pcap_read_on_nonselect(struct npool *nsp);
  95. #endif
  96. #endif
  97. /* defined in nsock_event.c */
  98. void update_first_events(struct nevent *nse);
  99. extern struct timeval nsock_tod;
  100. /*
  101. * Engine specific data structure
  102. */
struct kqueue_engine_info {
  int kqfd;               /* kqueue(2) descriptor */
  int maxfd;              /* highest registered fd seen so far (approximate after
                           * unregister -- see kqueue_iod_unregister) */
  size_t evlen;           /* capacity (in entries) of the events array */
  struct kevent *events;  /* result buffer handed to kevent() each loop */
};
  109. int kqueue_init(struct npool *nsp) {
  110. struct kqueue_engine_info *kinfo;
  111. kinfo = (struct kqueue_engine_info *)safe_malloc(sizeof(struct kqueue_engine_info));
  112. kinfo->kqfd = kqueue();
  113. kinfo->maxfd = -1;
  114. kinfo->evlen = INITIAL_EV_COUNT;
  115. kinfo->events = (struct kevent *)safe_malloc(INITIAL_EV_COUNT * sizeof(struct kevent));
  116. nsp->engine_data = (void *)kinfo;
  117. return 1;
  118. }
  119. void kqueue_destroy(struct npool *nsp) {
  120. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  121. assert(kinfo != NULL);
  122. close(kinfo->kqfd);
  123. free(kinfo->events);
  124. free(kinfo);
  125. }
  126. int kqueue_iod_register(struct npool *nsp, struct niod *iod, int ev) {
  127. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  128. assert(!IOD_PROPGET(iod, IOD_REGISTERED));
  129. IOD_PROPSET(iod, IOD_REGISTERED);
  130. iod->watched_events = EV_NONE;
  131. kqueue_iod_modify(nsp, iod, ev, EV_NONE);
  132. if (nsock_iod_get_sd(iod) > kinfo->maxfd)
  133. kinfo->maxfd = nsock_iod_get_sd(iod);
  134. return 1;
  135. }
  136. int kqueue_iod_unregister(struct npool *nsp, struct niod *iod) {
  137. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  138. /* some IODs can be unregistered here if they're associated to an event that was
  139. * immediately completed */
  140. if (IOD_PROPGET(iod, IOD_REGISTERED)) {
  141. kqueue_iod_modify(nsp, iod, EV_NONE, EV_READ|EV_WRITE);
  142. IOD_PROPCLR(iod, IOD_REGISTERED);
  143. if (nsock_iod_get_sd(iod) == kinfo->maxfd)
  144. kinfo->maxfd--;
  145. }
  146. iod->watched_events = EV_NONE;
  147. return 1;
  148. }
  149. #define EV_SETFLAG(_set, _ev) (((_set) & (_ev)) ? (EV_ADD|EV_ENABLE) : (EV_ADD|EV_DISABLE))
  150. int kqueue_iod_modify(struct npool *nsp, struct niod *iod, int ev_set, int ev_clr) {
  151. struct kevent kev[2];
  152. int new_events, i;
  153. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  154. assert((ev_set & ev_clr) == 0);
  155. assert(IOD_PROPGET(iod, IOD_REGISTERED));
  156. new_events = iod->watched_events;
  157. new_events |= ev_set;
  158. new_events &= ~ev_clr;
  159. if (new_events == iod->watched_events)
  160. return 1; /* nothing to do */
  161. i = 0;
  162. if ((ev_set ^ ev_clr) & EV_READ) {
  163. EV_SET(&kev[i], nsock_iod_get_sd(iod), EVFILT_READ, EV_SETFLAG(ev_set, EV_READ), 0, 0, (void *)iod);
  164. i++;
  165. }
  166. if ((ev_set ^ ev_clr) & EV_WRITE) {
  167. EV_SET(&kev[i], nsock_iod_get_sd(iod), EVFILT_WRITE, EV_SETFLAG(ev_set, EV_WRITE), 0, 0, (void *)iod);
  168. i++;
  169. }
  170. if (i > 0 && kevent(kinfo->kqfd, kev, i, NULL, 0, NULL) < 0)
  171. fatal("Unable to update events for IOD #%lu: %s", iod->id, strerror(errno));
  172. iod->watched_events = new_events;
  173. return 1;
  174. }
/* One iteration of the engine's wait loop: compute the shortest relevant
 * timeout, block in kevent() (retrying on EINTR), then dispatch whatever
 * fired. Returns 1 on success, 0 if there was nothing to wait on, and -1 on
 * a kevent() error (with nsp->errnum set). msec_timeout of -1 means no
 * caller-imposed timeout. */
int kqueue_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  struct timespec ts, *ts_p;
  int sock_err = 0;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  /* Grow the result buffer so a single kevent() call can report on every
   * active IOD at once (x2 gives headroom for read+write per IOD). */
  if (gh_list_count(&nsp->active_iods) > kinfo->evlen) {
    kinfo->evlen = gh_list_count(&nsp->active_iods) * 2;
    kinfo->events = (struct kevent *)safe_realloc(kinfo->events, kinfo->evlen * sizeof(struct kevent));
  }

  do {
    struct nevent *nse;

    nsock_log_debug_all("wait for events");

    /* Bound the wait by the soonest-expiring nsock event, if any. */
    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to kevent() */
    memset(&ts, 0, sizeof(struct timespec));
    if (combined_msecs >= 0) {
      /* combined_msecs is -1 only when both inputs were -1 (no timeout at
       * all); otherwise convert it to a timespec for kevent(). */
      ts.tv_sec = combined_msecs / 1000;
      ts.tv_nsec = (combined_msecs % 1000) * 1000000L;
      ts_p = &ts;
    } else {
      ts_p = NULL; /* NULL timespec: kevent() blocks indefinitely */
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = kevent(kinfo->kqfd, NULL, 0, kinfo->events, kinfo->evlen, ts_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to kevent delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error("nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  /* Dispatch everything kevent() reported, plus any expired timers. */
  iterate_through_event_lists(nsp, results_left);

  return 1;
}
  242. /* ---- INTERNAL FUNCTIONS ---- */
  243. static inline int get_evmask(struct niod *nsi, const struct kevent *kev) {
  244. int evmask = EV_NONE;
  245. /* generate the corresponding event mask with nsock event flags */
  246. if (kev->flags & EV_ERROR) {
  247. evmask |= EV_EXCEPT;
  248. if (kev->data == EPIPE && (nsi->watched_events & EV_READ))
  249. evmask |= EV_READ;
  250. } else {
  251. switch (kev->filter) {
  252. case EVFILT_READ:
  253. evmask |= EV_READ;
  254. break;
  255. case EVFILT_WRITE:
  256. evmask |= EV_WRITE;
  257. break;
  258. default:
  259. fatal("Unsupported filter value: %d\n", (int)kev->filter);
  260. }
  261. }
  262. return evmask;
  263. }
  264. /* Iterate through all the event lists (such as connect_events, read_events,
  265. * timer_events, etc) and take action for those that have completed (due to
  266. * timeout, i/o, etc) */
  267. void iterate_through_event_lists(struct npool *nsp, int evcount) {
  268. int n;
  269. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  270. struct niod *nsi;
  271. for (n = 0; n < evcount; n++) {
  272. struct kevent *kev = &kinfo->events[n];
  273. nsi = (struct niod *)kev->udata;
  274. /* process all the pending events for this IOD */
  275. process_iod_events(nsp, nsi, get_evmask(nsi, kev));
  276. IOD_PROPSET(nsi, IOD_PROCESSED);
  277. }
  278. for (n = 0; n < evcount; n++) {
  279. struct kevent *kev = &kinfo->events[n];
  280. nsi = (struct niod *)kev->udata;
  281. if (nsi->state == NSIOD_STATE_DELETED) {
  282. if (IOD_PROPGET(nsi, IOD_PROCESSED)) {
  283. IOD_PROPCLR(nsi, IOD_PROCESSED);
  284. gh_list_remove(&nsp->active_iods, &nsi->nodeq);
  285. gh_list_prepend(&nsp->free_iods, &nsi->nodeq);
  286. }
  287. }
  288. }
  289. /* iterate through timers and expired events */
  290. process_expired_events(nsp);
  291. }
  292. #endif /* HAVE_KQUEUE */