PageRenderTime 48ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 0ms

/nsock/src/engine_kqueue.c

https://github.com/prakashgamit/nmap
C | 371 lines | 213 code | 78 blank | 80 comment | 42 complexity | 06e45ed6a4d3f385fbd167d16e7565db MD5 | raw file
Possible License(s): BSD-3-Clause, GPL-2.0, LGPL-2.0, LGPL-2.1
  1. /***************************************************************************
  2. * engine_kqueue.c -- BSD kqueue(2) based IO engine. *
  3. * *
  4. ***********************IMPORTANT NSOCK LICENSE TERMS***********************
  5. * *
  6. * The nsock parallel socket event library is (C) 1999-2013 Insecure.Com *
  7. * LLC This library is free software; you may redistribute and/or *
  8. * modify it under the terms of the GNU General Public License as *
  9. * published by the Free Software Foundation; Version 2. This guarantees *
  10. * your right to use, modify, and redistribute this software under certain *
  11. * conditions. If this license is unacceptable to you, Insecure.Com LLC *
  12. * may be willing to sell alternative licenses (contact *
  13. * sales@insecure.com ). *
  14. * *
  15. * As a special exception to the GPL terms, Insecure.Com LLC grants *
  16. * permission to link the code of this program with any version of the *
  17. * OpenSSL library which is distributed under a license identical to that *
  18. * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
  19. * linked combinations including the two. You must obey the GNU GPL in all *
  20. * respects for all of the code used other than OpenSSL. If you modify *
  21. * this file, you may extend this exception to your version of the file, *
  22. * but you are not obligated to do so. *
  23. * *
  24. * If you received these files with a written license agreement stating *
  25. * terms other than the (GPL) terms above, then that alternative license *
  26. * agreement takes precedence over this comment. *
  27. * *
  28. * Source is provided to this software because we believe users have a *
  29. * right to know exactly what a program is going to do before they run it. *
  30. * This also allows you to audit the software for security holes (none *
  31. * have been found so far). *
  32. * *
  33. * Source code also allows you to port Nmap to new platforms, fix bugs, *
  34. * and add new features. You are highly encouraged to send your changes *
  35. * to the dev@nmap.org mailing list for possible incorporation into the *
  36. * main distribution. By sending these changes to Fyodor or one of the *
  37. * Insecure.Org development mailing lists, or checking them into the Nmap *
  38. * source code repository, it is understood (unless you specify otherwise) *
  39. * that you are offering the Nmap Project (Insecure.Com LLC) the *
  40. * unlimited, non-exclusive right to reuse, modify, and relicense the *
  41. * code. Nmap will always be available Open Source, but this is important *
  42. * because the inability to relicense code has caused devastating problems *
  43. * for other Free Software projects (such as KDE and NASM). We also *
  44. * occasionally relicense the code to third parties as discussed above. *
  45. * If you wish to specify special license conditions of your *
  46. * contributions, just say so when you send them. *
  47. * *
  48. * This program is distributed in the hope that it will be useful, but *
  49. * WITHOUT ANY WARRANTY; without even the implied warranty of *
  50. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
  51. * General Public License v2.0 for more details *
  52. * (http://www.gnu.org/licenses/gpl-2.0.html). *
  53. * *
  54. ***************************************************************************/
  55. /* $Id$ */
  56. #ifdef HAVE_CONFIG_H
  57. #include "nsock_config.h"
  58. #endif
  59. #if HAVE_KQUEUE
  60. #include <sys/types.h>
  61. #include <sys/event.h>
  62. #include <sys/time.h>
  63. #include <errno.h>
  64. #include "nsock_internal.h"
  65. #include "nsock_log.h"
  66. #if HAVE_PCAP
  67. #include "nsock_pcap.h"
  68. #endif
  69. #define INITIAL_EV_COUNT 128
  70. /* --- ENGINE INTERFACE PROTOTYPES --- */
  71. static int kqueue_init(mspool *nsp);
  72. static void kqueue_destroy(mspool *nsp);
  73. static int kqueue_iod_register(mspool *nsp, msiod *iod, int ev);
  74. static int kqueue_iod_unregister(mspool *nsp, msiod *iod);
  75. static int kqueue_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr);
  76. static int kqueue_loop(mspool *nsp, int msec_timeout);
  77. /* ---- ENGINE DEFINITION ---- */
/* Engine vtable exported to nsock core: name plus the six engine
 * entry points declared above. Order must match struct io_engine. */
struct io_engine engine_kqueue = {
  "kqueue",              /* engine name, selectable by the user */
  kqueue_init,           /* pool initialization */
  kqueue_destroy,        /* pool teardown */
  kqueue_iod_register,   /* start watching an IOD */
  kqueue_iod_unregister, /* stop watching an IOD */
  kqueue_iod_modify,     /* change the watched event set */
  kqueue_loop            /* wait for and dispatch events */
};
  87. /* --- INTERNAL PROTOTYPES --- */
  88. static void iterate_through_event_lists(mspool *nsp, int evcount);
  89. /* defined in nsock_core.c */
  90. void process_iod_events(mspool *nsp, msiod *nsi, int ev);
  91. void process_event(mspool *nsp, gh_list_t *evlist, msevent *nse, int ev);
  92. void process_expired_events(mspool *nsp);
  93. #if HAVE_PCAP
  94. #ifndef PCAP_CAN_DO_SELECT
  95. int pcap_read_on_nonselect(mspool *nsp);
  96. #endif
  97. #endif
  98. /* defined in nsock_event.c */
  99. void update_first_events(msevent *nse);
  100. extern struct timeval nsock_tod;
  101. /*
  102. * Engine specific data structure
  103. */
struct kqueue_engine_info {
  int kqfd;              /* kqueue(2) descriptor */
  int maxfd;             /* highest registered socket descriptor (-1 when none) */
  size_t evlen;          /* capacity (entries) of the events array below */
  struct kevent *events; /* receive buffer for kevent(2) results, grown in kqueue_loop */
};
  110. int kqueue_init(mspool *nsp) {
  111. struct kqueue_engine_info *kinfo;
  112. kinfo = (struct kqueue_engine_info *)safe_malloc(sizeof(struct kqueue_engine_info));
  113. kinfo->kqfd = kqueue();
  114. kinfo->maxfd = -1;
  115. kinfo->evlen = INITIAL_EV_COUNT;
  116. kinfo->events = (struct kevent *)safe_malloc(INITIAL_EV_COUNT * sizeof(struct kevent));
  117. nsp->engine_data = (void *)kinfo;
  118. return 1;
  119. }
  120. void kqueue_destroy(mspool *nsp) {
  121. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  122. assert(kinfo != NULL);
  123. close(kinfo->kqfd);
  124. free(kinfo->events);
  125. free(kinfo);
  126. }
  127. int kqueue_iod_register(mspool *nsp, msiod *iod, int ev) {
  128. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  129. assert(!IOD_PROPGET(iod, IOD_REGISTERED));
  130. IOD_PROPSET(iod, IOD_REGISTERED);
  131. iod->watched_events = EV_NONE;
  132. kqueue_iod_modify(nsp, iod, ev, EV_NONE);
  133. if (nsi_getsd(iod) > kinfo->maxfd)
  134. kinfo->maxfd = nsi_getsd(iod);
  135. return 1;
  136. }
  137. int kqueue_iod_unregister(mspool *nsp, msiod *iod) {
  138. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  139. /* some IODs can be unregistered here if they're associated to an event that was
  140. * immediately completed */
  141. if (IOD_PROPGET(iod, IOD_REGISTERED)) {
  142. kqueue_iod_modify(nsp, iod, EV_NONE, EV_READ|EV_WRITE);
  143. IOD_PROPCLR(iod, IOD_REGISTERED);
  144. if (nsi_getsd(iod) == kinfo->maxfd)
  145. kinfo->maxfd--;
  146. }
  147. iod->watched_events = EV_NONE;
  148. return 1;
  149. }
/* Translate an nsock event mask into kevent(2) action flags: enable the
 * filter when _ev is present in _set, otherwise add it disabled. */
#define EV_SETFLAG(_set, _ev) (((_set) & (_ev)) ? (EV_ADD|EV_ENABLE) : (EV_ADD|EV_DISABLE))
  151. int kqueue_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr) {
  152. struct kevent kev[2];
  153. int new_events, i;
  154. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  155. assert((ev_set & ev_clr) == 0);
  156. assert(IOD_PROPGET(iod, IOD_REGISTERED));
  157. new_events = iod->watched_events;
  158. new_events |= ev_set;
  159. new_events &= ~ev_clr;
  160. if (new_events == iod->watched_events)
  161. return 1; /* nothing to do */
  162. i = 0;
  163. if ((ev_set ^ ev_clr) & EV_READ) {
  164. EV_SET(&kev[i], nsi_getsd(iod), EVFILT_READ, EV_SETFLAG(ev_set, EV_READ), 0, 0, (void *)iod);
  165. i++;
  166. }
  167. if ((ev_set ^ ev_clr) & EV_WRITE) {
  168. EV_SET(&kev[i], nsi_getsd(iod), EVFILT_WRITE, EV_SETFLAG(ev_set, EV_WRITE), 0, 0, (void *)iod);
  169. i++;
  170. }
  171. if (i > 0 && kevent(kinfo->kqfd, kev, i, NULL, 0, NULL) < 0)
  172. fatal("Unable to update events for IOD #%lu: %s", iod->id, strerror(errno));
  173. iod->watched_events = new_events;
  174. return 1;
  175. }
/* Main engine wait loop: block in kevent(2) for at most msec_timeout
 * milliseconds (-1 means no limit), then dispatch whatever fired.
 * Returns 1 after dispatching, 0 if no events are pending, -1 on a
 * kevent(2) error other than EINTR (errnum is stored in the pool). */
int kqueue_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  struct timespec ts, *ts_p;
  int sock_err = 0;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  /* Grow the kevent result buffer so it can hold at least one entry per
   * active IOD (doubled to limit reallocation churn). */
  if (gh_list_count(&nsp->active_iods) > kinfo->evlen) {
    kinfo->evlen = gh_list_count(&nsp->active_iods) * 2;
    kinfo->events = (struct kevent *)safe_realloc(kinfo->events, kinfo->evlen * sizeof(struct kevent));
  }

  do {
    msevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    /* Cap the wait by the soonest-expiring nsock event, if any. */
    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to kevent(): a NULL pointer
     * means block indefinitely. */
    memset(&ts, 0, sizeof(struct timespec));
    if (combined_msecs >= 0) {
      ts.tv_sec = combined_msecs / 1000;
      ts.tv_nsec = (combined_msecs % 1000) * 1000000L;
      ts_p = &ts;
    } else {
      ts_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      /* This brace-block is the else-branch of the pcap check above when
       * that code is compiled in, and unconditional otherwise. */
      results_left = kevent(kinfo->kqfd, NULL, 0, kinfo->events, kinfo->evlen, ts_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to kevent delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp, results_left);

  return 1;
}
  243. /* ---- INTERNAL FUNCTIONS ---- */
  244. static inline int get_evmask(msiod *nsi, const struct kevent *kev) {
  245. int evmask = EV_NONE;
  246. /* generate the corresponding event mask with nsock event flags */
  247. if (kev->flags & EV_ERROR) {
  248. evmask |= EV_EXCEPT;
  249. if (kev->data == EPIPE && (nsi->watched_events & EV_READ))
  250. evmask |= EV_READ;
  251. } else {
  252. switch (kev->filter) {
  253. case EVFILT_READ:
  254. evmask |= EV_READ;
  255. break;
  256. case EVFILT_WRITE:
  257. evmask |= EV_WRITE;
  258. break;
  259. default:
  260. fatal("Unsupported filter value: %d\n", (int)kev->filter);
  261. }
  262. }
  263. return evmask;
  264. }
  265. /* Iterate through all the event lists (such as connect_events, read_events,
  266. * timer_events, etc) and take action for those that have completed (due to
  267. * timeout, i/o, etc) */
  268. void iterate_through_event_lists(mspool *nsp, int evcount) {
  269. int n;
  270. struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  271. msiod *nsi;
  272. for (n = 0; n < evcount; n++) {
  273. struct kevent *kev = &kinfo->events[n];
  274. nsi = (msiod *)kev->udata;
  275. /* process all the pending events for this IOD */
  276. process_iod_events(nsp, nsi, get_evmask(nsi, kev));
  277. IOD_PROPSET(nsi, IOD_PROCESSED);
  278. }
  279. for (n = 0; n < evcount; n++) {
  280. struct kevent *kev = &kinfo->events[n];
  281. nsi = (msiod *)kev->udata;
  282. if (nsi->state == NSIOD_STATE_DELETED) {
  283. if (IOD_PROPGET(nsi, IOD_PROCESSED)) {
  284. IOD_PROPCLR(nsi, IOD_PROCESSED);
  285. gh_list_remove(&nsp->active_iods, &nsi->nodeq);
  286. gh_list_prepend(&nsp->free_iods, &nsi->nodeq);
  287. }
  288. }
  289. }
  290. /* iterate through timers and expired events */
  291. process_expired_events(nsp);
  292. }
  293. #endif /* HAVE_KQUEUE */