
/nsock/src/engine_poll.c

https://github.com/prakashgamit/nmap
Possible License(s): BSD-3-Clause, GPL-2.0, LGPL-2.0, LGPL-2.1

/***************************************************************************
 * engine_poll.c -- poll(2) based IO engine.
 *
 *********************** IMPORTANT NSOCK LICENSE TERMS **********************
 *
 * The nsock parallel socket event library is (C) 1999-2013 Insecure.Com
 * LLC This library is free software; you may redistribute and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; Version 2. This guarantees
 * your right to use, modify, and redistribute this software under certain
 * conditions. If this license is unacceptable to you, Insecure.Com LLC
 * may be willing to sell alternative licenses (contact sales@insecure.com).
 *
 * As a special exception to the GPL terms, Insecure.Com LLC grants
 * permission to link the code of this program with any version of the
 * OpenSSL library which is distributed under a license identical to that
 * listed in the included docs/licenses/OpenSSL.txt file, and distribute
 * linked combinations including the two. You must obey the GNU GPL in all
 * respects for all of the code used other than OpenSSL. If you modify
 * this file, you may extend this exception to your version of the file,
 * but you are not obligated to do so.
 *
 * If you received these files with a written license agreement stating
 * terms other than the (GPL) terms above, then that alternative license
 * agreement takes precedence over this comment.
 *
 * Source is provided to this software because we believe users have a
 * right to know exactly what a program is going to do before they run it.
 * This also allows you to audit the software for security holes (none
 * have been found so far).
 *
 * Source code also allows you to port Nmap to new platforms, fix bugs,
 * and add new features. You are highly encouraged to send your changes
 * to the dev@nmap.org mailing list for possible incorporation into the
 * main distribution. By sending these changes to Fyodor or one of the
 * Insecure.Org development mailing lists, or checking them into the Nmap
 * source code repository, it is understood (unless you specify otherwise)
 * that you are offering the Nmap Project (Insecure.Com LLC) the
 * unlimited, non-exclusive right to reuse, modify, and relicense the
 * code. Nmap will always be available Open Source, but this is important
 * because the inability to relicense code has caused devastating problems
 * for other Free Software projects (such as KDE and NASM). We also
 * occasionally relicense the code to third parties as discussed above.
 * If you wish to specify special license conditions of your
 * contributions, just say so when you send them.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License v2.0 for more details
 * (http://www.gnu.org/licenses/gpl-2.0.html).
 *
 ***************************************************************************/

/* $Id$ */

#ifndef WIN32
/* Allow the use of POLLRDHUP, if available. */
#define _GNU_SOURCE
#endif

#ifdef HAVE_CONFIG_H
#include "nsock_config.h"
#elif WIN32
#include "nsock_winconfig.h"
#endif

#if HAVE_POLL

#include <errno.h>

#ifndef WIN32
#include <poll.h>
#else
#include <Winsock2.h>
#endif /* ^WIN32 */

#include "nsock_internal.h"
#include "nsock_log.h"

#if HAVE_PCAP
#include "nsock_pcap.h"
#endif

#define EV_LIST_INIT_SIZE 1024

#ifdef WIN32
  #define Poll    WSAPoll
  #define POLLFD  WSAPOLLFD
#else
  #define Poll    poll
  #define POLLFD  struct pollfd
#endif

#ifdef WIN32
  #define POLL_R_FLAGS (POLLIN)
#else
  #define POLL_R_FLAGS (POLLIN | POLLPRI)
#endif /* WIN32 */

#define POLL_W_FLAGS POLLOUT

#ifdef POLLRDHUP
  #define POLL_X_FLAGS (POLLERR | POLLHUP | POLLRDHUP)
#else
  /* POLLRDHUP was introduced later and might be unavailable on older systems. */
  #define POLL_X_FLAGS (POLLERR | POLLHUP)
#endif /* POLLRDHUP */

/* --- ENGINE INTERFACE PROTOTYPES --- */
static int poll_init(mspool *nsp);
static void poll_destroy(mspool *nsp);
static int poll_iod_register(mspool *nsp, msiod *iod, int ev);
static int poll_iod_unregister(mspool *nsp, msiod *iod);
static int poll_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr);
static int poll_loop(mspool *nsp, int msec_timeout);


/* ---- ENGINE DEFINITION ---- */
struct io_engine engine_poll = {
  "poll",
  poll_init,
  poll_destroy,
  poll_iod_register,
  poll_iod_unregister,
  poll_iod_modify,
  poll_loop
};


/* --- INTERNAL PROTOTYPES --- */
static void iterate_through_event_lists(mspool *nsp);

/* defined in nsock_core.c */
void process_iod_events(mspool *nsp, msiod *nsi, int ev);
void process_event(mspool *nsp, gh_list_t *evlist, msevent *nse, int ev);
void process_expired_events(mspool *nsp);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
int pcap_read_on_nonselect(mspool *nsp);
#endif
#endif

/* defined in nsock_event.c */
void update_first_events(msevent *nse);

extern struct timeval nsock_tod;

/*
 * Engine specific data structure
 */
struct poll_engine_info {
  int capacity;
  /* index of the highest poll event */
  int max_fd;
  POLLFD *events;
};
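
/* Walk max_fd down past any unused (fd == -1) slots so that it again points at
 * the highest registered descriptor, or -1 if none remain. */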
static inline int lower_max_fd(struct poll_engine_info *pinfo) {
  do {
    pinfo->max_fd--;
  } while (pinfo->max_fd >= 0 && pinfo->events[pinfo->max_fd].fd == -1);

  return pinfo->max_fd;
}
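
/* Grow the pollfd array: allocate EV_LIST_INIT_SIZE entries on first use, then
 * double the capacity on each subsequent call, marking the new slots as unused
 * (fd == -1). Returns the new capacity. */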
static inline int evlist_grow(struct poll_engine_info *pinfo) {
  int i;

  i = pinfo->capacity;

  if (pinfo->capacity == 0) {
    pinfo->capacity = EV_LIST_INIT_SIZE;
    pinfo->events = (POLLFD *)safe_malloc(sizeof(POLLFD) * pinfo->capacity);
  } else {
    pinfo->capacity *= 2;
    pinfo->events = (POLLFD *)safe_realloc(pinfo->events, sizeof(POLLFD) * pinfo->capacity);
  }

  while (i < pinfo->capacity) {
    pinfo->events[i].fd = -1;
    pinfo->events[i].events = 0;
    pinfo->events[i].revents = 0;
    i++;
  }

  return pinfo->capacity;
}
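
/* Engine initialization: allocate the engine data and pre-allocate the initial
 * pollfd array. */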
int poll_init(mspool *nsp) {
  struct poll_engine_info *pinfo;

  pinfo = (struct poll_engine_info *)safe_malloc(sizeof(struct poll_engine_info));
  pinfo->capacity = 0;
  pinfo->max_fd = -1;
  evlist_grow(pinfo);

  nsp->engine_data = (void *)pinfo;

  return 1;
}

void poll_destroy(mspool *nsp) {
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert(pinfo != NULL);
  free(pinfo->events);
  free(pinfo);
}
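
/* Add iod's descriptor to the watched set. The pollfd array is indexed
 * directly by socket descriptor, so it is grown until sd fits. */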
int poll_iod_register(mspool *nsp, msiod *iod, int ev) {
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
  int sd;

  assert(!IOD_PROPGET(iod, IOD_REGISTERED));

  iod->watched_events = ev;

  sd = nsi_getsd(iod);
  while (pinfo->capacity < sd + 1)
    evlist_grow(pinfo);

  pinfo->events[sd].fd = sd;
  pinfo->events[sd].events = 0;
  pinfo->events[sd].revents = 0;

  pinfo->max_fd = MAX(pinfo->max_fd, sd);

  if (ev & EV_READ)
    pinfo->events[sd].events |= POLL_R_FLAGS;
  if (ev & EV_WRITE)
    pinfo->events[sd].events |= POLL_W_FLAGS;
#ifndef WIN32
  if (ev & EV_EXCEPT)
    pinfo->events[sd].events |= POLL_X_FLAGS;
#endif

  IOD_PROPSET(iod, IOD_REGISTERED);
  return 1;
}
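
/* Remove iod's descriptor from the watched set, if it was actually registered,
 * and lower max_fd when the highest slot goes away. */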
int poll_iod_unregister(mspool *nsp, msiod *iod) {
  iod->watched_events = EV_NONE;

  /* some IODs can be unregistered here if they're associated with an event
   * that was immediately completed */
  if (IOD_PROPGET(iod, IOD_REGISTERED)) {
    struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
    int sd;

    sd = nsi_getsd(iod);
    pinfo->events[sd].fd = -1;
    pinfo->events[sd].events = 0;
    pinfo->events[sd].revents = 0;

    if (pinfo->max_fd == sd)
      lower_max_fd(pinfo);

    IOD_PROPCLR(iod, IOD_REGISTERED);
  }
  return 1;
}
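
/* Update the watched event set of an already-registered IOD and rebuild the
 * corresponding pollfd events mask. */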
int poll_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr) {
  int sd;
  int new_events;
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert((ev_set & ev_clr) == 0);
  assert(IOD_PROPGET(iod, IOD_REGISTERED));

  new_events = iod->watched_events;
  new_events |= ev_set;
  new_events &= ~ev_clr;

  if (new_events == iod->watched_events)
    return 1; /* nothing to do */

  iod->watched_events = new_events;

  sd = nsi_getsd(iod);

  pinfo->events[sd].fd = sd;
  pinfo->events[sd].events = 0;

  /* regenerate the current set of events for this IOD */
  if (iod->watched_events & EV_READ)
    pinfo->events[sd].events |= POLL_R_FLAGS;
  if (iod->watched_events & EV_WRITE)
    pinfo->events[sd].events |= POLL_W_FLAGS;
#ifndef WIN32
  if (iod->watched_events & EV_EXCEPT)
    pinfo->events[sd].events |= POLL_X_FLAGS;
#endif

  return 1;
}
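
/* Main engine loop: compute a timeout from the next expirable nsock event and
 * the caller-supplied msec_timeout, wait for descriptor activity with poll(2)
 * (retrying on EINTR), then dispatch whatever completed. */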
int poll_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    msevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      if (pinfo->max_fd > -1)
        results_left = Poll(pinfo->events, pinfo->max_fd + 1, combined_msecs);
      else
        results_left = 0;

      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to poll delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}

/* ---- INTERNAL FUNCTIONS ---- */
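
/* Translate the revents bits reported by poll() for this IOD's descriptor
 * (or its pcap descriptor) back into nsock EV_* flags. */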
static inline int get_evmask(mspool *nsp, msiod *nsi) {
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
  int sd, evmask = EV_NONE;
  POLLFD *pev;

  if (nsi->state != NSIOD_STATE_DELETED
      && nsi->events_pending
      && IOD_PROPGET(nsi, IOD_REGISTERED)) {

#if HAVE_PCAP
    if (nsi->pcap)
      sd = ((mspcap *)nsi->pcap)->pcap_desc;
    else
#endif
      sd = nsi->sd;

    assert(sd < pinfo->capacity);
    pev = &pinfo->events[sd];

    if (pev->revents & POLL_R_FLAGS)
      evmask |= EV_READ;
    if (pev->revents & POLL_W_FLAGS)
      evmask |= EV_WRITE;
    if (pev->events && (pev->revents & POLL_X_FLAGS))
      evmask |= (EV_READ | EV_WRITE | EV_EXCEPT);
  }
  return evmask;
}

/* Iterate through all the event lists (such as connect_events, read_events,
 * timer_events, etc) and take action for those that have completed (due to
 * timeout, i/o, etc) */
void iterate_through_event_lists(mspool *nsp) {
  gh_lnode_t *current, *next, *last;

  last = gh_list_last_elem(&nsp->active_iods);

  for (current = gh_list_first_elem(&nsp->active_iods);
       current != NULL && gh_lnode_prev(current) != last;
       current = next) {
    msiod *nsi = container_of(current, msiod, nodeq);

    process_iod_events(nsp, nsi, get_evmask(nsp, nsi));

    next = gh_lnode_next(current);
    if (nsi->state == NSIOD_STATE_DELETED) {
      gh_list_remove(&nsp->active_iods, current);
      gh_list_prepend(&nsp->free_iods, current);
    }
  }

  /* iterate through timers and expired events */
  process_expired_events(nsp);
}

#endif /* HAVE_POLL */