PageRenderTime 61ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 1ms

/servers/vfs/select.c

http://www.minix3.org/
C | 1072 lines | 686 code | 164 blank | 222 comment | 260 complexity | 67722c615da72f0d5c634b75d55bbbc9 MD5 | raw file
Possible License(s): MIT, WTFPL, AGPL-1.0, BSD-3-Clause, GPL-3.0, LGPL-2.0, JSON, 0BSD
  1. /* Implement entry point to select system call.
  2. *
  3. * The entry points into this file are
  4. * do_select: perform the SELECT system call
  5. * select_callback: notify select system of possible fd operation
  6. * select_unsuspend_by_endpt: cancel a blocking select on exiting driver
  7. */
  8. #include "fs.h"
  9. #include <sys/fcntl.h>
  10. #include <sys/time.h>
  11. #include <sys/select.h>
  12. #include <sys/stat.h>
  13. #include <minix/com.h>
  14. #include <minix/u64.h>
  15. #include <string.h>
  16. #include <assert.h>
  17. #include "file.h"
  18. #include "fproc.h"
  19. #include "dmap.h"
  20. #include "vnode.h"
/* max. number of simultaneously pending select() calls */
#define MAXSELECTS 25
#define FROM_PROC 0	/* copy_fdsets(): copy requested sets from the caller */
#define TO_PROC 1	/* copy_fdsets(): copy result sets back to the caller */

/* State of one outstanding select() call. A slot is free iff 'requestor'
 * is NULL. */
static struct selectentry {
  struct fproc *requestor;	/* slot is free iff this is NULL */
  endpoint_t req_endpt;		/* endpoint of the calling process */
  fd_set readfds, writefds, errorfds;	/* local copies of the requested sets */
  fd_set ready_readfds, ready_writefds, ready_errorfds;	/* results so far */
  fd_set *vir_readfds, *vir_writefds, *vir_errorfds; /* caller's set addresses */
  struct filp *filps[OPEN_MAX];	/* filp for each fd selected on */
  int type[OPEN_MAX];		/* index into fdtypes[] for each fd */
  int nfds, nreadyfds;		/* highest selected fd + 1; #ready ops so far */
  int error;			/* error to report instead of a count, if set */
  char block;			/* nonzero if the call may block */
  clock_t expiry;		/* timeout in ticks; 0 means no timer set */
  timer_t timer;		/* if expiry > 0 */
} selecttab[MAXSELECTS];
/* Forward declarations of the local helpers implemented below. */
static int copy_fdsets(struct selectentry *se, int nfds, int
	direction);
static int do_select_request(struct selectentry *se, int fd, int *ops);
static void filp_status(struct filp *fp, int status);
static int is_deferred(struct selectentry *se);
static void restart_proc(struct selectentry *se);
static void ops2tab(int ops, int fd, struct selectentry *e);
static int is_regular_file(struct filp *f);
static int is_pipe(struct filp *f);
static int is_supported_major(struct filp *f);
static void select_lock_filp(struct filp *f, int ops);
static int select_request_async(struct filp *f, int *ops, int block);
static int select_request_file(struct filp *f, int *ops, int block);
static int select_request_major(struct filp *f, int *ops, int block);
static int select_request_pipe(struct filp *f, int *ops, int block);
static int select_request_sync(struct filp *f, int *ops, int block);
static void select_cancel_all(struct selectentry *e);
static void select_cancel_filp(struct filp *f);
static void select_return(struct selectentry *);
static void select_restart_filps(void);
static int tab2ops(int fd, struct selectentry *e);
static void wipe_select(struct selectentry *s);
/* Table mapping recognized file types to their select request handler.
 * do_select() tries the type_match predicates in order and records the
 * first matching index in selectentry.type[fd]. */
static struct fdtype {
	int (*select_request)(struct filp *, int *ops, int block);	/* issue a select request for this type */
	int (*type_match)(struct filp *f);	/* does this filp belong to the type? */
} fdtypes[] = {
	{ select_request_major, is_supported_major },
	{ select_request_file, is_regular_file },
	{ select_request_pipe, is_pipe },
};
#define SEL_FDS (sizeof(fdtypes) / sizeof(fdtypes[0]))

static int select_majors[] = { /* List of majors that support selecting on */
	TTY_MAJOR,
	INET_MAJOR,
	UDS_MAJOR,
	LOG_MAJOR,
};
#define SEL_MAJORS (sizeof(select_majors) / sizeof(select_majors[0]))
  77. /*===========================================================================*
  78. * do_select *
  79. *===========================================================================*/
int do_select(void)
{
/* Implement the select(nfds, readfds, writefds, errorfds, timeout) system
 * call. First we copy the arguments and verify their sanity. Then we check
 * whether there are file descriptors that satisfy the select call right of the
 * bat. If so, or if there are no ready file descriptors but the process
 * requested to return immediately, we return the result. Otherwise we set a
 * timeout and wait for either the file descriptors to become ready or the
 * timer to go off. If no timeout value was provided, we wait indefinitely.
 *
 * Returns the number of ready (fd, set) pairs, a negative error code, or
 * SUSPEND when the caller has been blocked awaiting results. */
  int r, nfds, do_timeout = 0, fd, s;
  struct timeval timeout;
  struct selectentry *se;
  vir_bytes vtimeout;

  nfds = job_m_in.SEL_NFDS;
  vtimeout = (vir_bytes) job_m_in.SEL_TIMEOUT;

  /* Sane amount of file descriptors? */
  if (nfds < 0 || nfds > OPEN_MAX) return(EINVAL);

  /* Find a slot to store this select request */
  for (s = 0; s < MAXSELECTS; s++)
	if (selecttab[s].requestor == NULL) /* Unused slot */
		break;
  if (s >= MAXSELECTS) return(ENOSPC);

  se = &selecttab[s];
  wipe_select(se);	/* Clear results of previous usage */
  se->requestor = fp;	/* claiming the slot; fp is the calling process */
  se->req_endpt = who_e;
  se->vir_readfds = (fd_set *) job_m_in.SEL_READFDS;
  se->vir_writefds = (fd_set *) job_m_in.SEL_WRITEFDS;
  se->vir_errorfds = (fd_set *) job_m_in.SEL_ERRORFDS;

  /* Copy fdsets from the process */
  if ((r = copy_fdsets(se, nfds, FROM_PROC)) != OK) {
	se->requestor = NULL;	/* release the slot on every error path */
	return(r);
  }

  /* Did the process set a timeout value? If so, retrieve it. */
  if (vtimeout != 0) {
	do_timeout = 1;
	r = sys_vircopy(who_e, (vir_bytes) vtimeout, SELF,
			(vir_bytes) &timeout, sizeof(timeout));
	if (r != OK) {
		se->requestor = NULL;
		return(r);
	}
  }

  /* No nonsense in the timeval */
  if (do_timeout && (timeout.tv_sec < 0 || timeout.tv_usec < 0)) {
	se->requestor = NULL;
	return(EINVAL);
  }

  /* If there is no timeout, we block forever. Otherwise, we block up to the
   * specified time interval.
   */
  if (!do_timeout)	/* No timeout value set */
	se->block = 1;
  else if (do_timeout && (timeout.tv_sec > 0 || timeout.tv_usec > 0))
	se->block = 1;
  else			/* timeout set as (0,0) - this effects a poll */
	se->block = 0;
  se->expiry = 0;	/* no timer set (yet) */

  /* Verify that file descriptors are okay to select on */
  for (fd = 0; fd < nfds; fd++) {
	struct filp *f;
	unsigned int type, ops;

	/* Because the select() interface implicitly includes file descriptors
	 * you might not want to select on, we have to figure out whether we're
	 * interested in them. Typically, these file descriptors include fd's
	 * inherited from the parent proc and file descriptors that have been
	 * close()d, but had a lower fd than one in the current set.
	 */
	if (!(ops = tab2ops(fd, se)))
		continue; /* No operations set; nothing to do for this fd */

	/* Get filp belonging to this fd */
	f = se->filps[fd] = get_filp(fd, VNODE_READ);
	if (f == NULL) {
		if (err_code == EBADF)
			r = err_code;
		else /* File descriptor is 'ready' to return EIO */
			r = EINTR;

		se->requestor = NULL;
		return(r);
	}

	/* Check file types. According to POSIX 2008:
	 * "The pselect() and select() functions shall support regular files,
	 * terminal and pseudo-terminal devices, FIFOs, pipes, and sockets. The
	 * behavior of pselect() and select() on file descriptors that refer to
	 * other types of file is unspecified."
	 *
	 * In our case, terminal and pseudo-terminal devices are handled by the
	 * TTY major and sockets by either INET major (socket type AF_INET) or
	 * PFS major (socket type AF_UNIX). PFS acts as an FS when it handles
	 * pipes and as a driver when it handles sockets. Additionally, we
	 * support select on the LOG major to handle kernel logging, which is
	 * beyond the POSIX spec. */
	se->type[fd] = -1;
	for (type = 0; type < SEL_FDS; type++) {
		if (fdtypes[type].type_match(f)) {
			se->type[fd] = type;
			se->nfds = fd+1;	/* track highest fd + 1 */
			se->filps[fd]->filp_selectors++;
			break;
		}
	}
	unlock_filp(f);
	if (se->type[fd] == -1) { /* Type not found */
		se->requestor = NULL;
		return(EBADF);
	}
  }

  /* Check all file descriptors in the set whether one is 'ready' now */
  for (fd = 0; fd < nfds; fd++) {
	int ops, r;
	struct filp *f;

	/* Again, check for involuntarily selected fd's */
	if (!(ops = tab2ops(fd, se)))
		continue; /* No operations set; nothing to do for this fd */

	/* Test filp for select operations if not already done so. e.g.,
	 * processes sharing a filp and both doing a select on that filp. */
	f = se->filps[fd];
	if ((f->filp_select_ops & ops) != ops) {
		int wantops;

		wantops = (f->filp_select_ops |= ops);
		r = do_select_request(se, fd, &wantops);
		if (r != OK && r != SUSPEND)
			break; /* Error or bogus return code; abort */

		/* The select request above might have turned on/off some
		 * operations because they were 'ready' or not meaningful.
		 * Either way, we might have a result and we need to store them
		 * in the select table entry. */
		if (wantops & ops) ops2tab(wantops, fd, se);
	}
  }

  if ((se->nreadyfds > 0 || !se->block) && !is_deferred(se)) {
	/* fd's were found that were ready to go right away, and/or
	 * we were instructed not to block at all. Must return
	 * immediately.
	 */
	r = copy_fdsets(se, se->nfds, TO_PROC);
	select_cancel_all(se);
	se->requestor = NULL;

	if (r != OK)
		return(r);
	else if (se->error != OK)
		return(se->error);

	return(se->nreadyfds);
  }

  /* Convert timeval to ticks and set the timer. If it fails, undo
   * all, return error.
   */
  if (do_timeout) {
	int ticks;
	/* Open Group:
	 * "If the requested timeout interval requires a finer
	 * granularity than the implementation supports, the
	 * actual timeout interval shall be rounded up to the next
	 * supported value."
	 */
#define USECPERSEC 1000000
	while(timeout.tv_usec >= USECPERSEC) {
		/* this is to avoid overflow with *system_hz below */
		timeout.tv_usec -= USECPERSEC;
		timeout.tv_sec++;
	}
	/* Round the microsecond part up to the next clock tick. */
	ticks = timeout.tv_sec * system_hz +
		(timeout.tv_usec * system_hz + USECPERSEC-1) / USECPERSEC;
	se->expiry = ticks;
	set_timer(&se->timer, ticks, select_timeout_check, s);
  }

  /* process now blocked */
  suspend(FP_BLOCKED_ON_SELECT);
  return(SUSPEND);
}
  251. /*===========================================================================*
  252. * is_deferred *
  253. *===========================================================================*/
  254. static int is_deferred(struct selectentry *se)
  255. {
  256. /* Find out whether this select has pending initial replies */
  257. int fd;
  258. struct filp *f;
  259. for (fd = 0; fd < se->nfds; fd++) {
  260. if ((f = se->filps[fd]) == NULL) continue;
  261. if (f->filp_select_flags & (FSF_UPDATE|FSF_BUSY)) return(TRUE);
  262. }
  263. return(FALSE);
  264. }
  265. /*===========================================================================*
  266. * is_regular_file *
  267. *===========================================================================*/
  268. static int is_regular_file(struct filp *f)
  269. {
  270. return(f && f->filp_vno && S_ISREG(f->filp_vno->v_mode));
  271. }
  272. /*===========================================================================*
  273. * is_pipe *
  274. *===========================================================================*/
  275. static int is_pipe(struct filp *f)
  276. {
  277. /* Recognize either anonymous pipe or named pipe (FIFO) */
  278. return(f && f->filp_vno && S_ISFIFO(f->filp_vno->v_mode));
  279. }
  280. /*===========================================================================*
  281. * is_supported_major *
  282. *===========================================================================*/
  283. static int is_supported_major(struct filp *f)
  284. {
  285. /* See if this filp is a handle on a device on which we support select() */
  286. unsigned int m;
  287. if (!(f && f->filp_vno)) return(FALSE);
  288. if (!S_ISCHR(f->filp_vno->v_mode)) return(FALSE);
  289. for (m = 0; m < SEL_MAJORS; m++)
  290. if (major(f->filp_vno->v_sdev) == select_majors[m])
  291. return(TRUE);
  292. return(FALSE);
  293. }
  294. /*===========================================================================*
  295. * select_request_async *
  296. *===========================================================================*/
static int select_request_async(struct filp *f, int *ops, int block)
{
/* Issue a select request to an asynchronous driver for the operations in
 * *ops. On return, *ops holds the operations known to be ready (cleared
 * here; results arrive later via select_reply1/select_reply2). Returns OK
 * when nothing needs to be asked, SUSPEND when a reply is pending, or an
 * error code. */
  int r, rops, major;
  struct dmap *dp;

  rops = *ops;

  /* By default, nothing to do */
  *ops = 0;

  if (!block && (f->filp_select_flags & FSF_BLOCKED)) {
	/* This filp is blocked waiting for a reply, but we don't want to
	 * block ourselves. Unless we're awaiting the initial reply, these
	 * operations won't be ready */
	if (!(f->filp_select_flags & FSF_BUSY)) {
		if ((rops & SEL_RD) && (f->filp_select_flags & FSF_RD_BLOCK))
			rops &= ~SEL_RD;
		if ((rops & SEL_WR) && (f->filp_select_flags & FSF_WR_BLOCK))
			rops &= ~SEL_WR;
		if ((rops & SEL_ERR) && (f->filp_select_flags & FSF_ERR_BLOCK))
			rops &= ~SEL_ERR;
		/* All requested operations already blocked; nothing to ask. */
		if (!(rops & (SEL_RD|SEL_WR|SEL_ERR)))
			return(OK);
	}
  }

  /* Mark the filp as needing a (new) request to the driver. */
  f->filp_select_flags |= FSF_UPDATE;
  if (block) {
	rops |= SEL_NOTIFY;	/* ask the driver for a later notification */
	if (rops & SEL_RD) f->filp_select_flags |= FSF_RD_BLOCK;
	if (rops & SEL_WR) f->filp_select_flags |= FSF_WR_BLOCK;
	if (rops & SEL_ERR) f->filp_select_flags |= FSF_ERR_BLOCK;
  }

  /* A request for this filp is already outstanding; wait for its reply. */
  if (f->filp_select_flags & FSF_BUSY)
	return(SUSPEND);

  major = major(f->filp_vno->v_sdev);
  if (major < 0 || major >= NR_DEVICES) return(ENXIO);
  dp = &dmap[major];
  /* The driver handles one select request at a time; defer if occupied. */
  if (dp->dmap_sel_filp)
	return(SUSPEND);

  f->filp_select_flags &= ~FSF_UPDATE;	/* request is being sent now */
  r = dev_io(VFS_DEV_SELECT, f->filp_vno->v_sdev, rops, NULL,
	     cvu64(0), 0, 0, FALSE);
  if (r < 0 && r != SUSPEND)
	return(r);

  /* An async driver must reply SUSPEND here; the real answer comes later. */
  if (r != SUSPEND)
	panic("select_request_asynch: expected SUSPEND got: %d", r);

  dp->dmap_sel_filp = f;
  f->filp_select_flags |= FSF_BUSY;

  return(SUSPEND);
}
  344. /*===========================================================================*
  345. * select_request_file *
  346. *===========================================================================*/
  347. static int select_request_file(struct filp *UNUSED(f), int *UNUSED(ops),
  348. int UNUSED(block))
  349. {
  350. /* Files are always ready, so output *ops is input *ops */
  351. return(OK);
  352. }
  353. /*===========================================================================*
  354. * select_request_major *
  355. *===========================================================================*/
  356. static int select_request_major(struct filp *f, int *ops, int block)
  357. {
  358. int major, r;
  359. major = major(f->filp_vno->v_sdev);
  360. if (major < 0 || major >= NR_DEVICES) return(ENXIO);
  361. if (dmap[major].dmap_style == STYLE_DEVA ||
  362. dmap[major].dmap_style == STYLE_CLONE_A)
  363. r = select_request_async(f, ops, block);
  364. else
  365. r = select_request_sync(f, ops, block);
  366. return(r);
  367. }
  368. /*===========================================================================*
  369. * select_request_sync *
  370. *===========================================================================*/
  371. static int select_request_sync(struct filp *f, int *ops, int block)
  372. {
  373. int rops;
  374. rops = *ops;
  375. if (block) rops |= SEL_NOTIFY;
  376. *ops = dev_io(VFS_DEV_SELECT, f->filp_vno->v_sdev, rops, NULL,
  377. cvu64(0), 0, 0, FALSE);
  378. if (*ops < 0)
  379. return(*ops);
  380. return(OK);
  381. }
  382. /*===========================================================================*
  383. * select_request_pipe *
  384. *===========================================================================*/
static int select_request_pipe(struct filp *f, int *ops, int block)
{
/* Determine which of the requested operations in *ops are ready on a pipe
 * or FIFO, using pipe_check() in check-only mode. On return, *ops holds
 * the ready subset of the originally requested operations. If nothing is
 * ready and the caller may block, the requested operations are recorded
 * on the filp so a later pipe event can wake the caller. Always OK. */
  int orig_ops, r = 0, err;

  orig_ops = *ops;

  if ((*ops & (SEL_RD|SEL_ERR))) {
	/* Check if we can read 1 byte */
	err = pipe_check(f->filp_vno, READING, f->filp_flags & ~O_NONBLOCK, 1,
			 1 /* Check only */);

	/* Anything but SUSPEND (including errors) makes the read "ready";
	 * a negative result additionally flags an error condition. */
	if (err != SUSPEND)
		r |= SEL_RD;
	if (err < 0 && err != SUSPEND)
		r |= SEL_ERR;
	if (err == SUSPEND && !(f->filp_mode & R_BIT)) {
		/* A "meaningless" read select, therefore ready
		 * for reading and no error set. */
		r |= SEL_RD;
		r &= ~SEL_ERR;
	}
  }

  if ((*ops & (SEL_WR|SEL_ERR))) {
	/* Check if we can write 1 byte */
	err = pipe_check(f->filp_vno, WRITING, f->filp_flags & ~O_NONBLOCK, 1,
			 1 /* Check only */);

	if (err != SUSPEND)
		r |= SEL_WR;
	if (err < 0 && err != SUSPEND)
		r |= SEL_ERR;
	if (err == SUSPEND && !(f->filp_mode & W_BIT)) {
		/* A "meaningless" write select, therefore ready
		   for writing and no error set. */
		r |= SEL_WR;
		r &= ~SEL_ERR;
	}
  }

  /* Some options we collected might not be requested. */
  *ops = r & orig_ops;

  /* Nothing ready yet: remember what the caller wants so the pipe code
   * can report it once an operation becomes possible. */
  if (!*ops && block)
	f->filp_pipe_select_ops |= orig_ops;

  return(OK);
}
  425. /*===========================================================================*
  426. * tab2ops *
  427. *===========================================================================*/
  428. static int tab2ops(int fd, struct selectentry *e)
  429. {
  430. int ops = 0;
  431. if (FD_ISSET(fd, &e->readfds)) ops |= SEL_RD;
  432. if (FD_ISSET(fd, &e->writefds)) ops |= SEL_WR;
  433. if (FD_ISSET(fd, &e->errorfds)) ops |= SEL_ERR;
  434. return(ops);
  435. }
  436. /*===========================================================================*
  437. * ops2tab *
  438. *===========================================================================*/
  439. static void ops2tab(int ops, int fd, struct selectentry *e)
  440. {
  441. if ((ops & SEL_RD) && e->vir_readfds && FD_ISSET(fd, &e->readfds) &&
  442. !FD_ISSET(fd, &e->ready_readfds)) {
  443. FD_SET(fd, &e->ready_readfds);
  444. e->nreadyfds++;
  445. }
  446. if ((ops & SEL_WR) && e->vir_writefds && FD_ISSET(fd, &e->writefds) &&
  447. !FD_ISSET(fd, &e->ready_writefds)) {
  448. FD_SET(fd, &e->ready_writefds);
  449. e->nreadyfds++;
  450. }
  451. if ((ops & SEL_ERR) && e->vir_errorfds && FD_ISSET(fd, &e->errorfds) &&
  452. !FD_ISSET(fd, &e->ready_errorfds)) {
  453. FD_SET(fd, &e->ready_errorfds);
  454. e->nreadyfds++;
  455. }
  456. }
  457. /*===========================================================================*
  458. * copy_fdsets *
  459. *===========================================================================*/
static int copy_fdsets(struct selectentry *se, int nfds, int direction)
{
/* Copy the three fd_sets between the requesting process and this server.
 * direction FROM_PROC copies the requested sets in; TO_PROC copies the
 * ready sets back out. Only the words covering the first 'nfds' bits are
 * transferred, and only for sets the caller actually supplied (non-NULL
 * virtual address). Returns OK or a sys_vircopy() error code. */
  int r;
  size_t fd_setsize;
  endpoint_t src_e, dst_e;
  fd_set *src_fds, *dst_fds;

  if (nfds < 0 || nfds > OPEN_MAX)
	panic("select copy_fdsets: nfds wrong: %d", nfds);

  /* Only copy back as many bits as the user expects. */
#ifdef __NBSD_LIBC
  fd_setsize = (size_t) (howmany(nfds, __NFDBITS) * sizeof(__fd_mask));
#else
  fd_setsize = (size_t) (_FDSETWORDS(nfds) * _FDSETBITSPERWORD/8);
#endif

  /* Set source and destination endpoints */
  src_e = (direction == FROM_PROC) ? se->req_endpt : SELF;
  dst_e = (direction == FROM_PROC) ? SELF : se->req_endpt;

  /* read set */
  src_fds = (direction == FROM_PROC) ? se->vir_readfds : &se->ready_readfds;
  dst_fds = (direction == FROM_PROC) ? &se->readfds : se->vir_readfds;
  if (se->vir_readfds) {
	r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e,
			(vir_bytes) dst_fds, fd_setsize);
	if (r != OK) return(r);
  }

  /* write set */
  src_fds = (direction == FROM_PROC) ? se->vir_writefds : &se->ready_writefds;
  dst_fds = (direction == FROM_PROC) ? &se->writefds : se->vir_writefds;
  if (se->vir_writefds) {
	r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e,
			(vir_bytes) dst_fds, fd_setsize);
	if (r != OK) return(r);
  }

  /* error set */
  src_fds = (direction == FROM_PROC) ? se->vir_errorfds : &se->ready_errorfds;
  dst_fds = (direction == FROM_PROC) ? &se->errorfds : se->vir_errorfds;
  if (se->vir_errorfds) {
	r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e,
			(vir_bytes) dst_fds, fd_setsize);
	if (r != OK) return(r);
  }

  return(OK);
}
  503. /*===========================================================================*
  504. * select_cancel_all *
  505. *===========================================================================*/
  506. static void select_cancel_all(struct selectentry *se)
  507. {
  508. /* Cancel select. Decrease select usage and cancel timer */
  509. int fd;
  510. struct filp *f;
  511. for (fd = 0; fd < se->nfds; fd++) {
  512. if ((f = se->filps[fd]) == NULL) continue;
  513. se->filps[fd] = NULL;
  514. select_cancel_filp(f);
  515. }
  516. if (se->expiry > 0) {
  517. cancel_timer(&se->timer);
  518. se->expiry = 0;
  519. }
  520. se->requestor = NULL;
  521. }
  522. /*===========================================================================*
  523. * select_cancel_filp *
  524. *===========================================================================*/
  525. static void select_cancel_filp(struct filp *f)
  526. {
  527. /* Reduce number of select users of this filp */
  528. assert(f);
  529. assert(f->filp_selectors >= 0);
  530. if (f->filp_selectors == 0) return;
  531. if (f->filp_count == 0) return;
  532. select_lock_filp(f, f->filp_select_ops);
  533. f->filp_selectors--;
  534. if (f->filp_selectors == 0) {
  535. /* No one selecting on this filp anymore, forget about select state */
  536. f->filp_select_ops = 0;
  537. f->filp_select_flags = 0;
  538. f->filp_pipe_select_ops = 0;
  539. }
  540. unlock_filp(f);
  541. }
  542. /*===========================================================================*
  543. * select_return *
  544. *===========================================================================*/
  545. static void select_return(struct selectentry *se)
  546. {
  547. int r, r1;
  548. assert(!is_deferred(se)); /* Not done yet, first wait for async reply */
  549. select_cancel_all(se);
  550. r1 = copy_fdsets(se, se->nfds, TO_PROC);
  551. if (r1 != OK)
  552. r = r1;
  553. else if (se->error != OK)
  554. r = se->error;
  555. else
  556. r = se->nreadyfds;
  557. revive(se->req_endpt, r);
  558. }
  559. /*===========================================================================*
  560. * select_callback *
  561. *===========================================================================*/
  562. void select_callback(struct filp *f, int status)
  563. {
  564. filp_status(f, status);
  565. }
  566. /*===========================================================================*
  567. * init_select *
  568. *===========================================================================*/
  569. void init_select(void)
  570. {
  571. int s;
  572. for (s = 0; s < MAXSELECTS; s++)
  573. init_timer(&selecttab[s].timer);
  574. }
  575. /*===========================================================================*
  576. * select_forget *
  577. *===========================================================================*/
  578. void select_forget(endpoint_t proc_e)
  579. {
  580. /* Something has happened (e.g. signal delivered that interrupts select()).
  581. * Totally forget about the select(). */
  582. int slot;
  583. struct selectentry *se;
  584. for (slot = 0; slot < MAXSELECTS; slot++) {
  585. se = &selecttab[slot];
  586. if (se->requestor != NULL && se->req_endpt == proc_e)
  587. break;
  588. }
  589. if (slot >= MAXSELECTS) return; /* Entry not found */
  590. se->error = EINTR;
  591. if (is_deferred(se)) return; /* Still awaiting initial reply */
  592. select_cancel_all(se);
  593. }
  594. /*===========================================================================*
  595. * select_timeout_check *
  596. *===========================================================================*/
  597. void select_timeout_check(timer_t *timer)
  598. {
  599. int s;
  600. struct selectentry *se;
  601. s = tmr_arg(timer)->ta_int;
  602. if (s < 0 || s >= MAXSELECTS) return; /* Entry does not exist */
  603. se = &selecttab[s];
  604. if (se->requestor == NULL) return;
  605. fp = se->requestor;
  606. if (se->expiry <= 0) return; /* Strange, did we even ask for a timeout? */
  607. se->expiry = 0;
  608. if (is_deferred(se)) return; /* Wait for initial replies to DEV_SELECT */
  609. select_return(se);
  610. }
  611. /*===========================================================================*
  612. * select_unsuspend_by_endpt *
  613. *===========================================================================*/
void select_unsuspend_by_endpt(endpoint_t proc_e)
{
/* Revive blocked processes when a driver has disappeared. Walk all select
 * entries: entries owned by the exiting process itself are cancelled;
 * entries selecting on a device of the vanished driver get that fd
 * detached, EINTR recorded, and (if possible) an immediate return. */
  int fd, s, major;
  struct selectentry *se;
  struct filp *f;

  for (s = 0; s < MAXSELECTS; s++) {
	int wakehim = 0;	/* does this entry need to be woken up? */
	se = &selecttab[s];
	if (se->requestor == NULL) continue;
	if (se->requestor->fp_endpoint == proc_e) {
		/* The requestor itself is the exiting process. */
		assert(se->requestor->fp_flags & FP_EXITING);
		select_cancel_all(se);
		continue;
	}

	for (fd = 0; fd < se->nfds; fd++) {
		if ((f = se->filps[fd]) == NULL || f->filp_vno == NULL)
			continue;

		major = major(f->filp_vno->v_sdev);
		if (dmap_driver_match(proc_e, major)) {
			/* Driver for this fd's device is gone; detach fd. */
			se->filps[fd] = NULL;
			se->error = EINTR;
			select_cancel_filp(f);
			wakehim = 1;
		}
	}

	if (wakehim && !is_deferred(se))
		select_return(se);
  }
}
  644. /*===========================================================================*
  645. * select_reply1 *
  646. *===========================================================================*/
void select_reply1(driver_e, minor, status)
endpoint_t driver_e;
int minor;
int status;
{
/* Handle reply to DEV_SELECT request. This is the *initial* reply from an
 * asynchronous driver: clear the per-major pending-request slot, update the
 * filp's select flags according to 'status' (a ready-operations bitmap, or a
 * negative error), notify the filp's owners, and restart any filps whose
 * requests were deferred while the driver was busy. */
  int major;
  dev_t dev;
  struct filp *f;
  struct dmap *dp;
  struct vnode *vp;

  /* Figure out which device is replying */
  if ((dp = get_dmap(driver_e)) == NULL) return;

  major = dp-dmap;	/* major number is this entry's index in dmap[] */
  dev = makedev(major, minor);

  /* Get filp belonging to character special file */
  if ((f = dp->dmap_sel_filp) == NULL) {
	printf("VFS (%s:%d): major %d was not expecting a DEV_SELECT reply\n",
		__FILE__, __LINE__, major);
	return;
  }

  /* Is the filp still in use and busy waiting for a reply? The owner might
   * have vanished before the driver was able to reply. */
  if (f->filp_count >= 1 && (f->filp_select_flags & FSF_BUSY)) {
	/* Find vnode and check we got a reply from the device we expected */
	vp = f->filp_vno;
	assert(vp != NULL);
	assert(S_ISCHR(vp->v_mode));
	if (vp->v_sdev != dev) {
		printf("VFS (%s:%d): expected reply from dev %d not %d\n",
			__FILE__, __LINE__, vp->v_sdev, dev);
		return;
	}
  }

  /* No longer waiting for a reply from this device */
  dp->dmap_sel_filp = NULL;

  /* Process select result only if requestor is still around. That is, the
   * corresponding filp is still in use.
   */
  if (f->filp_count >= 1) {
	select_lock_filp(f, f->filp_select_ops);
	f->filp_select_flags &= ~FSF_BUSY;

	/* The select call is done now, except when
	 * - another process started a select on the same filp with possibly a
	 *   different set of operations.
	 * - a process does a select on the same filp but using different file
	 *   descriptors.
	 * - the select has a timeout. Upon receiving this reply the operations
	 *   might not be ready yet, so we want to wait for that to ultimately
	 *   happen.
	 *   Therefore we need to keep remembering what the operations are.
	 */
	if (!(f->filp_select_flags & (FSF_UPDATE|FSF_BLOCKED)))
		f->filp_select_ops = 0;		/* done selecting */
	else if (!(f->filp_select_flags & FSF_UPDATE))
		/* there may be operations pending */
		f->filp_select_ops &= ~status;

	/* Record new filp status */
	if (!(status == 0 && (f->filp_select_flags & FSF_BLOCKED))) {
		if (status > 0) {	/* operations ready */
			/* Ready operations no longer block the selector. */
			if (status & SEL_RD)
				f->filp_select_flags &= ~FSF_RD_BLOCK;
			if (status & SEL_WR)
				f->filp_select_flags &= ~FSF_WR_BLOCK;
			if (status & SEL_ERR)
				f->filp_select_flags &= ~FSF_ERR_BLOCK;
		} else if (status < 0) { /* error */
			/* Always unblock upon error */
			f->filp_select_flags &= ~FSF_BLOCKED;
		}
	}

	unlock_filp(f);
	filp_status(f, status); /* Tell filp owners about the results */
  }

  select_restart_filps();
}
  723. /*===========================================================================*
  724. * select_reply2 *
  725. *===========================================================================*/
void select_reply2(driver_e, minor, status)
endpoint_t driver_e;
int minor;
int status;
{
/* Handle secondary reply to DEV_SELECT request. A secondary reply occurs when
 * the select request is 'blocking' until an operation becomes ready. Because
 * the secondary reply carries no filp, every select entry is scanned for fds
 * referring to this device; each matching filp has its flags updated and the
 * ready operations recorded, after which the waiting process is restarted if
 * anything became ready. */
  int major, slot, fd;
  dev_t dev;
  struct filp *f;
  struct dmap *dp;
  struct vnode *vp;
  struct selectentry *se;

  /* A secondary reply must carry an actual result (ops or error). */
  if (status == 0) {
	printf("VFS (%s:%d): weird status (%d) to report\n",
		__FILE__, __LINE__, status);
	return;
  }

  /* Figure out which device is replying */
  if ((dp = get_dmap(driver_e)) == NULL) {
	printf("VFS (%s:%d): endpoint %d is not a known driver endpoint\n",
		__FILE__, __LINE__, driver_e);
	return;
  }
  major = dp-dmap;	/* major number is this entry's index in dmap[] */
  dev = makedev(major, minor);

  /* Find all file descriptors selecting for this device */
  for (slot = 0; slot < MAXSELECTS; slot++) {
	se = &selecttab[slot];
	if (se->requestor == NULL) continue;	/* empty slot */

	for (fd = 0; fd < se->nfds; fd++) {
		/* Skip fds that do not refer to this character device. */
		if ((f = se->filps[fd]) == NULL) continue;
		if ((vp = f->filp_vno) == NULL) continue;
		if (!S_ISCHR(vp->v_mode)) continue;
		if (vp->v_sdev != dev) continue;

		select_lock_filp(f, f->filp_select_ops);
		if (status > 0) {	/* Operations ready */
			/* Clear the replied bits from the request
			 * mask unless FSF_UPDATE is set.
			 */
			if (!(f->filp_select_flags & FSF_UPDATE))
				f->filp_select_ops &= ~status;
			if (status & SEL_RD)
				f->filp_select_flags &= ~FSF_RD_BLOCK;
			if (status & SEL_WR)
				f->filp_select_flags &= ~FSF_WR_BLOCK;
			if (status & SEL_ERR)
				f->filp_select_flags &= ~FSF_ERR_BLOCK;

			ops2tab(status, fd, se);
		} else {
			/* Error: unblock and mark all operations ready so
			 * the requestor gets to see the error. */
			f->filp_select_flags &= ~FSF_BLOCKED;
			ops2tab(SEL_RD|SEL_WR|SEL_ERR, fd, se);
		}
		unlock_filp(f);
		if (se->nreadyfds > 0) restart_proc(se);
	}
  }

  select_restart_filps();
}
/*===========================================================================*
 *				select_restart_filps			     *
 *===========================================================================*/
static void select_restart_filps()
{
/* Re-issue select requests for filps whose requested operation set changed
 * while a previous request was outstanding. */
  int fd, slot;
  struct filp *f;
  struct vnode *vp;
  struct selectentry *se;

  /* Locate filps that can be restarted */
  for (slot = 0; slot < MAXSELECTS; slot++) {
	se = &selecttab[slot];
	if (se->requestor == NULL) continue;	/* empty slot */

	/* Only 'deferred' processes are eligible to restart */
	if (!is_deferred(se)) continue;

	/* Find filps that are not waiting for a reply, but have an updated
	 * status (i.e., another select on the same filp with possibly a
	 * different set of operations is to be done), and thus requires the
	 * select request to be sent again).
	 */
	for (fd = 0; fd < se->nfds; fd++) {
		int r, wantops, ops;
		if ((f = se->filps[fd]) == NULL) continue;
		if (f->filp_select_flags & FSF_BUSY)	/* Still waiting for */
			continue;			/* initial reply */
		if (!(f->filp_select_flags & FSF_UPDATE)) /* Must be in */
			continue;			  /* 'update' state */

		wantops = ops = f->filp_select_ops;
		vp = f->filp_vno;
		/* Only character-special files go through this deferred
		 * request path. */
		assert(S_ISCHR(vp->v_mode));
		r = do_select_request(se, fd, &wantops);
		if (r != OK && r != SUSPEND)
			break;	/* Error or bogus return code; abort */

		/* The request may have reported some operations ready right
		 * away; record those for the requestor. */
		if (wantops & ops) ops2tab(wantops, fd, se);
	}
  }
}
  822. /*===========================================================================*
  823. * do_select_request *
  824. *===========================================================================*/
  825. static int do_select_request(se, fd, ops)
  826. struct selectentry *se;
  827. int fd;
  828. int *ops;
  829. {
  830. /* Perform actual select request for file descriptor fd */
  831. int r, type;
  832. struct filp *f;
  833. type = se->type[fd];
  834. f = se->filps[fd];
  835. select_lock_filp(f, *ops);
  836. r = fdtypes[type].select_request(f, ops, se->block);
  837. unlock_filp(f);
  838. if (r != OK && r != SUSPEND) {
  839. se->error = EINTR;
  840. se->block = 0; /* Stop blocking to return asap */
  841. if (!is_deferred(se)) select_cancel_all(se);
  842. }
  843. return(r);
  844. }
  845. /*===========================================================================*
  846. * filp_status *
  847. *===========================================================================*/
  848. static void filp_status(f, status)
  849. struct filp *f;
  850. int status;
  851. {
  852. /* Tell processes that need to know about the status of this filp */
  853. int fd, slot;
  854. struct selectentry *se;
  855. for (slot = 0; slot < MAXSELECTS; slot++) {
  856. se = &selecttab[slot];
  857. if (se->requestor == NULL) continue; /* empty slot */
  858. for (fd = 0; fd < se->nfds; fd++) {
  859. if (se->filps[fd] != f) continue;
  860. if (status < 0)
  861. ops2tab(SEL_RD|SEL_WR|SEL_ERR, fd, se);
  862. else
  863. ops2tab(status, fd, se);
  864. restart_proc(se);
  865. }
  866. }
  867. }
  868. /*===========================================================================*
  869. * restart_proc *
  870. *===========================================================================*/
  871. static void restart_proc(se)
  872. struct selectentry *se;
  873. {
  874. /* Tell process about select results (if any) unless there are still results
  875. * pending. */
  876. if ((se->nreadyfds > 0 || !se->block) && !is_deferred(se))
  877. select_return(se);
  878. }
  879. /*===========================================================================*
  880. * wipe_select *
  881. *===========================================================================*/
  882. static void wipe_select(struct selectentry *se)
  883. {
  884. se->nfds = 0;
  885. se->nreadyfds = 0;
  886. se->error = OK;
  887. se->block = 0;
  888. memset(se->filps, 0, sizeof(se->filps));
  889. FD_ZERO(&se->readfds);
  890. FD_ZERO(&se->writefds);
  891. FD_ZERO(&se->errorfds);
  892. FD_ZERO(&se->ready_readfds);
  893. FD_ZERO(&se->ready_writefds);
  894. FD_ZERO(&se->ready_errorfds);
  895. }
  896. /*===========================================================================*
  897. * select_lock_filp *
  898. *===========================================================================*/
  899. static void select_lock_filp(struct filp *f, int ops)
  900. {
  901. /* Lock a filp and vnode based on which operations are requested */
  902. tll_access_t locktype;;
  903. locktype = VNODE_READ; /* By default */
  904. if (ops & (SEL_WR|SEL_ERR))
  905. /* Selecting for error or writing requires exclusive access */
  906. locktype = VNODE_WRITE;
  907. lock_filp(f, locktype);
  908. }