PageRenderTime 66ms CodeModel.GetById 27ms RepoModel.GetById 1ms app.codeStats 0ms

/GENERIC/src/kern/uipc_socket.c

https://github.com/7shi/openbsd-loongson-vc
C | 1315 lines | 1053 code | 91 blank | 171 comment | 350 complexity | 897f4194f00cb04fd0fa4c29d562c8f5 MD5 | raw file
  1. /* $OpenBSD: uipc_socket.c,v 1.79 2009/10/31 12:00:08 fgsch Exp $ */
  2. /* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
  3. /*
  4. * Copyright (c) 1982, 1986, 1988, 1990, 1993
  5. * The Regents of the University of California. All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the name of the University nor the names of its contributors
  16. * may be used to endorse or promote products derived from this software
  17. * without specific prior written permission.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  20. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  21. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  22. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  23. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  24. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  25. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  26. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  27. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  28. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  29. * SUCH DAMAGE.
  30. *
  31. * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
  32. */
  33. #include <sys/param.h>
  34. #include <sys/systm.h>
  35. #include <sys/proc.h>
  36. #include <sys/file.h>
  37. #include <sys/malloc.h>
  38. #include <sys/mbuf.h>
  39. #include <sys/domain.h>
  40. #include <sys/kernel.h>
  41. #include <sys/event.h>
  42. #include <sys/protosw.h>
  43. #include <sys/socket.h>
  44. #include <sys/socketvar.h>
  45. #include <sys/signalvar.h>
  46. #include <sys/resourcevar.h>
  47. #include <net/route.h>
  48. #include <sys/pool.h>
/* Kqueue filter entry points for sockets; definitions live later in this file. */
void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_solisten(struct knote *kn, long hint);

/* Filter ops tables; the leading 1 marks the filter as fd-backed (f_isfd). */
struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

/* Floor for listen(2) backlogs; tunable at build time. */
#ifndef SOMINCONN
#define	SOMINCONN	80
#endif /* SOMINCONN */

/* Runtime-tunable bounds applied to listen(2) backlogs in solisten(). */
int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

/* Backing pool for all struct socket allocations (see soinit/socreate). */
struct pool socket_pool;
/*
 * One-time initialization of the socket allocation pool.  Must run
 * before the first socreate() call.
 */
void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL);
}
  71. /*
  72. * Socket operation routines.
  73. * These routines are called by the routines in
  74. * sys_socket.c or from a system process, and
  75. * implement the semantics of socket operations by
  76. * switching out to the protocol specific routines.
  77. */
/*ARGSUSED*/
/*
 * Create a new socket in domain `dom' of the given type/protocol and
 * attach it to its protocol with PRU_ATTACH.  On success *aso receives
 * the new socket; on failure the partially constructed socket is torn
 * down via sofree() and an errno is returned.
 */
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	struct protosw *prp;
	struct socket *so;
	int error, s;

	/* Look up the protocol switch entry by proto or, failing that, type. */
	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_usrreq == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	s = splsoftnet();
	so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	/* Record whether the creating process held root privilege. */
	if (suser(p, 0) == 0)
		so->so_state = SS_PRIV;
	/* Stash creator credentials/pid for later permission checks. */
	so->so_ruid = p->p_cred->p_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_cred->p_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_pid;
	so->so_proto = prp;
	error = (*prp->pr_usrreq)(so, PRU_ATTACH, NULL,
	    (struct mbuf *)(long)proto, NULL, p);
	if (error) {
		/* No fd references this socket yet, so sofree() may reclaim it. */
		so->so_state |= SS_NOFDREF;
		sofree(so);
		splx(s);
		return (error);
	}
#ifdef COMPAT_SUNOS
	{
		/* SunOS emulation expects datagram sockets to allow broadcast. */
		extern struct emul emul_sunos;
		if (p->p_emul == &emul_sunos && type == SOCK_DGRAM)
			so->so_options |= SO_BROADCAST;
	}
#endif
	splx(s);
	*aso = so;
	return (0);
}
  126. int
  127. sobind(struct socket *so, struct mbuf *nam, struct proc *p)
  128. {
  129. int s = splsoftnet();
  130. int error;
  131. error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
  132. splx(s);
  133. return (error);
  134. }
  135. int
  136. solisten(struct socket *so, int backlog)
  137. {
  138. int s = splsoftnet(), error;
  139. error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
  140. curproc);
  141. if (error) {
  142. splx(s);
  143. return (error);
  144. }
  145. if (TAILQ_FIRST(&so->so_q) == NULL)
  146. so->so_options |= SO_ACCEPTCONN;
  147. if (backlog < 0 || backlog > somaxconn)
  148. backlog = somaxconn;
  149. if (backlog < sominconn)
  150. backlog = sominconn;
  151. so->so_qlimit = backlog;
  152. splx(s);
  153. return (0);
  154. }
/*
 * Must be called at splsoftnet()
 *
 * Final release of a socket: only proceeds once the protocol control
 * block is gone and no file descriptor references remain.  The send
 * buffer is released, the receive side flushed, and the socket
 * returned to the pool.
 */
void
sofree(struct socket *so)
{
	splsoftassert(IPL_SOFTNET);

	/* Still attached to a protocol, or still referenced by an fd: keep it. */
	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue. If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0))
			return;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	pool_put(&socket_pool, so);
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket *so2;
	int s = splsoftnet();		/* conservative */
	int error = 0;

	/* A listening socket first aborts everything still on its queues. */
	if (so->so_options & SO_ACCEPTCONN) {
		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			(void) soqremque(so2, 0);
			(void) soabort(so2);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			(void) soqremque(so2, 1);
			(void) soabort(so2);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			/* Non-blocking linger: don't wait, just drop. */
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			/* Sleep (up to so_linger seconds) until disconnected. */
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, NULL,
		    NULL, NULL, curproc);
		/* Preserve the first error encountered, not the detach error. */
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
  234. /*
  235. * Must be called at splsoftnet.
  236. */
  237. int
  238. soabort(struct socket *so)
  239. {
  240. splsoftassert(IPL_SOFTNET);
  241. return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
  242. curproc);
  243. }
  244. int
  245. soaccept(struct socket *so, struct mbuf *nam)
  246. {
  247. int s = splsoftnet();
  248. int error = 0;
  249. if ((so->so_state & SS_NOFDREF) == 0)
  250. panic("soaccept: !NOFDREF");
  251. so->so_state &= ~SS_NOFDREF;
  252. if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
  253. (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
  254. error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
  255. nam, NULL, curproc);
  256. else
  257. error = ECONNABORTED;
  258. splx(s);
  259. return (error);
  260. }
/*
 * Initiate a connection to the address in `nam'.  Listening sockets
 * cannot connect.  For connectionless protocols an existing
 * association is torn down first so a connect to a null address acts
 * as a disconnect.
 */
int
soconnect(struct socket *so, struct mbuf *nam)
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splsoftnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, curproc);
	splx(s);
	return (error);
}
  285. int
  286. soconnect2(struct socket *so1, struct socket *so2)
  287. {
  288. int s = splsoftnet();
  289. int error;
  290. error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
  291. (struct mbuf *)so2, NULL, curproc);
  292. splx(s);
  293. return (error);
  294. }
  295. int
  296. sodisconnect(struct socket *so)
  297. {
  298. int s = splsoftnet();
  299. int error;
  300. if ((so->so_state & SS_ISCONNECTED) == 0) {
  301. error = ENOTCONN;
  302. goto bad;
  303. }
  304. if (so->so_state & SS_ISDISCONNECTING) {
  305. error = EALREADY;
  306. goto bad;
  307. }
  308. error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
  309. NULL, curproc);
  310. bad:
  311. splx(s);
  312. return (error);
  313. }
/* Pick the sblock() wait mode matching the caller's MSG_DONTWAIT flag. */
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not). Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len, mlen, clen = 0;
	quad_t resid;
	int error, s, dontroute;
	/* atomic: whole message must be handed to the protocol in one call. */
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned (since uio->uio_resid is).
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid. On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 * MSG_EOR on a SOCK_STREAM socket is also invalid.
	 */
	if (resid < 0 ||
	    (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (uio && uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
/* Error exit used while holding splsoftnet inside the loop below. */
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	so->so_state |= SS_ISSENDING;
	do {
		s = splsoftnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				/* Control-only sends are allowed unconnected. */
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		/* OOB data may slightly exceed the normal buffer limits. */
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			/* Release the lock and wait for buffer space. */
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			so->so_state &= ~SS_ISSENDING;
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				/* Build the chain: packet header first, then plain mbufs. */
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					MCLGET(m, M_NOWAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr, resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
					space -= len;
				} else {
nopages:
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, caddr_t), (int)len,
				    uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splsoftnet();		/* XXX */
			if (resid <= 0)
				so->so_state &= ~SS_ISSENDING;
			/* Hand the assembled chain to the protocol; it owns it now. */
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curproc);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_state &= ~SS_ISSENDING;
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*. In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain. The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	/* orig_resid != 0 at the end means nothing was consumed; retry. */
	size_t orig_resid = uio->uio_resid;
	/* uiomove() errors are deferred so sockbuf bookkeeping stays correct. */
	int uio_error = 0;
	int resid;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (so->so_state & SS_NBIO)
		flags |= MSG_DONTWAIT;
	/* Out-of-band data takes a separate, simpler path. */
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, curproc);

restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		/* A record boundary or OOB mark means we can deliver now. */
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0 && controlp == NULL)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	/* First mbuf of a record carries the sender's address, if any. */
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	/* Then any ancillary (control) mbufs, possibly externalizing fds. */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m,
					    controllen);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (pr->pr_domain->dom_dispose &&
				    mtod(m, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
					pr->pr_domain->dom_dispose(m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read. From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
		if (m->m_flags & M_BCAST)
			flags |= MSG_BCAST;
		if (m->m_flags & M_MCAST)
			flags |= MSG_MCAST;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	/* Main copy loop: transfer data mbufs to the uio (or to *mp). */
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		/* Never mix in-band and out-of-band data in one call. */
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		/* Don't read past the out-of-band mark. */
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL && uio_error == 0) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			resid = uio->uio_resid;
			splx(s);
			uio_error =
			    uiomove(mtod(m, caddr_t) + moff, (int)len,
			    uio);
			s = splsoftnet();
			if (uio_error)
				uio->uio_resid = resid - len;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			/* Consumed the whole mbuf: advance to the next one. */
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else {
			/* Partial mbuf: trim what was taken, keep the rest. */
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
				so->so_rcv.sb_datacc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination. If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				/* Short count without error, per comment above. */
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	/* Atomic protocols: any unread remainder of the record is dropped. */
	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP(). Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		/* Let the protocol know data was taken (e.g. window updates). */
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, curproc);
	}
	/* Nothing consumed and no EOR/EOF: start over rather than return 0. */
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
  885. int
  886. soshutdown(struct socket *so, int how)
  887. {
  888. struct protosw *pr = so->so_proto;
  889. switch (how) {
  890. case SHUT_RD:
  891. case SHUT_RDWR:
  892. sorflush(so);
  893. if (how == SHUT_RD)
  894. return (0);
  895. /* FALLTHROUGH */
  896. case SHUT_WR:
  897. return (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL,
  898. curproc);
  899. default:
  900. return (EINVAL);
  901. }
  902. }
/*
 * Flush the receive side of a socket: mark it unable to receive more
 * and release everything buffered.  The sockbuf is copied aside so the
 * mbufs (and any in-flight file descriptors) can be disposed of after
 * interrupts are re-enabled.
 */
void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	int s;
	struct sockbuf asb;

	/* The sblock sleep here must not be interruptible by signals. */
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splnet();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero(sb, sizeof (*sb));
	/* XXX - the bzero stumps all over so_rcv */
	if (asb.sb_flags & SB_KNOTE) {
		/* Keep registered knotes alive across the wipe. */
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
	splx(s);
	/* Dispose of rights (e.g. fds) buffered in the flushed data. */
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
/*
 * Set a socket option.  Options at a level other than SOL_SOCKET are
 * handed to the protocol's ctloutput routine; SOL_SOCKET options are
 * handled here (and may additionally be passed down on success).
 * The option mbuf m0 is consumed on all paths.
 */
int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m0)
{
	int error = 0;
	struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
			    (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		/* These options are restricted to the superuser. */
		switch (optname) {
		case SO_BINDANY:
		case SO_RDOMAIN:
			if ((error = suser(curproc, 0)) != 0)	/* XXX */
				goto bad;
			break;
		}

		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger) ||
			    mtod(m, struct linger *)->l_linger < 0 ||
			    mtod(m, struct linger *)->l_linger > SHRT_MAX) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* FALLTHROUGH */

		/* Simple boolean options stored directly in so_options. */
		case SO_BINDANY:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_JUMBO:
		case SO_TIMESTAMP:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		    {
			u_long cnt;

			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			cnt = *mtod(m, int *);
			/* Non-positive sizes are clamped to 1, not rejected. */
			if ((long)cnt <= 0)
				cnt = 1;
			switch (optname) {

			case SO_SNDBUF:
				if (sbcheckreserve(cnt, so->so_snd.sb_hiwat) ||
				    sbreserve(&so->so_snd, cnt)) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_RCVBUF:
				if (sbcheckreserve(cnt, so->so_rcv.sb_hiwat) ||
				    sbreserve(&so->so_rcv, cnt)) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/* Low-water marks may never exceed the high-water mark. */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = (cnt > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : cnt;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = (cnt > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : cnt;
				break;
			}
			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			u_short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			/* Reject timeouts that would overflow the u_short tick count. */
			if (tv->tv_sec > (USHRT_MAX - tv->tv_usec / tick) / hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;
			/* A non-zero request must not round down to "no timeout". */
			if (val == 0 && tv->tv_usec != 0)
				val = 1;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
		/* Give the protocol a chance to act on the option as well. */
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
			    (PRCO_SETOPT, so, level, optname, &m0));
			m = NULL;	/* freed by protocol */
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
/*
 * Get a socket option.
 *
 * For any level other than SOL_SOCKET the request is forwarded to the
 * protocol's ctloutput handler (ENOPROTOOPT if the protocol has none).
 * For SOL_SOCKET the reply is built here in a freshly allocated
 * MT_SOOPTS mbuf handed back through *mp; the caller owns that mbuf
 * and must free it.  Returns 0 or an errno value.
 */
int
sogetopt(struct socket *so, int level, int optname, struct mbuf **mp)
{
	struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
			    (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		/* M_WAIT: m_get() sleeps until memory is available. */
		m = m_get(M_WAIT, MT_SOOPTS);
		/* Most options return an int; cases below may override. */
		m->m_len = sizeof (int);
		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		/* Boolean options: report the option's bit in so_options. */
		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_JUMBO:
		case SO_TIMESTAMP:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			/* Reading the pending error clears it. */
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			/*
			 * sb_timeo is kept in clock ticks; convert back
			 * to a struct timeval using hz and tick.
			 */
			int val = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
				(val % hz) * tick;
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
/*
 * Called when out-of-band data arrives on a socket: post SIGURG to the
 * process or process group recorded in so_pgid, then wake up any
 * select/poll/kevent waiters on the receive buffer.
 */
void
sohasoutofband(struct socket *so)
{
	csignal(so->so_pgid, SIGURG, so->so_siguid, so->so_sigeuid);
	selwakeup(&so->so_rcv.sb_sel);
}
  1133. int
  1134. soo_kqfilter(struct file *fp, struct knote *kn)
  1135. {
  1136. struct socket *so = (struct socket *)kn->kn_fp->f_data;
  1137. struct sockbuf *sb;
  1138. int s;
  1139. switch (kn->kn_filter) {
  1140. case EVFILT_READ:
  1141. if (so->so_options & SO_ACCEPTCONN)
  1142. kn->kn_fop = &solisten_filtops;
  1143. else
  1144. kn->kn_fop = &soread_filtops;
  1145. sb = &so->so_rcv;
  1146. break;
  1147. case EVFILT_WRITE:
  1148. kn->kn_fop = &sowrite_filtops;
  1149. sb = &so->so_snd;
  1150. break;
  1151. default:
  1152. return (1);
  1153. }
  1154. s = splnet();
  1155. SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
  1156. sb->sb_flags |= SB_KNOTE;
  1157. splx(s);
  1158. return (0);
  1159. }
  1160. void
  1161. filt_sordetach(struct knote *kn)
  1162. {
  1163. struct socket *so = (struct socket *)kn->kn_fp->f_data;
  1164. int s = splnet();
  1165. SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
  1166. if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
  1167. so->so_rcv.sb_flags &= ~SB_KNOTE;
  1168. splx(s);
  1169. }
  1170. /*ARGSUSED*/
  1171. int
  1172. filt_soread(struct knote *kn, long hint)
  1173. {
  1174. struct socket *so = (struct socket *)kn->kn_fp->f_data;
  1175. kn->kn_data = so->so_rcv.sb_cc;
  1176. if (so->so_state & SS_CANTRCVMORE) {
  1177. kn->kn_flags |= EV_EOF;
  1178. kn->kn_fflags = so->so_error;
  1179. return (1);
  1180. }
  1181. if (so->so_error) /* temporary udp error */
  1182. return (1);
  1183. if (kn->kn_sfflags & NOTE_LOWAT)
  1184. return (kn->kn_data >= kn->kn_sdata);
  1185. return (kn->kn_data >= so->so_rcv.sb_lowat);
  1186. }
  1187. void
  1188. filt_sowdetach(struct knote *kn)
  1189. {
  1190. struct socket *so = (struct socket *)kn->kn_fp->f_data;
  1191. int s = splnet();
  1192. SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
  1193. if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
  1194. so->so_snd.sb_flags &= ~SB_KNOTE;
  1195. splx(s);
  1196. }
  1197. /*ARGSUSED*/
  1198. int
  1199. filt_sowrite(struct knote *kn, long hint)
  1200. {
  1201. struct socket *so = (struct socket *)kn->kn_fp->f_data;
  1202. kn->kn_data = sbspace(&so->so_snd);
  1203. if (so->so_state & SS_CANTSENDMORE) {
  1204. kn->kn_flags |= EV_EOF;
  1205. kn->kn_fflags = so->so_error;
  1206. return (1);
  1207. }
  1208. if (so->so_error) /* temporary udp error */
  1209. return (1);
  1210. if (((so->so_state & SS_ISCONNECTED) == 0) &&
  1211. (so->so_proto->pr_flags & PR_CONNREQUIRED))
  1212. return (0);
  1213. if (kn->kn_sfflags & NOTE_LOWAT)
  1214. return (kn->kn_data >= kn->kn_sdata);
  1215. return (kn->kn_data >= so->so_snd.sb_lowat);
  1216. }
  1217. /*ARGSUSED*/
  1218. int
  1219. filt_solisten(struct knote *kn, long hint)
  1220. {
  1221. struct socket *so = (struct socket *)kn->kn_fp->f_data;
  1222. kn->kn_data = so->so_qlen;
  1223. return (so->so_qlen != 0);
  1224. }