PageRenderTime 52ms CodeModel.GetById 41ms RepoModel.GetById 1ms app.codeStats 1ms

/netinet/sctputil.c

https://bitbucket.org/brucec/sctpdrv
C | 7726 lines | 6422 code | 376 blank | 928 comment | 1694 complexity | 5b1ab6f6258d7e6de6120e6fe7ae9ca4 MD5 | raw file
  1. /*-
  2. * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  4. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions are met:
  8. *
  9. * a) Redistributions of source code must retain the above copyright notice,
  10. * this list of conditions and the following disclaimer.
  11. *
  12. * b) Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in
  14. * the documentation and/or other materials provided with the distribution.
  15. *
  16. * c) Neither the name of Cisco Systems, Inc. nor the names of its
  17. * contributors may be used to endorse or promote products derived
  18. * from this software without specific prior written permission.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  21. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  22. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  24. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  25. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  26. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  27. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  28. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  30. * THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #ifdef __FreeBSD__
  33. #include <sys/cdefs.h>
  34. __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 235828 2012-05-23 11:26:28Z tuexen $");
  35. #endif
  36. #include <netinet/sctp_os.h>
  37. #include <netinet/sctp_pcb.h>
  38. #include <netinet/sctputil.h>
  39. #include <netinet/sctp_var.h>
  40. #include <netinet/sctp_sysctl.h>
  41. #ifdef INET6
  42. #if defined(__Userspace__)
  43. #include <netinet6/sctp6_var.h>
  44. #endif
  45. #endif
  46. #include <netinet/sctp_header.h>
  47. #include <netinet/sctp_output.h>
  48. #include <netinet/sctp_uio.h>
  49. #include <netinet/sctp_timer.h>
  50. #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
  51. #include <netinet/sctp_auth.h>
  52. #include <netinet/sctp_asconf.h>
  53. #include <netinet/sctp_windows_addr.h>
  54. #if defined(__Userspace__)
  55. #include <netinet/sctp_constants.h>
  56. #endif
  57. #if defined(__APPLE__)
  58. #define APPLE_FILE_NO 8
  59. #endif
  60. #if defined(__Windows__)
  61. #if !defined(SCTP_LOCAL_TRACE_BUF)
  62. #include "eventrace_netinet.h"
  63. #include "sctputil.tmh" /* this is the file that will be auto generated */
  64. #endif
  65. #else
  66. #ifndef KTR_SCTP
  67. #define KTR_SCTP KTR_SUBSYS
  68. #endif
  69. #endif
  70. extern struct sctp_cc_functions sctp_cc_functions[];
  71. extern struct sctp_ss_functions sctp_ss_functions[];
  72. void
  73. sctp_sblog(struct sockbuf *sb,
  74. struct sctp_tcb *stcb, int from, int incr)
  75. {
  76. struct sctp_cwnd_log sctp_clog;
  77. sctp_clog.x.sb.stcb = stcb;
  78. sctp_clog.x.sb.so_sbcc = sb->sb_cc;
  79. if (stcb)
  80. sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
  81. else
  82. sctp_clog.x.sb.stcb_sbcc = 0;
  83. sctp_clog.x.sb.incr = incr;
  84. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  85. SCTP_LOG_EVENT_SB,
  86. from,
  87. sctp_clog.x.misc.log1,
  88. sctp_clog.x.misc.log2,
  89. sctp_clog.x.misc.log3,
  90. sctp_clog.x.misc.log4);
  91. }
  92. void
  93. sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
  94. {
  95. struct sctp_cwnd_log sctp_clog;
  96. sctp_clog.x.close.inp = (void *)inp;
  97. sctp_clog.x.close.sctp_flags = inp->sctp_flags;
  98. if (stcb) {
  99. sctp_clog.x.close.stcb = (void *)stcb;
  100. sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
  101. } else {
  102. sctp_clog.x.close.stcb = 0;
  103. sctp_clog.x.close.state = 0;
  104. }
  105. sctp_clog.x.close.loc = loc;
  106. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  107. SCTP_LOG_EVENT_CLOSE,
  108. 0,
  109. sctp_clog.x.misc.log1,
  110. sctp_clog.x.misc.log2,
  111. sctp_clog.x.misc.log3,
  112. sctp_clog.x.misc.log4);
  113. }
/*
 * Log an RTT/RTO measurement event (SCTP_LOG_EVENT_RTT) for one network.
 * net->rtt is divided by 1000 before logging -- presumably a usec->msec
 * conversion; confirm against the units of net->rtt.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; SCTP_CTR6 reads the log back through the x.misc overlay. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *) net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
  129. void
  130. sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
  131. {
  132. struct sctp_cwnd_log sctp_clog;
  133. sctp_clog.x.strlog.stcb = stcb;
  134. sctp_clog.x.strlog.n_tsn = tsn;
  135. sctp_clog.x.strlog.n_sseq = sseq;
  136. sctp_clog.x.strlog.e_tsn = 0;
  137. sctp_clog.x.strlog.e_sseq = 0;
  138. sctp_clog.x.strlog.strm = stream;
  139. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  140. SCTP_LOG_EVENT_STRM,
  141. from,
  142. sctp_clog.x.misc.log1,
  143. sctp_clog.x.misc.log2,
  144. sctp_clog.x.misc.log3,
  145. sctp_clog.x.misc.log4);
  146. }
  147. void
  148. sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
  149. {
  150. struct sctp_cwnd_log sctp_clog;
  151. sctp_clog.x.nagle.stcb = (void *)stcb;
  152. sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
  153. sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
  154. sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
  155. sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
  156. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  157. SCTP_LOG_EVENT_NAGLE,
  158. action,
  159. sctp_clog.x.misc.log1,
  160. sctp_clog.x.misc.log2,
  161. sctp_clog.x.misc.log3,
  162. sctp_clog.x.misc.log4);
  163. }
  164. void
  165. sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
  166. {
  167. struct sctp_cwnd_log sctp_clog;
  168. sctp_clog.x.sack.cumack = cumack;
  169. sctp_clog.x.sack.oldcumack = old_cumack;
  170. sctp_clog.x.sack.tsn = tsn;
  171. sctp_clog.x.sack.numGaps = gaps;
  172. sctp_clog.x.sack.numDups = dups;
  173. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  174. SCTP_LOG_EVENT_SACK,
  175. from,
  176. sctp_clog.x.misc.log1,
  177. sctp_clog.x.misc.log2,
  178. sctp_clog.x.misc.log3,
  179. sctp_clog.x.misc.log4);
  180. }
/*
 * Log the state of the mapping array (SCTP_LOG_EVENT_MAP): base TSN,
 * cumulative-ack point, and highest TSN seen.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; SCTP_CTR6 reads the log back through the x.misc overlay. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
/*
 * Log fast-retransmit bookkeeping (SCTP_LOG_EVENT_FR): the biggest TSN
 * seen, the biggest newly-acked TSN, and the TSN being considered.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; SCTP_CTR6 reads the log back through the x.misc overlay. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
  214. void
  215. sctp_log_mb(struct mbuf *m, int from)
  216. {
  217. struct sctp_cwnd_log sctp_clog;
  218. sctp_clog.x.mb.mp = m;
  219. sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
  220. sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
  221. sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
  222. if (SCTP_BUF_IS_EXTENDED(m)) {
  223. sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
  224. #if defined(__APPLE__)
  225. /* APPLE does not use a ref_cnt, but a forward/backward ref queue */
  226. #else
  227. sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
  228. #endif
  229. } else {
  230. sctp_clog.x.mb.ext = 0;
  231. sctp_clog.x.mb.refcnt = 0;
  232. }
  233. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  234. SCTP_LOG_EVENT_MBUF,
  235. from,
  236. sctp_clog.x.misc.log1,
  237. sctp_clog.x.misc.log2,
  238. sctp_clog.x.misc.log3,
  239. sctp_clog.x.misc.log4);
  240. }
  241. void
  242. sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
  243. int from)
  244. {
  245. struct sctp_cwnd_log sctp_clog;
  246. if (control == NULL) {
  247. SCTP_PRINTF("Gak log of NULL?\n");
  248. return;
  249. }
  250. sctp_clog.x.strlog.stcb = control->stcb;
  251. sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
  252. sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
  253. sctp_clog.x.strlog.strm = control->sinfo_stream;
  254. if (poschk != NULL) {
  255. sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
  256. sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
  257. } else {
  258. sctp_clog.x.strlog.e_tsn = 0;
  259. sctp_clog.x.strlog.e_sseq = 0;
  260. }
  261. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  262. SCTP_LOG_EVENT_STRM,
  263. from,
  264. sctp_clog.x.misc.log1,
  265. sctp_clog.x.misc.log2,
  266. sctp_clog.x.misc.log3,
  267. sctp_clog.x.misc.log4);
  268. }
  269. void
  270. sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
  271. {
  272. struct sctp_cwnd_log sctp_clog;
  273. sctp_clog.x.cwnd.net = net;
  274. if (stcb->asoc.send_queue_cnt > 255)
  275. sctp_clog.x.cwnd.cnt_in_send = 255;
  276. else
  277. sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
  278. if (stcb->asoc.stream_queue_cnt > 255)
  279. sctp_clog.x.cwnd.cnt_in_str = 255;
  280. else
  281. sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
  282. if (net) {
  283. sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
  284. sctp_clog.x.cwnd.inflight = net->flight_size;
  285. sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
  286. sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
  287. sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
  288. }
  289. if (SCTP_CWNDLOG_PRESEND == from) {
  290. sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
  291. }
  292. sctp_clog.x.cwnd.cwnd_augment = augment;
  293. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  294. SCTP_LOG_EVENT_CWND,
  295. from,
  296. sctp_clog.x.misc.log1,
  297. sctp_clog.x.misc.log2,
  298. sctp_clog.x.misc.log3,
  299. sctp_clog.x.misc.log4);
  300. }
#ifndef __APPLE__
/*
 * Log the ownership state of all SCTP-related locks (SCTP_LOG_LOCK_EVENT)
 * for the given endpoint/association.  Locks whose owning object is
 * missing are recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *) NULL;
	}
	sctp_clog.x.lock.inp = (void *) inp;
#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
	/* Old FreeBSD: endpoint info lock is a mutex. */
	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
#else
	/* Newer FreeBSD: endpoint info lock is an rwlock; test write-owned. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
#endif
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx -- mirrors upstream, but confirm sock_lock
		 * was not meant to sample a distinct socket lock.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
#endif
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
#endif
/*
 * Log a max-burst limiting event (SCTP_LOG_EVENT_MAXBURST): the error
 * code, the net's flight size, the burst value, and queue counts clamped
 * to 255.  NOTE(review): net is dereferenced unconditionally here, unlike
 * sctp_log_cwnd which guards it -- callers presumably always pass a valid
 * net; confirm.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; SCTP_CTR6 reads the log back through the x.misc overlay. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
  375. void
  376. sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
  377. {
  378. struct sctp_cwnd_log sctp_clog;
  379. sctp_clog.x.rwnd.rwnd = peers_rwnd;
  380. sctp_clog.x.rwnd.send_size = snd_size;
  381. sctp_clog.x.rwnd.overhead = overhead;
  382. sctp_clog.x.rwnd.new_rwnd = 0;
  383. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  384. SCTP_LOG_EVENT_RWND,
  385. from,
  386. sctp_clog.x.misc.log1,
  387. sctp_clog.x.misc.log2,
  388. sctp_clog.x.misc.log3,
  389. sctp_clog.x.misc.log4);
  390. }
  391. void
  392. sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
  393. {
  394. struct sctp_cwnd_log sctp_clog;
  395. sctp_clog.x.rwnd.rwnd = peers_rwnd;
  396. sctp_clog.x.rwnd.send_size = flight_size;
  397. sctp_clog.x.rwnd.overhead = overhead;
  398. sctp_clog.x.rwnd.new_rwnd = a_rwndval;
  399. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  400. SCTP_LOG_EVENT_RWND,
  401. from,
  402. sctp_clog.x.misc.log1,
  403. sctp_clog.x.misc.log2,
  404. sctp_clog.x.misc.log3,
  405. sctp_clog.x.misc.log4);
  406. }
  407. void
  408. sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
  409. {
  410. struct sctp_cwnd_log sctp_clog;
  411. sctp_clog.x.mbcnt.total_queue_size = total_oq;
  412. sctp_clog.x.mbcnt.size_change = book;
  413. sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
  414. sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
  415. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  416. SCTP_LOG_EVENT_MBCNT,
  417. from,
  418. sctp_clog.x.misc.log1,
  419. sctp_clog.x.misc.log2,
  420. sctp_clog.x.misc.log3,
  421. sctp_clog.x.misc.log4);
  422. }
/*
 * Log four arbitrary 32-bit values under SCTP_LOG_MISC_EVENT.  Unlike the
 * other loggers this bypasses the sctp_cwnd_log union entirely.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
  431. void
  432. sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
  433. {
  434. struct sctp_cwnd_log sctp_clog;
  435. sctp_clog.x.wake.stcb = (void *)stcb;
  436. sctp_clog.x.wake.wake_cnt = wake_cnt;
  437. sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
  438. sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
  439. sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
  440. if (stcb->asoc.stream_queue_cnt < 0xff)
  441. sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
  442. else
  443. sctp_clog.x.wake.stream_qcnt = 0xff;
  444. if (stcb->asoc.chunks_on_out_queue < 0xff)
  445. sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
  446. else
  447. sctp_clog.x.wake.chunks_on_oque = 0xff;
  448. sctp_clog.x.wake.sctpflags = 0;
  449. /* set in the defered mode stuff */
  450. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
  451. sctp_clog.x.wake.sctpflags |= 1;
  452. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
  453. sctp_clog.x.wake.sctpflags |= 2;
  454. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
  455. sctp_clog.x.wake.sctpflags |= 4;
  456. /* what about the sb */
  457. if (stcb->sctp_socket) {
  458. struct socket *so = stcb->sctp_socket;
  459. sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
  460. } else {
  461. sctp_clog.x.wake.sbflags = 0xff;
  462. }
  463. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  464. SCTP_LOG_EVENT_WAKE,
  465. from,
  466. sctp_clog.x.misc.log1,
  467. sctp_clog.x.misc.log2,
  468. sctp_clog.x.misc.log3,
  469. sctp_clog.x.misc.log4);
  470. }
  471. void
  472. sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
  473. {
  474. struct sctp_cwnd_log sctp_clog;
  475. sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
  476. sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
  477. sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
  478. sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
  479. sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
  480. sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
  481. sctp_clog.x.blk.sndlen = sendlen;
  482. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  483. SCTP_LOG_EVENT_BLOCK,
  484. from,
  485. sctp_clog.x.misc.log1,
  486. sctp_clog.x.misc.log2,
  487. sctp_clog.x.misc.log3,
  488. sctp_clog.x.misc.log4);
  489. }
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: the trace log is read via
 * ktrdump instead, so this always reports success without copying data.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
  496. #ifdef SCTP_AUDITING_ENABLED
  497. uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
  498. static int sctp_audit_indx = 0;
  499. static
  500. void
  501. sctp_print_audit_report(void)
  502. {
  503. int i;
  504. int cnt;
  505. cnt = 0;
  506. for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
  507. if ((sctp_audit_data[i][0] == 0xe0) &&
  508. (sctp_audit_data[i][1] == 0x01)) {
  509. cnt = 0;
  510. SCTP_PRINTF("\n");
  511. } else if (sctp_audit_data[i][0] == 0xf0) {
  512. cnt = 0;
  513. SCTP_PRINTF("\n");
  514. } else if ((sctp_audit_data[i][0] == 0xc0) &&
  515. (sctp_audit_data[i][1] == 0x01)) {
  516. SCTP_PRINTF("\n");
  517. cnt = 0;
  518. }
  519. SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
  520. (uint32_t) sctp_audit_data[i][1]);
  521. cnt++;
  522. if ((cnt % 14) == 0)
  523. SCTP_PRINTF("\n");
  524. }
  525. for (i = 0; i < sctp_audit_indx; i++) {
  526. if ((sctp_audit_data[i][0] == 0xe0) &&
  527. (sctp_audit_data[i][1] == 0x01)) {
  528. cnt = 0;
  529. SCTP_PRINTF("\n");
  530. } else if (sctp_audit_data[i][0] == 0xf0) {
  531. cnt = 0;
  532. SCTP_PRINTF("\n");
  533. } else if ((sctp_audit_data[i][0] == 0xc0) &&
  534. (sctp_audit_data[i][1] == 0x01)) {
  535. SCTP_PRINTF("\n");
  536. cnt = 0;
  537. }
  538. SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
  539. (uint32_t) sctp_audit_data[i][1]);
  540. cnt++;
  541. if ((cnt % 14) == 0)
  542. SCTP_PRINTF("\n");
  543. }
  544. SCTP_PRINTF("\n");
  545. }
/*
 * Cross-check (and repair) the association's flight-size bookkeeping
 * against the actual contents of the sent queue, recording progress
 * markers in the audit ring as it goes.  Discrepancies are corrected in
 * place and, if any were found, the full audit report is printed.
 * Marker convention (first byte): 0xAA entry, 0xA1/0xA2 retran counts,
 * 0xAF anomaly (second byte identifies which check failed).
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record entry into the auditor, tagged with the caller's id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: called without an endpoint; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: called without an association; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the association's retransmit count. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes/chunks from the queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retran count drifted; fix and note new value. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight drifted from queue contents; fix. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk count drifted; fix. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now verify the per-net flight sizes sum to the association total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sums disagree; rebuild each from queue. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
/*
 * Append one (event, detail) byte pair to the circular audit buffer,
 * wrapping the index at SCTP_AUDIT_SIZE.  No locking -- matches the
 * best-effort nature of the audit trail.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}
  682. #endif
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;
	/* Association-wide timers. */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination path-MTU and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
  704. /*
  705. * a list of sizes based on typical mtu's, used only if next hop size not
  706. * returned.
  707. */
  708. static uint32_t sctp_mtu_sizes[] = {
  709. 68,
  710. 296,
  711. 508,
  712. 512,
  713. 544,
  714. 576,
  715. 1006,
  716. 1492,
  717. 1500,
  718. 1536,
  719. 2002,
  720. 2048,
  721. 4352,
  722. 4464,
  723. 8166,
  724. 17914,
  725. 32000,
  726. 65535
  727. };
  728. /*
  729. * Return the largest MTU smaller than val. If there is no
  730. * entry, just return val.
  731. */
  732. uint32_t
  733. sctp_get_prev_mtu(uint32_t val)
  734. {
  735. uint32_t i;
  736. if (val <= sctp_mtu_sizes[0]) {
  737. return (val);
  738. }
  739. for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
  740. if (val <= sctp_mtu_sizes[i]) {
  741. break;
  742. }
  743. }
  744. return (sctp_mtu_sizes[i - 1]);
  745. }
  746. /*
  747. * Return the smallest MTU larger than val. If there is no
  748. * entry, just return val.
  749. */
  750. uint32_t
  751. sctp_get_next_mtu(uint32_t val)
  752. {
  753. /* select another MTU that is just bigger than this one */
  754. uint32_t i;
  755. for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
  756. if (val < sctp_mtu_sizes[i]) {
  757. return (sctp_mtu_sizes[i]);
  758. }
  759. }
  760. return (val);
  761. }
/*
 * Refill the PCB's random_store by HMAC'ing the endpoint's random seed
 * with a monotonically increasing counter, then reset the read cursor.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
/*
 * Hand out the next 32-bit value from the endpoint's pre-generated random
 * store, refilling the store when the cursor wraps.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		/* Debug override: hand out deterministic, increasing values. */
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before a 4-byte read could run off the end of the store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
		new_store = 0;
	}
	/* Lock-free claim of [store_at, new_store); retry if we raced. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): the bytes at store_at are read after a concurrent
	 * refill could have rewritten them -- acceptable here since any
	 * value is "random enough", per the comment in
	 * sctp_fill_random_store().
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
  815. uint32_t
  816. sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
  817. {
  818. uint32_t x;
  819. struct timeval now;
  820. if (check) {
  821. (void)SCTP_GETTIME_TIMEVAL(&now);
  822. }
  823. for (;;) {
  824. x = sctp_select_initial_TSN(&inp->sctp_ep);
  825. if (x == 0) {
  826. /* we never use 0 */
  827. continue;
  828. }
  829. if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
  830. break;
  831. }
  832. }
  833. return (x);
  834. }
  835. int
  836. sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
  837. uint32_t override_tag, uint32_t vrf_id)
  838. {
  839. struct sctp_association *asoc;
  840. /*
  841. * Anything set to zero is taken care of by the allocation routine's
  842. * bzero
  843. */
  844. /*
  845. * Up front select what scoping to apply on addresses I tell my peer
  846. * Not sure what to do with these right now, we will need to come up
  847. * with a way to set them. We may need to pass them through from the
  848. * caller in the sctp_aloc_assoc() function.
  849. */
  850. int i;
  851. asoc = &stcb->asoc;
  852. /* init all variables to a known value. */
  853. SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
  854. asoc->max_burst = m->sctp_ep.max_burst;
  855. asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
  856. asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
  857. asoc->cookie_life = m->sctp_ep.def_cookie_life;
  858. asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
  859. asoc->ecn_allowed = m->sctp_ecn_enable;
  860. asoc->sctp_nr_sack_on_off = (uint8_t)SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
  861. asoc->sctp_cmt_pf = (uint8_t)0;
  862. asoc->sctp_frag_point = m->sctp_frag_point;
  863. asoc->sctp_features = m->sctp_features;
  864. asoc->default_dscp = m->sctp_ep.default_dscp;
  865. #ifdef INET6
  866. if (m->sctp_ep.default_flowlabel) {
  867. asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
  868. } else {
  869. if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
  870. asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
  871. asoc->default_flowlabel &= 0x000fffff;
  872. asoc->default_flowlabel |= 0x80000000;
  873. } else {
  874. asoc->default_flowlabel = 0;
  875. }
  876. }
  877. #endif
  878. asoc->sb_send_resv = 0;
  879. if (override_tag) {
  880. asoc->my_vtag = override_tag;
  881. } else {
  882. asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
  883. }
  884. /* Get the nonce tags */
  885. asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
  886. asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
  887. asoc->vrf_id = vrf_id;
  888. #ifdef SCTP_ASOCLOG_OF_TSNS
  889. asoc->tsn_in_at = 0;
  890. asoc->tsn_out_at = 0;
  891. asoc->tsn_in_wrapped = 0;
  892. asoc->tsn_out_wrapped = 0;
  893. asoc->cumack_log_at = 0;
  894. asoc->cumack_log_atsnt = 0;
  895. #endif
  896. #ifdef SCTP_FS_SPEC_LOG
  897. asoc->fs_index = 0;
  898. #endif
  899. asoc->refcnt = 0;
  900. asoc->assoc_up_sent = 0;
  901. asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
  902. sctp_select_initial_TSN(&m->sctp_ep);
  903. asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
  904. /* we are optimisitic here */
  905. asoc->peer_supports_pktdrop = 1;
  906. asoc->peer_supports_nat = 0;
  907. asoc->sent_queue_retran_cnt = 0;
  908. /* for CMT */
  909. asoc->last_net_cmt_send_started = NULL;
  910. /* This will need to be adjusted */
  911. asoc->last_acked_seq = asoc->init_seq_number - 1;
  912. asoc->advanced_peer_ack_point = asoc->last_acked_seq;
  913. asoc->asconf_seq_in = asoc->last_acked_seq;
  914. /* here we are different, we hold the next one we expect */
  915. asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
  916. asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
  917. asoc->initial_rto = m->sctp_ep.initial_rto;
  918. asoc->max_init_times = m->sctp_ep.max_init_times;
  919. asoc->max_send_times = m->sctp_ep.max_send_times;
  920. asoc->def_net_failure = m->sctp_ep.def_net_failure;
  921. asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
  922. asoc->free_chunk_cnt = 0;
  923. asoc->iam_blocking = 0;
  924. asoc->context = m->sctp_context;
  925. asoc->local_strreset_support = m->local_strreset_support;
  926. asoc->def_send = m->def_send;
  927. asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
  928. asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
  929. asoc->pr_sctp_cnt = 0;
  930. asoc->total_output_queue_size = 0;
  931. if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
  932. struct in6pcb *inp6;
  933. /* Its a V6 socket */
  934. inp6 = (struct in6pcb *)m;
  935. asoc->ipv6_addr_legal = 1;
  936. /* Now look at the binding flag to see if V4 will be legal */
  937. if (SCTP_IPV6_V6ONLY(inp6) == 0) {
  938. asoc->ipv4_addr_legal = 1;
  939. } else {
  940. /* V4 addresses are NOT legal on the association */
  941. asoc->ipv4_addr_legal = 0;
  942. }
  943. } else {
  944. /* Its a V4 socket, no - V6 */
  945. asoc->ipv4_addr_legal = 1;
  946. asoc->ipv6_addr_legal = 0;
  947. }
  948. asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
  949. asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
  950. asoc->smallest_mtu = m->sctp_frag_point;
  951. asoc->minrto = m->sctp_ep.sctp_minrto;
  952. asoc->maxrto = m->sctp_ep.sctp_maxrto;
  953. asoc->locked_on_sending = NULL;
  954. asoc->stream_locked_on = 0;
  955. asoc->ecn_echo_cnt_onq = 0;
  956. asoc->stream_locked = 0;
  957. asoc->send_sack = 1;
  958. LIST_INIT(&asoc->sctp_restricted_addrs);
  959. TAILQ_INIT(&asoc->nets);
  960. TAILQ_INIT(&asoc->pending_reply_queue);
  961. TAILQ_INIT(&asoc->asconf_ack_sent);
  962. /* Setup to fill the hb random cache at first HB */
  963. asoc->hb_random_idx = 4;
  964. asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
  965. stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
  966. stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
  967. stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
  968. stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
  969. /*
  970. * Now the stream parameters, here we allocate space for all streams
  971. * that we request by default.
  972. */
  973. asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
  974. m->sctp_ep.pre_open_stream_count;
  975. SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
  976. asoc->streamoutcnt * sizeof(struct sctp_stream_out),
  977. SCTP_M_STRMO);
  978. if (asoc->strmout == NULL) {
  979. /* big trouble no memory */
  980. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  981. return (ENOMEM);
  982. }
  983. for (i = 0; i < asoc->streamoutcnt; i++) {
  984. /*
  985. * inbound side must be set to 0xffff, also NOTE when we get
  986. * the INIT-ACK back (for INIT sender) we MUST reduce the
  987. * count (streamoutcnt) but first check if we sent to any of
  988. * the upper streams that were dropped (if some were). Those
  989. * that were dropped must be notified to the upper layer as
  990. * failed to send.
  991. */
  992. asoc->strmout[i].next_sequence_sent = 0x0;
  993. TAILQ_INIT(&asoc->strmout[i].outqueue);
  994. asoc->strmout[i].stream_no = i;
  995. asoc->strmout[i].last_msg_incomplete = 0;
  996. asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
  997. }
  998. asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
  999. /* Now the mapping array */
  1000. asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
  1001. SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
  1002. SCTP_M_MAP);
  1003. if (asoc->mapping_array == NULL) {
  1004. SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
  1005. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1006. return (ENOMEM);
  1007. }
  1008. memset(asoc->mapping_array, 0, asoc->mapping_array_size);
  1009. SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
  1010. SCTP_M_MAP);
  1011. if (asoc->nr_mapping_array == NULL) {
  1012. SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
  1013. SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
  1014. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1015. return (ENOMEM);
  1016. }
  1017. memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
  1018. /* Now the init of the other outqueues */
  1019. TAILQ_INIT(&asoc->free_chunks);
  1020. TAILQ_INIT(&asoc->control_send_queue);
  1021. TAILQ_INIT(&asoc->asconf_send_queue);
  1022. TAILQ_INIT(&asoc->send_queue);
  1023. TAILQ_INIT(&asoc->sent_queue);
  1024. TAILQ_INIT(&asoc->reasmqueue);
  1025. TAILQ_INIT(&asoc->resetHead);
  1026. asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
  1027. TAILQ_INIT(&asoc->asconf_queue);
  1028. /* authentication fields */
  1029. asoc->authinfo.random = NULL;
  1030. asoc->authinfo.active_keyid = 0;
  1031. asoc->authinfo.assoc_key = NULL;
  1032. asoc->authinfo.assoc_keyid = 0;
  1033. asoc->authinfo.recv_key = NULL;
  1034. asoc->authinfo.recv_keyid = 0;
  1035. LIST_INIT(&asoc->shared_keys);
  1036. asoc->marked_retrans = 0;
  1037. asoc->port = m->sctp_ep.port;
  1038. asoc->timoinit = 0;
  1039. asoc->timodata = 0;
  1040. asoc->timosack = 0;
  1041. asoc->timoshutdown = 0;
  1042. asoc->timoheartbeat = 0;
  1043. asoc->timocookie = 0;
  1044. asoc->timoshutdownack = 0;
  1045. (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
  1046. asoc->discontinuity_time = asoc->start_time;
  1047. /* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
  1048. * the association is freed.
  1049. */
  1050. return (0);
  1051. }
  1052. void
  1053. sctp_print_mapping_array(struct sctp_association *asoc)
  1054. {
  1055. unsigned int i, limit;
  1056. SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
  1057. asoc->mapping_array_size,
  1058. asoc->mapping_array_base_tsn,
  1059. asoc->cumulative_tsn,
  1060. asoc->highest_tsn_inside_map,
  1061. asoc->highest_tsn_inside_nr_map);
  1062. for (limit = asoc->mapping_array_size; limit > 1; limit--) {
  1063. if (asoc->mapping_array[limit - 1] != 0) {
  1064. break;
  1065. }
  1066. }
  1067. SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
  1068. for (i = 0; i < limit; i++) {
  1069. SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
  1070. }
  1071. if (limit % 16)
  1072. SCTP_PRINTF("\n");
  1073. for (limit = asoc->mapping_array_size; limit > 1; limit--) {
  1074. if (asoc->nr_mapping_array[limit - 1]) {
  1075. break;
  1076. }
  1077. }
  1078. SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
  1079. for (i = 0; i < limit; i++) {
  1080. SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
  1081. }
  1082. if (limit % 16)
  1083. SCTP_PRINTF("\n");
  1084. }
  1085. int
  1086. sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
  1087. {
  1088. /* mapping array needs to grow */
  1089. uint8_t *new_array1, *new_array2;
  1090. uint32_t new_size;
  1091. new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
  1092. SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
  1093. SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
  1094. if ((new_array1 == NULL) || (new_array2 == NULL)) {
  1095. /* can't get more, forget it */
  1096. SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
  1097. if (new_array1) {
  1098. SCTP_FREE(new_array1, SCTP_M_MAP);
  1099. }
  1100. if (new_array2) {
  1101. SCTP_FREE(new_array2, SCTP_M_MAP);
  1102. }
  1103. return (-1);
  1104. }
  1105. memset(new_array1, 0, new_size);
  1106. memset(new_array2, 0, new_size);
  1107. memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
  1108. memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
  1109. SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
  1110. SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
  1111. asoc->mapping_array = new_array1;
  1112. asoc->nr_mapping_array = new_array2;
  1113. asoc->mapping_array_size = new_size;
  1114. return (0);
  1115. }
/*
 * Core of the PCB/association iterator: walk endpoints (and each
 * endpoint's associations), invoking the caller-supplied callbacks
 * stored in "it".  Runs with the INP-INFO read lock and the global
 * iterator lock held; both are dropped periodically (every
 * SCTP_ITERATOR_MAX_AT_ONCE associations) to let other threads in,
 * after which the iterator control flags are re-checked because the
 * world may have changed while the locks were released.
 *
 * "it" is consumed: when the iteration completes, the at-end callback
 * is invoked and the iterator structure itself is freed here.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the ref taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked (above). */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* Per-endpoint callback asked to skip, or no associations at all. */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
			                                   it->pointer,
			                                   it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the tcb and inp with references, drop every
			 * lock, then re-acquire and re-validate: while the
			 * locks were released another thread may have set
			 * control flags asking us to stop.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
#if !defined(__FreeBSD__)
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				            sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock and drop the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
				                                   it->pointer,
				                                   it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
/*
 * Drain the global iterator work queue, running each queued iterator in
 * turn via sctp_iterator_work() (which frees the iterator when done).
 * Entered with the iterator work-queue (WQ) lock held; the lock is
 * released while each iterator runs and re-acquired afterwards, and is
 * still held when this function returns.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_SET(it->vn);
#endif
		/* "it" is freed inside sctp_iterator_work() on completion. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_RESTORE();
#endif
		SCTP_IPI_ITERATOR_WQ_LOCK();
#if !defined(__FreeBSD__)
		/* Non-FreeBSD platforms: the worker thread may be told to exit. */
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			break;
		}
#endif
		/*sa_ignore FREED_MEMORY*/
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
/*
 * Process the address work queue that routing-socket (rtsock) address
 * change notifications have filled: move every queued address entry onto
 * a freshly allocated asconf iterator and start an iterator over all
 * bound-all endpoints so the address changes can be acted upon.  If the
 * iterator structure cannot be allocated, the ADDR_WQ timer is restarted
 * so we retry later with the entries still queued.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	            sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		                 (struct sctp_inpcb *)NULL,
		                 (struct sctp_tcb *)NULL,
		                 (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;
	/* Steal the entire pending list under the WQ address lock. */
	SCTP_WQ_ADDR_LOCK();
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}
	SCTP_WQ_ADDR_UNLOCK();
	if (asc->cnt == 0) {
		/* Nothing was queued after all; release the iterator again. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		/*
		 * NOTE(review): the return value of sctp_initiate_iterator()
		 * is discarded here; if it can fail (e.g. on allocation
		 * failure), "asc" and the entries moved onto it appear to be
		 * leaked -- confirm against the iterator implementation.
		 */
		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
		                             sctp_asconf_iterator_stcb,
		                             NULL, /* No ep end for boundall */
		                             SCTP_PCB_FLAGS_BOUNDALL,
		                             SCTP_PCB_ANY_FEATURES,
		                             SCTP_ASOC_ANY_STATE,
		                             (void *)asc, 0,
		                             sctp_asconf_iterator_end, NULL, 0);
	}
}
/*
 * Central SCTP timer callback.  "t" is the sctp_timer that fired; its
 * ep/tcb/net fields identify (depending on timer type) the endpoint,
 * association and destination the timer belongs to.
 *
 * The long preamble validates the timer and pins the objects it refers
 * to: an inp reference and an asoc refcount are taken, the TCB lock is
 * acquired, and each early-out path carefully undoes exactly what was
 * done so far (and restores the vnet context on FreeBSD >= 8.1).  Only
 * then is the per-type handler dispatched.  Handlers that destroy the
 * tcb/inp jump to out_decr/out_no_decr instead of falling through, so
 * no unlock is attempted on freed objects.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
	CURVNET_SET((struct vnet *)tmr->vnet);
#endif
	did_output = 1;
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif
	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_RESTORE();
#endif
		return;
	}
	/*
	 * stopped_from is a debugging breadcrumb recording how far the
	 * handler got before bailing out (0xa001..0xa006 below).
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_RESTORE();
#endif
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ needs an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_RESTORE();
#endif
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is already gone, only timers involved in
		 * tearing the endpoint/association down may still run.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		     (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		     (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		     (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		     (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		     (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
			CURVNET_RESTORE();
#endif
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we work on it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association is gone; back out the pins. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
			CURVNET_RESTORE();
#endif
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped after it fired; nothing to do. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_RESTORE();
#endif
		return;
	}
	tmr->stopped_from = 0xa006;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* Lock held now, the refcount pin is no longer needed. */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
			CURVNET_RESTORE();
#endif
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Address work queue: no inp/stcb required. */
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			                 chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Heartbeat needs the specific destination (net) too. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Re-arm and emit the heartbeat unless HB is disabled. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Periodic rotation of the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;
	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Deferred destruction of the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Apple/lock-testing builds: take the socket lock before
		 * the TCB lock, pinning the asoc across the lock juggle.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Deferred destruction of the endpoint. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_3);
#if defined(__APPLE__)
		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
#endif
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		                SCTP_CALLED_FROM_INPKILL_TIMER);
#if defined(__APPLE__)
		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
#endif
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		        tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* "type" was saved earlier because tmr may be gone by now. */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	        type);
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
	CURVNET_RESTORE();
#endif
}
  1784. void
  1785. sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  1786. struct sctp_nets *net)
  1787. {
  1788. uint32_t to_ticks;
  1789. struct sctp_timer *tmr;
  1790. if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
  1791. return;
  1792. tmr = NULL;
  1793. if (stcb) {
  1794. SCTP_TCB_LOCK_ASSERT(stcb);
  1795. }
  1796. switch (t_type) {
  1797. case SCTP_TIMER_TYPE_ZERO_COPY:
  1798. tmr = &inp->sctp_ep.zero_copy_timer;
  1799. to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
  1800. break;
  1801. case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
  1802. tmr = &inp->sctp_ep.zero_copy_sendq_timer;
  1803. to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
  1804. break;
  1805. case SCTP_TIMER_TYPE_ADDR_WQ:
  1806. /* Only 1 tick away :-) */
  1807. tmr = &SCTP_BASE_INFO(addr_wq_timer);
  1808. to_ticks = SCTP_ADDRESS_TICK_DELAY;
  1809. break;
  1810. case SCTP_TIMER_TYPE_SEND:
  1811. /* Here we use the RTO timer */
  1812. {
  1813. int rto_val;
  1814. if ((stcb == NULL) || (net == NULL)) {
  1815. return;
  1816. }
  1817. tmr = &net->rxt_timer;
  1818. if (net->RTO == 0) {
  1819. rto_val = stcb->asoc.initial_rto;
  1820. } else {
  1821. rto_val = net->RTO;
  1822. }
  1823. to_ticks = MSEC_TO_TICKS(rto_val);
  1824. }
  1825. break;
  1826. case SCTP_TIMER_TYPE_INIT:
  1827. /*
  1828. * Here we use the INIT timer default usually about 1
  1829. * minute.
  1830. */
  1831. if ((stcb == NULL) || (net == NULL)) {
  1832. return;
  1833. }
  1834. tmr = &net->rxt_timer;
  1835. if (net->RTO == 0) {
  1836. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  1837. } else {
  1838. to_ticks = MSEC_TO_TICKS(net->RTO);
  1839. }
  1840. break;
  1841. case SCTP_TIMER_TYPE_RECV:
  1842. /*
  1843. * Here we use the Delayed-Ack timer value from the inp
  1844. * ususually about 200ms.
  1845. */
  1846. if (stcb == NULL) {
  1847. return;
  1848. }
  1849. tmr = &stcb->asoc.dack_timer;
  1850. to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
  1851. break;
  1852. case SCTP_TIMER_TYPE_SHUTDOWN:
  1853. /* Here we use the RTO of the destination. */
  1854. if ((stcb == NULL) || (net == NULL)) {
  1855. return;
  1856. }
  1857. if (net->RTO == 0) {
  1858. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  1859. } else {
  1860. to_ticks = MSEC_TO_TICKS(net->RTO);
  1861. }
  1862. tmr = &net->rxt_timer;
  1863. break;
  1864. case SCTP_TIMER_TYPE_HEARTBEAT:
  1865. /*
  1866. * the net is used here so that we can add in the RTO. Even
  1867. * though we use a different timer. We also add the HB timer
  1868. * PLUS a random jitter.
  1869. */
  1870. if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
  1871. return;
  1872. } else {
  1873. uint32_t rndval;
  1874. uint32_t jitter;
  1875. if ((net->dest_state & SCTP_ADDR_NOHB) &&
  1876. !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
  1877. return;
  1878. }
  1879. if (net->RTO == 0) {
  1880. to_ticks = stcb->asoc.initial_rto;
  1881. } else {
  1882. to_ticks = net->RTO;
  1883. }
  1884. rndval = sctp_select_initial_TSN(&inp->sctp_ep);
  1885. jitter = rndval % to_ticks;
  1886. if (jitter >= (to_ticks >> 1)) {
  1887. to_ticks = to_ticks + (jitter - (to_ticks >> 1));
  1888. } else {
  1889. to_ticks = to_ticks - jitter;
  1890. }
  1891. if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
  1892. !(net->dest_state & SCTP_ADDR_PF)) {
  1893. to_ticks += net->heart_beat_delay;
  1894. }
  1895. /*
  1896. * Now we must convert the to_ticks that are now in
  1897. * ms to ticks.
  1898. */
  1899. to_ticks = MSEC_TO_TICKS(to_ticks);
  1900. tmr = &net->hb_timer;
  1901. }
  1902. break;
  1903. case SCTP_TIMER_TYPE_COOKIE:
  1904. /*
  1905. * Here we can use the RTO timer from the network since one
  1906. * RTT was compelete. If a retran happened then we will be
  1907. * using the RTO initial value.
  1908. */
  1909. if ((stcb == NULL) || (net == NULL)) {
  1910. return;
  1911. }
  1912. if (net->RTO == 0) {
  1913. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  1914. } else {
  1915. to_ticks = MSEC_TO_TICKS(net->RTO);
  1916. }
  1917. tmr = &net->rxt_timer;
  1918. break;
  1919. case SCTP_TIMER_TYPE_NEWCOOKIE:
  1920. /*
  1921. * nothing needed but the endpoint here ususually about 60
  1922. * minutes.
  1923. */
  1924. if (inp == NULL) {
  1925. return;
  1926. }
  1927. tmr = &inp->sctp_ep.signature_change;
  1928. to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
  1929. break;
  1930. case SCTP_TIMER_TYPE_ASOCKILL:
  1931. if (stcb == NULL) {
  1932. return;
  1933. }
  1934. tmr = &stcb->asoc.strreset_timer;
  1935. to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
  1936. break;
  1937. case SCTP_TIMER_TYPE_INPKILL:
  1938. /*
  1939. * The inp is setup to die. We re-use the signature_chage
  1940. * timer since that has stopped and we are in the GONE
  1941. * state.
  1942. */
  1943. if (inp == NULL) {
  1944. return;
  1945. }
  1946. tmr = &inp->sctp_ep.signature_change;
  1947. to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
  1948. break;
  1949. case SCTP_TIMER_TYPE_PATHMTURAISE:
  1950. /*
  1951. * Here we use the value found in the EP for PMTU ususually
  1952. * about 10 minutes.
  1953. */
  1954. if ((stcb == NULL) || (inp == NULL)) {
  1955. return;
  1956. }
  1957. if (net == NULL) {
  1958. return;
  1959. }
  1960. if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
  1961. return;
  1962. }
  1963. to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
  1964. tmr = &net->pmtu_timer;
  1965. break;
  1966. case SCTP_TIMER_TYPE_SHUTDOWNACK:
  1967. /* Here we use the RTO of the destination */
  1968. if ((stcb == NULL) || (net == NULL)) {
  1969. return;
  1970. }
  1971. if (net->RTO == 0) {
  1972. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  1973. } else {
  1974. to_ticks = MSEC_TO_TICKS(net->RTO);
  1975. }
  1976. tmr = &net->rxt_timer;
  1977. break;
  1978. case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
  1979. /*
  1980. * Here we use the endpoints shutdown guard timer usually
  1981. * about 3 minutes.
  1982. */
  1983. if ((inp == NULL) || (stcb == NULL)) {
  1984. return;
  1985. }
  1986. to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
  1987. tmr = &stcb->asoc.shut_guard_timer;
  1988. break;
  1989. case SCTP_TIMER_TYPE_STRRESET:
  1990. /*
  1991. * Here the timer comes from the stcb but its value is from
  1992. * the net's RTO.
  1993. */
  1994. if ((stcb == NULL) || (net == NULL)) {
  1995. return;
  1996. }
  1997. if (net->RTO == 0) {
  1998. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  1999. } else {
  2000. to_ticks = MSEC_TO_TICKS(net->RTO);
  2001. }
  2002. tmr = &stcb->asoc.strreset_timer;
  2003. break;
  2004. case SCTP_TIMER_TYPE_ASCONF:
  2005. /*
  2006. * Here the timer comes from the stcb but its value is from
  2007. * the net's RTO.
  2008. */
  2009. if ((stcb == NULL) || (net == NULL)) {
  2010. return;
  2011. }
  2012. if (net->RTO == 0) {
  2013. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2014. } else {
  2015. to_ticks = MSEC_TO_TICKS(net->RTO);
  2016. }
  2017. tmr = &stcb->asoc.asconf_timer;
  2018. break;
  2019. case SCTP_TIMER_TYPE_PRIM_DELETED:
  2020. if ((stcb == NULL) || (net != NULL)) {
  2021. return;
  2022. }
  2023. to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2024. tmr = &stcb->asoc.delete_prim_timer;
  2025. break;
  2026. case SCTP_TIMER_TYPE_AUTOCLOSE:
  2027. if (stcb == NULL) {
  2028. return;
  2029. }
  2030. if (stcb->asoc.sctp_autoclose_ticks == 0) {
  2031. /*
  2032. * Really an error since stcb is NOT set to
  2033. * autoclose
  2034. */
  2035. return;
  2036. }
  2037. to_ticks = stcb->asoc.sctp_autoclose_ticks;
  2038. tmr = &stcb->asoc.autoclose_timer;
  2039. break;
  2040. default:
  2041. SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
  2042. __FUNCTION__, t_type);
  2043. return;
  2044. break;
  2045. }
  2046. if ((to_ticks <= 0) || (tmr == NULL)) {
  2047. SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
  2048. __FUNCTION__, t_type, to_ticks, tmr);
  2049. return;
  2050. }
  2051. if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
  2052. /*
  2053. * we do NOT allow you to have it already running. if it is
  2054. * we leave the current one up unchanged
  2055. */
  2056. return;
  2057. }
  2058. /* At this point we can proceed */
  2059. if (t_type == SCTP_TIMER_TYPE_SEND) {
  2060. stcb->asoc.num_send_timers_up++;
  2061. }
  2062. tmr->stopped_from = 0;
  2063. tmr->type = t_type;
  2064. tmr->ep = (void *)inp;
  2065. tmr->tcb = (void *)stcb;
  2066. tmr->net = (void *)net;
  2067. tmr->self = (void *)tmr;
  2068. #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
  2069. tmr->vnet = (void *)curvnet;
  2070. #endif
  2071. #ifndef __Panda__
  2072. tmr->ticks = sctp_get_tick_count();
  2073. #endif
  2074. (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
  2075. return;
  2076. }
/*
 * Disarm the timer of kind 't_type' for the given endpoint (inp),
 * association (stcb) and/or path (net).  'from' is recorded in
 * tmr->stopped_from to identify the caller when debugging.  Which
 * arguments are required depends on the timer type; a missing required
 * argument makes this a silent no-op.  The stcb lock must be held
 * whenever an stcb is supplied.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer object that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	/* Keep the outstanding SEND-timer count consistent (never negative). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing 'self' tells the timeout handler this timer was stopped. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
  2230. uint32_t
  2231. sctp_calculate_len(struct mbuf *m)
  2232. {
  2233. uint32_t tlen = 0;
  2234. struct mbuf *at;
  2235. at = m;
  2236. while (at) {
  2237. tlen += SCTP_BUF_LEN(at);
  2238. at = SCTP_BUF_NEXT(at);
  2239. }
  2240. return (tlen);
  2241. }
  2242. void
  2243. sctp_mtu_size_reset(struct sctp_inpcb *inp,
  2244. struct sctp_association *asoc, uint32_t mtu)
  2245. {
  2246. /*
  2247. * Reset the P-MTU size on this association, this involves changing
  2248. * the asoc MTU, going through ANY chunk+overhead larger than mtu to
  2249. * allow the DF flag to be cleared.
  2250. */
  2251. struct sctp_tmit_chunk *chk;
  2252. unsigned int eff_mtu, ovh;
  2253. asoc->smallest_mtu = mtu;
  2254. if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
  2255. ovh = SCTP_MIN_OVERHEAD;
  2256. } else {
  2257. ovh = SCTP_MIN_V4_OVERHEAD;
  2258. }
  2259. eff_mtu = mtu - ovh;
  2260. TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
  2261. if (chk->send_size > eff_mtu) {
  2262. chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
  2263. }
  2264. }
  2265. TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
  2266. if (chk->send_size > eff_mtu) {
  2267. chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
  2268. }
  2269. }
  2270. }
  2271. /*
  2272. * given an association and starting time of the current RTT period return
  2273. * RTO in number of msecs net should point to the current network
  2274. */
  2275. uint32_t
  2276. sctp_calculate_rto(struct sctp_tcb *stcb,
  2277. struct sctp_association *asoc,
  2278. struct sctp_nets *net,
  2279. struct timeval *told,
  2280. int safe, int rtt_from_sack)
  2281. {
  2282. /*-
  2283. * given an association and the starting time of the current RTT
  2284. * period (in value1/value2) return RTO in number of msecs.
  2285. */
  2286. int32_t rtt; /* RTT in ms */
  2287. uint32_t new_rto;
  2288. int first_measure = 0;
  2289. struct timeval now, then, *old;
  2290. /* Copy it out for sparc64 */
  2291. if (safe == sctp_align_unsafe_makecopy) {
  2292. old = &then;
  2293. memcpy(&then, told, sizeof(struct timeval));
  2294. } else if (safe == sctp_align_safe_nocopy) {
  2295. old = told;
  2296. } else {
  2297. /* error */
  2298. SCTP_PRINTF("Huh, bad rto calc call\n");
  2299. return (0);
  2300. }
  2301. /************************/
  2302. /* 1. calculate new RTT */
  2303. /************************/
  2304. /* get the current time */
  2305. if (stcb->asoc.use_precise_time) {
  2306. (void)SCTP_GETPTIME_TIMEVAL(&now);
  2307. } else {
  2308. (void)SCTP_GETTIME_TIMEVAL(&now);
  2309. }
  2310. timevalsub(&now, old);
  2311. /* store the current RTT in us */
  2312. net->rtt = (uint64_t)10000000 * (uint64_t)now.tv_sec +
  2313. (uint64_t)now.tv_usec;
  2314. /* computer rtt in ms */
  2315. rtt = net->rtt / 1000;
  2316. if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
  2317. /* Tell the CC module that a new update has just occurred from a sack */
  2318. (*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
  2319. }
  2320. /* Do we need to determine the lan? We do this only
  2321. * on sacks i.e. RTT being determined from data not
  2322. * non-data (HB/INIT->INITACK).
  2323. */
  2324. if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
  2325. (net->lan_type == SCTP_LAN_UNKNOWN)) {
  2326. if (net->rtt > SCTP_LOCAL_LAN_RTT) {
  2327. net->lan_type = SCTP_LAN_INTERNET;
  2328. } else {
  2329. net->lan_type = SCTP_LAN_LOCAL;
  2330. }
  2331. }
  2332. /***************************/
  2333. /* 2. update RTTVAR & SRTT */
  2334. /***************************/
  2335. /*-
  2336. * Compute the scaled average lastsa and the
  2337. * scaled variance lastsv as described in van Jacobson
  2338. * Paper "Congestion Avoidance and Control", Annex A.
  2339. *
  2340. * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
  2341. * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
  2342. */
  2343. if (net->RTO_measured) {
  2344. rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
  2345. net->lastsa += rtt;
  2346. if (rtt < 0) {
  2347. rtt = -rtt;
  2348. }
  2349. rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
  2350. net->lastsv += rtt;
  2351. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
  2352. rto_logging(net, SCTP_LOG_RTTVAR);
  2353. }
  2354. } else {
  2355. /* First RTO measurment */
  2356. net->RTO_measured = 1;
  2357. first_measure = 1;
  2358. net->lastsa = rtt << SCTP_RTT_SHIFT;
  2359. net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
  2360. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
  2361. rto_logging(net, SCTP_LOG_INITIAL_RTT);
  2362. }
  2363. }
  2364. if (net->lastsv == 0) {
  2365. net->lastsv = SCTP_CLOCK_GRANULARITY;
  2366. }
  2367. new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
  2368. if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
  2369. (stcb->asoc.sat_network_lockout == 0)) {
  2370. stcb->asoc.sat_network = 1;
  2371. } else if ((!first_measure) && stcb->asoc.sat_network) {
  2372. stcb->asoc.sat_network = 0;
  2373. stcb->asoc.sat_network_lockout = 1;
  2374. }
  2375. /* bound it, per C6/C7 in Section 5.3.1 */
  2376. if (new_rto < stcb->asoc.minrto) {
  2377. new_rto = stcb->asoc.minrto;
  2378. }
  2379. if (new_rto > stcb->asoc.maxrto) {
  2380. new_rto = stcb->asoc.maxrto;
  2381. }
  2382. /* we are now returning the RTO */
  2383. return (new_rto);
  2384. }
  2385. /*
  2386. * return a pointer to a contiguous piece of data from the given mbuf chain
  2387. * starting at 'off' for 'len' bytes. If the desired piece spans more than
  2388. * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
  2389. * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
  2390. */
  2391. caddr_t
  2392. sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
  2393. {
  2394. uint32_t count;
  2395. uint8_t *ptr;
  2396. ptr = in_ptr;
  2397. if ((off < 0) || (len <= 0))
  2398. return (NULL);
  2399. /* find the desired start location */
  2400. while ((m != NULL) && (off > 0)) {
  2401. if (off < SCTP_BUF_LEN(m))
  2402. break;
  2403. off -= SCTP_BUF_LEN(m);
  2404. m = SCTP_BUF_NEXT(m);
  2405. }
  2406. if (m == NULL)
  2407. return (NULL);
  2408. /* is the current mbuf large enough (eg. contiguous)? */
  2409. if ((SCTP_BUF_LEN(m) - off) >= len) {
  2410. return (mtod(m, caddr_t) + off);
  2411. } else {
  2412. /* else, it spans more than one mbuf, so save a temp copy... */
  2413. while ((m != NULL) && (len > 0)) {
  2414. count = min(SCTP_BUF_LEN(m) - off, len);
  2415. bcopy(mtod(m, caddr_t) + off, ptr, count);
  2416. len -= count;
  2417. ptr += count;
  2418. off = 0;
  2419. m = SCTP_BUF_NEXT(m);
  2420. }
  2421. if ((m == NULL) && (len > 0))
  2422. return (NULL);
  2423. else
  2424. return ((caddr_t)in_ptr);
  2425. }
  2426. }
  2427. struct sctp_paramhdr *
  2428. sctp_get_next_param(struct mbuf *m,
  2429. int offset,
  2430. struct sctp_paramhdr *pull,
  2431. int pull_limit)
  2432. {
  2433. /* This just provides a typed signature to Peter's Pull routine */
  2434. return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
  2435. (uint8_t *) pull));
  2436. }
  2437. int
  2438. sctp_add_pad_tombuf(struct mbuf *m, int padlen)
  2439. {
  2440. /*
  2441. * add padlen bytes of 0 filled padding to the end of the mbuf. If
  2442. * padlen is > 3 this routine will fail.
  2443. */
  2444. uint8_t *dp;
  2445. int i;
  2446. if (padlen > 3) {
  2447. SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  2448. return (ENOBUFS);
  2449. }
  2450. if (padlen <= M_TRAILINGSPACE(m)) {
  2451. /*
  2452. * The easy way. We hope the majority of the time we hit
  2453. * here :)
  2454. */
  2455. dp = (uint8_t *) (mtod(m, caddr_t) + SCTP_BUF_LEN(m));
  2456. SCTP_BUF_LEN(m) += padlen;
  2457. } else {
  2458. /* Hard way we must grow the mbuf */
  2459. struct mbuf *tmp;
  2460. tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
  2461. if (tmp == NULL) {
  2462. /* Out of space GAK! we are in big trouble. */
  2463. SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  2464. return (ENOBUFS);
  2465. }
  2466. /* setup and insert in middle */
  2467. SCTP_BUF_LEN(tmp) = padlen;
  2468. SCTP_BUF_NEXT(tmp) = NULL;
  2469. SCTP_BUF_NEXT(m) = tmp;
  2470. dp = mtod(tmp, uint8_t *);
  2471. }
  2472. /* zero out the pad */
  2473. for (i = 0; i < padlen; i++) {
  2474. *dp = 0;
  2475. dp++;
  2476. }
  2477. return (0);
  2478. }
  2479. int
  2480. sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
  2481. {
  2482. /* find the last mbuf in chain and pad it */
  2483. struct mbuf *m_at;
  2484. m_at = m;
  2485. if (last_mbuf) {
  2486. return (sctp_add_pad_tombuf(last_mbuf, padval));
  2487. } else {
  2488. while (m_at) {
  2489. if (SCTP_BUF_NEXT(m_at) == NULL) {
  2490. return (sctp_add_pad_tombuf(m_at, padval));
  2491. }
  2492. m_at = SCTP_BUF_NEXT(m_at);
  2493. }
  2494. }
  2495. SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
  2496. return (EFAULT);
  2497. }
  2498. static void
  2499. sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
  2500. uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
  2501. #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  2502. SCTP_UNUSED
  2503. #endif
  2504. )
  2505. {
  2506. struct mbuf *m_notify;
  2507. struct sctp_assoc_change *sac;
  2508. struct sctp_queued_to_read *control;
  2509. size_t notif_len, abort_len;
  2510. unsigned int i;
  2511. #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  2512. struct socket *so;
  2513. #endif
  2514. if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
  2515. notif_len = sizeof(struct sctp_assoc_change);
  2516. if (abort != NULL) {
  2517. abort_len = htons(abort->ch.chunk_length);
  2518. } else {
  2519. abort_len = 0;
  2520. }
  2521. if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
  2522. notif_len += SCTP_ASSOC_SUPPORTS_MAX;
  2523. } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
  2524. notif_len += abort_len;
  2525. }
  2526. m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
  2527. if (m_notify == NULL) {
  2528. /* Retry with smaller value. */
  2529. notif_len = sizeof(struct sctp_assoc_change);
  2530. m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
  2531. if (m_notify == NULL) {
  2532. goto set_error;
  2533. }
  2534. }
  2535. SCTP_BUF_NEXT(m_notify) = NULL;
  2536. sac = mtod(m_notify, struct sctp_assoc_change *);
  2537. sac->sac_type = SCTP_ASSOC_CHANGE;
  2538. sac->sac_flags = 0;
  2539. sac->sac_length = sizeof(struct sctp_assoc_change);
  2540. sac->sac_state = state;
  2541. sac->sac_error = error;
  2542. /* XXX verify these stream counts */
  2543. sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
  2544. sac->sac_inbound_streams = stcb->asoc.streamincnt;
  2545. sac->sac_assoc_id = sctp_get_associd(stcb);
  2546. if (notif_len > sizeof(struct sctp_assoc_change)) {
  2547. if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
  2548. i = 0;
  2549. if (stcb->asoc.peer_supports_prsctp) {
  2550. sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
  2551. }
  2552. if (stcb->asoc.peer_supports_auth) {
  2553. sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
  2554. }
  2555. if (stcb->asoc.peer_supports_asconf) {
  2556. sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
  2557. }
  2558. sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
  2559. if (stcb->asoc.peer_supports_strreset) {
  2560. sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
  2561. }
  2562. sac->sac_length += i;
  2563. } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
  2564. memcpy(sac->sac_info, abort, abort_len);
  2565. sac->sac_length += abort_len;
  2566. }
  2567. }
  2568. SCTP_BUF_LEN(m_notify) = sac->sac_length;
  2569. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  2570. 0, 0, stcb->asoc.context, 0, 0, 0,
  2571. m_notify);
  2572. if (control != NULL) {
  2573. control->length = SCTP_BUF_LEN(m_notify);
  2574. /* not that we need this */
  2575. control->tail_mbuf = m_notify;
  2576. control->spec_flags = M_NOTIFICATION;
  2577. sctp_add_to_readq(stcb->sctp_ep, stcb,
  2578. control,
  2579. &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
  2580. so_locked);
  2581. } else {
  2582. sctp_m_freem(m_notify);
  2583. }
  2584. }
  2585. /*
  2586. * For 1-to-1 style sockets, we send up and error when an ABORT
  2587. * comes in.
  2588. */
  2589. set_error:
  2590. if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  2591. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  2592. ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
  2593. if (from_peer) {
  2594. if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
  2595. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
  2596. stcb->sctp_socket->so_error = ECONNREFUSED;
  2597. } else {
  2598. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
  2599. stcb->sctp_socket->so_error = ECONNRESET;
  2600. }
  2601. } else {
  2602. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
  2603. stcb->sctp_socket->so_error = ECONNABORTED;
  2604. }
  2605. }
  2606. /* Wake ANY sleepers */
  2607. #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  2608. so = SCTP_INP_SO(stcb->sctp_ep);
  2609. if (!so_locked) {
  2610. atomic_add_int(&stcb->asoc.refcnt, 1);
  2611. SCTP_TCB_UNLOCK(stcb);
  2612. SCTP_SOCKET_LOCK(so, 1);
  2613. SCTP_TCB_LOCK(stcb);
  2614. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2615. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  2616. SCTP_SOCKET_UNLOCK(so, 1);
  2617. return;
  2618. }
  2619. }
  2620. #endif
  2621. if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  2622. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  2623. ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
  2624. socantrcvmore(stcb->sctp_socket);
  2625. }
  2626. sorwakeup(stcb->sctp_socket);
  2627. sowwakeup(stcb->sctp_socket);
  2628. #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  2629. if (!so_locked) {
  2630. SCTP_SOCKET_UNLOCK(so, 1);
  2631. }
  2632. #endif
  2633. }
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with
 * the given state/error to the socket's read queue.  A no-op when the
 * event is not enabled or on allocation failure.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the affected address into the notification. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
	{
#ifdef SCTP_EMBEDDED_V6_SCOPE
		struct sockaddr_in6 *sin6;
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
#ifdef SCTP_EMBEDDED_V6_SCOPE
		/*
		 * Link-local addresses carry the scope embedded in the
		 * address bits in-kernel; present a user-visible form.
		 */
		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
#ifdef SCTP_KAME
				(void)sa6_recoverscope(sin6);
#else
				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
				    NULL);
#endif
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		break;
	}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
  2715. static void
  2716. sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
  2717. struct sctp_tmit_chunk *chk, int so_locked
  2718. #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  2719. SCTP_UNUSED
  2720. #endif
  2721. )
  2722. {
  2723. struct mbuf *m_notify;
  2724. struct sctp_send_failed *ssf;
  2725. struct sctp_send_failed_event *ssfe;
  2726. struct sctp_queued_to_read *control;
  2727. int length;
  2728. if ((stcb == NULL) ||
  2729. (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
  2730. sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
  2731. /* event not enabled */
  2732. return;
  2733. }
  2734. if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  2735. length = sizeof(struct sctp_send_failed_event);
  2736. } else {
  2737. length = sizeof(struct sctp_send_failed);
  2738. }
  2739. m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
  2740. if (m_notify == NULL)
  2741. /* no space left */
  2742. return;
  2743. length += chk->send_size;
  2744. length -= sizeof(struct sctp_data_chunk);
  2745. SCTP_BUF_LEN(m_notify) = 0;
  2746. if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  2747. ssfe = mtod(m_notify, struct sctp_send_failed_event *);
  2748. ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
  2749. if (sent) {
  2750. ssfe->ssfe_flags = SCTP_DATA_SENT;
  2751. } else {
  2752. ssfe->ssfe_flags = SCTP_DATA_UNSENT;
  2753. }
  2754. ssfe->ssfe_length = length;
  2755. ssfe->ssfe_error = error;
  2756. /* not exactly what the user sent in, but should be close :) */
  2757. bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
  2758. ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
  2759. ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
  2760. ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
  2761. ssfe->ssfe_info.snd_context = chk->rec.data.context;
  2762. ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
  2763. ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
  2764. SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
  2765. } else {
  2766. ssf = mtod(m_notify, struct sctp_send_failed *);
  2767. ssf->ssf_type = SCTP_SEND_FAILED;
  2768. if (sent) {
  2769. ssf->ssf_flags = SCTP_DATA_SENT;
  2770. } else {
  2771. ssf->ssf_flags = SCTP_DATA_UNSENT;
  2772. }
  2773. ssf->ssf_length = length;
  2774. ssf->ssf_error = error;
  2775. /* not exactly what the user sent in, but should be close :) */
  2776. bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
  2777. ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
  2778. ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
  2779. ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
  2780. ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
  2781. ssf->ssf_info.sinfo_context = chk->rec.data.context;
  2782. ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
  2783. ssf->ssf_assoc_id = sctp_get_associd(stcb);
  2784. SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
  2785. }
  2786. if (chk->data) {
  2787. /*
  2788. * trim off the sctp chunk header(it should
  2789. * be there)
  2790. */
  2791. if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
  2792. m_adj(chk->data, sizeof(struct sctp_data_chunk));
  2793. sctp_mbuf_crush(chk->data);
  2794. chk->send_size -= sizeof(struct sctp_data_chunk);
  2795. }
  2796. }
  2797. SCTP_BUF_NEXT(m_notify) = chk->data;
  2798. /* Steal off the mbuf */
  2799. chk->data = NULL;
  2800. /*
  2801. * For this case, we check the actual socket buffer, since the assoc
  2802. * is going away we don't want to overfill the socket buffer for a
  2803. * non-reader
  2804. */
  2805. if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  2806. sctp_m_freem(m_notify);
  2807. return;
  2808. }
  2809. /* append to socket */
  2810. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  2811. 0, 0, stcb->asoc.context, 0, 0, 0,
  2812. m_notify);
  2813. if (control == NULL) {
  2814. /* no memory */
  2815. sctp_m_freem(m_notify);
  2816. return;
  2817. }
  2818. control->spec_flags = M_NOTIFICATION;
  2819. sctp_add_to_readq(stcb->sctp_ep, stcb,
  2820. control,
  2821. &stcb->sctp_socket->so_rcv, 1,
  2822. SCTP_READ_LOCK_NOT_HELD,
  2823. so_locked);
  2824. }
  2825. static void
  2826. sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
  2827. struct sctp_stream_queue_pending *sp, int so_locked
  2828. #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  2829. SCTP_UNUSED
  2830. #endif
  2831. )
  2832. {
  2833. struct mbuf *m_notify;
  2834. struct sctp_send_failed *ssf;
  2835. struct sctp_send_failed_event *ssfe;
  2836. struct sctp_queued_to_read *control;
  2837. int length;
  2838. if ((stcb == NULL) ||
  2839. (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
  2840. sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
  2841. /* event not enabled */
  2842. return;
  2843. }
  2844. if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  2845. length = sizeof(struct sctp_send_failed_event);
  2846. } else {
  2847. length = sizeof(struct sctp_send_failed);
  2848. }
  2849. m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
  2850. if (m_notify == NULL) {
  2851. /* no space left */
  2852. return;
  2853. }
  2854. length += sp->length;
  2855. SCTP_BUF_LEN(m_notify) = 0;
  2856. if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  2857. ssfe = mtod(m_notify, struct sctp_send_failed_event *);
  2858. ssfe->ssfe_type = SCTP_SEND_FAILED;
  2859. ssfe->ssfe_flags = SCTP_DATA_UNSENT;
  2860. ssfe->ssfe_length = length;
  2861. ssfe->ssfe_error = error;
  2862. /* not exactly what the user sent in, but should be close :) */
  2863. bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
  2864. ssfe->ssfe_info.snd_sid = sp->stream;
  2865. if (sp->some_taken) {
  2866. ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
  2867. } else {
  2868. ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
  2869. }
  2870. ssfe->ssfe_info.snd_ppid = sp->ppid;
  2871. ssfe->ssfe_info.snd_context = sp->context;
  2872. ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
  2873. ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
  2874. SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
  2875. } else {
  2876. ssf = mtod(m_notify, struct sctp_send_failed *);
  2877. ssf->ssf_type = SCTP_SEND_FAILED;
  2878. ssf->ssf_flags = SCTP_DATA_UNSENT;
  2879. ssf->ssf_length = length;
  2880. ssf->ssf_error = error;
  2881. /* not exactly what the user sent in, but should be close :) */
  2882. bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
  2883. ssf->ssf_info.sinfo_stream = sp->stream;
  2884. ssf->ssf_info.sinfo_ssn = sp->strseq;
  2885. if (sp->some_taken) {
  2886. ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
  2887. } else {
  2888. ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
  2889. }
  2890. ssf->ssf_info.sinfo_ppid = sp->ppid;
  2891. ssf->ssf_info.sinfo_context = sp->context;
  2892. ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
  2893. ssf->ssf_assoc_id = sctp_get_associd(stcb);
  2894. SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
  2895. }
  2896. SCTP_BUF_NEXT(m_notify) = sp->data;
  2897. /* Steal off the mbuf */
  2898. sp->data = NULL;
  2899. /*
  2900. * For this case, we check the actual socket buffer, since the assoc
  2901. * is going away we don't want to overfill the socket buffer for a
  2902. * non-reader
  2903. */
  2904. if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  2905. sctp_m_freem(m_notify);
  2906. return;
  2907. }
  2908. /* append to socket */
  2909. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  2910. 0, 0, stcb->asoc.context, 0, 0, 0,
  2911. m_notify);
  2912. if (control == NULL) {
  2913. /* no memory */
  2914. sctp_m_freem(m_notify);
  2915. return;
  2916. }
  2917. control->spec_flags = M_NOTIFICATION;
  2918. sctp_add_to_readq(stcb->sctp_ep, stcb,
  2919. control,
  2920. &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
  2921. }
  2922. static void
  2923. sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
  2924. {
  2925. struct mbuf *m_notify;
  2926. struct sctp_adaptation_event *sai;
  2927. struct sctp_queued_to_read *control;
  2928. if ((stcb == NULL) ||
  2929. sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
  2930. /* event not enabled */
  2931. return;
  2932. }
  2933. m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
  2934. if (m_notify == NULL)
  2935. /* no space left */
  2936. return;
  2937. SCTP_BUF_LEN(m_notify) = 0;
  2938. sai = mtod(m_notify, struct sctp_adaptation_event *);
  2939. sai->sai_type = SCTP_ADAPTATION_INDICATION;
  2940. sai->sai_flags = 0;
  2941. sai->sai_length = sizeof(struct sctp_adaptation_event);
  2942. sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
  2943. sai->sai_assoc_id = sctp_get_associd(stcb);
  2944. SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
  2945. SCTP_BUF_NEXT(m_notify) = NULL;
  2946. /* append to socket */
  2947. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  2948. 0, 0, stcb->asoc.context, 0, 0, 0,
  2949. m_notify);
  2950. if (control == NULL) {
  2951. /* no memory */
  2952. sctp_m_freem(m_notify);
  2953. return;
  2954. }
  2955. control->length = SCTP_BUF_LEN(m_notify);
  2956. control->spec_flags = M_NOTIFICATION;
  2957. /* not that we need this */
  2958. control->tail_mbuf = m_notify;
  2959. sctp_add_to_readq(stcb->sctp_ep, stcb,
  2960. control,
  2961. &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
  2962. }
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification (e.g. an aborted
 * partial delivery).  'val' packs the stream id in the upper 16 bits and
 * the stream sequence number in the lower 16 bits.  The entry is inserted
 * directly after the partial-delivery message it refers to.
 * This always must be called with the read-queue LOCKED in the INP.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read from this socket anymore */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream id / sequence number from 'val' */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/*
	 * NOTE(review): dead store - 'length' is reset to 0 a few lines
	 * below and then grown via atomic_add_int().
	 */
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the receive socket buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* place right after the partial-delivery message it aborts */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: drop the TCB lock (holding a
			 * refcount so the assoc cannot vanish), take the
			 * socket lock, then reacquire the TCB lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket disappeared while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
/*
 * Notify the ULP that the peer sent a SHUTDOWN.  For one-to-one style
 * (TCP model) sockets the socket is additionally marked as unable to
 * send more data; an SCTP_SHUTDOWN_EVENT is queued if subscribed to.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;
		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: drop the TCB lock (holding a refcount),
		 * take the socket lock, reacquire the TCB lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
  3117. static void
  3118. sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
  3119. int so_locked
  3120. #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3121. SCTP_UNUSED
  3122. #endif
  3123. )
  3124. {
  3125. struct mbuf *m_notify;
  3126. struct sctp_sender_dry_event *event;
  3127. struct sctp_queued_to_read *control;
  3128. if ((stcb == NULL) ||
  3129. sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
  3130. /* event not enabled */
  3131. return;
  3132. }
  3133. m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
  3134. if (m_notify == NULL) {
  3135. /* no space left */
  3136. return;
  3137. }
  3138. SCTP_BUF_LEN(m_notify) = 0;
  3139. event = mtod(m_notify, struct sctp_sender_dry_event *);
  3140. event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
  3141. event->sender_dry_flags = 0;
  3142. event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
  3143. event->sender_dry_assoc_id = sctp_get_associd(stcb);
  3144. SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
  3145. SCTP_BUF_NEXT(m_notify) = NULL;
  3146. /* append to socket */
  3147. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3148. 0, 0, stcb->asoc.context, 0, 0, 0,
  3149. m_notify);
  3150. if (control == NULL) {
  3151. /* no memory */
  3152. sctp_m_freem(m_notify);
  3153. return;
  3154. }
  3155. control->length = SCTP_BUF_LEN(m_notify);
  3156. control->spec_flags = M_NOTIFICATION;
  3157. /* not that we need this */
  3158. control->tail_mbuf = m_notify;
  3159. sctp_add_to_readq(stcb->sctp_ep, stcb, control,
  3160. &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
  3161. }
  3162. void
  3163. sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
  3164. {
  3165. struct mbuf *m_notify;
  3166. struct sctp_queued_to_read *control;
  3167. struct sctp_stream_change_event *stradd;
  3168. int len;
  3169. if ((stcb == NULL) ||
  3170. (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
  3171. /* event not enabled */
  3172. return;
  3173. }
  3174. if ((stcb->asoc.peer_req_out) && flag) {
  3175. /* Peer made the request, don't tell the local user */
  3176. stcb->asoc.peer_req_out = 0;
  3177. return;
  3178. }
  3179. stcb->asoc.peer_req_out = 0;
  3180. m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
  3181. if (m_notify == NULL)
  3182. /* no space left */
  3183. return;
  3184. SCTP_BUF_LEN(m_notify) = 0;
  3185. len = sizeof(struct sctp_stream_change_event);
  3186. if (len > M_TRAILINGSPACE(m_notify)) {
  3187. /* never enough room */
  3188. sctp_m_freem(m_notify);
  3189. return;
  3190. }
  3191. stradd = mtod(m_notify, struct sctp_stream_change_event *);
  3192. stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
  3193. stradd->strchange_flags = flag;
  3194. stradd->strchange_length = len;
  3195. stradd->strchange_assoc_id = sctp_get_associd(stcb);
  3196. stradd->strchange_instrms = numberin;
  3197. stradd->strchange_outstrms = numberout;
  3198. SCTP_BUF_LEN(m_notify) = len;
  3199. SCTP_BUF_NEXT(m_notify) = NULL;
  3200. if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  3201. /* no space */
  3202. sctp_m_freem(m_notify);
  3203. return;
  3204. }
  3205. /* append to socket */
  3206. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3207. 0, 0, stcb->asoc.context, 0, 0, 0,
  3208. m_notify);
  3209. if (control == NULL) {
  3210. /* no memory */
  3211. sctp_m_freem(m_notify);
  3212. return;
  3213. }
  3214. control->spec_flags = M_NOTIFICATION;
  3215. control->length = SCTP_BUF_LEN(m_notify);
  3216. /* not that we need this */
  3217. control->tail_mbuf = m_notify;
  3218. sctp_add_to_readq(stcb->sctp_ep, stcb,
  3219. control,
  3220. &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
  3221. }
/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification reporting the new local
 * (sending) and remote (receiving) TSNs after an association/TSN reset.
 * 'flag' carries the SCTP_STREAM_RESET_* result flags.
 */
void
sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_assoc_reset_event *strasoc;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_assoc_reset_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
	strasoc->assocreset_flags = flag;
	strasoc->assocreset_length = len;
	strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
	strasoc->assocreset_local_tsn = sending_tsn;
	strasoc->assocreset_remote_tsn = recv_tsn;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space in the receive buffer - drop the notification */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the stream ids
 * affected by a stream reset.  'flag' carries the SCTP_STREAM_RESET_*
 * direction/result flags; 'list' holds 'number_entries' stream ids.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* fixed header plus one uint16_t per affected stream */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		for (i = 0; i < number_entries; i++) {
			/*
			 * NOTE(review): entries appear to arrive in network
			 * byte order (hence ntohs) - confirm against the
			 * stream-reset call sites.
			 */
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space in the receive buffer - drop the notification */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
  3335. static void
  3336. sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
  3337. {
  3338. struct mbuf *m_notify;
  3339. struct sctp_remote_error *sre;
  3340. struct sctp_queued_to_read *control;
  3341. size_t notif_len, chunk_len;
  3342. if ((stcb == NULL) ||
  3343. sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
  3344. return;
  3345. }
  3346. if (chunk != NULL) {
  3347. chunk_len = htons(chunk->ch.chunk_length);
  3348. } else {
  3349. chunk_len = 0;
  3350. }
  3351. notif_len = sizeof(struct sctp_remote_error) + chunk_len;
  3352. m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
  3353. if (m_notify == NULL) {
  3354. /* Retry with smaller value. */
  3355. notif_len = sizeof(struct sctp_remote_error);
  3356. m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
  3357. if (m_notify == NULL) {
  3358. return;
  3359. }
  3360. }
  3361. SCTP_BUF_NEXT(m_notify) = NULL;
  3362. sre = mtod(m_notify, struct sctp_remote_error *);
  3363. sre->sre_type = SCTP_REMOTE_ERROR;
  3364. sre->sre_flags = 0;
  3365. sre->sre_length = sizeof(struct sctp_remote_error);
  3366. sre->sre_error = error;
  3367. sre->sre_assoc_id = sctp_get_associd(stcb);
  3368. if (notif_len > sizeof(struct sctp_remote_error)) {
  3369. memcpy(sre->sre_data, chunk, chunk_len);
  3370. sre->sre_length += chunk_len;
  3371. }
  3372. SCTP_BUF_LEN(m_notify) = sre->sre_length;
  3373. control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3374. 0, 0, stcb->asoc.context, 0, 0, 0,
  3375. m_notify);
  3376. if (control != NULL) {
  3377. control->length = SCTP_BUF_LEN(m_notify);
  3378. /* not that we need this */
  3379. control->tail_mbuf = m_notify;
  3380. control->spec_flags = M_NOTIFICATION;
  3381. sctp_add_to_readq(stcb->sctp_ep, stcb,
  3382. control,
  3383. &stcb->sctp_socket->so_rcv, 1,
  3384. SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
  3385. } else {
  3386. sctp_m_freem(m_notify);
  3387. }
  3388. }
/*
 * Central ULP notification dispatcher.  Maps an SCTP_NOTIFY_* code (plus
 * an error value and a notification-specific 'data' pointer) to the
 * matching socket-level notification helper.  Does nothing when the
 * socket is gone, closed, or can no longer be read from.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
#else
	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
#endif
		/* nobody can read the notification anyway */
		return;
	}
#if defined(__APPLE__)
	/* sanity-check that the caller's so_locked claim matches reality */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			/* report COMM_UP only once per association */
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
#if defined (__Userspace__)
		if (stcb->sctp_ep->recv_callback) {
			if (stcb->sctp_socket) {
				union sctp_sockstore addr;
				struct sctp_rcvinfo rcv;

				memset(&addr, 0, sizeof(union sctp_sockstore));
				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				/* a zero-length read tells the app the assoc is gone */
				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
#endif
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
	{
		struct sctp_nets *net;

		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
		    (struct sockaddr *)&net->ro._l_addr, error);
		break;
	}
	case SCTP_NOTIFY_INTERFACE_UP:
	{
		struct sctp_nets *net;

		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
		    (struct sockaddr *)&net->ro._l_addr, error);
		break;
	}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
	{
		struct sctp_nets *net;

		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
		    (struct sockaddr *)&net->ro._l_addr, error);
		break;
	}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is a stream-queue-pending entry that never became a chunk */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
	{
		uint32_t val;

		/* val packs stream id (high 16 bits) and sequence (low 16) */
		val = *((uint32_t *)data);
		sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
		break;
	}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			/* abort before establishment: could not start the assoc */
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/*
	 * NOTE(review): for the stream-reset cases below, 'error' appears
	 * to carry the number of entries in the 'data' stream-id list -
	 * confirm against the callers in sctp_input.c.
	 */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* for the AUTH cases, 'data' carries the key id as an integer */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	} /* end switch */
}
/*
 * Fail every user message still queued for transmission on this association
 * and notify the ULP about each one.  Walks the sent queue (sent but not yet
 * acked), the pending send queue, and every outbound stream's queue, freeing
 * chunk data and per-queue bookkeeping as it goes.
 *
 * error:      error code forwarded unchanged into each per-message
 *             notification.
 * holds_lock: non-zero when the caller already holds the TCB send lock;
 *             otherwise it is taken/released here.
 * so_locked:  whether the socket lock is held (asserted on Apple builds,
 *             forwarded to the notification/free helpers elsewhere).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket is gone or closed; nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* message made it onto the wire but is now lost */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* message never made it onto the wire */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/*sa_ignore FREED_MEMORY*/
		}
	}
	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
/*
 * Tell the ULP that this association was aborted: fail all queued outbound
 * data, then deliver either a remote- or local-abort notification.
 *
 * from_peer: non-zero when the peer sent the ABORT (remote abort), zero when
 *            we generated it locally.
 * error:     error code forwarded into the notifications.
 * abort:     the ABORT chunk (may be NULL) passed through to the notify path.
 * so_locked: whether the socket lock is held (asserted on Apple builds).
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	/* 1-to-1 style sockets remember the abort so connect()/read() can fail */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket already gone; no ULP to notify */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT back at the packet's source, and free
 * the TCB.  When stcb is NULL only the wire ABORT is sent (vtag 0).
 *
 * m/iphlen/sh: the triggering inbound packet, its IP header length, and its
 *              SCTP common header - used to address the outgoing ABORT.
 * op_err:      optional error-cause mbuf chain appended to the ABORT.
 * vrf_id/port: routing fib and UDP encapsulation port for the reply;
 *              vrf_id is overridden by the association's own when present.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock, take the socket lock,
		 * re-take the TCB lock; the refcnt bump keeps the TCB alive
		 * across the unlocked window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN ring
 * buffers (in_tsnlog/out_tsnlog).  Each ring is printed in two passes when
 * it has wrapped: first the older entries from the current index to the end,
 * then the newer entries from 0 to the current index.
 *
 * NOTE(review): "NOSIY_PRINTS" below looks like a typo for "NOISY_PRINTS"
 * and is never defined anywhere visible, so the whole body compiles out and
 * this function is a no-op - confirm intent before renaming the guard.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* ring wrapped: older entries live above the write index */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
/*
 * Locally abort an existing (or half-built) association: notify the ULP,
 * send an ABORT to the peer, and free the TCB.  When stcb is NULL and the
 * socket is already gone with no remaining associations, the inp itself is
 * freed instead.
 *
 * op_err:    optional error-cause mbuf chain sent with the ABORT.
 * so_locked: whether the socket lock is already held; controls the
 *            lock-juggling below on Apple/lock-testing builds.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association gone: tear down the endpoint */
#if defined(__APPLE__)
				if (!so_locked) {
					SCTP_SOCKET_LOCK(so, 1);
				}
#endif
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__)
				if (!so_locked) {
					SCTP_SOCKET_UNLOCK(so, 1);
				}
#endif
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: the socket lock must be taken before the TCB
	 * lock, so drop/retake with a refcnt hold to keep the TCB alive.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
/*
 * Handle an out-of-the-blue packet (one that matches no known association).
 * Scans the chunks to decide how to respond: several chunk types suppress
 * any reply entirely, SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE, and everything
 * else gets an ABORT - unless the sctp_blackhole sysctl silences it.
 *
 * m/iphlen/offset/sh: the inbound packet, its IP header length, the offset
 *                     of the first chunk, and the SCTP common header.
 * inp:     endpoint the packet arrived on (may be mid-teardown, see below).
 * op_err:  optional error-cause chain for the ABORT.
 * vrf_id/port: routing fib and UDP encapsulation port for any reply.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* endpoint closing and no asocs left: finish freeing it */
#if defined(__APPLE__)
			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__)
			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
#endif
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered so the blackhole sysctl can suppress the ABORT */
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole==0: always abort; ==1: abort unless an INIT was present */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	     (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
	}
}
  3964. /*
  3965. * check the inbound datagram to make sure there is not an abort inside it,
  3966. * if there is return 1, else return 0.
  3967. */
  3968. int
  3969. sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
  3970. {
  3971. struct sctp_chunkhdr *ch;
  3972. struct sctp_init_chunk *init_chk, chunk_buf;
  3973. int offset;
  3974. unsigned int chk_length;
  3975. offset = iphlen + sizeof(struct sctphdr);
  3976. ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
  3977. (uint8_t *) & chunk_buf);
  3978. while (ch != NULL) {
  3979. chk_length = ntohs(ch->chunk_length);
  3980. if (chk_length < sizeof(*ch)) {
  3981. /* packet is probably corrupt */
  3982. break;
  3983. }
  3984. /* we seem to be ok, is it an abort? */
  3985. if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
  3986. /* yep, tell them */
  3987. return (1);
  3988. }
  3989. if (ch->chunk_type == SCTP_INITIATION) {
  3990. /* need to update the Vtag */
  3991. init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
  3992. offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
  3993. if (init_chk != NULL) {
  3994. *vtagfill = ntohl(init_chk->init.initiate_tag);
  3995. }
  3996. }
  3997. /* Nope, move to the next chunk */
  3998. offset += SCTP_SIZE32(chk_length);
  3999. ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
  4000. sizeof(*ch), (uint8_t *) & chunk_buf);
  4001. }
  4002. return (0);
  4003. }
/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e. it's 0) so, create this function to compare link local scopes
 *
 * Returns 1 when both addresses resolve to the same IPv6 scope, 0 when
 * they differ or a scope cannot be recovered.
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
#if defined(__Userspace__)
	/*__Userspace__ Returning 1 here always */
#endif
#if defined(SCTP_EMBEDDED_V6_SCOPE)
	struct sockaddr_in6 a, b;

	/* save copies - scope recovery may modify the address */
	a = *addr1;
	b = *addr2;

	/* note: brace-less outer if; only the recoverscope call is guarded */
	if (a.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&a)) {
#else
		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&b)) {
#else
		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);
#else
	/* scope ids are already in sin6_scope_id; compare directly */
	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
		return (0);
#endif /* SCTP_EMBEDDED_V6_SCOPE */
	return (1);
}
#if defined(SCTP_EMBEDDED_V6_SCOPE)
/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 *
 * For a link-local address with sin6_scope_id == 0, the scope is recovered
 * into *store and that copy is returned; otherwise the embedded scope is
 * stripped from the original address in place.  Non-link-local and non-INET6
 * addresses pass through untouched.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/* work on a copy so the caller's addr is untouched */
				*store = *addr;
#ifdef SCTP_KAME
				if (!sa6_recoverscope(store)) {
#else
				if (!in6_recoverscope(store, &store->sin6_addr,
				    NULL)) {
#endif /* SCTP_KAME */
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif
  4077. /*
  4078. * are the two addresses the same? currently a "scopeless" check returns: 1
  4079. * if same, 0 if not
  4080. */
  4081. int
  4082. sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
  4083. {
  4084. /* must be valid */
  4085. if (sa1 == NULL || sa2 == NULL)
  4086. return (0);
  4087. /* must be the same family */
  4088. if (sa1->sa_family != sa2->sa_family)
  4089. return (0);
  4090. switch (sa1->sa_family) {
  4091. #ifdef INET6
  4092. case AF_INET6:
  4093. {
  4094. /* IPv6 addresses */
  4095. struct sockaddr_in6 *sin6_1, *sin6_2;
  4096. sin6_1 = (struct sockaddr_in6 *)sa1;
  4097. sin6_2 = (struct sockaddr_in6 *)sa2;
  4098. return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
  4099. sin6_2));
  4100. }
  4101. #endif
  4102. #ifdef INET
  4103. case AF_INET:
  4104. {
  4105. /* IPv4 addresses */
  4106. struct sockaddr_in *sin_1, *sin_2;
  4107. sin_1 = (struct sockaddr_in *)sa1;
  4108. sin_2 = (struct sockaddr_in *)sa2;
  4109. return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
  4110. }
  4111. #endif
  4112. default:
  4113. /* we don't do these... */
  4114. return (0);
  4115. }
  4116. }
/*
 * Debug helper: print a sockaddr (IPv4 or IPv6, with port and - for IPv6 -
 * scope id) to the SCTP log.  Unsupported families print "?".
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
	ip6buf[0] = 0;
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sa;
#if defined(__Userspace__)
		/* no ip6_sprintf in userspace builds; format by hand */
		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
		    ntohs(sin6->sin6_addr.s6_addr16[0]),
		    ntohs(sin6->sin6_addr.s6_addr16[1]),
		    ntohs(sin6->sin6_addr.s6_addr16[2]),
		    ntohs(sin6->sin6_addr.s6_addr16[3]),
		    ntohs(sin6->sin6_addr.s6_addr16[4]),
		    ntohs(sin6->sin6_addr.s6_addr16[5]),
		    ntohs(sin6->sin6_addr.s6_addr16[6]),
		    ntohs(sin6->sin6_addr.s6_addr16[7]),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
#else
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
		    /* FreeBSD 7+ ip6_sprintf takes a caller buffer */
		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
#else
		    ip6_sprintf(&sin6->sin6_addr),
#endif
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
#endif
		break;
	}
#endif
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;
		unsigned char *p;

		sin = (struct sockaddr_in *)sa;
		/* print the four address bytes in network order */
		p = (unsigned char *)&sin->sin_addr;
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		break;
	}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
/*
 * Debug helper: print the source and destination address/port of a packet
 * given its IP header and SCTP common header.  Builds temporary sockaddr
 * structures and delegates the formatting to sctp_print_address().
 */
void
sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
{
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
	{
		struct sockaddr_in lsa, fsa;

		bzero(&lsa, sizeof(lsa));
#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
		/* sin_len only exists on BSD-style sockaddrs */
		lsa.sin_len = sizeof(lsa);
#endif
		lsa.sin_family = AF_INET;
		lsa.sin_addr = iph->ip_src;
		lsa.sin_port = sh->src_port;
		bzero(&fsa, sizeof(fsa));
#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
		fsa.sin_len = sizeof(fsa);
#endif
		fsa.sin_family = AF_INET;
		fsa.sin_addr = iph->ip_dst;
		fsa.sin_port = sh->dest_port;
		SCTP_PRINTF("src: ");
		sctp_print_address((struct sockaddr *)&lsa);
		SCTP_PRINTF("dest: ");
		sctp_print_address((struct sockaddr *)&fsa);
		break;
	}
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
	{
		struct ip6_hdr *ip6;
		struct sockaddr_in6 lsa6, fsa6;

		ip6 = (struct ip6_hdr *)iph;
		bzero(&lsa6, sizeof(lsa6));
#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
		lsa6.sin6_len = sizeof(lsa6);
#endif
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = ip6->ip6_src;
		lsa6.sin6_port = sh->src_port;
		bzero(&fsa6, sizeof(fsa6));
#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
		fsa6.sin6_len = sizeof(fsa6);
#endif
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = ip6->ip6_dst;
		fsa6.sin6_port = sh->dest_port;
		SCTP_PRINTF("src: ");
		sctp_print_address((struct sockaddr *)&lsa6);
		SCTP_PRINTF("dest: ");
		sctp_print_address((struct sockaddr *)&fsa6);
		break;
	}
#endif
	default:
		/* TSNH */
		break;
	}
}
/*
 * Move all read-queue control structures belonging to stcb from old_inp to
 * new_inp (used by peeloff/accept).  Socket-buffer accounting is transferred
 * too: each mbuf is sbfree'd from the old socket and sballoc'd on the new.
 *
 * waitflags: passed to sblock() when taking the old socket's receive-buffer
 *            lock; on failure the data is deliberately left in place.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_LOCK(&(old_so->so_rcv));
#endif
#if defined(__FreeBSD__) || defined(__APPLE__)
	error = sblock(&old_so->so_rcv, waitflags);
#endif
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_UNLOCK(&(old_so->so_rcv));
#endif
	if (error) {
		/* Gak, can't get sblock, we have a problem.
		 * data will be left stranded.. and we
		 * don't dare look at it since the
		 * other thread may be reading something.
		 * Oh well, its a screwed up app that does
		 * a peeloff OR a accept while reading
		 * from the main socket... actually its
		 * only the peeloff() case, since I think
		 * read will fail on a listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release the old socket's accounting for each mbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_LOCK(&(old_so->so_rcv));
#endif
#if defined(__APPLE__)
	sbunlock(&old_so->so_rcv, 1);
#endif
#if defined(__FreeBSD__)
	sbunlock(&old_so->so_rcv);
#endif
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_UNLOCK(&(old_so->so_rcv));
#endif
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the new socket for each mbuf */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
/*
 * Append a completed read-queue entry to the endpoint's read queue and wake
 * any reader.  Zero-length mbufs are pruned from the chain, socket-buffer
 * space is charged per mbuf, and control->length is recomputed from scratch.
 * On userspace builds with a recv_callback registered, the data is instead
 * delivered directly through the callback and the control is freed.
 *
 * sb:                 socket receive buffer to charge (sballoc accounting).
 * end:                non-zero marks this entry as the end of the message.
 * inp_read_lock_held: non-zero when the caller already holds the INP read
 *                     lock; otherwise it is taken/released here.
 * so_locked:          whether the socket lock is held (Apple asserts; also
 *                     controls the wakeup lock dance below).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side shut down: drop the data and the control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
#if defined(__Userspace__)
	if (inp->recv_callback) {
		/* callback delivery path: hand the data to the app directly */
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		if (control->end_added == 1) {
			struct socket *so;
			/* NOTE(review): this inner 'm' shadows the outer 'm' */
			struct mbuf *m;
			char *buffer;
			struct sctp_rcvinfo rcv;
			union sctp_sockstore addr;
			int flags = 0;

			if ((buffer = malloc(control->length)) == NULL) {
				return;
			}
			so = inp->sctp_socket;
			/* undo the sballoc accounting done above */
			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
			}
			/* hold the TCB across the unlocked callback */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			m_copydata(control->data, 0, control->length, buffer);
			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
			rcv.rcv_sid = control->sinfo_stream;
			rcv.rcv_ssn = control->sinfo_ssn;
			rcv.rcv_flags = control->sinfo_flags;
			rcv.rcv_ppid = control->sinfo_ppid;
			rcv.rcv_tsn = control->sinfo_tsn;
			rcv.rcv_cumtsn = control->sinfo_cumtsn;
			rcv.rcv_context = control->sinfo_context;
			rcv.rcv_assoc_id = control->sinfo_assoc_id;
			memset(&addr, 0, sizeof(union sctp_sockstore));
			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
#ifdef INET
			case AF_INET:
				addr.sin = control->whoFrom->ro._l_addr.sin;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
				break;
#endif
			default:
				addr.sa = control->whoFrom->ro._l_addr.sa;
				break;
			}
			if (control->spec_flags & M_NOTIFICATION) {
				flags |= MSG_NOTIFICATION;
			}
			if (control->spec_flags & M_EOR) {
				flags |= MSG_EOR;
			}
			inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			sctp_m_freem(control->data);
			control->data = NULL;
			control->length = 0;
			sctp_free_a_readq(stcb, control);
		}
		return;
	}
#endif
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* lock-order dance: socket lock before TCB lock */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
  4519. int
  4520. sctp_append_to_readq(struct sctp_inpcb *inp,
  4521. struct sctp_tcb *stcb,
  4522. struct sctp_queued_to_read *control,
  4523. struct mbuf *m,
  4524. int end,
  4525. int ctls_cumack,
  4526. struct sockbuf *sb)
  4527. {
  4528. /*
  4529. * A partial delivery API event is underway. OR we are appending on
  4530. * the reassembly queue.
  4531. *
  4532. * If PDAPI this means we need to add m to the end of the data.
  4533. * Increase the length in the control AND increment the sb_cc.
  4534. * Otherwise sb is NULL and all we need to do is put it at the end
  4535. * of the mbuf chain.
  4536. */
  4537. int len = 0;
  4538. struct mbuf *mm, *tail = NULL, *prev = NULL;
  4539. if (inp) {
  4540. SCTP_INP_READ_LOCK(inp);
  4541. }
  4542. if (control == NULL) {
  4543. get_out:
  4544. if (inp) {
  4545. SCTP_INP_READ_UNLOCK(inp);
  4546. }
  4547. return (-1);
  4548. }
  4549. if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
  4550. SCTP_INP_READ_UNLOCK(inp);
  4551. return (0);
  4552. }
  4553. if (control->end_added) {
  4554. /* huh this one is complete? */
  4555. goto get_out;
  4556. }
  4557. mm = m;
  4558. if (mm == NULL) {
  4559. goto get_out;
  4560. }
  4561. while (mm) {
  4562. if (SCTP_BUF_LEN(mm) == 0) {
  4563. /* Skip mbufs with NO lenght */
  4564. if (prev == NULL) {
  4565. /* First one */
  4566. m = sctp_m_free(mm);
  4567. mm = m;
  4568. } else {
  4569. SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
  4570. mm = SCTP_BUF_NEXT(prev);
  4571. }
  4572. continue;
  4573. }
  4574. prev = mm;
  4575. len += SCTP_BUF_LEN(mm);
  4576. if (sb) {
  4577. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4578. sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
  4579. }
  4580. sctp_sballoc(stcb, sb, mm);
  4581. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4582. sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  4583. }
  4584. }
  4585. mm = SCTP_BUF_NEXT(mm);
  4586. }
  4587. if (prev) {
  4588. tail = prev;
  4589. } else {
  4590. /* Really there should always be a prev */
  4591. if (m == NULL) {
  4592. /* Huh nothing left? */
  4593. #ifdef INVARIANTS
  4594. panic("Nothing left to add?");
  4595. #else
  4596. goto get_out;
  4597. #endif
  4598. }
  4599. tail = m;
  4600. }
  4601. if (control->tail_mbuf) {
  4602. /* append */
  4603. SCTP_BUF_NEXT(control->tail_mbuf) = m;
  4604. control->tail_mbuf = tail;
  4605. } else {
  4606. /* nothing there */
  4607. #ifdef INVARIANTS
  4608. if (control->data != NULL) {
  4609. panic("This should NOT happen");
  4610. }
  4611. #endif
  4612. control->data = m;
  4613. control->tail_mbuf = tail;
  4614. }
  4615. atomic_add_int(&control->length, len);
  4616. if (end) {
  4617. /* message is complete */
  4618. if (stcb && (control == stcb->asoc.control_pdapi)) {
  4619. stcb->asoc.control_pdapi = NULL;
  4620. }
  4621. control->held_length = 0;
  4622. control->end_added = 1;
  4623. }
  4624. if (stcb == NULL) {
  4625. control->do_not_ref_stcb = 1;
  4626. }
  4627. /*
  4628. * When we are appending in partial delivery, the cum-ack is used
  4629. * for the actual pd-api highest tsn on this mbuf. The true cum-ack
  4630. * is populated in the outbound sinfo structure from the true cumack
  4631. * if the association exists...
  4632. */
  4633. #if defined(__Userspace__)
  4634. if (inp->recv_callback) {
  4635. if (control->end_added == 1) {
  4636. struct socket *so;
  4637. char *buffer;
  4638. struct sctp_rcvinfo rcv;
  4639. union sctp_sockstore addr;
  4640. int flags = 0;
  4641. if ((buffer = malloc(control->length)) == NULL) {
  4642. return (-1);
  4643. }
  4644. so = inp->sctp_socket;
  4645. for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
  4646. sctp_sbfree(control, control->stcb, &so->so_rcv, m);
  4647. }
  4648. atomic_add_int(&stcb->asoc.refcnt, 1);
  4649. SCTP_TCB_UNLOCK(stcb);
  4650. m_copydata(control->data, 0, control->length, buffer);
  4651. memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
  4652. rcv.rcv_sid = control->sinfo_stream;
  4653. rcv.rcv_ssn = control->sinfo_ssn;
  4654. rcv.rcv_flags = control->sinfo_flags;
  4655. rcv.rcv_ppid = control->sinfo_ppid;
  4656. rcv.rcv_tsn = control->sinfo_tsn;
  4657. rcv.rcv_cumtsn = control->sinfo_cumtsn;
  4658. rcv.rcv_context = control->sinfo_context;
  4659. rcv.rcv_assoc_id = control->sinfo_assoc_id;
  4660. memset(&addr, 0, sizeof(union sctp_sockstore));
  4661. switch (control->whoFrom->ro._l_addr.sa.sa_family) {
  4662. #ifdef INET
  4663. case AF_INET:
  4664. addr.sin = control->whoFrom->ro._l_addr.sin;
  4665. break;
  4666. #endif
  4667. #ifdef INET6
  4668. case AF_INET6:
  4669. addr.sin6 = control->whoFrom->ro._l_addr.sin6;
  4670. break;
  4671. #endif
  4672. default:
  4673. addr.sa = control->whoFrom->ro._l_addr.sa;
  4674. break;
  4675. }
  4676. if (control->spec_flags & M_NOTIFICATION) {
  4677. flags |= MSG_NOTIFICATION;
  4678. }
  4679. if (control->spec_flags & M_EOR) {
  4680. flags |= MSG_EOR;
  4681. }
  4682. inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
  4683. SCTP_TCB_LOCK(stcb);
  4684. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4685. sctp_m_freem(control->data);
  4686. control->data = NULL;
  4687. control->length = 0;
  4688. sctp_free_a_readq(stcb, control);
  4689. }
  4690. if (inp)
  4691. SCTP_INP_READ_UNLOCK(inp);
  4692. return (0);
  4693. }
  4694. #endif
  4695. control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
  4696. if (inp) {
  4697. SCTP_INP_READ_UNLOCK(inp);
  4698. }
  4699. if (inp && inp->sctp_socket) {
  4700. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  4701. SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
  4702. } else {
  4703. #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4704. struct socket *so;
  4705. so = SCTP_INP_SO(inp);
  4706. if (stcb) {
  4707. atomic_add_int(&stcb->asoc.refcnt, 1);
  4708. SCTP_TCB_UNLOCK(stcb);
  4709. }
  4710. SCTP_SOCKET_LOCK(so, 1);
  4711. if (stcb) {
  4712. SCTP_TCB_LOCK(stcb);
  4713. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4714. }
  4715. if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  4716. SCTP_SOCKET_UNLOCK(so, 1);
  4717. return (0);
  4718. }
  4719. #endif
  4720. sctp_sorwakeup(inp, inp->sctp_socket);
  4721. #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4722. SCTP_SOCKET_UNLOCK(so, 1);
  4723. #endif
  4724. }
  4725. }
  4726. return (0);
  4727. }
  4728. /*************HOLD THIS COMMENT FOR PATCH FILE OF
  4729. *************ALTERNATE ROUTING CODE
  4730. */
  4731. /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
  4732. *************ALTERNATE ROUTING CODE
  4733. */
  4734. struct mbuf *
  4735. sctp_generate_invmanparam(int err)
  4736. {
  4737. /* Return a MBUF with a invalid mandatory parameter */
  4738. struct mbuf *m;
  4739. m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
  4740. if (m) {
  4741. struct sctp_paramhdr *ph;
  4742. SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
  4743. ph = mtod(m, struct sctp_paramhdr *);
  4744. ph->param_length = htons(sizeof(struct sctp_paramhdr));
  4745. ph->param_type = htons(err);
  4746. }
  4747. return (m);
  4748. }
  4749. #ifdef SCTP_MBCNT_LOGGING
  4750. void
  4751. sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
  4752. struct sctp_tmit_chunk *tp1, int chk_cnt)
  4753. {
  4754. if (tp1->data == NULL) {
  4755. return;
  4756. }
  4757. asoc->chunks_on_out_queue -= chk_cnt;
  4758. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
  4759. sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
  4760. asoc->total_output_queue_size,
  4761. tp1->book_size,
  4762. 0,
  4763. tp1->mbcnt);
  4764. }
  4765. if (asoc->total_output_queue_size >= tp1->book_size) {
  4766. atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
  4767. } else {
  4768. asoc->total_output_queue_size = 0;
  4769. }
  4770. if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
  4771. ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
  4772. if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
  4773. stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
  4774. } else {
  4775. stcb->sctp_socket->so_snd.sb_cc = 0;
  4776. }
  4777. }
  4778. }
  4779. #endif
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
    /*
     * Abandon a PR-SCTP user message starting at chunk tp1: every
     * fragment of the same (stream, seq) message — on the sent queue,
     * the send queue, and the stream-out queue — is marked
     * SCTP_FORWARD_TSN_SKIP, its data freed, and the ULP notified with
     * SENT_DG_FAIL (sent != 0) or UNSENT_DG_FAIL. Returns the number
     * of bytes released (sum of book sizes).
     */
    struct sctp_stream_out *strq;
    struct sctp_tmit_chunk *chk = NULL, *tp2;
    struct sctp_stream_queue_pending *sp;
    uint16_t stream = 0, seq = 0;
    uint8_t foundeom = 0;    /* set once the LAST fragment has been seen */
    int ret_sz = 0;
    int notdone;
    int do_wakeup_routine = 0;

#if defined(__APPLE__)
    if (so_locked) {
        sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
    } else {
        sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
    }
#endif
    stream = tp1->rec.data.stream_number;
    seq = tp1->rec.data.stream_seq;
    /* First pass: walk forward from tp1 on the current queue until the
     * end of the message (or the queue) is reached. */
    do {
        ret_sz += tp1->book_size;
        if (tp1->data != NULL) {
            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                /* Chunk was in flight; remove it from the counts. */
                sctp_flight_size_decrease(tp1);
                sctp_total_flight_decrease(stcb, tp1);
            }
            sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
            /* Give the bytes back to the peer's receive window. */
            stcb->asoc.peers_rwnd += tp1->send_size;
            stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
            if (sent) {
                sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
            } else {
                sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
            }
            if (tp1->data) {
                sctp_m_freem(tp1->data);
                tp1->data = NULL;
            }
            do_wakeup_routine = 1;
            if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
                stcb->asoc.sent_queue_cnt_removeable--;
            }
        }
        tp1->sent = SCTP_FORWARD_TSN_SKIP;
        if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
            SCTP_DATA_NOT_FRAG) {
            /* not frag'ed we are done */
            notdone = 0;
            foundeom = 1;
        } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            /* end of frag, we are done */
            notdone = 0;
            foundeom = 1;
        } else {
            /*
             * Its a begin or middle piece, we must mark all of
             * it
             */
            notdone = 1;
            tp1 = TAILQ_NEXT(tp1, sctp_next);
        }
    } while (tp1 && notdone);
    if (foundeom == 0) {
        /*
         * The multi-part message was scattered across the send and
         * sent queue. Continue on the send queue.
         */
        TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
            if ((tp1->rec.data.stream_number != stream) ||
                (tp1->rec.data.stream_seq != seq)) {
                /* Different message starts here; stop. */
                break;
            }
            /* save to chk in case we have some on stream out
             * queue. If so and we have an un-transmitted one
             * we don't have to fudge the TSN.
             */
            chk = tp1;
            ret_sz += tp1->book_size;
            sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
            if (sent) {
                sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
            } else {
                sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
            }
            if (tp1->data) {
                sctp_m_freem(tp1->data);
                tp1->data = NULL;
            }
            /* No flight involved here book the size to 0 */
            tp1->book_size = 0;
            if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                foundeom = 1;
            }
            do_wakeup_routine = 1;
            tp1->sent = SCTP_FORWARD_TSN_SKIP;
            TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
            /* on to the sent queue so we can wait for it to be passed by. */
            TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
                sctp_next);
            stcb->asoc.send_queue_cnt--;
            stcb->asoc.sent_queue_cnt++;
        }
    }
    if (foundeom == 0) {
        /*
         * Still no eom found. That means there
         * is stuff left on the stream out queue.. yuck.
         */
        strq = &stcb->asoc.strmout[stream];
        SCTP_TCB_SEND_LOCK(stcb);
        TAILQ_FOREACH(sp, &strq->outqueue, next) {
            /* FIXME: Shouldn't this be a serial number check? */
            if (sp->strseq > seq) {
                break;
            }
            /* Check if its our SEQ */
            if (sp->strseq == seq) {
                sp->discard_rest = 1;
                /*
                 * We may need to put a chunk on the
                 * queue that holds the TSN that
                 * would have been sent with the LAST
                 * bit.
                 */
                if (chk == NULL) {
                    /* Yep, we have to */
                    sctp_alloc_a_chunk(stcb, chk);
                    if (chk == NULL) {
                        /* we are hosed. All we can
                         * do is nothing.. which will
                         * cause an abort if the peer is
                         * paying attention.
                         */
                        goto oh_well;
                    }
                    memset(chk, 0, sizeof(*chk));
                    chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
                    chk->sent = SCTP_FORWARD_TSN_SKIP;
                    chk->asoc = &stcb->asoc;
                    chk->rec.data.stream_seq = sp->strseq;
                    chk->rec.data.stream_number = sp->stream;
                    chk->rec.data.payloadtype = sp->ppid;
                    chk->rec.data.context = sp->context;
                    chk->flags = sp->act_flags;
                    if (sp->net)
                        chk->whoTo = sp->net;
                    else
                        chk->whoTo = stcb->asoc.primary_destination;
                    atomic_add_int(&chk->whoTo->ref_count, 1);
#if defined(__FreeBSD__) || defined(__Panda__)
                    chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
#else
                    chk->rec.data.TSN_seq = stcb->asoc.sending_seq++;
#endif
                    stcb->asoc.pr_sctp_cnt++;
                    chk->pr_sctp_on = 1;
                    TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
                    stcb->asoc.sent_queue_cnt++;
                    /* NOTE(review): pr_sctp_cnt is incremented twice
                     * in this branch (here and a few lines above) —
                     * looks like a double count; verify. */
                    stcb->asoc.pr_sctp_cnt++;
                } else {
                    chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
                }
            oh_well:
                if (sp->data) {
                    /* Pull any data to free up the SB and
                     * allow sender to "add more" while we
                     * will throw away :-)
                     */
                    sctp_free_spbufspace(stcb, &stcb->asoc,
                        sp);
                    ret_sz += sp->length;
                    do_wakeup_routine = 1;
                    sp->some_taken = 1;
                    sctp_m_freem(sp->data);
                    sp->length = 0;
                    sp->data = NULL;
                    sp->tail_mbuf = NULL;
                }
                break;
            }
        } /* End tailq_foreach */
        SCTP_TCB_SEND_UNLOCK(stcb);
    }
    if (do_wakeup_routine) {
        /* Data was freed; wake any writer blocked on the send buffer. */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

        so = SCTP_INP_SO(stcb->sctp_ep);
        if (!so_locked) {
            /* Take the socket lock without holding the TCB lock. */
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 1);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
            if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                /* assoc was freed while we were unlocked */
                SCTP_SOCKET_UNLOCK(so, 1);
                return (ret_sz);
            }
        }
#endif
        sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        if (!so_locked) {
            SCTP_SOCKET_UNLOCK(so, 1);
        }
#endif
    }
    return (ret_sz);
}
  4995. /*
  4996. * checks to see if the given address, sa, is one that is currently known by
  4997. * the kernel note: can't distinguish the same address on multiple interfaces
  4998. * and doesn't handle multiple addresses with different zone/scope id's note:
  4999. * ifa_ifwithaddr() compares the entire sockaddr struct
  5000. */
  5001. struct sctp_ifa *
  5002. sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
  5003. int holds_lock)
  5004. {
  5005. struct sctp_laddr *laddr;
  5006. if (holds_lock == 0) {
  5007. SCTP_INP_RLOCK(inp);
  5008. }
  5009. LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
  5010. if (laddr->ifa == NULL)
  5011. continue;
  5012. if (addr->sa_family != laddr->ifa->address.sa.sa_family)
  5013. continue;
  5014. #ifdef INET
  5015. if (addr->sa_family == AF_INET) {
  5016. if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
  5017. laddr->ifa->address.sin.sin_addr.s_addr) {
  5018. /* found him. */
  5019. if (holds_lock == 0) {
  5020. SCTP_INP_RUNLOCK(inp);
  5021. }
  5022. return (laddr->ifa);
  5023. break;
  5024. }
  5025. }
  5026. #endif
  5027. #ifdef INET6
  5028. if (addr->sa_family == AF_INET6) {
  5029. if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
  5030. &laddr->ifa->address.sin6)) {
  5031. /* found him. */
  5032. if (holds_lock == 0) {
  5033. SCTP_INP_RUNLOCK(inp);
  5034. }
  5035. return (laddr->ifa);
  5036. break;
  5037. }
  5038. }
  5039. #endif
  5040. }
  5041. if (holds_lock == 0) {
  5042. SCTP_INP_RUNLOCK(inp);
  5043. }
  5044. return (NULL);
  5045. }
  5046. uint32_t
  5047. sctp_get_ifa_hash_val(struct sockaddr *addr)
  5048. {
  5049. switch (addr->sa_family) {
  5050. #ifdef INET
  5051. case AF_INET:
  5052. {
  5053. struct sockaddr_in *sin;
  5054. sin = (struct sockaddr_in *)addr;
  5055. return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
  5056. }
  5057. #endif
  5058. #ifdef INET6
  5059. case INET6:
  5060. {
  5061. struct sockaddr_in6 *sin6;
  5062. uint32_t hash_of_addr;
  5063. sin6 = (struct sockaddr_in6 *)addr;
  5064. #if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
  5065. hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
  5066. sin6->sin6_addr.s6_addr32[1] +
  5067. sin6->sin6_addr.s6_addr32[2] +
  5068. sin6->sin6_addr.s6_addr32[3]);
  5069. #else
  5070. hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
  5071. ((uint32_t *)&sin6->sin6_addr)[1] +
  5072. ((uint32_t *)&sin6->sin6_addr)[2] +
  5073. ((uint32_t *)&sin6->sin6_addr)[3]);
  5074. #endif
  5075. hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
  5076. return (hash_of_addr);
  5077. }
  5078. #endif
  5079. default:
  5080. break;
  5081. }
  5082. return (0);
  5083. }
  5084. struct sctp_ifa *
  5085. sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
  5086. {
  5087. struct sctp_ifa *sctp_ifap;
  5088. struct sctp_vrf *vrf;
  5089. struct sctp_ifalist *hash_head;
  5090. uint32_t hash_of_addr;
  5091. if (holds_lock == 0)
  5092. SCTP_IPI_ADDR_RLOCK();
  5093. vrf = sctp_find_vrf(vrf_id);
  5094. if (vrf == NULL) {
  5095. stage_right:
  5096. if (holds_lock == 0)
  5097. SCTP_IPI_ADDR_RUNLOCK();
  5098. return (NULL);
  5099. }
  5100. hash_of_addr = sctp_get_ifa_hash_val(addr);
  5101. hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
  5102. if (hash_head == NULL) {
  5103. SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
  5104. hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
  5105. (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
  5106. sctp_print_address(addr);
  5107. SCTP_PRINTF("No such bucket for address\n");
  5108. if (holds_lock == 0)
  5109. SCTP_IPI_ADDR_RUNLOCK();
  5110. return (NULL);
  5111. }
  5112. LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
  5113. if (sctp_ifap == NULL) {
  5114. #ifdef INVARIANTS
  5115. panic("Huh LIST_FOREACH corrupt");
  5116. goto stage_right;
  5117. #else
  5118. SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
  5119. goto stage_right;
  5120. #endif
  5121. }
  5122. if (addr->sa_family != sctp_ifap->address.sa.sa_family)
  5123. continue;
  5124. #ifdef INET
  5125. if (addr->sa_family == AF_INET) {
  5126. if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
  5127. sctp_ifap->address.sin.sin_addr.s_addr) {
  5128. /* found him. */
  5129. if (holds_lock == 0)
  5130. SCTP_IPI_ADDR_RUNLOCK();
  5131. return (sctp_ifap);
  5132. break;
  5133. }
  5134. }
  5135. #endif
  5136. #ifdef INET6
  5137. if (addr->sa_family == AF_INET6) {
  5138. if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
  5139. &sctp_ifap->address.sin6)) {
  5140. /* found him. */
  5141. if (holds_lock == 0)
  5142. SCTP_IPI_ADDR_RUNLOCK();
  5143. return (sctp_ifap);
  5144. break;
  5145. }
  5146. }
  5147. #endif
  5148. }
  5149. if (holds_lock == 0)
  5150. SCTP_IPI_ADDR_RUNLOCK();
  5151. return (NULL);
  5152. }
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
    /* User pulled some data, do we need a rwnd update?
     *
     * Called from the receive path after the user consumed
     * *freed_so_far bytes. If the window has opened by at least
     * rwnd_req, send an immediate window-update SACK; otherwise just
     * accumulate the freed bytes. hold_rlock indicates the caller holds
     * the INP read lock (it is dropped around the SACK send and
     * reacquired before returning).
     */
    int r_unlocked = 0;
    uint32_t dif, rwnd;
    struct socket *so = NULL;

    if (stcb == NULL)
        return;

    /* Hold a reference so the assoc cannot be freed under us. */
    atomic_add_int(&stcb->asoc.refcnt, 1);

    if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
                            SCTP_STATE_SHUTDOWN_RECEIVED |
                            SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        /* Pre-check If we are freeing no update */
        goto no_lock;
    }
    SCTP_INP_INCR_REF(stcb->sctp_ep);
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
        goto out;
    }
    so = stcb->sctp_socket;
    if (so == NULL) {
        goto out;
    }
    atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
    /* Have you freed enough to warrant a look? */
    *freed_so_far = 0;
    /* Yep, its worth a look and the lock overhead */

    /* Figure out what the rwnd would be */
    rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
    if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
        dif = rwnd - stcb->asoc.my_last_reported_rwnd;
    } else {
        dif = 0;
    }
    if (dif >= rwnd_req) {
        /* Window opened enough: report it with a SACK right away. */
        if (hold_rlock) {
            SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
            r_unlocked = 1;
        }
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /*
             * One last check before we allow the guy possibly
             * to get in. There is a race, where the guy has not
             * reached the gate. In that case
             */
            goto out;
        }
        SCTP_TCB_LOCK(stcb);
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /* No reports here */
            SCTP_TCB_UNLOCK(stcb);
            goto out;
        }
        SCTP_STAT_INCR(sctps_wu_sacks_sent);
        sctp_send_sack(stcb, SCTP_SO_LOCKED);

        sctp_chunk_output(stcb->sctp_ep, stcb,
                          SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
        /* make sure no timer is running */
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_6);
        SCTP_TCB_UNLOCK(stcb);
    } else {
        /* Update how much we have pending */
        stcb->freed_by_sorcv_sincelast = dif;
    }
out:
    /* Restore the read lock if we dropped it above. */
    if (so && r_unlocked && hold_rlock) {
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
    }

    SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
    atomic_add_int(&stcb->asoc.refcnt, -1);
    return;
}
  5229. int
  5230. sctp_sorecvmsg(struct socket *so,
  5231. struct uio *uio,
  5232. struct mbuf **mp,
  5233. struct sockaddr *from,
  5234. int fromlen,
  5235. int *msg_flags,
  5236. struct sctp_sndrcvinfo *sinfo,
  5237. int filling_sinfo)
  5238. {
  5239. /*
  5240. * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
  5241. * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
  5242. * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
  5243. * On the way out we may send out any combination of:
  5244. * MSG_NOTIFICATION MSG_EOR
  5245. *
  5246. */
  5247. struct sctp_inpcb *inp = NULL;
  5248. int my_len = 0;
  5249. int cp_len = 0, error = 0;
  5250. struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
  5251. struct mbuf *m = NULL;
  5252. struct sctp_tcb *stcb = NULL;
  5253. int wakeup_read_socket = 0;
  5254. int freecnt_applied = 0;
  5255. int out_flags = 0, in_flags = 0;
  5256. int block_allowed = 1;
  5257. uint32_t freed_so_far = 0;
  5258. uint32_t copied_so_far = 0;
  5259. int in_eeor_mode = 0;
  5260. int no_rcv_needed = 0;
  5261. uint32_t rwnd_req = 0;
  5262. int hold_sblock = 0;
  5263. int hold_rlock = 0;
  5264. int slen = 0;
  5265. uint32_t held_length = 0;
  5266. #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  5267. int sockbuf_lock = 0;
  5268. #endif
  5269. if (uio == NULL) {
  5270. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  5271. return (EINVAL);
  5272. }
  5273. if (msg_flags) {
  5274. in_flags = *msg_flags;
  5275. if (in_flags & MSG_PEEK)
  5276. SCTP_STAT_INCR(sctps_read_peeks);
  5277. } else {
  5278. in_flags = 0;
  5279. }
  5280. #if defined(__APPLE__)
  5281. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  5282. slen = uio_resid(uio);
  5283. #else
  5284. slen = uio->uio_resid;
  5285. #endif
  5286. #else
  5287. slen = uio->uio_resid;
  5288. #endif
  5289. /* Pull in and set up our int flags */
  5290. if (in_flags & MSG_OOB) {
  5291. /* Out of band's NOT supported */
  5292. return (EOPNOTSUPP);
  5293. }
  5294. if ((in_flags & MSG_PEEK) && (mp != NULL)) {
  5295. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  5296. return (EINVAL);
  5297. }
  5298. if ((in_flags & (MSG_DONTWAIT
  5299. #if defined(__FreeBSD__) && __FreeBSD_version > 500000
  5300. | MSG_NBIO
  5301. #endif
  5302. )) ||
  5303. SCTP_SO_IS_NBIO(so)) {
  5304. block_allowed = 0;
  5305. }
  5306. /* setup the endpoint */
  5307. inp = (struct sctp_inpcb *)so->so_pcb;
  5308. if (inp == NULL) {
  5309. SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
  5310. return (EFAULT);
  5311. }
  5312. rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
  5313. /* Must be at least a MTU's worth */
  5314. if (rwnd_req < SCTP_MIN_RWND)
  5315. rwnd_req = SCTP_MIN_RWND;
  5316. in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
  5317. if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
  5318. #if defined(__APPLE__)
  5319. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  5320. sctp_misc_ints(SCTP_SORECV_ENTER,
  5321. rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
  5322. #else
  5323. sctp_misc_ints(SCTP_SORECV_ENTER,
  5324. rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
  5325. #endif
  5326. #else
  5327. sctp_misc_ints(SCTP_SORECV_ENTER,
  5328. rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
  5329. #endif
  5330. }
  5331. #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
  5332. SOCKBUF_LOCK(&so->so_rcv);
  5333. hold_sblock = 1;
  5334. #endif
  5335. if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
  5336. #if defined(__APPLE__)
  5337. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  5338. sctp_misc_ints(SCTP_SORECV_ENTERPL,
  5339. rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
  5340. #else
  5341. sctp_misc_ints(SCTP_SORECV_ENTERPL,
  5342. rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
  5343. #endif
  5344. #else
  5345. sctp_misc_ints(SCTP_SORECV_ENTERPL,
  5346. rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
  5347. #endif
  5348. }
  5349. #if defined(__APPLE__)
  5350. error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
  5351. #endif
  5352. #if defined(__FreeBSD__)
  5353. error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
  5354. #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  5355. sockbuf_lock = 1;
  5356. #endif
  5357. #endif
  5358. if (error) {
  5359. goto release_unlocked;
  5360. }
  5361. restart:
  5362. #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
  5363. if (hold_sblock == 0) {
  5364. SOCKBUF_LOCK(&so->so_rcv);
  5365. hold_sblock = 1;
  5366. }
  5367. #endif
  5368. #if defined(__APPLE__)
  5369. sbunlock(&so->so_rcv, 1);
  5370. #endif
  5371. #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  5372. sbunlock(&so->so_rcv);
  5373. #endif
  5374. restart_nosblocks:
  5375. if (hold_sblock == 0) {
  5376. SOCKBUF_LOCK(&so->so_rcv);
  5377. hold_sblock = 1;
  5378. }
  5379. if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  5380. (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
  5381. goto out;
  5382. }
  5383. #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  5384. if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
  5385. #else
  5386. if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
  5387. #endif
  5388. if (so->so_error) {
  5389. error = so->so_error;
  5390. if ((in_flags & MSG_PEEK) == 0)
  5391. so->so_error = 0;
  5392. goto out;
  5393. } else {
  5394. if (so->so_rcv.sb_cc == 0) {
  5395. /* indicate EOF */
  5396. error = 0;
  5397. goto out;
  5398. }
  5399. }
  5400. }
  5401. if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
  5402. /* we need to wait for data */
  5403. if ((so->so_rcv.sb_cc == 0) &&
  5404. ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  5405. (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
  5406. if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
  5407. /* For active open side clear flags for re-use
  5408. * passive open is blocked by connect.
  5409. */
  5410. if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
  5411. /* You were aborted, passive side always hits here */
  5412. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
  5413. error = ECONNRESET;
  5414. }
  5415. so->so_state &= ~(SS_ISCONNECTING |
  5416. SS_ISDISCONNECTING |
  5417. SS_ISCONFIRMING |
  5418. SS_ISCONNECTED);
  5419. if (error == 0) {
  5420. if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
  5421. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
  5422. error = ENOTCONN;
  5423. }
  5424. }
  5425. goto out;
  5426. }
  5427. }
  5428. error = sbwait(&so->so_rcv);
  5429. if (error) {
  5430. goto out;
  5431. }
  5432. held_length = 0;
  5433. goto restart_nosblocks;
  5434. } else if (so->so_rcv.sb_cc == 0) {
  5435. if (so->so_error) {
  5436. error = so->so_error;
  5437. if ((in_flags & MSG_PEEK) == 0)
  5438. so->so_error = 0;
  5439. } else {
  5440. if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  5441. (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
  5442. if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
  5443. /* For active open side clear flags for re-use
  5444. * passive open is blocked by connect.
  5445. */
  5446. if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
  5447. /* You were aborted, passive side always hits here */
  5448. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
  5449. error = ECONNRESET;
  5450. }
  5451. so->so_state &= ~(SS_ISCONNECTING |
  5452. SS_ISDISCONNECTING |
  5453. SS_ISCONFIRMING |
  5454. SS_ISCONNECTED);
  5455. if (error == 0) {
  5456. if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
  5457. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
  5458. error = ENOTCONN;
  5459. }
  5460. }
  5461. goto out;
  5462. }
  5463. }
  5464. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
  5465. error = EWOULDBLOCK;
  5466. }
  5467. goto out;
  5468. }
  5469. if (hold_sblock == 1) {
  5470. SOCKBUF_UNLOCK(&so->so_rcv);
  5471. hold_sblock = 0;
  5472. }
  5473. #if defined(__APPLE__)
  5474. error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
  5475. #endif
  5476. #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  5477. error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
  5478. #endif
  5479. /* we possibly have data we can read */
  5480. /*sa_ignore FREED_MEMORY*/
  5481. control = TAILQ_FIRST(&inp->read_queue);
  5482. if (control == NULL) {
  5483. /* This could be happening since
  5484. * the appender did the increment but as not
  5485. * yet did the tailq insert onto the read_queue
  5486. */
  5487. if (hold_rlock == 0) {
  5488. SCTP_INP_READ_LOCK(inp);
  5489. }
  5490. control = TAILQ_FIRST(&inp->read_queue);
  5491. if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
  5492. #ifdef INVARIANTS
  5493. panic("Huh, its non zero and nothing on control?");
  5494. #endif
  5495. so->so_rcv.sb_cc = 0;
  5496. }
  5497. SCTP_INP_READ_UNLOCK(inp);
  5498. hold_rlock = 0;
  5499. goto restart;
  5500. }
  5501. if ((control->length == 0) &&
  5502. (control->do_not_ref_stcb)) {
  5503. /* Clean up code for freeing assoc that left behind a pdapi..
  5504. * maybe a peer in EEOR that just closed after sending and
  5505. * never indicated a EOR.
  5506. */
  5507. if (hold_rlock == 0) {
  5508. hold_rlock = 1;
  5509. SCTP_INP_READ_LOCK(inp);
  5510. }
  5511. control->held_length = 0;
  5512. if (control->data) {
  5513. /* Hmm there is data here .. fix */
  5514. struct mbuf *m_tmp;
  5515. int cnt = 0;
  5516. m_tmp = control->data;
  5517. while (m_tmp) {
  5518. cnt += SCTP_BUF_LEN(m_tmp);
  5519. if (SCTP_BUF_NEXT(m_tmp) == NULL) {
  5520. control->tail_mbuf = m_tmp;
  5521. control->end_added = 1;
  5522. }
  5523. m_tmp = SCTP_BUF_NEXT(m_tmp);
  5524. }
  5525. control->length = cnt;
  5526. } else {
  5527. /* remove it */
  5528. TAILQ_REMOVE(&inp->read_queue, control, next);
  5529. /* Add back any hiddend data */
  5530. sctp_free_remote_addr(control->whoFrom);
  5531. sctp_free_a_readq(stcb, control);
  5532. }
  5533. if (hold_rlock) {
  5534. hold_rlock = 0;
  5535. SCTP_INP_READ_UNLOCK(inp);
  5536. }
  5537. goto restart;
  5538. }
  5539. if ((control->length == 0) &&
  5540. (control->end_added == 1)) {
  5541. /* Do we also need to check for (control->pdapi_aborted == 1)? */
  5542. if (hold_rlock == 0) {
  5543. hold_rlock = 1;
  5544. SCTP_INP_READ_LOCK(inp);
  5545. }
  5546. TAILQ_REMOVE(&inp->read_queue, control, next);
  5547. if (control->data) {
  5548. #ifdef INVARIANTS
  5549. panic("control->data not null but control->length == 0");
  5550. #else
  5551. SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
  5552. sctp_m_freem(control->data);
  5553. control->data = NULL;
  5554. #endif
  5555. }
  5556. if (control->aux_data) {
  5557. sctp_m_free (control->aux_data);
  5558. control->aux_data = NULL;
  5559. }
  5560. sctp_free_remote_addr(control->whoFrom);
  5561. sctp_free_a_readq(stcb, control);
  5562. if (hold_rlock) {
  5563. hold_rlock = 0;
  5564. SCTP_INP_READ_UNLOCK(inp);
  5565. }
  5566. goto restart;
  5567. }
  5568. if (control->length == 0) {
  5569. if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
  5570. (filling_sinfo)) {
  5571. /* find a more suitable one then this */
  5572. ctl = TAILQ_NEXT(control, next);
  5573. while (ctl) {
  5574. if ((ctl->stcb != control->stcb) && (ctl->length) &&
  5575. (ctl->some_taken ||
  5576. (ctl->spec_flags & M_NOTIFICATION) ||
  5577. ((ctl->do_not_ref_stcb == 0) &&
  5578. (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
  5579. ) {
  5580. /*-
  5581. * If we have a different TCB next, and there is data
  5582. * present. If we have already taken some (pdapi), OR we can
  5583. * ref the tcb and no delivery as started on this stream, we
  5584. * take it. Note we allow a notification on a different
  5585. * assoc to be delivered..
  5586. */
  5587. control = ctl;
  5588. goto found_one;
  5589. } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
  5590. (ctl->length) &&
  5591. ((ctl->some_taken) ||
  5592. ((ctl->do_not_ref_stcb == 0) &&
  5593. ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
  5594. (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
  5595. /*-
  5596. * If we have the same tcb, and there is data present, and we
  5597. * have the strm interleave feature present. Then if we have
  5598. * taken some (pdapi) or we can refer to tht tcb AND we have
  5599. * not started a delivery for this stream, we can take it.
  5600. * Note we do NOT allow a notificaiton on the same assoc to
  5601. * be delivered.
  5602. */
  5603. control = ctl;
  5604. goto found_one;
  5605. }
  5606. ctl = TAILQ_NEXT(ctl, next);
  5607. }
  5608. }
  5609. /*
  5610. * if we reach here, not suitable replacement is available
  5611. * <or> fragment interleave is NOT on. So stuff the sb_cc
  5612. * into the our held count, and its time to sleep again.
  5613. */
  5614. held_length = so->so_rcv.sb_cc;
  5615. control->held_length = so->so_rcv.sb_cc;
  5616. goto restart;
  5617. }
  5618. /* Clear the held length since there is something to read */
  5619. control->held_length = 0;
  5620. if (hold_rlock) {
  5621. SCTP_INP_READ_UNLOCK(inp);
  5622. hold_rlock = 0;
  5623. }
  5624. found_one:
  5625. /*
  5626. * If we reach here, control has a some data for us to read off.
  5627. * Note that stcb COULD be NULL.
  5628. */
  5629. control->some_taken++;
  5630. if (hold_sblock) {
  5631. SOCKBUF_UNLOCK(&so->so_rcv);
  5632. hold_sblock = 0;
  5633. }
  5634. stcb = control->stcb;
  5635. if (stcb) {
  5636. if ((control->do_not_ref_stcb == 0) &&
  5637. (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
  5638. if (freecnt_applied == 0)
  5639. stcb = NULL;
  5640. } else if (control->do_not_ref_stcb == 0) {
  5641. /* you can't free it on me please */
  5642. /*
  5643. * The lock on the socket buffer protects us so the
  5644. * free code will stop. But since we used the socketbuf
  5645. * lock and the sender uses the tcb_lock to increment,
  5646. * we need to use the atomic add to the refcnt
  5647. */
  5648. if (freecnt_applied) {
  5649. #ifdef INVARIANTS
  5650. panic("refcnt already incremented");
  5651. #else
  5652. SCTP_PRINTF("refcnt already incremented?\n");
  5653. #endif
  5654. } else {
  5655. atomic_add_int(&stcb->asoc.refcnt, 1);
  5656. freecnt_applied = 1;
  5657. }
  5658. /*
  5659. * Setup to remember how much we have not yet told
  5660. * the peer our rwnd has opened up. Note we grab
  5661. * the value from the tcb from last time.
  5662. * Note too that sack sending clears this when a sack
  5663. * is sent, which is fine. Once we hit the rwnd_req,
  5664. * we then will go to the sctp_user_rcvd() that will
  5665. * not lock until it KNOWs it MUST send a WUP-SACK.
  5666. */
  5667. freed_so_far = stcb->freed_by_sorcv_sincelast;
  5668. stcb->freed_by_sorcv_sincelast = 0;
  5669. }
  5670. }
  5671. if (stcb &&
  5672. ((control->spec_flags & M_NOTIFICATION) == 0) &&
  5673. control->do_not_ref_stcb == 0) {
  5674. stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
  5675. }
  5676. /* First lets get off the sinfo and sockaddr info */
  5677. if ((sinfo) && filling_sinfo) {
  5678. memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
  5679. nxt = TAILQ_NEXT(control, next);
  5680. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
  5681. sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
  5682. struct sctp_extrcvinfo *s_extra;
  5683. s_extra = (struct sctp_extrcvinfo *)sinfo;
  5684. if ((nxt) &&
  5685. (nxt->length)) {
  5686. s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
  5687. if (nxt->sinfo_flags & SCTP_UNORDERED) {
  5688. s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
  5689. }
  5690. if (nxt->spec_flags & M_NOTIFICATION) {
  5691. s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
  5692. }
  5693. s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
  5694. s_extra->sreinfo_next_length = nxt->length;
  5695. s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
  5696. s_extra->sreinfo_next_stream = nxt->sinfo_stream;
  5697. if (nxt->tail_mbuf != NULL) {
  5698. if (nxt->end_added) {
  5699. s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
  5700. }
  5701. }
  5702. } else {
  5703. /* we explicitly 0 this, since the memcpy got
  5704. * some other things beyond the older sinfo_
  5705. * that is on the control's structure :-D
  5706. */
  5707. nxt = NULL;
  5708. s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
  5709. s_extra->sreinfo_next_aid = 0;
  5710. s_extra->sreinfo_next_length = 0;
  5711. s_extra->sreinfo_next_ppid = 0;
  5712. s_extra->sreinfo_next_stream = 0;
  5713. }
  5714. }
  5715. /*
  5716. * update off the real current cum-ack, if we have an stcb.
  5717. */
  5718. if ((control->do_not_ref_stcb == 0) && stcb)
  5719. sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
  5720. /*
  5721. * mask off the high bits, we keep the actual chunk bits in
  5722. * there.
  5723. */
  5724. sinfo->sinfo_flags &= 0x00ff;
  5725. if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
  5726. sinfo->sinfo_flags |= SCTP_UNORDERED;
  5727. }
  5728. }
  5729. #ifdef SCTP_ASOCLOG_OF_TSNS
  5730. {
  5731. int index, newindex;
  5732. struct sctp_pcbtsn_rlog *entry;
  5733. do {
  5734. index = inp->readlog_index;
  5735. newindex = index + 1;
  5736. if (newindex >= SCTP_READ_LOG_SIZE) {
  5737. newindex = 0;
  5738. }
  5739. } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
  5740. entry = &inp->readlog[index];
  5741. entry->vtag = control->sinfo_assoc_id;
  5742. entry->strm = control->sinfo_stream;
  5743. entry->seq = control->sinfo_ssn;
  5744. entry->sz = control->length;
  5745. entry->flgs = control->sinfo_flags;
  5746. }
  5747. #endif
  5748. if (fromlen && from) {
  5749. #if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
  5750. cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
  5751. #endif
  5752. switch (control->whoFrom->ro._l_addr.sa.sa_family) {
  5753. #ifdef INET6
  5754. case AF_INET6:
  5755. #if defined(__Windows__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
  5756. cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in6));
  5757. #endif
  5758. ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
  5759. break;
  5760. #endif
  5761. #ifdef INET
  5762. case AF_INET:
  5763. #if defined(__Windows__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
  5764. cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in));
  5765. #endif
  5766. ((struct sockaddr_in *)from)->sin_port = control->port_from;
  5767. break;
  5768. #endif
  5769. default:
  5770. #if defined(__Windows__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
  5771. cp_len = min((size_t)fromlen, sizeof(struct sockaddr));
  5772. #endif
  5773. break;
  5774. }
  5775. memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
  5776. #if defined(INET) && defined(INET6)
  5777. if ((sctp_is_feature_on(inp,SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
  5778. (from->sa_family == AF_INET) &&
  5779. ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
  5780. struct sockaddr_in *sin;
  5781. struct sockaddr_in6 sin6;
  5782. sin = (struct sockaddr_in *)from;
  5783. bzero(&sin6, sizeof(sin6));
  5784. sin6.sin6_family = AF_INET6;
  5785. #if !defined(__Windows__) && !defined (__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
  5786. sin6.sin6_len = sizeof(struct sockaddr_in6);
  5787. #endif
  5788. #if defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
  5789. ((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
  5790. bcopy(&sin->sin_addr,
  5791. &(((uint32_t *)&sin6.sin6_addr)[3]),
  5792. sizeof(uint32_t));
  5793. #elif defined(__Windows__)
  5794. ((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
  5795. bcopy(&sin->sin_addr,
  5796. &((uint32_t *)&sin6.sin6_addr)[3],
  5797. sizeof(uint32_t));
  5798. #else
  5799. sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
  5800. bcopy(&sin->sin_addr,
  5801. &sin6.sin6_addr.s6_addr32[3],
  5802. sizeof(sin6.sin6_addr.s6_addr32[3]));
  5803. #endif
  5804. sin6.sin6_port = sin->sin_port;
  5805. memcpy(from, &sin6, sizeof(struct sockaddr_in6));
  5806. }
  5807. #endif
  5808. #if defined(SCTP_EMBEDDED_V6_SCOPE)
  5809. #if defined(INET6)
  5810. {
  5811. struct sockaddr_in6 lsa6, *from6;
  5812. from6 = (struct sockaddr_in6 *)from;
  5813. sctp_recover_scope_mac(from6, (&lsa6));
  5814. }
  5815. #endif
  5816. #endif
  5817. }
  5818. /* now copy out what data we can */
  5819. if (mp == NULL) {
  5820. /* copy out each mbuf in the chain up to length */
  5821. get_more_data:
  5822. m = control->data;
  5823. while (m) {
  5824. /* Move out all we can */
  5825. #if defined(__APPLE__)
  5826. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  5827. cp_len = (int)uio_resid(uio);
  5828. #else
  5829. cp_len = (int)uio->uio_resid;
  5830. #endif
  5831. #else
  5832. cp_len = (int)uio->uio_resid;
  5833. #endif
  5834. my_len = (int)SCTP_BUF_LEN(m);
  5835. if (cp_len > my_len) {
  5836. /* not enough in this buf */
  5837. cp_len = my_len;
  5838. }
  5839. if (hold_rlock) {
  5840. SCTP_INP_READ_UNLOCK(inp);
  5841. hold_rlock = 0;
  5842. }
  5843. #if defined(__APPLE__)
  5844. SCTP_SOCKET_UNLOCK(so, 0);
  5845. #endif
  5846. if (cp_len > 0)
  5847. error = uiomove(mtod(m, char *), cp_len, uio);
  5848. #if defined(__APPLE__)
  5849. SCTP_SOCKET_LOCK(so, 0);
  5850. #endif
  5851. /* re-read */
  5852. if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  5853. goto release;
  5854. }
  5855. if ((control->do_not_ref_stcb == 0) && stcb &&
  5856. stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
  5857. no_rcv_needed = 1;
  5858. }
  5859. if (error) {
  5860. /* error we are out of here */
  5861. goto release;
  5862. }
  5863. if ((SCTP_BUF_NEXT(m) == NULL) &&
  5864. (cp_len >= SCTP_BUF_LEN(m)) &&
  5865. ((control->end_added == 0) ||
  5866. (control->end_added &&
  5867. (TAILQ_NEXT(control, next) == NULL)))
  5868. ) {
  5869. SCTP_INP_READ_LOCK(inp);
  5870. hold_rlock = 1;
  5871. }
  5872. if (cp_len == SCTP_BUF_LEN(m)) {
  5873. if ((SCTP_BUF_NEXT(m)== NULL) &&
  5874. (control->end_added)) {
  5875. out_flags |= MSG_EOR;
  5876. if ((control->do_not_ref_stcb == 0) &&
  5877. (control->stcb != NULL) &&
  5878. ((control->spec_flags & M_NOTIFICATION) == 0))
  5879. control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  5880. }
  5881. if (control->spec_flags & M_NOTIFICATION) {
  5882. out_flags |= MSG_NOTIFICATION;
  5883. }
  5884. /* we ate up the mbuf */
  5885. if (in_flags & MSG_PEEK) {
  5886. /* just looking */
  5887. m = SCTP_BUF_NEXT(m);
  5888. copied_so_far += cp_len;
  5889. } else {
  5890. /* dispose of the mbuf */
  5891. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  5892. sctp_sblog(&so->so_rcv,
  5893. control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
  5894. }
  5895. sctp_sbfree(control, stcb, &so->so_rcv, m);
  5896. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  5897. sctp_sblog(&so->so_rcv,
  5898. control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  5899. }
  5900. copied_so_far += cp_len;
  5901. freed_so_far += cp_len;
  5902. freed_so_far += MSIZE;
  5903. atomic_subtract_int(&control->length, cp_len);
  5904. control->data = sctp_m_free(m);
  5905. m = control->data;
  5906. /* been through it all, must hold sb lock ok to null tail */
  5907. if (control->data == NULL) {
  5908. #ifdef INVARIANTS
  5909. #if !defined(__APPLE__)
  5910. if ((control->end_added == 0) ||
  5911. (TAILQ_NEXT(control, next) == NULL)) {
  5912. /* If the end is not added, OR the
  5913. * next is NOT null we MUST have the lock.
  5914. */
  5915. if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
  5916. panic("Hmm we don't own the lock?");
  5917. }
  5918. }
  5919. #endif
  5920. #endif
  5921. control->tail_mbuf = NULL;
  5922. #ifdef INVARIANTS
  5923. if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
  5924. panic("end_added, nothing left and no MSG_EOR");
  5925. }
  5926. #endif
  5927. }
  5928. }
  5929. } else {
  5930. /* Do we need to trim the mbuf? */
  5931. if (control->spec_flags & M_NOTIFICATION) {
  5932. out_flags |= MSG_NOTIFICATION;
  5933. }
  5934. if ((in_flags & MSG_PEEK) == 0) {
  5935. SCTP_BUF_RESV_UF(m, cp_len);
  5936. SCTP_BUF_LEN(m) -= cp_len;
  5937. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  5938. sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
  5939. }
  5940. atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
  5941. if ((control->do_not_ref_stcb == 0) &&
  5942. stcb) {
  5943. atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
  5944. }
  5945. copied_so_far += cp_len;
  5946. freed_so_far += cp_len;
  5947. freed_so_far += MSIZE;
  5948. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  5949. sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
  5950. SCTP_LOG_SBRESULT, 0);
  5951. }
  5952. atomic_subtract_int(&control->length, cp_len);
  5953. } else {
  5954. copied_so_far += cp_len;
  5955. }
  5956. }
  5957. #if defined(__APPLE__)
  5958. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  5959. if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
  5960. #else
  5961. if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
  5962. #endif
  5963. #else
  5964. if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
  5965. #endif
  5966. break;
  5967. }
  5968. if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
  5969. (control->do_not_ref_stcb == 0) &&
  5970. (freed_so_far >= rwnd_req)) {
  5971. sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  5972. }
  5973. } /* end while(m) */
  5974. /*
  5975. * At this point we have looked at it all and we either have
  5976. * a MSG_EOR/or read all the user wants... <OR>
  5977. * control->length == 0.
  5978. */
  5979. if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
  5980. /* we are done with this control */
  5981. if (control->length == 0) {
  5982. if (control->data) {
  5983. #ifdef INVARIANTS
  5984. panic("control->data not null at read eor?");
  5985. #else
  5986. SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
  5987. sctp_m_freem(control->data);
  5988. control->data = NULL;
  5989. #endif
  5990. }
  5991. done_with_control:
  5992. if (TAILQ_NEXT(control, next) == NULL) {
  5993. /* If we don't have a next we need a
  5994. * lock, if there is a next interrupt
  5995. * is filling ahead of us and we don't
  5996. * need a lock to remove this guy
  5997. * (which is the head of the queue).
  5998. */
  5999. if (hold_rlock == 0) {
  6000. SCTP_INP_READ_LOCK(inp);
  6001. hold_rlock = 1;
  6002. }
  6003. }
  6004. TAILQ_REMOVE(&inp->read_queue, control, next);
  6005. /* Add back any hiddend data */
  6006. if (control->held_length) {
  6007. held_length = 0;
  6008. control->held_length = 0;
  6009. wakeup_read_socket = 1;
  6010. }
  6011. if (control->aux_data) {
  6012. sctp_m_free (control->aux_data);
  6013. control->aux_data = NULL;
  6014. }
  6015. no_rcv_needed = control->do_not_ref_stcb;
  6016. sctp_free_remote_addr(control->whoFrom);
  6017. control->data = NULL;
  6018. sctp_free_a_readq(stcb, control);
  6019. control = NULL;
  6020. if ((freed_so_far >= rwnd_req) &&
  6021. (no_rcv_needed == 0))
  6022. sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6023. } else {
  6024. /*
  6025. * The user did not read all of this
  6026. * message, turn off the returned MSG_EOR
  6027. * since we are leaving more behind on the
  6028. * control to read.
  6029. */
  6030. #ifdef INVARIANTS
  6031. if (control->end_added &&
  6032. (control->data == NULL) &&
  6033. (control->tail_mbuf == NULL)) {
  6034. panic("Gak, control->length is corrupt?");
  6035. }
  6036. #endif
  6037. no_rcv_needed = control->do_not_ref_stcb;
  6038. out_flags &= ~MSG_EOR;
  6039. }
  6040. }
  6041. if (out_flags & MSG_EOR) {
  6042. goto release;
  6043. }
  6044. #if defined(__APPLE__)
  6045. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  6046. if ((uio_resid(uio) == 0) ||
  6047. #else
  6048. if ((uio->uio_resid == 0) ||
  6049. #endif
  6050. #else
  6051. if ((uio->uio_resid == 0) ||
  6052. #endif
  6053. ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
  6054. ) {
  6055. goto release;
  6056. }
  6057. /*
  6058. * If I hit here the receiver wants more and this message is
  6059. * NOT done (pd-api). So two questions. Can we block? if not
  6060. * we are done. Did the user NOT set MSG_WAITALL?
  6061. */
  6062. if (block_allowed == 0) {
  6063. goto release;
  6064. }
  6065. /*
  6066. * We need to wait for more data a few things: - We don't
  6067. * sbunlock() so we don't get someone else reading. - We
  6068. * must be sure to account for the case where what is added
  6069. * is NOT to our control when we wakeup.
  6070. */
  6071. /* Do we need to tell the transport a rwnd update might be
  6072. * needed before we go to sleep?
  6073. */
  6074. if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
  6075. ((freed_so_far >= rwnd_req) &&
  6076. (control->do_not_ref_stcb == 0) &&
  6077. (no_rcv_needed == 0))) {
  6078. sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6079. }
  6080. wait_some_more:
  6081. #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  6082. if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
  6083. goto release;
  6084. }
  6085. #else
  6086. if (so->so_state & SS_CANTRCVMORE) {
  6087. goto release;
  6088. }
  6089. #endif
  6090. if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
  6091. goto release;
  6092. if (hold_rlock == 1) {
  6093. SCTP_INP_READ_UNLOCK(inp);
  6094. hold_rlock = 0;
  6095. }
  6096. if (hold_sblock == 0) {
  6097. SOCKBUF_LOCK(&so->so_rcv);
  6098. hold_sblock = 1;
  6099. }
  6100. #if defined(__APPLE__)
  6101. sbunlock(&so->so_rcv, 1);
  6102. #endif
  6103. if ((copied_so_far) && (control->length == 0) &&
  6104. (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
  6105. goto release;
  6106. }
  6107. if (so->so_rcv.sb_cc <= control->held_length) {
  6108. error = sbwait(&so->so_rcv);
  6109. if (error) {
  6110. #if defined(__FreeBSD__)
  6111. goto release;
  6112. #else
  6113. goto release_unlocked;
  6114. #endif
  6115. }
  6116. control->held_length = 0;
  6117. }
  6118. #if defined(__APPLE__)
  6119. error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
  6120. #endif
  6121. if (hold_sblock) {
  6122. SOCKBUF_UNLOCK(&so->so_rcv);
  6123. hold_sblock = 0;
  6124. }
  6125. if (control->length == 0) {
  6126. /* still nothing here */
  6127. if (control->end_added == 1) {
  6128. /* he aborted, or is done i.e.did a shutdown */
  6129. out_flags |= MSG_EOR;
  6130. if (control->pdapi_aborted) {
  6131. if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
  6132. control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6133. out_flags |= MSG_TRUNC;
  6134. } else {
  6135. if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
  6136. control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6137. }
  6138. goto done_with_control;
  6139. }
  6140. if (so->so_rcv.sb_cc > held_length) {
  6141. control->held_length = so->so_rcv.sb_cc;
  6142. held_length = 0;
  6143. }
  6144. goto wait_some_more;
  6145. } else if (control->data == NULL) {
  6146. /* we must re-sync since data
  6147. * is probably being added
  6148. */
  6149. SCTP_INP_READ_LOCK(inp);
  6150. if ((control->length > 0) && (control->data == NULL)) {
  6151. /* big trouble.. we have the lock and its corrupt? */
  6152. #ifdef INVARIANTS
  6153. panic ("Impossible data==NULL length !=0");
  6154. #endif
  6155. out_flags |= MSG_EOR;
  6156. out_flags |= MSG_TRUNC;
  6157. control->length = 0;
  6158. SCTP_INP_READ_UNLOCK(inp);
  6159. goto done_with_control;
  6160. }
  6161. SCTP_INP_READ_UNLOCK(inp);
  6162. /* We will fall around to get more data */
  6163. }
  6164. goto get_more_data;
  6165. } else {
  6166. /*-
  6167. * Give caller back the mbuf chain,
  6168. * store in uio_resid the length
  6169. */
  6170. wakeup_read_socket = 0;
  6171. if ((control->end_added == 0) ||
  6172. (TAILQ_NEXT(control, next) == NULL)) {
  6173. /* Need to get rlock */
  6174. if (hold_rlock == 0) {
  6175. SCTP_INP_READ_LOCK(inp);
  6176. hold_rlock = 1;
  6177. }
  6178. }
  6179. if (control->end_added) {
  6180. out_flags |= MSG_EOR;
  6181. if ((control->do_not_ref_stcb == 0) &&
  6182. (control->stcb != NULL) &&
  6183. ((control->spec_flags & M_NOTIFICATION) == 0))
  6184. control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6185. }
  6186. if (control->spec_flags & M_NOTIFICATION) {
  6187. out_flags |= MSG_NOTIFICATION;
  6188. }
  6189. #if defined(__APPLE__)
  6190. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  6191. uio_setresid(uio, control->length);
  6192. #else
  6193. uio->uio_resid = control->length;
  6194. #endif
  6195. #else
  6196. uio->uio_resid = control->length;
  6197. #endif
  6198. *mp = control->data;
  6199. m = control->data;
  6200. while (m) {
  6201. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6202. sctp_sblog(&so->so_rcv,
  6203. control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
  6204. }
  6205. sctp_sbfree(control, stcb, &so->so_rcv, m);
  6206. freed_so_far += SCTP_BUF_LEN(m);
  6207. freed_so_far += MSIZE;
  6208. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6209. sctp_sblog(&so->so_rcv,
  6210. control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  6211. }
  6212. m = SCTP_BUF_NEXT(m);
  6213. }
  6214. control->data = control->tail_mbuf = NULL;
  6215. control->length = 0;
  6216. if (out_flags & MSG_EOR) {
  6217. /* Done with this control */
  6218. goto done_with_control;
  6219. }
  6220. }
  6221. release:
  6222. if (hold_rlock == 1) {
  6223. SCTP_INP_READ_UNLOCK(inp);
  6224. hold_rlock = 0;
  6225. }
  6226. #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
  6227. if (hold_sblock == 0) {
  6228. SOCKBUF_LOCK(&so->so_rcv);
  6229. hold_sblock = 1;
  6230. }
  6231. #else
  6232. if (hold_sblock == 1) {
  6233. SOCKBUF_UNLOCK(&so->so_rcv);
  6234. hold_sblock = 0;
  6235. }
  6236. #endif
  6237. #if defined(__APPLE__)
  6238. sbunlock(&so->so_rcv, 1);
  6239. #endif
  6240. #if defined(__FreeBSD__)
  6241. sbunlock(&so->so_rcv);
  6242. #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  6243. sockbuf_lock = 0;
  6244. #endif
  6245. #endif
  6246. release_unlocked:
  6247. if (hold_sblock) {
  6248. SOCKBUF_UNLOCK(&so->so_rcv);
  6249. hold_sblock = 0;
  6250. }
  6251. if ((stcb) && (in_flags & MSG_PEEK) == 0) {
  6252. if ((freed_so_far >= rwnd_req) &&
  6253. (control && (control->do_not_ref_stcb == 0)) &&
  6254. (no_rcv_needed == 0))
  6255. sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6256. }
  6257. out:
  6258. if (msg_flags) {
  6259. *msg_flags = out_flags;
  6260. }
  6261. if (((out_flags & MSG_EOR) == 0) &&
  6262. ((in_flags & MSG_PEEK) == 0) &&
  6263. (sinfo) &&
  6264. (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
  6265. sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
  6266. struct sctp_extrcvinfo *s_extra;
  6267. s_extra = (struct sctp_extrcvinfo *)sinfo;
  6268. s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
  6269. }
  6270. if (hold_rlock == 1) {
  6271. SCTP_INP_READ_UNLOCK(inp);
  6272. }
  6273. if (hold_sblock) {
  6274. SOCKBUF_UNLOCK(&so->so_rcv);
  6275. }
  6276. #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  6277. if (sockbuf_lock) {
  6278. sbunlock(&so->so_rcv);
  6279. }
  6280. #endif
  6281. if (freecnt_applied) {
  6282. /*
  6283. * The lock on the socket buffer protects us so the free
  6284. * code will stop. But since we used the socketbuf lock and
  6285. * the sender uses the tcb_lock to increment, we need to use
  6286. * the atomic add to the refcnt.
  6287. */
  6288. if (stcb == NULL) {
  6289. #ifdef INVARIANTS
  6290. panic("stcb for refcnt has gone NULL?");
  6291. goto stage_left;
  6292. #else
  6293. goto stage_left;
  6294. #endif
  6295. }
  6296. atomic_add_int(&stcb->asoc.refcnt, -1);
  6297. /* Save the value back for next time */
  6298. stcb->freed_by_sorcv_sincelast = freed_so_far;
  6299. }
  6300. if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
  6301. if (stcb) {
  6302. sctp_misc_ints(SCTP_SORECV_DONE,
  6303. freed_so_far,
  6304. #if defined(__APPLE__)
  6305. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  6306. ((uio) ? (slen-uio_resid(uio)) : slen),
  6307. #else
  6308. ((uio) ? (slen-uio->uio_resid) : slen),
  6309. #endif
  6310. #else
  6311. ((uio) ? (slen-uio->uio_resid) : slen),
  6312. #endif
  6313. stcb->asoc.my_rwnd,
  6314. so->so_rcv.sb_cc);
  6315. } else {
  6316. sctp_misc_ints(SCTP_SORECV_DONE,
  6317. freed_so_far,
  6318. #if defined(__APPLE__)
  6319. #if defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION)
  6320. ((uio) ? (slen-uio_resid(uio)) : slen),
  6321. #else
  6322. ((uio) ? (slen-uio->uio_resid) : slen),
  6323. #endif
  6324. #else
  6325. ((uio) ? (slen-uio->uio_resid) : slen),
  6326. #endif
  6327. 0,
  6328. so->so_rcv.sb_cc);
  6329. }
  6330. }
  6331. stage_left:
  6332. if (wakeup_read_socket) {
  6333. sctp_sorwakeup(inp, so);
  6334. }
  6335. return (error);
  6336. }
  6337. #ifdef SCTP_MBUF_LOGGING
  6338. struct mbuf *
  6339. sctp_m_free(struct mbuf *m)
  6340. {
  6341. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
  6342. if (SCTP_BUF_IS_EXTENDED(m)) {
  6343. sctp_log_mb(m, SCTP_MBUF_IFREE);
  6344. }
  6345. }
  6346. return (m_free(m));
  6347. }
  6348. void sctp_m_freem(struct mbuf *mb)
  6349. {
  6350. while (mb != NULL)
  6351. mb = sctp_m_free(mb);
  6352. }
  6353. #endif
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address, for all associations that hold the
	 * address, request a peer-set-primary.  The actual per-association
	 * work is performed asynchronously by the address work-queue
	 * iterator; this function only queues the request and starts the
	 * ADDR_WQ timer.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if 'sa' is not a local
	 * interface address in VRF 'vrf_id', or ENOMEM if the work-queue
	 * element cannot be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/* Now that we have the ifa we must awaken the
	 * iterator with this message: allocate a work-queue element
	 * describing the set-primary action.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Bump the global laddr count and initialize the wi structure. */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);
	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the request gets serviced. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	                 (struct sctp_inpcb *)NULL,
	                 (struct sctp_tcb *)NULL,
	                 (struct sctp_nets *)NULL);
	return (0);
}
  6396. #if defined(__Userspace__)
  6397. /* no sctp_soreceive for __Userspace__ now */
  6398. #endif
  6399. #if !defined(__Userspace__)
  6400. int
  6401. sctp_soreceive( struct socket *so,
  6402. struct sockaddr **psa,
  6403. struct uio *uio,
  6404. struct mbuf **mp0,
  6405. struct mbuf **controlp,
  6406. int *flagsp)
  6407. {
  6408. int error, fromlen;
  6409. uint8_t sockbuf[256];
  6410. struct sockaddr *from;
  6411. struct sctp_extrcvinfo sinfo;
  6412. int filling_sinfo = 1;
  6413. struct sctp_inpcb *inp;
  6414. inp = (struct sctp_inpcb *)so->so_pcb;
  6415. /* pickup the assoc we are reading from */
  6416. if (inp == NULL) {
  6417. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6418. return (EINVAL);
  6419. }
  6420. if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
  6421. sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
  6422. sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
  6423. (controlp == NULL)) {
  6424. /* user does not want the sndrcv ctl */
  6425. filling_sinfo = 0;
  6426. }
  6427. if (psa) {
  6428. from = (struct sockaddr *)sockbuf;
  6429. fromlen = sizeof(sockbuf);
  6430. #if !defined(__Windows__)
  6431. from->sa_len = 0;
  6432. #endif
  6433. } else {
  6434. from = NULL;
  6435. fromlen = 0;
  6436. }
  6437. #if defined(__APPLE__)
  6438. SCTP_SOCKET_LOCK(so, 1);
  6439. #endif
  6440. error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
  6441. (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
  6442. if ((controlp) && (filling_sinfo)) {
  6443. /* copy back the sinfo in a CMSG format */
  6444. if (filling_sinfo)
  6445. *controlp = sctp_build_ctl_nchunk(inp,
  6446. (struct sctp_sndrcvinfo *)&sinfo);
  6447. else
  6448. *controlp = NULL;
  6449. }
  6450. if (psa) {
  6451. /* copy back the address info */
  6452. #if !defined(__Windows__)
  6453. if (from && from->sa_len) {
  6454. #else
  6455. if (from) {
  6456. #endif
  6457. #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  6458. *psa = sodupsockaddr(from, M_NOWAIT);
  6459. #else
  6460. *psa = dup_sockaddr(from, mp0 == 0);
  6461. #endif
  6462. } else {
  6463. *psa = NULL;
  6464. }
  6465. }
  6466. #if defined(__APPLE__)
  6467. SCTP_SOCKET_UNLOCK(so, 1);
  6468. #endif
  6469. return (error);
  6470. }
  6471. #if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
  6472. /*
  6473. * General routine to allocate a hash table with control of memory flags.
  6474. * is in 7.0 and beyond for sure :-)
  6475. */
  6476. void *
  6477. sctp_hashinit_flags(int elements, struct malloc_type *type,
  6478. u_long *hashmask, int flags)
  6479. {
  6480. long hashsize;
  6481. LIST_HEAD(generic, generic) *hashtbl;
  6482. int i;
  6483. if (elements <= 0) {
  6484. #ifdef INVARIANTS
  6485. panic("hashinit: bad elements");
  6486. #else
  6487. SCTP_PRINTF("hashinit: bad elements?");
  6488. elements = 1;
  6489. #endif
  6490. }
  6491. for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
  6492. continue;
  6493. hashsize >>= 1;
  6494. if (flags & HASH_WAITOK)
  6495. hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
  6496. else if (flags & HASH_NOWAIT)
  6497. hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
  6498. else {
  6499. #ifdef INVARIANTS
  6500. panic("flag incorrect in hashinit_flags");
  6501. #else
  6502. return (NULL);
  6503. #endif
  6504. }
  6505. /* no memory? */
  6506. if (hashtbl == NULL)
  6507. return (NULL);
  6508. for (i = 0; i < hashsize; i++)
  6509. LIST_INIT(&hashtbl[i]);
  6510. *hashmask = hashsize - 1;
  6511. return (hashtbl);
  6512. }
  6513. #endif
  6514. #else /* __Userspace__ ifdef above sctp_soreceive */
  6515. /*
  6516. * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
  6517. * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
  6518. *__FreeBSD__ must be excluded.
  6519. *
  6520. */
  6521. void *
  6522. sctp_hashinit_flags(int elements, struct malloc_type *type,
  6523. u_long *hashmask, int flags)
  6524. {
  6525. long hashsize;
  6526. LIST_HEAD(generic, generic) *hashtbl;
  6527. int i;
  6528. if (elements <= 0) {
  6529. SCTP_PRINTF("hashinit: bad elements?");
  6530. #ifdef INVARIANTS
  6531. return (NULL);
  6532. #else
  6533. elements = 1;
  6534. #endif
  6535. }
  6536. for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
  6537. continue;
  6538. hashsize >>= 1;
  6539. /*cannot use MALLOC here because it has to be declared or defined
  6540. using MALLOC_DECLARE or MALLOC_DEFINE first. */
  6541. if (flags & HASH_WAITOK)
  6542. hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
  6543. else if (flags & HASH_NOWAIT)
  6544. hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
  6545. else {
  6546. #ifdef INVARIANTS
  6547. SCTP_PRINTF("flag incorrect in hashinit_flags");
  6548. #endif
  6549. return (NULL);
  6550. }
  6551. /* no memory? */
  6552. if (hashtbl == NULL)
  6553. return (NULL);
  6554. for (i = 0; i < hashsize; i++)
  6555. LIST_INIT(&hashtbl[i]);
  6556. *hashmask = hashsize - 1;
  6557. return (hashtbl);
  6558. }
  6559. void
  6560. sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
  6561. {
  6562. LIST_HEAD(generic, generic) *hashtbl, *hp;
  6563. hashtbl = vhashtbl;
  6564. for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
  6565. if (!LIST_EMPTY(hp)) {
  6566. SCTP_PRINTF("hashdestroy: hash not empty");
  6567. return;
  6568. }
  6569. FREE(hashtbl, type);
  6570. }
/*
 * Userland: free a hash table regardless of whether its buckets are
 * empty.  NOTE: only the bucket array itself is freed; the entries
 * (if any) are NOT freed here -- the per-entry free loop below was
 * disabled because the entries are not individually heap-allocated
 * (see the retained comment).  Callers must drain the table first or
 * accept leaking the entries.
 */
void
sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
	/*
	LIST_ENTRY(type) *start, *temp;
	*/
	hashtbl = vhashtbl;
	/* Apparently temp is not dynamically allocated, so attempts to
	   free it results in error.
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		if (!LIST_EMPTY(hp)) {
			start = LIST_FIRST(hp);
			while (start != NULL) {
				temp = start;
				start = start->le_next;
				SCTP_PRINTF("%s: %p \n", __func__, temp);
				FREE(temp, type);
			}
		}
	*/
	FREE(hashtbl, type);
}
  6594. #endif
/*
 * Add "totaddr" packed sockaddrs starting at "addr" to the association
 * "stcb" (sctp_connectx() helper).  Returns the number of addresses
 * successfully added.  On a bad address (wildcard/broadcast/multicast)
 * or an add failure, the ASSOCIATION IS FREED, *error is set to
 * EINVAL/ENOBUFS, and the walk stops -- the caller must check *error
 * before touching stcb again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject addresses a peer cannot actually be at. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast v6 addresses. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/* NOTE(review): an unknown family leaves incr at its
			 * previous value (0 on the first iteration), so the
			 * pointer below may not advance past the bad entry --
			 * presumably callers pre-validate families; confirm. */
			break;
		}
		/* Step to the next packed sockaddr. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
 out_now:
	return (added);
}
/*
 * Scan "*totaddr" packed sockaddrs starting at "addr" (sctp_connectx()
 * helper): count v4/v6 addresses into *num_v4/*num_v6, validate lengths
 * and families, and probe for an already-existing association to any of
 * them on "inp".
 *
 * Returns an existing stcb if found (the inp reference taken for the
 * lookup is intentionally kept in that case), otherwise NULL.  On an
 * invalid address *error is set to EINVAL and *bad_addr to 1.  *totaddr
 * is trimmed to the number of entries actually examined when an unknown
 * family or the "limit" byte bound stops the scan.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
#endif
			break;
#endif
#ifdef INET6
		case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v6) += 1;
			incr = sizeof(struct sockaddr_in6);
#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
#endif
			break;
		}
#endif
		default:
			/* Unknown family: stop the scan at this entry. */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		/* Hold a ref across the lookup; kept if an assoc is found. */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/* NOTE(review): "at" is never advanced, so this bound only
		 * compares a single sockaddr size against limit -- looks
		 * like a latent bug (later upstream advances "at"); confirm. */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
  6743. /*
  6744. * sctp_bindx(ADD) for one address.
  6745. * assumes all arguments are valid/checked by caller.
  6746. */
  6747. void
  6748. sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
  6749. struct sockaddr *sa, sctp_assoc_t assoc_id,
  6750. uint32_t vrf_id, int *error, void *p)
  6751. {
  6752. struct sockaddr *addr_touse;
  6753. #ifdef INET6
  6754. struct sockaddr_in sin;
  6755. #endif
  6756. #ifdef SCTP_MVRF
  6757. int i, fnd = 0;
  6758. #endif
  6759. /* see if we're bound all already! */
  6760. if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
  6761. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6762. *error = EINVAL;
  6763. return;
  6764. }
  6765. #ifdef SCTP_MVRF
  6766. /* Is the VRF one we have */
  6767. for (i = 0; i < inp->num_vrfs; i++) {
  6768. if (vrf_id == inp->m_vrf_ids[i]) {
  6769. fnd = 1;
  6770. break;
  6771. }
  6772. }
  6773. if (!fnd) {
  6774. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6775. *error = EINVAL;
  6776. return;
  6777. }
  6778. #endif
  6779. addr_touse = sa;
  6780. #ifdef INET6
  6781. if (sa->sa_family == AF_INET6) {
  6782. struct sockaddr_in6 *sin6;
  6783. #if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
  6784. if (sa->sa_len != sizeof(struct sockaddr_in6)) {
  6785. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6786. *error = EINVAL;
  6787. return;
  6788. }
  6789. #endif
  6790. if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
  6791. /* can only bind v6 on PF_INET6 sockets */
  6792. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6793. *error = EINVAL;
  6794. return;
  6795. }
  6796. sin6 = (struct sockaddr_in6 *)addr_touse;
  6797. if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
  6798. if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  6799. SCTP_IPV6_V6ONLY(inp)) {
  6800. /* can't bind v4-mapped on PF_INET sockets */
  6801. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6802. *error = EINVAL;
  6803. return;
  6804. }
  6805. in6_sin6_2_sin(&sin, sin6);
  6806. addr_touse = (struct sockaddr *)&sin;
  6807. }
  6808. }
  6809. #endif
  6810. #ifdef INET
  6811. if (sa->sa_family == AF_INET) {
  6812. #if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
  6813. if (sa->sa_len != sizeof(struct sockaddr_in)) {
  6814. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6815. *error = EINVAL;
  6816. return;
  6817. }
  6818. #endif
  6819. if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  6820. SCTP_IPV6_V6ONLY(inp)) {
  6821. /* can't bind v4 on PF_INET sockets */
  6822. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6823. *error = EINVAL;
  6824. return;
  6825. }
  6826. }
  6827. #endif
  6828. if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
  6829. #if !(defined(__Panda__) || defined(__Windows__))
  6830. if (p == NULL) {
  6831. /* Can't get proc for Net/Open BSD */
  6832. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6833. *error = EINVAL;
  6834. return;
  6835. }
  6836. #endif
  6837. *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
  6838. return;
  6839. }
  6840. /*
  6841. * No locks required here since bind and mgmt_ep_sa
  6842. * all do their own locking. If we do something for
  6843. * the FIX: below we may need to lock in that case.
  6844. */
  6845. if (assoc_id == 0) {
  6846. /* add the address */
  6847. struct sctp_inpcb *lep;
  6848. struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
  6849. /* validate the incoming port */
  6850. if ((lsin->sin_port != 0) &&
  6851. (lsin->sin_port != inp->sctp_lport)) {
  6852. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6853. *error = EINVAL;
  6854. return;
  6855. } else {
  6856. /* user specified 0 port, set it to existing port */
  6857. lsin->sin_port = inp->sctp_lport;
  6858. }
  6859. lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
  6860. if (lep != NULL) {
  6861. /*
  6862. * We must decrement the refcount
  6863. * since we have the ep already and
  6864. * are binding. No remove going on
  6865. * here.
  6866. */
  6867. SCTP_INP_DECR_REF(lep);
  6868. }
  6869. if (lep == inp) {
  6870. /* already bound to it.. ok */
  6871. return;
  6872. } else if (lep == NULL) {
  6873. ((struct sockaddr_in *)addr_touse)->sin_port = 0;
  6874. *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
  6875. SCTP_ADD_IP_ADDRESS,
  6876. vrf_id, NULL);
  6877. } else {
  6878. *error = EADDRINUSE;
  6879. }
  6880. if (*error)
  6881. return;
  6882. } else {
  6883. /*
  6884. * FIX: decide whether we allow assoc based
  6885. * bindx
  6886. */
  6887. }
  6888. }
  6889. /*
  6890. * sctp_bindx(DELETE) for one address.
  6891. * assumes all arguments are valid/checked by caller.
  6892. */
  6893. void
  6894. sctp_bindx_delete_address(struct sctp_inpcb *inp,
  6895. struct sockaddr *sa, sctp_assoc_t assoc_id,
  6896. uint32_t vrf_id, int *error)
  6897. {
  6898. struct sockaddr *addr_touse;
  6899. #ifdef INET6
  6900. struct sockaddr_in sin;
  6901. #endif
  6902. #ifdef SCTP_MVRF
  6903. int i, fnd = 0;
  6904. #endif
  6905. /* see if we're bound all already! */
  6906. if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
  6907. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6908. *error = EINVAL;
  6909. return;
  6910. }
  6911. #ifdef SCTP_MVRF
  6912. /* Is the VRF one we have */
  6913. for (i = 0; i < inp->num_vrfs; i++) {
  6914. if (vrf_id == inp->m_vrf_ids[i]) {
  6915. fnd = 1;
  6916. break;
  6917. }
  6918. }
  6919. if (!fnd) {
  6920. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6921. *error = EINVAL;
  6922. return;
  6923. }
  6924. #endif
  6925. addr_touse = sa;
  6926. #if defined(INET6)
  6927. if (sa->sa_family == AF_INET6) {
  6928. struct sockaddr_in6 *sin6;
  6929. #if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
  6930. if (sa->sa_len != sizeof(struct sockaddr_in6)) {
  6931. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6932. *error = EINVAL;
  6933. return;
  6934. }
  6935. #endif
  6936. if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
  6937. /* can only bind v6 on PF_INET6 sockets */
  6938. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6939. *error = EINVAL;
  6940. return;
  6941. }
  6942. sin6 = (struct sockaddr_in6 *)addr_touse;
  6943. if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
  6944. if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  6945. SCTP_IPV6_V6ONLY(inp)) {
  6946. /* can't bind mapped-v4 on PF_INET sockets */
  6947. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6948. *error = EINVAL;
  6949. return;
  6950. }
  6951. in6_sin6_2_sin(&sin, sin6);
  6952. addr_touse = (struct sockaddr *)&sin;
  6953. }
  6954. }
  6955. #endif
  6956. #ifdef INET
  6957. if (sa->sa_family == AF_INET) {
  6958. #if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
  6959. if (sa->sa_len != sizeof(struct sockaddr_in)) {
  6960. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6961. *error = EINVAL;
  6962. return;
  6963. }
  6964. #endif
  6965. if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  6966. SCTP_IPV6_V6ONLY(inp)) {
  6967. /* can't bind v4 on PF_INET sockets */
  6968. SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6969. *error = EINVAL;
  6970. return;
  6971. }
  6972. }
  6973. #endif
  6974. /*
  6975. * No lock required mgmt_ep_sa does its own locking.
  6976. * If the FIX: below is ever changed we may need to
  6977. * lock before calling association level binding.
  6978. */
  6979. if (assoc_id == 0) {
  6980. /* delete the address */
  6981. *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
  6982. SCTP_DEL_IP_ADDRESS,
  6983. vrf_id, NULL);
  6984. } else {
  6985. /*
  6986. * FIX: decide whether we allow assoc based
  6987. * bindx
  6988. */
  6989. }
  6990. }
  6991. /*
  6992. * returns the valid local address count for an assoc, taking into account
  6993. * all scoping rules
  6994. */
  6995. int
  6996. sctp_local_addr_count(struct sctp_tcb *stcb)
  6997. {
  6998. int loopback_scope, ipv4_local_scope, local_scope, site_scope;
  6999. int ipv4_addr_legal, ipv6_addr_legal;
  7000. struct sctp_vrf *vrf;
  7001. struct sctp_ifn *sctp_ifn;
  7002. struct sctp_ifa *sctp_ifa;
  7003. int count = 0;
  7004. /* Turn on all the appropriate scopes */
  7005. loopback_scope = stcb->asoc.loopback_scope;
  7006. ipv4_local_scope = stcb->asoc.ipv4_local_scope;
  7007. local_scope = stcb->asoc.local_scope;
  7008. site_scope = stcb->asoc.site_scope;
  7009. ipv4_addr_legal = ipv6_addr_legal = 0;
  7010. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
  7011. ipv6_addr_legal = 1;
  7012. if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
  7013. ipv4_addr_legal = 1;
  7014. }
  7015. } else {
  7016. ipv4_addr_legal = 1;
  7017. }
  7018. SCTP_IPI_ADDR_RLOCK();
  7019. vrf = sctp_find_vrf(stcb->asoc.vrf_id);
  7020. if (vrf == NULL) {
  7021. /* no vrf, no addresses */
  7022. SCTP_IPI_ADDR_RUNLOCK();
  7023. return (0);
  7024. }
  7025. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
  7026. /*
  7027. * bound all case: go through all ifns on the vrf
  7028. */
  7029. LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
  7030. if ((loopback_scope == 0) &&
  7031. SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
  7032. continue;
  7033. }
  7034. LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
  7035. if (sctp_is_addr_restricted(stcb, sctp_ifa))
  7036. continue;
  7037. switch (sctp_ifa->address.sa.sa_family) {
  7038. #ifdef INET
  7039. case AF_INET:
  7040. if (ipv4_addr_legal) {
  7041. struct sockaddr_in *sin;
  7042. sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
  7043. if (sin->sin_addr.s_addr == 0) {
  7044. /* skip unspecified addrs */
  7045. continue;
  7046. }
  7047. if ((ipv4_local_scope == 0) &&
  7048. (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
  7049. continue;
  7050. }
  7051. /* count this one */
  7052. count++;
  7053. } else {
  7054. continue;
  7055. }
  7056. break;
  7057. #endif
  7058. #ifdef INET6
  7059. case AF_INET6:
  7060. if (ipv6_addr_legal) {
  7061. struct sockaddr_in6 *sin6;
  7062. #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
  7063. struct sockaddr_in6 lsa6;
  7064. #endif
  7065. sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
  7066. if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
  7067. continue;
  7068. }
  7069. if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
  7070. if (local_scope == 0)
  7071. continue;
  7072. #if defined(SCTP_EMBEDDED_V6_SCOPE)
  7073. if (sin6->sin6_scope_id == 0) {
  7074. #ifdef SCTP_KAME
  7075. if (sa6_recoverscope(sin6) != 0)
  7076. /*
  7077. * bad link
  7078. * local
  7079. * address
  7080. */
  7081. continue;
  7082. #else
  7083. lsa6 = *sin6;
  7084. if (in6_recoverscope(&lsa6,
  7085. &lsa6.sin6_addr,
  7086. NULL))
  7087. /*
  7088. * bad link
  7089. * local
  7090. * address
  7091. */
  7092. continue;
  7093. sin6 = &lsa6;
  7094. #endif /* SCTP_KAME */
  7095. }
  7096. #endif /* SCTP_EMBEDDED_V6_SCOPE */
  7097. }
  7098. if ((site_scope == 0) &&
  7099. (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
  7100. continue;
  7101. }
  7102. /* count this one */
  7103. count++;
  7104. }
  7105. break;
  7106. #endif
  7107. default:
  7108. /* TSNH */
  7109. break;
  7110. }
  7111. }
  7112. }
  7113. } else {
  7114. /*
  7115. * subset bound case
  7116. */
  7117. struct sctp_laddr *laddr;
  7118. LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
  7119. sctp_nxt_addr) {
  7120. if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
  7121. continue;
  7122. }
  7123. /* count this one */
  7124. count++;
  7125. }
  7126. }
  7127. SCTP_IPI_ADDR_RUNLOCK();
  7128. return (count);
  7129. }
  7130. #if defined(SCTP_LOCAL_TRACE_BUF)
/*
 * Append one entry to the global SCTP trace ring buffer.  A slot index
 * is claimed lock-free with a compare-and-swap retry loop; the index
 * wraps from SCTP_MAX_LOGGING_SIZE back to the start.  The entry fields
 * themselves are then filled without further synchronization, so a
 * concurrent wrap-around can in principle tear an entry -- acceptable
 * for a debug trace.  On Windows the log lives behind a pointer (and may
 * be absent); elsewhere it is embedded in the sysctl struct, hence the
 * duplicated "->" vs "." bodies below.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

#if defined(__Windows__)
	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
		return;
	}
	/* Claim a slot: retry until our index update wins the CAS. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
#else
	/* Claim a slot: retry until our index update wins the CAS. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
#endif
}
  7180. #endif
  7181. #if defined(__FreeBSD__)
  7182. /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
  7183. #ifdef INET
  7184. /* We will need to add support
  7185. * to bind the ports and such here
  7186. * so we can do UDP tunneling. In
  7187. * the mean-time, we return error
  7188. */
  7189. #include <netinet/udp.h>
  7190. #include <netinet/udp_var.h>
  7191. #include <sys/proc.h>
  7192. #ifdef INET6
  7193. #include <netinet6/sctp6_var.h>
  7194. #endif
  7195. #if __FreeBSD_version >= 800044
  7196. static void
  7197. sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
  7198. {
  7199. struct ip *iph;
  7200. struct mbuf *sp, *last;
  7201. struct udphdr *uhdr;
  7202. uint16_t port = 0;
  7203. int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
  7204. /* Split out the mbuf chain. Leave the
  7205. * IP header in m, place the
  7206. * rest in the sp.
  7207. */
  7208. if ((m->m_flags & M_PKTHDR) == 0) {
  7209. /* Can't handle one that is not a pkt hdr */
  7210. goto out;
  7211. }
  7212. /* pull the src port */
  7213. iph = mtod(m, struct ip *);
  7214. uhdr = (struct udphdr *)((caddr_t)iph + off);
  7215. port = uhdr->uh_sport;
  7216. sp = m_split(m, off, M_DONTWAIT);
  7217. if (sp == NULL) {
  7218. /* Gak, drop packet, we can't do a split */
  7219. goto out;
  7220. }
  7221. if (sp->m_pkthdr.len < header_size) {
  7222. /* Gak, packet can't have an SCTP header in it - to small */
  7223. m_freem(sp);
  7224. goto out;
  7225. }
  7226. /* ok now pull up the UDP header and SCTP header together */
  7227. sp = m_pullup(sp, header_size);
  7228. if (sp == NULL) {
  7229. /* Gak pullup failed */
  7230. goto out;
  7231. }
  7232. /* trim out the UDP header */
  7233. m_adj(sp, sizeof(struct udphdr));
  7234. /* Now reconstruct the mbuf chain */
  7235. /* 1) find last one */
  7236. last = m;
  7237. while (last->m_next != NULL) {
  7238. last = last->m_next;
  7239. }
  7240. last->m_next = sp;
  7241. m->m_pkthdr.len += sp->m_pkthdr.len;
  7242. last = m;
  7243. while (last != NULL) {
  7244. last = last->m_next;
  7245. }
  7246. /* Now its ready for sctp_input or sctp6_input */
  7247. iph = mtod(m, struct ip *);
  7248. switch (iph->ip_v) {
  7249. #ifdef INET
  7250. case IPVERSION:
  7251. {
  7252. uint16_t len;
  7253. /* its IPv4 */
  7254. len = SCTP_GET_IPV4_LENGTH(iph);
  7255. len -= sizeof(struct udphdr);
  7256. SCTP_GET_IPV4_LENGTH(iph) = len;
  7257. sctp_input_with_port(m, off, port);
  7258. break;
  7259. }
  7260. #endif
  7261. #ifdef INET6
  7262. case IPV6_VERSION >> 4:
  7263. {
  7264. /* its IPv6 - NOT supported */
  7265. goto out;
  7266. break;
  7267. }
  7268. #endif
  7269. default:
  7270. {
  7271. m_freem(m);
  7272. break;
  7273. }
  7274. }
  7275. return;
  7276. out:
  7277. m_freem(m);
  7278. }
  7279. #endif
  7280. void
  7281. sctp_over_udp_stop(void)
  7282. {
  7283. struct socket *sop;
  7284. /*
  7285. * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writting!
  7286. */
  7287. if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
  7288. /* Nothing to do */
  7289. return;
  7290. }
  7291. sop = SCTP_BASE_INFO(udp_tun_socket);
  7292. soclose(sop);
  7293. SCTP_BASE_INFO(udp_tun_socket) = NULL;
  7294. }
/*
 * Create and bind the kernel UDP socket used for SCTP-over-UDP
 * tunneling on the sysctl-configured port, and register
 * sctp_recv_udp_tunneled_packet() as its input hook.  Returns 0 on
 * success, EINVAL if no port is configured, EALREADY if already
 * running, or the socreate/sobind/udp_set_kernel_tunneling error.
 * On any failure after socket creation the socket is torn down again
 * via sctp_over_udp_stop().  Requires FreeBSD >= 800044 (returns 1
 * otherwise).
 */
int
sctp_over_udp_start(void)
{
#if __FreeBSD_version >= 800044
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	/* Publish the socket before wiring the hook, so sctp_over_udp_stop()
	 * can clean it up on the error paths below. */
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port*/
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
	exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
#else
	return (1);
#endif
}
  7349. #endif
  7350. #endif