PageRenderTime 80ms CodeModel.GetById 38ms RepoModel.GetById 1ms app.codeStats 1ms

/netinet/sctputil.c

https://bitbucket.org/brucec/sctpdrv
C | 7726 lines | 6422 code | 376 blank | 928 comment | 1694 complexity | 5b1ab6f6258d7e6de6120e6fe7ae9ca4 MD5 | raw file

Large files are truncated, but you can click here to view the full file

  1. /*-
  2. * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  4. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions are met:
  8. *
  9. * a) Redistributions of source code must retain the above copyright notice,
  10. * this list of conditions and the following disclaimer.
  11. *
  12. * b) Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in
  14. * the documentation and/or other materials provided with the distribution.
  15. *
  16. * c) Neither the name of Cisco Systems, Inc. nor the names of its
  17. * contributors may be used to endorse or promote products derived
  18. * from this software without specific prior written permission.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  21. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  22. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  24. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  25. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  26. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  27. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  28. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  30. * THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #ifdef __FreeBSD__
  33. #include <sys/cdefs.h>
  34. __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 235828 2012-05-23 11:26:28Z tuexen $");
  35. #endif
  36. #include <netinet/sctp_os.h>
  37. #include <netinet/sctp_pcb.h>
  38. #include <netinet/sctputil.h>
  39. #include <netinet/sctp_var.h>
  40. #include <netinet/sctp_sysctl.h>
  41. #ifdef INET6
  42. #if defined(__Userspace__)
  43. #include <netinet6/sctp6_var.h>
  44. #endif
  45. #endif
  46. #include <netinet/sctp_header.h>
  47. #include <netinet/sctp_output.h>
  48. #include <netinet/sctp_uio.h>
  49. #include <netinet/sctp_timer.h>
  50. #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
  51. #include <netinet/sctp_auth.h>
  52. #include <netinet/sctp_asconf.h>
  53. #include <netinet/sctp_windows_addr.h>
  54. #if defined(__Userspace__)
  55. #include <netinet/sctp_constants.h>
  56. #endif
  57. #if defined(__APPLE__)
  58. #define APPLE_FILE_NO 8
  59. #endif
  60. #if defined(__Windows__)
  61. #if !defined(SCTP_LOCAL_TRACE_BUF)
  62. #include "eventrace_netinet.h"
  63. #include "sctputil.tmh" /* this is the file that will be auto generated */
  64. #endif
  65. #else
  66. #ifndef KTR_SCTP
  67. #define KTR_SCTP KTR_SUBSYS
  68. #endif
  69. #endif
  70. extern struct sctp_cc_functions sctp_cc_functions[];
  71. extern struct sctp_ss_functions sctp_ss_functions[];
  72. void
  73. sctp_sblog(struct sockbuf *sb,
  74. struct sctp_tcb *stcb, int from, int incr)
  75. {
  76. struct sctp_cwnd_log sctp_clog;
  77. sctp_clog.x.sb.stcb = stcb;
  78. sctp_clog.x.sb.so_sbcc = sb->sb_cc;
  79. if (stcb)
  80. sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
  81. else
  82. sctp_clog.x.sb.stcb_sbcc = 0;
  83. sctp_clog.x.sb.incr = incr;
  84. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  85. SCTP_LOG_EVENT_SB,
  86. from,
  87. sctp_clog.x.misc.log1,
  88. sctp_clog.x.misc.log2,
  89. sctp_clog.x.misc.log3,
  90. sctp_clog.x.misc.log4);
  91. }
/*
 * Log an endpoint/association close event.  'loc' is a caller-supplied
 * location code identifying the call site.  inp must be non-NULL
 * (inp->sctp_flags is read unconditionally); stcb may be NULL.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	/* misc.log1..log4 alias the event payload words of the union */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
/*
 * Log a round-trip-time sample for the given destination.
 * net->rtt is divided by 1000 before logging — presumably stored in
 * microseconds and logged in milliseconds; confirm against the
 * sctp_nets definition.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *) net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
  129. void
  130. sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
  131. {
  132. struct sctp_cwnd_log sctp_clog;
  133. sctp_clog.x.strlog.stcb = stcb;
  134. sctp_clog.x.strlog.n_tsn = tsn;
  135. sctp_clog.x.strlog.n_sseq = sseq;
  136. sctp_clog.x.strlog.e_tsn = 0;
  137. sctp_clog.x.strlog.e_sseq = 0;
  138. sctp_clog.x.strlog.strm = stream;
  139. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  140. SCTP_LOG_EVENT_STRM,
  141. from,
  142. sctp_clog.x.misc.log1,
  143. sctp_clog.x.misc.log2,
  144. sctp_clog.x.misc.log3,
  145. sctp_clog.x.misc.log4);
  146. }
  147. void
  148. sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
  149. {
  150. struct sctp_cwnd_log sctp_clog;
  151. sctp_clog.x.nagle.stcb = (void *)stcb;
  152. sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
  153. sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
  154. sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
  155. sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
  156. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  157. SCTP_LOG_EVENT_NAGLE,
  158. action,
  159. sctp_clog.x.misc.log1,
  160. sctp_clog.x.misc.log2,
  161. sctp_clog.x.misc.log3,
  162. sctp_clog.x.misc.log4);
  163. }
  164. void
  165. sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
  166. {
  167. struct sctp_cwnd_log sctp_clog;
  168. sctp_clog.x.sack.cumack = cumack;
  169. sctp_clog.x.sack.oldcumack = old_cumack;
  170. sctp_clog.x.sack.tsn = tsn;
  171. sctp_clog.x.sack.numGaps = gaps;
  172. sctp_clog.x.sack.numDups = dups;
  173. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  174. SCTP_LOG_EVENT_SACK,
  175. from,
  176. sctp_clog.x.misc.log1,
  177. sctp_clog.x.misc.log2,
  178. sctp_clog.x.misc.log3,
  179. sctp_clog.x.misc.log4);
  180. }
  181. void
  182. sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
  183. {
  184. struct sctp_cwnd_log sctp_clog;
  185. memset(&sctp_clog, 0, sizeof(sctp_clog));
  186. sctp_clog.x.map.base = map;
  187. sctp_clog.x.map.cum = cum;
  188. sctp_clog.x.map.high = high;
  189. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  190. SCTP_LOG_EVENT_MAP,
  191. from,
  192. sctp_clog.x.misc.log1,
  193. sctp_clog.x.misc.log2,
  194. sctp_clog.x.misc.log3,
  195. sctp_clog.x.misc.log4);
  196. }
  197. void
  198. sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
  199. int from)
  200. {
  201. struct sctp_cwnd_log sctp_clog;
  202. memset(&sctp_clog, 0, sizeof(sctp_clog));
  203. sctp_clog.x.fr.largest_tsn = biggest_tsn;
  204. sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
  205. sctp_clog.x.fr.tsn = tsn;
  206. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  207. SCTP_LOG_EVENT_FR,
  208. from,
  209. sctp_clog.x.misc.log1,
  210. sctp_clog.x.misc.log2,
  211. sctp_clog.x.misc.log3,
  212. sctp_clog.x.misc.log4);
  213. }
  214. void
  215. sctp_log_mb(struct mbuf *m, int from)
  216. {
  217. struct sctp_cwnd_log sctp_clog;
  218. sctp_clog.x.mb.mp = m;
  219. sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
  220. sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
  221. sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
  222. if (SCTP_BUF_IS_EXTENDED(m)) {
  223. sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
  224. #if defined(__APPLE__)
  225. /* APPLE does not use a ref_cnt, but a forward/backward ref queue */
  226. #else
  227. sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
  228. #endif
  229. } else {
  230. sctp_clog.x.mb.ext = 0;
  231. sctp_clog.x.mb.refcnt = 0;
  232. }
  233. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  234. SCTP_LOG_EVENT_MBUF,
  235. from,
  236. sctp_clog.x.misc.log1,
  237. sctp_clog.x.misc.log2,
  238. sctp_clog.x.misc.log3,
  239. sctp_clog.x.misc.log4);
  240. }
  241. void
  242. sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
  243. int from)
  244. {
  245. struct sctp_cwnd_log sctp_clog;
  246. if (control == NULL) {
  247. SCTP_PRINTF("Gak log of NULL?\n");
  248. return;
  249. }
  250. sctp_clog.x.strlog.stcb = control->stcb;
  251. sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
  252. sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
  253. sctp_clog.x.strlog.strm = control->sinfo_stream;
  254. if (poschk != NULL) {
  255. sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
  256. sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
  257. } else {
  258. sctp_clog.x.strlog.e_tsn = 0;
  259. sctp_clog.x.strlog.e_sseq = 0;
  260. }
  261. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  262. SCTP_LOG_EVENT_STRM,
  263. from,
  264. sctp_clog.x.misc.log1,
  265. sctp_clog.x.misc.log2,
  266. sctp_clog.x.misc.log3,
  267. sctp_clog.x.misc.log4);
  268. }
  269. void
  270. sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
  271. {
  272. struct sctp_cwnd_log sctp_clog;
  273. sctp_clog.x.cwnd.net = net;
  274. if (stcb->asoc.send_queue_cnt > 255)
  275. sctp_clog.x.cwnd.cnt_in_send = 255;
  276. else
  277. sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
  278. if (stcb->asoc.stream_queue_cnt > 255)
  279. sctp_clog.x.cwnd.cnt_in_str = 255;
  280. else
  281. sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
  282. if (net) {
  283. sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
  284. sctp_clog.x.cwnd.inflight = net->flight_size;
  285. sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
  286. sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
  287. sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
  288. }
  289. if (SCTP_CWNDLOG_PRESEND == from) {
  290. sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
  291. }
  292. sctp_clog.x.cwnd.cwnd_augment = augment;
  293. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  294. SCTP_LOG_EVENT_CWND,
  295. from,
  296. sctp_clog.x.misc.log1,
  297. sctp_clog.x.misc.log2,
  298. sctp_clog.x.misc.log3,
  299. sctp_clog.x.misc.log4);
  300. }
#ifndef __APPLE__
/*
 * Log the ownership state of the various SCTP locks (tcb, inp, create,
 * info, socket buffers) for deadlock debugging.  inp and stcb may each
 * be NULL; unknown lock states are recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *) NULL;
	}
	sctp_clog.x.lock.inp = (void *) inp;
#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Older FreeBSD used a mutex for the endpoint info lock; newer uses a rwlock. */
#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
#else
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
#endif
	if (inp && (inp->sctp_socket)) {
		/* NOTE(review): sock_lock samples so_rcv.sb_mtx, same as
		 * sockrcvbuf_lock — possibly intended to be a socket-level
		 * lock; confirm. */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
#endif
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
#endif
  350. void
  351. sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
  352. {
  353. struct sctp_cwnd_log sctp_clog;
  354. memset(&sctp_clog, 0, sizeof(sctp_clog));
  355. sctp_clog.x.cwnd.net = net;
  356. sctp_clog.x.cwnd.cwnd_new_value = error;
  357. sctp_clog.x.cwnd.inflight = net->flight_size;
  358. sctp_clog.x.cwnd.cwnd_augment = burst;
  359. if (stcb->asoc.send_queue_cnt > 255)
  360. sctp_clog.x.cwnd.cnt_in_send = 255;
  361. else
  362. sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
  363. if (stcb->asoc.stream_queue_cnt > 255)
  364. sctp_clog.x.cwnd.cnt_in_str = 255;
  365. else
  366. sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
  367. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  368. SCTP_LOG_EVENT_MAXBURST,
  369. from,
  370. sctp_clog.x.misc.log1,
  371. sctp_clog.x.misc.log2,
  372. sctp_clog.x.misc.log3,
  373. sctp_clog.x.misc.log4);
  374. }
  375. void
  376. sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
  377. {
  378. struct sctp_cwnd_log sctp_clog;
  379. sctp_clog.x.rwnd.rwnd = peers_rwnd;
  380. sctp_clog.x.rwnd.send_size = snd_size;
  381. sctp_clog.x.rwnd.overhead = overhead;
  382. sctp_clog.x.rwnd.new_rwnd = 0;
  383. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  384. SCTP_LOG_EVENT_RWND,
  385. from,
  386. sctp_clog.x.misc.log1,
  387. sctp_clog.x.misc.log2,
  388. sctp_clog.x.misc.log3,
  389. sctp_clog.x.misc.log4);
  390. }
  391. void
  392. sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
  393. {
  394. struct sctp_cwnd_log sctp_clog;
  395. sctp_clog.x.rwnd.rwnd = peers_rwnd;
  396. sctp_clog.x.rwnd.send_size = flight_size;
  397. sctp_clog.x.rwnd.overhead = overhead;
  398. sctp_clog.x.rwnd.new_rwnd = a_rwndval;
  399. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  400. SCTP_LOG_EVENT_RWND,
  401. from,
  402. sctp_clog.x.misc.log1,
  403. sctp_clog.x.misc.log2,
  404. sctp_clog.x.misc.log3,
  405. sctp_clog.x.misc.log4);
  406. }
  407. void
  408. sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
  409. {
  410. struct sctp_cwnd_log sctp_clog;
  411. sctp_clog.x.mbcnt.total_queue_size = total_oq;
  412. sctp_clog.x.mbcnt.size_change = book;
  413. sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
  414. sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
  415. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  416. SCTP_LOG_EVENT_MBCNT,
  417. from,
  418. sctp_clog.x.misc.log1,
  419. sctp_clog.x.misc.log2,
  420. sctp_clog.x.misc.log3,
  421. sctp_clog.x.misc.log4);
  422. }
  423. void
  424. sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
  425. {
  426. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  427. SCTP_LOG_MISC_EVENT,
  428. from,
  429. a, b, c, d);
  430. }
  431. void
  432. sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
  433. {
  434. struct sctp_cwnd_log sctp_clog;
  435. sctp_clog.x.wake.stcb = (void *)stcb;
  436. sctp_clog.x.wake.wake_cnt = wake_cnt;
  437. sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
  438. sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
  439. sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
  440. if (stcb->asoc.stream_queue_cnt < 0xff)
  441. sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
  442. else
  443. sctp_clog.x.wake.stream_qcnt = 0xff;
  444. if (stcb->asoc.chunks_on_out_queue < 0xff)
  445. sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
  446. else
  447. sctp_clog.x.wake.chunks_on_oque = 0xff;
  448. sctp_clog.x.wake.sctpflags = 0;
  449. /* set in the defered mode stuff */
  450. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
  451. sctp_clog.x.wake.sctpflags |= 1;
  452. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
  453. sctp_clog.x.wake.sctpflags |= 2;
  454. if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
  455. sctp_clog.x.wake.sctpflags |= 4;
  456. /* what about the sb */
  457. if (stcb->sctp_socket) {
  458. struct socket *so = stcb->sctp_socket;
  459. sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
  460. } else {
  461. sctp_clog.x.wake.sbflags = 0xff;
  462. }
  463. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  464. SCTP_LOG_EVENT_WAKE,
  465. from,
  466. sctp_clog.x.misc.log1,
  467. sctp_clog.x.misc.log2,
  468. sctp_clog.x.misc.log3,
  469. sctp_clog.x.misc.log4);
  470. }
  471. void
  472. sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
  473. {
  474. struct sctp_cwnd_log sctp_clog;
  475. sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
  476. sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
  477. sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
  478. sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
  479. sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
  480. sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
  481. sctp_clog.x.blk.sndlen = sendlen;
  482. SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
  483. SCTP_LOG_EVENT_BLOCK,
  484. from,
  485. sctp_clog.x.misc.log1,
  486. sctp_clog.x.misc.log2,
  487. sctp_clog.x.misc.log3,
  488. sctp_clog.x.misc.log4);
  489. }
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: tracing is read out via
 * the kernel trace facility instead, so this always succeeds with no
 * data.  Returns 0.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
  496. #ifdef SCTP_AUDITING_ENABLED
  497. uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
  498. static int sctp_audit_indx = 0;
  499. static
  500. void
  501. sctp_print_audit_report(void)
  502. {
  503. int i;
  504. int cnt;
  505. cnt = 0;
  506. for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
  507. if ((sctp_audit_data[i][0] == 0xe0) &&
  508. (sctp_audit_data[i][1] == 0x01)) {
  509. cnt = 0;
  510. SCTP_PRINTF("\n");
  511. } else if (sctp_audit_data[i][0] == 0xf0) {
  512. cnt = 0;
  513. SCTP_PRINTF("\n");
  514. } else if ((sctp_audit_data[i][0] == 0xc0) &&
  515. (sctp_audit_data[i][1] == 0x01)) {
  516. SCTP_PRINTF("\n");
  517. cnt = 0;
  518. }
  519. SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
  520. (uint32_t) sctp_audit_data[i][1]);
  521. cnt++;
  522. if ((cnt % 14) == 0)
  523. SCTP_PRINTF("\n");
  524. }
  525. for (i = 0; i < sctp_audit_indx; i++) {
  526. if ((sctp_audit_data[i][0] == 0xe0) &&
  527. (sctp_audit_data[i][1] == 0x01)) {
  528. cnt = 0;
  529. SCTP_PRINTF("\n");
  530. } else if (sctp_audit_data[i][0] == 0xf0) {
  531. cnt = 0;
  532. SCTP_PRINTF("\n");
  533. } else if ((sctp_audit_data[i][0] == 0xc0) &&
  534. (sctp_audit_data[i][1] == 0x01)) {
  535. SCTP_PRINTF("\n");
  536. cnt = 0;
  537. }
  538. SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
  539. (uint32_t) sctp_audit_data[i][1]);
  540. cnt++;
  541. if ((cnt % 14) == 0)
  542. SCTP_PRINTF("\n");
  543. }
  544. SCTP_PRINTF("\n");
  545. }
  546. void
  547. sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  548. struct sctp_nets *net)
  549. {
  550. int resend_cnt, tot_out, rep, tot_book_cnt;
  551. struct sctp_nets *lnet;
  552. struct sctp_tmit_chunk *chk;
  553. sctp_audit_data[sctp_audit_indx][0] = 0xAA;
  554. sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
  555. sctp_audit_indx++;
  556. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  557. sctp_audit_indx = 0;
  558. }
  559. if (inp == NULL) {
  560. sctp_audit_data[sctp_audit_indx][0] = 0xAF;
  561. sctp_audit_data[sctp_audit_indx][1] = 0x01;
  562. sctp_audit_indx++;
  563. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  564. sctp_audit_indx = 0;
  565. }
  566. return;
  567. }
  568. if (stcb == NULL) {
  569. sctp_audit_data[sctp_audit_indx][0] = 0xAF;
  570. sctp_audit_data[sctp_audit_indx][1] = 0x02;
  571. sctp_audit_indx++;
  572. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  573. sctp_audit_indx = 0;
  574. }
  575. return;
  576. }
  577. sctp_audit_data[sctp_audit_indx][0] = 0xA1;
  578. sctp_audit_data[sctp_audit_indx][1] =
  579. (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
  580. sctp_audit_indx++;
  581. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  582. sctp_audit_indx = 0;
  583. }
  584. rep = 0;
  585. tot_book_cnt = 0;
  586. resend_cnt = tot_out = 0;
  587. TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
  588. if (chk->sent == SCTP_DATAGRAM_RESEND) {
  589. resend_cnt++;
  590. } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
  591. tot_out += chk->book_size;
  592. tot_book_cnt++;
  593. }
  594. }
  595. if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
  596. sctp_audit_data[sctp_audit_indx][0] = 0xAF;
  597. sctp_audit_data[sctp_audit_indx][1] = 0xA1;
  598. sctp_audit_indx++;
  599. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  600. sctp_audit_indx = 0;
  601. }
  602. SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
  603. resend_cnt, stcb->asoc.sent_queue_retran_cnt);
  604. rep = 1;
  605. stcb->asoc.sent_queue_retran_cnt = resend_cnt;
  606. sctp_audit_data[sctp_audit_indx][0] = 0xA2;
  607. sctp_audit_data[sctp_audit_indx][1] =
  608. (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
  609. sctp_audit_indx++;
  610. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  611. sctp_audit_indx = 0;
  612. }
  613. }
  614. if (tot_out != stcb->asoc.total_flight) {
  615. sctp_audit_data[sctp_audit_indx][0] = 0xAF;
  616. sctp_audit_data[sctp_audit_indx][1] = 0xA2;
  617. sctp_audit_indx++;
  618. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  619. sctp_audit_indx = 0;
  620. }
  621. rep = 1;
  622. SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
  623. (int)stcb->asoc.total_flight);
  624. stcb->asoc.total_flight = tot_out;
  625. }
  626. if (tot_book_cnt != stcb->asoc.total_flight_count) {
  627. sctp_audit_data[sctp_audit_indx][0] = 0xAF;
  628. sctp_audit_data[sctp_audit_indx][1] = 0xA5;
  629. sctp_audit_indx++;
  630. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  631. sctp_audit_indx = 0;
  632. }
  633. rep = 1;
  634. SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
  635. stcb->asoc.total_flight_count = tot_book_cnt;
  636. }
  637. tot_out = 0;
  638. TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
  639. tot_out += lnet->flight_size;
  640. }
  641. if (tot_out != stcb->asoc.total_flight) {
  642. sctp_audit_data[sctp_audit_indx][0] = 0xAF;
  643. sctp_audit_data[sctp_audit_indx][1] = 0xA3;
  644. sctp_audit_indx++;
  645. if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
  646. sctp_audit_indx = 0;
  647. }
  648. rep = 1;
  649. SCTP_PRINTF("real flight:%d net total was %d\n",
  650. stcb->asoc.total_flight, tot_out);
  651. /* now corrective action */
  652. TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
  653. tot_out = 0;
  654. TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
  655. if ((chk->whoTo == lnet) &&
  656. (chk->sent < SCTP_DATAGRAM_RESEND)) {
  657. tot_out += chk->book_size;
  658. }
  659. }
  660. if (lnet->flight_size != tot_out) {
  661. SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
  662. lnet, lnet->flight_size,
  663. tot_out);
  664. lnet->flight_size = tot_out;
  665. }
  666. }
  667. }
  668. if (rep) {
  669. sctp_print_audit_report();
  670. }
  671. }
/*
 * Append one two-byte record (event, detail) to the audit ring,
 * advancing and wrapping the shared write index.  No locking — callers
 * presumably hold the appropriate SCTP locks; confirm.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}
  683. /*
  684. * sctp_stop_timers_for_shutdown() should be called
  685. * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
  686. * state to make sure that all timers are stopped.
  687. */
  688. void
  689. sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
  690. {
  691. struct sctp_association *asoc;
  692. struct sctp_nets *net;
  693. asoc = &stcb->asoc;
  694. (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
  695. (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
  696. (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
  697. (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
  698. (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
  699. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  700. (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
  701. (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
  702. }
  703. }
  704. /*
  705. * a list of sizes based on typical mtu's, used only if next hop size not
  706. * returned.
  707. */
  708. static uint32_t sctp_mtu_sizes[] = {
  709. 68,
  710. 296,
  711. 508,
  712. 512,
  713. 544,
  714. 576,
  715. 1006,
  716. 1492,
  717. 1500,
  718. 1536,
  719. 2002,
  720. 2048,
  721. 4352,
  722. 4464,
  723. 8166,
  724. 17914,
  725. 32000,
  726. 65535
  727. };
  728. /*
  729. * Return the largest MTU smaller than val. If there is no
  730. * entry, just return val.
  731. */
  732. uint32_t
  733. sctp_get_prev_mtu(uint32_t val)
  734. {
  735. uint32_t i;
  736. if (val <= sctp_mtu_sizes[0]) {
  737. return (val);
  738. }
  739. for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
  740. if (val <= sctp_mtu_sizes[i]) {
  741. break;
  742. }
  743. }
  744. return (sctp_mtu_sizes[i - 1]);
  745. }
  746. /*
  747. * Return the smallest MTU larger than val. If there is no
  748. * entry, just return val.
  749. */
  750. uint32_t
  751. sctp_get_next_mtu(uint32_t val)
  752. {
  753. /* select another MTU that is just bigger than this one */
  754. uint32_t i;
  755. for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
  756. if (val < sctp_mtu_sizes[i]) {
  757. return (sctp_mtu_sizes[i]);
  758. }
  759. }
  760. return (val);
  761. }
  762. void
  763. sctp_fill_random_store(struct sctp_pcb *m)
  764. {
  765. /*
  766. * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
  767. * our counter. The result becomes our good random numbers and we
  768. * then setup to give these out. Note that we do no locking to
  769. * protect this. This is ok, since if competing folks call this we
  770. * will get more gobbled gook in the random store which is what we
  771. * want. There is a danger that two guys will use the same random
  772. * numbers, but thats ok too since that is random as well :->
  773. */
  774. m->store_at = 0;
  775. (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
  776. sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
  777. sizeof(m->random_counter), (uint8_t *)m->random_store);
  778. m->random_counter++;
  779. }
  780. uint32_t
  781. sctp_select_initial_TSN(struct sctp_pcb *inp)
  782. {
  783. /*
  784. * A true implementation should use random selection process to get
  785. * the initial stream sequence number, using RFC1750 as a good
  786. * guideline
  787. */
  788. uint32_t x, *xp;
  789. uint8_t *p;
  790. int store_at, new_store;
  791. if (inp->initial_sequence_debug != 0) {
  792. uint32_t ret;
  793. ret = inp->initial_sequence_debug;
  794. inp->initial_sequence_debug++;
  795. return (ret);
  796. }
  797. retry:
  798. store_at = inp->store_at;
  799. new_store = store_at + sizeof(uint32_t);
  800. if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
  801. new_store = 0;
  802. }
  803. if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
  804. goto retry;
  805. }
  806. if (new_store == 0) {
  807. /* Refill the random store */
  808. sctp_fill_random_store(inp);
  809. }
  810. p = &inp->random_store[store_at];
  811. xp = (uint32_t *)p;
  812. x = *xp;
  813. return (x);
  814. }
  815. uint32_t
  816. sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
  817. {
  818. uint32_t x;
  819. struct timeval now;
  820. if (check) {
  821. (void)SCTP_GETTIME_TIMEVAL(&now);
  822. }
  823. for (;;) {
  824. x = sctp_select_initial_TSN(&inp->sctp_ep);
  825. if (x == 0) {
  826. /* we never use 0 */
  827. continue;
  828. }
  829. if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
  830. break;
  831. }
  832. }
  833. return (x);
  834. }
  835. int
  836. sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
  837. uint32_t override_tag, uint32_t vrf_id)
  838. {
  839. struct sctp_association *asoc;
  840. /*
  841. * Anything set to zero is taken care of by the allocation routine's
  842. * bzero
  843. */
  844. /*
  845. * Up front select what scoping to apply on addresses I tell my peer
  846. * Not sure what to do with these right now, we will need to come up
  847. * with a way to set them. We may need to pass them through from the
  848. * caller in the sctp_aloc_assoc() function.
  849. */
  850. int i;
  851. asoc = &stcb->asoc;
  852. /* init all variables to a known value. */
  853. SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
  854. asoc->max_burst = m->sctp_ep.max_burst;
  855. asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
  856. asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
  857. asoc->cookie_life = m->sctp_ep.def_cookie_life;
  858. asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
  859. asoc->ecn_allowed = m->sctp_ecn_enable;
  860. asoc->sctp_nr_sack_on_off = (uint8_t)SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
  861. asoc->sctp_cmt_pf = (uint8_t)0;
  862. asoc->sctp_frag_point = m->sctp_frag_point;
  863. asoc->sctp_features = m->sctp_features;
  864. asoc->default_dscp = m->sctp_ep.default_dscp;
  865. #ifdef INET6
  866. if (m->sctp_ep.default_flowlabel) {
  867. asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
  868. } else {
  869. if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
  870. asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
  871. asoc->default_flowlabel &= 0x000fffff;
  872. asoc->default_flowlabel |= 0x80000000;
  873. } else {
  874. asoc->default_flowlabel = 0;
  875. }
  876. }
  877. #endif
  878. asoc->sb_send_resv = 0;
  879. if (override_tag) {
  880. asoc->my_vtag = override_tag;
  881. } else {
  882. asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
  883. }
  884. /* Get the nonce tags */
  885. asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
  886. asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
  887. asoc->vrf_id = vrf_id;
  888. #ifdef SCTP_ASOCLOG_OF_TSNS
  889. asoc->tsn_in_at = 0;
  890. asoc->tsn_out_at = 0;
  891. asoc->tsn_in_wrapped = 0;
  892. asoc->tsn_out_wrapped = 0;
  893. asoc->cumack_log_at = 0;
  894. asoc->cumack_log_atsnt = 0;
  895. #endif
  896. #ifdef SCTP_FS_SPEC_LOG
  897. asoc->fs_index = 0;
  898. #endif
  899. asoc->refcnt = 0;
  900. asoc->assoc_up_sent = 0;
  901. asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
  902. sctp_select_initial_TSN(&m->sctp_ep);
  903. asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
  904. /* we are optimisitic here */
  905. asoc->peer_supports_pktdrop = 1;
  906. asoc->peer_supports_nat = 0;
  907. asoc->sent_queue_retran_cnt = 0;
  908. /* for CMT */
  909. asoc->last_net_cmt_send_started = NULL;
  910. /* This will need to be adjusted */
  911. asoc->last_acked_seq = asoc->init_seq_number - 1;
  912. asoc->advanced_peer_ack_point = asoc->last_acked_seq;
  913. asoc->asconf_seq_in = asoc->last_acked_seq;
  914. /* here we are different, we hold the next one we expect */
  915. asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
  916. asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
  917. asoc->initial_rto = m->sctp_ep.initial_rto;
  918. asoc->max_init_times = m->sctp_ep.max_init_times;
  919. asoc->max_send_times = m->sctp_ep.max_send_times;
  920. asoc->def_net_failure = m->sctp_ep.def_net_failure;
  921. asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
  922. asoc->free_chunk_cnt = 0;
  923. asoc->iam_blocking = 0;
  924. asoc->context = m->sctp_context;
  925. asoc->local_strreset_support = m->local_strreset_support;
  926. asoc->def_send = m->def_send;
  927. asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
  928. asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
  929. asoc->pr_sctp_cnt = 0;
  930. asoc->total_output_queue_size = 0;
  931. if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
  932. struct in6pcb *inp6;
  933. /* Its a V6 socket */
  934. inp6 = (struct in6pcb *)m;
  935. asoc->ipv6_addr_legal = 1;
  936. /* Now look at the binding flag to see if V4 will be legal */
  937. if (SCTP_IPV6_V6ONLY(inp6) == 0) {
  938. asoc->ipv4_addr_legal = 1;
  939. } else {
  940. /* V4 addresses are NOT legal on the association */
  941. asoc->ipv4_addr_legal = 0;
  942. }
  943. } else {
  944. /* Its a V4 socket, no - V6 */
  945. asoc->ipv4_addr_legal = 1;
  946. asoc->ipv6_addr_legal = 0;
  947. }
  948. asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
  949. asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
  950. asoc->smallest_mtu = m->sctp_frag_point;
  951. asoc->minrto = m->sctp_ep.sctp_minrto;
  952. asoc->maxrto = m->sctp_ep.sctp_maxrto;
  953. asoc->locked_on_sending = NULL;
  954. asoc->stream_locked_on = 0;
  955. asoc->ecn_echo_cnt_onq = 0;
  956. asoc->stream_locked = 0;
  957. asoc->send_sack = 1;
  958. LIST_INIT(&asoc->sctp_restricted_addrs);
  959. TAILQ_INIT(&asoc->nets);
  960. TAILQ_INIT(&asoc->pending_reply_queue);
  961. TAILQ_INIT(&asoc->asconf_ack_sent);
  962. /* Setup to fill the hb random cache at first HB */
  963. asoc->hb_random_idx = 4;
  964. asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
  965. stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
  966. stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
  967. stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
  968. stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
  969. /*
  970. * Now the stream parameters, here we allocate space for all streams
  971. * that we request by default.
  972. */
  973. asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
  974. m->sctp_ep.pre_open_stream_count;
  975. SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
  976. asoc->streamoutcnt * sizeof(struct sctp_stream_out),
  977. SCTP_M_STRMO);
  978. if (asoc->strmout == NULL) {
  979. /* big trouble no memory */
  980. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  981. return (ENOMEM);
  982. }
  983. for (i = 0; i < asoc->streamoutcnt; i++) {
  984. /*
  985. * inbound side must be set to 0xffff, also NOTE when we get
  986. * the INIT-ACK back (for INIT sender) we MUST reduce the
  987. * count (streamoutcnt) but first check if we sent to any of
  988. * the upper streams that were dropped (if some were). Those
  989. * that were dropped must be notified to the upper layer as
  990. * failed to send.
  991. */
  992. asoc->strmout[i].next_sequence_sent = 0x0;
  993. TAILQ_INIT(&asoc->strmout[i].outqueue);
  994. asoc->strmout[i].stream_no = i;
  995. asoc->strmout[i].last_msg_incomplete = 0;
  996. asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
  997. }
  998. asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
  999. /* Now the mapping array */
  1000. asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
  1001. SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
  1002. SCTP_M_MAP);
  1003. if (asoc->mapping_array == NULL) {
  1004. SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
  1005. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1006. return (ENOMEM);
  1007. }
  1008. memset(asoc->mapping_array, 0, asoc->mapping_array_size);
  1009. SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
  1010. SCTP_M_MAP);
  1011. if (asoc->nr_mapping_array == NULL) {
  1012. SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
  1013. SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
  1014. SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1015. return (ENOMEM);
  1016. }
  1017. memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
  1018. /* Now the init of the other outqueues */
  1019. TAILQ_INIT(&asoc->free_chunks);
  1020. TAILQ_INIT(&asoc->control_send_queue);
  1021. TAILQ_INIT(&asoc->asconf_send_queue);
  1022. TAILQ_INIT(&asoc->send_queue);
  1023. TAILQ_INIT(&asoc->sent_queue);
  1024. TAILQ_INIT(&asoc->reasmqueue);
  1025. TAILQ_INIT(&asoc->resetHead);
  1026. asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
  1027. TAILQ_INIT(&asoc->asconf_queue);
  1028. /* authentication fields */
  1029. asoc->authinfo.random = NULL;
  1030. asoc->authinfo.active_keyid = 0;
  1031. asoc->authinfo.assoc_key = NULL;
  1032. asoc->authinfo.assoc_keyid = 0;
  1033. asoc->authinfo.recv_key = NULL;
  1034. asoc->authinfo.recv_keyid = 0;
  1035. LIST_INIT(&asoc->shared_keys);
  1036. asoc->marked_retrans = 0;
  1037. asoc->port = m->sctp_ep.port;
  1038. asoc->timoinit = 0;
  1039. asoc->timodata = 0;
  1040. asoc->timosack = 0;
  1041. asoc->timoshutdown = 0;
  1042. asoc->timoheartbeat = 0;
  1043. asoc->timocookie = 0;
  1044. asoc->timoshutdownack = 0;
  1045. (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
  1046. asoc->discontinuity_time = asoc->start_time;
  1047. /* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
  1048. * the association is freed.
  1049. */
  1050. return (0);
  1051. }
  1052. void
  1053. sctp_print_mapping_array(struct sctp_association *asoc)
  1054. {
  1055. unsigned int i, limit;
  1056. SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
  1057. asoc->mapping_array_size,
  1058. asoc->mapping_array_base_tsn,
  1059. asoc->cumulative_tsn,
  1060. asoc->highest_tsn_inside_map,
  1061. asoc->highest_tsn_inside_nr_map);
  1062. for (limit = asoc->mapping_array_size; limit > 1; limit--) {
  1063. if (asoc->mapping_array[limit - 1] != 0) {
  1064. break;
  1065. }
  1066. }
  1067. SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
  1068. for (i = 0; i < limit; i++) {
  1069. SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
  1070. }
  1071. if (limit % 16)
  1072. SCTP_PRINTF("\n");
  1073. for (limit = asoc->mapping_array_size; limit > 1; limit--) {
  1074. if (asoc->nr_mapping_array[limit - 1]) {
  1075. break;
  1076. }
  1077. }
  1078. SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
  1079. for (i = 0; i < limit; i++) {
  1080. SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
  1081. }
  1082. if (limit % 16)
  1083. SCTP_PRINTF("\n");
  1084. }
  1085. int
  1086. sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
  1087. {
  1088. /* mapping array needs to grow */
  1089. uint8_t *new_array1, *new_array2;
  1090. uint32_t new_size;
  1091. new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
  1092. SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
  1093. SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
  1094. if ((new_array1 == NULL) || (new_array2 == NULL)) {
  1095. /* can't get more, forget it */
  1096. SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
  1097. if (new_array1) {
  1098. SCTP_FREE(new_array1, SCTP_M_MAP);
  1099. }
  1100. if (new_array2) {
  1101. SCTP_FREE(new_array2, SCTP_M_MAP);
  1102. }
  1103. return (-1);
  1104. }
  1105. memset(new_array1, 0, new_size);
  1106. memset(new_array2, 0, new_size);
  1107. memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
  1108. memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
  1109. SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
  1110. SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
  1111. asoc->mapping_array = new_array1;
  1112. asoc->nr_mapping_array = new_array2;
  1113. asoc->mapping_array_size = new_size;
  1114. return (0);
  1115. }
/*
 * Run one iterator to completion (or until it is externally stopped).
 *
 * Walks the global endpoint list starting at it->inp, skipping endpoints
 * whose PCB flags/features do not match the iterator's selection, and
 * invokes the per-endpoint (function_inp), per-association
 * (function_assoc) and end-of-endpoint (function_inp_end) callbacks.
 * The INP-info read lock and the global iterator lock are held for the
 * duration, but are periodically dropped and re-acquired so other
 * threads are not starved.  When the walk finishes, function_atend is
 * called and "it" itself is freed — the caller must not touch it again.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference that was taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above on the first pass. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs at most once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Nothing to iterate (or callback asked to skip): close out this endpoint. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
			                                   it->pointer,
			                                   it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a reference on both the assoc and the endpoint
			 * so neither can be freed while all locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* Someone asked us to stop while the locks were dropped. */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
#if !defined(__FreeBSD__)
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				            sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-take the locks and drop the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
				                                   it->pointer,
				                                   it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
/*
 * Drain the queue of pending iterators, running each one via
 * sctp_iterator_work().  The iterator work-queue (WQ) lock is held on
 * entry and on exit; it is released while an individual iterator runs
 * and re-taken afterwards.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_SET(it->vn);
#endif
		/* sctp_iterator_work() frees "it" when the iterator completes. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
		CURVNET_RESTORE();
#endif
		SCTP_IPI_ITERATOR_WQ_LOCK();
#if !defined(__FreeBSD__)
		/* On non-FreeBSD platforms the worker thread may be told to shut down. */
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			break;
		}
#endif
		/*sa_ignore FREED_MEMORY*/
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
  1296. static void
  1297. sctp_handle_addr_wq(void)
  1298. {
  1299. /* deal with the ADDR wq from the rtsock calls */
  1300. struct sctp_laddr *wi, *nwi;
  1301. struct sctp_asconf_iterator *asc;
  1302. SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
  1303. sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
  1304. if (asc == NULL) {
  1305. /* Try later, no memory */
  1306. sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
  1307. (struct sctp_inpcb *)NULL,
  1308. (struct sctp_tcb *)NULL,
  1309. (struct sctp_nets *)NULL);
  1310. return;
  1311. }
  1312. LIST_INIT(&asc->list_of_work);
  1313. asc->cnt = 0;
  1314. SCTP_WQ_ADDR_LOCK();
  1315. LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
  1316. LIST_REMOVE(wi, sctp_nxt_addr);
  1317. LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
  1318. asc->cnt++;
  1319. }
  1320. SCTP_WQ_ADDR_UNLOCK();
  1321. if (asc->cnt == 0) {
  1322. SCTP_FREE(asc, SCTP_M_ASC_IT);
  1323. } else {
  1324. (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
  1325. sctp_asconf_iterator_stcb,
  1326. NULL, /* No ep end for boundall */
  1327. SCTP_PCB_FLAGS_BOUNDALL,
  1328. SCTP_PCB_ANY_FEATURES,
  1329. SCTP_ASOC_ANY_STATE,
  1330. (void *)asc, 0,
  1331. sctp_asconf_iterator_end, NULL, 0);
  1332. }
  1333. }
  1334. void
  1335. sctp_timeout_handler(void *t)
  1336. {
  1337. struct sctp_inpcb *inp;
  1338. struct sctp_tcb *stcb;
  1339. struct sctp_nets *net;
  1340. struct sctp_timer *tmr;
  1341. #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  1342. struct socket *so;
  1343. #endif
  1344. int did_output, type;
  1345. tmr = (struct sctp_timer *)t;
  1346. inp = (struct sctp_inpcb *)tmr->ep;
  1347. stcb = (struct sctp_tcb *)tmr->tcb;
  1348. net = (struct sctp_nets *)tmr->net;
  1349. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1350. CURVNET_SET((struct vnet *)tmr->vnet);
  1351. #endif
  1352. did_output = 1;
  1353. #ifdef SCTP_AUDITING_ENABLED
  1354. sctp_audit_log(0xF0, (uint8_t) tmr->type);
  1355. sctp_auditing(3, inp, stcb, net);
  1356. #endif
  1357. /* sanity checks... */
  1358. if (tmr->self != (void *)tmr) {
  1359. /*
  1360. * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
  1361. * tmr);
  1362. */
  1363. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1364. CURVNET_RESTORE();
  1365. #endif
  1366. return;
  1367. }
  1368. tmr->stopped_from = 0xa001;
  1369. if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
  1370. /*
  1371. * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
  1372. * tmr->type);
  1373. */
  1374. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1375. CURVNET_RESTORE();
  1376. #endif
  1377. return;
  1378. }
  1379. tmr->stopped_from = 0xa002;
  1380. if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
  1381. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1382. CURVNET_RESTORE();
  1383. #endif
  1384. return;
  1385. }
  1386. /* if this is an iterator timeout, get the struct and clear inp */
  1387. tmr->stopped_from = 0xa003;
  1388. type = tmr->type;
  1389. if (inp) {
  1390. SCTP_INP_INCR_REF(inp);
  1391. if ((inp->sctp_socket == NULL) &&
  1392. ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
  1393. (tmr->type != SCTP_TIMER_TYPE_INIT) &&
  1394. (tmr->type != SCTP_TIMER_TYPE_SEND) &&
  1395. (tmr->type != SCTP_TIMER_TYPE_RECV) &&
  1396. (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
  1397. (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
  1398. (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
  1399. (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
  1400. (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
  1401. ) {
  1402. SCTP_INP_DECR_REF(inp);
  1403. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1404. CURVNET_RESTORE();
  1405. #endif
  1406. return;
  1407. }
  1408. }
  1409. tmr->stopped_from = 0xa004;
  1410. if (stcb) {
  1411. atomic_add_int(&stcb->asoc.refcnt, 1);
  1412. if (stcb->asoc.state == 0) {
  1413. atomic_add_int(&stcb->asoc.refcnt, -1);
  1414. if (inp) {
  1415. SCTP_INP_DECR_REF(inp);
  1416. }
  1417. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1418. CURVNET_RESTORE();
  1419. #endif
  1420. return;
  1421. }
  1422. }
  1423. tmr->stopped_from = 0xa005;
  1424. SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
  1425. if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
  1426. if (inp) {
  1427. SCTP_INP_DECR_REF(inp);
  1428. }
  1429. if (stcb) {
  1430. atomic_add_int(&stcb->asoc.refcnt, -1);
  1431. }
  1432. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1433. CURVNET_RESTORE();
  1434. #endif
  1435. return;
  1436. }
  1437. tmr->stopped_from = 0xa006;
  1438. if (stcb) {
  1439. SCTP_TCB_LOCK(stcb);
  1440. atomic_add_int(&stcb->asoc.refcnt, -1);
  1441. if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
  1442. ((stcb->asoc.state == 0) ||
  1443. (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
  1444. SCTP_TCB_UNLOCK(stcb);
  1445. if (inp) {
  1446. SCTP_INP_DECR_REF(inp);
  1447. }
  1448. #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1449. CURVNET_RESTORE();
  1450. #endif
  1451. return;
  1452. }
  1453. }
  1454. /* record in stopped what t-o occured */
  1455. tmr->stopped_from = tmr->type;
  1456. /* mark as being serviced now */
  1457. if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
  1458. /*
  1459. * Callout has been rescheduled.
  1460. */
  1461. goto get_out;
  1462. }
  1463. if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
  1464. /*
  1465. * Not active, so no action.
  1466. */
  1467. goto get_out;
  1468. }
  1469. SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
  1470. /* call the handler for the appropriate timer type */
  1471. switch (tmr->type) {
  1472. case SCTP_TIMER_TYPE_ZERO_COPY:
  1473. if (inp == NULL) {
  1474. break;
  1475. }
  1476. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  1477. SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
  1478. }
  1479. break;
  1480. case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
  1481. if (inp == NULL) {
  1482. break;
  1483. }
  1484. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  1485. SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
  1486. }
  1487. break;
  1488. case SCTP_TIMER_TYPE_ADDR_WQ:
  1489. sctp_handle_addr_wq();
  1490. break;
  1491. case SCTP_TIMER_TYPE_SEND:
  1492. if ((stcb == NULL) || (inp == NULL)) {
  1493. break;
  1494. }
  1495. SCTP_STAT_INCR(sctps_timodata);
  1496. stcb->asoc.timodata++;
  1497. stcb->asoc.num_send_timers_up--;
  1498. if (stcb->asoc.num_send_timers_up < 0) {
  1499. stcb->asoc.num_send_timers_up = 0;
  1500. }
  1501. SCTP_TCB_LOCK_ASSERT(stcb);
  1502. if (sctp_t3rxt_timer(inp, stcb, net)) {
  1503. /* no need to unlock on tcb its gone */
  1504. goto out_decr;
  1505. }
  1506. SCTP_TCB_LOCK_ASSERT(stcb);
  1507. #ifdef SCTP_AUDITING_ENABLED
  1508. sctp_auditing(4, inp, stcb, net);
  1509. #endif
  1510. sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
  1511. if ((stcb->asoc.num_send_timers_up == 0) &&
  1512. (stcb->asoc.sent_queue_cnt > 0)) {
  1513. struct sctp_tmit_chunk *chk;
  1514. /*
  1515. * safeguard. If there on some on the sent queue
  1516. * somewhere but no timers running something is
  1517. * wrong... so we start a timer on the first chunk
  1518. * on the send queue on whatever net it is sent to.
  1519. */
  1520. chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
  1521. sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
  1522. chk->whoTo);
  1523. }
  1524. break;
  1525. case SCTP_TIMER_TYPE_INIT:
  1526. if ((stcb == NULL) || (inp == NULL)) {
  1527. break;
  1528. }
  1529. SCTP_STAT_INCR(sctps_timoinit);
  1530. stcb->asoc.timoinit++;
  1531. if (sctp_t1init_timer(inp, stcb, net)) {
  1532. /* no need to unlock on tcb its gone */
  1533. goto out_decr;
  1534. }
  1535. /* We do output but not here */
  1536. did_output = 0;
  1537. break;
  1538. case SCTP_TIMER_TYPE_RECV:
  1539. if ((stcb == NULL) || (inp == NULL)) {
  1540. break;
  1541. }
  1542. SCTP_STAT_INCR(sctps_timosack);
  1543. stcb->asoc.timosack++;
  1544. sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
  1545. #ifdef SCTP_AUDITING_ENABLED
  1546. sctp_auditing(4, inp, stcb, net);
  1547. #endif
  1548. sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
  1549. break;
  1550. case SCTP_TIMER_TYPE_SHUTDOWN:
  1551. if ((stcb == NULL) || (inp == NULL)) {
  1552. break;
  1553. }
  1554. if (sctp_shutdown_timer(inp, stcb, net)) {
  1555. /* no need to unlock on tcb its gone */
  1556. goto out_decr;
  1557. }
  1558. SCTP_STAT_INCR(sctps_timoshutdown);
  1559. stcb->asoc.timoshutdown++;
  1560. #ifdef SCTP_AUDITING_ENABLED
  1561. sctp_auditing(4, inp, stcb, net);
  1562. #endif
  1563. sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
  1564. break;
  1565. case SCTP_TIMER_TYPE_HEARTBEAT:
  1566. if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
  1567. break;
  1568. }
  1569. SCTP_STAT_INCR(sctps_timoheartbeat);
  1570. stcb->asoc.timoheartbeat

Large files files are truncated, but you can click here to view the full file