
/bsd/sys/netinet/in_mcast.cc

https://gitlab.com/jforge/osv


/*-
 * Copyright (c) 2007-2009 Bruce Simpson.
 * Copyright (c) 2005 Robert N. M. Watson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IPv4 multicast socket, group, and socket option processing module.
 */

#include <sys/cdefs.h>

#include <bsd/porting/netport.h>
#include <bsd/porting/sync_stub.h>

#include <bsd/sys/sys/param.h>
#include <bsd/sys/sys/mbuf.h>
#include <bsd/sys/sys/protosw.h>
#include <bsd/sys/sys/socket.h>
#include <bsd/sys/sys/socketvar.h>
#include <bsd/sys/sys/protosw.h>
#include <bsd/sys/sys/tree.h>

#include <bsd/sys/net/if.h>
#include <bsd/sys/net/if_dl.h>
#include <bsd/sys/net/route.h>
#include <bsd/sys/net/vnet.h>

#include <bsd/sys/netinet/in.h>
#include <bsd/sys/netinet/in_systm.h>
#include <bsd/sys/netinet/in_pcb.h>
#include <bsd/sys/netinet/in_var.h>
#include <bsd/sys/netinet/ip_var.h>
#include <bsd/sys/netinet/igmp_var.h>

#ifndef KTR_IGMPV3
#define KTR_IGMPV3 KTR_INET
#endif

#ifndef __SOCKUNION_DECLARED
union sockunion {
    struct bsd_sockaddr_storage ss;
    struct bsd_sockaddr         sa;
    struct bsd_sockaddr_dl      sdl;
    struct bsd_sockaddr_in      sin;
};
typedef union sockunion sockunion_t;
#define __SOCKUNION_DECLARED
#endif /* __SOCKUNION_DECLARED */
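
/*
 * Illustrative sketch of how sockunion is used by the option-processing
 * code below: one bsd_sockaddr_storage-sized buffer can be viewed as
 * whichever concrete address type is needed, without serial casts.
 * 'gsr' is a hypothetical group_source_req filled in by sooptcopyin(),
 * and process_group() is a hypothetical helper:
 *
 *	sockunion_t *gsa = (sockunion_t *)&gsr.gsr_group;
 *	if (gsa->sa.sa_family == AF_INET)
 *		process_group(gsa->sin.sin_addr);
 */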

MALLOC_DEFINE(M_INMFILTER, "in_mfilter",
    "IPv4 multicast PCB-layer source filter");
MALLOC_DEFINE(M_IPMADDR, "in_multi", "IPv4 multicast group");
MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "IPv4 multicast options");
MALLOC_DEFINE(M_IPMSOURCE, "ip_msource",
    "IPv4 multicast IGMP-layer source filter");

/*
 * Locking:
 * - Lock order is: Giant, INP_WLOCK, IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
 * - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however
 *   it can be taken by code in net/if.c also.
 * - ip_moptions and in_mfilter are covered by the INP_WLOCK.
 *
 * struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly
 * any need for in_multi itself to be virtualized -- it is bound to an ifp
 * anyway no matter what happens.
 */
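
/*
 * Minimal sketch of the lock order documented above (illustrative only;
 * 'inp' is a hypothetical PCB held by the caller). A path that needs
 * all three layer locks nests them strictly in this order:
 *
 *	INP_WLOCK(inp);
 *	IN_MULTI_LOCK();
 *	IGMP_LOCK();
 *	...
 *	IGMP_UNLOCK();
 *	IN_MULTI_UNLOCK();
 *	INP_WUNLOCK(inp);
 */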
struct mtx in_multi_mtx = {};

/*
 * Functions with non-static linkage defined in this file should be
 * declared in in_var.h:
 *  imo_multi_filter()
 *  in_addmulti()
 *  in_delmulti()
 *  in_joingroup()
 *  in_joingroup_locked()
 *  in_leavegroup()
 *  in_leavegroup_locked()
 * and ip_var.h:
 *  inp_freemoptions()
 *  inp_getmoptions()
 *  inp_setmoptions()
 *
 * XXX: Both carp and pf need to use the legacy (*,G) KPIs in_addmulti()
 * and in_delmulti().
 */
static void	imf_commit(struct in_mfilter *);
static int	imf_get_source(struct in_mfilter *imf,
		    const struct bsd_sockaddr_in *psin,
		    struct in_msource **);
static struct in_msource *
		imf_graft(struct in_mfilter *, const uint8_t,
		    const struct bsd_sockaddr_in *);
static void	imf_leave(struct in_mfilter *);
static int	imf_prune(struct in_mfilter *, const struct bsd_sockaddr_in *);
static void	imf_purge(struct in_mfilter *);
static void	imf_rollback(struct in_mfilter *);
static void	imf_reap(struct in_mfilter *);
static int	imo_grow(struct ip_moptions *);
static size_t	imo_match_group(const struct ip_moptions *,
		    const struct ifnet *, const struct bsd_sockaddr *);
static struct in_msource *
		imo_match_source(const struct ip_moptions *, const size_t,
		    const struct bsd_sockaddr *);
static void	ims_merge(struct ip_msource *ims,
		    const struct in_msource *lims, const int rollback);
static int	in_getmulti(struct ifnet *, const struct in_addr *,
		    struct in_multi **);
static int	inm_get_source(struct in_multi *inm, const in_addr_t haddr,
		    const int noalloc, struct ip_msource **pims);
static int	inm_is_ifp_detached(const struct in_multi *);
static int	inm_merge(struct in_multi *, /*const*/ struct in_mfilter *);
static void	inm_purge(struct in_multi *);
static void	inm_reap(struct in_multi *);
static struct ip_moptions *
		inp_findmoptions(struct inpcb *);
static int	inp_get_source_filters(struct inpcb *, struct sockopt *);
static int	inp_join_group(struct inpcb *, struct sockopt *);
static int	inp_leave_group(struct inpcb *, struct sockopt *);
static struct ifnet *
		inp_lookup_mcast_ifp(const struct inpcb *,
		    const struct bsd_sockaddr_in *, const struct in_addr);
static int	inp_block_unblock_source(struct inpcb *, struct sockopt *);
static int	inp_set_multicast_if(struct inpcb *, struct sockopt *);
static int	inp_set_source_filters(struct inpcb *, struct sockopt *);

SYSCTL_NODE(_net_inet_ip, OID_AUTO, mcast, CTLFLAG_RW, 0, "IPv4 multicast");

static u_long in_mcast_maxgrpsrc = IP_MAX_GROUP_SRC_FILTER;
SYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc,
    CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxgrpsrc, 0,
    "Max source filters per group");
TUNABLE_ULONG("net.inet.ip.mcast.maxgrpsrc", &in_mcast_maxgrpsrc);

static u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER;
SYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc,
    CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxsocksrc, 0,
    "Max source filters per socket");
TUNABLE_ULONG("net.inet.ip.mcast.maxsocksrc", &in_mcast_maxsocksrc);

int in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP;
SYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_TUN,
    &in_mcast_loop, 0, "Loopback multicast datagrams by default");
TUNABLE_INT("net.inet.ip.mcast.loop", &in_mcast_loop);

SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip_mcast_filters,
    "Per-interface stack-wide source filters");

/*
 * Inline function which wraps assertions for a valid ifp.
 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
 * is detached.
 */
static int __inline
inm_is_ifp_detached(const struct in_multi *inm)
{
    struct ifnet *ifp;

    KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
    ifp = inm->inm_ifma->ifma_ifp;

    if (ifp != NULL) {
        /*
         * Sanity check that netinet's notion of ifp is the
         * same as net's.
         */
        KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
    }

    return (ifp == NULL);
}

/*
 * Initialize an in_mfilter structure to a known state at t0, t1
 * with an empty source filter list.
 */
static __inline void
imf_init(struct in_mfilter *imf, const int st0, const int st1)
{
    memset(imf, 0, sizeof(struct in_mfilter));
    RB_INIT(&imf->imf_sources);
    imf->imf_st[0] = st0;
    imf->imf_st[1] = st1;
}

/*
 * Resize the ip_moptions vector to the next power-of-two minus 1.
 * May be called with locks held; do not sleep.
 */
static int
imo_grow(struct ip_moptions *imo)
{
    struct in_multi   **nmships;
    struct in_multi   **omships;
    struct in_mfilter  *nmfilters;
    struct in_mfilter  *omfilters;
    size_t idx;
    size_t newmax;
    size_t oldmax;

    nmships = NULL;
    nmfilters = NULL;
    omships = imo->imo_membership;
    omfilters = imo->imo_mfilters;
    oldmax = imo->imo_max_memberships;
    newmax = ((oldmax + 1) * 2) - 1;

    if (newmax <= IP_MAX_MEMBERSHIPS) {
        nmships = (struct in_multi **)realloc(omships,
            sizeof(struct in_multi *) * newmax);
        nmfilters = (struct in_mfilter *)realloc(omfilters,
            sizeof(struct in_mfilter) * newmax);
        if (nmships != NULL && nmfilters != NULL) {
            /* Initialize newly allocated source filter heads. */
            for (idx = oldmax; idx < newmax; idx++) {
                imf_init(&nmfilters[idx], MCAST_UNDEFINED,
                    MCAST_EXCLUDE);
            }
            imo->imo_max_memberships = newmax;
            imo->imo_membership = nmships;
            imo->imo_mfilters = nmfilters;
        }
    }

    if (nmships == NULL || nmfilters == NULL) {
        if (nmships != NULL)
            free(nmships);
        if (nmfilters != NULL)
            free(nmfilters);
        return (ETOOMANYREFS);
    }

    return (0);
}
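
/*
 * Worked example of the imo_grow() size sequence, assuming a
 * hypothetical starting capacity of 15 memberships:
 * 15 -> 31 -> 63 -> 127 -> ..., i.e. always one less than a power of
 * two. Once the next step would exceed IP_MAX_MEMBERSHIPS, the vector
 * is left unchanged and ETOOMANYREFS is returned.
 */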

/*
 * Find an IPv4 multicast group entry for this ip_moptions instance
 * which matches the specified group, and optionally an interface.
 * Return its index into the array, or -1 if not found.
 */
static size_t
imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp,
    const struct bsd_sockaddr *group)
{
    const struct bsd_sockaddr_in *gsin;
    struct in_multi **pinm;
    int idx;
    int nmships;

    gsin = (const struct bsd_sockaddr_in *)group;

    /* The imo_membership array may be lazy allocated. */
    if (imo->imo_membership == NULL || imo->imo_num_memberships == 0)
        return (-1);

    nmships = imo->imo_num_memberships;
    pinm = &imo->imo_membership[0];
    for (idx = 0; idx < nmships; idx++, pinm++) {
        if (*pinm == NULL)
            continue;
        if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) &&
            in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) {
            break;
        }
    }
    if (idx >= nmships)
        idx = -1;

    return (idx);
}

/*
 * Find an IPv4 multicast source entry for this imo which matches
 * the given group index for this socket, and source address.
 *
 * NOTE: This does not check if the entry is in-mode, merely if
 * it exists, which may not be the desired behaviour.
 */
static struct in_msource *
imo_match_source(const struct ip_moptions *imo, const size_t gidx,
    const struct bsd_sockaddr *src)
{
    struct ip_msource   find;
    struct in_mfilter  *imf;
    struct ip_msource  *ims;
    const sockunion_t  *psa;

    KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__));
    KASSERT(gidx != -1 && gidx < imo->imo_num_memberships,
        ("%s: invalid index %d\n", __func__, (int)gidx));

    /* The imo_mfilters array may be lazy allocated. */
    if (imo->imo_mfilters == NULL)
        return (NULL);
    imf = &imo->imo_mfilters[gidx];

    /* Source trees are keyed in host byte order. */
    psa = (const sockunion_t *)src;
    find.ims_haddr = ntohl(psa->sin.sin_addr.s_addr);
    ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find);

    return ((struct in_msource *)ims);
}

/*
 * Perform filtering for multicast datagrams on a socket by group and source.
 *
 * Returns 0 if a datagram should be allowed through, or various error codes
 * if the socket was not a member of the group, or the source was muted, etc.
 */
int
imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp,
    const struct bsd_sockaddr *group, const struct bsd_sockaddr *src)
{
    size_t gidx;
    struct in_msource *ims;
    int mode;

    KASSERT(ifp != NULL, ("%s: null ifp", __func__));

    gidx = imo_match_group(imo, ifp, group);
    if (gidx == -1)
        return (MCAST_NOTGMEMBER);

    /*
     * Check if the source was included in an (S,G) join.
     * Allow reception on exclusive memberships by default,
     * reject reception on inclusive memberships by default.
     * Exclude source only if an in-mode exclude filter exists.
     * Include source only if an in-mode include filter exists.
     * NOTE: We are comparing group state here at IGMP t1 (now)
     * with socket-layer t0 (since last downcall).
     */
    mode = imo->imo_mfilters[gidx].imf_st[1];
    ims = imo_match_source(imo, gidx, src);

    if ((ims == NULL && mode == MCAST_INCLUDE) ||
        (ims != NULL && ims->imsl_st[0] != mode))
        return (MCAST_NOTSMEMBER);

    return (MCAST_PASS);
}
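
/*
 * Decision table sketch for the check above, for a datagram from
 * source S to group G on a member socket (derived from the code, for
 * illustration):
 *
 *	mode EXCLUDE, no entry for S       -> MCAST_PASS
 *	mode EXCLUDE, in-mode entry for S  -> MCAST_NOTSMEMBER (muted)
 *	mode INCLUDE, in-mode entry for S  -> MCAST_PASS
 *	mode INCLUDE, no entry for S       -> MCAST_NOTSMEMBER
 */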

/*
 * Find and return a reference to an in_multi record for (ifp, group),
 * and bump its reference count.
 * If one does not exist, try to allocate it, and update link-layer multicast
 * filters on ifp to listen for group.
 * Assumes the IN_MULTI lock is held across the call.
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
in_getmulti(struct ifnet *ifp, const struct in_addr *group,
    struct in_multi **pinm)
{
    struct bsd_sockaddr_in  gsin;
    struct ifmultiaddr     *ifma;
    struct in_ifinfo       *ii;
    struct in_multi        *inm;
    int error;

    IN_MULTI_LOCK_ASSERT();

    ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET];

    inm = inm_lookup(ifp, *group);
    if (inm != NULL) {
        /*
         * If we already joined this group, just bump the
         * refcount and return it.
         */
        KASSERT(inm->inm_refcount >= 1,
            ("%s: bad refcount %d", __func__, inm->inm_refcount));
        ++inm->inm_refcount;
        *pinm = inm;
        return (0);
    }

    memset(&gsin, 0, sizeof(gsin));
    gsin.sin_family = AF_INET;
    gsin.sin_len = sizeof(struct bsd_sockaddr_in);
    gsin.sin_addr = *group;

    /*
     * Check if a link-layer group is already associated
     * with this network-layer group on the given ifnet.
     */
    error = if_addmulti(ifp, (struct bsd_sockaddr *)&gsin, &ifma);
    if (error != 0)
        return (error);

    /* XXX ifma_protospec must be covered by IF_ADDR_LOCK */
    IF_ADDR_WLOCK(ifp);

    /*
     * If something other than netinet is occupying the link-layer
     * group, print a meaningful error message and back out of
     * the allocation.
     * Otherwise, bump the refcount on the existing network-layer
     * group association and return it.
     */
    if (ifma->ifma_protospec != NULL) {
        inm = (struct in_multi *)ifma->ifma_protospec;
#ifdef INVARIANTS
        KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr",
            __func__));
        KASSERT(ifma->ifma_addr->sa_family == AF_INET,
            ("%s: ifma not AF_INET", __func__));
        KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__));
        if (inm->inm_ifma != ifma || inm->inm_ifp != ifp ||
            !in_hosteq(inm->inm_addr, *group))
            panic("%s: ifma %p is inconsistent with %p (%s)",
                __func__, ifma, inm, inet_ntoa(*group));
#endif
        ++inm->inm_refcount;
        *pinm = inm;
        IF_ADDR_WUNLOCK(ifp);
        return (0);
    }

    IF_ADDR_WLOCK_ASSERT(ifp);

    /*
     * A new in_multi record is needed; allocate and initialize it.
     * We DO NOT perform an IGMP join as the in_ layer may need to
     * push an initial source list down to IGMP to support SSM.
     *
     * The initial source filter state is INCLUDE, {} as per the RFC.
     */
    inm = (in_multi *)malloc(sizeof(*inm));
    if (inm == NULL) {
        if_delmulti_ifma(ifma);
        IF_ADDR_WUNLOCK(ifp);
        return (ENOMEM);
    }
    bzero(inm, sizeof(*inm));
    inm->inm_addr = *group;
    inm->inm_ifp = ifp;
    inm->inm_igi = ii->ii_igmp;
    inm->inm_ifma = ifma;
    inm->inm_refcount = 1;
    inm->inm_state = IGMP_NOT_MEMBER;

    /*
     * Pending state-changes per group are subject to a bounds check.
     */
    IFQ_SET_MAXLEN(&inm->inm_scq, IGMP_MAX_STATE_CHANGES);

    inm->inm_st[0].iss_fmode = MCAST_UNDEFINED;
    inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
    RB_INIT(&inm->inm_srcs);

    ifma->ifma_protospec = inm;
    *pinm = inm;

    IF_ADDR_WUNLOCK(ifp);
    return (0);
}

/*
 * Drop a reference to an in_multi record.
 *
 * If the refcount drops to 0, free the in_multi record and
 * delete the underlying link-layer membership.
 */
void
inm_release_locked(struct in_multi *inm)
{
    struct ifmultiaddr *ifma;

    IN_MULTI_LOCK_ASSERT();

    CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount);

    if (--inm->inm_refcount > 0) {
        CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__,
            inm->inm_refcount);
        return;
    }

    CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm);

    ifma = inm->inm_ifma;

    /* XXX this access is not covered by IF_ADDR_LOCK */
    CTR2(KTR_IGMPV3, "%s: purging ifma %p", __func__, ifma);
    KASSERT(ifma->ifma_protospec == inm,
        ("%s: ifma_protospec != inm", __func__));
    ifma->ifma_protospec = NULL;

    inm_purge(inm);

    free(inm);

    if_delmulti_ifma(ifma);
}

/*
 * Clear recorded source entries for a group.
 * Used by the IGMP code. Caller must hold the IN_MULTI lock.
 * FIXME: Should reap.
 */
void
inm_clear_recorded(struct in_multi *inm)
{
    struct ip_msource *ims;

    IN_MULTI_LOCK_ASSERT();

    RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
        if (ims->ims_stp) {
            ims->ims_stp = 0;
            --inm->inm_st[1].iss_rec;
        }
    }
    KASSERT(inm->inm_st[1].iss_rec == 0,
        ("%s: iss_rec %d not 0", __func__, inm->inm_st[1].iss_rec));
}

/*
 * Record a source as pending for a Source-Group IGMPv3 query.
 * This lives here as it modifies the shared tree.
 *
 * inm is the group descriptor.
 * naddr is the address of the source to record in network-byte order.
 *
 * If the net.inet.igmp.sgalloc sysctl is non-zero, we will
 * lazy-allocate a source node in response to an SG query.
 * Otherwise, no allocation is performed. This saves some memory
 * with the trade-off that the source will not be reported to the
 * router if joined in the window between the query response and
 * the group actually being joined on the local host.
 *
 * VIMAGE: XXX: Currently the igmp_sgalloc feature has been removed.
 * This turns off the allocation of a recorded source entry if
 * the group has not been joined.
 *
 * Return 0 if the source didn't exist or was already marked as recorded.
 * Return 1 if the source was marked as recorded by this function.
 * Return <0 if any error occurred (negated errno code).
 */
int
inm_record_source(struct in_multi *inm, const in_addr_t naddr)
{
    struct ip_msource  find;
    struct ip_msource *ims, *nims;

    IN_MULTI_LOCK_ASSERT();

    find.ims_haddr = ntohl(naddr);
    ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find);
    if (ims && ims->ims_stp)
        return (0);
    if (ims == NULL) {
        if (inm->inm_nsrc == in_mcast_maxgrpsrc)
            return (-ENOSPC);
        nims = (ip_msource *)malloc(sizeof(struct ip_msource));
        if (nims == NULL)
            return (-ENOMEM);
        bzero(nims, sizeof(struct ip_msource));
        nims->ims_haddr = find.ims_haddr;
        RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims);
        ++inm->inm_nsrc;
        ims = nims;
    }

    /*
     * Mark the source as recorded and update the recorded
     * source count.
     */
    ++ims->ims_stp;
    ++inm->inm_st[1].iss_rec;

    return (1);
}

/*
 * Return a pointer to an in_msource owned by an in_mfilter,
 * given its source address.
 * Lazy-allocate if needed. If this is a new entry its filter state is
 * undefined at t0.
 *
 * imf is the filter set being modified.
 * haddr is the source address in *host* byte-order.
 *
 * SMPng: May be called with locks held; malloc must not block.
 */
static int
imf_get_source(struct in_mfilter *imf, const struct bsd_sockaddr_in *psin,
    struct in_msource **plims)
{
    struct ip_msource  find;
    struct ip_msource *ims, *nims;
    struct in_msource *lims;
    int error;

    error = 0;
    ims = NULL;
    lims = NULL;

    /* key is host byte order */
    find.ims_haddr = ntohl(psin->sin_addr.s_addr);
    ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find);
    lims = (struct in_msource *)ims;
    if (lims == NULL) {
        if (imf->imf_nsrc == in_mcast_maxsocksrc)
            return (ENOSPC);
        // FIXME: mismatch between allocation size and type
        nims = (ip_msource *)malloc(sizeof(struct in_msource));
        if (nims == NULL)
            return (ENOMEM);
        bzero(nims, sizeof(struct in_msource));
        lims = (struct in_msource *)nims;
        lims->ims_haddr = find.ims_haddr;
        lims->imsl_st[0] = MCAST_UNDEFINED;
        RB_INSERT(ip_msource_tree, &imf->imf_sources, nims);
        ++imf->imf_nsrc;
    }

    *plims = lims;

    return (error);
}

/*
 * Graft a source entry into an existing socket-layer filter set,
 * maintaining any required invariants and checking allocations.
 *
 * The source is marked as being in the new filter mode at t1.
 *
 * Return the pointer to the new node, otherwise return NULL.
 */
static struct in_msource *
imf_graft(struct in_mfilter *imf, const uint8_t st1,
    const struct bsd_sockaddr_in *psin)
{
    struct ip_msource *nims;
    struct in_msource *lims;

    // FIXME: mismatch between allocated size and type
    nims = (ip_msource *)malloc(sizeof(struct in_msource));
    if (nims == NULL)
        return (NULL);
    bzero(nims, sizeof(struct in_msource));
    lims = (struct in_msource *)nims;
    lims->ims_haddr = ntohl(psin->sin_addr.s_addr);
    lims->imsl_st[0] = MCAST_UNDEFINED;
    lims->imsl_st[1] = st1;
    RB_INSERT(ip_msource_tree, &imf->imf_sources, nims);
    ++imf->imf_nsrc;

    return (lims);
}

/*
 * Prune a source entry from an existing socket-layer filter set,
 * maintaining any required invariants and checking allocations.
 *
 * The source is marked as being left at t1, it is not freed.
 *
 * Return 0 if no error occurred, otherwise return an errno value.
 */
static int
imf_prune(struct in_mfilter *imf, const struct bsd_sockaddr_in *psin)
{
    struct ip_msource  find;
    struct ip_msource *ims;
    struct in_msource *lims;

    /* key is host byte order */
    find.ims_haddr = ntohl(psin->sin_addr.s_addr);
    ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find);
    if (ims == NULL)
        return (ENOENT);
    lims = (struct in_msource *)ims;
    lims->imsl_st[1] = MCAST_UNDEFINED;

    return (0);
}

/*
 * Revert socket-layer filter set deltas at t1 to t0 state.
 */
static void
imf_rollback(struct in_mfilter *imf)
{
    struct ip_msource *ims, *tims;
    struct in_msource *lims;

    RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) {
        lims = (struct in_msource *)ims;
        if (lims->imsl_st[0] == lims->imsl_st[1]) {
            /* no change at t1 */
            continue;
        } else if (lims->imsl_st[0] != MCAST_UNDEFINED) {
            /* revert change to existing source at t1 */
            lims->imsl_st[1] = lims->imsl_st[0];
        } else {
            /* revert source added t1 */
            CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
            RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims);
            free(ims);
            imf->imf_nsrc--;
        }
    }
    imf->imf_st[1] = imf->imf_st[0];
}

/*
 * Mark socket-layer filter set as INCLUDE {} at t1.
 */
static void
imf_leave(struct in_mfilter *imf)
{
    struct ip_msource *ims;
    struct in_msource *lims;

    RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
        lims = (struct in_msource *)ims;
        lims->imsl_st[1] = MCAST_UNDEFINED;
    }
    imf->imf_st[1] = MCAST_INCLUDE;
}

/*
 * Mark socket-layer filter set deltas as committed.
 */
static void
imf_commit(struct in_mfilter *imf)
{
    struct ip_msource *ims;
    struct in_msource *lims;

    RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
        lims = (struct in_msource *)ims;
        lims->imsl_st[0] = lims->imsl_st[1];
    }
    imf->imf_st[0] = imf->imf_st[1];
}
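
/*
 * Sketch of the two-phase update the imf_* helpers above implement
 * (illustrative; mirrors the pattern in inp_block_unblock_source()
 * below):
 *
 *	ims = imf_graft(imf, fmode, &ssa->sin); // stage delta at t1
 *	error = inm_merge(inm, imf);            // push to IGMP layer
 *	if (error)
 *		imf_rollback(imf);              // t1 := t0
 *	else
 *		imf_commit(imf);                // t0 := t1
 *	imf_reap(imf);  // drop sources undefined at both t0 and t1
 */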

/*
 * Reap unreferenced sources from socket-layer filter set.
 */
static void
imf_reap(struct in_mfilter *imf)
{
    struct ip_msource *ims, *tims;
    struct in_msource *lims;

    RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) {
        lims = (struct in_msource *)ims;
        if ((lims->imsl_st[0] == MCAST_UNDEFINED) &&
            (lims->imsl_st[1] == MCAST_UNDEFINED)) {
            CTR2(KTR_IGMPV3, "%s: free lims %p", __func__, ims);
            RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims);
            free(ims);
            imf->imf_nsrc--;
        }
    }
}

/*
 * Purge socket-layer filter set.
 */
static void
imf_purge(struct in_mfilter *imf)
{
    struct ip_msource *ims, *tims;

    RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) {
        CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
        RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims);
        free(ims);
        imf->imf_nsrc--;
    }
    imf->imf_st[0] = imf->imf_st[1] = MCAST_UNDEFINED;
    KASSERT(RB_EMPTY(&imf->imf_sources),
        ("%s: imf_sources not empty", __func__));
}

/*
 * Look up a source filter entry for a multicast group.
 *
 * inm is the group descriptor to work with.
 * haddr is the host-byte-order IPv4 address to look up.
 * noalloc may be non-zero to suppress allocation of sources.
 * *pims will be set to the address of the retrieved or allocated source.
 *
 * SMPng: NOTE: may be called with locks held.
 * Return 0 if successful, otherwise return a non-zero error code.
 */
static int
inm_get_source(struct in_multi *inm, const in_addr_t haddr,
    const int noalloc, struct ip_msource **pims)
{
    struct ip_msource  find;
    struct ip_msource *ims, *nims;
#ifdef KTR
    struct in_addr ia;
#endif

    find.ims_haddr = haddr;
    ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find);
    if (ims == NULL && !noalloc) {
        if (inm->inm_nsrc == in_mcast_maxgrpsrc)
            return (ENOSPC);
        nims = (ip_msource *)malloc(sizeof(struct ip_msource));
        if (nims == NULL)
            return (ENOMEM);
        bzero(nims, sizeof(struct ip_msource));
        nims->ims_haddr = haddr;
        RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims);
        ++inm->inm_nsrc;
        ims = nims;
#ifdef KTR
        ia.s_addr = htonl(haddr);
        CTR3(KTR_IGMPV3, "%s: allocated %s as %p", __func__,
            inet_ntoa(ia), ims);
#endif
    }

    *pims = ims;
    return (0);
}

/*
 * Merge socket-layer source into IGMP-layer source.
 * If rollback is non-zero, perform the inverse of the merge.
 */
static void
ims_merge(struct ip_msource *ims, const struct in_msource *lims,
    const int rollback)
{
    int n = rollback ? -1 : 1;
#ifdef KTR
    struct in_addr ia;

    ia.s_addr = htonl(ims->ims_haddr);
#endif

    if (lims->imsl_st[0] == MCAST_EXCLUDE) {
        CTR3(KTR_IGMPV3, "%s: t1 ex -= %d on %s",
            __func__, n, inet_ntoa(ia));
        ims->ims_st[1].ex -= n;
    } else if (lims->imsl_st[0] == MCAST_INCLUDE) {
        CTR3(KTR_IGMPV3, "%s: t1 in -= %d on %s",
            __func__, n, inet_ntoa(ia));
        ims->ims_st[1].in -= n;
    }

    if (lims->imsl_st[1] == MCAST_EXCLUDE) {
        CTR3(KTR_IGMPV3, "%s: t1 ex += %d on %s",
            __func__, n, inet_ntoa(ia));
        ims->ims_st[1].ex += n;
    } else if (lims->imsl_st[1] == MCAST_INCLUDE) {
        CTR3(KTR_IGMPV3, "%s: t1 in += %d on %s",
            __func__, n, inet_ntoa(ia));
        ims->ims_st[1].in += n;
    }
}

/*
 * Atomically update the global in_multi state, when a membership's
 * filter list is being updated in any way.
 *
 * imf is the per-inpcb-membership group filter pointer.
 * A fake imf may be passed for in-kernel consumers.
 *
 * XXX This is a candidate for a set-symmetric-difference style loop
 * which would eliminate the repeated lookup from root of ims nodes,
 * as they share the same key space.
 *
 * If any error occurred this function will back out of refcounts
 * and return a non-zero value.
 */
static int
inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
{
    struct ip_msource *ims, *nims;
    struct in_msource *lims;
    int schanged, error;
    int nsrc0, nsrc1;

    schanged = 0;
    error = 0;
    nsrc1 = nsrc0 = 0;
    nims = NULL;

    /*
     * Update the source filters first, as this may fail.
     * Maintain count of in-mode filters at t0, t1. These are
     * used to work out if we transition into ASM mode or not.
     * Maintain a count of source filters whose state was
     * actually modified by this operation.
     */
    RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
        lims = (struct in_msource *)ims;
        if (lims->imsl_st[0] == imf->imf_st[0])
            nsrc0++;
        if (lims->imsl_st[1] == imf->imf_st[1])
            nsrc1++;
        if (lims->imsl_st[0] == lims->imsl_st[1])
            continue;
        error = inm_get_source(inm, lims->ims_haddr, 0, &nims);
        ++schanged;
        if (error)
            break;
        ims_merge(nims, lims, 0);
    }
    if (error) {
        struct ip_msource *bims;

        RB_FOREACH_REVERSE_FROM(ims, ip_msource_tree, nims) {
            lims = (struct in_msource *)ims;
            if (lims->imsl_st[0] == lims->imsl_st[1])
                continue;
            (void)inm_get_source(inm, lims->ims_haddr, 1, &bims);
            if (bims == NULL)
                continue;
            ims_merge(bims, lims, 1);
        }
        goto out_reap;
    }

    CTR3(KTR_IGMPV3, "%s: imf filters in-mode: %d at t0, %d at t1",
        __func__, nsrc0, nsrc1);

    /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
    if (imf->imf_st[0] == imf->imf_st[1] &&
        imf->imf_st[1] == MCAST_INCLUDE) {
        if (nsrc1 == 0) {
            CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__);
            --inm->inm_st[1].iss_in;
        }
    }

    /* Handle filter mode transition on socket. */
    if (imf->imf_st[0] != imf->imf_st[1]) {
        CTR3(KTR_IGMPV3, "%s: imf transition %d to %d",
            __func__, imf->imf_st[0], imf->imf_st[1]);

        if (imf->imf_st[0] == MCAST_EXCLUDE) {
            CTR1(KTR_IGMPV3, "%s: --ex on inm at t1", __func__);
            --inm->inm_st[1].iss_ex;
        } else if (imf->imf_st[0] == MCAST_INCLUDE) {
            CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__);
            --inm->inm_st[1].iss_in;
        }

        if (imf->imf_st[1] == MCAST_EXCLUDE) {
            CTR1(KTR_IGMPV3, "%s: ex++ on inm at t1", __func__);
            inm->inm_st[1].iss_ex++;
        } else if (imf->imf_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
            CTR1(KTR_IGMPV3, "%s: in++ on inm at t1", __func__);
            inm->inm_st[1].iss_in++;
        }
    }

    /*
     * Track inm filter state in terms of listener counts.
     * If there are any exclusive listeners, stack-wide
     * membership is exclusive.
     * Otherwise, if only inclusive listeners, stack-wide is inclusive.
     * If no listeners remain, state is undefined at t1,
     * and the IGMP lifecycle for this group should finish.
     */
    if (inm->inm_st[1].iss_ex > 0) {
        CTR1(KTR_IGMPV3, "%s: transition to EX", __func__);
        inm->inm_st[1].iss_fmode = MCAST_EXCLUDE;
    } else if (inm->inm_st[1].iss_in > 0) {
        CTR1(KTR_IGMPV3, "%s: transition to IN", __func__);
        inm->inm_st[1].iss_fmode = MCAST_INCLUDE;
    } else {
        CTR1(KTR_IGMPV3, "%s: transition to UNDEF", __func__);
        inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
    }

    /* Decrement ASM listener count on transition out of ASM mode. */
    if (imf->imf_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
        if ((imf->imf_st[1] != MCAST_EXCLUDE) ||
            (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 > 0))
            CTR1(KTR_IGMPV3, "%s: --asm on inm at t1", __func__);
        --inm->inm_st[1].iss_asm;
    }

    /* Increment ASM listener count on transition to ASM mode. */
    if (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
        CTR1(KTR_IGMPV3, "%s: asm++ on inm at t1", __func__);
        inm->inm_st[1].iss_asm++;
    }

    CTR3(KTR_IGMPV3, "%s: merged imf %p to inm %p", __func__, imf, inm);
    inm_print(inm);

out_reap:
    if (schanged > 0) {
        CTR1(KTR_IGMPV3, "%s: sources changed; reaping", __func__);
        inm_reap(inm);
    }
    return (error);
}
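
/*
 * Worked example of the counters inm_merge() maintains, assuming a
 * single socket performing an ASM join (EXCLUDE {}): at t1 the group
 * has iss_ex = 1, iss_in = 0, iss_asm = 1 and iss_fmode = MCAST_EXCLUDE.
 * If the same socket then blocks one source, iss_asm drops to 0 while
 * iss_ex stays 1: the membership is still exclusive, but no longer
 * "any-source".
 */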

/*
 * Mark an in_multi's filter set deltas as committed.
 * Called by IGMP after a state change has been enqueued.
 */
void
inm_commit(struct in_multi *inm)
{
    struct ip_msource *ims;

    CTR2(KTR_IGMPV3, "%s: commit inm %p", __func__, inm);
    CTR1(KTR_IGMPV3, "%s: pre commit:", __func__);
    inm_print(inm);

    RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
        ims->ims_st[0] = ims->ims_st[1];
    }
    inm->inm_st[0] = inm->inm_st[1];
}

/*
 * Reap unreferenced nodes from an in_multi's filter set.
 */
static void
inm_reap(struct in_multi *inm)
{
    struct ip_msource *ims, *tims;

    RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) {
        if (ims->ims_st[0].ex > 0 || ims->ims_st[0].in > 0 ||
            ims->ims_st[1].ex > 0 || ims->ims_st[1].in > 0 ||
            ims->ims_stp != 0)
            continue;
        CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
        RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims);
        free(ims);
        inm->inm_nsrc--;
    }
}

/*
 * Purge all source nodes from an in_multi's filter set.
 */
static void
inm_purge(struct in_multi *inm)
{
    struct ip_msource *ims, *tims;

    RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) {
        CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
        RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims);
        free(ims);
        inm->inm_nsrc--;
    }
}

/*
 * Join a multicast group; unlocked entry point.
 *
 * SMPng: XXX: in_joingroup() is called from in_control() when Giant
 * is not held. Fortunately, ifp is unlikely to have been detached
 * at this point, so we assume it's OK to recurse.
 */
int
in_joingroup(struct ifnet *ifp, const struct in_addr *gina,
    /*const*/ struct in_mfilter *imf, struct in_multi **pinm)
{
    int error;

    IN_MULTI_LOCK();
    error = in_joingroup_locked(ifp, gina, imf, pinm);
    IN_MULTI_UNLOCK();

    return (error);
}
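
/*
 * Minimal kernel-consumer sketch of an ASM join via the entry point
 * above (illustrative; 'ifp' is a hypothetical attached interface):
 *
 *	struct in_multi *inm;
 *	struct in_addr grp;
 *	grp.s_addr = htonl(INADDR_ALLHOSTS_GROUP);	// 224.0.0.1
 *	if (in_joingroup(ifp, &grp, NULL, &inm) == 0) {
 *		...
 *		in_leavegroup(inm, NULL);	// releases the reference
 *	}
 */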

/*
 * Join a multicast group; real entry point.
 *
 * Only preserves atomicity at inm level.
 * NOTE: imf argument cannot be const due to sys/tree.h limitations.
 *
 * If the IGMP downcall fails, the group is not joined, and an error
 * code is returned.
 */
int
in_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina,
    /*const*/ struct in_mfilter *imf, struct in_multi **pinm)
{
    struct in_mfilter  timf;
    struct in_multi   *inm;
    int error;

    IN_MULTI_LOCK_ASSERT();

    CTR4(KTR_IGMPV3, "%s: join %s on %p(%s))", __func__,
        inet_ntoa(*gina), ifp, ifp->if_xname);

    error = 0;
    inm = NULL;

    /*
     * If no imf was specified (i.e. kernel consumer),
     * fake one up and assume it is an ASM join.
     */
    if (imf == NULL) {
        imf_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
        imf = &timf;
    }

    error = in_getmulti(ifp, gina, &inm);
    if (error) {
        CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__);
        return (error);
    }

    CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
    error = inm_merge(inm, imf);
    if (error) {
        CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
        goto out_inm_release;
    }

    CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
    error = igmp_change_state(inm);
    if (error) {
        CTR1(KTR_IGMPV3, "%s: failed to update source", __func__);
        goto out_inm_release;
    }

out_inm_release:
    if (error) {
        CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
        inm_release_locked(inm);
    } else {
        *pinm = inm;
    }

    return (error);
}

/*
 * Leave a multicast group; unlocked entry point.
 */
int
in_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
{
    struct ifnet *ifp;
    int error;

    ifp = inm->inm_ifp;

    IN_MULTI_LOCK();
    error = in_leavegroup_locked(inm, imf);
    IN_MULTI_UNLOCK();

    return (error);
}

/*
 * Leave a multicast group; real entry point.
 * All source filters will be expunged.
 *
 * Only preserves atomicity at inm level.
 *
 * Holding the write lock for the INP which contains imf
 * is highly advisable. We can't assert for it as imf does not
 * contain a back-pointer to the owning inp.
 *
 * Note: This is not the same as inm_release(*) as this function also
 * makes a state change downcall into IGMP.
 */
int
in_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
{
    struct in_mfilter timf;
    int error;

    error = 0;

    IN_MULTI_LOCK_ASSERT();

    CTR5(KTR_IGMPV3, "%s: leave inm %p, %s/%s, imf %p", __func__,
        inm, inet_ntoa(inm->inm_addr),
        (inm_is_ifp_detached(inm) ? "null" : inm->inm_ifp->if_xname),
        imf);

    /*
     * If no imf was specified (i.e. kernel consumer),
     * fake one up and assume it is an ASM join.
     */
    if (imf == NULL) {
        imf_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
        imf = &timf;
    }

    /*
     * Begin state merge transaction at IGMP layer.
     *
     * As this particular invocation should not cause any memory
     * to be allocated, and there is no opportunity to roll back
     * the transaction, it MUST NOT fail.
     */
    CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
    error = inm_merge(inm, imf);
    KASSERT(error == 0, ("%s: failed to merge inm state", __func__));

    CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
    error = igmp_change_state(inm);
    if (error)
        CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);

    CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
    inm_release_locked(inm);

    return (error);
}

/*#ifndef BURN_BRIDGES*/
/*
 * Join an IPv4 multicast group in (*,G) exclusive mode.
 * The group must be a 224.0.0.0/24 link-scope group.
 * This KPI is for legacy kernel consumers only.
 */
struct in_multi *
in_addmulti(struct in_addr *ap, struct ifnet *ifp)
{
    struct in_multi *pinm;
    int error;

    KASSERT(IN_LOCAL_GROUP(ntohl(ap->s_addr)),
        ("%s: %s not in 224.0.0.0/24", __func__, inet_ntoa(*ap)));

    error = in_joingroup(ifp, ap, NULL, &pinm);
    if (error != 0)
        pinm = NULL;

    return (pinm);
}

/*
 * Leave an IPv4 multicast group, assumed to be in exclusive (*,G) mode.
 * This KPI is for legacy kernel consumers only.
 */
void
in_delmulti(struct in_multi *inm)
{
    (void)in_leavegroup(inm, NULL);
}
/*#endif*/

/*
 * Block or unblock an ASM multicast source on an inpcb.
 * This implements the delta-based API described in RFC 3678.
 *
 * The delta-based API applies only to exclusive-mode memberships.
 * An IGMP downcall will be performed.
 *
 * SMPng: NOTE: Must take Giant as a join may create a new ifma.
 *
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
{
    struct group_source_req  gsr;
    sockunion_t             *gsa, *ssa;
    struct ifnet            *ifp;
    struct in_mfilter       *imf;
    struct ip_moptions      *imo;
    struct in_msource       *ims;
    struct in_multi         *inm;
    size_t idx;
    uint16_t fmode;
    int error, doblock;

    ifp = NULL;
    error = 0;
    doblock = 0;

    memset(&gsr, 0, sizeof(struct group_source_req));
    gsa = (sockunion_t *)&gsr.gsr_group;
    ssa = (sockunion_t *)&gsr.gsr_source;

    switch (sopt->sopt_name) {
    case IP_BLOCK_SOURCE:
    case IP_UNBLOCK_SOURCE: {
        struct ip_mreq_source mreqs;

        error = sooptcopyin(sopt, &mreqs,
            sizeof(struct ip_mreq_source),
            sizeof(struct ip_mreq_source));
        if (error)
            return (error);

        gsa->sin.sin_family = AF_INET;
        gsa->sin.sin_len = sizeof(struct bsd_sockaddr_in);
        gsa->sin.sin_addr = mreqs.imr_multiaddr;

        ssa->sin.sin_family = AF_INET;
        ssa->sin.sin_len = sizeof(struct bsd_sockaddr_in);
        ssa->sin.sin_addr = mreqs.imr_sourceaddr;

        if (!in_nullhost(mreqs.imr_interface))
            INADDR_TO_IFP(mreqs.imr_interface, ifp);

        if (sopt->sopt_name == IP_BLOCK_SOURCE)
            doblock = 1;

        CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p",
            __func__, inet_ntoa(mreqs.imr_interface), ifp);
        break;
    }

    case MCAST_BLOCK_SOURCE:
    case MCAST_UNBLOCK_SOURCE:
        error = sooptcopyin(sopt, &gsr,
            sizeof(struct group_source_req),
            sizeof(struct group_source_req));
        if (error)
            return (error);

        if (gsa->sin.sin_family != AF_INET ||
            gsa->sin.sin_len != sizeof(struct bsd_sockaddr_in))
            return (EINVAL);

        if (ssa->sin.sin_family != AF_INET ||
            ssa->sin.sin_len != sizeof(struct bsd_sockaddr_in))
            return (EINVAL);

        if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
            return (EADDRNOTAVAIL);

        ifp = ifnet_byindex(gsr.gsr_interface);

        if (sopt->sopt_name == MCAST_BLOCK_SOURCE)
            doblock = 1;
        break;

    default:
        CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d",
            __func__, sopt->sopt_name);
        return (EOPNOTSUPP);
        break;
    }

    if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
        return (EINVAL);

    /*
     * Check if we are actually a member of this group.
     */
    imo = inp_findmoptions(inp);
    idx = imo_match_group(imo, ifp, &gsa->sa);
    if (idx == -1 || imo->imo_mfilters == NULL) {
        error = EADDRNOTAVAIL;
        goto out_inp_locked;
    }

    KASSERT(imo->imo_mfilters != NULL,
        ("%s: imo_mfilters not allocated", __func__));
    imf = &imo->imo_mfilters[idx];
    inm = imo->imo_membership[idx];

    /*
     * Attempting to use the delta-based API on a
     * non-exclusive-mode membership is an error.
     */
    fmode = imf->imf_st[0];
    if (fmode != MCAST_EXCLUDE) {
        error = EINVAL;
        goto out_inp_locked;
    }

    /*
     * Deal with error cases up-front:
     * Asked to block, but already blocked; or
     * Asked to unblock, but nothing to unblock.
     * If adding a new block entry, allocate it.
     */
    ims = imo_match_source(imo, idx, &ssa->sa);
    if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
        CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__,
            inet_ntoa(ssa->sin.sin_addr), doblock ? "" : "not ");
        error = EADDRNOTAVAIL;
        goto out_inp_locked;
    }

    INP_LOCK_ASSERT(inp);

    /*
     * Begin state merge transaction at socket layer.
     */
    if (doblock) {
        CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block");
        ims = imf_graft(imf, fmode, &ssa->sin);
        if (ims == NULL)
            error = ENOMEM;
    } else {
        CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow");
        error = imf_prune(imf, &ssa->sin);
    }

    if (error) {
        CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__);
        goto out_imf_rollback;
    }

    /*
     * Begin state merge transaction at IGMP layer.
     */
    IN_MULTI_LOCK();

    CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
    error = inm_merge(inm, imf);
    if (error) {
        CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
        goto out_imf_rollback;
    }

    CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
    error = igmp_change_state(inm);
    if (error)
        CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);

    IN_MULTI_UNLOCK();

out_imf_rollback:
    if (error)
        imf_rollback(imf);
    else
        imf_commit(imf);

    imf_reap(imf);

out_inp_locked:
    INP_UNLOCK(inp);
    return (error);
}
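
/*
 * Userland view of the delta-based RFC 3678 API handled above
 * (illustrative sketch; the group and source addresses are
 * hypothetical, and error checking is omitted):
 *
 *	struct ip_mreq_source mr;
 *	mr.imr_multiaddr.s_addr  = inet_addr("239.1.1.1");
 *	mr.imr_sourceaddr.s_addr = inet_addr("10.0.0.5");
 *	mr.imr_interface.s_addr  = INADDR_ANY;
 *	setsockopt(s, IPPROTO_IP, IP_BLOCK_SOURCE, &mr, sizeof(mr));
 *
 * The membership must already exist and be in exclusive mode, or the
 * code above returns EINVAL / EADDRNOTAVAIL as appropriate.
 */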

/*
 * Given an inpcb, return its multicast options structure pointer. Accepts
 * an unlocked inpcb pointer, but will return it locked. May sleep.
 *
 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
 * SMPng: NOTE: Returns with the INP write lock held.
 */
static struct ip_moptions *
inp_findmoptions(struct inpcb *inp)
{
    struct ip_moptions  *imo;
    struct in_multi    **immp;
    struct in_mfilter   *imfp;
    size_t idx;

    INP_LOCK(inp);
    if (inp->inp_moptions != NULL)
        return (inp->inp_moptions);

    INP_UNLOCK(inp);

    imo = (ip_moptions *)malloc(sizeof(*imo));
    immp = (in_multi **)malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS);
    bzero(immp, sizeof(*immp) * IP_MIN_MEMBERSHIPS);
    imfp = (in_mfilter *)malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS);

    imo->imo_multicast_ifp = NULL;
    imo->imo_multicast_addr.s_addr = INADDR_ANY;
    imo->imo_multicast_vif = -1;
    imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
    imo->imo_multicast_loop = in_mcast_loop;
    imo->imo_num_memberships = 0;
    imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
    imo->imo_membership = immp;

    /* Initialize per-group source filters. */
    for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++)
        imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
    imo->imo_mfilters = imfp;

    INP_LOCK(inp);
    if (inp->inp_moptions != NULL) {
        free(imfp);
        free(immp);
        free(imo);
        return (inp->inp_moptions);
    }
    inp->inp_moptions = imo;
    return (imo);
}

/*
 * Discard the IP multicast options (and source filters).
 *
 * SMPng: NOTE: assumes INP write lock is held.
 */
void
inp_freemoptions(struct ip_moptions *imo)
{
    struct in_mfilter *imf;
    size_t idx, nmships;

    KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__));

    nmships = imo->imo_num_memberships;
    for (idx = 0; idx < nmships; ++idx) {
        imf = imo->imo_mfilters ? &imo->imo_mfilters[idx] : NULL;
        if (imf)
            imf_leave(imf);
        (void)in_leavegroup(imo->imo_membership[idx], imf);
        if (imf)
            imf_purge(imf);
    }

    if (imo->imo_mfilters)
        free(imo->imo_mfilters);
    free(imo->imo_membership);
    free(imo);
}

/*
 * Atomically get source filters on a socket for an IPv4 multicast group.
 * Called with INP lock held; returns with lock released.
 */
static int
inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
    struct __msfilterreq msfr;
    sockunion_t *gsa;
    struct ifnet *ifp;
    struct ip_moptions *imo;
    struct in_mfilter *imf;
    struct ip_msource *ims;
    struct in_msource *lims;
    struct bsd_sockaddr_in *psin;
    struct bsd_sockaddr_storage *ptss;
    struct bsd_sockaddr_storage *tss;
    int error;
    size_t idx, nsrcs, ncsrcs;

    INP_LOCK_ASSERT(inp);

    imo = inp->inp_moptions;
    KASSERT(imo != NULL, ("%s: null ip_moptions", __func__));

    INP_UNLOCK(inp);

    error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
        sizeof(struct __msfilterreq));
    if (error)
        return (error);

    if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
        return (EINVAL);

    ifp = ifnet_byindex(msfr.msfr_ifindex);
    if (ifp == NULL)
        return (EINVAL);

    INP_LOCK(inp);

    /*
     * Lookup group on the socket.
     */
    gsa = (sockunion_t *)&msfr.msfr_group;
    idx = imo_match_group(imo, ifp, &gsa->sa);
    if (idx == -1 || imo->imo_mfilters == NULL) {
        INP_UNLOCK(inp);
        return (EADDRNOTAVAIL);
    }
    imf = &imo->imo_mfilters[idx];

    /*
     * Ignore memberships which are in limbo.
     */
    if (imf->imf_st[1] == MCAST_UNDEFINED) {
        INP_UNLOCK(inp);
        return (EAGAIN);
    }
    msfr.msfr_fmode = imf->imf_st[1];

    /*
     * If the user specified a buffer, copy out the source filter
     * entries to userland gracefully.
     * We only copy out the number of entries which userland
     * has asked for, but we always tell userland how big the
     * buffer really needs to be.
     */
    tss = NULL;
    if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) {
        tss = (bsd_sockaddr_storage *)malloc(
            sizeof(struct bsd_sockaddr_storage) * msfr.msfr_nsrcs);
        if (tss == NULL) {
            INP_UNLOCK(inp);
            return (ENOBUFS);
        }
        bzero(tss, sizeof(struct bsd_sockaddr_storage) * msfr.msfr_nsrcs);
    }

    /*
     * Count number of sources in-mode at t0.
     * If buffer space exists and remains, copy out source entries.
     */
    nsrcs = msfr.msfr_nsrcs;
    ncsrcs = 0;
    ptss = tss;
    RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
        lims = (struct in_msource *)ims;
        if (lims->imsl_st[0] == MCAST_UNDEFINED ||
            lims->imsl_st[0] != imf->imf_st[0])
            continue;
        ++ncsrcs;
        if (tss != NULL && nsrcs > 0) {
            psin = (struct bsd_sockaddr_in *)ptss;
            psin->sin_family = AF_INET;
            psin->sin_len = sizeof(struct bsd_sockaddr_in);
            psin->sin_addr.s_addr = htonl(lims->ims_haddr);
            psin->sin_port = 0;
            ++ptss;
            --nsrcs;
        }
    }

    INP_UNLOCK(inp);

    if (tss != NULL) {
        error = copyout(tss, msfr.msfr_srcs,
            sizeof(struct bsd_sockaddr_storage) * msfr.msfr_nsrcs);
        free(tss);
        if (error)
            return (error);
    }

    msfr.msfr_nsrcs = ncsrcs;
    error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq));

    return (error);
}
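
/*
 * Userland counterpart of the IP_MSFILTER path above (illustrative
 * sketch; the interface name is hypothetical and error handling is
 * omitted). The struct is copied in and back out, so a first call with
 * msfr_nsrcs == 0 can be used purely to learn the required source
 * count before allocating a buffer:
 *
 *	struct __msfilterreq msfr;
 *	memset(&msfr, 0, sizeof(msfr));
 *	msfr.msfr_ifindex = if_nametoindex("eth0");
 *	... fill msfr.msfr_group with the group's bsd_sockaddr_in ...
 *	socklen_t len = sizeof(msfr);
 *	getsockopt(s, IPPROTO_IP, IP_MSFILTER, &msfr, &len);
 *	// msfr.msfr_nsrcs now holds the in-mode source count at t0.
 */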

/*
 * Return the IP multicast options in response to user getsockopt().
 */
int
inp_getmoptions(struct inpcb *inp, struct sockopt *sopt)
{
    struct ip_mreqn mreqn;
    struct ip_moptions *imo;
    struct ifnet *ifp;
    struct in_ifaddr *ia;
    int error, optval;
    u_char coptval;

    INP_LOCK(inp);
    imo = inp->inp_moptions;

    /*
     * If socket is neither of type SOCK_RAW nor SOCK_DGRAM,
     * or is a divert socket, reject it.
     */
    if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
        (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
        inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) {
        INP_UNLOCK(inp);
        return (EOPNOTSUPP);
    }

    error = 0;
    switch (sopt->sopt_name) {
    case IP_MULTICAST_VIF:
        if (imo != NULL)
            optval = imo->imo_multicast_vif;
        else
            optval = -1;
        INP_UNLOCK(inp);
        error = sooptcopyout(sopt, &optval, sizeof(int));
        break;

    case IP_MULTICAST_IF:
        memset(&mreqn, 0, sizeof(struct ip_mreqn));
        if (imo != NULL) {
            ifp = imo->imo_multicast_ifp;
            if (!in_nullhost(imo->imo_multicast_addr)) {
                mreqn.imr_address = imo->imo_multicast_addr;
            } else if (ifp != NULL) {
                mreqn.imr_ifindex = ifp->if_index;
                IFP_TO_IA(ifp, ia);
                if (ia != NULL) {
                    mreqn.imr_address =
                        IA_SIN(ia)->sin_addr;
                    ifa_free(&ia->ia_ifa);
                }
            }
        }
        INP_UNLOCK(inp);
        if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) {
            error = sooptcopyout(sopt, &mreqn,
                sizeof(struct ip_mreqn));
        } else {
            error = sooptcopyout(sopt, &mreqn.imr_address,
                sizeof(struct in_addr));
        }
        break;

    case IP_MULTICAST_TTL:
        if (imo == 0)
            optval = coptval = IP_DEFAULT_MULTICAST_TTL;
        else
            optval = coptval = imo->imo_multicast_ttl;
        INP_UNLOCK(inp);
        if (sopt->sopt_valsize == sizeof(u_char))
            error = sooptcopyout(sopt, &coptval, sizeof(u_char));
        else
            error = sooptcopyout(sopt, &optval, sizeof(int));
        break;

    case IP_MULTICAST_LOOP:
        if (imo == 0)
            optval = coptval = IP_DEFAULT_MULTICAST_LOOP;
        else
            optval = coptval = imo->imo_multicast_loop;
        INP_UNLOCK(inp);
        if (sopt->sopt_valsize == sizeof(u_char))
            error = sooptcopyout(sopt, &coptval, sizeof(u_char));
        else
            error = sooptcopyout(sopt, &optval, sizeof(int));
        break;

    case IP_MSFILTER:
        if (imo == NULL) {
            error = EADDRNOTAVAIL;
            INP_UNLOCK(inp);
        } else {
            error = inp_get_source_filters(inp, sopt);
        }
        break;

    default:
        INP_UNLOCK(inp);
        error = ENOPROTOOPT;
        break;
    }

    INP_UNLOCK_ASSERT(inp);

    return (error);
}

/*
 * Look up the ifnet to use for a multicast group membership,
 * given the IPv4 address of an interface, and the IPv4 group address.
 *
 * This routine exists to support legacy multicast applications
 * which do not understand that multicast memberships are scoped to
 * specific physical links in the networking stack, or which need
 * to join link-scope groups before IPv4 addresses are configured.
 *
 * If inp is non-NULL, use this socket's current FIB number for any
 * required FIB lookup.
 * If ina is INADDR_ANY, look up the group address in the unicast FIB,
 * and use its ifp; usually, this points to the default next-hop.
 *
 * If the FIB lookup fails, attempt to use the first non-loopback
 * interface with multicast capability in the system as a
 * last resort. The legacy IPv4 ASM API requires that we do
 * this in order to allow groups to be joined when the routing
 * table has not yet been populated during boot.
 *
 * Returns NULL if no ifp could be found.
 *
 * SMPng: TODO: Acquire the appropriate locks for INADDR_TO_IFP.
 * FUTURE: Implement IPv4 source-address selection.
 */
static struct ifnet *
inp_lookup_mcast_ifp(const struct inpcb *inp,
    const struct bsd_sockaddr_in *gsin, const struct in_addr ina)
{
    struct ifnet *ifp;

    KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__));
    KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)),
        ("%s: not multicast", __func__));

    ifp = NULL;
    if (!in_nullhost(ina)) {
        INADDR_TO_IFP(ina, ifp);
    } else {
        struct route ro;

        ro.ro_rt = NULL;
        memcpy(&ro.ro_dst, gsin, sizeof(struct bsd_sockaddr_in));
        in_rtalloc_ign(&ro, 0, inp ? inp->inp_inc.inc_fibnum : 0);
        if (ro.ro_rt != NULL) {
            ifp = ro.ro_rt->rt_ifp;
            KASSERT(ifp != NULL, ("%s: null ifp", __func__));
            RTFREE(ro.ro_rt);
        } else {
            struct in_ifaddr *ia;
            struct ifnet *mifp;

            mifp = NULL;
            IN_IFADDR_RLOCK();
            TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
                mifp = ia->ia_ifp;
                if (!(mifp->if_flags & IFF_LOOPBACK) &&
                    (mifp->if_fla

Large files are truncated; view the full file in the repository.