Bug Summary

File: bsd/netinet6/in6_mcast.c
Warning: line 1787, column 11
Copies out a struct with untouched element(s): __msfr_align, msfr_srcs

Annotated Source Code

1/*
2 * Copyright (c) 2010-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 2009 Bruce Simpson.
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. The name of the author may not be used to endorse or promote
41 * products derived from this software without specific prior written
42 * permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * SUCH DAMAGE.
55 */
56
57/*
58 * IPv6 multicast socket, group, and socket option processing module.
59 * Normative references: RFC 2292, RFC 3492, RFC 3542, RFC 3678, RFC 3810.
60 */
61
62#include <sys/cdefs.h>
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/kernel.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/protosw.h>
70#include <sys/socket.h>
71#include <sys/socketvar.h>
72#include <sys/protosw.h>
73#include <sys/sysctl.h>
74#include <sys/tree.h>
75#include <sys/mcache.h>
76
77#include <kern/zalloc.h>
78
79#include <pexpert/pexpert.h>
80
81#include <net/if.h>
82#include <net/if_dl.h>
83#include <net/route.h>
84
85#include <netinet/in.h>
86#include <netinet/in_var.h>
87#include <netinet6/in6_var.h>
88#include <netinet/ip6.h>
89#include <netinet/icmp6.h>
90#include <netinet6/ip6_var.h>
91#include <netinet/in_pcb.h>
92#include <netinet/tcp.h>
93#include <netinet/tcp_seq.h>
94#include <netinet/tcp_var.h>
95#include <netinet6/nd6.h>
96#include <netinet6/mld6_var.h>
97#include <netinet6/scope6_var.h>
98
99#ifndef __SOCKUNION_DECLARED
100union sockunion {
101 struct sockaddr_storage ss;
102 struct sockaddr sa;
103 struct sockaddr_dl sdl;
104 struct sockaddr_in6 sin6;
105};
106typedef union sockunion sockunion_t;
107#define __SOCKUNION_DECLARED
108#endif /* __SOCKUNION_DECLARED */
109
110static void im6f_commit(struct in6_mfilter *);
111static int im6f_get_source(struct in6_mfilter *imf,
112 const struct sockaddr_in6 *psin,
113 struct in6_msource **);
114static struct in6_msource *
115 im6f_graft(struct in6_mfilter *, const uint8_t,
116 const struct sockaddr_in6 *);
117static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
118static void im6f_rollback(struct in6_mfilter *);
119static void im6f_reap(struct in6_mfilter *);
120static int im6o_grow(struct ip6_moptions *, size_t);
121static size_t im6o_match_group(const struct ip6_moptions *,
122 const struct ifnet *, const struct sockaddr *);
123static struct in6_msource *
124 im6o_match_source(const struct ip6_moptions *, const size_t,
125 const struct sockaddr *);
126static void im6s_merge(struct ip6_msource *ims,
127 const struct in6_msource *lims, const int rollback);
128static int in6_mc_get(struct ifnet *, const struct in6_addr *,
129 struct in6_multi **);
130static int in6m_get_source(struct in6_multi *inm,
131 const struct in6_addr *addr, const int noalloc,
132 struct ip6_msource **pims);
133static int in6m_is_ifp_detached(const struct in6_multi *);
134static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *);
135static void in6m_reap(struct in6_multi *);
136static struct ip6_moptions *
137 in6p_findmoptions(struct inpcb *);
138static int in6p_get_source_filters(struct inpcb *, struct sockopt *);
139static int in6p_lookup_v4addr(struct ipv6_mreq *, struct ip_mreq *);
140static int in6p_join_group(struct inpcb *, struct sockopt *);
141static int in6p_leave_group(struct inpcb *, struct sockopt *);
142static struct ifnet *
143 in6p_lookup_mcast_ifp(const struct inpcb *,
144 const struct sockaddr_in6 *);
145static int in6p_block_unblock_source(struct inpcb *, struct sockopt *);
146static int in6p_set_multicast_if(struct inpcb *, struct sockopt *);
147static int in6p_set_source_filters(struct inpcb *, struct sockopt *);
148static int sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req
*req)
;
149static __inline__ int ip6_msource_cmp(const struct ip6_msource *,
150 const struct ip6_msource *);
151
152SYSCTL_DECL(_net_inet6_ip6)extern struct sysctl_oid_list sysctl__net_inet6_ip6_children; /* XXX Not in any common header. */
153
154SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPv6 multicast")struct sysctl_oid_list sysctl__net_inet6_ip6_mcast_children; struct
sysctl_oid sysctl__net_inet6_ip6_mcast = { &sysctl__net_inet6_ip6_children
, { 0 }, (-1), (int)(1|(0x80000000|0x40000000) | 0x00800000|0x00400000
), (void*)&sysctl__net_inet6_ip6_mcast_children, (int)(0)
, "mcast", 0, "N", "IPv6 multicast", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast;
;
155
156static unsigned long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER512;
157SYSCTL_LONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,struct sysctl_oid sysctl__net_inet6_ip6_mcast_maxgrpsrc = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(2|(0x80000000
|0x40000000) | 0x00800000|0x00400000), &in6_mcast_maxgrpsrc
, (int)(0), "maxgrpsrc", sysctl_handle_long, "L", "Max source filters per group"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_maxgrpsrc
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_maxgrpsrc; typedef char _sysctl__net_inet6_ip6_mcast_maxgrpsrc_size_check
[(__builtin_constant_p(&in6_mcast_maxgrpsrc) || sizeof(*(
&in6_mcast_maxgrpsrc)) == sizeof(long)) ? 0 : -1];
158 CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxgrpsrc,struct sysctl_oid sysctl__net_inet6_ip6_mcast_maxgrpsrc = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(2|(0x80000000
|0x40000000) | 0x00800000|0x00400000), &in6_mcast_maxgrpsrc
, (int)(0), "maxgrpsrc", sysctl_handle_long, "L", "Max source filters per group"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_maxgrpsrc
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_maxgrpsrc; typedef char _sysctl__net_inet6_ip6_mcast_maxgrpsrc_size_check
[(__builtin_constant_p(&in6_mcast_maxgrpsrc) || sizeof(*(
&in6_mcast_maxgrpsrc)) == sizeof(long)) ? 0 : -1];
159 "Max source filters per group")struct sysctl_oid sysctl__net_inet6_ip6_mcast_maxgrpsrc = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(2|(0x80000000
|0x40000000) | 0x00800000|0x00400000), &in6_mcast_maxgrpsrc
, (int)(0), "maxgrpsrc", sysctl_handle_long, "L", "Max source filters per group"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_maxgrpsrc
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_maxgrpsrc; typedef char _sysctl__net_inet6_ip6_mcast_maxgrpsrc_size_check
[(__builtin_constant_p(&in6_mcast_maxgrpsrc) || sizeof(*(
&in6_mcast_maxgrpsrc)) == sizeof(long)) ? 0 : -1];
;
160
161static unsigned long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER128;
162SYSCTL_LONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,struct sysctl_oid sysctl__net_inet6_ip6_mcast_maxsocksrc = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(2|(0x80000000
|0x40000000) | 0x00800000|0x00400000), &in6_mcast_maxsocksrc
, (int)(0), "maxsocksrc", sysctl_handle_long, "L", "Max source filters per socket"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_maxsocksrc
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_maxsocksrc; typedef char _sysctl__net_inet6_ip6_mcast_maxsocksrc_size_check
[(__builtin_constant_p(&in6_mcast_maxsocksrc) || sizeof(*
(&in6_mcast_maxsocksrc)) == sizeof(long)) ? 0 : -1];
163 CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxsocksrc,struct sysctl_oid sysctl__net_inet6_ip6_mcast_maxsocksrc = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(2|(0x80000000
|0x40000000) | 0x00800000|0x00400000), &in6_mcast_maxsocksrc
, (int)(0), "maxsocksrc", sysctl_handle_long, "L", "Max source filters per socket"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_maxsocksrc
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_maxsocksrc; typedef char _sysctl__net_inet6_ip6_mcast_maxsocksrc_size_check
[(__builtin_constant_p(&in6_mcast_maxsocksrc) || sizeof(*
(&in6_mcast_maxsocksrc)) == sizeof(long)) ? 0 : -1];
164 "Max source filters per socket")struct sysctl_oid sysctl__net_inet6_ip6_mcast_maxsocksrc = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(2|(0x80000000
|0x40000000) | 0x00800000|0x00400000), &in6_mcast_maxsocksrc
, (int)(0), "maxsocksrc", sysctl_handle_long, "L", "Max source filters per socket"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_maxsocksrc
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_maxsocksrc; typedef char _sysctl__net_inet6_ip6_mcast_maxsocksrc_size_check
[(__builtin_constant_p(&in6_mcast_maxsocksrc) || sizeof(*
(&in6_mcast_maxsocksrc)) == sizeof(long)) ? 0 : -1];
;
165
166int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP1;
167SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_LOCKED,struct sysctl_oid sysctl__net_inet6_ip6_mcast_loop = { &sysctl__net_inet6_ip6_mcast_children
, { 0 }, (-1), (int)(2|(0x80000000|0x40000000) | 0x00800000|0x00400000
), &in6_mcast_loop, (int)(0), "loop", sysctl_handle_int, "I"
, "Loopback multicast datagrams by default", 1, 0 }; void const
* __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_loop __attribute__
((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__net_inet6_ip6_mcast_loop
; typedef char _sysctl__net_inet6_ip6_mcast_loop_size_check[(
__builtin_constant_p(&in6_mcast_loop) || sizeof(*(&in6_mcast_loop
)) == sizeof(int)) ? 0 : -1];
168 &in6_mcast_loop, 0, "Loopback multicast datagrams by default")struct sysctl_oid sysctl__net_inet6_ip6_mcast_loop = { &sysctl__net_inet6_ip6_mcast_children
, { 0 }, (-1), (int)(2|(0x80000000|0x40000000) | 0x00800000|0x00400000
), &in6_mcast_loop, (int)(0), "loop", sysctl_handle_int, "I"
, "Loopback multicast datagrams by default", 1, 0 }; void const
* __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_loop __attribute__
((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__net_inet6_ip6_mcast_loop
; typedef char _sysctl__net_inet6_ip6_mcast_loop_size_check[(
__builtin_constant_p(&in6_mcast_loop) || sizeof(*(&in6_mcast_loop
)) == sizeof(int)) ? 0 : -1];
;
169
170SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,struct sysctl_oid_list sysctl__net_inet6_ip6_mcast_filters_children
; struct sysctl_oid sysctl__net_inet6_ip6_mcast_filters = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(1|0x80000000
| 0x00800000|0x00400000), (void*)&sysctl__net_inet6_ip6_mcast_filters_children
, (int)(0), "filters", sysctl_ip6_mcast_filters, "N", "Per-interface stack-wide source filters"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_filters
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_filters;
171 CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_ip6_mcast_filters,struct sysctl_oid_list sysctl__net_inet6_ip6_mcast_filters_children
; struct sysctl_oid sysctl__net_inet6_ip6_mcast_filters = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(1|0x80000000
| 0x00800000|0x00400000), (void*)&sysctl__net_inet6_ip6_mcast_filters_children
, (int)(0), "filters", sysctl_ip6_mcast_filters, "N", "Per-interface stack-wide source filters"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_filters
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_filters;
172 "Per-interface stack-wide source filters")struct sysctl_oid_list sysctl__net_inet6_ip6_mcast_filters_children
; struct sysctl_oid sysctl__net_inet6_ip6_mcast_filters = { &
sysctl__net_inet6_ip6_mcast_children, { 0 }, (-1), (int)(1|0x80000000
| 0x00800000|0x00400000), (void*)&sysctl__net_inet6_ip6_mcast_filters_children
, (int)(0), "filters", sysctl_ip6_mcast_filters, "N", "Per-interface stack-wide source filters"
, 1, 0 }; void const * __set___sysctl_set_sym_sysctl__net_inet6_ip6_mcast_filters
__attribute__ ((section("__DATA,__sysctl_set"),used)) = (void
*)&sysctl__net_inet6_ip6_mcast_filters;
;
173
174RB_GENERATE_PREV(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp)struct ip6_msource *ip6_msource_tree_RB_GETPARENT(struct ip6_msource
*elm) { struct ip6_msource *parent = (elm)->im6s_link.rbe_parent
; if( parent != ((void *)0)) { parent = (struct ip6_msource*)
((uintptr_t)parent & ~(uintptr_t)0x1); return( (struct ip6_msource
*) ( (parent == (struct ip6_msource*) ((void *)0)) ? ((void *
)0): parent)); } return((struct ip6_msource*)((void *)0)); } int
ip6_msource_tree_RB_GETCOLOR(struct ip6_msource *elm) { int color
= 0; color = (int)((uintptr_t)(elm)->im6s_link.rbe_parent
& (uintptr_t)0x1); return(color); } void ip6_msource_tree_RB_SETCOLOR
(struct ip6_msource *elm,int color) { struct ip6_msource *parent
= ip6_msource_tree_RB_GETPARENT(elm); if(parent == (struct ip6_msource
*)((void *)0)) parent = (struct ip6_msource*) ((void *)0); (elm
)->im6s_link.rbe_parent = (struct ip6_msource*)((uintptr_t
)parent | (unsigned int)color);} struct ip6_msource *ip6_msource_tree_RB_SETPARENT
(struct ip6_msource *elm, struct ip6_msource *parent) { int color
= ip6_msource_tree_RB_GETCOLOR(elm); (elm)->im6s_link.rbe_parent
= parent; if(color) ip6_msource_tree_RB_SETCOLOR(elm, color)
; return(ip6_msource_tree_RB_GETPARENT(elm)); } void ip6_msource_tree_RB_INSERT_COLOR
(struct ip6_msource_tree *head, struct ip6_msource *elm) { struct
ip6_msource *parent, *gparent, *tmp; while ((parent = ip6_msource_tree_RB_GETPARENT
(elm)) != ((void *)0) && ip6_msource_tree_RB_GETCOLOR
(parent) == 1) { gparent = ip6_msource_tree_RB_GETPARENT(parent
); if (parent == (gparent)->im6s_link.rbe_left) { tmp = (gparent
)->im6s_link.rbe_right; if (tmp && ip6_msource_tree_RB_GETCOLOR
(tmp) == 1) { ip6_msource_tree_RB_SETCOLOR(tmp, 0); do { ip6_msource_tree_RB_SETCOLOR
(parent, 0); ip6_msource_tree_RB_SETCOLOR(gparent, 1); } while
( 0); elm = gparent; continue; } if ((parent)->im6s_link.
rbe_right == elm) { do { (tmp) = (parent)->im6s_link.rbe_right
; if (((parent)->im6s_link.rbe_right = (tmp)->im6s_link
.rbe_left) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT((tmp
)->im6s_link.rbe_left,(parent)); } (void)(parent); if (ip6_msource_tree_RB_SETPARENT
(tmp, ip6_msource_tree_RB_GETPARENT(parent)) != ((void *)0)) {
if ((parent) == (ip6_msource_tree_RB_GETPARENT(parent))->
im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT(parent))->
im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_left = (parent); ip6_msource_tree_RB_SETPARENT
(parent, (tmp)); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); tmp = parent; parent = elm; elm = tmp; } do { ip6_msource_tree_RB_SETCOLOR
(parent, 0); ip6_msource_tree_RB_SETCOLOR(gparent, 1); } while
( 0); do { (tmp) = (gparent)->im6s_link.rbe_left; if (((gparent
)->im6s_link.rbe_left = (tmp)->im6s_link.rbe_right) != (
(void *)0)) { ip6_msource_tree_RB_SETPARENT((tmp)->im6s_link
.rbe_right, (gparent)); } (void)(gparent); if (ip6_msource_tree_RB_SETPARENT
(tmp, ip6_msource_tree_RB_GETPARENT(gparent)) != ((void *)0))
{ if ((gparent) == (ip6_msource_tree_RB_GETPARENT(gparent))->
im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT(gparent))->
im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(gparent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_right = (gparent); ip6_msource_tree_RB_SETPARENT
(gparent, tmp); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); } else { tmp = (gparent)->im6s_link.rbe_left; if (tmp
&& ip6_msource_tree_RB_GETCOLOR(tmp) == 1) { ip6_msource_tree_RB_SETCOLOR
(tmp, 0); do { ip6_msource_tree_RB_SETCOLOR(parent, 0); ip6_msource_tree_RB_SETCOLOR
(gparent, 1); } while ( 0); elm = gparent; continue; } if ((parent
)->im6s_link.rbe_left == elm) { do { (tmp) = (parent)->
im6s_link.rbe_left; if (((parent)->im6s_link.rbe_left = (tmp
)->im6s_link.rbe_right) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT
((tmp)->im6s_link.rbe_right, (parent)); } (void)(parent); if
(ip6_msource_tree_RB_SETPARENT(tmp, ip6_msource_tree_RB_GETPARENT
(parent)) != ((void *)0)) { if ((parent) == (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_right = (parent); ip6_msource_tree_RB_SETPARENT
(parent, tmp); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); tmp = parent; parent = elm; elm = tmp; } do { ip6_msource_tree_RB_SETCOLOR
(parent, 0); ip6_msource_tree_RB_SETCOLOR(gparent, 1); } while
( 0); do { (tmp) = (gparent)->im6s_link.rbe_right; if (((
gparent)->im6s_link.rbe_right = (tmp)->im6s_link.rbe_left
) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT((tmp)->im6s_link
.rbe_left,(gparent)); } (void)(gparent); if (ip6_msource_tree_RB_SETPARENT
(tmp, ip6_msource_tree_RB_GETPARENT(gparent)) != ((void *)0))
{ if ((gparent) == (ip6_msource_tree_RB_GETPARENT(gparent))->
im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT(gparent))->
im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(gparent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_left = (gparent); ip6_msource_tree_RB_SETPARENT
(gparent, (tmp)); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); } } ip6_msource_tree_RB_SETCOLOR(head->rbh_root, 0); }
void ip6_msource_tree_RB_REMOVE_COLOR(struct ip6_msource_tree
*head, struct ip6_msource *parent, struct ip6_msource *elm) {
struct ip6_msource *tmp; while ((elm == ((void *)0) || ip6_msource_tree_RB_GETCOLOR
(elm) == 0) && elm != (head)->rbh_root) { if ((parent
)->im6s_link.rbe_left == elm) { tmp = (parent)->im6s_link
.rbe_right; if (ip6_msource_tree_RB_GETCOLOR(tmp) == 1) { do {
ip6_msource_tree_RB_SETCOLOR(tmp, 0); ip6_msource_tree_RB_SETCOLOR
(parent, 1); } while ( 0); do { (tmp) = (parent)->im6s_link
.rbe_right; if (((parent)->im6s_link.rbe_right = (tmp)->
im6s_link.rbe_left) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT
((tmp)->im6s_link.rbe_left,(parent)); } (void)(parent); if
(ip6_msource_tree_RB_SETPARENT(tmp, ip6_msource_tree_RB_GETPARENT
(parent)) != ((void *)0)) { if ((parent) == (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_left = (parent); ip6_msource_tree_RB_SETPARENT
(parent, (tmp)); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); tmp = (parent)->im6s_link.rbe_right; } if (((tmp)->
im6s_link.rbe_left == ((void *)0) || ip6_msource_tree_RB_GETCOLOR
((tmp)->im6s_link.rbe_left) == 0) && ((tmp)->im6s_link
.rbe_right == ((void *)0) || ip6_msource_tree_RB_GETCOLOR((tmp
)->im6s_link.rbe_right) == 0)) { ip6_msource_tree_RB_SETCOLOR
(tmp, 1); elm = parent; parent = ip6_msource_tree_RB_GETPARENT
(elm); } else { if ((tmp)->im6s_link.rbe_right == ((void *
)0) || ip6_msource_tree_RB_GETCOLOR((tmp)->im6s_link.rbe_right
) == 0) { struct ip6_msource *oleft; if ((oleft = (tmp)->im6s_link
.rbe_left) != ((void *)0)) ip6_msource_tree_RB_SETCOLOR(oleft
, 0); ip6_msource_tree_RB_SETCOLOR(tmp, 1); do { (oleft) = (tmp
)->im6s_link.rbe_left; if (((tmp)->im6s_link.rbe_left =
(oleft)->im6s_link.rbe_right) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT
((oleft)->im6s_link.rbe_right, (tmp)); } (void)(tmp); if (
ip6_msource_tree_RB_SETPARENT(oleft, ip6_msource_tree_RB_GETPARENT
(tmp)) != ((void *)0)) { if ((tmp) == (ip6_msource_tree_RB_GETPARENT
(tmp))->im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT
(tmp))->im6s_link.rbe_left = (oleft); else (ip6_msource_tree_RB_GETPARENT
(tmp))->im6s_link.rbe_right = (oleft); } else (head)->rbh_root
= (oleft); (oleft)->im6s_link.rbe_right = (tmp); ip6_msource_tree_RB_SETPARENT
(tmp, oleft); (void)(oleft); if ((ip6_msource_tree_RB_GETPARENT
(oleft))) (void)(ip6_msource_tree_RB_GETPARENT(oleft)); } while
( 0); tmp = (parent)->im6s_link.rbe_right; } ip6_msource_tree_RB_SETCOLOR
(tmp, (ip6_msource_tree_RB_GETCOLOR(parent))); ip6_msource_tree_RB_SETCOLOR
(parent, 0); if ((tmp)->im6s_link.rbe_right) ip6_msource_tree_RB_SETCOLOR
((tmp)->im6s_link.rbe_right,0); do { (tmp) = (parent)->
im6s_link.rbe_right; if (((parent)->im6s_link.rbe_right = (
tmp)->im6s_link.rbe_left) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT
((tmp)->im6s_link.rbe_left,(parent)); } (void)(parent); if
(ip6_msource_tree_RB_SETPARENT(tmp, ip6_msource_tree_RB_GETPARENT
(parent)) != ((void *)0)) { if ((parent) == (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_left = (parent); ip6_msource_tree_RB_SETPARENT
(parent, (tmp)); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); elm = (head)->rbh_root; break; } } else { tmp = (parent
)->im6s_link.rbe_left; if (ip6_msource_tree_RB_GETCOLOR(tmp
) == 1) { do { ip6_msource_tree_RB_SETCOLOR(tmp, 0); ip6_msource_tree_RB_SETCOLOR
(parent, 1); } while ( 0); do { (tmp) = (parent)->im6s_link
.rbe_left; if (((parent)->im6s_link.rbe_left = (tmp)->im6s_link
.rbe_right) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT((
tmp)->im6s_link.rbe_right, (parent)); } (void)(parent); if
(ip6_msource_tree_RB_SETPARENT(tmp, ip6_msource_tree_RB_GETPARENT
(parent)) != ((void *)0)) { if ((parent) == (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_right = (parent); ip6_msource_tree_RB_SETPARENT
(parent, tmp); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); tmp = (parent)->im6s_link.rbe_left; } if (((tmp)->
im6s_link.rbe_left == ((void *)0) || ip6_msource_tree_RB_GETCOLOR
((tmp)->im6s_link.rbe_left) == 0) && ((tmp)->im6s_link
.rbe_right == ((void *)0) || ip6_msource_tree_RB_GETCOLOR((tmp
)->im6s_link.rbe_right) == 0)) { ip6_msource_tree_RB_SETCOLOR
(tmp, 1); elm = parent; parent = ip6_msource_tree_RB_GETPARENT
(elm); } else { if ((tmp)->im6s_link.rbe_left == ((void *)
0) || ip6_msource_tree_RB_GETCOLOR((tmp)->im6s_link.rbe_left
) == 0) { struct ip6_msource *oright; if ((oright = (tmp)->
im6s_link.rbe_right) != ((void *)0)) ip6_msource_tree_RB_SETCOLOR
(oright, 0); ip6_msource_tree_RB_SETCOLOR(tmp, 1); do { (oright
) = (tmp)->im6s_link.rbe_right; if (((tmp)->im6s_link.rbe_right
= (oright)->im6s_link.rbe_left) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT
((oright)->im6s_link.rbe_left,(tmp)); } (void)(tmp); if (ip6_msource_tree_RB_SETPARENT
(oright, ip6_msource_tree_RB_GETPARENT(tmp)) != ((void *)0)) {
if ((tmp) == (ip6_msource_tree_RB_GETPARENT(tmp))->im6s_link
.rbe_left) (ip6_msource_tree_RB_GETPARENT(tmp))->im6s_link
.rbe_left = (oright); else (ip6_msource_tree_RB_GETPARENT(tmp
))->im6s_link.rbe_right = (oright); } else (head)->rbh_root
= (oright); (oright)->im6s_link.rbe_left = (tmp); ip6_msource_tree_RB_SETPARENT
(tmp, (oright)); (void)(oright); if ((ip6_msource_tree_RB_GETPARENT
(oright))) (void)(ip6_msource_tree_RB_GETPARENT(oright)); } while
( 0); tmp = (parent)->im6s_link.rbe_left; } ip6_msource_tree_RB_SETCOLOR
(tmp,(ip6_msource_tree_RB_GETCOLOR(parent))); ip6_msource_tree_RB_SETCOLOR
(parent, 0); if ((tmp)->im6s_link.rbe_left) ip6_msource_tree_RB_SETCOLOR
((tmp)->im6s_link.rbe_left, 0); do { (tmp) = (parent)->
im6s_link.rbe_left; if (((parent)->im6s_link.rbe_left = (tmp
)->im6s_link.rbe_right) != ((void *)0)) { ip6_msource_tree_RB_SETPARENT
((tmp)->im6s_link.rbe_right, (parent)); } (void)(parent); if
(ip6_msource_tree_RB_SETPARENT(tmp, ip6_msource_tree_RB_GETPARENT
(parent)) != ((void *)0)) { if ((parent) == (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left) (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_left = (tmp); else (ip6_msource_tree_RB_GETPARENT
(parent))->im6s_link.rbe_right = (tmp); } else (head)->
rbh_root = (tmp); (tmp)->im6s_link.rbe_right = (parent); ip6_msource_tree_RB_SETPARENT
(parent, tmp); (void)(tmp); if ((ip6_msource_tree_RB_GETPARENT
(tmp))) (void)(ip6_msource_tree_RB_GETPARENT(tmp)); } while (
0); elm = (head)->rbh_root; break; } } } if (elm) ip6_msource_tree_RB_SETCOLOR
(elm, 0); } struct ip6_msource * ip6_msource_tree_RB_REMOVE(struct
ip6_msource_tree *head, struct ip6_msource *elm) { struct ip6_msource
*child, *parent, *old = elm; int color; if ((elm)->im6s_link
.rbe_left == ((void *)0)) child = (elm)->im6s_link.rbe_right
; else if ((elm)->im6s_link.rbe_right == ((void *)0)) child
= (elm)->im6s_link.rbe_left; else { struct ip6_msource *left
; elm = (elm)->im6s_link.rbe_right; while ((left = (elm)->
im6s_link.rbe_left) != ((void *)0)) elm = left; child = (elm)
->im6s_link.rbe_right; parent = ip6_msource_tree_RB_GETPARENT
(elm); color = ip6_msource_tree_RB_GETCOLOR(elm); if (child) ip6_msource_tree_RB_SETPARENT
(child, parent); if (parent) { if ((parent)->im6s_link.rbe_left
== elm) (parent)->im6s_link.rbe_left = child; else (parent
)->im6s_link.rbe_right = child; (void)(parent); } else (head
)->rbh_root = child; if (ip6_msource_tree_RB_GETPARENT(elm
) == old) parent = elm; (elm)->im6s_link = (old)->im6s_link
; if (ip6_msource_tree_RB_GETPARENT(old)) { if ((ip6_msource_tree_RB_GETPARENT
(old))->im6s_link.rbe_left == old) (ip6_msource_tree_RB_GETPARENT
(old))->im6s_link.rbe_left = elm; else (ip6_msource_tree_RB_GETPARENT
(old))->im6s_link.rbe_right = elm; (void)(ip6_msource_tree_RB_GETPARENT
(old)); } else (head)->rbh_root = elm; ip6_msource_tree_RB_SETPARENT
((old)->im6s_link.rbe_left, elm); if ((old)->im6s_link.
rbe_right) ip6_msource_tree_RB_SETPARENT((old)->im6s_link.
rbe_right, elm); if (parent) { left = parent; do { (void)(left
); } while ((left = ip6_msource_tree_RB_GETPARENT(left)) != (
(void *)0)); } goto color; } parent = ip6_msource_tree_RB_GETPARENT
(elm); color = ip6_msource_tree_RB_GETCOLOR(elm); if (child) ip6_msource_tree_RB_SETPARENT
(child, parent); if (parent) { if ((parent)->im6s_link.rbe_left
== elm) (parent)->im6s_link.rbe_left = child; else (parent
)->im6s_link.rbe_right = child; (void)(parent); } else (head
)->rbh_root = child; color: if (color == 0) ip6_msource_tree_RB_REMOVE_COLOR
(head, parent, child); return (old); } struct ip6_msource * ip6_msource_tree_RB_INSERT
(struct ip6_msource_tree *head, struct ip6_msource *elm) { struct
ip6_msource *tmp; struct ip6_msource *parent = ((void *)0); int
comp = 0; tmp = (head)->rbh_root; while (tmp) { parent = tmp
; comp = (ip6_msource_cmp)(elm, parent); if (comp < 0) tmp
= (tmp)->im6s_link.rbe_left; else if (comp > 0) tmp = (
tmp)->im6s_link.rbe_right; else return (tmp); } do { ip6_msource_tree_RB_SETPARENT
(elm, parent); (elm)->im6s_link.rbe_left = (elm)->im6s_link
.rbe_right = ((void *)0); ip6_msource_tree_RB_SETCOLOR(elm, 1
); } while ( 0); if (parent != ((void *)0)) { if (comp < 0
) (parent)->im6s_link.rbe_left = elm; else (parent)->im6s_link
.rbe_right = elm; (void)(parent); } else (head)->rbh_root =
elm; ip6_msource_tree_RB_INSERT_COLOR(head, elm); return (((
void *)0)); } struct ip6_msource * ip6_msource_tree_RB_FIND(struct
ip6_msource_tree *head, struct ip6_msource *elm) { struct ip6_msource
*tmp = (head)->rbh_root; int comp; while (tmp) { comp = ip6_msource_cmp
(elm, tmp); if (comp < 0) tmp = (tmp)->im6s_link.rbe_left
; else if (comp > 0) tmp = (tmp)->im6s_link.rbe_right; else
return (tmp); } return (((void *)0)); } struct ip6_msource *
ip6_msource_tree_RB_NEXT(struct ip6_msource *elm) { if ((elm
)->im6s_link.rbe_right) { elm = (elm)->im6s_link.rbe_right
; while ((elm)->im6s_link.rbe_left) elm = (elm)->im6s_link
.rbe_left; } else { if (ip6_msource_tree_RB_GETPARENT(elm) &&
(elm == (ip6_msource_tree_RB_GETPARENT(elm))->im6s_link.rbe_left
)) elm = ip6_msource_tree_RB_GETPARENT(elm); else { while (ip6_msource_tree_RB_GETPARENT
(elm) && (elm == (ip6_msource_tree_RB_GETPARENT(elm))
->im6s_link.rbe_right)) elm = ip6_msource_tree_RB_GETPARENT
(elm); elm = ip6_msource_tree_RB_GETPARENT(elm); } } return (
elm); } struct ip6_msource * ip6_msource_tree_RB_MINMAX(struct
ip6_msource_tree *head, int val) { struct ip6_msource *tmp =
(head)->rbh_root; struct ip6_msource *parent = ((void *)0
); while (tmp) { parent = tmp; if (val < 0) tmp = (tmp)->
im6s_link.rbe_left; else tmp = (tmp)->im6s_link.rbe_right;
} return (parent); } struct ip6_msource * ip6_msource_tree_RB_PREV
(struct ip6_msource *elm) { if ((elm)->im6s_link.rbe_left)
{ elm = (elm)->im6s_link.rbe_left; while ((elm)->im6s_link
.rbe_right) elm = (elm)->im6s_link.rbe_right; } else { if (
ip6_msource_tree_RB_GETPARENT(elm) && (elm == (ip6_msource_tree_RB_GETPARENT
(elm))->im6s_link.rbe_right)) elm = ip6_msource_tree_RB_GETPARENT
(elm); else { while (ip6_msource_tree_RB_GETPARENT(elm) &&
(elm == (ip6_msource_tree_RB_GETPARENT(elm))->im6s_link.rbe_left
)) elm = ip6_msource_tree_RB_GETPARENT(elm); elm = ip6_msource_tree_RB_GETPARENT
(elm); } } return (elm); }
;
175
176#define IN6M_TRACE_HIST_SIZE32 32 /* size of trace history */
177
178/* For gdb */
179__private_extern__ unsigned int in6m_trace_hist_size = IN6M_TRACE_HIST_SIZE32;
180
181struct in6_multi_dbg {
182 struct in6_multi in6m; /* in6_multi */
183 u_int16_t in6m_refhold_cnt; /* # of ref */
184 u_int16_t in6m_refrele_cnt; /* # of rele */
185 /*
186 * Circular lists of in6m_addref and in6m_remref callers.
187 */
188 ctrace_t in6m_refhold[IN6M_TRACE_HIST_SIZE32];
189 ctrace_t in6m_refrele[IN6M_TRACE_HIST_SIZE32];
190 /*
191 * Trash list linkage
192 */
193 TAILQ_ENTRY(in6_multi_dbg)struct { struct in6_multi_dbg *tqe_next; struct in6_multi_dbg
**tqe_prev; }
in6m_trash_link;
194};
195
196/* List of trash in6_multi entries protected by in6m_trash_lock */
197static TAILQ_HEAD(, in6_multi_dbg)struct { struct in6_multi_dbg *tqh_first; struct in6_multi_dbg
**tqh_last; }
in6m_trash_head;
198static decl_lck_mtx_data(, in6m_trash_lock)lck_mtx_t in6m_trash_lock;;
199
200#if DEBUG
201static unsigned int in6m_debug = 1; /* debugging (enabled) */
202#else
203static unsigned int in6m_debug; /* debugging (disabled) */
204#endif /* !DEBUG */
205static unsigned int in6m_size; /* size of zone element */
206static struct zone *in6m_zone; /* zone for in6_multi */
207
208#define IN6M_ZONE_MAX64 64 /* maximum elements in zone */
209#define IN6M_ZONE_NAME"in6_multi" "in6_multi" /* zone name */
210
211static unsigned int imm_size; /* size of zone element */
212static struct zone *imm_zone; /* zone for in6_multi_mship */
213
214#define IMM_ZONE_MAX64 64 /* maximum elements in zone */
215#define IMM_ZONE_NAME"in6_multi_mship" "in6_multi_mship" /* zone name */
216
217#define IP6MS_ZONE_MAX64 64 /* maximum elements in zone */
218#define IP6MS_ZONE_NAME"ip6_msource" "ip6_msource" /* zone name */
219
220static unsigned int ip6ms_size; /* size of zone element */
221static struct zone *ip6ms_zone; /* zone for ip6_msource */
222
223#define IN6MS_ZONE_MAX64 64 /* maximum elements in zone */
224#define IN6MS_ZONE_NAME"in6_msource" "in6_msource" /* zone name */
225
226static unsigned int in6ms_size; /* size of zone element */
227static struct zone *in6ms_zone; /* zone for in6_msource */
228
229/* Lock group and attribute for in6_multihead_lock lock */
230static lck_attr_t *in6_multihead_lock_attr;
231static lck_grp_t *in6_multihead_lock_grp;
232static lck_grp_attr_t *in6_multihead_lock_grp_attr;
233
234static decl_lck_rw_data(, in6_multihead_lock)lck_rw_t in6_multihead_lock;;
235struct in6_multihead in6_multihead;
236
237static struct in6_multi *in6_multi_alloc(int);
238static void in6_multi_free(struct in6_multi *);
239static void in6_multi_attach(struct in6_multi *);
240static struct in6_multi_mship *in6_multi_mship_alloc(int);
241static void in6_multi_mship_free(struct in6_multi_mship *);
242static void in6m_trace(struct in6_multi *, int);
243
244static struct ip6_msource *ip6ms_alloc(int);
245static void ip6ms_free(struct ip6_msource *);
246static struct in6_msource *in6ms_alloc(int);
247static void in6ms_free(struct in6_msource *);
248
249/*
250 * IPv6 source tree comparison function.
251 *
252 * An ordered predicate is necessary; bcmp() is not documented to return
253 * an indication of order, memcmp() is, and is an ISO C99 requirement.
254 */
255static __inline int
256ip6_msource_cmp(const struct ip6_msource *a, const struct ip6_msource *b)
257{
258 return (memcmp(&a->im6s_addr, &b->im6s_addr, sizeof(struct in6_addr)));
259}
260
261/*
262 * Inline function which wraps assertions for a valid ifp.
263 */
264static __inline__ int
265in6m_is_ifp_detached(const struct in6_multi *inm)
266{
267 VERIFY(inm->in6m_ifma != NULL)((void)(__builtin_expect(!!((long)((inm->in6m_ifma != ((void
*)0)))), 1L) || assfail("inm->in6m_ifma != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 267)))
;
268 VERIFY(inm->in6m_ifp == inm->in6m_ifma->ifma_ifp)((void)(__builtin_expect(!!((long)((inm->in6m_ifp == inm->
in6m_ifma->ifma_ifp))), 1L) || assfail("inm->in6m_ifp == inm->in6m_ifma->ifma_ifp"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 268
)))
;
269
270 return (!ifnet_is_attached(inm->in6m_ifp, 0));
271}
272
273/*
274 * Initialize an in6_mfilter structure to a known state at t0, t1
275 * with an empty source filter list.
276 */
277static __inline__ void
278im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
279{
280 memset(imf, 0, sizeof(struct in6_mfilter));
281 RB_INIT(&imf->im6f_sources)do { (&imf->im6f_sources)->rbh_root = ((void *)0); }
while ( 0)
;
282 imf->im6f_st[0] = st0;
283 imf->im6f_st[1] = st1;
284}
285
286/*
287 * Resize the ip6_moptions vector to the next power-of-two minus 1.
288 */
289static int
290im6o_grow(struct ip6_moptions *imo, size_t newmax)
291{
292 struct in6_multi **nmships;
293 struct in6_multi **omships;
294 struct in6_mfilter *nmfilters;
295 struct in6_mfilter *omfilters;
296 size_t idx;
297 size_t oldmax;
298
299 IM6O_LOCK_ASSERT_HELD(imo)lck_mtx_assert(&(imo)->im6o_lock, 1);
300
301 nmships = NULL((void *)0);
302 nmfilters = NULL((void *)0);
303 omships = imo->im6o_membership;
304 omfilters = imo->im6o_mfilters;
305 oldmax = imo->im6o_max_memberships;
306 if (newmax == 0)
307 newmax = ((oldmax + 1) * 2) - 1;
308
309 if (newmax > IPV6_MAX_MEMBERSHIPS4095)
310 return (ETOOMANYREFS59);
311
312 if ((nmships = (struct in6_multi **)_REALLOC(omships,({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __REALLOC(omships, sizeof (struct in6_multi *) * newmax,
113, 0x0000 | 0x0004, &site); })
313 sizeof (struct in6_multi *) * newmax, M_IP6MOPTS,({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __REALLOC(omships, sizeof (struct in6_multi *) * newmax,
113, 0x0000 | 0x0004, &site); })
314 M_WAITOK | M_ZERO)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __REALLOC(omships, sizeof (struct in6_multi *) * newmax,
113, 0x0000 | 0x0004, &site); })
) == NULL((void *)0))
315 return (ENOMEM12);
316
317 imo->im6o_membership = nmships;
318
319 if ((nmfilters = (struct in6_mfilter *)_REALLOC(omfilters,({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __REALLOC(omfilters, sizeof (struct in6_mfilter) * newmax
, 112, 0x0000 | 0x0004, &site); })
320 sizeof (struct in6_mfilter) * newmax, M_IN6MFILTER,({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __REALLOC(omfilters, sizeof (struct in6_mfilter) * newmax
, 112, 0x0000 | 0x0004, &site); })
321 M_WAITOK | M_ZERO)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __REALLOC(omfilters, sizeof (struct in6_mfilter) * newmax
, 112, 0x0000 | 0x0004, &site); })
) == NULL((void *)0))
322 return (ENOMEM12);
323
324 imo->im6o_mfilters = nmfilters;
325
326 /* Initialize newly allocated source filter heads. */
327 for (idx = oldmax; idx < newmax; idx++)
328 im6f_init(&nmfilters[idx], MCAST_UNDEFINED0, MCAST_EXCLUDE2);
329
330 imo->im6o_max_memberships = newmax;
331
332 return (0);
333}
334
335/*
336 * Find an IPv6 multicast group entry for this ip6_moptions instance
337 * which matches the specified group, and optionally an interface.
338 * Return its index into the array, or -1 if not found.
339 */
340static size_t
341im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
342 const struct sockaddr *group)
343{
344 const struct sockaddr_in6 *gsin6;
345 struct in6_multi *pinm;
346 int idx;
347 int nmships;
348
349 IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo))lck_mtx_assert(&((struct ip6_moptions *) (long)(imo))->
im6o_lock, 1)
;
350
351 gsin6 = (struct sockaddr_in6 *)(uintptr_t)(size_t)group;
352
353 /* The im6o_membership array may be lazy allocated. */
354 if (imo->im6o_membership == NULL((void *)0) || imo->im6o_num_memberships == 0)
355 return (-1);
356
357 nmships = imo->im6o_num_memberships;
358 for (idx = 0; idx < nmships; idx++) {
359 pinm = imo->im6o_membership[idx];
360 if (pinm == NULL((void *)0))
361 continue;
362 IN6M_LOCK(pinm)lck_mtx_lock(&(pinm)->in6m_lock);
363 if ((ifp == NULL((void *)0) || (pinm->in6m_ifp == ifp)) &&
364 IN6_ARE_ADDR_EQUAL(&pinm->in6m_addr,(bcmp(&(&pinm->in6m_addr)->__u6_addr.__u6_addr8
[0], &(&gsin6->sin6_addr)->__u6_addr.__u6_addr8
[0], sizeof (struct in6_addr)) == 0)
365 &gsin6->sin6_addr)(bcmp(&(&pinm->in6m_addr)->__u6_addr.__u6_addr8
[0], &(&gsin6->sin6_addr)->__u6_addr.__u6_addr8
[0], sizeof (struct in6_addr)) == 0)
) {
366 IN6M_UNLOCK(pinm)lck_mtx_unlock(&(pinm)->in6m_lock);
367 break;
368 }
369 IN6M_UNLOCK(pinm)lck_mtx_unlock(&(pinm)->in6m_lock);
370 }
371 if (idx >= nmships)
372 idx = -1;
373
374 return (idx);
375}
376
377/*
378 * Find an IPv6 multicast source entry for this imo which matches
379 * the given group index for this socket, and source address.
380 *
381 * XXX TODO: The scope ID, if present in src, is stripped before
382 * any comparison. We SHOULD enforce scope/zone checks where the source
383 * filter entry has a link scope.
384 *
385 * NOTE: This does not check if the entry is in-mode, merely if
386 * it exists, which may not be the desired behaviour.
387 */
388static struct in6_msource *
389im6o_match_source(const struct ip6_moptions *imo, const size_t gidx,
390 const struct sockaddr *src)
391{
392 struct ip6_msource find;
393 struct in6_mfilter *imf;
394 struct ip6_msource *ims;
395 const sockunion_t *psa;
396
397 IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo))lck_mtx_assert(&((struct ip6_moptions *) (long)(imo))->
im6o_lock, 1)
;
398
399 VERIFY(src->sa_family == AF_INET6)((void)(__builtin_expect(!!((long)((src->sa_family == 30))
), 1L) || assfail("src->sa_family == AF_INET6", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 399)))
;
400 VERIFY(gidx != (size_t)-1 && gidx < imo->im6o_num_memberships)((void)(__builtin_expect(!!((long)((gidx != (size_t)-1 &&
gidx < imo->im6o_num_memberships))), 1L) || assfail("gidx != (size_t)-1 && gidx < imo->im6o_num_memberships"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 400
)))
;
401
402 /* The im6o_mfilters array may be lazy allocated. */
403 if (imo->im6o_mfilters == NULL((void *)0))
404 return (NULL((void *)0));
405 imf = &imo->im6o_mfilters[gidx];
406
407 psa = (sockunion_t *)(uintptr_t)(size_t)src;
408 find.im6s_addr = psa->sin6.sin6_addr;
409 in6_clearscope(&find.im6s_addr); /* XXX */
410 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find)ip6_msource_tree_RB_FIND(&imf->im6f_sources, &find
)
;
411
412 return ((struct in6_msource *)ims);
413}
414
415/*
416 * Perform filtering for multicast datagrams on a socket by group and source.
417 *
418 * Returns 0 if a datagram should be allowed through, or various error codes
419 * if the socket was not a member of the group, or the source was muted, etc.
420 */
421int
422im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
423 const struct sockaddr *group, const struct sockaddr *src)
424{
425 size_t gidx;
426 struct in6_msource *ims;
427 int mode;
428
429 IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo))lck_mtx_assert(&((struct ip6_moptions *) (long)(imo))->
im6o_lock, 1)
;
430 VERIFY(ifp != NULL)((void)(__builtin_expect(!!((long)((ifp != ((void *)0)))), 1L
) || assfail("ifp != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 430)))
;
431
432 gidx = im6o_match_group(imo, ifp, group);
433 if (gidx == (size_t)-1)
434 return (MCAST_NOTGMEMBER1);
435
436 /*
437 * Check if the source was included in an (S,G) join.
438 * Allow reception on exclusive memberships by default,
439 * reject reception on inclusive memberships by default.
440 * Exclude source only if an in-mode exclude filter exists.
441 * Include source only if an in-mode include filter exists.
442 * NOTE: We are comparing group state here at MLD t1 (now)
443 * with socket-layer t0 (since last downcall).
444 */
445 mode = imo->im6o_mfilters[gidx].im6f_st[1];
446 ims = im6o_match_source(imo, gidx, src);
447
448 if ((ims == NULL((void *)0) && mode == MCAST_INCLUDE1) ||
449 (ims != NULL((void *)0) && ims->im6sl_st[0] != mode))
450 return (MCAST_NOTSMEMBER2);
451
452 return (MCAST_PASS0);
453}
454
455/*
456 * Find and return a reference to an in6_multi record for (ifp, group),
457 * and bump its reference count.
458 * If one does not exist, try to allocate it, and update link-layer multicast
459 * filters on ifp to listen for group.
460 * Assumes the IN6_MULTI lock is held across the call.
461 * Return 0 if successful, otherwise return an appropriate error code.
462 */
463static int
464in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
465 struct in6_multi **pinm)
466{
467 struct sockaddr_in6 gsin6;
468 struct ifmultiaddr *ifma;
469 struct in6_multi *inm;
470 int error;
471
472 *pinm = NULL((void *)0);
473
474 in6_multihead_lock_shared();
475 IN6_LOOKUP_MULTI(group, ifp, inm)do { struct in6_multistep _step; do { in6_multihead_lock_assert
(0x03); (_step).i_in6m = in6_multihead.lh_first; do { in6_multihead_lock_assert
(0x03); if ((((inm)) = ((_step)).i_in6m) != ((void *)0)) ((_step
)).i_in6m = ((_step)).i_in6m->in6m_entry.le_next; } while (
0); } while (0); while ((inm) != ((void *)0)) { lck_mtx_lock_spin
(&(inm)->in6m_lock); if ((inm)->in6m_ifp == (ifp) &&
(bcmp(&(&(inm)->in6m_addr)->__u6_addr.__u6_addr8
[0], &((group))->__u6_addr.__u6_addr8[0], sizeof (struct
in6_addr)) == 0)) { in6m_addref(inm, 1); lck_mtx_unlock(&
(inm)->in6m_lock); break; } lck_mtx_unlock(&(inm)->
in6m_lock); do { in6_multihead_lock_assert(0x03); if (((inm) =
(_step).i_in6m) != ((void *)0)) (_step).i_in6m = (_step).i_in6m
->in6m_entry.le_next; } while (0); } } while (0)
;
476 if (inm != NULL((void *)0)) {
477 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
478 VERIFY(inm->in6m_reqcnt >= 1)((void)(__builtin_expect(!!((long)((inm->in6m_reqcnt >=
1))), 1L) || assfail("inm->in6m_reqcnt >= 1", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 478)))
;
479 inm->in6m_reqcnt++;
480 VERIFY(inm->in6m_reqcnt != 0)((void)(__builtin_expect(!!((long)((inm->in6m_reqcnt != 0)
)), 1L) || assfail("inm->in6m_reqcnt != 0", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 480)))
;
481 *pinm = inm;
482 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
483 in6_multihead_lock_done();
484 /*
485 * We already joined this group; return the in6m
486 * with a refcount held (via lookup) for caller.
487 */
488 return (0);
489 }
490 in6_multihead_lock_done();
491
492 memset(&gsin6, 0, sizeof(gsin6));
493 gsin6.sin6_family = AF_INET630;
494 gsin6.sin6_len = sizeof(struct sockaddr_in6);
495 gsin6.sin6_addr = *group;
496
497 /*
498 * Check if a link-layer group is already associated
499 * with this network-layer group on the given ifnet.
500 */
501 error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
502 if (error != 0)
503 return (error);
504
505 /*
506 * See comments in in6m_remref() for access to ifma_protospec.
507 */
508 in6_multihead_lock_exclusive();
509 IFMA_LOCK(ifma)lck_mtx_lock(&(ifma)->ifma_lock);
510 if ((inm = ifma->ifma_protospec) != NULL((void *)0)) {
511 VERIFY(ifma->ifma_addr != NULL)((void)(__builtin_expect(!!((long)((ifma->ifma_addr != ((void
*)0)))), 1L) || assfail("ifma->ifma_addr != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 511)))
;
512 VERIFY(ifma->ifma_addr->sa_family == AF_INET6)((void)(__builtin_expect(!!((long)((ifma->ifma_addr->sa_family
== 30))), 1L) || assfail("ifma->ifma_addr->sa_family == AF_INET6"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 512
)))
;
513 IN6M_ADDREF(inm)in6m_addref(inm, 0); /* for caller */
514 IFMA_UNLOCK(ifma)lck_mtx_unlock(&(ifma)->ifma_lock);
515 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
516 VERIFY(inm->in6m_ifma == ifma)((void)(__builtin_expect(!!((long)((inm->in6m_ifma == ifma
))), 1L) || assfail("inm->in6m_ifma == ifma", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 516)))
;
517 VERIFY(inm->in6m_ifp == ifp)((void)(__builtin_expect(!!((long)((inm->in6m_ifp == ifp))
), 1L) || assfail("inm->in6m_ifp == ifp", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 517)))
;
518 VERIFY(IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group))((void)(__builtin_expect(!!((long)(((bcmp(&(&inm->
in6m_addr)->__u6_addr.__u6_addr8[0], &(group)->__u6_addr
.__u6_addr8[0], sizeof (struct in6_addr)) == 0)))), 1L) || assfail
("IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 518)))
;
519 if (inm->in6m_debug & IFD_ATTACHED0x1) {
520 VERIFY(inm->in6m_reqcnt >= 1)((void)(__builtin_expect(!!((long)((inm->in6m_reqcnt >=
1))), 1L) || assfail("inm->in6m_reqcnt >= 1", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 520)))
;
521 inm->in6m_reqcnt++;
522 VERIFY(inm->in6m_reqcnt != 0)((void)(__builtin_expect(!!((long)((inm->in6m_reqcnt != 0)
)), 1L) || assfail("inm->in6m_reqcnt != 0", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 522)))
;
523 *pinm = inm;
524 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
525 in6_multihead_lock_done();
526 IFMA_REMREF(ifma)ifma_remref(ifma);
527 /*
528 * We lost the race with another thread doing
529 * in6_mc_get(); since this group has already
530 * been joined; return the inm with a refcount
531 * held for caller.
532 */
533 return (0);
534 }
535 /*
536 * We lost the race with another thread doing in6_delmulti();
537 * the inm referring to the ifma has been detached, thus we
538 * reattach it back to the in6_multihead list, and return the
539 * inm with a refcount held for the caller.
540 */
541 in6_multi_attach(inm);
542 VERIFY((inm->in6m_debug &((void)(__builtin_expect(!!((long)(((inm->in6m_debug &
(0x1 | 0x10)) == 0x1))), 1L) || assfail("(inm->in6m_debug & (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 543
)))
543 (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED)((void)(__builtin_expect(!!((long)(((inm->in6m_debug &
(0x1 | 0x10)) == 0x1))), 1L) || assfail("(inm->in6m_debug & (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 543
)))
;
544 *pinm = inm;
545 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
546 in6_multihead_lock_done();
547 IFMA_REMREF(ifma)ifma_remref(ifma);
548 return (0);
549 }
550 IFMA_UNLOCK(ifma)lck_mtx_unlock(&(ifma)->ifma_lock);
551
552 /*
553 * A new in6_multi record is needed; allocate and initialize it.
554 * We DO NOT perform an MLD join as the in6_ layer may need to
555 * push an initial source list down to MLD to support SSM.
556 *
557 * The initial source filter state is INCLUDE, {} as per the RFC.
558 * Pending state-changes per group are subject to a bounds check.
559 */
560 inm = in6_multi_alloc(M_WAITOK0x0000);
561 if (inm == NULL((void *)0)) {
562 in6_multihead_lock_done();
563 IFMA_REMREF(ifma)ifma_remref(ifma);
564 return (ENOMEM12);
565 }
566 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
567 inm->in6m_addr = *group;
568 inm->in6m_ifp = ifp;
569 inm->in6m_mli = MLD_IFINFO(ifp)((ifp)->if_mli);
570 VERIFY(inm->in6m_mli != NULL)((void)(__builtin_expect(!!((long)((inm->in6m_mli != ((void
*)0)))), 1L) || assfail("inm->in6m_mli != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 570)))
;
571 MLI_ADDREF(inm->in6m_mli)mli_addref(inm->in6m_mli, 0);
572 inm->in6m_ifma = ifma; /* keep refcount from if_addmulti() */
573 inm->in6m_state = MLD_NOT_MEMBER0;
574 /*
575 * Pending state-changes per group are subject to a bounds check.
576 */
577 inm->in6m_scq.ifq_maxlen = MLD_MAX_STATE_CHANGES24;
578 inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED0;
579 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED0;
580 RB_INIT(&inm->in6m_srcs)do { (&inm->in6m_srcs)->rbh_root = ((void *)0); } while
( 0)
;
581 *pinm = inm;
582 in6_multi_attach(inm);
583 VERIFY((inm->in6m_debug &((void)(__builtin_expect(!!((long)(((inm->in6m_debug &
(0x1 | 0x10)) == 0x1))), 1L) || assfail("(inm->in6m_debug & (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 584
)))
584 (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED)((void)(__builtin_expect(!!((long)(((inm->in6m_debug &
(0x1 | 0x10)) == 0x1))), 1L) || assfail("(inm->in6m_debug & (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 584
)))
;
585 IN6M_ADDREF_LOCKED(inm)in6m_addref(inm, 1); /* for caller */
586 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
587
588 IFMA_LOCK(ifma)lck_mtx_lock(&(ifma)->ifma_lock);
589 VERIFY(ifma->ifma_protospec == NULL)((void)(__builtin_expect(!!((long)((ifma->ifma_protospec ==
((void *)0)))), 1L) || assfail("ifma->ifma_protospec == NULL"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 589
)))
;
590 ifma->ifma_protospec = inm;
591 IFMA_UNLOCK(ifma)lck_mtx_unlock(&(ifma)->ifma_lock);
592 in6_multihead_lock_done();
593
594 return (0);
595}
596
597/*
598 * Clear recorded source entries for a group.
599 * Used by the MLD code. Caller must hold the IN6_MULTI lock.
600 * FIXME: Should reap.
601 */
602void
603in6m_clear_recorded(struct in6_multi *inm)
604{
605 struct ip6_msource *ims;
606
607 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
608
609 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs)for ((ims) = ip6_msource_tree_RB_MINMAX(&inm->in6m_srcs
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
610 if (ims->im6s_stp) {
611 ims->im6s_stp = 0;
612 --inm->in6m_st[1].iss_rec;
613 }
614 }
615 VERIFY(inm->in6m_st[1].iss_rec == 0)((void)(__builtin_expect(!!((long)((inm->in6m_st[1].iss_rec
== 0))), 1L) || assfail("inm->in6m_st[1].iss_rec == 0", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 615)))
;
616}
617
618/*
619 * Record a source as pending for a Source-Group MLDv2 query.
620 * This lives here as it modifies the shared tree.
621 *
622 * inm is the group descriptor.
623 * naddr is the address of the source to record in network-byte order.
624 *
625 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will
626 * lazy-allocate a source node in response to an SG query.
627 * Otherwise, no allocation is performed. This saves some memory
628 * with the trade-off that the source will not be reported to the
629 * router if joined in the window between the query response and
630 * the group actually being joined on the local host.
631 *
632 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed.
633 * This turns off the allocation of a recorded source entry if
634 * the group has not been joined.
635 *
636 * Return 0 if the source didn't exist or was already marked as recorded.
637 * Return 1 if the source was marked as recorded by this function.
638 * Return <0 if any error occured (negated errno code).
639 */
640int
641in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
642{
643 struct ip6_msource find;
644 struct ip6_msource *ims, *nims;
645
646 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
647
648 find.im6s_addr = *addr;
649 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find)ip6_msource_tree_RB_FIND(&inm->in6m_srcs, &find);
650 if (ims && ims->im6s_stp)
651 return (0);
652 if (ims == NULL((void *)0)) {
653 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
654 return (-ENOSPC28);
655 nims = ip6ms_alloc(M_WAITOK0x0000);
656 if (nims == NULL((void *)0))
657 return (-ENOMEM12);
658 nims->im6s_addr = find.im6s_addr;
659 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims)ip6_msource_tree_RB_INSERT(&inm->in6m_srcs, nims);
660 ++inm->in6m_nsrc;
661 ims = nims;
662 }
663
664 /*
665 * Mark the source as recorded and update the recorded
666 * source count.
667 */
668 ++ims->im6s_stp;
669 ++inm->in6m_st[1].iss_rec;
670
671 return (1);
672}
673
674/*
675 * Return a pointer to an in6_msource owned by an in6_mfilter,
676 * given its source address.
677 * Lazy-allocate if needed. If this is a new entry its filter state is
678 * undefined at t0.
679 *
680 * imf is the filter set being modified.
681 * addr is the source address.
682 *
683 * Caller is expected to be holding im6o_lock.
684 */
685static int
686im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin,
687 struct in6_msource **plims)
688{
689 struct ip6_msource find;
690 struct ip6_msource *ims;
691 struct in6_msource *lims;
692 int error;
693
694 error = 0;
695 ims = NULL((void *)0);
696 lims = NULL((void *)0);
697
698 find.im6s_addr = psin->sin6_addr;
699 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find)ip6_msource_tree_RB_FIND(&imf->im6f_sources, &find
)
;
700 lims = (struct in6_msource *)ims;
701 if (lims == NULL((void *)0)) {
702 if (imf->im6f_nsrc == in6_mcast_maxsocksrc)
703 return (ENOSPC28);
704 lims = in6ms_alloc(M_WAITOK0x0000);
705 if (lims == NULL((void *)0))
706 return (ENOMEM12);
707 lims->im6s_addr = find.im6s_addr;
708 lims->im6sl_st[0] = MCAST_UNDEFINED0;
709 RB_INSERT(ip6_msource_tree, &imf->im6f_sources,ip6_msource_tree_RB_INSERT(&imf->im6f_sources, (struct
ip6_msource *)lims)
710 (struct ip6_msource *)lims)ip6_msource_tree_RB_INSERT(&imf->im6f_sources, (struct
ip6_msource *)lims)
;
711 ++imf->im6f_nsrc;
712 }
713
714 *plims = lims;
715
716 return (error);
717}
718
719/*
720 * Graft a source entry into an existing socket-layer filter set,
721 * maintaining any required invariants and checking allocations.
722 *
723 * The source is marked as being in the new filter mode at t1.
724 *
725 * Return the pointer to the new node, otherwise return NULL.
726 *
727 * Caller is expected to be holding im6o_lock.
728 */
729static struct in6_msource *
730im6f_graft(struct in6_mfilter *imf, const uint8_t st1,
731 const struct sockaddr_in6 *psin)
732{
733 struct in6_msource *lims;
734
735 lims = in6ms_alloc(M_WAITOK0x0000);
736 if (lims == NULL((void *)0))
737 return (NULL((void *)0));
738 lims->im6s_addr = psin->sin6_addr;
739 lims->im6sl_st[0] = MCAST_UNDEFINED0;
740 lims->im6sl_st[1] = st1;
741 RB_INSERT(ip6_msource_tree, &imf->im6f_sources,ip6_msource_tree_RB_INSERT(&imf->im6f_sources, (struct
ip6_msource *)lims)
742 (struct ip6_msource *)lims)ip6_msource_tree_RB_INSERT(&imf->im6f_sources, (struct
ip6_msource *)lims)
;
743 ++imf->im6f_nsrc;
744
745 return (lims);
746}
747
748/*
749 * Prune a source entry from an existing socket-layer filter set,
750 * maintaining any required invariants and checking allocations.
751 *
752 * The source is marked as being left at t1, it is not freed.
753 *
754 * Return 0 if no error occurred, otherwise return an errno value.
755 *
756 * Caller is expected to be holding im6o_lock.
757 */
758static int
759im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin)
760{
761 struct ip6_msource find;
762 struct ip6_msource *ims;
763 struct in6_msource *lims;
764
765 find.im6s_addr = psin->sin6_addr;
766 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find)ip6_msource_tree_RB_FIND(&imf->im6f_sources, &find
)
;
767 if (ims == NULL((void *)0))
768 return (ENOENT2);
769 lims = (struct in6_msource *)ims;
770 lims->im6sl_st[1] = MCAST_UNDEFINED0;
771 return (0);
772}
773
774/*
775 * Revert socket-layer filter set deltas at t1 to t0 state.
776 *
777 * Caller is expected to be holding im6o_lock.
778 */
779static void
780im6f_rollback(struct in6_mfilter *imf)
781{
782 struct ip6_msource *ims, *tims;
783 struct in6_msource *lims;
784
785 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); ((ims) != ((void *)0)) && ((tims) = ip6_msource_tree_RB_NEXT
(ims), (ims) != ((void *)0)); (ims) = (tims))
{
786 lims = (struct in6_msource *)ims;
787 if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
788 /* no change at t1 */
789 continue;
790 } else if (lims->im6sl_st[0] != MCAST_UNDEFINED0) {
791 /* revert change to existing source at t1 */
792 lims->im6sl_st[1] = lims->im6sl_st[0];
793 } else {
794 /* revert source added t1 */
795 MLD_PRINTF(("%s: free in6ms 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: free in6ms 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(lims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(lims) + vm_kernel_addrperm)); } while (0)
796 (uint64_t)VM_KERNEL_ADDRPERM(lims)))do { if (mld_debug) printf ("%s: free in6ms 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(lims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(lims) + vm_kernel_addrperm)); } while (0)
;
797 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims)ip6_msource_tree_RB_REMOVE(&imf->im6f_sources, ims);
798 in6ms_free(lims);
799 imf->im6f_nsrc--;
800 }
801 }
802 imf->im6f_st[1] = imf->im6f_st[0];
803}
804
805/*
806 * Mark socket-layer filter set as INCLUDE {} at t1.
807 *
808 * Caller is expected to be holding im6o_lock.
809 */
810void
811im6f_leave(struct in6_mfilter *imf)
812{
813 struct ip6_msource *ims;
814 struct in6_msource *lims;
815
816 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
817 lims = (struct in6_msource *)ims;
818 lims->im6sl_st[1] = MCAST_UNDEFINED0;
819 }
820 imf->im6f_st[1] = MCAST_INCLUDE1;
821}
822
823/*
824 * Mark socket-layer filter set deltas as committed.
825 *
826 * Caller is expected to be holding im6o_lock.
827 */
828static void
829im6f_commit(struct in6_mfilter *imf)
830{
831 struct ip6_msource *ims;
832 struct in6_msource *lims;
833
834 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
835 lims = (struct in6_msource *)ims;
836 lims->im6sl_st[0] = lims->im6sl_st[1];
837 }
838 imf->im6f_st[0] = imf->im6f_st[1];
839}
840
841/*
842 * Reap unreferenced sources from socket-layer filter set.
843 *
844 * Caller is expected to be holding im6o_lock.
845 */
846static void
847im6f_reap(struct in6_mfilter *imf)
848{
849 struct ip6_msource *ims, *tims;
850 struct in6_msource *lims;
851
852 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); ((ims) != ((void *)0)) && ((tims) = ip6_msource_tree_RB_NEXT
(ims), (ims) != ((void *)0)); (ims) = (tims))
{
853 lims = (struct in6_msource *)ims;
854 if ((lims->im6sl_st[0] == MCAST_UNDEFINED0) &&
855 (lims->im6sl_st[1] == MCAST_UNDEFINED0)) {
856 MLD_PRINTF(("%s: free in6ms 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: free in6ms 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(lims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(lims) + vm_kernel_addrperm)); } while (0)
857 (uint64_t)VM_KERNEL_ADDRPERM(lims)))do { if (mld_debug) printf ("%s: free in6ms 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(lims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(lims) + vm_kernel_addrperm)); } while (0)
;
858 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims)ip6_msource_tree_RB_REMOVE(&imf->im6f_sources, ims);
859 in6ms_free(lims);
860 imf->im6f_nsrc--;
861 }
862 }
863}
864
865/*
866 * Purge socket-layer filter set.
867 *
868 * Caller is expected to be holding im6o_lock.
869 */
870void
871im6f_purge(struct in6_mfilter *imf)
872{
873 struct ip6_msource *ims, *tims;
874 struct in6_msource *lims;
875
876 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); ((ims) != ((void *)0)) && ((tims) = ip6_msource_tree_RB_NEXT
(ims), (ims) != ((void *)0)); (ims) = (tims))
{
877 lims = (struct in6_msource *)ims;
878 MLD_PRINTF(("%s: free in6ms 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: free in6ms 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(lims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(lims) + vm_kernel_addrperm)); } while (0)
879 (uint64_t)VM_KERNEL_ADDRPERM(lims)))do { if (mld_debug) printf ("%s: free in6ms 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(lims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(lims) + vm_kernel_addrperm)); } while (0)
;
880 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims)ip6_msource_tree_RB_REMOVE(&imf->im6f_sources, ims);
881 in6ms_free(lims);
882 imf->im6f_nsrc--;
883 }
884 imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED0;
885 VERIFY(RB_EMPTY(&imf->im6f_sources))((void)(__builtin_expect(!!((long)((((&imf->im6f_sources
)->rbh_root == ((void *)0))))), 1L) || assfail("RB_EMPTY(&imf->im6f_sources)"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 885
)))
;
886}
887
888/*
889 * Look up a source filter entry for a multicast group.
890 *
891 * inm is the group descriptor to work with.
892 * addr is the IPv6 address to look up.
893 * noalloc may be non-zero to suppress allocation of sources.
894 * *pims will be set to the address of the retrieved or allocated source.
895 *
896 * Return 0 if successful, otherwise return a non-zero error code.
897 */
898static int
899in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr,
900 const int noalloc, struct ip6_msource **pims)
901{
902 struct ip6_msource find;
903 struct ip6_msource *ims, *nims;
904
905 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
906
907 find.im6s_addr = *addr;
908 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find)ip6_msource_tree_RB_FIND(&inm->in6m_srcs, &find);
909 if (ims == NULL((void *)0) && !noalloc) {
910 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
911 return (ENOSPC28);
912 nims = ip6ms_alloc(M_WAITOK0x0000);
913 if (nims == NULL((void *)0))
914 return (ENOMEM12);
915 nims->im6s_addr = *addr;
916 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims)ip6_msource_tree_RB_INSERT(&inm->in6m_srcs, nims);
917 ++inm->in6m_nsrc;
918 ims = nims;
919 MLD_PRINTF(("%s: allocated %s as 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: allocated %s as 0x%llx\n", __func__
, ip6_sprintf(addr), (uint64_t)(((vm_offset_t)(ims) == 0) ? (
vm_offset_t)(0) : (vm_offset_t)(ims) + vm_kernel_addrperm)); }
while (0)
920 ip6_sprintf(addr), (uint64_t)VM_KERNEL_ADDRPERM(ims)))do { if (mld_debug) printf ("%s: allocated %s as 0x%llx\n", __func__
, ip6_sprintf(addr), (uint64_t)(((vm_offset_t)(ims) == 0) ? (
vm_offset_t)(0) : (vm_offset_t)(ims) + vm_kernel_addrperm)); }
while (0)
;
921 }
922
923 *pims = ims;
924 return (0);
925}
926
927/*
928 * Helper function to derive the filter mode on a source entry
929 * from its internal counters. Predicates are:
930 * A source is only excluded if all listeners exclude it.
931 * A source is only included if no listeners exclude it,
932 * and at least one listener includes it.
933 * May be used by ifmcstat(8).
934 */
935uint8_t
936im6s_get_mode(const struct in6_multi *inm, const struct ip6_msource *ims,
937 uint8_t t)
938{
939 IN6M_LOCK_ASSERT_HELD(__DECONST(struct in6_multi *, inm))lck_mtx_assert(&((struct in6_multi *) (long)(inm))->in6m_lock
, 1)
;
940
941 t = !!t;
942 if (inm->in6m_st[t].iss_ex > 0 &&
943 inm->in6m_st[t].iss_ex == ims->im6s_st[t].ex)
944 return (MCAST_EXCLUDE2);
945 else if (ims->im6s_st[t].in > 0 && ims->im6s_st[t].ex == 0)
946 return (MCAST_INCLUDE1);
947 return (MCAST_UNDEFINED0);
948}
949
950/*
951 * Merge socket-layer source into MLD-layer source.
952 * If rollback is non-zero, perform the inverse of the merge.
953 */
954static void
955im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims,
956 const int rollback)
957{
958 int n = rollback ? -1 : 1;
959
960 if (lims->im6sl_st[0] == MCAST_EXCLUDE2) {
961 MLD_PRINTF(("%s: t1 ex -= %d on %s\n", __func__, n,do { if (mld_debug) printf ("%s: t1 ex -= %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
962 ip6_sprintf(&lims->im6s_addr)))do { if (mld_debug) printf ("%s: t1 ex -= %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
;
963 ims->im6s_st[1].ex -= n;
964 } else if (lims->im6sl_st[0] == MCAST_INCLUDE1) {
965 MLD_PRINTF(("%s: t1 in -= %d on %s\n", __func__, n,do { if (mld_debug) printf ("%s: t1 in -= %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
966 ip6_sprintf(&lims->im6s_addr)))do { if (mld_debug) printf ("%s: t1 in -= %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
;
967 ims->im6s_st[1].in -= n;
968 }
969
970 if (lims->im6sl_st[1] == MCAST_EXCLUDE2) {
971 MLD_PRINTF(("%s: t1 ex += %d on %s\n", __func__, n,do { if (mld_debug) printf ("%s: t1 ex += %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
972 ip6_sprintf(&lims->im6s_addr)))do { if (mld_debug) printf ("%s: t1 ex += %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
;
973 ims->im6s_st[1].ex += n;
974 } else if (lims->im6sl_st[1] == MCAST_INCLUDE1) {
975 MLD_PRINTF(("%s: t1 in += %d on %s\n", __func__, n,do { if (mld_debug) printf ("%s: t1 in += %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
976 ip6_sprintf(&lims->im6s_addr)))do { if (mld_debug) printf ("%s: t1 in += %d on %s\n", __func__
, n, ip6_sprintf(&lims->im6s_addr)); } while (0)
;
977 ims->im6s_st[1].in += n;
978 }
979}
980
981/*
982 * Atomically update the global in6_multi state, when a membership's
983 * filter list is being updated in any way.
984 *
985 * imf is the per-inpcb-membership group filter pointer.
986 * A fake imf may be passed for in-kernel consumers.
987 *
988 * XXX This is a candidate for a set-symmetric-difference style loop
989 * which would eliminate the repeated lookup from root of ims nodes,
990 * as they share the same key space.
991 *
992 * If any error occurred this function will back out of refcounts
993 * and return a non-zero value.
994 */
995static int
996in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
997{
998 struct ip6_msource *ims, *nims;
999 struct in6_msource *lims;
1000 int schanged, error;
1001 int nsrc0, nsrc1;
1002
1003 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
1004
1005 schanged = 0;
1006 error = 0;
1007 nsrc1 = nsrc0 = 0;
1008
1009 /*
1010 * Update the source filters first, as this may fail.
1011 * Maintain count of in-mode filters at t0, t1. These are
1012 * used to work out if we transition into ASM mode or not.
1013 * Maintain a count of source filters whose state was
1014 * actually modified by this operation.
1015 */
1016 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
1017 lims = (struct in6_msource *)ims;
1018 if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++;
1019 if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++;
1020 if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue;
1021 error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims);
1022 ++schanged;
1023 if (error)
1024 break;
1025 im6s_merge(nims, lims, 0);
1026 }
1027 if (error) {
1028 struct ip6_msource *bims;
1029
1030 RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims)for ((ims) = (nims); ((ims) != ((void *)0)) && ((nims
) = ip6_msource_tree_RB_PREV(ims), (ims) != ((void *)0)); (ims
) = (nims))
{
1031 lims = (struct in6_msource *)ims;
1032 if (lims->im6sl_st[0] == lims->im6sl_st[1])
1033 continue;
1034 (void) in6m_get_source(inm, &lims->im6s_addr, 1, &bims);
1035 if (bims == NULL((void *)0))
1036 continue;
1037 im6s_merge(bims, lims, 1);
1038 }
1039 goto out_reap;
1040 }
1041
1042 MLD_PRINTF(("%s: imf filters in-mode: %d at t0, %d at t1\n",do { if (mld_debug) printf ("%s: imf filters in-mode: %d at t0, %d at t1\n"
, __func__, nsrc0, nsrc1); } while (0)
1043 __func__, nsrc0, nsrc1))do { if (mld_debug) printf ("%s: imf filters in-mode: %d at t0, %d at t1\n"
, __func__, nsrc0, nsrc1); } while (0)
;
1044
1045 /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
1046 if (imf->im6f_st[0] == imf->im6f_st[1] &&
1047 imf->im6f_st[1] == MCAST_INCLUDE1) {
1048 if (nsrc1 == 0) {
1049 MLD_PRINTF(("%s: --in on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: --in on inm at t1\n", __func__
); } while (0)
;
1050 --inm->in6m_st[1].iss_in;
1051 }
1052 }
1053
1054 /* Handle filter mode transition on socket. */
1055 if (imf->im6f_st[0] != imf->im6f_st[1]) {
1056 MLD_PRINTF(("%s: imf transition %d to %d\n",do { if (mld_debug) printf ("%s: imf transition %d to %d\n", __func__
, imf->im6f_st[0], imf->im6f_st[1]); } while (0)
1057 __func__, imf->im6f_st[0], imf->im6f_st[1]))do { if (mld_debug) printf ("%s: imf transition %d to %d\n", __func__
, imf->im6f_st[0], imf->im6f_st[1]); } while (0)
;
1058
1059 if (imf->im6f_st[0] == MCAST_EXCLUDE2) {
1060 MLD_PRINTF(("%s: --ex on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: --ex on inm at t1\n", __func__
); } while (0)
;
1061 --inm->in6m_st[1].iss_ex;
1062 } else if (imf->im6f_st[0] == MCAST_INCLUDE1) {
1063 MLD_PRINTF(("%s: --in on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: --in on inm at t1\n", __func__
); } while (0)
;
1064 --inm->in6m_st[1].iss_in;
1065 }
1066
1067 if (imf->im6f_st[1] == MCAST_EXCLUDE2) {
1068 MLD_PRINTF(("%s: ex++ on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: ex++ on inm at t1\n", __func__
); } while (0)
;
1069 inm->in6m_st[1].iss_ex++;
1070 } else if (imf->im6f_st[1] == MCAST_INCLUDE1 && nsrc1 > 0) {
1071 MLD_PRINTF(("%s: in++ on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: in++ on inm at t1\n", __func__
); } while (0)
;
1072 inm->in6m_st[1].iss_in++;
1073 }
1074 }
1075
1076 /*
1077 * Track inm filter state in terms of listener counts.
1078 * If there are any exclusive listeners, stack-wide
1079 * membership is exclusive.
1080 * Otherwise, if only inclusive listeners, stack-wide is inclusive.
1081 * If no listeners remain, state is undefined at t1,
1082 * and the MLD lifecycle for this group should finish.
1083 */
1084 if (inm->in6m_st[1].iss_ex > 0) {
1085 MLD_PRINTF(("%s: transition to EX\n", __func__))do { if (mld_debug) printf ("%s: transition to EX\n", __func__
); } while (0)
;
1086 inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE2;
1087 } else if (inm->in6m_st[1].iss_in > 0) {
1088 MLD_PRINTF(("%s: transition to IN\n", __func__))do { if (mld_debug) printf ("%s: transition to IN\n", __func__
); } while (0)
;
1089 inm->in6m_st[1].iss_fmode = MCAST_INCLUDE1;
1090 } else {
1091 MLD_PRINTF(("%s: transition to UNDEF\n", __func__))do { if (mld_debug) printf ("%s: transition to UNDEF\n", __func__
); } while (0)
;
1092 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED0;
1093 }
1094
1095 /* Decrement ASM listener count on transition out of ASM mode. */
1096 if (imf->im6f_st[0] == MCAST_EXCLUDE2 && nsrc0 == 0) {
1097 if ((imf->im6f_st[1] != MCAST_EXCLUDE2) ||
1098 (imf->im6f_st[1] == MCAST_EXCLUDE2 && nsrc1 > 0)) {
1099 MLD_PRINTF(("%s: --asm on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: --asm on inm at t1\n", __func__
); } while (0)
;
1100 --inm->in6m_st[1].iss_asm;
1101 }
1102 }
1103
1104 /* Increment ASM listener count on transition to ASM mode. */
1105 if (imf->im6f_st[1] == MCAST_EXCLUDE2 && nsrc1 == 0) {
1106 MLD_PRINTF(("%s: asm++ on inm at t1\n", __func__))do { if (mld_debug) printf ("%s: asm++ on inm at t1\n", __func__
); } while (0)
;
1107 inm->in6m_st[1].iss_asm++;
1108 }
1109
1110 MLD_PRINTF(("%s: merged imf 0x%llx to inm 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: merged imf 0x%llx to inm 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(imf) + vm_kernel_addrperm), (uint64_t)((
(vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (vm_offset_t)(inm
) + vm_kernel_addrperm)); } while (0)
1111 (uint64_t)VM_KERNEL_ADDRPERM(imf),do { if (mld_debug) printf ("%s: merged imf 0x%llx to inm 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(imf) + vm_kernel_addrperm), (uint64_t)((
(vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (vm_offset_t)(inm
) + vm_kernel_addrperm)); } while (0)
1112 (uint64_t)VM_KERNEL_ADDRPERM(inm)))do { if (mld_debug) printf ("%s: merged imf 0x%llx to inm 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(imf) + vm_kernel_addrperm), (uint64_t)((
(vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (vm_offset_t)(inm
) + vm_kernel_addrperm)); } while (0)
;
1113 in6m_print(inm);
1114
1115out_reap:
1116 if (schanged > 0) {
1117 MLD_PRINTF(("%s: sources changed; reaping\n", __func__))do { if (mld_debug) printf ("%s: sources changed; reaping\n",
__func__); } while (0)
;
1118 in6m_reap(inm);
1119 }
1120 return (error);
1121}
1122
1123/*
1124 * Mark an in6_multi's filter set deltas as committed.
1125 * Called by MLD after a state change has been enqueued.
1126 */
1127void
1128in6m_commit(struct in6_multi *inm)
1129{
1130 struct ip6_msource *ims;
1131
1132 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
1133
1134 MLD_PRINTF(("%s: commit inm 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: commit inm 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(inm) + vm_kernel_addrperm)); } while (0)
1135 (uint64_t)VM_KERNEL_ADDRPERM(inm)))do { if (mld_debug) printf ("%s: commit inm 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(inm) + vm_kernel_addrperm)); } while (0)
;
1136 MLD_PRINTF(("%s: pre commit:\n", __func__))do { if (mld_debug) printf ("%s: pre commit:\n", __func__); }
while (0)
;
1137 in6m_print(inm);
1138
1139 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs)for ((ims) = ip6_msource_tree_RB_MINMAX(&inm->in6m_srcs
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
1140 ims->im6s_st[0] = ims->im6s_st[1];
1141 }
1142 inm->in6m_st[0] = inm->in6m_st[1];
1143}
1144
1145/*
1146 * Reap unreferenced nodes from an in6_multi's filter set.
1147 */
1148static void
1149in6m_reap(struct in6_multi *inm)
1150{
1151 struct ip6_msource *ims, *tims;
1152
1153 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
1154
1155 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims)for ((ims) = ip6_msource_tree_RB_MINMAX(&inm->in6m_srcs
, -1); ((ims) != ((void *)0)) && ((tims) = ip6_msource_tree_RB_NEXT
(ims), (ims) != ((void *)0)); (ims) = (tims))
{
1156 if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 ||
1157 ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 ||
1158 ims->im6s_stp != 0)
1159 continue;
1160 MLD_PRINTF(("%s: free ims 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: free ims 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(ims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(ims) + vm_kernel_addrperm)); } while (0)
1161 (uint64_t)VM_KERNEL_ADDRPERM(ims)))do { if (mld_debug) printf ("%s: free ims 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(ims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(ims) + vm_kernel_addrperm)); } while (0)
;
1162 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims)ip6_msource_tree_RB_REMOVE(&inm->in6m_srcs, ims);
1163 ip6ms_free(ims);
1164 inm->in6m_nsrc--;
1165 }
1166}
1167
1168/*
1169 * Purge all source nodes from an in6_multi's filter set.
1170 */
1171void
1172in6m_purge(struct in6_multi *inm)
1173{
1174 struct ip6_msource *ims, *tims;
1175
1176 IN6M_LOCK_ASSERT_HELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 1);
1177
1178 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims)for ((ims) = ip6_msource_tree_RB_MINMAX(&inm->in6m_srcs
, -1); ((ims) != ((void *)0)) && ((tims) = ip6_msource_tree_RB_NEXT
(ims), (ims) != ((void *)0)); (ims) = (tims))
{
1179 MLD_PRINTF(("%s: free ims 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: free ims 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(ims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(ims) + vm_kernel_addrperm)); } while (0)
1180 (uint64_t)VM_KERNEL_ADDRPERM(ims)))do { if (mld_debug) printf ("%s: free ims 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(ims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(ims) + vm_kernel_addrperm)); } while (0)
;
1181 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims)ip6_msource_tree_RB_REMOVE(&inm->in6m_srcs, ims);
1182 ip6ms_free(ims);
1183 inm->in6m_nsrc--;
1184 }
1185}
1186
1187/*
1188 * Join a multicast address w/o sources.
1189 * KAME compatibility entry point.
1190 *
1191 */
1192struct in6_multi_mship *
1193in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr,
1194 int *errorp, int delay)
1195{
1196 struct in6_multi_mship *imm;
1197 int error;
1198
1199 *errorp = 0;
1200
1201 imm = in6_multi_mship_alloc(M_WAITOK0x0000);
1202 if (imm == NULL((void *)0)) {
1203 *errorp = ENOBUFS55;
1204 return (NULL((void *)0));
1205 }
1206
1207 error = in6_mc_join(ifp, mcaddr, NULL((void *)0), &imm->i6mm_maddr, delay);
1208 if (error) {
1209 *errorp = error;
1210 in6_multi_mship_free(imm);
1211 return (NULL((void *)0));
1212 }
1213
1214 return (imm);
1215}
1216
1217/*
1218 * Leave a multicast address w/o sources.
1219 * KAME compatibility entry point.
1220 */
1221int
1222in6_leavegroup(struct in6_multi_mship *imm)
1223{
1224 if (imm->i6mm_maddr != NULL((void *)0)) {
1225 in6_mc_leave(imm->i6mm_maddr, NULL((void *)0));
1226 IN6M_REMREF(imm->i6mm_maddr)in6m_remref(imm->i6mm_maddr, 0);
1227 imm->i6mm_maddr = NULL((void *)0);
1228 }
1229 in6_multi_mship_free(imm);
1230 return 0;
1231}
1232
1233/*
1234 * Join a multicast group; real entry point.
1235 *
1236 * Only preserves atomicity at inm level.
1237 * NOTE: imf argument cannot be const due to sys/tree.h limitations.
1238 *
1239 * If the MLD downcall fails, the group is not joined, and an error
1240 * code is returned.
1241 */
1242int
1243in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
1244 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
1245 const int delay)
1246{
1247 struct in6_mfilter timf;
1248 struct in6_multi *inm = NULL((void *)0);
1249 int error = 0;
1250 struct mld_tparams mtp;
1251
1252 /*
1253 * Sanity: Check scope zone ID was set for ifp, if and
1254 * only if group is scoped to an interface.
1255 */
1256 VERIFY(IN6_IS_ADDR_MULTICAST(mcaddr))((void)(__builtin_expect(!!((long)((((mcaddr)->__u6_addr.__u6_addr8
[0] == 0xff)))), 1L) || assfail("IN6_IS_ADDR_MULTICAST(mcaddr)"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 1256
)))
;
1257 if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr)(((mcaddr)->__u6_addr.__u6_addr8[0] == 0xff) && ((
(mcaddr)->__u6_addr.__u6_addr8[1] & 0x0f) == 0x02))
||
1258 IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)(((mcaddr)->__u6_addr.__u6_addr8[0] == 0xff) && ((
(mcaddr)->__u6_addr.__u6_addr8[1] & 0x0f) == 0x01))
) {
1259 VERIFY(mcaddr->s6_addr16[1] != 0)((void)(__builtin_expect(!!((long)((mcaddr->__u6_addr.__u6_addr16
[1] != 0))), 1L) || assfail("mcaddr->s6_addr16[1] != 0", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 1259)))
;
1260 }
1261
1262 MLD_PRINTF(("%s: join %s on 0x%llx(%s))\n", __func__,do { if (mld_debug) printf ("%s: join %s on 0x%llx(%s))\n", __func__
, ip6_sprintf(mcaddr), (uint64_t)(((vm_offset_t)(ifp) == 0) ?
(vm_offset_t)(0) : (vm_offset_t)(ifp) + vm_kernel_addrperm),
ifp->if_xname); } while (0)
1263 ip6_sprintf(mcaddr), (uint64_t)VM_KERNEL_ADDRPERM(ifp),do { if (mld_debug) printf ("%s: join %s on 0x%llx(%s))\n", __func__
, ip6_sprintf(mcaddr), (uint64_t)(((vm_offset_t)(ifp) == 0) ?
(vm_offset_t)(0) : (vm_offset_t)(ifp) + vm_kernel_addrperm),
ifp->if_xname); } while (0)
1264 if_name(ifp)))do { if (mld_debug) printf ("%s: join %s on 0x%llx(%s))\n", __func__
, ip6_sprintf(mcaddr), (uint64_t)(((vm_offset_t)(ifp) == 0) ?
(vm_offset_t)(0) : (vm_offset_t)(ifp) + vm_kernel_addrperm),
ifp->if_xname); } while (0)
;
1265
1266 bzero(&mtp, sizeof (mtp));
1267 *pinm = NULL((void *)0);
1268
1269 /*
1270 * If no imf was specified (i.e. kernel consumer),
1271 * fake one up and assume it is an ASM join.
1272 */
1273 if (imf == NULL((void *)0)) {
1274 im6f_init(&timf, MCAST_UNDEFINED0, MCAST_EXCLUDE2);
1275 imf = &timf;
1276 }
1277
1278 error = in6_mc_get(ifp, mcaddr, &inm);
1279 if (error) {
1280 MLD_PRINTF(("%s: in6_mc_get() failure\n", __func__))do { if (mld_debug) printf ("%s: in6_mc_get() failure\n", __func__
); } while (0)
;
1281 return (error);
1282 }
1283
1284 MLD_PRINTF(("%s: merge inm state\n", __func__))do { if (mld_debug) printf ("%s: merge inm state\n", __func__
); } while (0)
;
1285
1286 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
1287 error = in6m_merge(inm, imf);
1288 if (error) {
1289 MLD_PRINTF(("%s: failed to merge inm state\n", __func__))do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
;
1290 goto out_in6m_release;
1291 }
1292
1293 MLD_PRINTF(("%s: doing mld downcall\n", __func__))do { if (mld_debug) printf ("%s: doing mld downcall\n", __func__
); } while (0)
;
1294 error = mld_change_state(inm, &mtp, delay);
1295 if (error) {
1296 MLD_PRINTF(("%s: failed to update source\n", __func__))do { if (mld_debug) printf ("%s: failed to update source\n", __func__
); } while (0)
;
1297 im6f_rollback(imf);
1298 goto out_in6m_release;
1299 }
1300
1301out_in6m_release:
1302 if (error) {
1303 MLD_PRINTF(("%s: dropping ref on 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: dropping ref on 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(inm) + vm_kernel_addrperm)); } while (0)
1304 (uint64_t)VM_KERNEL_ADDRPERM(inm)))do { if (mld_debug) printf ("%s: dropping ref on 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(inm) + vm_kernel_addrperm)); } while (0)
;
1305 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
1306 IN6M_REMREF(inm)in6m_remref(inm, 0);
1307 } else {
1308 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
1309 *pinm = inm; /* keep refcount from in6_mc_get() */
1310 }
1311
1312 /* schedule timer now that we've dropped the lock(s) */
1313 mld_set_timeout(&mtp);
1314
1315 return (error);
1316}
1317
1318/*
1319 * Leave a multicast group; real entry point.
1320 * All source filters will be expunged.
1321 *
1322 * Only preserves atomicity at inm level.
1323 *
1324 * Holding the write lock for the INP which contains imf
1325 * is highly advisable. We can't assert for it as imf does not
1326 * contain a back-pointer to the owning inp.
1327 *
1328 * Note: This is not the same as in6m_release(*) as this function also
1329 * makes a state change downcall into MLD.
1330 */
1331int
1332in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
1333{
1334 struct in6_mfilter timf;
1335 int error, lastref;
1336 struct mld_tparams mtp;
1337
1338 bzero(&mtp, sizeof (mtp));
1339 error = 0;
1340
1341 IN6M_LOCK_ASSERT_NOTHELD(inm)lck_mtx_assert(&(inm)->in6m_lock, 2);
1342
1343 in6_multihead_lock_exclusive();
1344 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
1345
1346 MLD_PRINTF(("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(inm) + vm_kernel_addrperm), ip6_sprintf(
&inm->in6m_addr), (in6m_is_ifp_detached(inm) ? "null" :
inm->in6m_ifp->if_name), inm->in6m_ifp->if_unit,
(uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t)(0) : (vm_offset_t
)(imf) + vm_kernel_addrperm)); } while (0)
1347 (uint64_t)VM_KERNEL_ADDRPERM(inm), ip6_sprintf(&inm->in6m_addr),do { if (mld_debug) printf ("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(inm) + vm_kernel_addrperm), ip6_sprintf(
&inm->in6m_addr), (in6m_is_ifp_detached(inm) ? "null" :
inm->in6m_ifp->if_name), inm->in6m_ifp->if_unit,
(uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t)(0) : (vm_offset_t
)(imf) + vm_kernel_addrperm)); } while (0)
1348 (in6m_is_ifp_detached(inm) ? "null" : inm->in6m_ifp->if_name),do { if (mld_debug) printf ("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(inm) + vm_kernel_addrperm), ip6_sprintf(
&inm->in6m_addr), (in6m_is_ifp_detached(inm) ? "null" :
inm->in6m_ifp->if_name), inm->in6m_ifp->if_unit,
(uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t)(0) : (vm_offset_t
)(imf) + vm_kernel_addrperm)); } while (0)
1349 inm->in6m_ifp->if_unit, (uint64_t)VM_KERNEL_ADDRPERM(imf)))do { if (mld_debug) printf ("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n"
, __func__, (uint64_t)(((vm_offset_t)(inm) == 0) ? (vm_offset_t
)(0) : (vm_offset_t)(inm) + vm_kernel_addrperm), ip6_sprintf(
&inm->in6m_addr), (in6m_is_ifp_detached(inm) ? "null" :
inm->in6m_ifp->if_name), inm->in6m_ifp->if_unit,
(uint64_t)(((vm_offset_t)(imf) == 0) ? (vm_offset_t)(0) : (vm_offset_t
)(imf) + vm_kernel_addrperm)); } while (0)
;
1350
1351 /*
1352 * If no imf was specified (i.e. kernel consumer),
1353 * fake one up and assume it is an ASM join.
1354 */
1355 if (imf == NULL((void *)0)) {
1356 im6f_init(&timf, MCAST_EXCLUDE2, MCAST_UNDEFINED0);
1357 imf = &timf;
1358 }
1359
1360 /*
1361 * Begin state merge transaction at MLD layer.
1362 *
1363 * As this particular invocation should not cause any memory
1364 * to be allocated, and there is no opportunity to roll back
1365 * the transaction, it MUST NOT fail.
1366 */
1367 MLD_PRINTF(("%s: merge inm state\n", __func__))do { if (mld_debug) printf ("%s: merge inm state\n", __func__
); } while (0)
;
1368
1369 error = in6m_merge(inm, imf);
1370 KASSERT(error == 0, ("%s: failed to merge inm state\n", __func__));
1371
1372 MLD_PRINTF(("%s: doing mld downcall\n", __func__))do { if (mld_debug) printf ("%s: doing mld downcall\n", __func__
); } while (0)
;
1373 error = mld_change_state(inm, &mtp, 0);
1374#if MLD_DEBUG1
1375 if (error)
1376 MLD_PRINTF(("%s: failed mld downcall\n", __func__))do { if (mld_debug) printf ("%s: failed mld downcall\n", __func__
); } while (0)
;
1377#endif
1378 lastref = in6_multi_detach(inm);
1379 VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&((void)(__builtin_expect(!!((long)((!lastref || (!(inm->in6m_debug
& 0x1) && inm->in6m_reqcnt == 0)))), 1L) || assfail
("!lastref || (!(inm->in6m_debug & IFD_ATTACHED) && inm->in6m_reqcnt == 0)"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 1380
)))
1380 inm->in6m_reqcnt == 0))((void)(__builtin_expect(!!((long)((!lastref || (!(inm->in6m_debug
& 0x1) && inm->in6m_reqcnt == 0)))), 1L) || assfail
("!lastref || (!(inm->in6m_debug & IFD_ATTACHED) && inm->in6m_reqcnt == 0)"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 1380
)))
;
1381 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
1382 in6_multihead_lock_done();
1383
1384 if (lastref)
1385 IN6M_REMREF(inm)in6m_remref(inm, 0); /* for in6_multihead list */
1386
1387 /* schedule timer now that we've dropped the lock(s) */
1388 mld_set_timeout(&mtp);
1389
1390 return (error);
1391}
1392
1393/*
1394 * Block or unblock an ASM multicast source on an inpcb.
1395 * This implements the delta-based API described in RFC 3678.
1396 *
1397 * The delta-based API applies only to exclusive-mode memberships.
1398 * An MLD downcall will be performed.
1399 *
1400 * Return 0 if successful, otherwise return an appropriate error code.
1401 */
1402static int
1403in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
1404{
1405 struct group_source_req gsr;
1406 sockunion_t *gsa, *ssa;
1407 struct ifnet *ifp;
1408 struct in6_mfilter *imf;
1409 struct ip6_moptions *imo;
1410 struct in6_msource *ims;
1411 struct in6_multi *inm;
1412 size_t idx;
1413 uint16_t fmode;
1414 int error, doblock;
1415 struct mld_tparams mtp;
1416
1417 bzero(&mtp, sizeof (mtp));
1418 ifp = NULL((void *)0);
1419 error = 0;
1420 doblock = 0;
1421
1422 memset(&gsr, 0, sizeof(struct group_source_req));
1423 gsa = (sockunion_t *)&gsr.gsr_group;
1424 ssa = (sockunion_t *)&gsr.gsr_source;
1425
1426 switch (sopt->sopt_name) {
1427 case MCAST_BLOCK_SOURCE84:
1428 case MCAST_UNBLOCK_SOURCE85:
1429 error = sooptcopyin(sopt, &gsr,
1430 sizeof(struct group_source_req),
1431 sizeof(struct group_source_req));
1432 if (error)
1433 return (error);
1434
1435 if (gsa->sin6.sin6_family != AF_INET630 ||
1436 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
1437 return (EINVAL22);
1438
1439 if (ssa->sin6.sin6_family != AF_INET630 ||
1440 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
1441 return (EINVAL22);
1442
1443 ifnet_head_lock_shared();
1444 if (gsr.gsr_interface == 0 ||
1445 (u_int)if_index < gsr.gsr_interface) {
1446 ifnet_head_done();
1447 return (EADDRNOTAVAIL49);
1448 }
1449
1450 ifp = ifindex2ifnet[gsr.gsr_interface];
1451 ifnet_head_done();
1452
1453 if (ifp == NULL((void *)0))
1454 return (EADDRNOTAVAIL49);
1455
1456 if (sopt->sopt_name == MCAST_BLOCK_SOURCE84)
1457 doblock = 1;
1458 break;
1459
1460 default:
1461 MLD_PRINTF(("%s: unknown sopt_name %d\n",do { if (mld_debug) printf ("%s: unknown sopt_name %d\n", __func__
, sopt->sopt_name); } while (0)
1462 __func__, sopt->sopt_name))do { if (mld_debug) printf ("%s: unknown sopt_name %d\n", __func__
, sopt->sopt_name); } while (0)
;
1463 return (EOPNOTSUPP102);
1464 }
1465
1466 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
1467 return (EINVAL22);
1468
1469 (void) in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL((void *)0));
1470
1471 /*
1472 * Check if we are actually a member of this group.
1473 */
1474 imo = in6p_findmoptions(inp);
1475 if (imo == NULL((void *)0))
1476 return (ENOMEM12);
1477
1478 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
1479 idx = im6o_match_group(imo, ifp, &gsa->sa);
1480 if (idx == (size_t)-1 || imo->im6o_mfilters == NULL((void *)0)) {
1481 error = EADDRNOTAVAIL49;
1482 goto out_imo_locked;
1483 }
1484
1485 VERIFY(imo->im6o_mfilters != NULL)((void)(__builtin_expect(!!((long)((imo->im6o_mfilters != (
(void *)0)))), 1L) || assfail("imo->im6o_mfilters != NULL"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 1485
)))
;
1486 imf = &imo->im6o_mfilters[idx];
1487 inm = imo->im6o_membership[idx];
1488
1489 /*
1490 * Attempting to use the delta-based API on an
1491 * non exclusive-mode membership is an error.
1492 */
1493 fmode = imf->im6f_st[0];
1494 if (fmode != MCAST_EXCLUDE2) {
1495 error = EINVAL22;
1496 goto out_imo_locked;
1497 }
1498
1499 /*
1500 * Deal with error cases up-front:
1501 * Asked to block, but already blocked; or
1502 * Asked to unblock, but nothing to unblock.
1503 * If adding a new block entry, allocate it.
1504 */
1505 ims = im6o_match_source(imo, idx, &ssa->sa);
1506 if ((ims != NULL((void *)0) && doblock) || (ims == NULL((void *)0) && !doblock)) {
1507 MLD_PRINTF(("%s: source %s %spresent\n", __func__,do { if (mld_debug) printf ("%s: source %s %spresent\n", __func__
, ip6_sprintf(&ssa->sin6.sin6_addr), doblock ? "" : "not "
); } while (0)
1508 ip6_sprintf(&ssa->sin6.sin6_addr),do { if (mld_debug) printf ("%s: source %s %spresent\n", __func__
, ip6_sprintf(&ssa->sin6.sin6_addr), doblock ? "" : "not "
); } while (0)
1509 doblock ? "" : "not "))do { if (mld_debug) printf ("%s: source %s %spresent\n", __func__
, ip6_sprintf(&ssa->sin6.sin6_addr), doblock ? "" : "not "
); } while (0)
;
1510 error = EADDRNOTAVAIL49;
1511 goto out_imo_locked;
1512 }
1513
1514 /*
1515 * Begin state merge transaction at socket layer.
1516 */
1517 if (doblock) {
1518 MLD_PRINTF(("%s: %s source\n", __func__, "block"))do { if (mld_debug) printf ("%s: %s source\n", __func__, "block"
); } while (0)
;
1519 ims = im6f_graft(imf, fmode, &ssa->sin6);
1520 if (ims == NULL((void *)0))
1521 error = ENOMEM12;
1522 } else {
1523 MLD_PRINTF(("%s: %s source\n", __func__, "allow"))do { if (mld_debug) printf ("%s: %s source\n", __func__, "allow"
); } while (0)
;
1524 error = im6f_prune(imf, &ssa->sin6);
1525 }
1526
1527 if (error) {
1528 MLD_PRINTF(("%s: merge imf state failed\n", __func__))do { if (mld_debug) printf ("%s: merge imf state failed\n", __func__
); } while (0)
;
1529 goto out_im6f_rollback;
1530 }
1531
1532 /*
1533 * Begin state merge transaction at MLD layer.
1534 */
1535 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
1536 MLD_PRINTF(("%s: merge inm state\n", __func__))do { if (mld_debug) printf ("%s: merge inm state\n", __func__
); } while (0)
;
1537 error = in6m_merge(inm, imf);
1538 if (error) {
1539 MLD_PRINTF(("%s: failed to merge inm state\n", __func__))do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
;
1540 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
1541 goto out_im6f_rollback;
1542 }
1543
1544 MLD_PRINTF(("%s: doing mld downcall\n", __func__))do { if (mld_debug) printf ("%s: doing mld downcall\n", __func__
); } while (0)
;
1545 error = mld_change_state(inm, &mtp, 0);
1546 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
1547#if MLD_DEBUG1
1548 if (error)
1549 MLD_PRINTF(("%s: failed mld downcall\n", __func__))do { if (mld_debug) printf ("%s: failed mld downcall\n", __func__
); } while (0)
;
1550#endif
1551
1552out_im6f_rollback:
1553 if (error)
1554 im6f_rollback(imf);
1555 else
1556 im6f_commit(imf);
1557
1558 im6f_reap(imf);
1559
1560out_imo_locked:
1561 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
1562 IM6O_REMREF(imo)im6o_remref(imo); /* from in6p_findmoptions() */
1563
1564 /* schedule timer now that we've dropped the lock(s) */
1565 mld_set_timeout(&mtp);
1566
1567 return (error);
1568}
1569
1570/*
1571 * Given an inpcb, return its multicast options structure pointer. Accepts
1572 * an unlocked inpcb pointer, but will return it locked. May sleep.
1573 *
1574 */
1575static struct ip6_moptions *
1576in6p_findmoptions(struct inpcb *inp)
1577{
1578 struct ip6_moptions *imo;
1579 struct in6_multi **immp;
1580 struct in6_mfilter *imfp;
1581 size_t idx;
1582
1583 if ((imo = inp->in6p_moptionsinp_depend6.inp6_moptions) != NULL((void *)0)) {
1584 IM6O_ADDREF(imo)im6o_addref(imo, 0); /* for caller */
1585 return (imo);
1586 }
1587
1588 imo = ip6_allocmoptions(M_WAITOK0x0000);
1589 if (imo == NULL((void *)0))
1590 return (NULL((void *)0));
1591
1592 immp = _MALLOC(sizeof (*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS,({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC(sizeof (*immp) * 31, 113, 0x0000 | 0x0004, &
site); })
1593 M_WAITOK | M_ZERO)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC(sizeof (*immp) * 31, 113, 0x0000 | 0x0004, &
site); })
;
1594 if (immp == NULL((void *)0)) {
1595 IM6O_REMREF(imo)im6o_remref(imo);
1596 return (NULL((void *)0));
1597 }
1598
1599 imfp = _MALLOC(sizeof (struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS,({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC(sizeof (struct in6_mfilter) * 31, 112, 0x0000 |
0x0004, &site); })
1600 M_IN6MFILTER, M_WAITOK | M_ZERO)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC(sizeof (struct in6_mfilter) * 31, 112, 0x0000 |
0x0004, &site); })
;
1601 if (imfp == NULL((void *)0)) {
1602 _FREE(immp, M_IP6MOPTS113);
1603 IM6O_REMREF(imo)im6o_remref(imo);
1604 return (NULL((void *)0));
1605 }
1606
1607 imo->im6o_multicast_ifp = NULL((void *)0);
1608 imo->im6o_multicast_hlim = ip6_defmcasthlim;
1609 imo->im6o_multicast_loop = in6_mcast_loop;
1610 imo->im6o_num_memberships = 0;
1611 imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS31;
1612 imo->im6o_membership = immp;
1613
1614 /* Initialize per-group source filters. */
1615 for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS31; idx++)
1616 im6f_init(&imfp[idx], MCAST_UNDEFINED0, MCAST_EXCLUDE2);
1617
1618 imo->im6o_mfilters = imfp;
1619 inp->in6p_moptionsinp_depend6.inp6_moptions = imo; /* keep reference from ip6_allocmoptions() */
1620 IM6O_ADDREF(imo)im6o_addref(imo, 0); /* for caller */
1621
1622 return (imo);
1623}
1624
1625/*
1626 * Atomically get source filters on a socket for an IPv6 multicast group.
1627 * Called with INP lock held; returns with lock released.
1628 */
1629static int
1630in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
1631{
1632 struct __msfilterreq64 msfr, msfr64;
1633 struct __msfilterreq32 msfr32;
1634 sockunion_t *gsa;
1635 struct ifnet *ifp;
1636 struct ip6_moptions *imo;
1637 struct in6_mfilter *imf;
1638 struct ip6_msource *ims;
1639 struct in6_msource *lims;
1640 struct sockaddr_in6 *psin;
1641 struct sockaddr_storage *ptss;
1642 struct sockaddr_storage *tss;
1643 int error;
1644 size_t idx, nsrcs, ncsrcs;
1645 user_addr_t tmp_ptr;
1646
1647 imo = inp->in6p_moptionsinp_depend6.inp6_moptions;
1648 VERIFY(imo != NULL)((void)(__builtin_expect(!!((long)((imo != ((void *)0)))), 1L
) || assfail("imo != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 1648)))
;
1649
1650 if (IS_64BIT_PROCESS(current_proc())) {
1
Taking true branch
1651 error = sooptcopyin(sopt, &msfr64,
1652 sizeof(struct __msfilterreq64),
1653 sizeof(struct __msfilterreq64));
1654 if (error)
2
Assuming 'error' is 0
3
Taking false branch
1655 return (error);
1656 /* we never use msfr.msfr_srcs; */
1657 memcpy(&msfr, &msfr64, sizeof(msfr64));
1658 } else {
1659 error = sooptcopyin(sopt, &msfr32,
1660 sizeof(struct __msfilterreq32),
1661 sizeof(struct __msfilterreq32));
1662 if (error)
1663 return (error);
1664 /* we never use msfr.msfr_srcs; */
1665 memcpy(&msfr, &msfr32, sizeof(msfr32));
1666 }
1667
1668 if (msfr.msfr_group.ss_family != AF_INET630 ||
4
Taking false branch
1669 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
1670 return (EINVAL22);
1671
1672 gsa = (sockunion_t *)&msfr.msfr_group;
1673 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
5
Taking false branch
1674 return (EINVAL22);
1675
1676 ifnet_head_lock_shared();
1677 if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) {
6
Taking false branch
1678 ifnet_head_done();
1679 return (EADDRNOTAVAIL49);
1680 }
1681 ifp = ifindex2ifnet[msfr.msfr_ifindex];
1682 ifnet_head_done();
1683
1684 if (ifp == NULL((void *)0))
7
Assuming 'ifp' is not equal to null
8
Taking false branch
1685 return (EADDRNOTAVAIL49);
1686
1687 if ((size_t) msfr.msfr_nsrcs >
9
Taking false branch
1688 UINT32_MAX4294967295U / sizeof(struct sockaddr_storage))
1689 msfr.msfr_nsrcs = UINT32_MAX4294967295U / sizeof(struct sockaddr_storage);
1690
1691 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc)
10
Taking false branch
1692 msfr.msfr_nsrcs = in6_mcast_maxsocksrc;
1693
1694 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL((void *)0));
1695
1696 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
1697 /*
1698 * Lookup group on the socket.
1699 */
1700 idx = im6o_match_group(imo, ifp, &gsa->sa);
1701 if (idx == (size_t)-1 || imo->im6o_mfilters == NULL((void *)0)) {
11
Taking false branch
1702 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
1703 return (EADDRNOTAVAIL49);
1704 }
1705 imf = &imo->im6o_mfilters[idx];
1706
1707 /*
1708 * Ignore memberships which are in limbo.
1709 */
1710 if (imf->im6f_st[1] == MCAST_UNDEFINED0) {
12
Taking false branch
1711 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
1712 return (EAGAIN35);
1713 }
1714 msfr.msfr_fmode = imf->im6f_st[1];
1715
1716 /*
1717 * If the user specified a buffer, copy out the source filter
1718 * entries to userland gracefully.
1719 * We only copy out the number of entries which userland
1720 * has asked for, but we always tell userland how big the
1721 * buffer really needs to be.
1722 */
1723 tss = NULL((void *)0);
1724
1725 if (IS_64BIT_PROCESS(current_proc()))
13
Taking false branch
1726 tmp_ptr = msfr64.msfr_srcs;
1727 else
1728 tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs)((user_addr_t)((uintptr_t)(msfr32.msfr_srcs)));
1729
1730 if (tmp_ptr != USER_ADDR_NULL((user_addr_t) 0) && msfr.msfr_nsrcs > 0) {
1731 tss = _MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*tss),({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*tss), 80, 0x0000
| 0x0004, &site); })
1732 M_TEMP, M_WAITOK | M_ZERO)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*tss), 80, 0x0000
| 0x0004, &site); })
;
1733 if (tss == NULL((void *)0)) {
1734 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
1735 return (ENOBUFS55);
1736 }
1737 }
1738
1739 /*
1740 * Count number of sources in-mode at t0.
1741 * If buffer space exists and remains, copy out source entries.
1742 */
1743 nsrcs = msfr.msfr_nsrcs;
1744 ncsrcs = 0;
1745 ptss = tss;
1746 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources)for ((ims) = ip6_msource_tree_RB_MINMAX(&imf->im6f_sources
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
1747 lims = (struct in6_msource *)ims;
1748 if (lims->im6sl_st[0] == MCAST_UNDEFINED0 ||
1749 lims->im6sl_st[0] != imf->im6f_st[0])
1750 continue;
1751 if (tss != NULL((void *)0) && nsrcs > 0) {
1752 psin = (struct sockaddr_in6 *)ptss;
1753 psin->sin6_family = AF_INET630;
1754 psin->sin6_len = sizeof(struct sockaddr_in6);
1755 psin->sin6_addr = lims->im6s_addr;
1756 psin->sin6_port = 0;
1757 --nsrcs;
1758 ++ptss;
1759 ++ncsrcs;
1760 }
1761 }
1762
1763 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
1764
1765 if (tss != NULL((void *)0)) {
14
Taking false branch
1766 error = copyout(tss, tmp_ptr, ncsrcs * sizeof(*tss));
1767 FREE(tss, M_TEMP)_FREE((void *)tss, 80);
1768 if (error)
1769 return (error);
1770 }
1771
1772 msfr.msfr_nsrcs = ncsrcs;
1773 if (IS_64BIT_PROCESS(current_proc())) {
15
Taking false branch
1774 msfr64.msfr_ifindex = msfr.msfr_ifindex;
1775 msfr64.msfr_fmode = msfr.msfr_fmode;
1776 msfr64.msfr_nsrcs = msfr.msfr_nsrcs;
1777 memcpy(&msfr64.msfr_group, &msfr.msfr_group,
1778 sizeof(struct sockaddr_storage));
1779 error = sooptcopyout(sopt, &msfr64,
1780 sizeof(struct __msfilterreq64));
1781 } else {
1782 msfr32.msfr_ifindex = msfr.msfr_ifindex;
1783 msfr32.msfr_fmode = msfr.msfr_fmode;
1784 msfr32.msfr_nsrcs = msfr.msfr_nsrcs;
1785 memcpy(&msfr32.msfr_group, &msfr.msfr_group,
1786 sizeof(struct sockaddr_storage));
1787 error = sooptcopyout(sopt, &msfr32,
16
Copies out a struct with untouched element(s): __msfr_align, msfr_srcs
1788 sizeof(struct __msfilterreq32));
1789 }
1790
1791 return (error);
1792}
1793
1794/*
1795 * Return the IP multicast options in response to user getsockopt().
1796 */
1797int
1798ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt)
1799{
1800 struct ip6_moptions *im6o;
1801 int error;
1802 u_int optval;
1803
1804 im6o = inp->in6p_moptionsinp_depend6.inp6_moptions;
1805 /*
1806 * If socket is neither of type SOCK_RAW or SOCK_DGRAM,
1807 * or is a divert socket, reject it.
1808 */
1809 if (SOCK_PROTO(inp->inp_socket)((inp->inp_socket)->so_proto->pr_protocol) == IPPROTO_DIVERT254 ||
1810 (SOCK_TYPE(inp->inp_socket)((inp->inp_socket)->so_proto->pr_type) != SOCK_RAW3 &&
1811 SOCK_TYPE(inp->inp_socket)((inp->inp_socket)->so_proto->pr_type) != SOCK_DGRAM2)) {
1812 return (EOPNOTSUPP102);
1813 }
1814
1815 error = 0;
1816 switch (sopt->sopt_name) {
1817 case IPV6_MULTICAST_IF9:
1818 if (im6o != NULL((void *)0))
1819 IM6O_LOCK(im6o)lck_mtx_lock(&(im6o)->im6o_lock);
1820 if (im6o == NULL((void *)0) || im6o->im6o_multicast_ifp == NULL((void *)0)) {
1821 optval = 0;
1822 } else {
1823 optval = im6o->im6o_multicast_ifp->if_index;
1824 }
1825 if (im6o != NULL((void *)0))
1826 IM6O_UNLOCK(im6o)lck_mtx_unlock(&(im6o)->im6o_lock);
1827 error = sooptcopyout(sopt, &optval, sizeof(u_int));
1828 break;
1829
1830 case IPV6_MULTICAST_HOPS10:
1831 if (im6o == NULL((void *)0)) {
1832 optval = ip6_defmcasthlim;
1833 } else {
1834 IM6O_LOCK(im6o)lck_mtx_lock(&(im6o)->im6o_lock);
1835 optval = im6o->im6o_multicast_hlim;
1836 IM6O_UNLOCK(im6o)lck_mtx_unlock(&(im6o)->im6o_lock);
1837 }
1838 error = sooptcopyout(sopt, &optval, sizeof(u_int));
1839 break;
1840
1841 case IPV6_MULTICAST_LOOP11:
1842 if (im6o == NULL((void *)0)) {
1843 optval = in6_mcast_loop; /* XXX VIMAGE */
1844 } else {
1845 IM6O_LOCK(im6o)lck_mtx_lock(&(im6o)->im6o_lock);
1846 optval = im6o->im6o_multicast_loop;
1847 IM6O_UNLOCK(im6o)lck_mtx_unlock(&(im6o)->im6o_lock);
1848 }
1849 error = sooptcopyout(sopt, &optval, sizeof(u_int));
1850 break;
1851
1852 case IPV6_MSFILTER74:
1853 if (im6o == NULL((void *)0)) {
1854 error = EADDRNOTAVAIL49;
1855 } else {
1856 error = in6p_get_source_filters(inp, sopt);
1857 }
1858 break;
1859
1860 default:
1861 error = ENOPROTOOPT42;
1862 break;
1863 }
1864
1865 return (error);
1866}
1867
1868/*
1869 * Look up the ifnet to use for a multicast group membership,
1870 * given the address of an IPv6 group.
1871 *
1872 * This routine exists to support legacy IPv6 multicast applications.
1873 *
1874 * If inp is non-NULL and is bound to an interface, use this socket's
1875 * inp_boundif for any required routing table lookup.
1876 *
1877 * If the route lookup fails, return NULL.
1878 *
1879 * FUTURE: Support multiple forwarding tables for IPv6.
1880 *
1881 * Returns NULL if no ifp could be found.
1882 */
1883static struct ifnet *
1884in6p_lookup_mcast_ifp(const struct inpcb *in6p,
1885 const struct sockaddr_in6 *gsin6)
1886{
1887 struct route_in6 ro6;
1888 struct ifnet *ifp;
1889 unsigned int ifscope = IFSCOPE_NONE0;
1890
1891 VERIFY(in6p == NULL || (in6p->inp_vflag & INP_IPV6))((void)(__builtin_expect(!!((long)((in6p == ((void *)0) || (in6p
->inp_vflag & 0x2)))), 1L) || assfail("in6p == NULL || (in6p->inp_vflag & INP_IPV6)"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 1891
)))
;
1892 VERIFY(gsin6->sin6_family == AF_INET6)((void)(__builtin_expect(!!((long)((gsin6->sin6_family == 30
))), 1L) || assfail("gsin6->sin6_family == AF_INET6", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 1892)))
;
1893 if (IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr)((&gsin6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
== 0)
1894 return NULL((void *)0);
1895
1896 if (in6p != NULL((void *)0) && (in6p->inp_flags & INP_BOUND_IF0x00004000))
1897 ifscope = in6p->inp_boundifp->if_index;
1898
1899 ifp = NULL((void *)0);
1900 memset(&ro6, 0, sizeof(struct route_in6));
1901 memcpy(&ro6.ro_dst, gsin6, sizeof(struct sockaddr_in6));
1902 rtalloc_scoped_ign((struct route *)&ro6, 0, ifscope);
1903 if (ro6.ro_rt != NULL((void *)0)) {
1904 ifp = ro6.ro_rt->rt_ifp;
1905 VERIFY(ifp != NULL)((void)(__builtin_expect(!!((long)((ifp != ((void *)0)))), 1L
) || assfail("ifp != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 1905)))
;
1906 }
1907 ROUTE_RELEASE(&ro6)do { if ((&ro6)->ro_rt != ((void *)0)) { lck_mtx_assert
(&((&ro6)->ro_rt)->rt_lock, 2); if (0) rtfree_locked
((&ro6)->ro_rt); else rtfree((&ro6)->ro_rt); (&
ro6)->ro_rt = ((void *)0); } if ((&ro6)->ro_srcia !=
((void *)0)) { do { (void) ifa_remref((&ro6)->ro_srcia
, 0); } while (0); (&ro6)->ro_srcia = ((void *)0); (&
ro6)->ro_flags &= ~0x1; } } while (0)
;
1908
1909 return (ifp);
1910}
1911
1912/*
1913 * Since ipv6_mreq contains an ifindex and ip_mreq contains an AF_INET
1914 * address, we need to lookup the AF_INET address when translating an
1915 * ipv6_mreq structure into an ipmreq structure.
1916 * This is used when userland performs multicast setsockopt() on AF_INET6
1917 * sockets with AF_INET multicast addresses (IPv6 v4 mapped addresses).
1918 */
1919static int
1920in6p_lookup_v4addr(struct ipv6_mreq *mreq, struct ip_mreq *v4mreq)
1921{
1922 struct ifnet *ifp;
1923 struct ifaddr *ifa;
1924 struct sockaddr_in *sin;
1925
1926 ifnet_head_lock_shared();
1927 if (mreq->ipv6mr_interface > (unsigned int)if_index) {
1928 ifnet_head_done();
1929 return (EADDRNOTAVAIL49);
1930 } else
1931 ifp = ifindex2ifnet[mreq->ipv6mr_interface];
1932 ifnet_head_done();
1933 if (ifp == NULL((void *)0))
1934 return (EADDRNOTAVAIL49);
1935 ifa = ifa_ifpgetprimary(ifp, AF_INET2);
1936 if (ifa == NULL((void *)0))
1937 return (EADDRNOTAVAIL49);
1938 sin = (struct sockaddr_in *)(uintptr_t)(size_t)ifa->ifa_addr;
1939 v4mreq->imr_interface.s_addr = sin->sin_addr.s_addr;
1940 IFA_REMREF(ifa)do { (void) ifa_remref(ifa, 0); } while (0);
1941
1942 return (0);
1943}
1944
1945/*
1946 * Join an IPv6 multicast group, possibly with a source.
1947 *
1948 * FIXME: The KAME use of the unspecified address (::)
1949 * to join *all* multicast groups is currently unsupported.
1950 */
1951static int
1952in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
1953{
1954 struct group_source_req gsr;
1955 sockunion_t *gsa, *ssa;
1956 struct ifnet *ifp;
1957 struct in6_mfilter *imf;
1958 struct ip6_moptions *imo;
1959 struct in6_multi *inm = NULL((void *)0);
1960 struct in6_msource *lims = NULL((void *)0);
1961 size_t idx;
1962 int error, is_new;
1963 uint32_t scopeid = 0;
1964 struct mld_tparams mtp;
1965
1966 bzero(&mtp, sizeof (mtp));
1967 ifp = NULL((void *)0);
1968 imf = NULL((void *)0);
1969 error = 0;
1970 is_new = 0;
1971
1972 memset(&gsr, 0, sizeof(struct group_source_req));
1973 gsa = (sockunion_t *)&gsr.gsr_group;
1974 gsa->ss.ss_family = AF_UNSPEC0;
1975 ssa = (sockunion_t *)&gsr.gsr_source;
1976 ssa->ss.ss_family = AF_UNSPEC0;
1977
1978 /*
1979 * Chew everything into struct group_source_req.
1980 * Overwrite the port field if present, as the sockaddr
1981 * being copied in may be matched with a binary comparison.
1982 * Ignore passed-in scope ID.
1983 */
1984 switch (sopt->sopt_name) {
1985 case IPV6_JOIN_GROUP12: {
1986 struct ipv6_mreq mreq;
1987 struct sockaddr_in6 *gsin6;
1988
1989 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
1990 sizeof(struct ipv6_mreq));
1991 if (error)
1992 return (error);
1993 if (IN6_IS_ADDR_V4MAPPED(&mreq.ipv6mr_multiaddr)((*(const __uint32_t *)(const void *)(&(&mreq.ipv6mr_multiaddr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const __uint32_t
*)(const void *)(&(&mreq.ipv6mr_multiaddr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const __uint32_t *)(const
void *)(&(&mreq.ipv6mr_multiaddr)->__u6_addr.__u6_addr8
[8]) == (__builtin_constant_p(0x0000ffff) ? ((__uint32_t)((((
__uint32_t)(0x0000ffff) & 0xff000000) >> 24) | (((__uint32_t
)(0x0000ffff) & 0x00ff0000) >> 8) | (((__uint32_t)(
0x0000ffff) & 0x0000ff00) << 8) | (((__uint32_t)(0x0000ffff
) & 0x000000ff) << 24))) : _OSSwapInt32(0x0000ffff)
)))
) {
1994 struct ip_mreq v4mreq;
1995 struct sockopt v4sopt;
1996
1997 v4mreq.imr_multiaddr.s_addr =
1998 mreq.ipv6mr_multiaddr.s6_addr32__u6_addr.__u6_addr32[3];
1999 if (mreq.ipv6mr_interface == 0)
2000 v4mreq.imr_interface.s_addr = INADDR_ANY(u_int32_t)0x00000000;
2001 else
2002 error = in6p_lookup_v4addr(&mreq, &v4mreq);
2003 if (error)
2004 return (error);
2005 v4sopt.sopt_dir = SOPT_SET;
2006 v4sopt.sopt_level = sopt->sopt_level;
2007 v4sopt.sopt_name = IP_ADD_MEMBERSHIP12;
2008 v4sopt.sopt_val = CAST_USER_ADDR_T(&v4mreq)((user_addr_t)((uintptr_t)(&v4mreq)));
2009 v4sopt.sopt_valsize = sizeof(v4mreq);
2010 v4sopt.sopt_p = kernproc;
2011
2012 return (inp_join_group(inp, &v4sopt));
2013 }
2014 gsa->sin6.sin6_family = AF_INET630;
2015 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
2016 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;
2017
2018 gsin6 = &gsa->sin6;
2019
2020 /* Only allow IPv6 multicast addresses */
2021 if (IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr)((&gsin6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
== 0) {
2022 return (EINVAL22);
2023 }
2024
2025 if (mreq.ipv6mr_interface == 0) {
2026 ifp = in6p_lookup_mcast_ifp(inp, gsin6);
2027 } else {
2028 ifnet_head_lock_shared();
2029 if ((u_int)if_index < mreq.ipv6mr_interface) {
2030 ifnet_head_done();
2031 return (EADDRNOTAVAIL49);
2032 }
2033 ifp = ifindex2ifnet[mreq.ipv6mr_interface];
2034 ifnet_head_done();
2035 }
2036 MLD_PRINTF(("%s: ipv6mr_interface = %d, ifp = 0x%llx\n",do { if (mld_debug) printf ("%s: ipv6mr_interface = %d, ifp = 0x%llx\n"
, __func__, mreq.ipv6mr_interface, (uint64_t)(((vm_offset_t)(
ifp) == 0) ? (vm_offset_t)(0) : (vm_offset_t)(ifp) + vm_kernel_addrperm
)); } while (0)
2037 __func__, mreq.ipv6mr_interface,do { if (mld_debug) printf ("%s: ipv6mr_interface = %d, ifp = 0x%llx\n"
, __func__, mreq.ipv6mr_interface, (uint64_t)(((vm_offset_t)(
ifp) == 0) ? (vm_offset_t)(0) : (vm_offset_t)(ifp) + vm_kernel_addrperm
)); } while (0)
2038 (uint64_t)VM_KERNEL_ADDRPERM(ifp)))do { if (mld_debug) printf ("%s: ipv6mr_interface = %d, ifp = 0x%llx\n"
, __func__, mreq.ipv6mr_interface, (uint64_t)(((vm_offset_t)(
ifp) == 0) ? (vm_offset_t)(0) : (vm_offset_t)(ifp) + vm_kernel_addrperm
)); } while (0)
;
2039 break;
2040 }
2041
2042 case MCAST_JOIN_GROUP80:
2043 case MCAST_JOIN_SOURCE_GROUP82:
2044 if (sopt->sopt_name == MCAST_JOIN_GROUP80) {
2045 error = sooptcopyin(sopt, &gsr,
2046 sizeof(struct group_req),
2047 sizeof(struct group_req));
2048 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP82) {
2049 error = sooptcopyin(sopt, &gsr,
2050 sizeof(struct group_source_req),
2051 sizeof(struct group_source_req));
2052 }
2053 if (error)
2054 return (error);
2055
2056 if (gsa->sin6.sin6_family != AF_INET630 ||
2057 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
2058 return (EINVAL22);
2059
2060 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP82) {
2061 if (ssa->sin6.sin6_family != AF_INET630 ||
2062 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
2063 return (EINVAL22);
2064 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)((&ssa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
2065 return (EINVAL22);
2066 /*
2067 * TODO: Validate embedded scope ID in source
2068 * list entry against passed-in ifp, if and only
2069 * if source list filter entry is iface or node local.
2070 */
2071 in6_clearscope(&ssa->sin6.sin6_addr);
2072 ssa->sin6.sin6_port = 0;
2073 ssa->sin6.sin6_scope_id = 0;
2074 }
2075
2076 ifnet_head_lock_shared();
2077 if (gsr.gsr_interface == 0 ||
2078 (u_int)if_index < gsr.gsr_interface) {
2079 ifnet_head_done();
2080 return (EADDRNOTAVAIL49);
2081 }
2082 ifp = ifindex2ifnet[gsr.gsr_interface];
2083 ifnet_head_done();
2084 break;
2085
2086 default:
2087 MLD_PRINTF(("%s: unknown sopt_name %d\n",do { if (mld_debug) printf ("%s: unknown sopt_name %d\n", __func__
, sopt->sopt_name); } while (0)
2088 __func__, sopt->sopt_name))do { if (mld_debug) printf ("%s: unknown sopt_name %d\n", __func__
, sopt->sopt_name); } while (0)
;
2089 return (EOPNOTSUPP102);
2090 }
2091
2092 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
2093 return (EINVAL22);
2094
2095 if (ifp == NULL((void *)0) || (ifp->if_flags & IFF_MULTICAST0x8000) == 0)
2096 return (EADDRNOTAVAIL49);
2097
2098 gsa->sin6.sin6_port = 0;
2099 gsa->sin6.sin6_scope_id = 0;
2100
2101 /*
2102 * Always set the scope zone ID on memberships created from userland.
2103 * Use the passed-in ifp to do this.
2104 */
2105 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, &scopeid);
2106 /*
2107 * Some addresses are not valid without an embedded scopeid.
2108 * This check must be present because otherwise we will later hit
2109 * a VERIFY() in in6_mc_join().
2110 */
2111 if ((IN6_IS_ADDR_MC_LINKLOCAL(&gsa->sin6.sin6_addr)(((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff) && (((&gsa->sin6.sin6_addr)->__u6_addr
.__u6_addr8[1] & 0x0f) == 0x02))
||
2112 IN6_IS_ADDR_MC_INTFACELOCAL(&gsa->sin6.sin6_addr)(((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff) && (((&gsa->sin6.sin6_addr)->__u6_addr
.__u6_addr8[1] & 0x0f) == 0x01))
) &&
2113 (scopeid == 0 || gsa->sin6.sin6_addr.s6_addr16__u6_addr.__u6_addr16[1] == 0))
2114 return (EINVAL22);
2115
2116 imo = in6p_findmoptions(inp);
2117 if (imo == NULL((void *)0))
2118 return (ENOMEM12);
2119
2120 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
2121 idx = im6o_match_group(imo, ifp, &gsa->sa);
2122 if (idx == (size_t)-1) {
2123 is_new = 1;
2124 } else {
2125 inm = imo->im6o_membership[idx];
2126 imf = &imo->im6o_mfilters[idx];
2127 if (ssa->ss.ss_family != AF_UNSPEC0) {
2128 /*
2129 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership
2130 * is an error. On an existing inclusive membership,
2131 * it just adds the source to the filter list.
2132 */
2133 if (imf->im6f_st[1] != MCAST_INCLUDE1) {
2134 error = EINVAL22;
2135 goto out_imo_locked;
2136 }
2137 /*
2138 * Throw out duplicates.
2139 *
2140 * XXX FIXME: This makes a naive assumption that
2141 * even if entries exist for *ssa in this imf,
2142 * they will be rejected as dupes, even if they
2143 * are not valid in the current mode (in-mode).
2144 *
2145 * in6_msource is transactioned just as for anything
2146 * else in SSM -- but note naive use of in6m_graft()
2147 * below for allocating new filter entries.
2148 *
2149 * This is only an issue if someone mixes the
2150 * full-state SSM API with the delta-based API,
2151 * which is discouraged in the relevant RFCs.
2152 */
2153 lims = im6o_match_source(imo, idx, &ssa->sa);
2154 if (lims != NULL((void *)0) /*&&
2155 lims->im6sl_st[1] == MCAST_INCLUDE*/) {
2156 error = EADDRNOTAVAIL49;
2157 goto out_imo_locked;
2158 }
2159 } else {
2160 /*
2161 * MCAST_JOIN_GROUP on an existing exclusive
2162 * membership is an error; return EADDRINUSE
2163 * to preserve 4.4BSD API idempotence, and
2164 * avoid tedious detour to code below.
2165 * NOTE: This is bending RFC 3678 a bit.
2166 *
2167 * On an existing inclusive membership, this is also
2168 * an error; if you want to change filter mode,
2169 * you must use the userland API setsourcefilter().
2170 * XXX We don't reject this for imf in UNDEFINED
2171 * state at t1, because allocation of a filter
2172 * is atomic with allocation of a membership.
2173 */
2174 error = EINVAL22;
2175 /* See comments above for EADDRINUSE */
2176 if (imf->im6f_st[1] == MCAST_EXCLUDE2)
2177 error = EADDRINUSE48;
2178 goto out_imo_locked;
2179 }
2180 }
2181
2182 /*
2183 * Begin state merge transaction at socket layer.
2184 */
2185
2186 if (is_new) {
2187 if (imo->im6o_num_memberships == imo->im6o_max_memberships) {
2188 error = im6o_grow(imo, 0);
2189 if (error)
2190 goto out_imo_locked;
2191 }
2192 /*
2193 * Allocate the new slot upfront so we can deal with
2194 * grafting the new source filter in same code path
2195 * as for join-source on existing membership.
2196 */
2197 idx = imo->im6o_num_memberships;
2198 imo->im6o_membership[idx] = NULL((void *)0);
2199 imo->im6o_num_memberships++;
2200 VERIFY(imo->im6o_mfilters != NULL)((void)(__builtin_expect(!!((long)((imo->im6o_mfilters != (
(void *)0)))), 1L) || assfail("imo->im6o_mfilters != NULL"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 2200
)))
;
2201 imf = &imo->im6o_mfilters[idx];
2202 VERIFY(RB_EMPTY(&imf->im6f_sources))((void)(__builtin_expect(!!((long)((((&imf->im6f_sources
)->rbh_root == ((void *)0))))), 1L) || assfail("RB_EMPTY(&imf->im6f_sources)"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 2202
)))
;
2203 }
2204
2205 /*
2206 * Graft new source into filter list for this inpcb's
2207 * membership of the group. The in6_multi may not have
2208 * been allocated yet if this is a new membership, however,
2209 * the in_mfilter slot will be allocated and must be initialized.
2210 *
2211 * Note: Grafting of exclusive mode filters doesn't happen
2212 * in this path.
2213 * XXX: Should check for non-NULL lims (node exists but may
2214 * not be in-mode) for interop with full-state API.
2215 */
2216 if (ssa->ss.ss_family != AF_UNSPEC0) {
2217 /* Membership starts in IN mode */
2218 if (is_new) {
2219 MLD_PRINTF(("%s: new join w/source\n", __func__);do { if (mld_debug) printf ("%s: new join w/source\n", __func__
); im6f_init(imf, 0, 1); } while (0)
2220 im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE))do { if (mld_debug) printf ("%s: new join w/source\n", __func__
); im6f_init(imf, 0, 1); } while (0)
;
2221 } else {
2222 MLD_PRINTF(("%s: %s source\n", __func__, "allow"))do { if (mld_debug) printf ("%s: %s source\n", __func__, "allow"
); } while (0)
;
2223 }
2224 lims = im6f_graft(imf, MCAST_INCLUDE1, &ssa->sin6);
2225 if (lims == NULL((void *)0)) {
2226 MLD_PRINTF(("%s: merge imf state failed\n",do { if (mld_debug) printf ("%s: merge imf state failed\n", __func__
); } while (0)
2227 __func__))do { if (mld_debug) printf ("%s: merge imf state failed\n", __func__
); } while (0)
;
2228 error = ENOMEM12;
2229 goto out_im6o_free;
2230 }
2231 } else {
2232 /* No address specified; Membership starts in EX mode */
2233 if (is_new) {
2234 MLD_PRINTF(("%s: new join w/o source", __func__))do { if (mld_debug) printf ("%s: new join w/o source", __func__
); } while (0)
;
2235 im6f_init(imf, MCAST_UNDEFINED0, MCAST_EXCLUDE2);
2236 }
2237 }
2238
2239 /*
2240 * Begin state merge transaction at MLD layer.
2241 */
2242
2243 if (is_new) {
2244 /*
2245 * See inp_join_group() for why we need to unlock
2246 */
2247 IM6O_ADDREF_LOCKED(imo)im6o_addref(imo, 1);
2248 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
2249 socket_unlock(inp->inp_socket, 0);
2250
2251 VERIFY(inm == NULL)((void)(__builtin_expect(!!((long)((inm == ((void *)0)))), 1L
) || assfail("inm == NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 2251)))
;
2252 error = in6_mc_join(ifp, &gsa->sin6.sin6_addr, imf, &inm, 0);
2253 VERIFY(inm != NULL || error != 0)((void)(__builtin_expect(!!((long)((inm != ((void *)0) || error
!= 0))), 1L) || assfail("inm != NULL || error != 0", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 2253)))
;
2254
2255 socket_lock(inp->inp_socket, 0);
2256 IM6O_REMREF(imo)im6o_remref(imo);
2257 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
2258
2259 if (error)
2260 goto out_im6o_free;
2261 imo->im6o_membership[idx] = inm; /* from in6_mc_join() */
2262 } else {
2263 MLD_PRINTF(("%s: merge inm state\n", __func__))do { if (mld_debug) printf ("%s: merge inm state\n", __func__
); } while (0)
;
2264 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
2265 error = in6m_merge(inm, imf);
2266 if (error) {
2267 MLD_PRINTF(("%s: failed to merge inm state\n",do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
2268 __func__))do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
;
2269 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
2270 goto out_im6f_rollback;
2271 }
2272 MLD_PRINTF(("%s: doing mld downcall\n", __func__))do { if (mld_debug) printf ("%s: doing mld downcall\n", __func__
); } while (0)
;
2273 error = mld_change_state(inm, &mtp, 0);
2274 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
2275 if (error) {
2276 MLD_PRINTF(("%s: failed mld downcall\n",do { if (mld_debug) printf ("%s: failed mld downcall\n", __func__
); } while (0)
2277 __func__))do { if (mld_debug) printf ("%s: failed mld downcall\n", __func__
); } while (0)
;
2278 goto out_im6f_rollback;
2279 }
2280 }
2281
2282out_im6f_rollback:
2283 if (error) {
2284 im6f_rollback(imf);
2285 if (is_new)
2286 im6f_purge(imf);
2287 else
2288 im6f_reap(imf);
2289 } else {
2290 im6f_commit(imf);
2291 }
2292
2293out_im6o_free:
2294 if (error && is_new) {
2295 VERIFY(inm == NULL)((void)(__builtin_expect(!!((long)((inm == ((void *)0)))), 1L
) || assfail("inm == NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 2295)))
;
2296 imo->im6o_membership[idx] = NULL((void *)0);
2297 --imo->im6o_num_memberships;
2298 }
2299
2300out_imo_locked:
2301 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
2302 IM6O_REMREF(imo)im6o_remref(imo); /* from in6p_findmoptions() */
2303
2304 /* schedule timer now that we've dropped the lock(s) */
2305 mld_set_timeout(&mtp);
2306
2307 return (error);
2308}
2309
2310/*
2311 * Leave an IPv6 multicast group on an inpcb, possibly with a source.
2312 */
2313static int
2314in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
2315{
2316 struct ipv6_mreq mreq;
2317 struct group_source_req gsr;
2318 sockunion_t *gsa, *ssa;
2319 struct ifnet *ifp;
2320 struct in6_mfilter *imf;
2321 struct ip6_moptions *imo;
2322 struct in6_msource *ims;
2323 struct in6_multi *inm = NULL((void *)0);
2324 uint32_t ifindex = 0;
2325 size_t idx;
2326 int error, is_final;
2327 struct mld_tparams mtp;
2328
2329 bzero(&mtp, sizeof (mtp));
2330 ifp = NULL((void *)0);
2331 error = 0;
2332 is_final = 1;
2333
2334 memset(&gsr, 0, sizeof(struct group_source_req));
2335 gsa = (sockunion_t *)&gsr.gsr_group;
2336 gsa->ss.ss_family = AF_UNSPEC0;
2337 ssa = (sockunion_t *)&gsr.gsr_source;
2338 ssa->ss.ss_family = AF_UNSPEC0;
2339
2340 /*
2341 * Chew everything passed in up into a struct group_source_req
2342 * as that is easier to process.
2343 * Note: Any embedded scope ID in the multicast group passed
2344 * in by userland is ignored, the interface index is the recommended
2345 * mechanism to specify an interface; see below.
2346 */
2347 switch (sopt->sopt_name) {
2348 case IPV6_LEAVE_GROUP13: {
2349 struct sockaddr_in6 *gsin6;
2350
2351 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
2352 sizeof(struct ipv6_mreq));
2353 if (error)
2354 return (error);
2355 if (IN6_IS_ADDR_V4MAPPED(&mreq.ipv6mr_multiaddr)((*(const __uint32_t *)(const void *)(&(&mreq.ipv6mr_multiaddr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const __uint32_t
*)(const void *)(&(&mreq.ipv6mr_multiaddr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const __uint32_t *)(const
void *)(&(&mreq.ipv6mr_multiaddr)->__u6_addr.__u6_addr8
[8]) == (__builtin_constant_p(0x0000ffff) ? ((__uint32_t)((((
__uint32_t)(0x0000ffff) & 0xff000000) >> 24) | (((__uint32_t
)(0x0000ffff) & 0x00ff0000) >> 8) | (((__uint32_t)(
0x0000ffff) & 0x0000ff00) << 8) | (((__uint32_t)(0x0000ffff
) & 0x000000ff) << 24))) : _OSSwapInt32(0x0000ffff)
)))
) {
2356 struct ip_mreq v4mreq;
2357 struct sockopt v4sopt;
2358
2359 v4mreq.imr_multiaddr.s_addr =
2360 mreq.ipv6mr_multiaddr.s6_addr32__u6_addr.__u6_addr32[3];
2361 if (mreq.ipv6mr_interface == 0)
2362 v4mreq.imr_interface.s_addr = INADDR_ANY(u_int32_t)0x00000000;
2363 else
2364 error = in6p_lookup_v4addr(&mreq, &v4mreq);
2365 if (error)
2366 return (error);
2367 v4sopt.sopt_dir = SOPT_SET;
2368 v4sopt.sopt_level = sopt->sopt_level;
2369 v4sopt.sopt_name = IP_DROP_MEMBERSHIP13;
2370 v4sopt.sopt_val = CAST_USER_ADDR_T(&v4mreq)((user_addr_t)((uintptr_t)(&v4mreq)));
2371 v4sopt.sopt_valsize = sizeof(v4mreq);
2372 v4sopt.sopt_p = kernproc;
2373
2374 return (inp_leave_group(inp, &v4sopt));
2375 }
2376 gsa->sin6.sin6_family = AF_INET630;
2377 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
2378 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;
2379 gsa->sin6.sin6_port = 0;
2380 gsa->sin6.sin6_scope_id = 0;
2381 ifindex = mreq.ipv6mr_interface;
2382 gsin6 = &gsa->sin6;
2383 /* Only allow IPv6 multicast addresses */
2384 if (IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr)((&gsin6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
== 0) {
2385 return (EINVAL22);
2386 }
2387 break;
2388 }
2389
2390 case MCAST_LEAVE_GROUP81:
2391 case MCAST_LEAVE_SOURCE_GROUP83:
2392 if (sopt->sopt_name == MCAST_LEAVE_GROUP81) {
2393 error = sooptcopyin(sopt, &gsr,
2394 sizeof(struct group_req),
2395 sizeof(struct group_req));
2396 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP83) {
2397 error = sooptcopyin(sopt, &gsr,
2398 sizeof(struct group_source_req),
2399 sizeof(struct group_source_req));
2400 }
2401 if (error)
2402 return (error);
2403
2404 if (gsa->sin6.sin6_family != AF_INET630 ||
2405 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
2406 return (EINVAL22);
2407 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP83) {
2408 if (ssa->sin6.sin6_family != AF_INET630 ||
2409 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
2410 return (EINVAL22);
2411 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)((&ssa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
2412 return (EINVAL22);
2413 /*
2414 * TODO: Validate embedded scope ID in source
2415 * list entry against passed-in ifp, if and only
2416 * if source list filter entry is iface or node local.
2417 */
2418 in6_clearscope(&ssa->sin6.sin6_addr);
2419 }
2420 gsa->sin6.sin6_port = 0;
2421 gsa->sin6.sin6_scope_id = 0;
2422 ifindex = gsr.gsr_interface;
2423 break;
2424
2425 default:
2426 MLD_PRINTF(("%s: unknown sopt_name %d\n",do { if (mld_debug) printf ("%s: unknown sopt_name %d\n", __func__
, sopt->sopt_name); } while (0)
2427 __func__, sopt->sopt_name))do { if (mld_debug) printf ("%s: unknown sopt_name %d\n", __func__
, sopt->sopt_name); } while (0)
;
2428 return (EOPNOTSUPP102);
2429 }
2430
2431 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
2432 return (EINVAL22);
2433
2434 /*
2435 * Validate interface index if provided. If no interface index
2436 * was provided separately, attempt to look the membership up
2437 * from the default scope as a last resort to disambiguate
2438 * the membership we are being asked to leave.
2439 * XXX SCOPE6 lock potentially taken here.
2440 */
2441 if (ifindex != 0) {
2442 ifnet_head_lock_shared();
2443 if ((u_int)if_index < ifindex) {
2444 ifnet_head_done();
2445 return (EADDRNOTAVAIL49);
2446 }
2447 ifp = ifindex2ifnet[ifindex];
2448 ifnet_head_done();
2449 if (ifp == NULL((void *)0))
2450 return (EADDRNOTAVAIL49);
2451 (void) in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL((void *)0));
2452 } else {
2453 error = sa6_embedscope(&gsa->sin6, ip6_use_defzone);
2454 if (error)
2455 return (EADDRNOTAVAIL49);
2456 /*
2457 * Some badly behaved applications don't pass an ifindex
2458 * or a scope ID, which is an API violation. In this case,
2459 * perform a lookup as per a v6 join.
2460 *
2461 * XXX For now, stomp on zone ID for the corner case.
2462 * This is not the 'KAME way', but we need to see the ifp
2463 * directly until such time as this implementation is
2464 * refactored, assuming the scope IDs are the way to go.
2465 */
2466 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1])((__uint16_t)(__builtin_constant_p(gsa->sin6.sin6_addr.__u6_addr
.__u6_addr16[1]) ? ((__uint16_t)((((__uint16_t)(gsa->sin6.
sin6_addr.__u6_addr.__u6_addr16[1]) & 0xff00) >> 8)
| (((__uint16_t)(gsa->sin6.sin6_addr.__u6_addr.__u6_addr16
[1]) & 0x00ff) << 8))) : _OSSwapInt16(gsa->sin6.
sin6_addr.__u6_addr.__u6_addr16[1])))
;
2467 if (ifindex == 0) {
2468 MLD_PRINTF(("%s: warning: no ifindex, looking up "do { if (mld_debug) printf ("%s: warning: no ifindex, looking up "
"ifp for group %s.\n", __func__, ip6_sprintf(&gsa->sin6
.sin6_addr)); } while (0)
2469 "ifp for group %s.\n", __func__,do { if (mld_debug) printf ("%s: warning: no ifindex, looking up "
"ifp for group %s.\n", __func__, ip6_sprintf(&gsa->sin6
.sin6_addr)); } while (0)
2470 ip6_sprintf(&gsa->sin6.sin6_addr)))do { if (mld_debug) printf ("%s: warning: no ifindex, looking up "
"ifp for group %s.\n", __func__, ip6_sprintf(&gsa->sin6
.sin6_addr)); } while (0)
;
2471 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
2472 } else {
2473 ifnet_head_lock_shared();
2474 ifp = ifindex2ifnet[ifindex];
2475 ifnet_head_done();
2476 }
2477 if (ifp == NULL((void *)0))
2478 return (EADDRNOTAVAIL49);
2479 }
2480
2481 VERIFY(ifp != NULL)((void)(__builtin_expect(!!((long)((ifp != ((void *)0)))), 1L
) || assfail("ifp != NULL", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 2481)))
;
2482 MLD_PRINTF(("%s: ifp = 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: ifp = 0x%llx\n", __func__, (
uint64_t)(((vm_offset_t)(ifp) == 0) ? (vm_offset_t)(0) : (vm_offset_t
)(ifp) + vm_kernel_addrperm)); } while (0)
2483 (uint64_t)VM_KERNEL_ADDRPERM(ifp)))do { if (mld_debug) printf ("%s: ifp = 0x%llx\n", __func__, (
uint64_t)(((vm_offset_t)(ifp) == 0) ? (vm_offset_t)(0) : (vm_offset_t
)(ifp) + vm_kernel_addrperm)); } while (0)
;
2484
2485 /*
2486 * Find the membership in the membership array.
2487 */
2488 imo = in6p_findmoptions(inp);
2489 if (imo == NULL((void *)0))
2490 return (ENOMEM12);
2491
2492 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
2493 idx = im6o_match_group(imo, ifp, &gsa->sa);
2494 if (idx == (size_t)-1) {
2495 error = EADDRNOTAVAIL49;
2496 goto out_locked;
2497 }
2498 inm = imo->im6o_membership[idx];
2499 imf = &imo->im6o_mfilters[idx];
2500
2501 if (ssa->ss.ss_family != AF_UNSPEC0)
2502 is_final = 0;
2503
2504 /*
2505 * Begin state merge transaction at socket layer.
2506 */
2507
2508 /*
2509 * If we were instructed only to leave a given source, do so.
2510 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
2511 */
2512 if (is_final) {
2513 im6f_leave(imf);
2514 } else {
2515 if (imf->im6f_st[0] == MCAST_EXCLUDE2) {
2516 error = EADDRNOTAVAIL49;
2517 goto out_locked;
2518 }
2519 ims = im6o_match_source(imo, idx, &ssa->sa);
2520 if (ims == NULL((void *)0)) {
2521 MLD_PRINTF(("%s: source %s %spresent\n", __func__,do { if (mld_debug) printf ("%s: source %s %spresent\n", __func__
, ip6_sprintf(&ssa->sin6.sin6_addr), "not "); } while (
0)
2522 ip6_sprintf(&ssa->sin6.sin6_addr),do { if (mld_debug) printf ("%s: source %s %spresent\n", __func__
, ip6_sprintf(&ssa->sin6.sin6_addr), "not "); } while (
0)
2523 "not "))do { if (mld_debug) printf ("%s: source %s %spresent\n", __func__
, ip6_sprintf(&ssa->sin6.sin6_addr), "not "); } while (
0)
;
2524 error = EADDRNOTAVAIL49;
2525 goto out_locked;
2526 }
2527 MLD_PRINTF(("%s: %s source\n", __func__, "block"))do { if (mld_debug) printf ("%s: %s source\n", __func__, "block"
); } while (0)
;
2528 error = im6f_prune(imf, &ssa->sin6);
2529 if (error) {
2530 MLD_PRINTF(("%s: merge imf state failed\n",do { if (mld_debug) printf ("%s: merge imf state failed\n", __func__
); } while (0)
2531 __func__))do { if (mld_debug) printf ("%s: merge imf state failed\n", __func__
); } while (0)
;
2532 goto out_locked;
2533 }
2534 }
2535
2536 /*
2537 * Begin state merge transaction at MLD layer.
2538 */
2539
2540 if (is_final) {
2541 /*
2542 * Give up the multicast address record to which
2543 * the membership points. Reference held in im6o
2544 * will be released below.
2545 */
2546 (void) in6_mc_leave(inm, imf);
2547 } else {
2548 MLD_PRINTF(("%s: merge inm state\n", __func__))do { if (mld_debug) printf ("%s: merge inm state\n", __func__
); } while (0)
;
2549 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
2550 error = in6m_merge(inm, imf);
2551 if (error) {
2552 MLD_PRINTF(("%s: failed to merge inm state\n",do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
2553 __func__))do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
;
2554 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
2555 goto out_im6f_rollback;
2556 }
2557
2558 MLD_PRINTF(("%s: doing mld downcall\n", __func__))do { if (mld_debug) printf ("%s: doing mld downcall\n", __func__
); } while (0)
;
2559 error = mld_change_state(inm, &mtp, 0);
2560 if (error) {
2561 MLD_PRINTF(("%s: failed mld downcall\n", __func__))do { if (mld_debug) printf ("%s: failed mld downcall\n", __func__
); } while (0)
;
2562 }
2563 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
2564 }
2565
2566out_im6f_rollback:
2567 if (error)
2568 im6f_rollback(imf);
2569 else
2570 im6f_commit(imf);
2571
2572 im6f_reap(imf);
2573
2574 if (is_final) {
2575 /* Remove the gap in the membership array. */
2576 VERIFY(inm == imo->im6o_membership[idx])((void)(__builtin_expect(!!((long)((inm == imo->im6o_membership
[idx]))), 1L) || assfail("inm == imo->im6o_membership[idx]"
, "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c", 2576
)))
;
2577 imo->im6o_membership[idx] = NULL((void *)0);
2578
2579 /*
2580 * See inp_join_group() for why we need to unlock
2581 */
2582 IM6O_ADDREF_LOCKED(imo)im6o_addref(imo, 1);
2583 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
2584 socket_unlock(inp->inp_socket, 0);
2585
2586 IN6M_REMREF(inm)in6m_remref(inm, 0);
2587
2588 socket_lock(inp->inp_socket, 0);
2589 IM6O_REMREF(imo)im6o_remref(imo);
2590 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
2591
2592 for (++idx; idx < imo->im6o_num_memberships; ++idx) {
2593 imo->im6o_membership[idx-1] = imo->im6o_membership[idx];
2594 imo->im6o_mfilters[idx-1] = imo->im6o_mfilters[idx];
2595 }
2596 imo->im6o_num_memberships--;
2597 }
2598
2599out_locked:
2600 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
2601 IM6O_REMREF(imo)im6o_remref(imo); /* from in6p_findmoptions() */
2602
2603 /* schedule timer now that we've dropped the lock(s) */
2604 mld_set_timeout(&mtp);
2605
2606 return (error);
2607}
2608
2609/*
2610 * Select the interface for transmitting IPv6 multicast datagrams.
2611 *
2612 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn
2613 * may be passed to this socket option. An address of in6addr_any or an
2614 * interface index of 0 is used to remove a previous selection.
2615 * When no interface is selected, one is chosen for every send.
2616 */
2617static int
2618in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
2619{
2620 struct ifnet *ifp;
2621 struct ip6_moptions *imo;
2622 u_int ifindex;
2623 int error;
2624
2625 if (sopt->sopt_valsize != sizeof(u_int))
2626 return (EINVAL22);
2627
2628 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int));
2629 if (error)
2630 return (error);
2631
2632 ifnet_head_lock_shared();
2633 if ((u_int)if_index < ifindex) {
2634 ifnet_head_done();
2635 return (EINVAL22);
2636 }
2637
2638 ifp = ifindex2ifnet[ifindex];
2639 ifnet_head_done();
2640 if (ifp == NULL((void *)0) || (ifp->if_flags & IFF_MULTICAST0x8000) == 0)
2641 return (EADDRNOTAVAIL49);
2642
2643 imo = in6p_findmoptions(inp);
2644 if (imo == NULL((void *)0))
2645 return (ENOMEM12);
2646
2647 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
2648 imo->im6o_multicast_ifp = ifp;
2649 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
2650 IM6O_REMREF(imo)im6o_remref(imo); /* from in6p_findmoptions() */
2651
2652 return (0);
2653}
2654
2655/*
2656 * Atomically set source filters on a socket for an IPv6 multicast group.
2657 *
2658 */
2659static int
2660in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
2661{
2662 struct __msfilterreq64 msfr, msfr64;
2663 struct __msfilterreq32 msfr32;
2664 sockunion_t *gsa;
2665 struct ifnet *ifp;
2666 struct in6_mfilter *imf;
2667 struct ip6_moptions *imo;
2668 struct in6_multi *inm;
2669 size_t idx;
2670 int error;
2671 user_addr_t tmp_ptr;
2672 struct mld_tparams mtp;
2673
2674 bzero(&mtp, sizeof (mtp));
2675
2676 if (IS_64BIT_PROCESS(current_proc())) {
2677 error = sooptcopyin(sopt, &msfr64,
2678 sizeof(struct __msfilterreq64),
2679 sizeof(struct __msfilterreq64));
2680 if (error)
2681 return (error);
2682 /* we never use msfr.msfr_srcs; */
2683 memcpy(&msfr, &msfr64, sizeof(msfr));
2684 } else {
2685 error = sooptcopyin(sopt, &msfr32,
2686 sizeof(struct __msfilterreq32),
2687 sizeof(struct __msfilterreq32));
2688 if (error)
2689 return (error);
2690 /* we never use msfr.msfr_srcs; */
2691 memcpy(&msfr, &msfr32, sizeof(msfr));
2692 }
2693
2694 if ((size_t) msfr.msfr_nsrcs >
2695 UINT32_MAX4294967295U / sizeof(struct sockaddr_storage))
2696 msfr.msfr_nsrcs = UINT32_MAX4294967295U / sizeof(struct sockaddr_storage);
2697
2698 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc)
2699 return (ENOBUFS55);
2700
2701 if (msfr.msfr_fmode != MCAST_EXCLUDE2 &&
2702 msfr.msfr_fmode != MCAST_INCLUDE1)
2703 return (EINVAL22);
2704
2705 if (msfr.msfr_group.ss_family != AF_INET630 ||
2706 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
2707 return (EINVAL22);
2708
2709 gsa = (sockunion_t *)&msfr.msfr_group;
2710 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)((&gsa->sin6.sin6_addr)->__u6_addr.__u6_addr8[0] ==
0xff)
)
2711 return (EINVAL22);
2712
2713 gsa->sin6.sin6_port = 0; /* ignore port */
2714
2715 ifnet_head_lock_shared();
2716 if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) {
2717 ifnet_head_done();
2718 return (EADDRNOTAVAIL49);
2719 }
2720 ifp = ifindex2ifnet[msfr.msfr_ifindex];
2721 ifnet_head_done();
2722 if (ifp == NULL((void *)0))
2723 return (EADDRNOTAVAIL49);
2724
2725 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL((void *)0));
2726
2727 /*
2728 * Take the INP write lock.
2729 * Check if this socket is a member of this group.
2730 */
2731 imo = in6p_findmoptions(inp);
2732 if (imo == NULL((void *)0))
2733 return (ENOMEM12);
2734
2735 IM6O_LOCK(imo)lck_mtx_lock(&(imo)->im6o_lock);
2736 idx = im6o_match_group(imo, ifp, &gsa->sa);
2737 if (idx == (size_t)-1 || imo->im6o_mfilters == NULL((void *)0)) {
2738 error = EADDRNOTAVAIL49;
2739 goto out_imo_locked;
2740 }
2741 inm = imo->im6o_membership[idx];
2742 imf = &imo->im6o_mfilters[idx];
2743
2744 /*
2745 * Begin state merge transaction at socket layer.
2746 */
2747
2748 imf->im6f_st[1] = msfr.msfr_fmode;
2749
2750 /*
2751 * Apply any new source filters, if present.
2752 * Make a copy of the user-space source vector so
2753 * that we may copy them with a single copyin. This
2754 * allows us to deal with page faults up-front.
2755 */
2756 if (msfr.msfr_nsrcs > 0) {
2757 struct in6_msource *lims;
2758 struct sockaddr_in6 *psin;
2759 struct sockaddr_storage *kss, *pkss;
2760 unsigned int i;
2761
2762 if (IS_64BIT_PROCESS(current_proc()))
2763 tmp_ptr = msfr64.msfr_srcs;
2764 else
2765 tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs)((user_addr_t)((uintptr_t)(msfr32.msfr_srcs)));
2766
2767 MLD_PRINTF(("%s: loading %lu source list entries\n",do { if (mld_debug) printf ("%s: loading %lu source list entries\n"
, __func__, (unsigned long)msfr.msfr_nsrcs); } while (0)
2768 __func__, (unsigned long)msfr.msfr_nsrcs))do { if (mld_debug) printf ("%s: loading %lu source list entries\n"
, __func__, (unsigned long)msfr.msfr_nsrcs); } while (0)
;
2769 kss = _MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*kss),({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*kss), 80, 0x0000
, &site); })
2770 M_TEMP, M_WAITOK)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data"
))); __MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*kss), 80, 0x0000
, &site); })
;
2771 if (kss == NULL((void *)0)) {
2772 error = ENOMEM12;
2773 goto out_imo_locked;
2774 }
2775
2776 error = copyin(tmp_ptr, kss,
2777 (size_t) msfr.msfr_nsrcs * sizeof(*kss));
2778 if (error) {
2779 FREE(kss, M_TEMP)_FREE((void *)kss, 80);
2780 goto out_imo_locked;
2781 }
2782
2783 /*
2784 * Mark all source filters as UNDEFINED at t1.
2785 * Restore new group filter mode, as im6f_leave()
2786 * will set it to INCLUDE.
2787 */
2788 im6f_leave(imf);
2789 imf->im6f_st[1] = msfr.msfr_fmode;
2790
2791 /*
2792 * Update socket layer filters at t1, lazy-allocating
2793 * new entries. This saves a bunch of memory at the
2794 * cost of one RB_FIND() per source entry; duplicate
2795 * entries in the msfr_nsrcs vector are ignored.
2796 * If we encounter an error, rollback transaction.
2797 *
2798 * XXX This too could be replaced with a set-symmetric
2799 * difference like loop to avoid walking from root
2800 * every time, as the key space is common.
2801 */
2802 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) {
2803 psin = (struct sockaddr_in6 *)pkss;
2804 if (psin->sin6_family != AF_INET630) {
2805 error = EAFNOSUPPORT47;
2806 break;
2807 }
2808 if (psin->sin6_len != sizeof(struct sockaddr_in6)) {
2809 error = EINVAL22;
2810 break;
2811 }
2812 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)((&psin->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
) {
2813 error = EINVAL22;
2814 break;
2815 }
2816 /*
2817 * TODO: Validate embedded scope ID in source
2818 * list entry against passed-in ifp, if and only
2819 * if source list filter entry is iface or node local.
2820 */
2821 in6_clearscope(&psin->sin6_addr);
2822 error = im6f_get_source(imf, psin, &lims);
2823 if (error)
2824 break;
2825 lims->im6sl_st[1] = imf->im6f_st[1];
2826 }
2827 FREE(kss, M_TEMP)_FREE((void *)kss, 80);
2828 }
2829
2830 if (error)
2831 goto out_im6f_rollback;
2832
2833 /*
2834 * Begin state merge transaction at MLD layer.
2835 */
2836 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
2837 MLD_PRINTF(("%s: merge inm state\n", __func__))do { if (mld_debug) printf ("%s: merge inm state\n", __func__
); } while (0)
;
2838 error = in6m_merge(inm, imf);
2839 if (error) {
2840 MLD_PRINTF(("%s: failed to merge inm state\n", __func__))do { if (mld_debug) printf ("%s: failed to merge inm state\n"
, __func__); } while (0)
;
2841 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
2842 goto out_im6f_rollback;
2843 }
2844
2845 MLD_PRINTF(("%s: doing mld downcall\n", __func__))do { if (mld_debug) printf ("%s: doing mld downcall\n", __func__
); } while (0)
;
2846 error = mld_change_state(inm, &mtp, 0);
2847 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
2848#if MLD_DEBUG1
2849 if (error)
2850 MLD_PRINTF(("%s: failed mld downcall\n", __func__))do { if (mld_debug) printf ("%s: failed mld downcall\n", __func__
); } while (0)
;
2851#endif
2852
2853out_im6f_rollback:
2854 if (error)
2855 im6f_rollback(imf);
2856 else
2857 im6f_commit(imf);
2858
2859 im6f_reap(imf);
2860
2861out_imo_locked:
2862 IM6O_UNLOCK(imo)lck_mtx_unlock(&(imo)->im6o_lock);
2863 IM6O_REMREF(imo)im6o_remref(imo); /* from in6p_findmoptions() */
2864
2865 /* schedule timer now that we've dropped the lock(s) */
2866 mld_set_timeout(&mtp);
2867
2868 return (error);
2869}
2870
2871/*
2872 * Set the IP multicast options in response to user setsockopt().
2873 *
2874 * Many of the socket options handled in this function duplicate the
2875 * functionality of socket options in the regular unicast API. However,
2876 * it is not possible to merge the duplicate code, because the idempotence
2877 * of the IPv6 multicast part of the BSD Sockets API must be preserved;
2878 * the effects of these options must be treated as separate and distinct.
2879 *
2880 */
2881int
2882ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt)
2883{
2884 struct ip6_moptions *im6o;
2885 int error;
2886
2887 error = 0;
2888
2889 /*
2890 * If socket is neither of type SOCK_RAW or SOCK_DGRAM,
2891 * or is a divert socket, reject it.
2892 */
2893 if (SOCK_PROTO(inp->inp_socket)((inp->inp_socket)->so_proto->pr_protocol) == IPPROTO_DIVERT254 ||
2894 (SOCK_TYPE(inp->inp_socket)((inp->inp_socket)->so_proto->pr_type) != SOCK_RAW3 &&
2895 SOCK_TYPE(inp->inp_socket)((inp->inp_socket)->so_proto->pr_type) != SOCK_DGRAM2))
2896 return (EOPNOTSUPP102);
2897
2898 switch (sopt->sopt_name) {
2899 case IPV6_MULTICAST_IF9:
2900 error = in6p_set_multicast_if(inp, sopt);
2901 break;
2902
2903 case IPV6_MULTICAST_HOPS10: {
2904 int hlim;
2905
2906 if (sopt->sopt_valsize != sizeof(int)) {
2907 error = EINVAL22;
2908 break;
2909 }
2910 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int));
2911 if (error)
2912 break;
2913 if (hlim < -1 || hlim > 255) {
2914 error = EINVAL22;
2915 break;
2916 } else if (hlim == -1) {
2917 hlim = ip6_defmcasthlim;
2918 }
2919 im6o = in6p_findmoptions(inp);
2920 if (im6o == NULL((void *)0)) {
2921 error = ENOMEM12;
2922 break;
2923 }
2924 IM6O_LOCK(im6o)lck_mtx_lock(&(im6o)->im6o_lock);
2925 im6o->im6o_multicast_hlim = hlim;
2926 IM6O_UNLOCK(im6o)lck_mtx_unlock(&(im6o)->im6o_lock);
2927 IM6O_REMREF(im6o)im6o_remref(im6o); /* from in6p_findmoptions() */
2928 break;
2929 }
2930
2931 case IPV6_MULTICAST_LOOP11: {
2932 u_int loop;
2933
2934 /*
2935 * Set the loopback flag for outgoing multicast packets.
2936 * Must be zero or one.
2937 */
2938 if (sopt->sopt_valsize != sizeof(u_int)) {
2939 error = EINVAL22;
2940 break;
2941 }
2942 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int));
2943 if (error)
2944 break;
2945 if (loop > 1) {
2946 error = EINVAL22;
2947 break;
2948 }
2949 im6o = in6p_findmoptions(inp);
2950 if (im6o == NULL((void *)0)) {
2951 error = ENOMEM12;
2952 break;
2953 }
2954 IM6O_LOCK(im6o)lck_mtx_lock(&(im6o)->im6o_lock);
2955 im6o->im6o_multicast_loop = loop;
2956 IM6O_UNLOCK(im6o)lck_mtx_unlock(&(im6o)->im6o_lock);
2957 IM6O_REMREF(im6o)im6o_remref(im6o); /* from in6p_findmoptions() */
2958 break;
2959 }
2960
2961 case IPV6_JOIN_GROUP12:
2962 case MCAST_JOIN_GROUP80:
2963 case MCAST_JOIN_SOURCE_GROUP82:
2964 error = in6p_join_group(inp, sopt);
2965 break;
2966
2967 case IPV6_LEAVE_GROUP13:
2968 case MCAST_LEAVE_GROUP81:
2969 case MCAST_LEAVE_SOURCE_GROUP83:
2970 error = in6p_leave_group(inp, sopt);
2971 break;
2972
2973 case MCAST_BLOCK_SOURCE84:
2974 case MCAST_UNBLOCK_SOURCE85:
2975 error = in6p_block_unblock_source(inp, sopt);
2976 break;
2977
2978 case IPV6_MSFILTER74:
2979 error = in6p_set_source_filters(inp, sopt);
2980 break;
2981
2982 default:
2983 error = EOPNOTSUPP102;
2984 break;
2985 }
2986
2987 return (error);
2988}
2989/*
2990 * Expose MLD's multicast filter mode and source list(s) to userland,
2991 * keyed by (ifindex, group).
2992 * The filter mode is written out as a uint32_t, followed by
2993 * 0..n of struct in6_addr.
2994 * For use by ifmcstat(8).
2995 */
2996static int
2997sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req
*req)
2998{
2999#pragma unused(oidp)
3000
3001 struct in6_addr mcaddr;
3002 struct in6_addr src;
3003 struct ifnet *ifp;
3004 struct in6_multi *inm;
3005 struct in6_multistep step;
3006 struct ip6_msource *ims;
3007 int *name;
3008 int retval = 0;
3009 u_int namelen;
3010 uint32_t fmode, ifindex;
3011
3012 name = (int *)arg1;
3013 namelen = arg2;
3014
3015 if (req->newptr != USER_ADDR_NULL((user_addr_t) 0))
3016 return (EPERM1);
3017
3018 /* int: ifindex + 4 * 32 bits of IPv6 address */
3019 if (namelen != 5)
3020 return (EINVAL22);
3021
3022 ifindex = name[0];
3023 ifnet_head_lock_shared();
3024 if (ifindex <= 0 || ifindex > (u_int)if_index) {
3025 MLD_PRINTF(("%s: ifindex %u out of range\n",do { if (mld_debug) printf ("%s: ifindex %u out of range\n", __func__
, ifindex); } while (0)
3026 __func__, ifindex))do { if (mld_debug) printf ("%s: ifindex %u out of range\n", __func__
, ifindex); } while (0)
;
3027 ifnet_head_done();
3028 return (ENOENT2);
3029 }
3030
3031 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr));
3032 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)((&mcaddr)->__u6_addr.__u6_addr8[0] == 0xff)) {
3033 MLD_PRINTF(("%s: group %s is not multicast\n",do { if (mld_debug) printf ("%s: group %s is not multicast\n"
, __func__, ip6_sprintf(&mcaddr)); } while (0)
3034 __func__, ip6_sprintf(&mcaddr)))do { if (mld_debug) printf ("%s: group %s is not multicast\n"
, __func__, ip6_sprintf(&mcaddr)); } while (0)
;
3035 ifnet_head_done();
3036 return (EINVAL22);
3037 }
3038
3039 ifp = ifindex2ifnet[ifindex];
3040 ifnet_head_done();
3041 if (ifp == NULL((void *)0)) {
3042 MLD_PRINTF(("%s: no ifp for ifindex %u\n", __func__, ifindex))do { if (mld_debug) printf ("%s: no ifp for ifindex %u\n", __func__
, ifindex); } while (0)
;
3043 return (ENOENT2);
3044 }
3045 /*
3046 * Internal MLD lookups require that scope/zone ID is set.
3047 */
3048 (void)in6_setscope(&mcaddr, ifp, NULL((void *)0));
3049
3050 in6_multihead_lock_shared();
3051 IN6_FIRST_MULTI(step, inm)do { in6_multihead_lock_assert(0x03); (step).i_in6m = in6_multihead
.lh_first; do { in6_multihead_lock_assert(0x03); if ((((inm))
= ((step)).i_in6m) != ((void *)0)) ((step)).i_in6m = ((step)
).i_in6m->in6m_entry.le_next; } while (0); } while (0)
;
3052 while (inm != NULL((void *)0)) {
3053 IN6M_LOCK(inm)lck_mtx_lock(&(inm)->in6m_lock);
3054 if (inm->in6m_ifp != ifp)
3055 goto next;
3056
3057 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)(bcmp(&(&inm->in6m_addr)->__u6_addr.__u6_addr8[
0], &(&mcaddr)->__u6_addr.__u6_addr8[0], sizeof (struct
in6_addr)) == 0)
)
3058 goto next;
3059
3060 fmode = inm->in6m_st[1].iss_fmode;
3061 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t))(req->oldfunc)(req, &fmode, sizeof(uint32_t));
3062 if (retval != 0) {
3063 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
3064 break; /* abort */
3065 }
3066 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs)for ((ims) = ip6_msource_tree_RB_MINMAX(&inm->in6m_srcs
, -1); (ims) != ((void *)0); (ims) = ip6_msource_tree_RB_NEXT
(ims))
{
3067 MLD_PRINTF(("%s: visit node 0x%llx\n", __func__,do { if (mld_debug) printf ("%s: visit node 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(ims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(ims) + vm_kernel_addrperm)); } while (0)
3068 (uint64_t)VM_KERNEL_ADDRPERM(ims)))do { if (mld_debug) printf ("%s: visit node 0x%llx\n", __func__
, (uint64_t)(((vm_offset_t)(ims) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(ims) + vm_kernel_addrperm)); } while (0)
;
3069 /*
3070 * Only copy-out sources which are in-mode.
3071 */
3072 if (fmode != im6s_get_mode(inm, ims, 1)) {
3073 MLD_PRINTF(("%s: skip non-in-mode\n",do { if (mld_debug) printf ("%s: skip non-in-mode\n", __func__
); } while (0)
3074 __func__))do { if (mld_debug) printf ("%s: skip non-in-mode\n", __func__
); } while (0)
;
3075 continue; /* process next source */
3076 }
3077 src = ims->im6s_addr;
3078 retval = SYSCTL_OUT(req, &src, sizeof(struct in6_addr))(req->oldfunc)(req, &src, sizeof(struct in6_addr));
3079 if (retval != 0)
3080 break; /* process next inm */
3081 }
3082next:
3083 IN6M_UNLOCK(inm)lck_mtx_unlock(&(inm)->in6m_lock);
3084 IN6_NEXT_MULTI(step, inm)do { in6_multihead_lock_assert(0x03); if (((inm) = (step).i_in6m
) != ((void *)0)) (step).i_in6m = (step).i_in6m->in6m_entry
.le_next; } while (0)
;
3085 }
3086 in6_multihead_lock_done();
3087
3088 return (retval);
3089}
3090
3091void
3092in6_multi_init(void)
3093{
3094 PE_parse_boot_argn("ifa_debug", &in6m_debug, sizeof (in6m_debug));
3095
3096 /* Setup lock group and attribute for in6_multihead */
3097 in6_multihead_lock_grp_attr = lck_grp_attr_alloc_init();
3098 in6_multihead_lock_grp = lck_grp_alloc_init("in6_multihead",
3099 in6_multihead_lock_grp_attr);
3100 in6_multihead_lock_attr = lck_attr_alloc_init();
3101 lck_rw_init(&in6_multihead_lock, in6_multihead_lock_grp,
3102 in6_multihead_lock_attr);
3103
3104 lck_mtx_init(&in6m_trash_lock, in6_multihead_lock_grp,
3105 in6_multihead_lock_attr);
3106 TAILQ_INIT(&in6m_trash_head)do { (((&in6m_trash_head))->tqh_first) = ((void *)0); (
&in6m_trash_head)->tqh_last = &(((&in6m_trash_head
))->tqh_first); ; } while (0)
;
3107
3108 in6m_size = (in6m_debug == 0) ? sizeof (struct in6_multi) :
3109 sizeof (struct in6_multi_dbg);
3110 in6m_zone = zinit(in6m_size, IN6M_ZONE_MAX64 * in6m_size,
3111 0, IN6M_ZONE_NAME"in6_multi");
3112 if (in6m_zone == NULL((void *)0)) {
3113 panic("%s: failed allocating %s", __func__, IN6M_ZONE_NAME)(panic)("\"%s: failed allocating %s\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3113", __func__, "in6_multi")
;
3114 /* NOTREACHED */
3115 }
3116 zone_change(in6m_zone, Z_EXPAND3, TRUE1);
3117
3118 imm_size = sizeof (struct in6_multi_mship);
3119 imm_zone = zinit(imm_size, IMM_ZONE_MAX64 * imm_size, 0, IMM_ZONE_NAME"in6_multi_mship");
3120 if (imm_zone == NULL((void *)0)) {
3121 panic("%s: failed allocating %s", __func__, IMM_ZONE_NAME)(panic)("\"%s: failed allocating %s\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3121", __func__, "in6_multi_mship")
;
3122 /* NOTREACHED */
3123 }
3124 zone_change(imm_zone, Z_EXPAND3, TRUE1);
3125
3126 ip6ms_size = sizeof (struct ip6_msource);
3127 ip6ms_zone = zinit(ip6ms_size, IP6MS_ZONE_MAX64 * ip6ms_size,
3128 0, IP6MS_ZONE_NAME"ip6_msource");
3129 if (ip6ms_zone == NULL((void *)0)) {
3130 panic("%s: failed allocating %s", __func__, IP6MS_ZONE_NAME)(panic)("\"%s: failed allocating %s\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3130", __func__, "ip6_msource")
;
3131 /* NOTREACHED */
3132 }
3133 zone_change(ip6ms_zone, Z_EXPAND3, TRUE1);
3134
3135 in6ms_size = sizeof (struct in6_msource);
3136 in6ms_zone = zinit(in6ms_size, IN6MS_ZONE_MAX64 * in6ms_size,
3137 0, IN6MS_ZONE_NAME"in6_msource");
3138 if (in6ms_zone == NULL((void *)0)) {
3139 panic("%s: failed allocating %s", __func__, IN6MS_ZONE_NAME)(panic)("\"%s: failed allocating %s\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3139", __func__, "in6_msource")
;
3140 /* NOTREACHED */
3141 }
3142 zone_change(in6ms_zone, Z_EXPAND3, TRUE1);
3143}
3144
3145static struct in6_multi *
3146in6_multi_alloc(int how)
3147{
3148 struct in6_multi *in6m;
3149
3150 in6m = (how == M_WAITOK0x0000) ? zalloc(in6m_zone) :
3151 zalloc_noblock(in6m_zone);
3152 if (in6m != NULL((void *)0)) {
3153 bzero(in6m, in6m_size);
3154 lck_mtx_init(&in6m->in6m_lock, in6_multihead_lock_grp,
3155 in6_multihead_lock_attr);
3156 in6m->in6m_debug |= IFD_ALLOC0x2;
3157 if (in6m_debug != 0) {
3158 in6m->in6m_debug |= IFD_DEBUG0x4;
3159 in6m->in6m_trace = in6m_trace;
3160 }
3161 }
3162 return (in6m);
3163}
3164
3165static void
3166in6_multi_free(struct in6_multi *in6m)
3167{
3168 IN6M_LOCK(in6m)lck_mtx_lock(&(in6m)->in6m_lock);
3169 if (in6m->in6m_debug & IFD_ATTACHED0x1) {
3170 panic("%s: attached in6m=%p is being freed", __func__, in6m)(panic)("\"%s: attached in6m=%p is being freed\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3170", __func__, in6m)
;
3171 /* NOTREACHED */
3172 } else if (in6m->in6m_ifma != NULL((void *)0)) {
3173 panic("%s: ifma not NULL for in6m=%p", __func__, in6m)(panic)("\"%s: ifma not NULL for in6m=%p\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3173", __func__, in6m)
;
3174 /* NOTREACHED */
3175 } else if (!(in6m->in6m_debug & IFD_ALLOC0x2)) {
3176 panic("%s: in6m %p cannot be freed", __func__, in6m)(panic)("\"%s: in6m %p cannot be freed\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3176", __func__, in6m)
;
3177 /* NOTREACHED */
3178 } else if (in6m->in6m_refcount != 0) {
3179 panic("%s: non-zero refcount in6m=%p", __func__, in6m)(panic)("\"%s: non-zero refcount in6m=%p\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3179", __func__, in6m)
;
3180 /* NOTREACHED */
3181 } else if (in6m->in6m_reqcnt != 0) {
3182 panic("%s: non-zero reqcnt in6m=%p", __func__, in6m)(panic)("\"%s: non-zero reqcnt in6m=%p\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3182", __func__, in6m)
;
3183 /* NOTREACHED */
3184 }
3185
3186 /* Free any pending MLDv2 state-change records */
3187 IF_DRAIN(&in6m->in6m_scq)do { struct mbuf *_m; for (;;) { do { (_m) = (&in6m->in6m_scq
)->ifq_head; if (_m != ((void *)0)) { if (((&in6m->
in6m_scq)->ifq_head = (_m)->m_hdr.mh_nextpkt) == ((void
*)0)) (&in6m->in6m_scq)->ifq_tail = ((void *)0); (
_m)->m_hdr.mh_nextpkt = ((void *)0); (&in6m->in6m_scq
)->ifq_len--; } } while (0); if (_m == ((void *)0)) break;
m_freem(_m); } } while (0)
;
3188
3189 in6m->in6m_debug &= ~IFD_ALLOC0x2;
3190 if ((in6m->in6m_debug & (IFD_DEBUG0x4 | IFD_TRASHED0x10)) ==
3191 (IFD_DEBUG0x4 | IFD_TRASHED0x10)) {
3192 lck_mtx_lock(&in6m_trash_lock);
3193 TAILQ_REMOVE(&in6m_trash_head, (struct in6_multi_dbg *)in6m,do { if ((((((struct in6_multi_dbg *)in6m))->in6m_trash_link
.tqe_next)) != ((void *)0)) ((((struct in6_multi_dbg *)in6m))
->in6m_trash_link.tqe_next)->in6m_trash_link.tqe_prev =
((struct in6_multi_dbg *)in6m)->in6m_trash_link.tqe_prev;
else { (&in6m_trash_head)->tqh_last = ((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev; ; } *((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev = ((((struct in6_multi_dbg
*)in6m))->in6m_trash_link.tqe_next); ; ; ; } while (0)
3194 in6m_trash_link)do { if ((((((struct in6_multi_dbg *)in6m))->in6m_trash_link
.tqe_next)) != ((void *)0)) ((((struct in6_multi_dbg *)in6m))
->in6m_trash_link.tqe_next)->in6m_trash_link.tqe_prev =
((struct in6_multi_dbg *)in6m)->in6m_trash_link.tqe_prev;
else { (&in6m_trash_head)->tqh_last = ((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev; ; } *((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev = ((((struct in6_multi_dbg
*)in6m))->in6m_trash_link.tqe_next); ; ; ; } while (0)
;
3195 lck_mtx_unlock(&in6m_trash_lock);
3196 in6m->in6m_debug &= ~IFD_TRASHED0x10;
3197 }
3198 IN6M_UNLOCK(in6m)lck_mtx_unlock(&(in6m)->in6m_lock);
3199
3200 lck_mtx_destroy(&in6m->in6m_lock, in6_multihead_lock_grp);
3201 zfree(in6m_zone, in6m);
3202}
3203
3204static void
3205in6_multi_attach(struct in6_multi *in6m)
3206{
3207 in6_multihead_lock_assert(LCK_RW_ASSERT_EXCLUSIVE0x02);
3208 IN6M_LOCK_ASSERT_HELD(in6m)lck_mtx_assert(&(in6m)->in6m_lock, 1);
3209
3210 if (in6m->in6m_debug & IFD_ATTACHED0x1) {
3211 panic("%s: Attempt to attach an already attached in6m=%p",(panic)("\"%s: Attempt to attach an already attached in6m=%p\""
"@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3212", __func__, in6m)
3212 __func__, in6m)(panic)("\"%s: Attempt to attach an already attached in6m=%p\""
"@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3212", __func__, in6m)
;
3213 /* NOTREACHED */
3214 } else if (in6m->in6m_debug & IFD_TRASHED0x10) {
3215 panic("%s: Attempt to reattach a detached in6m=%p",(panic)("\"%s: Attempt to reattach a detached in6m=%p\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3216", __func__, in6m)
3216 __func__, in6m)(panic)("\"%s: Attempt to reattach a detached in6m=%p\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3216", __func__, in6m)
;
3217 /* NOTREACHED */
3218 }
3219
3220 in6m->in6m_reqcnt++;
3221 VERIFY(in6m->in6m_reqcnt == 1)((void)(__builtin_expect(!!((long)((in6m->in6m_reqcnt == 1
))), 1L) || assfail("in6m->in6m_reqcnt == 1", "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
, 3221)))
;
3222 IN6M_ADDREF_LOCKED(in6m)in6m_addref(in6m, 1);
3223 in6m->in6m_debug |= IFD_ATTACHED0x1;
3224 /*
3225 * Reattach case: If debugging is enabled, take it
3226 * out of the trash list and clear IFD_TRASHED.
3227 */
3228 if ((in6m->in6m_debug & (IFD_DEBUG0x4 | IFD_TRASHED0x10)) ==
3229 (IFD_DEBUG0x4 | IFD_TRASHED0x10)) {
3230 /* Become a regular mutex, just in case */
3231 IN6M_CONVERT_LOCK(in6m)do { lck_mtx_assert(&(in6m)->in6m_lock, 1); lck_mtx_convert_spin
(&(in6m)->in6m_lock); } while (0)
;
3232 lck_mtx_lock(&in6m_trash_lock);
3233 TAILQ_REMOVE(&in6m_trash_head, (struct in6_multi_dbg *)in6m,do { if ((((((struct in6_multi_dbg *)in6m))->in6m_trash_link
.tqe_next)) != ((void *)0)) ((((struct in6_multi_dbg *)in6m))
->in6m_trash_link.tqe_next)->in6m_trash_link.tqe_prev =
((struct in6_multi_dbg *)in6m)->in6m_trash_link.tqe_prev;
else { (&in6m_trash_head)->tqh_last = ((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev; ; } *((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev = ((((struct in6_multi_dbg
*)in6m))->in6m_trash_link.tqe_next); ; ; ; } while (0)
3234 in6m_trash_link)do { if ((((((struct in6_multi_dbg *)in6m))->in6m_trash_link
.tqe_next)) != ((void *)0)) ((((struct in6_multi_dbg *)in6m))
->in6m_trash_link.tqe_next)->in6m_trash_link.tqe_prev =
((struct in6_multi_dbg *)in6m)->in6m_trash_link.tqe_prev;
else { (&in6m_trash_head)->tqh_last = ((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev; ; } *((struct in6_multi_dbg
*)in6m)->in6m_trash_link.tqe_prev = ((((struct in6_multi_dbg
*)in6m))->in6m_trash_link.tqe_next); ; ; ; } while (0)
;
3235 lck_mtx_unlock(&in6m_trash_lock);
3236 in6m->in6m_debug &= ~IFD_TRASHED0x10;
3237 }
3238
3239 LIST_INSERT_HEAD(&in6_multihead, in6m, in6m_entry)do { ; if (((((in6m))->in6m_entry.le_next) = (((&in6_multihead
))->lh_first)) != ((void *)0)) (((&in6_multihead))->
lh_first)->in6m_entry.le_prev = &(((in6m))->in6m_entry
.le_next); (((&in6_multihead))->lh_first) = (in6m); (in6m
)->in6m_entry.le_prev = &(((&in6_multihead))->lh_first
); } while (0)
;
3240}
3241
3242int
3243in6_multi_detach(struct in6_multi *in6m)
3244{
3245 in6_multihead_lock_assert(LCK_RW_ASSERT_EXCLUSIVE0x02);
3246 IN6M_LOCK_ASSERT_HELD(in6m)lck_mtx_assert(&(in6m)->in6m_lock, 1);
3247
3248 if (in6m->in6m_reqcnt == 0) {
3249 panic("%s: in6m=%p negative reqcnt", __func__, in6m)(panic)("\"%s: in6m=%p negative reqcnt\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3249", __func__, in6m)
;
3250 /* NOTREACHED */
3251 }
3252
3253 --in6m->in6m_reqcnt;
3254 if (in6m->in6m_reqcnt > 0)
3255 return (0);
3256
3257 if (!(in6m->in6m_debug & IFD_ATTACHED0x1)) {
3258 panic("%s: Attempt to detach an unattached record in6m=%p",(panic)("\"%s: Attempt to detach an unattached record in6m=%p\""
"@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3259", __func__, in6m)
3259 __func__, in6m)(panic)("\"%s: Attempt to detach an unattached record in6m=%p\""
"@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3259", __func__, in6m)
;
3260 /* NOTREACHED */
3261 } else if (in6m->in6m_debug & IFD_TRASHED0x10) {
3262 panic("%s: in6m %p is already in trash list", __func__, in6m)(panic)("\"%s: in6m %p is already in trash list\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3262", __func__, in6m)
;
3263 /* NOTREACHED */
3264 }
3265
3266 /*
3267 * NOTE: Caller calls IFMA_REMREF
3268 */
3269 in6m->in6m_debug &= ~IFD_ATTACHED0x1;
3270 LIST_REMOVE(in6m, in6m_entry)do { ; ; if ((((in6m))->in6m_entry.le_next) != ((void *)0)
) (((in6m))->in6m_entry.le_next)->in6m_entry.le_prev = (
in6m)->in6m_entry.le_prev; *(in6m)->in6m_entry.le_prev =
(((in6m))->in6m_entry.le_next); ; ; } while (0)
;
3271
3272 if (in6m->in6m_debug & IFD_DEBUG0x4) {
3273 /* Become a regular mutex, just in case */
3274 IN6M_CONVERT_LOCK(in6m)do { lck_mtx_assert(&(in6m)->in6m_lock, 1); lck_mtx_convert_spin
(&(in6m)->in6m_lock); } while (0)
;
3275 lck_mtx_lock(&in6m_trash_lock);
3276 TAILQ_INSERT_TAIL(&in6m_trash_head,do { ((((struct in6_multi_dbg *)in6m))->in6m_trash_link.tqe_next
) = ((void *)0); ((struct in6_multi_dbg *)in6m)->in6m_trash_link
.tqe_prev = (&in6m_trash_head)->tqh_last; *(&in6m_trash_head
)->tqh_last = ((struct in6_multi_dbg *)in6m); (&in6m_trash_head
)->tqh_last = &((((struct in6_multi_dbg *)in6m))->in6m_trash_link
.tqe_next); ; ; } while (0)
3277 (struct in6_multi_dbg *)in6m, in6m_trash_link)do { ((((struct in6_multi_dbg *)in6m))->in6m_trash_link.tqe_next
) = ((void *)0); ((struct in6_multi_dbg *)in6m)->in6m_trash_link
.tqe_prev = (&in6m_trash_head)->tqh_last; *(&in6m_trash_head
)->tqh_last = ((struct in6_multi_dbg *)in6m); (&in6m_trash_head
)->tqh_last = &((((struct in6_multi_dbg *)in6m))->in6m_trash_link
.tqe_next); ; ; } while (0)
;
3278 lck_mtx_unlock(&in6m_trash_lock);
3279 in6m->in6m_debug |= IFD_TRASHED0x10;
3280 }
3281
3282 return (1);
3283}
3284
3285void
3286in6m_addref(struct in6_multi *in6m, int locked)
3287{
3288 if (!locked)
3289 IN6M_LOCK_SPIN(in6m)lck_mtx_lock_spin(&(in6m)->in6m_lock);
3290 else
3291 IN6M_LOCK_ASSERT_HELD(in6m)lck_mtx_assert(&(in6m)->in6m_lock, 1);
3292
3293 if (++in6m->in6m_refcount == 0) {
3294 panic("%s: in6m=%p wraparound refcnt", __func__, in6m)(panic)("\"%s: in6m=%p wraparound refcnt\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3294", __func__, in6m)
;
3295 /* NOTREACHED */
3296 } else if (in6m->in6m_trace != NULL((void *)0)) {
3297 (*in6m->in6m_trace)(in6m, TRUE1);
3298 }
3299 if (!locked)
3300 IN6M_UNLOCK(in6m)lck_mtx_unlock(&(in6m)->in6m_lock);
3301}
3302
3303void
3304in6m_remref(struct in6_multi *in6m, int locked)
3305{
3306 struct ifmultiaddr *ifma;
3307 struct mld_ifinfo *mli;
3308
3309 if (!locked)
3310 IN6M_LOCK_SPIN(in6m)lck_mtx_lock_spin(&(in6m)->in6m_lock);
3311 else
3312 IN6M_LOCK_ASSERT_HELD(in6m)lck_mtx_assert(&(in6m)->in6m_lock, 1);
3313
3314 if (in6m->in6m_refcount == 0 || (in6m->in6m_refcount == 1 && locked)) {
3315 panic("%s: in6m=%p negative refcnt", __func__, in6m)(panic)("\"%s: in6m=%p negative refcnt\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3315", __func__, in6m)
;
3316 /* NOTREACHED */
3317 } else if (in6m->in6m_trace != NULL((void *)0)) {
3318 (*in6m->in6m_trace)(in6m, FALSE0);
3319 }
3320
3321 --in6m->in6m_refcount;
3322 if (in6m->in6m_refcount > 0) {
3323 if (!locked)
3324 IN6M_UNLOCK(in6m)lck_mtx_unlock(&(in6m)->in6m_lock);
3325 return;
3326 }
3327
3328 /*
3329 * Synchronization with in6_mc_get(). In the event the in6m has been
3330 * detached, the underlying ifma would still be in the if_multiaddrs
3331 * list, and thus can be looked up via if_addmulti(). At that point,
3332 * the only way to find this in6m is via ifma_protospec. To avoid
3333 * race conditions between the last in6m_remref() of that in6m and its
3334 * use via ifma_protospec, in6_multihead lock is used for serialization.
3335 * In order to avoid violating the lock order, we must drop in6m_lock
3336 * before acquiring in6_multihead lock. To prevent the in6m from being
3337 * freed prematurely, we hold an extra reference.
3338 */
3339 ++in6m->in6m_refcount;
3340 IN6M_UNLOCK(in6m)lck_mtx_unlock(&(in6m)->in6m_lock);
3341 in6_multihead_lock_shared();
3342 IN6M_LOCK_SPIN(in6m)lck_mtx_lock_spin(&(in6m)->in6m_lock);
3343 --in6m->in6m_refcount;
3344 if (in6m->in6m_refcount > 0) {
3345 /* We've lost the race, so abort since in6m is still in use */
3346 IN6M_UNLOCK(in6m)lck_mtx_unlock(&(in6m)->in6m_lock);
3347 in6_multihead_lock_done();
3348 /* If it was locked, return it as such */
3349 if (locked)
3350 IN6M_LOCK(in6m)lck_mtx_lock(&(in6m)->in6m_lock);
3351 return;
3352 }
3353 in6m_purge(in6m);
3354 ifma = in6m->in6m_ifma;
3355 in6m->in6m_ifma = NULL((void *)0);
3356 in6m->in6m_ifp = NULL((void *)0);
3357 mli = in6m->in6m_mli;
3358 in6m->in6m_mli = NULL((void *)0);
3359 IN6M_UNLOCK(in6m)lck_mtx_unlock(&(in6m)->in6m_lock);
3360 IFMA_LOCK_SPIN(ifma)lck_mtx_lock_spin(&(ifma)->ifma_lock);
3361 ifma->ifma_protospec = NULL((void *)0);
3362 IFMA_UNLOCK(ifma)lck_mtx_unlock(&(ifma)->ifma_lock);
3363 in6_multihead_lock_done();
3364
3365 in6_multi_free(in6m);
3366 if_delmulti_ifma(ifma);
3367 /* Release reference held to the underlying ifmultiaddr */
3368 IFMA_REMREF(ifma)ifma_remref(ifma);
3369
3370 if (mli != NULL((void *)0))
3371 MLI_REMREF(mli)mli_remref(mli);
3372}
3373
3374static void
3375in6m_trace(struct in6_multi *in6m, int refhold)
3376{
3377 struct in6_multi_dbg *in6m_dbg = (struct in6_multi_dbg *)in6m;
3378 ctrace_t *tr;
3379 u_int32_t idx;
3380 u_int16_t *cnt;
3381
3382 if (!(in6m->in6m_debug & IFD_DEBUG0x4)) {
3383 panic("%s: in6m %p has no debug structure", __func__, in6m)(panic)("\"%s: in6m %p has no debug structure\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3383", __func__, in6m)
;
3384 /* NOTREACHED */
3385 }
3386 if (refhold) {
3387 cnt = &in6m_dbg->in6m_refhold_cnt;
3388 tr = in6m_dbg->in6m_refhold;
3389 } else {
3390 cnt = &in6m_dbg->in6m_refrele_cnt;
3391 tr = in6m_dbg->in6m_refrele;
3392 }
3393
3394 idx = atomic_add_16_ov(cnt, 1)((u_int16_t) OSAddAtomic16(1, (volatile SInt16 *)cnt)) % IN6M_TRACE_HIST_SIZE32;
3395 ctrace_record(&tr[idx]);
3396}
3397
3398static struct in6_multi_mship *
3399in6_multi_mship_alloc(int how)
3400{
3401 struct in6_multi_mship *imm;
3402
3403 imm = (how == M_WAITOK0x0000) ? zalloc(imm_zone) : zalloc_noblock(imm_zone);
3404 if (imm != NULL((void *)0))
3405 bzero(imm, imm_size);
3406
3407 return (imm);
3408}
3409
3410static void
3411in6_multi_mship_free(struct in6_multi_mship *imm)
3412{
3413 if (imm->i6mm_maddr != NULL((void *)0)) {
3414 panic("%s: i6mm_maddr not NULL for imm=%p", __func__, imm)(panic)("\"%s: i6mm_maddr not NULL for imm=%p\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/netinet6/in6_mcast.c"
":" "3414", __func__, imm)
;
3415 /* NOTREACHED */
3416 }
3417 zfree(imm_zone, imm);
3418}
3419
3420void
3421in6_multihead_lock_exclusive(void)
3422{
3423 lck_rw_lock_exclusive(&in6_multihead_lock);
3424}
3425
3426void
3427in6_multihead_lock_shared(void)
3428{
3429 lck_rw_lock_shared(&in6_multihead_lock);
3430}
3431
3432void
3433in6_multihead_lock_assert(int what)
3434{
3435 lck_rw_assert(&in6_multihead_lock, what);
3436}
3437
3438void
3439in6_multihead_lock_done(void)
3440{
3441 lck_rw_done(&in6_multihead_lock);
3442}
3443
3444static struct ip6_msource *
3445ip6ms_alloc(int how)
3446{
3447 struct ip6_msource *i6ms;
3448
3449 i6ms = (how == M_WAITOK0x0000) ? zalloc(ip6ms_zone) :
3450 zalloc_noblock(ip6ms_zone);
3451 if (i6ms != NULL((void *)0))
3452 bzero(i6ms, ip6ms_size);
3453
3454 return (i6ms);
3455}
3456
3457static void
3458ip6ms_free(struct ip6_msource *i6ms)
3459{
3460 zfree(ip6ms_zone, i6ms);
3461}
3462
3463static struct in6_msource *
3464in6ms_alloc(int how)
3465{
3466 struct in6_msource *in6ms;
3467
3468 in6ms = (how == M_WAITOK0x0000) ? zalloc(in6ms_zone) :
3469 zalloc_noblock(in6ms_zone);
3470 if (in6ms != NULL((void *)0))
3471 bzero(in6ms, in6ms_size);
3472
3473 return (in6ms);
3474}
3475
3476static void
3477in6ms_free(struct in6_msource *in6ms)
3478{
3479 zfree(in6ms_zone, in6ms);
3480}
3481
3482#ifdef MLD_DEBUG1
3483
3484static const char *in6m_modestrs[] = { "un\n", "in", "ex" };
3485
3486static const char *
3487in6m_mode_str(const int mode)
3488{
3489 if (mode >= MCAST_UNDEFINED0 && mode <= MCAST_EXCLUDE2)
3490 return (in6m_modestrs[mode]);
3491 return ("??");
3492}
3493
3494static const char *in6m_statestrs[] = {
3495 "not-member\n",
3496 "silent\n",
3497 "reporting\n",
3498 "idle\n",
3499 "lazy\n",
3500 "sleeping\n",
3501 "awakening\n",
3502 "query-pending\n",
3503 "sg-query-pending\n",
3504 "leaving"
3505};
3506
3507static const char *
3508in6m_state_str(const int state)
3509{
3510 if (state >= MLD_NOT_MEMBER0 && state <= MLD_LEAVING_MEMBER9)
3511 return (in6m_statestrs[state]);
3512 return ("??");
3513}
3514
3515/*
3516 * Dump an in6_multi structure to the console.
3517 */
3518void
3519in6m_print(const struct in6_multi *inm)
3520{
3521 int t;
3522
3523 IN6M_LOCK_ASSERT_HELD(__DECONST(struct in6_multi *, inm))lck_mtx_assert(&((struct in6_multi *) (long)(inm))->in6m_lock
, 1)
;
3524
3525 if (mld_debug == 0)
3526 return;
3527
3528 printf("%s: --- begin in6m 0x%llx ---\n", __func__,
3529 (uint64_t)VM_KERNEL_ADDRPERM(inm)(((vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (vm_offset_t)
(inm) + vm_kernel_addrperm)
);
3530 printf("addr %s ifp 0x%llx(%s) ifma 0x%llx\n",
3531 ip6_sprintf(&inm->in6m_addr),
3532 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp)(((vm_offset_t)(inm->in6m_ifp) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(inm->in6m_ifp) + vm_kernel_addrperm)
,
3533 if_name(inm->in6m_ifp)inm->in6m_ifp->if_xname,
3534 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifma)(((vm_offset_t)(inm->in6m_ifma) == 0) ? (vm_offset_t)(0) :
(vm_offset_t)(inm->in6m_ifma) + vm_kernel_addrperm)
);
3535 printf("timer %u state %s refcount %u scq.len %u\n",
3536 inm->in6m_timer,
3537 in6m_state_str(inm->in6m_state),
3538 inm->in6m_refcount,
3539 inm->in6m_scq.ifq_len);
3540 printf("mli 0x%llx nsrc %lu sctimer %u scrv %u\n",
3541 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_mli)(((vm_offset_t)(inm->in6m_mli) == 0) ? (vm_offset_t)(0) : (
vm_offset_t)(inm->in6m_mli) + vm_kernel_addrperm)
,
3542 inm->in6m_nsrc,
3543 inm->in6m_sctimer,
3544 inm->in6m_scrv);
3545 for (t = 0; t < 2; t++) {
3546 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
3547 in6m_mode_str(inm->in6m_st[t].iss_fmode),
3548 inm->in6m_st[t].iss_asm,
3549 inm->in6m_st[t].iss_ex,
3550 inm->in6m_st[t].iss_in,
3551 inm->in6m_st[t].iss_rec);
3552 }
3553 printf("%s: --- end in6m 0x%llx ---\n", __func__,
3554 (uint64_t)VM_KERNEL_ADDRPERM(inm)(((vm_offset_t)(inm) == 0) ? (vm_offset_t)(0) : (vm_offset_t)
(inm) + vm_kernel_addrperm)
);
3555}
3556
3557#else
3558
3559void
3560in6m_print(__unused__attribute__((unused)) const struct in6_multi *inm)
3561{
3562
3563}
3564
3565#endif