Bug Summary

File: bsd/net/pf_table.c
Warning: line 1110, column 8
Copies out a struct with uncleared padding (>= 4 bytes)
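
The warning means a stack-allocated structure is copied out to user space while
compiler-inserted padding bytes inside it were never written, so stale kernel
stack contents can cross the user/kernel boundary. One common mitigation,
sketched here against this file's own PFRW_GET_ASTATS case using only functions
and macros the file already defines, is to zero the whole object before filling
it in. This is an illustrative fix, not necessarily the change Apple shipped:

    case PFRW_GET_ASTATS:
        if (w->pfrw_free-- > 0) {
            struct pfr_astats as;

            /* Clear every byte, including implicit padding,
             * before populating the individual fields. */
            bzero(&as, sizeof (as));

            pfr_copyout_addr(&as.pfras_a, ke);
            bcopy(ke->pfrke_packets, as.pfras_packets,
                sizeof (as.pfras_packets));
            bcopy(ke->pfrke_bytes, as.pfras_bytes,
                sizeof (as.pfras_bytes));
            as.pfras_tzero = ke->pfrke_tzero;

            if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
                return (EFAULT);
        }
        break;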

Annotated Source Code

1/*
2 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
30/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */
31
32/*
33 * Copyright (c) 2002 Cedric Berger
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * - Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * - Redistributions in binary form must reproduce the above
43 * copyright notice, this list of conditions and the following
44 * disclaimer in the documentation and/or other materials provided
45 * with the distribution.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
48 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
49 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
50 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
51 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
53 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
54 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
55 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
57 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
58 * POSSIBILITY OF SUCH DAMAGE.
59 *
60 */
61
62#include <sys/param.h>
63#include <sys/systm.h>
64#include <sys/socket.h>
65#include <sys/mbuf.h>
66#include <sys/kernel.h>
67#include <sys/malloc.h>
68
69#include <net/if.h>
70#include <net/route.h>
71#include <netinet/in.h>
72#include <net/radix.h>
73#include <net/pfvar.h>
74
75#define ACCEPT_FLAGS(flags, oklist) \
76 do { \
77 if ((flags & ~(oklist)) & \
78 PFR_FLAG_ALLMASK) \
79 return (EINVAL); \
80 } while (0)
81
82#define COPYIN(from, to, size, flags) \
83 ((flags & PFR_FLAG_USERIOCTL) ? \
84 copyin((from), (to), (size)) : \
85 (bcopy((void *)(uintptr_t)(from), (to), (size)), 0))
86
87#define COPYOUT(from, to, size, flags) \
88 ((flags & PFR_FLAG_USERIOCTL) ? \
89 copyout((from), (to), (size)) : \
90 (bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
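
Note the dual behavior of COPYIN/COPYOUT: with PFR_FLAG_USERIOCTL set, the data
crosses the user/kernel boundary via copyin/copyout; otherwise it is a plain
in-kernel bcopy. This is what makes the padding warning at line 1110 security
relevant: when the flag is set, any uninitialized bytes in the source struct
are copied out to user-controlled memory.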
91
92#define FILLIN_SIN(sin, addr) \
93 do { \
94 (sin).sin_len = sizeof (sin); \
95 (sin).sin_family = AF_INET; \
96 (sin).sin_addr = (addr); \
97 } while (0)
98
99#define FILLIN_SIN6(sin6, addr) \
100 do { \
101 (sin6).sin6_len = sizeof (sin6); \
102 (sin6).sin6_family = AF_INET6; \
103 (sin6).sin6_addr = (addr); \
104 } while (0)
105
106#define SWAP(type, a1, a2) \
107 do { \
108 type tmp = a1; \
109 a1 = a2; \
110 a2 = tmp; \
111 } while (0)
112
113#define SUNION2PF(su, af) (((af) == AF_INET) ? \
114 (struct pf_addr *)&(su)->sin.sin_addr : \
115 (struct pf_addr *)&(su)->sin6.sin6_addr)
116
117#define AF_BITS(af) (((af) == AF_INET) ? 32 : 128)
118#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
119#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
120#define KENTRY_RNF_ROOT(ke) \
121 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
122
123#define NO_ADDRESSES (-1)
124#define ENQUEUE_UNMARKED_ONLY (1)
125#define INVERT_NEG_FLAG (1)
126
127struct pfr_walktree {
128 enum pfrw_op {
129 PFRW_MARK,
130 PFRW_SWEEP,
131 PFRW_ENQUEUE,
132 PFRW_GET_ADDRS,
133 PFRW_GET_ASTATS,
134 PFRW_POOL_GET,
135 PFRW_DYNADDR_UPDATE
136 } pfrw_op;
137 union {
138 user_addr_t pfrw1_addr;
139 user_addr_t pfrw1_astats;
140 struct pfr_kentryworkq *pfrw1_workq;
141 struct pfr_kentry *pfrw1_kentry;
142 struct pfi_dynaddr *pfrw1_dyn;
143 } pfrw_1;
144 int pfrw_free;
145 int pfrw_flags;
146};
147#define pfrw_addr pfrw_1.pfrw1_addr
148#define pfrw_astats pfrw_1.pfrw1_astats
149#define pfrw_workq pfrw_1.pfrw1_workq
150#define pfrw_kentry pfrw_1.pfrw1_kentry
151#define pfrw_dyn pfrw_1.pfrw1_dyn
152#define pfrw_cnt pfrw_free
153
154#define senderr(e) do { rv = (e); goto _bad; } while (0)
155
156struct pool pfr_ktable_pl;
157struct pool pfr_kentry_pl;
158
159static struct pool pfr_kentry_pl2;
160static struct sockaddr_in pfr_sin;
161static struct sockaddr_in6 pfr_sin6;
162static union sockaddr_union pfr_mask;
163static struct pf_addr pfr_ffaddr;
164
165static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
166static int pfr_validate_addr(struct pfr_addr *);
167static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
168 int *, int);
169static void pfr_mark_addrs(struct pfr_ktable *);
170static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
171 struct pfr_addr *, int);
172static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
173static void pfr_destroy_kentries(struct pfr_kentryworkq *);
174static void pfr_destroy_kentry(struct pfr_kentry *);
175static void pfr_insert_kentries(struct pfr_ktable *,
176 struct pfr_kentryworkq *, u_int64_t);
177static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
178static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
179static void pfr_reset_feedback(user_addr_t, int, int);
180static void pfr_prepare_network(union sockaddr_union *, int, int);
181static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
182static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
183static int pfr_walktree(struct radix_node *, void *);
184static int pfr_validate_table(struct pfr_table *, int, int);
185static int pfr_fix_anchor(char *);
186static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
187static void pfr_insert_ktables(struct pfr_ktableworkq *);
188static void pfr_insert_ktable(struct pfr_ktable *);
189static void pfr_setflags_ktables(struct pfr_ktableworkq *);
190static void pfr_setflags_ktable(struct pfr_ktable *, int);
191static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
192static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
193static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
194static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
195static void pfr_destroy_ktable(struct pfr_ktable *, int);
196static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
197static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
198static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
199static int pfr_table_count(struct pfr_table *, int);
200static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
201static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
202
203RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
204 pfr_ktable_compare);
205RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
206
207static struct pfr_ktablehead pfr_ktables;
208static struct pfr_table pfr_nulltable;
209static int pfr_ktable_cnt;
210
211void
212pfr_initialize(void)
213{
214 pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0,
215 "pfrktable", NULL);
216 pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0,
217 "pfrkentry", NULL);
218 pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0,
219 "pfrkentry2", NULL);
220
221 pfr_sin.sin_len = sizeof (pfr_sin);
222 pfr_sin.sin_family = AF_INET;
223 pfr_sin6.sin6_len = sizeof (pfr_sin6);
224 pfr_sin6.sin6_family = AF_INET6;
225
226 memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr));
227}
228
229#if 0
230void
231pfr_destroy(void)
232{
233 pool_destroy(&pfr_ktable_pl);
234 pool_destroy(&pfr_kentry_pl);
235 pool_destroy(&pfr_kentry_pl2);
236}
237#endif
238
239int
240pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
241{
242 struct pfr_ktable *kt;
243 struct pfr_kentryworkq workq;
244
245 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
246 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
247 return (EINVAL);
248 kt = pfr_lookup_table(tbl);
249 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
250 return (ESRCH);
251 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
252 return (EPERM);
253 pfr_enqueue_addrs(kt, &workq, ndel, 0);
254
255 if (!(flags & PFR_FLAG_DUMMY)) {
256 pfr_remove_kentries(kt, &workq);
257 if (kt->pfrkt_cnt) {
258 printf("pfr_clr_addrs: corruption detected (%d).\n",
259 kt->pfrkt_cnt);
260 kt->pfrkt_cnt = 0;
261 }
262 }
263 return (0);
264}
265
266int
267pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
268 int *nadd, int flags)
269{
270 struct pfr_ktable *kt, *tmpkt;
271 struct pfr_kentryworkq workq;
272 struct pfr_kentry *p, *q;
273 struct pfr_addr ad;
274 int i, rv, xadd = 0;
275 user_addr_t addr = _addr;
276 u_int64_t tzero = pf_calendar_time_second();
277
278 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
279 PFR_FLAG_FEEDBACK);
280 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
281 return (EINVAL);
282 kt = pfr_lookup_table(tbl);
283 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
284 return (ESRCH);
285 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
286 return (EPERM);
287 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
288 if (tmpkt == NULL)
289 return (ENOMEM);
290 SLIST_INIT(&workq);
291 for (i = 0; i < size; i++, addr += sizeof (ad)) {
292 if (COPYIN(addr, &ad, sizeof (ad), flags))
293 senderr(EFAULT);
294 if (pfr_validate_addr(&ad))
295 senderr(EINVAL);
296 p = pfr_lookup_addr(kt, &ad, 1);
297 q = pfr_lookup_addr(tmpkt, &ad, 1);
298 if (flags & PFR_FLAG_FEEDBACK) {
299 if (q != NULL)
300 ad.pfra_fback = PFR_FB_DUPLICATE;
301 else if (p == NULL)
302 ad.pfra_fback = PFR_FB_ADDED;
303 else if (p->pfrke_not != ad.pfra_not)
304 ad.pfra_fback = PFR_FB_CONFLICT;
305 else
306 ad.pfra_fback = PFR_FB_NONE;
307 }
308 if (p == NULL && q == NULL) {
309 p = pfr_create_kentry(&ad,
310 !(flags & PFR_FLAG_USERIOCTL));
311 if (p == NULL)
312 senderr(ENOMEM);
313 if (pfr_route_kentry(tmpkt, p)) {
314 pfr_destroy_kentry(p);
315 ad.pfra_fback = PFR_FB_NONE;
316 } else {
317 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
318 xadd++;
319 }
320 }
321 if (flags & PFR_FLAG_FEEDBACK)
322 if (COPYOUT(&ad, addr, sizeof (ad), flags))
323 senderr(EFAULT);
324 }
325 pfr_clean_node_mask(tmpkt, &workq);
326 if (!(flags & PFR_FLAG_DUMMY)) {
327 pfr_insert_kentries(kt, &workq, tzero);
328 } else
329 pfr_destroy_kentries(&workq);
330 if (nadd != NULL)
331 *nadd = xadd;
332 pfr_destroy_ktable(tmpkt, 0);
333 return (0);
334_bad:
335 pfr_clean_node_mask(tmpkt, &workq);
336 pfr_destroy_kentries(&workq);
337 if (flags & PFR_FLAG_FEEDBACK)
338 pfr_reset_feedback(_addr, size, flags);
339 pfr_destroy_ktable(tmpkt, 0);
340 return (rv);
341}
342
343int
344pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
345 int *ndel, int flags)
346{
347 struct pfr_ktable *kt;
348 struct pfr_kentryworkq workq;
349 struct pfr_kentry *p;
350 struct pfr_addr ad;
351 user_addr_t addr = _addr;
352 int i, rv, xdel = 0, log = 1;
353
354 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
355 PFR_FLAG_FEEDBACK);
356 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
357 return (EINVAL);
358 kt = pfr_lookup_table(tbl);
359 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
360 return (ESRCH);
361 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
362 return (EPERM);
363 /*
364 * there are two algorithms to choose from here.
365 * with:
366 * n: number of addresses to delete
367 * N: number of addresses in the table
368 *
369 * one is O(N) and is better for large 'n'
370 * one is O(n*LOG(N)) and is better for small 'n'
371 *
372 * the following code tries to decide which one is best.
373 */
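
With log initialized to 1, the loop that follows computes log = floor(log2(N)) + 2
for a table of N entries, so the O(N) full scan is chosen once n exceeds roughly
N/log2(N). As a worked example: N = 65536 gives log = 18, so deleting more than
65536/18 (about 3640) addresses takes the full-scan path.
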
374 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
375 log++;
376 if (size > kt->pfrkt_cnt/log) {
377 /* full table scan */
378 pfr_mark_addrs(kt);
379 } else {
380 /* iterate over addresses to delete */
381 for (i = 0; i < size; i++, addr += sizeof (ad)) {
382 if (COPYIN(addr, &ad, sizeof (ad), flags))
383 return (EFAULT);
384 if (pfr_validate_addr(&ad))
385 return (EINVAL);
386 p = pfr_lookup_addr(kt, &ad, 1);
387 if (p != NULL)
388 p->pfrke_mark = 0;
389 }
390 }
391 SLIST_INIT(&workq);
392 for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
393 if (COPYIN(addr, &ad, sizeof (ad), flags))
394 senderr(EFAULT);
395 if (pfr_validate_addr(&ad))
396 senderr(EINVAL);
397 p = pfr_lookup_addr(kt, &ad, 1);
398 if (flags & PFR_FLAG_FEEDBACK) {
399 if (p == NULL)
400 ad.pfra_fback = PFR_FB_NONE;
401 else if (p->pfrke_not != ad.pfra_not)
402 ad.pfra_fback = PFR_FB_CONFLICT;
403 else if (p->pfrke_mark)
404 ad.pfra_fback = PFR_FB_DUPLICATE;
405 else
406 ad.pfra_fback = PFR_FB_DELETED;
407 }
408 if (p != NULL && p->pfrke_not == ad.pfra_not &&
409 !p->pfrke_mark) {
410 p->pfrke_mark = 1;
411 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
412 xdel++;
413 }
414 if (flags & PFR_FLAG_FEEDBACK)
415 if (COPYOUT(&ad, addr, sizeof (ad), flags))
416 senderr(EFAULT);
417 }
418 if (!(flags & PFR_FLAG_DUMMY)) {
419 pfr_remove_kentries(kt, &workq);
420 }
421 if (ndel != NULL)
422 *ndel = xdel;
423 return (0);
424_bad:
425 if (flags & PFR_FLAG_FEEDBACK)
426 pfr_reset_feedback(_addr, size, flags);
427 return (rv);
428}
429
430int
431pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
432 int *size2, int *nadd, int *ndel, int *nchange, int flags,
433 u_int32_t ignore_pfrt_flags)
434{
435 struct pfr_ktable *kt, *tmpkt;
436 struct pfr_kentryworkq addq, delq, changeq;
437 struct pfr_kentry *p, *q;
438 struct pfr_addr ad;
439 user_addr_t addr = _addr;
440 int i, rv, xadd = 0, xdel = 0, xchange = 0;
441 u_int64_t tzero = pf_calendar_time_second();
442
443 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
444 PFR_FLAG_FEEDBACK);
445 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
446 PFR_FLAG_USERIOCTL))
447 return (EINVAL);
448 kt = pfr_lookup_table(tbl);
449 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
450 return (ESRCH);
451 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
452 return (EPERM);
453 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
454 if (tmpkt == NULL)
455 return (ENOMEM);
456 pfr_mark_addrs(kt);
457 SLIST_INIT(&addq);
458 SLIST_INIT(&delq);
459 SLIST_INIT(&changeq);
460 for (i = 0; i < size; i++, addr += sizeof (ad)) {
461 if (COPYIN(addr, &ad, sizeof (ad), flags))
462 senderr(EFAULT);
463 if (pfr_validate_addr(&ad))
464 senderr(EINVAL);
465 ad.pfra_fback = PFR_FB_NONE;
466 p = pfr_lookup_addr(kt, &ad, 1);
467 if (p != NULL) {
468 if (p->pfrke_mark) {
469 ad.pfra_fback = PFR_FB_DUPLICATE;
470 goto _skip;
471 }
472 p->pfrke_mark = 1;
473 if (p->pfrke_not != ad.pfra_not) {
474 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
475 ad.pfra_fback = PFR_FB_CHANGED;
476 xchange++;
477 }
478 } else {
479 q = pfr_lookup_addr(tmpkt, &ad, 1);
480 if (q != NULL) {
481 ad.pfra_fback = PFR_FB_DUPLICATE;
482 goto _skip;
483 }
484 p = pfr_create_kentry(&ad,
485 !(flags & PFR_FLAG_USERIOCTL));
486 if (p == NULL)
487 senderr(ENOMEM);
488 if (pfr_route_kentry(tmpkt, p)) {
489 pfr_destroy_kentry(p);
490 ad.pfra_fback = PFR_FB_NONE;
491 } else {
492 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
493 ad.pfra_fback = PFR_FB_ADDED;
494 xadd++;
495 }
496 }
497_skip:
498 if (flags & PFR_FLAG_FEEDBACK)
499 if (COPYOUT(&ad, addr, sizeof (ad), flags))
500 senderr(EFAULT);
501 }
502 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
503 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
504 if (*size2 < size+xdel) {
505 *size2 = size+xdel;
506 senderr(0);
507 }
508 i = 0;
509 addr = _addr + size;
510 SLIST_FOREACH(p, &delq, pfrke_workq) {
511 pfr_copyout_addr(&ad, p);
512 ad.pfra_fback = PFR_FB_DELETED;
513 if (COPYOUT(&ad, addr, sizeof (ad), flags))
514 senderr(EFAULT);
515 addr += sizeof (ad);
516 i++;
517 }
518 }
519 pfr_clean_node_mask(tmpkt, &addq);
520 if (!(flags & PFR_FLAG_DUMMY)) {
521 pfr_insert_kentries(kt, &addq, tzero);
522 pfr_remove_kentries(kt, &delq);
523 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
524 } else
525 pfr_destroy_kentries(&addq);
526 if (nadd != NULL)
527 *nadd = xadd;
528 if (ndel != NULL)
529 *ndel = xdel;
530 if (nchange != NULL)
531 *nchange = xchange;
532 if ((flags & PFR_FLAG_FEEDBACK) && size2)
533 *size2 = size+xdel;
534 pfr_destroy_ktable(tmpkt, 0);
535 return (0);
536_bad:
537 pfr_clean_node_mask(tmpkt, &addq);
538 pfr_destroy_kentries(&addq);
539 if (flags & PFR_FLAG_FEEDBACK)
540 pfr_reset_feedback(_addr, size, flags);
541 pfr_destroy_ktable(tmpkt, 0);
542 return (rv);
543}
544
545int
546pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
547 int *nmatch, int flags)
548{
549 struct pfr_ktable *kt;
550 struct pfr_kentry *p;
551 struct pfr_addr ad;
552 int i, xmatch = 0;
553
554 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
555 if (pfr_validate_table(tbl, 0, 0))
556 return (EINVAL);
557 kt = pfr_lookup_table(tbl);
558 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
559 return (ESRCH);
560
561 for (i = 0; i < size; i++, addr += sizeof (ad)) {
562 if (COPYIN(addr, &ad, sizeof (ad), flags))
563 return (EFAULT);
564 if (pfr_validate_addr(&ad))
565 return (EINVAL);
566 if (ADDR_NETWORK(&ad))
567 return (EINVAL);
568 p = pfr_lookup_addr(kt, &ad, 0);
569 if (flags & PFR_FLAG_REPLACE)
570 pfr_copyout_addr(&ad, p);
571 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
572 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
573 if (p != NULL && !p->pfrke_not)
574 xmatch++;
575 if (COPYOUT(&ad, addr, sizeof (ad), flags))
576 return (EFAULT);
577 }
578 if (nmatch != NULL)
579 *nmatch = xmatch;
580 return (0);
581}
582
583int
584pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
585 int flags)
586{
587 struct pfr_ktable *kt;
588 struct pfr_walktree w;
589 int rv;
590
591 ACCEPT_FLAGS(flags, 0);
592 if (pfr_validate_table(tbl, 0, 0))
593 return (EINVAL);
594 kt = pfr_lookup_table(tbl);
595 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
596 return (ESRCH);
597 if (kt->pfrkt_cnt > *size) {
598 *size = kt->pfrkt_cnt;
599 return (0);
600 }
601
602 bzero(&w, sizeof (w));
603 w.pfrw_op = PFRW_GET_ADDRS;
604 w.pfrw_addr = addr;
605 w.pfrw_free = kt->pfrkt_cnt;
606 w.pfrw_flags = flags;
607 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
608 if (!rv)
609 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
610 pfr_walktree, &w);
611 if (rv)
612 return (rv);
613
614 if (w.pfrw_free) {
615 printf("pfr_get_addrs: corruption detected (%d).\n",
616 w.pfrw_free);
617 return (ENOTTY);
618 }
619 *size = kt->pfrkt_cnt;
620 return (0);
621}
622
623int
624pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
625 int flags)
626{
627 struct pfr_ktable *kt;
628 struct pfr_walktree w;
629 struct pfr_kentryworkq workq;
630 int rv;
631 u_int64_t tzero = pf_calendar_time_second();
632
633 /* XXX PFR_FLAG_CLSTATS disabled */
634 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
635 if (pfr_validate_table(tbl, 0, 0))
636 return (EINVAL);
637 kt = pfr_lookup_table(tbl);
638 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
639 return (ESRCH);
640 if (kt->pfrkt_cnt > *size) {
641 *size = kt->pfrkt_cnt;
642 return (0);
643 }
644
645 bzero(&w, sizeof (w));
646 w.pfrw_op = PFRW_GET_ASTATS;
647 w.pfrw_astats = addr;
648 w.pfrw_free = kt->pfrkt_cnt;
649 w.pfrw_flags = flags;
650 rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
651 if (!rv)
652 rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
653 pfr_walktree, &w);
654 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
655 pfr_enqueue_addrs(kt, &workq, NULL, 0);
656 pfr_clstats_kentries(&workq, tzero, 0);
657 }
658 if (rv)
659 return (rv);
660
661 if (w.pfrw_free) {
662 printf("pfr_get_astats: corruption detected (%d).\n",
663 w.pfrw_free);
664 return (ENOTTY);
665 }
666 *size = kt->pfrkt_cnt;
667 return (0);
668}
669
670int
671pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
672 int *nzero, int flags)
673{
674 struct pfr_ktable *kt;
675 struct pfr_kentryworkq workq;
676 struct pfr_kentry *p;
677 struct pfr_addr ad;
678 user_addr_t addr = _addr;
679 int i, rv, xzero = 0;
680
681 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
682 PFR_FLAG_FEEDBACK);
683 if (pfr_validate_table(tbl, 0, 0))
684 return (EINVAL);
685 kt = pfr_lookup_table(tbl);
686 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
687 return (ESRCH);
688 SLIST_INIT(&workq);
689 for (i = 0; i < size; i++, addr += sizeof (ad)) {
690 if (COPYIN(addr, &ad, sizeof (ad), flags))
691 senderr(EFAULT);
692 if (pfr_validate_addr(&ad))
693 senderr(EINVAL);
694 p = pfr_lookup_addr(kt, &ad, 1);
695 if (flags & PFR_FLAG_FEEDBACK) {
696 ad.pfra_fback = (p != NULL) ?
697 PFR_FB_CLEARED : PFR_FB_NONE;
698 if (COPYOUT(&ad, addr, sizeof (ad), flags))
699 senderr(EFAULT);
700 }
701 if (p != NULL) {
702 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
703 xzero++;
704 }
705 }
706
707 if (!(flags & PFR_FLAG_DUMMY)) {
708 pfr_clstats_kentries(&workq, 0, 0);
709 }
710 if (nzero != NULL)
711 *nzero = xzero;
712 return (0);
713_bad:
714 if (flags & PFR_FLAG_FEEDBACK)
715 pfr_reset_feedback(_addr, size, flags);
716 return (rv);
717}
718
719static int
720pfr_validate_addr(struct pfr_addr *ad)
721{
722 int i;
723
724 switch (ad->pfra_af) {
725#if INET
726 case AF_INET:
727 if (ad->pfra_net > 32)
728 return (-1);
729 break;
730#endif /* INET */
731#if INET6
732 case AF_INET6:
733 if (ad->pfra_net > 128)
734 return (-1);
735 break;
736#endif /* INET6 */
737 default:
738 return (-1);
739 }
740 if (ad->pfra_net < 128 &&
741 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
742 return (-1);
743 for (i = (ad->pfra_net+7)/8; i < (int)sizeof (ad->pfra_u); i++)
744 if (((caddr_t)ad)[i])
745 return (-1);
746 if (ad->pfra_not && ad->pfra_not != 1)
747 return (-1);
748 if (ad->pfra_fback)
749 return (-1);
750 return (0);
751}
752
753static void
754pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
755 int *naddr, int sweep)
756{
757 struct pfr_walktree w;
758
759 SLIST_INIT(workq);
760 bzero(&w, sizeof (w));
761 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
762 w.pfrw_workq = workq;
763 if (kt->pfrkt_ip4 != NULL)
764 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
765 pfr_walktree, &w))
766 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
767 if (kt->pfrkt_ip6 != NULL)
768 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
769 pfr_walktree, &w))
770 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
771 if (naddr != NULL)
772 *naddr = w.pfrw_cnt;
773}
774
775static void
776pfr_mark_addrs(struct pfr_ktable *kt)
777{
778 struct pfr_walktree w;
779
780 bzero(&w, sizeof (w));
781 w.pfrw_op = PFRW_MARK;
782 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
783 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
784 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
785 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
786}
787
788
789static struct pfr_kentry *
790pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
791{
792 union sockaddr_union sa, mask;
793 struct radix_node_head *head;
794 struct pfr_kentry *ke;
795
796 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
797
798 bzero(&sa, sizeof (sa));
799 if (ad->pfra_af == AF_INET) {
800 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
801 head = kt->pfrkt_ip4;
802 } else if (ad->pfra_af == AF_INET6) {
803 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
804 head = kt->pfrkt_ip6;
805 }
806 else
807 return NULL;
808 if (ADDR_NETWORK(ad)) {
809 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
810 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
811 if (ke && KENTRY_RNF_ROOT(ke))
812 ke = NULL;
813 } else {
814 ke = (struct pfr_kentry *)rn_match(&sa, head);
815 if (ke && KENTRY_RNF_ROOT(ke))
816 ke = NULL;
817 if (exact && ke && KENTRY_NETWORK(ke))
818 ke = NULL;
819 }
820 return (ke);
821}
822
823static struct pfr_kentry *
824pfr_create_kentry(struct pfr_addr *ad, int intr)
825{
826 struct pfr_kentry *ke;
827
828 if (intr)
829 ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
830 else
831 ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
832 if (ke == NULL)
833 return (NULL);
834 bzero(ke, sizeof (*ke));
835
836 if (ad->pfra_af == AF_INET)
837 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
838 else if (ad->pfra_af == AF_INET6)
839 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
840 ke->pfrke_af = ad->pfra_af;
841 ke->pfrke_net = ad->pfra_net;
842 ke->pfrke_not = ad->pfra_not;
843 ke->pfrke_intrpool = intr;
844 return (ke);
845}
846
847static void
848pfr_destroy_kentries(struct pfr_kentryworkq *workq)
849{
850 struct pfr_kentry *p, *q;
851
852 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
853 q = SLIST_NEXT(p, pfrke_workq);
854 pfr_destroy_kentry(p);
855 }
856}
857
858static void
859pfr_destroy_kentry(struct pfr_kentry *ke)
860{
861 if (ke->pfrke_intrpool)
862 pool_put(&pfr_kentry_pl2, ke);
863 else
864 pool_put(&pfr_kentry_pl, ke);
865}
866
867static void
868pfr_insert_kentries(struct pfr_ktable *kt,
869 struct pfr_kentryworkq *workq, u_int64_t tzero)
870{
871 struct pfr_kentry *p;
872 int rv, n = 0;
873
874 SLIST_FOREACH(p, workq, pfrke_workq) {
875 rv = pfr_route_kentry(kt, p);
876 if (rv) {
877 printf("pfr_insert_kentries: cannot route entry "
878 "(code=%d).\n", rv);
879 break;
880 }
881 p->pfrke_tzero = tzero;
882 n++;
883 }
884 kt->pfrkt_cnt += n;
885}
886
887int
888pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
889{
890 struct pfr_kentry *p;
891 int rv;
892
893 p = pfr_lookup_addr(kt, ad, 1);
894 if (p != NULL)
895 return (0);
896 p = pfr_create_kentry(ad, 1);
897 if (p == NULL)
898 return (EINVAL);
899
900 rv = pfr_route_kentry(kt, p);
901 if (rv)
902 return (rv);
903
904 p->pfrke_tzero = tzero;
905 kt->pfrkt_cnt++;
906
907 return (0);
908}
909
910static void
911pfr_remove_kentries(struct pfr_ktable *kt,
912 struct pfr_kentryworkq *workq)
913{
914 struct pfr_kentry *p;
915 int n = 0;
916
917 SLIST_FOREACH(p, workq, pfrke_workq) {
918 pfr_unroute_kentry(kt, p);
919 n++;
920 }
921 kt->pfrkt_cnt -= n;
922 pfr_destroy_kentries(workq);
923}
924
925static void
926pfr_clean_node_mask(struct pfr_ktable *kt,
927 struct pfr_kentryworkq *workq)
928{
929 struct pfr_kentry *p;
930
931 SLIST_FOREACH(p, workq, pfrke_workq)
932 pfr_unroute_kentry(kt, p);
933}
934
935static void
936pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
937 int negchange)
938{
939 struct pfr_kentry *p;
940
941 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
942
943 SLIST_FOREACH(p, workq, pfrke_workq) {
944 if (negchange)
945 p->pfrke_not = !p->pfrke_not;
946 bzero(p->pfrke_packets, sizeof (p->pfrke_packets));
947 bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes));
948 p->pfrke_tzero = tzero;
949 }
950}
951
952static void
953pfr_reset_feedback(user_addr_t addr, int size, int flags)
954{
955 struct pfr_addr ad;
956 int i;
957
958 for (i = 0; i < size; i++, addr += sizeof (ad)) {
959 if (COPYIN(addr, &ad, sizeof (ad), flags))
960 break;
961 ad.pfra_fback = PFR_FB_NONE;
962 if (COPYOUT(&ad, addr, sizeof (ad), flags))
963 break;
964 }
965}
966
967static void
968pfr_prepare_network(union sockaddr_union *sa, int af, int net)
969{
970 int i;
971
972 bzero(sa, sizeof (*sa));
973 if (af == AF_INET) {
974 sa->sin.sin_len = sizeof (sa->sin);
975 sa->sin.sin_family = AF_INET;
976 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
977 } else if (af == AF_INET6) {
978 sa->sin6.sin6_len = sizeof (sa->sin6);
979 sa->sin6.sin6_family = AF_INET6;
980 for (i = 0; i < 4; i++) {
981 if (net <= 32) {
982 sa->sin6.sin6_addr.s6_addr32[i] =
983 net ? htonl(-1 << (32-net)) : 0;
984 break;
985 }
986 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
987 net -= 32;
988 }
989 }
990}
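
As a concrete check of the mask construction above: af = AF_INET with net = 24
yields sin_addr.s_addr = htonl(0xFFFFFF00), i.e. a 255.255.255.0 mask; af =
AF_INET6 with net = 40 fills s6_addr32[0] with 0xFFFFFFFF and s6_addr32[1] with
htonl(0xFF000000), the remaining words staying zero from the initial bzero.
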
991
992static int
993pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
994{
995 union sockaddr_union mask;
996 struct radix_node *rn;
997 struct radix_node_head *head;
998
999 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1000
1001 bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
1002 if (ke->pfrke_af == AF_INET)
1003 head = kt->pfrkt_ip4;
1004 else if (ke->pfrke_af == AF_INET6)
1005 head = kt->pfrkt_ip6;
1006 else
1007 return (-1);
1008
1009 if (KENTRY_NETWORK(ke)) {
1010 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1011 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1012 } else
1013 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1014
1015 return (rn == NULL ? -1 : 0);
1016}
1017
1018static int
1019pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1020{
1021 union sockaddr_union mask;
1022 struct radix_node *rn;
1023 struct radix_node_head *head;
1024
1025 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1026
1027 if (ke->pfrke_af == AF_INET)
1028 head = kt->pfrkt_ip4;
1029 else if (ke->pfrke_af == AF_INET6)
1030 head = kt->pfrkt_ip6;
1031 else
1032 return (-1);
1033
1034 if (KENTRY_NETWORK(ke)) {
1035 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1036 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1037 } else
1038 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1039
1040 if (rn == NULL) {
1041 printf("pfr_unroute_kentry: delete failed.\n");
1042 return (-1);
1043 }
1044 return (0);
1045}
1046
1047static void
1048pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1049{
1050 bzero(ad, sizeof (*ad));
1051 if (ke == NULL)
1052 return;
1053 ad->pfra_af = ke->pfrke_af;
1054 ad->pfra_net = ke->pfrke_net;
1055 ad->pfra_not = ke->pfrke_not;
1056 if (ad->pfra_af == AF_INET)
1057 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1058 else if (ad->pfra_af == AF_INET6)
1059 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1060}
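/*
 * Annotation (not in the original source): the bzero() at line 1050
 * clears every byte of the caller's struct pfr_addr, padding included,
 * which is presumably why the copyout() of "ad" at line 1088 below is
 * not flagged by the analyzer while the pfr_astats copy at line 1110 is.
 */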
1061
1062static int
1063pfr_walktree(struct radix_node *rn, void *arg)
1064{
1065 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1066 struct pfr_walktree *w = arg;
1067 int flags = w->pfrw_flags;
1068
1069 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1070
1071 switch (w->pfrw_op) {
[1] Control jumps to 'case PFRW_GET_ASTATS:' at line 1093
1072 case PFRW_MARK:
1073 ke->pfrke_mark = 0;
1074 break;
1075 case PFRW_SWEEP:
1076 if (ke->pfrke_mark)
1077 break;
1078 /* FALLTHROUGH */
1079 case PFRW_ENQUEUE:
1080 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1081 w->pfrw_cnt++;
1082 break;
1083 case PFRW_GET_ADDRS:
1084 if (w->pfrw_free-- > 0) {
1085 struct pfr_addr ad;
1086
1087 pfr_copyout_addr(&ad, ke);
1088 if (copyout(&ad, w->pfrw_addr, sizeof (ad)))
1089 return (EFAULT);
1090 w->pfrw_addr += sizeof (ad);
1091 }
1092 break;
1093 case PFRW_GET_ASTATS:
1094 if (w->pfrw_free-- > 0) {
[2] Taking true branch
1095 struct pfr_astats as;
1096
1097 pfr_copyout_addr(&as.pfras_a, ke);
1098
1099#if !defined(__LP64__)
1100 /* Initialized to avoid potential info leak to
1101 * userspace */
1102 as._pad = 0;
1103#endif
1104 bcopy(ke->pfrke_packets, as.pfras_packets,
1105 sizeof (as.pfras_packets));
1106 bcopy(ke->pfrke_bytes, as.pfras_bytes,
1107 sizeof (as.pfras_bytes));
1108 as.pfras_tzero = ke->pfrke_tzero;
1109
1110 if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
[3] Within the expansion of the macro 'COPYOUT':
[a] Copies out a struct with uncleared padding (>= 4 bytes)
1111 return (EFAULT);
1112 w->pfrw_astats += sizeof (as);
1113 }
1114 break;
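/*
 * Reviewer sketch (not part of the original source): the report at line
 * 1110 concerns compiler-inserted padding inside struct pfr_astats on
 * LP64, where the !__LP64__ guard above leaves the padding bytes
 * uninitialized before they are copied out to user space. One hedged,
 * minimal fix is to clear the whole struct before populating it, so
 * every padding byte is zero on any data model:
 */
#if 0	/* illustrative sketch only */
	bzero(&as, sizeof (as));		/* zeroes all padding bytes */
	pfr_copyout_addr(&as.pfras_a, ke);	/* then fill in as before */
#endif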
1115 case PFRW_POOL_GET:
1116 if (ke->pfrke_not)
1117 break; /* negative entries are ignored */
1118 if (!w->pfrw_cnt--) {
1119 w->pfrw_kentry = ke;
1120 return (1); /* finish search */
1121 }
1122 break;
1123 case PFRW_DYNADDR_UPDATE:
1124 if (ke->pfrke_af == AF_INET) {
1125 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1126 break;
1127 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1128 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1129 &ke->pfrke_sa, AF_INET);
1130 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1131 &pfr_mask, AF_INET);
1132 } else if (ke->pfrke_af == AF_INET6) {
1133 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1134 break;
1135 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1136 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1137 &ke->pfrke_sa, AF_INET6);
1138 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1139 &pfr_mask, AF_INET6);
1140 }
1141 break;
1142 }
1143 return (0);
1144}
1145
1146int
1147pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1148{
1149 struct pfr_ktableworkq workq;
1150 struct pfr_ktable *p;
1151 int xdel = 0;
1152
1153 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1154
1155 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1156 PFR_FLAG_ALLRSETS);
1157 if (pfr_fix_anchor(filter->pfrt_anchor))
1158 return (EINVAL);
1159 if (pfr_table_count(filter, flags) < 0)
1160 return (ENOENT);
1161
1162 SLIST_INIT(&workq);
1163 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1164 if (pfr_skip_table(filter, p, flags))
1165 continue;
1166 if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0)
1167 continue;
1168 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1169 continue;
1170 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1171 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1172 xdel++;
1173 }
1174 if (!(flags & PFR_FLAG_DUMMY)) {
1175 pfr_setflags_ktables(&workq);
1176 }
1177 if (ndel != NULL)
1178 *ndel = xdel;
1179 return (0);
1180}
1181
1182int
1183pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
1184{
1185 struct pfr_ktableworkq addq, changeq;
1186 struct pfr_ktable *p, *q, *r, key;
1187 int i, rv, xadd = 0;
1188 u_int64_t tzero = pf_calendar_time_second();
1189
1190 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1191
1192 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1193 SLIST_INIT(&addq);
1194 SLIST_INIT(&changeq);
1195 for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
1196 if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
1197 senderr(EFAULT);
1198 pfr_table_copyin_cleanup(&key.pfrkt_t);
1199 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1200 flags & PFR_FLAG_USERIOCTL))
1201 senderr(EINVAL);
1202 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1203 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1204 if (p == NULL) {
1205 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1206 if (p == NULL)
1207 senderr(ENOMEM);
1208 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1209 if (!pfr_ktable_compare(p, q))
1210 goto _skip;
1211 }
1212 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1213 xadd++;
1214 if (!key.pfrkt_anchor[0])
1215 goto _skip;
1216
1217 /* find or create root table */
1218 bzero(key.pfrkt_anchor, sizeof (key.pfrkt_anchor));
1219 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1220 if (r != NULL) {
1221 p->pfrkt_root = r;
1222 goto _skip;
1223 }
1224 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1225 if (!pfr_ktable_compare(&key, q)) {
1226 p->pfrkt_root = q;
1227 goto _skip;
1228 }
1229 }
1230 key.pfrkt_flags = 0;
1231 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1232 if (r == NULL)
1233 senderr(ENOMEM);
1234 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1235 p->pfrkt_root = r;
1236 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1237 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1238 if (!pfr_ktable_compare(&key, q))
1239 goto _skip;
1240 p->pfrkt_nflags = (p->pfrkt_flags &
1241 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1242 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1243 xadd++;
1244 }
1245_skip:
1246 ;
1247 }
1248 if (!(flags & PFR_FLAG_DUMMY)) {
1249 pfr_insert_ktables(&addq);
1250 pfr_setflags_ktables(&changeq);
1251 } else
1252 pfr_destroy_ktables(&addq, 0);
1253 if (nadd != NULL)
1254 *nadd = xadd;
1255 return (0);
1256_bad:
1257 pfr_destroy_ktables(&addq, 0);
1258 return (rv);
1259}
1260
1261int
1262pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
1263{
1264 struct pfr_ktableworkq workq;
1265 struct pfr_ktable *p, *q, key;
1266 int i, xdel = 0;
1267
1268 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1269
1270 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1271 SLIST_INIT(&workq);
1272 for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
1273 if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
1274 return (EFAULT);
1275 pfr_table_copyin_cleanup(&key.pfrkt_t);
1276 if (pfr_validate_table(&key.pfrkt_t, 0,
1277 flags & PFR_FLAG_USERIOCTL))
1278 return (EINVAL);
1279 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1280 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1281 SLIST_FOREACH(q, &workq, pfrkt_workq)
1282 if (!pfr_ktable_compare(p, q))
1283 goto _skip;
1284 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1285 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1286 xdel++;
1287 }
1288_skip:
1289 ;
1290 }
1291
1292 if (!(flags & PFR_FLAG_DUMMY)) {
1293 pfr_setflags_ktables(&workq);
1294 }
1295 if (ndel != NULL)
1296 *ndel = xdel;
1297 return (0);
1298}
1299
1300int
1301pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
1302 int flags)
1303{
1304 struct pfr_ktable *p;
1305 int n, nn;
1306
1307 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1308 if (pfr_fix_anchor(filter->pfrt_anchor))
1309 return (EINVAL);
1310 n = nn = pfr_table_count(filter, flags);
1311 if (n < 0)
1312 return (ENOENT);
1313 if (n > *size) {
1314 *size = n;
1315 return (0);
1316 }
1317 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1318 if (pfr_skip_table(filter, p, flags))
1319 continue;
1320 if (n-- <= 0)
1321 continue;
1322 if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags))
1323 return (EFAULT);
1324 tbl += sizeof (p->pfrkt_t);
1325 }
1326 if (n) {
1327 printf("pfr_get_tables: corruption detected (%d).\n", n);
1328 return (ENOTTY);
1329 }
1330 *size = nn;
1331 return (0);
1332}
1333
1334int
1335pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
1336 int flags)
1337{
1338 struct pfr_ktable *p;
1339 struct pfr_ktableworkq workq;
1340 int n, nn;
1341 u_int64_t tzero = pf_calendar_time_second();
1342
1343 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1344
1345 /* XXX PFR_FLAG_CLSTATS disabled */
1346 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
1347 if (pfr_fix_anchor(filter->pfrt_anchor))
1348 return (EINVAL);
1349 n = nn = pfr_table_count(filter, flags);
1350 if (n < 0)
1351 return (ENOENT);
1352 if (n > *size) {
1353 *size = n;
1354 return (0);
1355 }
1356 SLIST_INIT(&workq);
1357 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1358 if (pfr_skip_table(filter, p, flags))
1359 continue;
1360 if (n-- <= 0)
1361 continue;
1362 if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) {
1363 return (EFAULT);
1364 }
1365 tbl += sizeof (p->pfrkt_ts);
1366 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1367 }
1368 if (flags & PFR_FLAG_CLSTATS)
1369 pfr_clstats_ktables(&workq, tzero,
1370 flags & PFR_FLAG_ADDRSTOO);
1371 if (n) {
1372 printf("pfr_get_tstats: corruption detected (%d).\n", n);
1373 return (ENOTTY);
1374 }
1375 *size = nn;
1376 return (0);
1377}
1378
1379int
1380pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
1381{
1382 struct pfr_ktableworkq workq;
1383 struct pfr_ktable *p, key;
1384 int i, xzero = 0;
1385 u_int64_t tzero = pf_calendar_time_second();
1386
1387 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1388
1389 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1390 PFR_FLAG_ADDRSTOO);
1391 SLIST_INIT(&workq);
1392 for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
1393 if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
1394 return (EFAULT);
1395 pfr_table_copyin_cleanup(&key.pfrkt_t);
1396 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1397 return (EINVAL);
1398 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1399 if (p != NULL) {
1400 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1401 xzero++;
1402 }
1403 }
1404 if (!(flags & PFR_FLAG_DUMMY)) {
1405 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1406 }
1407 if (nzero != NULL)
1408 *nzero = xzero;
1409 return (0);
1410}
1411
1412int
1413pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
1414 int *nchange, int *ndel, int flags)
1415{
1416 struct pfr_ktableworkq workq;
1417 struct pfr_ktable *p, *q, key;
1418 int i, xchange = 0, xdel = 0;
1419
1420 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1421
1422 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1423 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1424 (clrflag & ~PFR_TFLAG_USRMASK) ||
1425 (setflag & clrflag))
1426 return (EINVAL);
1427 SLIST_INIT(&workq);
1428 for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
1429 if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
1430 return (EFAULT);
1431 pfr_table_copyin_cleanup(&key.pfrkt_t);
1432 if (pfr_validate_table(&key.pfrkt_t, 0,
1433 flags & PFR_FLAG_USERIOCTL))
1434 return (EINVAL);
1435 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1436 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1437 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1438 ~clrflag;
1439 if (p->pfrkt_nflags == p->pfrkt_flags)
1440 goto _skip;
1441 SLIST_FOREACH(q, &workq, pfrkt_workq)
1442 if (!pfr_ktable_compare(p, q))
1443 goto _skip;
1444 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1445 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1446 (clrflag & PFR_TFLAG_PERSIST) &&
1447 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1448 xdel++;
1449 else
1450 xchange++;
1451 }
1452_skip:
1453 ;
1454 }
1455 if (!(flags & PFR_FLAG_DUMMY)) {
1456 pfr_setflags_ktables(&workq);
1457 }
1458 if (nchange != NULL)
1459 *nchange = xchange;
1460 if (ndel != NULL)
1461 *ndel = xdel;
1462 return (0);
1463}
1464
1465int
1466pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1467{
1468 struct pfr_ktableworkq workq;
1469 struct pfr_ktable *p;
1470 struct pf_ruleset *rs;
1471 int xdel = 0;
1472
1473 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1474
1475 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1476 rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1477 if (rs == NULL)
1478 return (ENOMEM);
1479 SLIST_INIT(&workq);
1480 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1481 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1482 pfr_skip_table(trs, p, 0))
1483 continue;
1484 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1485 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1486 xdel++;
1487 }
1488 if (!(flags & PFR_FLAG_DUMMY)) {
1489 pfr_setflags_ktables(&workq);
1490 if (ticket != NULL)
1491 *ticket = ++rs->tticket;
1492 rs->topen = 1;
1493 } else
1494 pf_remove_if_empty_ruleset(rs);
1495 if (ndel != NULL)
1496 *ndel = xdel;
1497 return (0);
1498}
1499
1500int
1501pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
1502 int *nadd, int *naddr, u_int32_t ticket, int flags)
1503{
1504 struct pfr_ktableworkq tableq;
1505 struct pfr_kentryworkq addrq;
1506 struct pfr_ktable *kt, *rt, *shadow, key;
1507 struct pfr_kentry *p;
1508 struct pfr_addr ad;
1509 struct pf_ruleset *rs;
1510 int i, rv, xadd = 0, xaddr = 0;
1511
1512 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1513
1514 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1515 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1516 return (EINVAL);
1517 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1518 flags & PFR_FLAG_USERIOCTL))
1519 return (EINVAL);
1520 rs = pf_find_ruleset(tbl->pfrt_anchor);
1521 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1522 return (EBUSY);
1523 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1524 SLIST_INIT(&tableq);
1525 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl);
1526 if (kt == NULL) {
1527 kt = pfr_create_ktable(tbl, 0, 1);
1528 if (kt == NULL)
1529 return (ENOMEM);
1530 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1531 xadd++;
1532 if (!tbl->pfrt_anchor[0])
1533 goto _skip;
1534
1535 /* find or create root table */
1536 bzero(&key, sizeof (key));
1537 strlcpy(key.pfrkt_name, tbl->pfrt_name,
1538 sizeof (key.pfrkt_name));
1539 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1540 if (rt != NULL) {
1541 kt->pfrkt_root = rt;
1542 goto _skip;
1543 }
1544 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1545 if (rt == NULL) {
1546 pfr_destroy_ktables(&tableq, 0);
1547 return (ENOMEM);
1548 }
1549 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1550 kt->pfrkt_root = rt;
1551 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1552 xadd++;
1553_skip:
1554 shadow = pfr_create_ktable(tbl, 0, 0);
1555 if (shadow == NULL) {
1556 pfr_destroy_ktables(&tableq, 0);
1557 return (ENOMEM);
1558 }
1559 SLIST_INIT(&addrq);
1560 for (i = 0; i < size; i++, addr += sizeof (ad)) {
1561 if (COPYIN(addr, &ad, sizeof (ad), flags))
1562 senderr(EFAULT);
1563 if (pfr_validate_addr(&ad))
1564 senderr(EINVAL);
1565 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1566 continue;
1567 p = pfr_create_kentry(&ad, 0);
1568 if (p == NULL)
1569 senderr(ENOMEM);
1570 if (pfr_route_kentry(shadow, p)) {
1571 pfr_destroy_kentry(p);
1572 continue;
1573 }
1574 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1575 xaddr++;
1576 }
1577 if (!(flags & PFR_FLAG_DUMMY)) {
1578 if (kt->pfrkt_shadow != NULL)
1579 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1580 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1581 pfr_insert_ktables(&tableq);
1582 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1583 xaddr : NO_ADDRESSES;
1584 kt->pfrkt_shadow = shadow;
1585 } else {
1586 pfr_clean_node_mask(shadow, &addrq);
1587 pfr_destroy_ktable(shadow, 0);
1588 pfr_destroy_ktables(&tableq, 0);
1589 pfr_destroy_kentries(&addrq);
1590 }
1591 if (nadd != NULL)
1592 *nadd = xadd;
1593 if (naddr != NULL)
1594 *naddr = xaddr;
1595 return (0);
1596_bad:
1597 pfr_destroy_ktable(shadow, 0);
1598 pfr_destroy_ktables(&tableq, 0);
1599 pfr_destroy_kentries(&addrq);
1600 return (rv);
1601}
1602
1603int
1604pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1605{
1606 struct pfr_ktableworkq workq;
1607 struct pfr_ktable *p;
1608 struct pf_ruleset *rs;
1609 int xdel = 0;
1610
1611 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1612
1613 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1614 rs = pf_find_ruleset(trs->pfrt_anchor);
1615 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1616 return (0);
1617 SLIST_INIT(&workq);
1618 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1619 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1620 pfr_skip_table(trs, p, 0))
1621 continue;
1622 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1623 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1624 xdel++;
1625 }
1626 if (!(flags & PFR_FLAG_DUMMY)) {
1627 pfr_setflags_ktables(&workq);
1628 rs->topen = 0;
1629 pf_remove_if_empty_ruleset(rs);
1630 }
1631 if (ndel != NULL)
1632 *ndel = xdel;
1633 return (0);
1634}
1635
1636int
1637pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1638 int *nchange, int flags)
1639{
1640 struct pfr_ktable *p, *q;
1641 struct pfr_ktableworkq workq;
1642 struct pf_ruleset *rs;
1643 int xadd = 0, xchange = 0;
1644 u_int64_t tzero = pf_calendar_time_second();
1645
1646 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1647
1648 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1649 rs = pf_find_ruleset(trs->pfrt_anchor);
1650 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1651 return (EBUSY);
1652
1653 SLIST_INIT(&workq);
1654 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1655 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1656 pfr_skip_table(trs, p, 0))
1657 continue;
1658 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1659 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1660 xchange++;
1661 else
1662 xadd++;
1663 }
1664
1665 if (!(flags & PFR_FLAG_DUMMY)) {
1666 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1667 q = SLIST_NEXT(p, pfrkt_workq);
1668 pfr_commit_ktable(p, tzero);
1669 }
1670 rs->topen = 0;
1671 pf_remove_if_empty_ruleset(rs);
1672 }
1673 if (nadd != NULL)
1674 *nadd = xadd;
1675 if (nchange != NULL)
1676 *nchange = xchange;
1677
1678 return (0);
1679}
1680
1681static void
1682pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
1683{
1684 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1685 int nflags;
1686
1687 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1688
1689 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1690 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1691 pfr_clstats_ktable(kt, tzero, 1);
1692 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1693 /* kt might contain addresses */
1694 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1695 struct pfr_kentry *p, *q, *next;
1696 struct pfr_addr ad;
1697
1698 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1699 pfr_mark_addrs(kt);
1700 SLIST_INIT(&addq);
1701 SLIST_INIT(&changeq);
1702 SLIST_INIT(&delq);
1703 SLIST_INIT(&garbageq);
1704 pfr_clean_node_mask(shadow, &addrq);
1705 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1706 next = SLIST_NEXT(p, pfrke_workq); /* XXX */
1707 pfr_copyout_addr(&ad, p);
1708 q = pfr_lookup_addr(kt, &ad, 1);
1709 if (q != NULL) {
1710 if (q->pfrke_not != p->pfrke_not)
1711 SLIST_INSERT_HEAD(&changeq, q,
1712 pfrke_workq);
1713 q->pfrke_mark = 1;
1714 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1715 } else {
1716 p->pfrke_tzero = tzero;
1717 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1718 }
1719 }
1720 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1721 pfr_insert_kentries(kt, &addq, tzero);
1722 pfr_remove_kentries(kt, &delq);
1723 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1724 pfr_destroy_kentries(&garbageq);
1725 } else {
1726 /* kt cannot contain addresses */
1727 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1728 shadow->pfrkt_ip4);
1729 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1730 shadow->pfrkt_ip6);
1731 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1732 pfr_clstats_ktable(kt, tzero, 1);
1733 }
1734 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1735 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
1736 ~PFR_TFLAG_INACTIVE;
1737 pfr_destroy_ktable(shadow, 0);
1738 kt->pfrkt_shadow = NULL;
1739 pfr_setflags_ktable(kt, nflags);
1740}
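/*
 * Annotation (not in the original source): pfr_commit_ktable() merges a
 * shadow table into the live one with a mark-and-sweep pass: live
 * entries are marked, each shadow entry either re-marks a matching live
 * entry (queueing it on changeq when only the negation flag differs) or
 * is queued on addq, and every live entry left unmarked is swept onto
 * delq and removed. When the live table holds no addresses at all, the
 * radix heads and counters are simply swapped with the shadow instead.
 */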
1741
1742void
1743pfr_table_copyin_cleanup(struct pfr_table *tbl)
1744{
1745 tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
1746 tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
1747}
1748
1749static int
1750pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1751{
1752 int i;
1753
1754 if (!tbl->pfrt_name[0])
1755 return (-1);
1756 if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0)
1757 return (-1);
1758 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1759 return (-1);
1760 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1761 if (tbl->pfrt_name[i])
1762 return (-1);
1763 if (pfr_fix_anchor(tbl->pfrt_anchor))
1764 return (-1);
1765 if (tbl->pfrt_flags & ~allowedflags)
1766 return (-1);
1767 return (0);
1768}
1769
1770/*
1771 * Rewrite anchors referenced by tables to remove slashes
1772 * and check for validity.
1773 */
1774static int
1775pfr_fix_anchor(char *anchor)
1776{
1777 size_t siz = MAXPATHLEN;
1778 int i;
1779
1780 if (anchor[0] == '/') {
1781 char *path;
1782 int off;
1783
1784 path = anchor;
1785 off = 1;
1786 while (*++path == '/')
1787 off++;
1788 bcopy(path, anchor, siz - off);
1789 memset(anchor + siz - off, 0, off);
1790 }
1791 if (anchor[siz - 1])
1792 return (-1);
1793 for (i = strlen(anchor); i < (int)siz; i++)
1794 if (anchor[i])
1795 return (-1);
1796 return (0);
1797}
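/*
 * Worked example (annotation, not in the original source): an anchor
 * spelled "/foo/bar" enters with off == 1 after the leading-slash scan,
 * so the bcopy()/memset() pair shifts it left to "foo/bar" and zero
 * fills the tail; the trailing checks then reject any anchor that is
 * not NUL padded out to MAXPATHLEN.
 */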
1798
1799static int
1800pfr_table_count(struct pfr_table *filter, int flags)
1801{
1802 struct pf_ruleset *rs;
1803
1804 if (flags & PFR_FLAG_ALLRSETS)
1805 return (pfr_ktable_cnt);
1806 if (filter->pfrt_anchor[0]) {
1807 rs = pf_find_ruleset(filter->pfrt_anchor);
1808 return ((rs != NULL) ? rs->tables : -1);
1809 }
1810 return (pf_main_ruleset.tables);
1811}
1812
1813static int
1814pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1815{
1816 if (flags & PFR_FLAG_ALLRSETS)
1817 return (0);
1818 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1819 return (1);
1820 return (0);
1821}
1822
1823static void
1824pfr_insert_ktables(struct pfr_ktableworkq *workq)
1825{
1826 struct pfr_ktable *p;
1827
1828 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1829
1830 SLIST_FOREACH(p, workq, pfrkt_workq)
1831 pfr_insert_ktable(p);
1832}
1833
1834static void
1835pfr_insert_ktable(struct pfr_ktable *kt)
1836{
1837 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1838
1839 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1840 pfr_ktable_cnt++;
1841 if (kt->pfrkt_root != NULL)
1842 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1843 pfr_setflags_ktable(kt->pfrkt_root,
1844 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1845}
1846
1847static void
1848pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1849{
1850 struct pfr_ktable *p, *q;
1851
1852 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1853
1854 for (p = SLIST_FIRST(workq); p; p = q) {
1855 q = SLIST_NEXT(p, pfrkt_workq);
1856 pfr_setflags_ktable(p, p->pfrkt_nflags);
1857 }
1858}
1859
1860static void
1861pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1862{
1863 struct pfr_kentryworkq addrq;
1864
1865 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1866
1867 if (!(newf & PFR_TFLAG_REFERENCED) &&
1868 !(newf & PFR_TFLAG_PERSIST))
1869 newf &= ~PFR_TFLAG_ACTIVE;
1870 if (!(newf & PFR_TFLAG_ACTIVE))
1871 newf &= ~PFR_TFLAG_USRMASK;
1872 if (!(newf & PFR_TFLAG_SETMASK)) {
1873 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1874 if (kt->pfrkt_root != NULL)
1875 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1876 pfr_setflags_ktable(kt->pfrkt_root,
1877 kt->pfrkt_root->pfrkt_flags &
1878 ~PFR_TFLAG_REFDANCHOR);
1879 pfr_destroy_ktable(kt, 1);
1880 pfr_ktable_cnt--;
1881 return;
1882 }
1883 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1884 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1885 pfr_remove_kentries(kt, &addrq);
1886 }
1887 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1888 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1889 kt->pfrkt_shadow = NULL;
1890 }
1891 kt->pfrkt_flags = newf;
1892}
1893
1894static void
1895pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
1896{
1897 struct pfr_ktable *p;
1898
1899 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1900
1901 SLIST_FOREACH(p, workq, pfrkt_workq)
1902 pfr_clstats_ktable(p, tzero, recurse);
1903}
1904
1905static void
1906pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
1907{
1908 struct pfr_kentryworkq addrq;
1909
1910 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1911
1912 if (recurse) {
1913 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1914 pfr_clstats_kentries(&addrq, tzero, 0);
1915 }
1916 bzero(kt->pfrkt_packets, sizeof (kt->pfrkt_packets));
1917 bzero(kt->pfrkt_bytes, sizeof (kt->pfrkt_bytes));
1918 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1919 kt->pfrkt_tzero = tzero;
1920}
1921
1922static struct pfr_ktable *
1923pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
1924{
1925 struct pfr_ktable *kt;
1926 struct pf_ruleset *rs;
1927
1928 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1929
1930 kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
1931 if (kt == NULL)
1932 return (NULL);
1933 bzero(kt, sizeof (*kt));
1934 kt->pfrkt_t = *tbl;
1935
1936 if (attachruleset) {
1937 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1938 if (!rs) {
1939 pfr_destroy_ktable(kt, 0);
1940 return (NULL);
1941 }
1942 kt->pfrkt_rs = rs;
1943 rs->tables++;
1944 }
1945
1946 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1947 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1948 !rn_inithead((void **)&kt->pfrkt_ip6,
1949 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1950 pfr_destroy_ktable(kt, 0);
1951 return (NULL);
1952 }
1953 kt->pfrkt_tzero = tzero;
1954
1955 return (kt);
1956}
1957
1958static void
1959pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1960{
1961 struct pfr_ktable *p, *q;
1962
1963 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1964
1965 for (p = SLIST_FIRST(workq); p; p = q) {
1966 q = SLIST_NEXT(p, pfrkt_workq);
1967 pfr_destroy_ktable(p, flushaddr);
1968 }
1969}
1970
1971static void
1972pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1973{
1974 struct pfr_kentryworkq addrq;
1975
1976 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1977
1978 if (flushaddr) {
1979 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1980 pfr_clean_node_mask(kt, &addrq);
1981 pfr_destroy_kentries(&addrq);
1982 }
1983 if (kt->pfrkt_ip4 != NULL)
1984 _FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1985 if (kt->pfrkt_ip6 != NULL)
1986 _FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1987 if (kt->pfrkt_shadow != NULL)
1988 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1989 if (kt->pfrkt_rs != NULL) {
1990 kt->pfrkt_rs->tables--;
1991 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1992 }
1993 pool_put(&pfr_ktable_pl, kt);
1994}
1995
1996static int
1997pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1998{
1999 int d;
2000
2001 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2002 return (d);
2003 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2004}
2005
2006static struct pfr_ktable *
2007pfr_lookup_table(struct pfr_table *tbl)
2008{
2009 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2010
2011 /* struct pfr_ktable start like a struct pfr_table */
2012 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2013 (struct pfr_ktable *)(void *)tbl));
2014}
2015
2016int
2017pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2018{
2019 struct pfr_kentry *ke = NULL;
2020 int match;
2021
2022 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2023
2024 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2025 kt = kt->pfrkt_root;
2026 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2027 return (0);
2028
2029 switch (af) {
2030#if INET
2031 case AF_INET:
2032 pfr_sin.sin_addr.s_addr = a->addr32[0];
2033 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2034 if (ke && KENTRY_RNF_ROOT(ke))
2035 ke = NULL;
2036 break;
2037#endif /* INET */
2038#if INET6
2039 case AF_INET6:
2040 bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
2041 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2042 if (ke && KENTRY_RNF_ROOT(ke))
2043 ke = NULL;
2044 break;
2045#endif /* INET6 */
2046 }
2047 match = (ke && !ke->pfrke_not);
2048 if (match)
2049 kt->pfrkt_match++;
2050 else
2051 kt->pfrkt_nomatch++;
2052 return (match);
2053}
2054
2055void
2056pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2057 u_int64_t len, int dir_out, int op_pass, int notrule)
2058{
2059 struct pfr_kentry *ke = NULL;
2060
2061 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2062
2063 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2064 kt = kt->pfrkt_root;
2065 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2066 return;
2067
2068 switch (af) {
2069#if INET
2070 case AF_INET:
2071 pfr_sin.sin_addr.s_addr = a->addr32[0];
2072 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2073 if (ke && KENTRY_RNF_ROOT(ke))
2074 ke = NULL;
2075 break;
2076#endif /* INET */
2077#if INET6
2078 case AF_INET6:
2079 bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
2080 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2081 if (ke && KENTRY_RNF_ROOT(ke))
2082 ke = NULL;
2083 break;
2084#endif /* INET6 */
2085 default:
2086 ;
2087 }
2088 if ((ke == NULL || ke->pfrke_not) != notrule) {
2089 if (op_pass != PFR_OP_PASS)
2090 printf("pfr_update_stats: assertion failed.\n");
2091 op_pass = PFR_OP_XPASS;
2092 }
2093 kt->pfrkt_packets[dir_out][op_pass]++;
2094 kt->pfrkt_bytes[dir_out][op_pass] += len;
2095 if (ke != NULL && op_pass != PFR_OP_XPASS) {
2096 ke->pfrke_packets[dir_out][op_pass]++;
2097 ke->pfrke_bytes[dir_out][op_pass] += len;
2098 }
2099}
2100
2101struct pfr_ktable *
2102pfr_attach_table(struct pf_ruleset *rs, char *name)
2103{
2104 struct pfr_ktable *kt, *rt;
2105 struct pfr_table tbl;
2106 struct pf_anchor *ac = rs->anchor;
2107
2108 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2109
2110 bzero(&tbl, sizeof (tbl));
2111 strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
2112 if (ac != NULL)
2113 strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
2114 kt = pfr_lookup_table(&tbl);
2115 if (kt == NULL) {
2116 kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
2117 if (kt == NULL)
2118 return (NULL);
2119 if (ac != NULL) {
2120 bzero(tbl.pfrt_anchor, sizeof (tbl.pfrt_anchor));
2121 rt = pfr_lookup_table(&tbl);
2122 if (rt == NULL) {
2123 rt = pfr_create_ktable(&tbl, 0, 1);
2124 if (rt == NULL) {
2125 pfr_destroy_ktable(kt, 0);
2126 return (NULL);
2127 }
2128 pfr_insert_ktable(rt);
2129 }
2130 kt->pfrkt_root = rt;
2131 }
2132 pfr_insert_ktable(kt);
2133 }
2134 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2135 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2136 return (kt);
2137}
2138
2139void
2140pfr_detach_table(struct pfr_ktable *kt)
2141{
2142 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2143
2144 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2145 printf("pfr_detach_table: refcount = %d.\n",
2146 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2147 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2148 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2149}
2150
2151int
2152pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2153 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2154{
2155 struct pfr_kentry *ke, *ke2;
2156 struct pf_addr *addr;
2157 union sockaddr_union mask;
2158 int idx = -1, use_counter = 0;
2159
2160 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2161
2162 if (af == AF_INET)
2163 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2164 else if (af == AF_INET6)
2165 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2166 else
2167 return (-1);
2168
2169 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2170 kt = kt->pfrkt_root;
2171 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2172 return (-1);
2173
2174 if (pidx != NULL)
2175 idx = *pidx;
2176 if (counter != NULL && idx >= 0)
2177 use_counter = 1;
2178 if (idx < 0)
2179 idx = 0;
2180
2181_next_block:
2182 ke = pfr_kentry_byidx(kt, idx, af);
2183 if (ke == NULL) {
2184 kt->pfrkt_nomatch++;
2185 return (1);
2186 }
2187 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2188 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2189 *rmask = SUNION2PF(&pfr_mask, af);
2190
2191 if (use_counter) {
2192 /* is supplied address within block? */
2193 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2194 /* no, go to next block in table */
2195 idx++;
2196 use_counter = 0;
2197 goto _next_block;
2198 }
2199 PF_ACPY(addr, counter, af);
2200 } else {
2201 /* use first address of block */
2202 PF_ACPY(addr, *raddr, af);
2203 }
2204
2205 if (!KENTRY_NETWORK(ke)) {
2206 /* this is a single IP address - no possible nested block */
2207 PF_ACPY(counter, addr, af);
2208 *pidx = idx;
2209 kt->pfrkt_match++;
2210 return (0);
2211 }
2212 for (;;) {
2213 /* we don't want to use a nested block */
2214 if (af == AF_INET)
2215 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2216 kt->pfrkt_ip4);
2217 else if (af == AF_INET6)
2218 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2219 kt->pfrkt_ip6);
2220 else
2221 return (-1); /* never happens */
2222 /* no need to check KENTRY_RNF_ROOT() here */
2223 if (ke2 == ke) {
2224 /* lookup return the same block - perfect */
2225 PF_ACPY(counter, addr, af);
2226 *pidx = idx;
2227 kt->pfrkt_match++;
2228 return (0);
2229 }
2230
2231 /* we need to increase the counter past the nested block */
2232 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2233 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2234 PF_AINC(addr, af);
2235 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2236 /* ok, we reached the end of our main block */
2237 /* go to next block in table */
2238 idx++;
2239 use_counter = 0;
2240 goto _next_block;
2241 }
2242 }
2243}
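/*
 * Reviewer note (not part of the original source): in the nested-block
 * skip at line 2232, pfr_prepare_network() is passed a hard-coded
 * AF_INET even when af == AF_INET6, which looks suspicious next to the
 * af-keyed SUNION2PF()/PF_POOLMASK() usage around it; worth verifying
 * against later pf revisions before relying on the IPv6 path here.
 */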
2244
2245static struct pfr_kentry *
2246pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2247{
2248 struct pfr_walktree w;
2249
2250 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2251
2252 bzero(&w, sizeof (w));
2253 w.pfrw_op = PFRW_POOL_GET;
2254 w.pfrw_cnt = idx;
2255
2256 switch (af) {
2257#if INET
2258 case AF_INET:
2259 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2260 pfr_walktree, &w);
2261 return (w.pfrw_kentry);
2262#endif /* INET */
2263#if INET6
2264 case AF_INET6:
2265 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2266 pfr_walktree, &w);
2267 return (w.pfrw_kentry);
2268#endif /* INET6 */
2269 default:
2270 return (NULL);
2271 }
2272}
2273
2274void
2275pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2276{
2277 struct pfr_walktree w;
2278
2279 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
2280
2281 bzero(&w, sizeof (w));
2282 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2283 w.pfrw_dyn = dyn;
2284
2285 dyn->pfid_acnt4 = 0;
2286 dyn->pfid_acnt6 = 0;
2287 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2288 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2289 pfr_walktree, &w);
2290 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2291 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2292 pfr_walktree, &w);
2293}