Bug Summary

File: modules/pf/../../netpfil/pf/pf_ioctl.c
Warning: line 3238, column 11
Copies out a struct containing a union whose members have different sizes
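
The pattern behind this warning class: an ioctl handler copies a kernel structure that embeds a union out to userland, but only a union member smaller than the whole union is written before the copy, so the remaining bytes of the union carry uninitialized kernel memory across the user/kernel boundary. The C sketch below is a minimal illustration of that pattern under assumed names; struct ioc_arg, leaky_handler() and fixed_handler() are hypothetical and are not taken from pf_ioctl.c, and memcpy() stands in for copyout(9).

#include <string.h>

/* Hypothetical ioctl argument: a union whose members differ in size. */
struct ioc_arg {
	int	type;
	union {
		unsigned short	small;		/* 2 bytes  */
		char		big[64];	/* 64 bytes */
	} u;
};

/*
 * The flagged pattern: only the small union member is filled in, yet the
 * copy covers sizeof(struct ioc_arg), so the bytes backing u.big beyond
 * the first two still hold stale stack contents (kernel stack, in the
 * real copyout(9) case).
 */
static void
leaky_handler(struct ioc_arg *out)
{
	struct ioc_arg a;		/* not zeroed */

	a.type = 1;
	a.u.small = 42;
	memcpy(out, &a, sizeof(a));	/* stand-in for copyout(9) */
}

/* Safe variant: clear the whole structure before filling it. */
static void
fixed_handler(struct ioc_arg *out)
{
	struct ioc_arg a;

	memset(&a, 0, sizeof(a));	/* bzero() in the kernel */
	a.type = 1;
	a.u.small = 42;
	memcpy(out, &a, sizeof(a));
}

int
main(void)
{
	struct ioc_arg out;

	leaky_handler(&out);	/* out.u.big[2..63] are stale stack bytes */
	fixed_handler(&out);	/* every byte of out is defined */
	return (0);
}

The report flags such copyouts because, when union members differ in size, the bytes beyond the member actually in use may never be initialized; zeroing the structure before filling it, or copying only the member in use, avoids the leak.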

Annotated Source Code

1/*-
2 * Copyright (c) 2001 Daniel Hartmeier
3 * Copyright (c) 2002,2003 Henning Brauer
4 * Copyright (c) 2012 Gleb Smirnoff <[email protected]>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Effort sponsored in part by the Defense Advanced Research Projects
32 * Agency (DARPA) and Air Force Research Laboratory, Air Force
33 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
34 *
35 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: releng/11.0/sys/netpfil/pf/pf_ioctl.c 302173 2016-06-24 11:53:12Z bz $")__asm__(".ident\t\"" "$FreeBSD: releng/11.0/sys/netpfil/pf/pf_ioctl.c 302173 2016-06-24 11:53:12Z bz $"
"\"")
;
40
41#include "opt_inet.h"
42#include "opt_inet6.h"
43#include "opt_bpf.h"
44#include "opt_pf.h"
45
46#include <sys/param.h>
47#include <sys/bus.h>
48#include <sys/conf.h>
49#include <sys/endian.h>
50#include <sys/fcntl.h>
51#include <sys/filio.h>
52#include <sys/interrupt.h>
53#include <sys/jail.h>
54#include <sys/kernel.h>
55#include <sys/kthread.h>
56#include <sys/lock.h>
57#include <sys/mbuf.h>
58#include <sys/module.h>
59#include <sys/proc.h>
60#include <sys/rwlock.h>
61#include <sys/smp.h>
62#include <sys/socket.h>
63#include <sys/sysctl.h>
64#include <sys/md5.h>
65#include <sys/ucred.h>
66
67#include <net/if.h>
68#include <net/if_var.h>
69#include <net/vnet.h>
70#include <net/route.h>
71#include <net/pfil.h>
72#include <net/pfvar.h>
73#include <net/if_pfsync.h>
74#include <net/if_pflog.h>
75
76#include <netinet/in.h>
77#include <netinet/ip.h>
78#include <netinet/ip_var.h>
79#include <netinet6/ip6_var.h>
80#include <netinet/ip_icmp.h>
81
82#ifdef INET61
83#include <netinet/ip6.h>
84#endif /* INET6 */
85
86#ifdef ALTQ
87#include <net/altq/altq.h>
88#endif
89
90static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
91 u_int8_t, u_int8_t, u_int8_t);
92
93static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
94static void pf_empty_pool(struct pf_palist *);
95static int pfioctl(struct cdev *, u_long, caddr_t, int,
96 struct thread *);
97#ifdef ALTQ
98static int pf_begin_altq(u_int32_t *);
99static int pf_rollback_altq(u_int32_t);
100static int pf_commit_altq(u_int32_t);
101static int pf_enable_altq(struct pf_altq *);
102static int pf_disable_altq(struct pf_altq *);
103static u_int32_t pf_qname2qid(char *);
104static void pf_qid_unref(u_int32_t);
105#endif /* ALTQ */
106static int pf_begin_rules(u_int32_t *, int, const char *);
107static int pf_rollback_rules(u_int32_t, int, char *);
108static int pf_setup_pfsync_matching(struct pf_ruleset *);
109static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
110static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
111static int pf_commit_rules(u_int32_t, int, char *);
112static int pf_addr_setup(struct pf_ruleset *,
113 struct pf_addr_wrap *, sa_family_t);
114static void pf_addr_copyout(struct pf_addr_wrap *);
115
116VNET_DEFINE(struct pf_rule, pf_default_rule)struct pf_rule pf_default_rule;
117
118#ifdef ALTQ
119static VNET_DEFINE(int, pf_altq_running)int pf_altq_running;
120#define V_pf_altq_running VNET(pf_altq_running)(pf_altq_running)
121#endif
122
123#define TAGID_MAX50000 50000
124struct pf_tagname {
125 TAILQ_ENTRY(pf_tagname)struct { struct pf_tagname *tqe_next; struct pf_tagname **tqe_prev
; }
entries;
126 char name[PF_TAG_NAME_SIZE64];
127 uint16_t tag;
128 int ref;
129};
130
131TAILQ_HEAD(pf_tags, pf_tagname)struct pf_tags { struct pf_tagname *tqh_first; struct pf_tagname
**tqh_last; }
;
132#define V_pf_tags(pf_tags) VNET(pf_tags)(pf_tags)
133VNET_DEFINE(struct pf_tags, pf_tags)struct pf_tags pf_tags;
134#define V_pf_qids(pf_qids) VNET(pf_qids)(pf_qids)
135VNET_DEFINE(struct pf_tags, pf_qids)struct pf_tags pf_qids;
136static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names")struct malloc_type M_PFTAG[1] = { { ((void *)0), 877983977, "pf_tag"
, ((void *)0) } }; static struct sysinit M_PFTAG_init_sys_init
= { SI_SUB_KMEM, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t
)malloc_init, ((void *)(M_PFTAG)) }; __asm__(".globl " "__start_set_sysinit_set"
); __asm__(".globl " "__stop_set_sysinit_set"); static void const
* const __set_sysinit_set_sym_M_PFTAG_init_sys_init __attribute__
((__section__("set_" "sysinit_set"))) __attribute__((__used__
)) = &(M_PFTAG_init_sys_init); static struct sysinit M_PFTAG_uninit_sys_uninit
= { SI_SUB_KMEM, SI_ORDER_ANY, (sysinit_cfunc_t)(sysinit_nfunc_t
)malloc_uninit, ((void *)(M_PFTAG)) }; __asm__(".globl " "__start_set_sysuninit_set"
); __asm__(".globl " "__stop_set_sysuninit_set"); static void
const * const __set_sysuninit_set_sym_M_PFTAG_uninit_sys_uninit
__attribute__((__section__("set_" "sysuninit_set"))) __attribute__
((__used__)) = &(M_PFTAG_uninit_sys_uninit)
;
137static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db")struct malloc_type M_PFALTQ[1] = { { ((void *)0), 877983977, "pf_altq"
, ((void *)0) } }; static struct sysinit M_PFALTQ_init_sys_init
= { SI_SUB_KMEM, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t
)malloc_init, ((void *)(M_PFALTQ)) }; __asm__(".globl " "__start_set_sysinit_set"
); __asm__(".globl " "__stop_set_sysinit_set"); static void const
* const __set_sysinit_set_sym_M_PFALTQ_init_sys_init __attribute__
((__section__("set_" "sysinit_set"))) __attribute__((__used__
)) = &(M_PFALTQ_init_sys_init); static struct sysinit M_PFALTQ_uninit_sys_uninit
= { SI_SUB_KMEM, SI_ORDER_ANY, (sysinit_cfunc_t)(sysinit_nfunc_t
)malloc_uninit, ((void *)(M_PFALTQ)) }; __asm__(".globl " "__start_set_sysuninit_set"
); __asm__(".globl " "__stop_set_sysuninit_set"); static void
const * const __set_sysuninit_set_sym_M_PFALTQ_uninit_sys_uninit
__attribute__((__section__("set_" "sysuninit_set"))) __attribute__
((__used__)) = &(M_PFALTQ_uninit_sys_uninit)
;
138static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules")struct malloc_type M_PFRULE[1] = { { ((void *)0), 877983977, "pf_rule"
, ((void *)0) } }; static struct sysinit M_PFRULE_init_sys_init
= { SI_SUB_KMEM, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t
)malloc_init, ((void *)(M_PFRULE)) }; __asm__(".globl " "__start_set_sysinit_set"
); __asm__(".globl " "__stop_set_sysinit_set"); static void const
* const __set_sysinit_set_sym_M_PFRULE_init_sys_init __attribute__
((__section__("set_" "sysinit_set"))) __attribute__((__used__
)) = &(M_PFRULE_init_sys_init); static struct sysinit M_PFRULE_uninit_sys_uninit
= { SI_SUB_KMEM, SI_ORDER_ANY, (sysinit_cfunc_t)(sysinit_nfunc_t
)malloc_uninit, ((void *)(M_PFRULE)) }; __asm__(".globl " "__start_set_sysuninit_set"
); __asm__(".globl " "__stop_set_sysuninit_set"); static void
const * const __set_sysuninit_set_sym_M_PFRULE_uninit_sys_uninit
__attribute__((__section__("set_" "sysuninit_set"))) __attribute__
((__used__)) = &(M_PFRULE_uninit_sys_uninit)
;
139
140#if (PF_QNAME_SIZE64 != PF_TAG_NAME_SIZE64)
141#error PF_QNAME_SIZE64 must be equal to PF_TAG_NAME_SIZE64
142#endif
143
144static u_int16_t tagname2tag(struct pf_tags *, char *);
145static u_int16_t pf_tagname2tag(char *);
146static void tag_unref(struct pf_tags *, u_int16_t);
147
148#define DPFPRINTF(n, x)if ((pf_status).debug >= (n)) printf x if (V_pf_status(pf_status).debug >= (n)) printf x
149
150struct cdev *pf_dev;
151
152/*
153 * XXX - These are new and need to be checked when moving to a new version
154 */
155static void pf_clear_states(void);
156static int pf_clear_tables(void);
157static void pf_clear_srcnodes(struct pf_src_node *);
158static void pf_kill_srcnodes(struct pfioc_src_node_kill *);
159static void pf_tbladdr_copyout(struct pf_addr_wrap *);
160
161/*
162 * Wrapper functions for pfil(9) hooks
163 */
164#ifdef INET1
165static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
166 int dir, struct inpcb *inp);
167static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
168 int dir, struct inpcb *inp);
169#endif
170#ifdef INET61
171static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
172 int dir, struct inpcb *inp);
173static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
174 int dir, struct inpcb *inp);
175#endif
176
177static int hook_pf(void);
178static int dehook_pf(void);
179static int shutdown_pf(void);
180static int pf_load(void);
181static int pf_unload(void);
182
183static struct cdevsw pf_cdevsw = {
184 .d_ioctl = pfioctl,
185 .d_name = PF_NAME"pf",
186 .d_version = D_VERSION0x17122009,
187};
188
189static volatile VNET_DEFINE(int, pf_pfil_hooked)int pf_pfil_hooked;
190#define V_pf_pfil_hooked(pf_pfil_hooked) VNET(pf_pfil_hooked)(pf_pfil_hooked)
191
192/*
193 * We need a flag that is neither hooked nor running to know when
194 * the VNET is "valid". We primarily need this to control (global)
195 * external event, e.g., eventhandlers.
196 */
197VNET_DEFINE(int, pf_vnet_active)int pf_vnet_active;
198#define V_pf_vnet_active(pf_vnet_active) VNET(pf_vnet_active)(pf_vnet_active)
199
200int pf_end_threads;
201
202struct rwlock pf_rules_lock;
203struct sx pf_ioctl_lock;
204
205/* pfsync */
206pfsync_state_import_t *pfsync_state_import_ptr = NULL((void *)0);
207pfsync_insert_state_t *pfsync_insert_state_ptr = NULL((void *)0);
208pfsync_update_state_t *pfsync_update_state_ptr = NULL((void *)0);
209pfsync_delete_state_t *pfsync_delete_state_ptr = NULL((void *)0);
210pfsync_clear_states_t *pfsync_clear_states_ptr = NULL((void *)0);
211pfsync_defer_t *pfsync_defer_ptr = NULL((void *)0);
212/* pflog */
213pflog_packet_t *pflog_packet_ptr = NULL((void *)0);
214
215static void
216pfattach_vnet(void)
217{
218 u_int32_t *my_timeout = V_pf_default_rule(pf_default_rule).timeout;
219
220 pf_initialize();
221 pfr_initialize();
222 pfi_initialize_vnet();
223 pf_normalize_init();
224
225 V_pf_limits(pf_limits)[PF_LIMIT_STATES].limit = PFSTATE_HIWAT10000;
226 V_pf_limits(pf_limits)[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT10000;
227
228 RB_INIT(&V_pf_anchors)do { (&(pf_anchors))->rbh_root = ((void *)0); } while (
0)
;
229 pf_init_ruleset(&pf_main_ruleset(pf_main_anchor).ruleset);
230
231 /* default rule should never be garbage collected */
232 V_pf_default_rule(pf_default_rule).entries.tqe_prev = &V_pf_default_rule(pf_default_rule).entries.tqe_next;
233#ifdef PF_DEFAULT_TO_DROP
234 V_pf_default_rule(pf_default_rule).action = PF_DROP;
235#else
236 V_pf_default_rule(pf_default_rule).action = PF_PASS;
237#endif
238 V_pf_default_rule(pf_default_rule).nr = -1;
239 V_pf_default_rule(pf_default_rule).rtableid = -1;
240
241 V_pf_default_rule(pf_default_rule).states_cur = counter_u64_alloc(M_WAITOK0x0002);
242 V_pf_default_rule(pf_default_rule).states_tot = counter_u64_alloc(M_WAITOK0x0002);
243 V_pf_default_rule(pf_default_rule).src_nodes = counter_u64_alloc(M_WAITOK0x0002);
244
245 /* initialize default timeouts */
246 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL120;
247 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL30;
248 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL24*60*60;
249 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL15 * 60;
250 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL45;
251 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL90;
252 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL60;
253 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL30;
254 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL60;
255 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL20;
256 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL10;
257 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL60;
258 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL30;
259 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL60;
260 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL30;
261 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL10;
262 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL0;
263 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL30;
264 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START6000;
265 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END12000;
266
267 bzero(&V_pf_status(pf_status), sizeof(V_pf_status(pf_status)));
268 V_pf_status(pf_status).debug = PF_DEBUG_URGENT;
269
270 V_pf_pfil_hooked(pf_pfil_hooked) = 0;
271
272 /* XXX do our best to avoid a conflict */
273 V_pf_status(pf_status).hostid = arc4random();
274
275 for (int i = 0; i < PFRES_MAX16; i++)
276 V_pf_status(pf_status).counters[i] = counter_u64_alloc(M_WAITOK0x0002);
277 for (int i = 0; i < LCNT_MAX7; i++)
278 V_pf_status(pf_status).lcounters[i] = counter_u64_alloc(M_WAITOK0x0002);
279 for (int i = 0; i < FCNT_MAX3; i++)
280 V_pf_status(pf_status).fcounters[i] = counter_u64_alloc(M_WAITOK0x0002);
281 for (int i = 0; i < SCNT_MAX3; i++)
282 V_pf_status(pf_status).scounters[i] = counter_u64_alloc(M_WAITOK0x0002);
283
284 if (swi_add(NULL((void *)0), "pf send", pf_intr, curvnet((void *)0), SWI_NET1,
285 INTR_MPSAFE, &V_pf_swi_cookie(pf_swi_cookie)) != 0)
286 /* XXXGL: leaked all above. */
287 return;
288}
289
290
291static struct pf_pool *
292pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
293 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
294 u_int8_t check_ticket)
295{
296 struct pf_ruleset *ruleset;
297 struct pf_rule *rule;
298 int rs_num;
299
300 ruleset = pf_find_ruleset(anchor);
301 if (ruleset == NULL((void *)0))
302 return (NULL((void *)0));
303 rs_num = pf_get_ruleset_number(rule_action);
304 if (rs_num >= PF_RULESET_MAX)
305 return (NULL((void *)0));
306 if (active) {
307 if (check_ticket && ticket !=
308 ruleset->rules[rs_num].active.ticket)
309 return (NULL((void *)0));
310 if (r_last)
311 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,(*(((struct pf_rulequeue *)((ruleset->rules[rs_num].active
.ptr)->tqh_last))->tqh_last))
312 pf_rulequeue)(*(((struct pf_rulequeue *)((ruleset->rules[rs_num].active
.ptr)->tqh_last))->tqh_last))
;
313 else
314 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr)((ruleset->rules[rs_num].active.ptr)->tqh_first);
315 } else {
316 if (check_ticket && ticket !=
317 ruleset->rules[rs_num].inactive.ticket)
318 return (NULL((void *)0));
319 if (r_last)
320 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,(*(((struct pf_rulequeue *)((ruleset->rules[rs_num].inactive
.ptr)->tqh_last))->tqh_last))
321 pf_rulequeue)(*(((struct pf_rulequeue *)((ruleset->rules[rs_num].inactive
.ptr)->tqh_last))->tqh_last))
;
322 else
323 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr)((ruleset->rules[rs_num].inactive.ptr)->tqh_first);
324 }
325 if (!r_last) {
326 while ((rule != NULL((void *)0)) && (rule->nr != rule_number))
327 rule = TAILQ_NEXT(rule, entries)((rule)->entries.tqe_next);
328 }
329 if (rule == NULL((void *)0))
330 return (NULL((void *)0));
331
332 return (&rule->rpool);
333}
334
335static void
336pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
337{
338 struct pf_pooladdr *mv_pool_pa;
339
340 while ((mv_pool_pa = TAILQ_FIRST(poola)((poola)->tqh_first)) != NULL((void *)0)) {
341 TAILQ_REMOVE(poola, mv_pool_pa, entries)do { ; ; ; ; if (((((mv_pool_pa))->entries.tqe_next)) != (
(void *)0)) (((mv_pool_pa))->entries.tqe_next)->entries
.tqe_prev = (mv_pool_pa)->entries.tqe_prev; else { (poola)
->tqh_last = (mv_pool_pa)->entries.tqe_prev; ; } *(mv_pool_pa
)->entries.tqe_prev = (((mv_pool_pa))->entries.tqe_next
); ; ; ; } while (0)
;
342 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries)do { ; (((mv_pool_pa))->entries.tqe_next) = ((void *)0); (
mv_pool_pa)->entries.tqe_prev = (poolb)->tqh_last; *(poolb
)->tqh_last = (mv_pool_pa); (poolb)->tqh_last = &((
(mv_pool_pa))->entries.tqe_next); ; ; } while (0)
;
343 }
344}
345
346static void
347pf_empty_pool(struct pf_palist *poola)
348{
349 struct pf_pooladdr *pa;
350
351 while ((pa = TAILQ_FIRST(poola)((poola)->tqh_first)) != NULL((void *)0)) {
352 switch (pa->addr.type) {
353 case PF_ADDR_DYNIFTL:
354 pfi_dynaddr_remove(pa->addr.p.dyn);
355 break;
356 case PF_ADDR_TABLE:
357 /* XXX: this could be unfinished pooladdr on pabuf */
358 if (pa->addr.p.tbl != NULL((void *)0))
359 pfr_detach_table(pa->addr.p.tbl);
360 break;
361 }
362 if (pa->kif)
363 pfi_kif_unref(pa->kif);
364 TAILQ_REMOVE(poola, pa, entries)do { ; ; ; ; if (((((pa))->entries.tqe_next)) != ((void *)
0)) (((pa))->entries.tqe_next)->entries.tqe_prev = (pa)
->entries.tqe_prev; else { (poola)->tqh_last = (pa)->
entries.tqe_prev; ; } *(pa)->entries.tqe_prev = (((pa))->
entries.tqe_next); ; ; ; } while (0)
;
365 free(pa, M_PFRULE);
366 }
367}
368
369static void
370pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
371{
372
373 PF_RULES_WASSERT();
374
375 TAILQ_REMOVE(rulequeue, rule, entries)do { ; ; ; ; if (((((rule))->entries.tqe_next)) != ((void *
)0)) (((rule))->entries.tqe_next)->entries.tqe_prev = (
rule)->entries.tqe_prev; else { (rulequeue)->tqh_last =
(rule)->entries.tqe_prev; ; } *(rule)->entries.tqe_prev
= (((rule))->entries.tqe_next); ; ; ; } while (0)
;
376
377 PF_UNLNKDRULES_LOCK()__mtx_lock_flags(&((((&pf_unlnkdrules_mtx))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (377))
;
378 rule->rule_flag |= PFRULE_REFS0x0080;
379 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries)do { ; (((rule))->entries.tqe_next) = ((void *)0); (rule)->
entries.tqe_prev = (&(pf_unlinked_rules))->tqh_last; *
(&(pf_unlinked_rules))->tqh_last = (rule); (&(pf_unlinked_rules
))->tqh_last = &(((rule))->entries.tqe_next); ; ; }
while (0)
;
380 PF_UNLNKDRULES_UNLOCK()__mtx_unlock_flags(&((((&pf_unlnkdrules_mtx))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (380))
;
381}
382
383void
384pf_free_rule(struct pf_rule *rule)
385{
386
387 PF_RULES_WASSERT();
388
389 if (rule->tag)
390 tag_unref(&V_pf_tags(pf_tags), rule->tag);
391 if (rule->match_tag)
392 tag_unref(&V_pf_tags(pf_tags), rule->match_tag);
393#ifdef ALTQ
394 if (rule->pqid != rule->qid)
395 pf_qid_unref(rule->pqid);
396 pf_qid_unref(rule->qid);
397#endif
398 switch (rule->src.addr.type) {
399 case PF_ADDR_DYNIFTL:
400 pfi_dynaddr_remove(rule->src.addr.p.dyn);
401 break;
402 case PF_ADDR_TABLE:
403 pfr_detach_table(rule->src.addr.p.tbl);
404 break;
405 }
406 switch (rule->dst.addr.type) {
407 case PF_ADDR_DYNIFTL:
408 pfi_dynaddr_remove(rule->dst.addr.p.dyn);
409 break;
410 case PF_ADDR_TABLE:
411 pfr_detach_table(rule->dst.addr.p.tbl);
412 break;
413 }
414 if (rule->overload_tbl)
415 pfr_detach_table(rule->overload_tbl);
416 if (rule->kif)
417 pfi_kif_unref(rule->kif);
418 pf_anchor_remove(rule);
419 pf_empty_pool(&rule->rpool.list);
420 counter_u64_free(rule->states_cur);
421 counter_u64_free(rule->states_tot);
422 counter_u64_free(rule->src_nodes);
423 free(rule, M_PFRULE);
424}
425
426static u_int16_t
427tagname2tag(struct pf_tags *head, char *tagname)
428{
429 struct pf_tagname *tag, *p = NULL((void *)0);
430 u_int16_t new_tagid = 1;
431
432 PF_RULES_WASSERT();
433
434 TAILQ_FOREACH(tag, head, entries)for ((tag) = (((head))->tqh_first); (tag); (tag) = (((tag)
)->entries.tqe_next))
435 if (strcmp(tagname, tag->name) == 0) {
436 tag->ref++;
437 return (tag->tag);
438 }
439
440 /*
441 * to avoid fragmentation, we do a linear search from the beginning
442 * and take the first free slot we find. if there is none or the list
443 * is empty, append a new entry at the end.
444 */
445
446 /* new entry */
447 if (!TAILQ_EMPTY(head)((head)->tqh_first == ((void *)0)))
448 for (p = TAILQ_FIRST(head)((head)->tqh_first); p != NULL((void *)0) &&
449 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)((p)->entries.tqe_next))
450 new_tagid = p->tag + 1;
451
452 if (new_tagid > TAGID_MAX50000)
453 return (0);
454
455 /* allocate and fill new struct pf_tagname */
456 tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT0x0001|M_ZERO0x0100);
457 if (tag == NULL((void *)0))
458 return (0);
459 strlcpy(tag->name, tagname, sizeof(tag->name));
460 tag->tag = new_tagid;
461 tag->ref++;
462
463 if (p != NULL((void *)0)) /* insert new entry before p */
464 TAILQ_INSERT_BEFORE(p, tag, entries)do { ; (tag)->entries.tqe_prev = (p)->entries.tqe_prev;
(((tag))->entries.tqe_next) = (p); *(p)->entries.tqe_prev
= (tag); (p)->entries.tqe_prev = &(((tag))->entries
.tqe_next); ; ; } while (0)
;
465 else /* either list empty or no free slot in between */
466 TAILQ_INSERT_TAIL(head, tag, entries)do { ; (((tag))->entries.tqe_next) = ((void *)0); (tag)->
entries.tqe_prev = (head)->tqh_last; *(head)->tqh_last =
(tag); (head)->tqh_last = &(((tag))->entries.tqe_next
); ; ; } while (0)
;
467
468 return (tag->tag);
469}
470
471static void
472tag_unref(struct pf_tags *head, u_int16_t tag)
473{
474 struct pf_tagname *p, *next;
475
476 PF_RULES_WASSERT();
477
478 for (p = TAILQ_FIRST(head)((head)->tqh_first); p != NULL((void *)0); p = next) {
479 next = TAILQ_NEXT(p, entries)((p)->entries.tqe_next);
480 if (tag == p->tag) {
481 if (--p->ref == 0) {
482 TAILQ_REMOVE(head, p, entries)do { ; ; ; ; if (((((p))->entries.tqe_next)) != ((void *)0
)) (((p))->entries.tqe_next)->entries.tqe_prev = (p)->
entries.tqe_prev; else { (head)->tqh_last = (p)->entries
.tqe_prev; ; } *(p)->entries.tqe_prev = (((p))->entries
.tqe_next); ; ; ; } while (0)
;
483 free(p, M_PFTAG);
484 }
485 break;
486 }
487 }
488}
489
490static u_int16_t
491pf_tagname2tag(char *tagname)
492{
493 return (tagname2tag(&V_pf_tags(pf_tags), tagname));
494}
495
496#ifdef ALTQ
497static u_int32_t
498pf_qname2qid(char *qname)
499{
500 return ((u_int32_t)tagname2tag(&V_pf_qids(pf_qids), qname));
501}
502
503static void
504pf_qid_unref(u_int32_t qid)
505{
506 tag_unref(&V_pf_qids(pf_qids), (u_int16_t)qid);
507}
508
509static int
510pf_begin_altq(u_int32_t *ticket)
511{
512 struct pf_altq *altq;
513 int error = 0;
514
515 PF_RULES_WASSERT();
516
517 /* Purge the old altq list */
518 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)(((pf_altqs_inactive))->tqh_first)) != NULL((void *)0)) {
519 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries)do { ; ; ; ; if (((((altq))->entries.tqe_next)) != ((void *
)0)) (((altq))->entries.tqe_next)->entries.tqe_prev = (
altq)->entries.tqe_prev; else { ((pf_altqs_inactive))->
tqh_last = (altq)->entries.tqe_prev; ; } *(altq)->entries
.tqe_prev = (((altq))->entries.tqe_next); ; ; ; } while (0
)
;
520 if (altq->qname[0] == 0 &&
521 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED0x01) == 0) {
522 /* detach and destroy the discipline */
523 error = altq_remove(altq);
524 } else
525 pf_qid_unref(altq->qid);
526 free(altq, M_PFALTQ);
527 }
528 if (error)
529 return (error);
530 *ticket = ++V_ticket_altqs_inactive(ticket_altqs_inactive);
531 V_altqs_inactive_open(altqs_inactive_open) = 1;
532 return (0);
533}
534
535static int
536pf_rollback_altq(u_int32_t ticket)
537{
538 struct pf_altq *altq;
539 int error = 0;
540
541 PF_RULES_WASSERT();
542
543 if (!V_altqs_inactive_open(altqs_inactive_open) || ticket != V_ticket_altqs_inactive(ticket_altqs_inactive))
544 return (0);
545 /* Purge the old altq list */
546 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)(((pf_altqs_inactive))->tqh_first)) != NULL((void *)0)) {
547 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries)do { ; ; ; ; if (((((altq))->entries.tqe_next)) != ((void *
)0)) (((altq))->entries.tqe_next)->entries.tqe_prev = (
altq)->entries.tqe_prev; else { ((pf_altqs_inactive))->
tqh_last = (altq)->entries.tqe_prev; ; } *(altq)->entries
.tqe_prev = (((altq))->entries.tqe_next); ; ; ; } while (0
)
;
548 if (altq->qname[0] == 0 &&
549 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED0x01) == 0) {
550 /* detach and destroy the discipline */
551 error = altq_remove(altq);
552 } else
553 pf_qid_unref(altq->qid);
554 free(altq, M_PFALTQ);
555 }
556 V_altqs_inactive_open(altqs_inactive_open) = 0;
557 return (error);
558}
559
560static int
561pf_commit_altq(u_int32_t ticket)
562{
563 struct pf_altqqueue *old_altqs;
564 struct pf_altq *altq;
565 int err, error = 0;
566
567 PF_RULES_WASSERT();
568
569 if (!V_altqs_inactive_open(altqs_inactive_open) || ticket != V_ticket_altqs_inactive(ticket_altqs_inactive))
570 return (EBUSY16);
571
572 /* swap altqs, keep the old. */
573 old_altqs = V_pf_altqs_active(pf_altqs_active);
574 V_pf_altqs_active(pf_altqs_active) = V_pf_altqs_inactive(pf_altqs_inactive);
575 V_pf_altqs_inactive(pf_altqs_inactive) = old_altqs;
576 V_ticket_altqs_active(ticket_altqs_active) = V_ticket_altqs_inactive(ticket_altqs_inactive);
577
578 /* Attach new disciplines */
579 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)for ((altq) = ((((pf_altqs_active)))->tqh_first); (altq); (
altq) = (((altq))->entries.tqe_next))
{
580 if (altq->qname[0] == 0 &&
581 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED0x01) == 0) {
582 /* attach the discipline */
583 error = altq_pfattach(altq);
584 if (error == 0 && V_pf_altq_running)
585 error = pf_enable_altq(altq);
586 if (error != 0)
587 return (error);
588 }
589 }
590
591 /* Purge the old altq list */
592 while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)(((pf_altqs_inactive))->tqh_first)) != NULL((void *)0)) {
593 TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries)do { ; ; ; ; if (((((altq))->entries.tqe_next)) != ((void *
)0)) (((altq))->entries.tqe_next)->entries.tqe_prev = (
altq)->entries.tqe_prev; else { ((pf_altqs_inactive))->
tqh_last = (altq)->entries.tqe_prev; ; } *(altq)->entries
.tqe_prev = (((altq))->entries.tqe_next); ; ; ; } while (0
)
;
594 if (altq->qname[0] == 0 &&
595 (altq->local_flags & PFALTQ_FLAG_IF_REMOVED0x01) == 0) {
596 /* detach and destroy the discipline */
597 if (V_pf_altq_running)
598 error = pf_disable_altq(altq);
599 err = altq_pfdetach(altq);
600 if (err != 0 && error == 0)
601 error = err;
602 err = altq_remove(altq);
603 if (err != 0 && error == 0)
604 error = err;
605 } else
606 pf_qid_unref(altq->qid);
607 free(altq, M_PFALTQ);
608 }
609
610 V_altqs_inactive_open(altqs_inactive_open) = 0;
611 return (error);
612}
613
614static int
615pf_enable_altq(struct pf_altq *altq)
616{
617 struct ifnet *ifp;
618 struct tb_profile tb;
619 int error = 0;
620
621 if ((ifp = ifunit(altq->ifname)) == NULL((void *)0))
622 return (EINVAL22);
623
624 if (ifp->if_snd.altq_type != ALTQT_NONE)
625 error = altq_enable(&ifp->if_snd);
626
627 /* set tokenbucket regulator */
628 if (error == 0 && ifp != NULL((void *)0) && ALTQ_IS_ENABLED(&ifp->if_snd)((&ifp->if_snd)->altq_flags & 0x02)) {
629 tb.rate = altq->ifbandwidth;
630 tb.depth = altq->tbrsize;
631 error = tbr_set(&ifp->if_snd, &tb);
632 }
633
634 return (error);
635}
636
637static int
638pf_disable_altq(struct pf_altq *altq)
639{
640 struct ifnet *ifp;
641 struct tb_profile tb;
642 int error;
643
644 if ((ifp = ifunit(altq->ifname)) == NULL((void *)0))
645 return (EINVAL22);
646
647 /*
648 * when the discipline is no longer referenced, it was overridden
649 * by a new one. if so, just return.
650 */
651 if (altq->altq_disc != ifp->if_snd.altq_disc)
652 return (0);
653
654 error = altq_disable(&ifp->if_snd);
655
656 if (error == 0) {
657 /* clear tokenbucket regulator */
658 tb.rate = 0;
659 error = tbr_set(&ifp->if_snd, &tb);
660 }
661
662 return (error);
663}
664
665void
666pf_altq_ifnet_event(struct ifnet *ifp, int remove)
667{
668 struct ifnet *ifp1;
669 struct pf_altq *a1, *a2, *a3;
670 u_int32_t ticket;
671 int error = 0;
672
673 /* Interrupt userland queue modifications */
674 if (V_altqs_inactive_open(altqs_inactive_open))
675 pf_rollback_altq(V_ticket_altqs_inactive(ticket_altqs_inactive));
676
677 /* Start new altq ruleset */
678 if (pf_begin_altq(&ticket))
679 return;
680
681 /* Copy the current active set */
682 TAILQ_FOREACH(a1, V_pf_altqs_active, entries)for ((a1) = ((((pf_altqs_active)))->tqh_first); (a1); (a1)
= (((a1))->entries.tqe_next))
{
683 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT0x0001);
684 if (a2 == NULL((void *)0)) {
685 error = ENOMEM12;
686 break;
687 }
688 bcopy(a1, a2, sizeof(struct pf_altq));
689
690 if (a2->qname[0] != 0) {
691 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
692 error = EBUSY16;
693 free(a2, M_PFALTQ);
694 break;
695 }
696 a2->altq_disc = NULL((void *)0);
697 TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries)for ((a3) = ((((pf_altqs_inactive)))->tqh_first); (a3); (a3
) = (((a3))->entries.tqe_next))
{
698 if (strncmp(a3->ifname, a2->ifname,
699 IFNAMSIZ16) == 0 && a3->qname[0] == 0) {
700 a2->altq_disc = a3->altq_disc;
701 break;
702 }
703 }
704 }
705 /* Deactivate the interface in question */
706 a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED0x01;
707 if ((ifp1 = ifunit(a2->ifname)) == NULL((void *)0) ||
708 (remove && ifp1 == ifp)) {
709 a2->local_flags |= PFALTQ_FLAG_IF_REMOVED0x01;
710 } else {
711 error = altq_add(a2);
712
713 if (ticket != V_ticket_altqs_inactive(ticket_altqs_inactive))
714 error = EBUSY16;
715
716 if (error) {
717 free(a2, M_PFALTQ);
718 break;
719 }
720 }
721
722 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries)do { ; (((a2))->entries.tqe_next) = ((void *)0); (a2)->
entries.tqe_prev = ((pf_altqs_inactive))->tqh_last; *((pf_altqs_inactive
))->tqh_last = (a2); ((pf_altqs_inactive))->tqh_last = &
(((a2))->entries.tqe_next); ; ; } while (0)
;
723 }
724
725 if (error != 0)
726 pf_rollback_altq(ticket);
727 else
728 pf_commit_altq(ticket);
729}
730#endif /* ALTQ */
731
732static int
733pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
734{
735 struct pf_ruleset *rs;
736 struct pf_rule *rule;
737
738 PF_RULES_WASSERT();
739
740 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
741 return (EINVAL22);
742 rs = pf_find_or_create_ruleset(anchor);
743 if (rs == NULL((void *)0))
744 return (EINVAL22);
745 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)((rs->rules[rs_num].inactive.ptr)->tqh_first)) != NULL((void *)0)) {
746 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
747 rs->rules[rs_num].inactive.rcount--;
748 }
749 *ticket = ++rs->rules[rs_num].inactive.ticket;
750 rs->rules[rs_num].inactive.open = 1;
751 return (0);
752}
753
754static int
755pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
756{
757 struct pf_ruleset *rs;
758 struct pf_rule *rule;
759
760 PF_RULES_WASSERT();
761
762 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
763 return (EINVAL22);
764 rs = pf_find_ruleset(anchor);
765 if (rs == NULL((void *)0) || !rs->rules[rs_num].inactive.open ||
766 rs->rules[rs_num].inactive.ticket != ticket)
767 return (0);
768 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)((rs->rules[rs_num].inactive.ptr)->tqh_first)) != NULL((void *)0)) {
769 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
770 rs->rules[rs_num].inactive.rcount--;
771 }
772 rs->rules[rs_num].inactive.open = 0;
773 return (0);
774}
775
776#define PF_MD5_UPD(st, elm)MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->
elm))
\
777 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
778
779#define PF_MD5_UPD_STR(st, elm)MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm
))
\
780 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
781
782#define PF_MD5_UPD_HTONL(st, elm, stor)do { (stor) = (__builtin_constant_p((st)->elm) ? (((__uint32_t
)((__uint16_t)(__builtin_constant_p(((__uint32_t)((st)->elm
)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t)((
st)->elm)) & 0xffff)) << 8 | ((__uint16_t)(((__uint32_t
)((st)->elm)) & 0xffff)) >> 8) : __bswap16_var((
(__uint32_t)((st)->elm)) & 0xffff))) << 16) | ((
__uint16_t)(__builtin_constant_p(((__uint32_t)((st)->elm))
>> 16) ? (__uint16_t)(((__uint16_t)(((__uint32_t)((st)
->elm)) >> 16)) << 8 | ((__uint16_t)(((__uint32_t
)((st)->elm)) >> 16)) >> 8) : __bswap16_var(((
__uint32_t)((st)->elm)) >> 16)))) : __bswap32_var((st
)->elm)); MD5Update(ctx, (u_int8_t *) &(stor), sizeof(
u_int32_t));} while (0)
do { \
783 (stor) = htonl((st)->elm)(__builtin_constant_p((st)->elm) ? (((__uint32_t)((__uint16_t
)(__builtin_constant_p(((__uint32_t)((st)->elm)) & 0xffff
) ? (__uint16_t)(((__uint16_t)(((__uint32_t)((st)->elm)) &
0xffff)) << 8 | ((__uint16_t)(((__uint32_t)((st)->elm
)) & 0xffff)) >> 8) : __bswap16_var(((__uint32_t)((
st)->elm)) & 0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p
(((__uint32_t)((st)->elm)) >> 16) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((st)->elm)) >> 16)) << 8 | ((__uint16_t
)(((__uint32_t)((st)->elm)) >> 16)) >> 8) : __bswap16_var
(((__uint32_t)((st)->elm)) >> 16)))) : __bswap32_var
((st)->elm))
; \
784 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
785} while (0)
786
787#define PF_MD5_UPD_HTONS(st, elm, stor)do { (stor) = ((__uint16_t)(__builtin_constant_p((st)->elm
) ? (__uint16_t)(((__uint16_t)((st)->elm)) << 8 | ((
__uint16_t)((st)->elm)) >> 8) : __bswap16_var((st)->
elm))); MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t
));} while (0)
do { \
788 (stor) = htons((st)->elm)((__uint16_t)(__builtin_constant_p((st)->elm) ? (__uint16_t
)(((__uint16_t)((st)->elm)) << 8 | ((__uint16_t)((st
)->elm)) >> 8) : __bswap16_var((st)->elm)))
; \
789 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
790} while (0)
791
792static void
793pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
794{
795 PF_MD5_UPD(pfr, addr.type)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.type, sizeof(
(pfr)->addr.type))
;
796 switch (pfr->addr.type) {
797 case PF_ADDR_DYNIFTL:
798 PF_MD5_UPD(pfr, addr.v.ifname)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.ifname, sizeof
((pfr)->addr.v.ifname))
;
799 PF_MD5_UPD(pfr, addr.iflags)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.iflags, sizeof
((pfr)->addr.iflags))
;
800 break;
801 case PF_ADDR_TABLE:
802 PF_MD5_UPD(pfr, addr.v.tblname)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.tblname, sizeof
((pfr)->addr.v.tblname))
;
803 break;
804 case PF_ADDR_ADDRMASK:
805 /* XXX ignore af? */
806 PF_MD5_UPD(pfr, addr.v.a.addr.addr32)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.a.addr.pfa.
addr32, sizeof((pfr)->addr.v.a.addr.pfa.addr32))
;
807 PF_MD5_UPD(pfr, addr.v.a.mask.addr32)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.a.mask.pfa.
addr32, sizeof((pfr)->addr.v.a.mask.pfa.addr32))
;
808 break;
809 }
810
811 PF_MD5_UPD(pfr, port[0])MD5Update(ctx, (u_int8_t *) &(pfr)->port[0], sizeof((pfr
)->port[0]))
;
812 PF_MD5_UPD(pfr, port[1])MD5Update(ctx, (u_int8_t *) &(pfr)->port[1], sizeof((pfr
)->port[1]))
;
813 PF_MD5_UPD(pfr, neg)MD5Update(ctx, (u_int8_t *) &(pfr)->neg, sizeof((pfr)->
neg))
;
814 PF_MD5_UPD(pfr, port_op)MD5Update(ctx, (u_int8_t *) &(pfr)->port_op, sizeof((pfr
)->port_op))
;
815}
816
817static void
818pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
819{
820 u_int16_t x;
821 u_int32_t y;
822
823 pf_hash_rule_addr(ctx, &rule->src);
824 pf_hash_rule_addr(ctx, &rule->dst);
825 PF_MD5_UPD_STR(rule, label)MD5Update(ctx, (u_int8_t *) (rule)->label, strlen((rule)->
label))
;
826 PF_MD5_UPD_STR(rule, ifname)MD5Update(ctx, (u_int8_t *) (rule)->ifname, strlen((rule)->
ifname))
;
827 PF_MD5_UPD_STR(rule, match_tagname)MD5Update(ctx, (u_int8_t *) (rule)->match_tagname, strlen(
(rule)->match_tagname))
;
828 PF_MD5_UPD_HTONS(rule, match_tag, x)do { (x) = ((__uint16_t)(__builtin_constant_p((rule)->match_tag
) ? (__uint16_t)(((__uint16_t)((rule)->match_tag)) <<
8 | ((__uint16_t)((rule)->match_tag)) >> 8) : __bswap16_var
((rule)->match_tag))); MD5Update(ctx, (u_int8_t *) &(x
), sizeof(u_int16_t));} while (0)
; /* dup? */
829 PF_MD5_UPD_HTONL(rule, os_fingerprint, y)do { (y) = (__builtin_constant_p((rule)->os_fingerprint) ?
(((__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t
)((rule)->os_fingerprint)) & 0xffff) ? (__uint16_t)(((
__uint16_t)(((__uint32_t)((rule)->os_fingerprint)) & 0xffff
)) << 8 | ((__uint16_t)(((__uint32_t)((rule)->os_fingerprint
)) & 0xffff)) >> 8) : __bswap16_var(((__uint32_t)((
rule)->os_fingerprint)) & 0xffff))) << 16) | ((__uint16_t
)(__builtin_constant_p(((__uint32_t)((rule)->os_fingerprint
)) >> 16) ? (__uint16_t)(((__uint16_t)(((__uint32_t)((rule
)->os_fingerprint)) >> 16)) << 8 | ((__uint16_t
)(((__uint32_t)((rule)->os_fingerprint)) >> 16)) >>
8) : __bswap16_var(((__uint32_t)((rule)->os_fingerprint))
>> 16)))) : __bswap32_var((rule)->os_fingerprint));
MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));} while
(0)
;
830 PF_MD5_UPD_HTONL(rule, prob, y)do { (y) = (__builtin_constant_p((rule)->prob) ? (((__uint32_t
)((__uint16_t)(__builtin_constant_p(((__uint32_t)((rule)->
prob)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t
)((rule)->prob)) & 0xffff)) << 8 | ((__uint16_t)
(((__uint32_t)((rule)->prob)) & 0xffff)) >> 8) :
__bswap16_var(((__uint32_t)((rule)->prob)) & 0xffff))
) << 16) | ((__uint16_t)(__builtin_constant_p(((__uint32_t
)((rule)->prob)) >> 16) ? (__uint16_t)(((__uint16_t)
(((__uint32_t)((rule)->prob)) >> 16)) << 8 | (
(__uint16_t)(((__uint32_t)((rule)->prob)) >> 16)) >>
8) : __bswap16_var(((__uint32_t)((rule)->prob)) >> 16
)))) : __bswap32_var((rule)->prob)); MD5Update(ctx, (u_int8_t
*) &(y), sizeof(u_int32_t));} while (0)
;
831 PF_MD5_UPD_HTONL(rule, uid.uid[0], y)do { (y) = (__builtin_constant_p((rule)->uid.uid[0]) ? (((
__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t)((
rule)->uid.uid[0])) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((rule)->uid.uid[0])) & 0xffff)) <<
8 | ((__uint16_t)(((__uint32_t)((rule)->uid.uid[0])) &
0xffff)) >> 8) : __bswap16_var(((__uint32_t)((rule)->
uid.uid[0])) & 0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p
(((__uint32_t)((rule)->uid.uid[0])) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)((rule)->uid.uid[0])) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)((rule)->uid
.uid[0])) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)((rule)->uid.uid[0])) >> 16)))) : __bswap32_var((rule
)->uid.uid[0])); MD5Update(ctx, (u_int8_t *) &(y), sizeof
(u_int32_t));} while (0)
;
832 PF_MD5_UPD_HTONL(rule, uid.uid[1], y)do { (y) = (__builtin_constant_p((rule)->uid.uid[1]) ? (((
__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t)((
rule)->uid.uid[1])) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((rule)->uid.uid[1])) & 0xffff)) <<
8 | ((__uint16_t)(((__uint32_t)((rule)->uid.uid[1])) &
0xffff)) >> 8) : __bswap16_var(((__uint32_t)((rule)->
uid.uid[1])) & 0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p
(((__uint32_t)((rule)->uid.uid[1])) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)((rule)->uid.uid[1])) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)((rule)->uid
.uid[1])) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)((rule)->uid.uid[1])) >> 16)))) : __bswap32_var((rule
)->uid.uid[1])); MD5Update(ctx, (u_int8_t *) &(y), sizeof
(u_int32_t));} while (0)
;
833 PF_MD5_UPD(rule, uid.op)MD5Update(ctx, (u_int8_t *) &(rule)->uid.op, sizeof((rule
)->uid.op))
;
834 PF_MD5_UPD_HTONL(rule, gid.gid[0], y)do { (y) = (__builtin_constant_p((rule)->gid.gid[0]) ? (((
__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t)((
rule)->gid.gid[0])) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((rule)->gid.gid[0])) & 0xffff)) <<
8 | ((__uint16_t)(((__uint32_t)((rule)->gid.gid[0])) &
0xffff)) >> 8) : __bswap16_var(((__uint32_t)((rule)->
gid.gid[0])) & 0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p
(((__uint32_t)((rule)->gid.gid[0])) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)((rule)->gid.gid[0])) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)((rule)->gid
.gid[0])) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)((rule)->gid.gid[0])) >> 16)))) : __bswap32_var((rule
)->gid.gid[0])); MD5Update(ctx, (u_int8_t *) &(y), sizeof
(u_int32_t));} while (0)
;
835 PF_MD5_UPD_HTONL(rule, gid.gid[1], y)do { (y) = (__builtin_constant_p((rule)->gid.gid[1]) ? (((
__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t)((
rule)->gid.gid[1])) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((rule)->gid.gid[1])) & 0xffff)) <<
8 | ((__uint16_t)(((__uint32_t)((rule)->gid.gid[1])) &
0xffff)) >> 8) : __bswap16_var(((__uint32_t)((rule)->
gid.gid[1])) & 0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p
(((__uint32_t)((rule)->gid.gid[1])) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)((rule)->gid.gid[1])) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)((rule)->gid
.gid[1])) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)((rule)->gid.gid[1])) >> 16)))) : __bswap32_var((rule
)->gid.gid[1])); MD5Update(ctx, (u_int8_t *) &(y), sizeof
(u_int32_t));} while (0)
;
836 PF_MD5_UPD(rule, gid.op)MD5Update(ctx, (u_int8_t *) &(rule)->gid.op, sizeof((rule
)->gid.op))
;
837 PF_MD5_UPD_HTONL(rule, rule_flag, y)do { (y) = (__builtin_constant_p((rule)->rule_flag) ? (((__uint32_t
)((__uint16_t)(__builtin_constant_p(((__uint32_t)((rule)->
rule_flag)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t
)((rule)->rule_flag)) & 0xffff)) << 8 | ((__uint16_t
)(((__uint32_t)((rule)->rule_flag)) & 0xffff)) >>
8) : __bswap16_var(((__uint32_t)((rule)->rule_flag)) &
0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p(
((__uint32_t)((rule)->rule_flag)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)((rule)->rule_flag)) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)((rule)->rule_flag
)) >> 16)) >> 8) : __bswap16_var(((__uint32_t)((rule
)->rule_flag)) >> 16)))) : __bswap32_var((rule)->
rule_flag)); MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t
));} while (0)
;
838 PF_MD5_UPD(rule, action)MD5Update(ctx, (u_int8_t *) &(rule)->action, sizeof((rule
)->action))
;
839 PF_MD5_UPD(rule, direction)MD5Update(ctx, (u_int8_t *) &(rule)->direction, sizeof
((rule)->direction))
;
840 PF_MD5_UPD(rule, af)MD5Update(ctx, (u_int8_t *) &(rule)->af, sizeof((rule)
->af))
;
841 PF_MD5_UPD(rule, quick)MD5Update(ctx, (u_int8_t *) &(rule)->quick, sizeof((rule
)->quick))
;
842 PF_MD5_UPD(rule, ifnot)MD5Update(ctx, (u_int8_t *) &(rule)->ifnot, sizeof((rule
)->ifnot))
;
843 PF_MD5_UPD(rule, match_tag_not)MD5Update(ctx, (u_int8_t *) &(rule)->match_tag_not, sizeof
((rule)->match_tag_not))
;
844 PF_MD5_UPD(rule, natpass)MD5Update(ctx, (u_int8_t *) &(rule)->natpass, sizeof((
rule)->natpass))
;
845 PF_MD5_UPD(rule, keep_state)MD5Update(ctx, (u_int8_t *) &(rule)->keep_state, sizeof
((rule)->keep_state))
;
846 PF_MD5_UPD(rule, proto)MD5Update(ctx, (u_int8_t *) &(rule)->proto, sizeof((rule
)->proto))
;
847 PF_MD5_UPD(rule, type)MD5Update(ctx, (u_int8_t *) &(rule)->type, sizeof((rule
)->type))
;
848 PF_MD5_UPD(rule, code)MD5Update(ctx, (u_int8_t *) &(rule)->code, sizeof((rule
)->code))
;
849 PF_MD5_UPD(rule, flags)MD5Update(ctx, (u_int8_t *) &(rule)->flags, sizeof((rule
)->flags))
;
850 PF_MD5_UPD(rule, flagset)MD5Update(ctx, (u_int8_t *) &(rule)->flagset, sizeof((
rule)->flagset))
;
851 PF_MD5_UPD(rule, allow_opts)MD5Update(ctx, (u_int8_t *) &(rule)->allow_opts, sizeof
((rule)->allow_opts))
;
852 PF_MD5_UPD(rule, rt)MD5Update(ctx, (u_int8_t *) &(rule)->rt, sizeof((rule)
->rt))
;
853 PF_MD5_UPD(rule, tos)MD5Update(ctx, (u_int8_t *) &(rule)->tos, sizeof((rule
)->tos))
;
854}
855
856static int
857pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
858{
859 struct pf_ruleset *rs;
860 struct pf_rule *rule, **old_array;
861 struct pf_rulequeue *old_rules;
862 int error;
863 u_int32_t old_rcount;
864
865 PF_RULES_WASSERT();
866
867 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
868 return (EINVAL22);
869 rs = pf_find_ruleset(anchor);
870 if (rs == NULL((void *)0) || !rs->rules[rs_num].inactive.open ||
871 ticket != rs->rules[rs_num].inactive.ticket)
872 return (EBUSY16);
873
874 /* Calculate checksum for the main ruleset */
875 if (rs == &pf_main_ruleset(pf_main_anchor).ruleset) {
876 error = pf_setup_pfsync_matching(rs);
877 if (error != 0)
878 return (error);
879 }
880
881 /* Swap rules, keep the old. */
882 old_rules = rs->rules[rs_num].active.ptr;
883 old_rcount = rs->rules[rs_num].active.rcount;
884 old_array = rs->rules[rs_num].active.ptr_array;
885
886 rs->rules[rs_num].active.ptr =
887 rs->rules[rs_num].inactive.ptr;
888 rs->rules[rs_num].active.ptr_array =
889 rs->rules[rs_num].inactive.ptr_array;
890 rs->rules[rs_num].active.rcount =
891 rs->rules[rs_num].inactive.rcount;
892 rs->rules[rs_num].inactive.ptr = old_rules;
893 rs->rules[rs_num].inactive.ptr_array = old_array;
894 rs->rules[rs_num].inactive.rcount = old_rcount;
895
896 rs->rules[rs_num].active.ticket =
897 rs->rules[rs_num].inactive.ticket;
898 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
899
900
901 /* Purge the old rule list. */
902 while ((rule = TAILQ_FIRST(old_rules)((old_rules)->tqh_first)) != NULL((void *)0))
903 pf_unlink_rule(old_rules, rule);
904 if (rs->rules[rs_num].inactive.ptr_array)
905 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
906 rs->rules[rs_num].inactive.ptr_array = NULL((void *)0);
907 rs->rules[rs_num].inactive.rcount = 0;
908 rs->rules[rs_num].inactive.open = 0;
909 pf_remove_if_empty_ruleset(rs);
910
911 return (0);
912}
913
914static int
915pf_setup_pfsync_matching(struct pf_ruleset *rs)
916{
917 MD5_CTX ctx;
918 struct pf_rule *rule;
919 int rs_cnt;
920 u_int8_t digest[PF_MD5_DIGEST_LENGTH16];
921
922 MD5Init(&ctx);
923 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
924 /* XXX PF_RULESET_SCRUB as well? */
925 if (rs_cnt == PF_RULESET_SCRUB)
926 continue;
927
928 if (rs->rules[rs_cnt].inactive.ptr_array)
929 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
930 rs->rules[rs_cnt].inactive.ptr_array = NULL((void *)0);
931
932 if (rs->rules[rs_cnt].inactive.rcount) {
933 rs->rules[rs_cnt].inactive.ptr_array =
934 malloc(sizeof(caddr_t) *
935 rs->rules[rs_cnt].inactive.rcount,
936 M_TEMP, M_NOWAIT0x0001);
937
938 if (!rs->rules[rs_cnt].inactive.ptr_array)
939 return (ENOMEM12);
940 }
941
942 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,for ((rule) = (((rs->rules[rs_cnt].inactive.ptr))->tqh_first
); (rule); (rule) = (((rule))->entries.tqe_next))
943 entries)for ((rule) = (((rs->rules[rs_cnt].inactive.ptr))->tqh_first
); (rule); (rule) = (((rule))->entries.tqe_next))
{
944 pf_hash_rule(&ctx, rule);
945 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
946 }
947 }
948
949 MD5Final(digest, &ctx);
950 memcpy(V_pf_status(pf_status).pf_chksum, digest, sizeof(V_pf_status(pf_status).pf_chksum));
951 return (0);
952}
953
954static int
955pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
956 sa_family_t af)
957{
958 int error = 0;
959
960 switch (addr->type) {
961 case PF_ADDR_TABLE:
962 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
963 if (addr->p.tbl == NULL((void *)0))
964 error = ENOMEM12;
965 break;
966 case PF_ADDR_DYNIFTL:
967 error = pfi_dynaddr_setup(addr, af);
968 break;
969 }
970
971 return (error);
972}
973
974static void
975pf_addr_copyout(struct pf_addr_wrap *addr)
976{
977
978 switch (addr->type) {
979 case PF_ADDR_DYNIFTL:
980 pfi_dynaddr_copyout(addr);
981 break;
982 case PF_ADDR_TABLE:
983 pf_tbladdr_copyout(addr);
984 break;
985 }
986}
987
988static int
989pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
990{
991 int error = 0;
992
993 /* XXX keep in sync with switch() below */
994 if (securelevel_gt(td->td_ucred, 2))
[1] Taking false branch
995 switch (cmd) {
996 case DIOCGETRULES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_rule)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((6))))
:
997 case DIOCGETRULE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_rule)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((7))))
:
998 case DIOCGETADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((53))))
:
999 case DIOCGETADDR((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((54))))
:
1000 case DIOCGETSTATE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_state)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((19))))
:
1001 case DIOCSETSTATUSIF((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_if)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((20))))
:
1002 case DIOCGETSTATUS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_status)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((21))))
:
1003 case DIOCCLRSTATUS((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((22))))
:
1004 case DIOCNATLOOK((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_natlook)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((23))))
:
1005 case DIOCSETDEBUG((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(u_int32_t
)) & ((1 << 13) - 1)) << 16) | ((('D')) <<
8) | ((24))))
:
1006 case DIOCGETSTATES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_states)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((25))))
:
1007 case DIOCGETTIMEOUT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_tm)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((30))))
:
1008 case DIOCCLRRULECTRS((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((38))))
:
1009 case DIOCGETLIMIT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_limit)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((39))))
:
1010 case DIOCGETALTQS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_altq)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((47))))
:
1011 case DIOCGETALTQ((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_altq)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((48))))
:
1012 case DIOCGETQSTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_qstats)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((50))))
:
1013 case DIOCGETRULESETS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_ruleset)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((58))))
:
1014 case DIOCGETRULESET((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_ruleset)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((59))))
:
1015 case DIOCRGETTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((63))))
:
1016 case DIOCRGETTSTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((64))))
:
1017 case DIOCRCLRTSTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((65))))
:
1018 case DIOCRCLRADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((66))))
:
1019 case DIOCRADDADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((67))))
:
1020 case DIOCRDELADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((68))))
:
1021 case DIOCRSETADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((69))))
:
1022 case DIOCRGETADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((70))))
:
1023 case DIOCRGETASTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((71))))
:
1024 case DIOCRCLRASTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((72))))
:
1025 case DIOCRTSTADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((73))))
:
1026 case DIOCOSFPGET((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_osfp_ioctl)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((80))))
:
1027 case DIOCGETSRCNODES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_src_nodes)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((84))))
:
1028 case DIOCCLRSRCNODES((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((85))))
:
1029 case DIOCIGETIFACES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_iface)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((87))))
:
1030 case DIOCGIFSPEED((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_ifspeed)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((92))))
:
1031 case DIOCSETIFFLAG((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_iface)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((89))))
:
1032 case DIOCCLRIFFLAG((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_iface)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((90))))
:
1033 break;
1034 case DIOCRCLRTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((60))))
:
1035 case DIOCRADDTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((61))))
:
1036 case DIOCRDELTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((62))))
:
1037 case DIOCRSETTFLAGS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((74))))
:
1038 if (((struct pfioc_table *)addr)->pfrio_flags &
1039 PFR_FLAG_DUMMY0x00000002)
1040 break; /* dummy operation ok */
1041 return (EPERM1);
1042 default:
1043 return (EPERM1);
1044 }
1045
1046 if (!(flags & FWRITE0x0002))
[2] Taking false branch
1047 	switch (cmd) {
1048 	case DIOCGETRULES:
1049 	case DIOCGETADDRS:
1050 	case DIOCGETADDR:
1051 	case DIOCGETSTATE:
1052 	case DIOCGETSTATUS:
1053 	case DIOCGETSTATES:
1054 	case DIOCGETTIMEOUT:
1055 	case DIOCGETLIMIT:
1056 	case DIOCGETALTQS:
1057 	case DIOCGETALTQ:
1058 	case DIOCGETQSTATS:
1059 	case DIOCGETRULESETS:
1060 	case DIOCGETRULESET:
1061 	case DIOCNATLOOK:
1062 	case DIOCRGETTABLES:
1063 	case DIOCRGETTSTATS:
1064 	case DIOCRGETADDRS:
1065 	case DIOCRGETASTATS:
1066 	case DIOCRTSTADDRS:
1067 	case DIOCOSFPGET:
1068 	case DIOCGETSRCNODES:
1069 	case DIOCIGETIFACES:
1070 	case DIOCGIFSPEED:
1071 		break;
1072 	case DIOCRCLRTABLES:
1073 	case DIOCRADDTABLES:
1074 	case DIOCRDELTABLES:
1075 	case DIOCRCLRTSTATS:
1076 	case DIOCRCLRADDRS:
1077 	case DIOCRADDADDRS:
1078 	case DIOCRDELADDRS:
1079 	case DIOCRSETADDRS:
1080 	case DIOCRSETTFLAGS:
1081 		if (((struct pfioc_table *)addr)->pfrio_flags &
1082 		    PFR_FLAG_DUMMY) {
1083 			flags |= FWRITE; /* need write lock for dummy */
1084 			break; /* dummy operation ok */
1085 		}
1086 		return (EACCES);
1087 	case DIOCGETRULE:
1088 		if (((struct pfioc_rule *)addr)->action ==
1089 		    PF_GET_CLR_CNTR)
1090 			return (EACCES);
1091 		break;
1092 	default:
1093 		return (EACCES);
1094 	}
1095
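
The expanded case labels shown throughout these switches are FreeBSD's _IO()/_IOWR() ioctl encoding from <sys/ioccom.h>: direction bits, the argument size masked with IOCPARM_MASK in bits 16-28, the group letter 'D' in bits 8-15, and the command number in the low byte. A minimal userland sketch of that encoding for one command (illustrative only; headers follow the usual pf userland consumers and may need local adjustment):

/* Sketch: rebuild DIOCGETRULES by hand and compare it with the macro. */
#include <sys/types.h>
#include <sys/ioccom.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <stdio.h>

int
main(void)
{
	/* _IOWR('D', 6, struct pfioc_rule), spelled out as in the listing. */
	unsigned long by_hand = (unsigned long)(IOC_INOUT |
	    ((sizeof(struct pfioc_rule) & IOCPARM_MASK) << 16) |
	    ('D' << 8) | 6);

	printf("%#lx %#lx\n", (unsigned long)DIOCGETRULES, by_hand);
	return (0);
}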
1096 CURVNET_SET(TD_TO_VNET(td));
1097
1098 switch (cmd) {
  3: Control jumps to 'case 3223864407:' at line 3223
1099 	case DIOCSTART:
1100 		sx_xlock(&pf_ioctl_lock);
1101 		if (V_pf_status.running)
1102 			error = EEXIST;
1103 		else {
1104 			int cpu;
1105
1106 			error = hook_pf();
1107 			if (error) {
1108 				DPFPRINTF(PF_DEBUG_MISC,
1109 				    ("pf: pfil registration failed\n"));
1110 				break;
1111 			}
1112 			V_pf_status.running = 1;
1113 			V_pf_status.since = time_second;
1114
1115 			CPU_FOREACH(cpu)
1116 				V_pf_stateid[cpu] = time_second;
1117
1118 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1119 		}
1120 		break;
1121
1122 	case DIOCSTOP:
1123 		sx_xlock(&pf_ioctl_lock);
1124 		if (!V_pf_status.running)
1125 			error = ENOENT;
1126 		else {
1127 			V_pf_status.running = 0;
1128 			error = dehook_pf();
1129 			if (error) {
1130 				V_pf_status.running = 1;
1131 				DPFPRINTF(PF_DEBUG_MISC,
1132 				    ("pf: pfil unregistration failed\n"));
1133 			}
1134 			V_pf_status.since = time_second;
1135 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1136 		}
1137 		break;
1138
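
For reference, the DIOCSTART and DIOCSTOP handlers above are reached from userland through the /dev/pf character device, much as pfctl -e and -d do. A rough caller (error handling is illustrative; EEXIST from DIOCSTART simply means pf was already enabled):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>

int
main(void)
{
	int dev = open("/dev/pf", O_RDWR);

	if (dev == -1)
		err(1, "open(/dev/pf)");
	if (ioctl(dev, DIOCSTART) == -1 && errno != EEXIST)
		err(1, "DIOCSTART");
	return (0);
}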
1139 case DIOCADDRULE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_rule)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((4))))
: {
1140 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1141 struct pf_ruleset *ruleset;
1142 struct pf_rule *rule, *tail;
1143 struct pf_pooladdr *pa;
1144 struct pfi_kif *kif = NULL((void *)0);
1145 int rs_num;
1146
1147 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE40) {
1148 error = EINVAL22;
1149 break;
1150 }
1151#ifndef INET1
1152 if (pr->rule.af == AF_INET2) {
1153 error = EAFNOSUPPORT47;
1154 break;
1155 }
1156#endif /* INET */
1157#ifndef INET61
1158 if (pr->rule.af == AF_INET628) {
1159 error = EAFNOSUPPORT47;
1160 break;
1161 }
1162#endif /* INET6 */
1163
1164 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK0x0002);
1165 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1166 if (rule->ifname[0])
1167 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK0x0002);
1168 rule->states_cur = counter_u64_alloc(M_WAITOK0x0002);
1169 rule->states_tot = counter_u64_alloc(M_WAITOK0x0002);
1170 rule->src_nodes = counter_u64_alloc(M_WAITOK0x0002);
1171 rule->cuid = td->td_ucred->cr_ruid;
1172 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1173 		TAILQ_INIT(&rule->rpool.list);
1174
1175#define ERROUT(x) { error = (x); goto DIOCADDRULE_error; }
1176
1177 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1177)
;
1178 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1179 ruleset = pf_find_ruleset(pr->anchor);
1180 if (ruleset == NULL((void *)0))
1181 ERROUT(EINVAL22);
1182 rs_num = pf_get_ruleset_number(pr->rule.action);
1183 if (rs_num >= PF_RULESET_MAX)
1184 ERROUT(EINVAL22);
1185 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1186 			DPFPRINTF(PF_DEBUG_MISC,
1187 			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
1188 			    ruleset->rules[rs_num].inactive.ticket));
1189 ERROUT(EBUSY16);
1190 }
1191 		if (pr->pool_ticket != V_ticket_pabuf) {
1192 			DPFPRINTF(PF_DEBUG_MISC,
1193 			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
1194 			    V_ticket_pabuf));
1195 ERROUT(EBUSY16);
1196 }
1197
1198 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1199 		    pf_rulequeue);
1200 if (tail)
1201 rule->nr = tail->nr + 1;
1202 else
1203 rule->nr = 0;
1204 if (rule->ifname[0]) {
1205 rule->kif = pfi_kif_attach(kif, rule->ifname);
1206 pfi_kif_ref(rule->kif);
1207 } else
1208 rule->kif = NULL((void *)0);
1209
1210 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
1211 error = EBUSY16;
1212
1213#ifdef ALTQ
1214 /* set queue IDs */
1215 if (rule->qname[0] != 0) {
1216 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1217 error = EBUSY16;
1218 else if (rule->pqname[0] != 0) {
1219 if ((rule->pqid =
1220 pf_qname2qid(rule->pqname)) == 0)
1221 error = EBUSY16;
1222 } else
1223 rule->pqid = rule->qid;
1224 }
1225#endif
1226 if (rule->tagname[0])
1227 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1228 error = EBUSY16;
1229 if (rule->match_tagname[0])
1230 if ((rule->match_tag =
1231 pf_tagname2tag(rule->match_tagname)) == 0)
1232 error = EBUSY16;
1233 if (rule->rt && !rule->direction)
1234 error = EINVAL22;
1235 if (!rule->log)
1236 rule->logif = 0;
1237 if (rule->logif >= PFLOGIFS_MAX16)
1238 error = EINVAL22;
1239 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1240 error = ENOMEM12;
1241 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1242 error = ENOMEM12;
1243 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1244 error = EINVAL22;
1245 if (rule->scrub_flags & PFSTATE_SETPRIO0x0200 &&
1246 (rule->set_prio[0] > PF_PRIO_MAX7 ||
1247 rule->set_prio[1] > PF_PRIO_MAX7))
1248 error = EINVAL22;
1249 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)for ((pa) = (((&(pf_pabuf)))->tqh_first); (pa); (pa) =
(((pa))->entries.tqe_next))
1250 if (pa->addr.type == PF_ADDR_TABLE) {
1251 pa->addr.p.tbl = pfr_attach_table(ruleset,
1252 pa->addr.v.tblname);
1253 if (pa->addr.p.tbl == NULL((void *)0))
1254 error = ENOMEM12;
1255 }
1256
1257 rule->overload_tbl = NULL((void *)0);
1258 if (rule->overload_tblname[0]) {
1259 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1260 rule->overload_tblname)) == NULL((void *)0))
1261 error = EINVAL22;
1262 else
1263 rule->overload_tbl->pfrkt_flagspfrkt_ts.pfrts_t.pfrt_flags |=
1264 PFR_TFLAG_ACTIVE0x00000004;
1265 }
1266
1267 pf_mv_pool(&V_pf_pabuf(pf_pabuf), &rule->rpool.list);
1268 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1269 (rule->action == PF_BINAT)) && rule->anchor == NULL((void *)0)) ||
1270 (rule->rt > PF_FASTROUTE)) &&
1271 (TAILQ_FIRST(&rule->rpool.list)((&rule->rpool.list)->tqh_first) == NULL((void *)0)))
1272 error = EINVAL22;
1273
1274 if (error) {
1275 pf_free_rule(rule);
1276 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1276)
;
1277 break;
1278 }
1279
1280 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list)((&rule->rpool.list)->tqh_first);
1281 rule->evaluations = rule->packets[0] = rule->packets[1] =
1282 rule->bytes[0] = rule->bytes[1] = 0;
1283 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1284 		    rule, entries);
1285 ruleset->rules[rs_num].inactive.rcount++;
1286 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1286)
;
1287 break;
1288
1289#undef ERROUT
1290DIOCADDRULE_error:
1291 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1291)
;
1292 counter_u64_free(rule->states_cur);
1293 counter_u64_free(rule->states_tot);
1294 counter_u64_free(rule->src_nodes);
1295 free(rule, M_PFRULE);
1296 if (kif)
1297 free(kif, PFI_MTYPE);
1298 break;
1299 }
1300
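
The DIOCADDRULE case above (and DIOCCHANGERULE below) relies on a locally #defined ERROUT() macro so that every failure after PF_RULES_WLOCK() funnels through one cleanup label that releases the lock and frees the half-built rule exactly once. A self-contained sketch of that idiom; the names here (struct resource, res_alloc(), validate_a(), ...) are placeholders, not pf functions:

#include <errno.h>
#include <stdlib.h>

struct resource { int dummy; };

static struct resource *res_alloc(void) { return (calloc(1, sizeof(struct resource))); }
static void res_free(struct resource *r) { free(r); }
static void lock_acquire(void) { }
static void lock_release(void) { }
static int  validate_a(void) { return (1); }
static int  validate_b(void) { return (1); }
static void commit(struct resource *r) { (void)r; }

static int
do_thing(void)
{
	struct resource *res = res_alloc();
	int error = 0;

#define	ERROUT(x)	{ error = (x); goto fail; }

	lock_acquire();
	if (!validate_a())
		ERROUT(EINVAL);		/* every ERROUT() lands on fail: */
	if (!validate_b())
		ERROUT(EBUSY);
	commit(res);
	lock_release();
	return (0);

#undef	ERROUT
fail:
	lock_release();			/* unlock and free exactly once */
	res_free(res);
	return (error);
}

int
main(void)
{
	return (do_thing());
}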
1301 case DIOCGETRULES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_rule)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((6))))
: {
1302 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1303 struct pf_ruleset *ruleset;
1304 struct pf_rule *tail;
1305 int rs_num;
1306
1307 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1307)
;
1308 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1309 ruleset = pf_find_ruleset(pr->anchor);
1310 if (ruleset == NULL((void *)0)) {
1311 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1311)
;
1312 error = EINVAL22;
1313 break;
1314 }
1315 rs_num = pf_get_ruleset_number(pr->rule.action);
1316 if (rs_num >= PF_RULESET_MAX) {
1317 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1317)
;
1318 error = EINVAL22;
1319 break;
1320 }
1321 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1322 		    pf_rulequeue);
1323 if (tail)
1324 pr->nr = tail->nr + 1;
1325 else
1326 pr->nr = 0;
1327 pr->ticket = ruleset->rules[rs_num].active.ticket;
1328 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1328)
;
1329 break;
1330 }
1331
1332 case DIOCGETRULE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_rule)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((7))))
: {
1333 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1334 struct pf_ruleset *ruleset;
1335 struct pf_rule *rule;
1336 int rs_num, i;
1337
1338 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1338)
;
1339 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1340 ruleset = pf_find_ruleset(pr->anchor);
1341 if (ruleset == NULL((void *)0)) {
1342 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1342)
;
1343 error = EINVAL22;
1344 break;
1345 }
1346 rs_num = pf_get_ruleset_number(pr->rule.action);
1347 if (rs_num >= PF_RULESET_MAX) {
1348 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1348)
;
1349 error = EINVAL22;
1350 break;
1351 }
1352 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1353 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1353)
;
1354 error = EBUSY16;
1355 break;
1356 }
1357 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr)((ruleset->rules[rs_num].active.ptr)->tqh_first);
1358 while ((rule != NULL((void *)0)) && (rule->nr != pr->nr))
1359 rule = TAILQ_NEXT(rule, entries)((rule)->entries.tqe_next);
1360 if (rule == NULL((void *)0)) {
1361 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1361)
;
1362 error = EBUSY16;
1363 break;
1364 }
1365 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1366 pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
1367 pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
1368 pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
1369 if (pf_anchor_copyout(ruleset, rule, pr)) {
1370 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1370)
;
1371 error = EBUSY16;
1372 break;
1373 }
1374 pf_addr_copyout(&pr->rule.src.addr);
1375 pf_addr_copyout(&pr->rule.dst.addr);
1376 for (i = 0; i < PF_SKIP_COUNT8; ++i)
1377 if (rule->skip[i].ptr == NULL((void *)0))
1378 pr->rule.skip[i].nr = -1;
1379 else
1380 pr->rule.skip[i].nr =
1381 rule->skip[i].ptr->nr;
1382
1383 if (pr->action == PF_GET_CLR_CNTR) {
1384 rule->evaluations = 0;
1385 rule->packets[0] = rule->packets[1] = 0;
1386 rule->bytes[0] = rule->bytes[1] = 0;
1387 counter_u64_zero(rule->states_tot);
1388 }
1389 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1389)
;
1390 break;
1391 }
1392
1393 case DIOCCHANGERULE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_rule)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((26))))
: {
1394 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1395 struct pf_ruleset *ruleset;
1396 struct pf_rule *oldrule = NULL((void *)0), *newrule = NULL((void *)0);
1397 struct pfi_kif *kif = NULL((void *)0);
1398 struct pf_pooladdr *pa;
1399 u_int32_t nr = 0;
1400 int rs_num;
1401
1402 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1403 pcr->action > PF_CHANGE_GET_TICKET) {
1404 error = EINVAL22;
1405 break;
1406 }
1407 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE40) {
1408 error = EINVAL22;
1409 break;
1410 }
1411
1412 if (pcr->action != PF_CHANGE_REMOVE) {
1413#ifndef INET1
1414 if (pcr->rule.af == AF_INET2) {
1415 error = EAFNOSUPPORT47;
1416 break;
1417 }
1418#endif /* INET */
1419#ifndef INET61
1420 if (pcr->rule.af == AF_INET628) {
1421 error = EAFNOSUPPORT47;
1422 break;
1423 }
1424#endif /* INET6 */
1425 newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK0x0002);
1426 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1427 if (newrule->ifname[0])
1428 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK0x0002);
1429 newrule->states_cur = counter_u64_alloc(M_WAITOK0x0002);
1430 newrule->states_tot = counter_u64_alloc(M_WAITOK0x0002);
1431 newrule->src_nodes = counter_u64_alloc(M_WAITOK0x0002);
1432 newrule->cuid = td->td_ucred->cr_ruid;
1433 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
1434 			TAILQ_INIT(&newrule->rpool.list);
1435 }
1436
1437#define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
1438
1439 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1439)
;
1440 if (!(pcr->action == PF_CHANGE_REMOVE ||
1441 pcr->action == PF_CHANGE_GET_TICKET) &&
1442 pcr->pool_ticket != V_ticket_pabuf(ticket_pabuf))
1443 ERROUT(EBUSY16);
1444
1445 ruleset = pf_find_ruleset(pcr->anchor);
1446 if (ruleset == NULL((void *)0))
1447 ERROUT(EINVAL22);
1448
1449 rs_num = pf_get_ruleset_number(pcr->rule.action);
1450 if (rs_num >= PF_RULESET_MAX)
1451 ERROUT(EINVAL22);
1452
1453 if (pcr->action == PF_CHANGE_GET_TICKET) {
1454 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1455 ERROUT(0);
1456 } else if (pcr->ticket !=
1457 ruleset->rules[rs_num].active.ticket)
1458 ERROUT(EINVAL22);
1459
1460 if (pcr->action != PF_CHANGE_REMOVE) {
1461 if (newrule->ifname[0]) {
1462 newrule->kif = pfi_kif_attach(kif,
1463 newrule->ifname);
1464 pfi_kif_ref(newrule->kif);
1465 } else
1466 newrule->kif = NULL((void *)0);
1467
1468 if (newrule->rtableid > 0 &&
1469 newrule->rtableid >= rt_numfibs)
1470 error = EBUSY16;
1471
1472#ifdef ALTQ
1473 /* set queue IDs */
1474 if (newrule->qname[0] != 0) {
1475 if ((newrule->qid =
1476 pf_qname2qid(newrule->qname)) == 0)
1477 error = EBUSY16;
1478 else if (newrule->pqname[0] != 0) {
1479 if ((newrule->pqid =
1480 pf_qname2qid(newrule->pqname)) == 0)
1481 error = EBUSY16;
1482 } else
1483 newrule->pqid = newrule->qid;
1484 }
1485#endif /* ALTQ */
1486 if (newrule->tagname[0])
1487 if ((newrule->tag =
1488 pf_tagname2tag(newrule->tagname)) == 0)
1489 error = EBUSY16;
1490 if (newrule->match_tagname[0])
1491 if ((newrule->match_tag = pf_tagname2tag(
1492 newrule->match_tagname)) == 0)
1493 error = EBUSY16;
1494 if (newrule->rt && !newrule->direction)
1495 error = EINVAL22;
1496 if (!newrule->log)
1497 newrule->logif = 0;
1498 if (newrule->logif >= PFLOGIFS_MAX16)
1499 error = EINVAL22;
1500 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1501 error = ENOMEM12;
1502 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1503 error = ENOMEM12;
1504 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1505 error = EINVAL22;
1506 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)for ((pa) = (((&(pf_pabuf)))->tqh_first); (pa); (pa) =
(((pa))->entries.tqe_next))
1507 if (pa->addr.type == PF_ADDR_TABLE) {
1508 pa->addr.p.tbl =
1509 pfr_attach_table(ruleset,
1510 pa->addr.v.tblname);
1511 if (pa->addr.p.tbl == NULL((void *)0))
1512 error = ENOMEM12;
1513 }
1514
1515 newrule->overload_tbl = NULL((void *)0);
1516 if (newrule->overload_tblname[0]) {
1517 if ((newrule->overload_tbl = pfr_attach_table(
1518 ruleset, newrule->overload_tblname)) ==
1519 NULL((void *)0))
1520 error = EINVAL22;
1521 else
1522 newrule->overload_tbl->pfrkt_flagspfrkt_ts.pfrts_t.pfrt_flags |=
1523 PFR_TFLAG_ACTIVE0x00000004;
1524 }
1525
1526 pf_mv_pool(&V_pf_pabuf(pf_pabuf), &newrule->rpool.list);
1527 if (((((newrule->action == PF_NAT) ||
1528 (newrule->action == PF_RDR) ||
1529 (newrule->action == PF_BINAT) ||
1530 (newrule->rt > PF_FASTROUTE)) &&
1531 !newrule->anchor)) &&
1532 (TAILQ_FIRST(&newrule->rpool.list)((&newrule->rpool.list)->tqh_first) == NULL((void *)0)))
1533 error = EINVAL22;
1534
1535 if (error) {
1536 pf_free_rule(newrule);
1537 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1537)
;
1538 break;
1539 }
1540
1541 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list)((&newrule->rpool.list)->tqh_first);
1542 newrule->evaluations = 0;
1543 newrule->packets[0] = newrule->packets[1] = 0;
1544 newrule->bytes[0] = newrule->bytes[1] = 0;
1545 }
1546 pf_empty_pool(&V_pf_pabuf(pf_pabuf));
1547
1548 if (pcr->action == PF_CHANGE_ADD_HEAD)
1549 			oldrule = TAILQ_FIRST(
1550 			    ruleset->rules[rs_num].active.ptr);
1551 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1552 			oldrule = TAILQ_LAST(
1553 			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1554 		else {
1555 			oldrule = TAILQ_FIRST(
1556 			    ruleset->rules[rs_num].active.ptr);
1557 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1558 				oldrule = TAILQ_NEXT(oldrule, entries);
1559 if (oldrule == NULL((void *)0)) {
1560 if (newrule != NULL((void *)0))
1561 pf_free_rule(newrule);
1562 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1562)
;
1563 error = EINVAL22;
1564 break;
1565 }
1566 }
1567
1568 if (pcr->action == PF_CHANGE_REMOVE) {
1569 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
1570 oldrule);
1571 ruleset->rules[rs_num].active.rcount--;
1572 } else {
1573 			if (oldrule == NULL)
1574 				TAILQ_INSERT_TAIL(
1575 				    ruleset->rules[rs_num].active.ptr,
1576 				    newrule, entries);
1577 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1578 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1579 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1580 			else
1581 				TAILQ_INSERT_AFTER(
1582 				    ruleset->rules[rs_num].active.ptr,
1583 				    oldrule, newrule, entries);
1584 ruleset->rules[rs_num].active.rcount++;
1585 }
1586
1587 nr = 0;
1588 		TAILQ_FOREACH(oldrule,
1589 		    ruleset->rules[rs_num].active.ptr, entries)
1590 oldrule->nr = nr++;
1591
1592 ruleset->rules[rs_num].active.ticket++;
1593
1594 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1595 pf_remove_if_empty_ruleset(ruleset);
1596
1597 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1597)
;
1598 break;
1599
1600#undef ERROUT
1601DIOCCHANGERULE_error:
1602 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1602)
;
1603 if (newrule != NULL((void *)0)) {
1604 counter_u64_free(newrule->states_cur);
1605 counter_u64_free(newrule->states_tot);
1606 counter_u64_free(newrule->src_nodes);
1607 free(newrule, M_PFRULE);
1608 }
1609 if (kif != NULL((void *)0))
1610 free(kif, PFI_MTYPE);
1611 break;
1612 }
1613
1614 case DIOCCLRSTATES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_state_kill)) & ((1 << 13) - 1)) << 16)
| ((('D')) << 8) | ((18))))
: {
1615 struct pf_state *s;
1616 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1617 u_int i, killed = 0;
1618
1619 for (i = 0; i <= pf_hashmask; i++) {
1620 struct pf_idhash *ih = &V_pf_idhash(pf_idhash)[i];
1621
1622relock_DIOCCLRSTATES:
1623 			PF_HASHROW_LOCK(ih);
1624 			LIST_FOREACH(s, &ih->states, entry)
1625 				if (!psk->psk_ifname[0] ||
1626 				    !strcmp(psk->psk_ifname,
1627 				    s->kif->pfik_name)) {
1628 					/*
1629 					 * Don't send out individual
1630 					 * delete messages.
1631 					 */
1632 					s->state_flags |= PFSTATE_NOSYNC;
1633 					pf_unlink_state(s, PF_ENTER_LOCKED);
1634 					killed++;
1635 					goto relock_DIOCCLRSTATES;
1636 				}
1637 			PF_HASHROW_UNLOCK(ih);
1638 }
1639 psk->psk_killed = killed;
1640 if (pfsync_clear_states_ptr != NULL((void *)0))
1641 pfsync_clear_states_ptr(V_pf_status(pf_status).hostid, psk->psk_ifname);
1642 break;
1643 }
1644
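
Both DIOCCLRSTATES above and DIOCKILLSTATES below restart the hash-row walk from the head after each pf_unlink_state() call instead of trusting the saved iterator, since the unlink path can drop the row lock and the cached next pointer is then no longer safe. A userland stand-in for that restart idiom using <sys/queue.h>; purge_matching() and struct node are made up for the example and there is no real locking here:

#include <sys/queue.h>
#include <stdlib.h>

struct node {
	int		 key;
	LIST_ENTRY(node) entry;
};
LIST_HEAD(nodehead, node);

static unsigned int
purge_matching(struct nodehead *head, int key)
{
	struct node *n;
	unsigned int killed = 0;

restart:
	LIST_FOREACH(n, head, entry)
		if (n->key == key) {
			LIST_REMOVE(n, entry);
			free(n);		/* cursor is now dead ... */
			killed++;
			goto restart;		/* ... so rescan from the head */
		}
	return (killed);
}

int
main(void)
{
	struct nodehead head = LIST_HEAD_INITIALIZER(head);
	struct node *n = calloc(1, sizeof(*n));

	n->key = 42;
	LIST_INSERT_HEAD(&head, n, entry);
	return (purge_matching(&head, 42) == 1 ? 0 : 1);
}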
1645 case DIOCKILLSTATES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_state_kill)) & ((1 << 13) - 1)) << 16)
| ((('D')) << 8) | ((41))))
: {
1646 struct pf_state *s;
1647 struct pf_state_key *sk;
1648 struct pf_addr *srcaddr, *dstaddr;
1649 u_int16_t srcport, dstport;
1650 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1651 u_int i, killed = 0;
1652
1653 if (psk->psk_pfcmp.id) {
1654 if (psk->psk_pfcmp.creatorid == 0)
1655 psk->psk_pfcmp.creatorid = V_pf_status(pf_status).hostid;
1656 if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
1657 psk->psk_pfcmp.creatorid))) {
1658 pf_unlink_state(s, PF_ENTER_LOCKED0x00000001);
1659 psk->psk_killed = 1;
1660 }
1661 break;
1662 }
1663
1664 for (i = 0; i <= pf_hashmask; i++) {
1665 struct pf_idhash *ih = &V_pf_idhash(pf_idhash)[i];
1666
1667relock_DIOCKILLSTATES:
1668 PF_HASHROW_LOCK(ih)__mtx_lock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (1668))
;
1669 LIST_FOREACH(s, &ih->states, entry)for ((s) = (((&ih->states))->lh_first); (s); (s) = (
((s))->entry.le_next))
{
1670 sk = s->key[PF_SK_WIRE];
1671 if (s->direction == PF_OUT) {
1672 srcaddr = &sk->addr[1];
1673 dstaddr = &sk->addr[0];
1674 srcport = sk->port[1];
1675 dstport = sk->port[0];
1676 } else {
1677 srcaddr = &sk->addr[0];
1678 dstaddr = &sk->addr[1];
1679 srcport = sk->port[0];
1680 dstport = sk->port[1];
1681 }
1682
1683 				if ((!psk->psk_af || sk->af == psk->psk_af)
1684 				    && (!psk->psk_proto || psk->psk_proto ==
1685 				    sk->proto) &&
1686 				    PF_MATCHA(psk->psk_src.neg,
1687 				    &psk->psk_src.addr.v.a.addr,
1688 				    &psk->psk_src.addr.v.a.mask,
1689 				    srcaddr, sk->af) &&
1690 				    PF_MATCHA(psk->psk_dst.neg,
1691 				    &psk->psk_dst.addr.v.a.addr,
1692 				    &psk->psk_dst.addr.v.a.mask,
1693 				    dstaddr, sk->af) &&
1694 				    (psk->psk_src.port_op == 0 ||
1695 				    pf_match_port(psk->psk_src.port_op,
1696 				    psk->psk_src.port[0], psk->psk_src.port[1],
1697 				    srcport)) &&
1698 				    (psk->psk_dst.port_op == 0 ||
1699 				    pf_match_port(psk->psk_dst.port_op,
1700 				    psk->psk_dst.port[0], psk->psk_dst.port[1],
1701 				    dstport)) &&
1702 				    (!psk->psk_label[0] ||
1703 				    (s->rule.ptr->label[0] &&
1704 				    !strcmp(psk->psk_label,
1705 				    s->rule.ptr->label))) &&
1706 				    (!psk->psk_ifname[0] ||
1707 				    !strcmp(psk->psk_ifname,
1708 				    s->kif->pfik_name))) {
1709 pf_unlink_state(s, PF_ENTER_LOCKED0x00000001);
1710 killed++;
1711 goto relock_DIOCKILLSTATES;
1712 }
1713 }
1714 PF_HASHROW_UNLOCK(ih)__mtx_unlock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (1714))
;
1715 }
1716 psk->psk_killed = killed;
1717 break;
1718 }
1719
1720 case DIOCADDSTATE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_state)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((37))))
: {
1721 struct pfioc_state *ps = (struct pfioc_state *)addr;
1722 struct pfsync_state *sp = &ps->state;
1723
1724 if (sp->timeout >= PFTM_MAX) {
1725 error = EINVAL22;
1726 break;
1727 }
1728 if (pfsync_state_import_ptr != NULL((void *)0)) {
1729 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1729)
;
1730 error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL0x01);
1731 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1731)
;
1732 } else
1733 error = EOPNOTSUPP45;
1734 break;
1735 }
1736
1737 case DIOCGETSTATE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_state)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((19))))
: {
1738 struct pfioc_state *ps = (struct pfioc_state *)addr;
1739 struct pf_state *s;
1740
1741 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
1742 if (s == NULL((void *)0)) {
1743 error = ENOENT2;
1744 break;
1745 }
1746
1747 pfsync_state_export(&ps->state, s);
1748 		PF_STATE_UNLOCK(s);
1749 break;
1750 }
1751
1752 case DIOCGETSTATES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_states)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((25))))
: {
1753 struct pfioc_states *ps = (struct pfioc_states *)addr;
1754 struct pf_state *s;
1755 struct pfsync_state *pstore, *p;
1756 int i, nr;
1757
1758 if (ps->ps_len == 0) {
1759 nr = uma_zone_get_cur(V_pf_state_z(pf_state_z));
1760 ps->ps_len = sizeof(struct pfsync_state) * nr;
1761 break;
1762 }
1763
1764 p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK0x0002);
1765 nr = 0;
1766
1767 for (i = 0; i <= pf_hashmask; i++) {
1768 struct pf_idhash *ih = &V_pf_idhash(pf_idhash)[i];
1769
1770 			PF_HASHROW_LOCK(ih);
1771 			LIST_FOREACH(s, &ih->states, entry) {
1772
1773 				if (s->timeout == PFTM_UNLINKED)
1774 					continue;
1775
1776 				if ((nr+1) * sizeof(*p) > ps->ps_len) {
1777 					PF_HASHROW_UNLOCK(ih);
1778 					goto DIOCGETSTATES_full;
1779 				}
1780 				pfsync_state_export(p, s);
1781 				p++;
1782 				nr++;
1783 			}
1784 			PF_HASHROW_UNLOCK(ih);
1785 		}
1786DIOCGETSTATES_full:
1787 		error = copyout(pstore, ps->ps_states,
1788 sizeof(struct pfsync_state) * nr);
1789 if (error) {
1790 free(pstore, M_TEMP);
1791 break;
1792 }
1793 ps->ps_len = sizeof(struct pfsync_state) * nr;
1794 free(pstore, M_TEMP);
1795
1796 break;
1797 }
1798
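
DIOCGETSTATES above implements a two-step size handshake: a call with ps_len == 0 only reports the space the current state table would need, while a second call copies out at most ps_len bytes and then rewrites ps_len to the amount actually used. A rough userland caller, using the member names as they appear in the listing (ps_len, ps_states); headers may need local adjustment:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	struct pfioc_states ps;
	int dev = open("/dev/pf", O_RDONLY);

	if (dev == -1)
		err(1, "open(/dev/pf)");

	memset(&ps, 0, sizeof(ps));		/* ps_len == 0: size probe only */
	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)
		err(1, "DIOCGETSTATES (probe)");

	ps.ps_states = malloc(ps.ps_len);
	if (ps.ps_states == NULL)
		err(1, "malloc");
	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)	/* copies out the states */
		err(1, "DIOCGETSTATES");

	printf("%zu states\n", ps.ps_len / sizeof(struct pfsync_state));
	return (0);
}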
1799 case DIOCGETSTATUS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_status)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((21))))
: {
1800 struct pf_status *s = (struct pf_status *)addr;
1801
1802 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1802)
;
1803 s->running = V_pf_status(pf_status).running;
1804 s->since = V_pf_status(pf_status).since;
1805 s->debug = V_pf_status(pf_status).debug;
1806 s->hostid = V_pf_status(pf_status).hostid;
1807 s->states = V_pf_status(pf_status).states;
1808 s->src_nodes = V_pf_status(pf_status).src_nodes;
1809
1810 for (int i = 0; i < PFRES_MAX16; i++)
1811 s->counters[i] =
1812 counter_u64_fetch(V_pf_status(pf_status).counters[i]);
1813 for (int i = 0; i < LCNT_MAX7; i++)
1814 s->lcounters[i] =
1815 counter_u64_fetch(V_pf_status(pf_status).lcounters[i]);
1816 for (int i = 0; i < FCNT_MAX3; i++)
1817 s->fcounters[i] =
1818 counter_u64_fetch(V_pf_status(pf_status).fcounters[i]);
1819 for (int i = 0; i < SCNT_MAX3; i++)
1820 s->scounters[i] =
1821 counter_u64_fetch(V_pf_status(pf_status).scounters[i]);
1822
1823 bcopy(V_pf_status(pf_status).ifname, s->ifname, IFNAMSIZ16);
1824 bcopy(V_pf_status(pf_status).pf_chksum, s->pf_chksum,
1825 PF_MD5_DIGEST_LENGTH16);
1826
1827 pfi_update_status(s->ifname, s);
1828 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1828)
;
1829 break;
1830 }
1831
1832 case DIOCSETSTATUSIF((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_if)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((20))))
: {
1833 struct pfioc_if *pi = (struct pfioc_if *)addr;
1834
1835 if (pi->ifname[0] == 0) {
1836 bzero(V_pf_status(pf_status).ifname, IFNAMSIZ16);
1837 break;
1838 }
1839 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1839)
;
1840 strlcpy(V_pf_status(pf_status).ifname, pi->ifname, IFNAMSIZ16);
1841 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1841)
;
1842 break;
1843 }
1844
1845 case DIOCCLRSTATUS((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((22))))
: {
1846 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1846)
;
1847 for (int i = 0; i < PFRES_MAX16; i++)
1848 counter_u64_zero(V_pf_status(pf_status).counters[i]);
1849 for (int i = 0; i < FCNT_MAX3; i++)
1850 counter_u64_zero(V_pf_status(pf_status).fcounters[i]);
1851 for (int i = 0; i < SCNT_MAX3; i++)
1852 counter_u64_zero(V_pf_status(pf_status).scounters[i]);
1853 V_pf_status(pf_status).since = time_second;
1854 if (*V_pf_status(pf_status).ifname)
1855 pfi_update_status(V_pf_status(pf_status).ifname, NULL((void *)0));
1856 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1856)
;
1857 break;
1858 }
1859
1860 case DIOCNATLOOK((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_natlook)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((23))))
: {
1861 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1862 struct pf_state_key *sk;
1863 struct pf_state *state;
1864 struct pf_state_key_cmp key;
1865 int m = 0, direction = pnl->direction;
1866 int sidx, didx;
1867
1868 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1869 sidx = (direction == PF_IN) ? 1 : 0;
1870 didx = (direction == PF_IN) ? 0 : 1;
1871
1872 		if (!pnl->proto ||
1873 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1874 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1875 		    ((pnl->proto == IPPROTO_TCP ||
1876 		    pnl->proto == IPPROTO_UDP) &&
1877 		    (!pnl->dport || !pnl->sport)))
1878 			error = EINVAL;
1879 		else {
1880 			bzero(&key, sizeof(key));
1881 			key.af = pnl->af;
1882 			key.proto = pnl->proto;
1883 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1884 			key.port[sidx] = pnl->sport;
1885 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1886 			key.port[didx] = pnl->dport;
1887
1888 			state = pf_find_state_all(&key, direction, &m);
1889
1890 			if (m > 1)
1891 				error = E2BIG;	/* more than one state */
1892 			else if (state != NULL) {
1893 				/* XXXGL: not locked read */
1894 				sk = state->key[sidx];
1895 				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1896 				pnl->rsport = sk->port[sidx];
1897 				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1898 				pnl->rdport = sk->port[didx];
1899 } else
1900 error = ENOENT2;
1901 }
1902 break;
1903 }
1904
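
A DIOCNATLOOK caller, in the spirit of ftp-proxy, has to satisfy exactly the checks at the top of the case above: af, proto and both addresses must be non-zero, plus both ports for TCP and UDP. A hedged sketch; the addresses, ports and PF_OUT direction are only example values:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/pfvar.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>

int
main(void)
{
	struct pfioc_natlook nl;
	int dev = open("/dev/pf", O_RDONLY);

	if (dev == -1)
		err(1, "open(/dev/pf)");

	memset(&nl, 0, sizeof(nl));
	nl.af = AF_INET;
	nl.proto = IPPROTO_TCP;
	nl.direction = PF_OUT;
	inet_pton(AF_INET, "192.0.2.10", &nl.saddr.v4);
	inet_pton(AF_INET, "198.51.100.1", &nl.daddr.v4);
	nl.sport = htons(54321);
	nl.dport = htons(80);

	if (ioctl(dev, DIOCNATLOOK, &nl) == -1)
		err(1, "DIOCNATLOOK");
	/*
	 * nl.rsaddr/nl.rsport and nl.rdaddr/nl.rdport now hold the state's
	 * translated side, as filled in by the handler above.
	 */
	return (0);
}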
1905 case DIOCSETTIMEOUT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_tm)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((29))))
: {
1906 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1907 int old;
1908
1909 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1910 pt->seconds < 0) {
1911 error = EINVAL22;
1912 break;
1913 }
1914 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1914)
;
1915 old = V_pf_default_rule(pf_default_rule).timeout[pt->timeout];
1916 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1917 pt->seconds = 1;
1918 V_pf_default_rule(pf_default_rule).timeout[pt->timeout] = pt->seconds;
1919 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1920 wakeup(pf_purge_thread);
1921 pt->seconds = old;
1922 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1922)
;
1923 break;
1924 }
1925
1926 case DIOCGETTIMEOUT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_tm)) & ((1 << 13) - 1)) << 16) | ((('D'
)) << 8) | ((30))))
: {
1927 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1928
1929 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1930 error = EINVAL22;
1931 break;
1932 }
1933 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1933)
;
1934 pt->seconds = V_pf_default_rule(pf_default_rule).timeout[pt->timeout];
1935 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1935)
;
1936 break;
1937 }
1938
1939 case DIOCGETLIMIT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_limit)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((39))))
: {
1940 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1941
1942 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1943 error = EINVAL22;
1944 break;
1945 }
1946 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1946)
;
1947 pl->limit = V_pf_limits(pf_limits)[pl->index].limit;
1948 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1948)
;
1949 break;
1950 }
1951
1952 case DIOCSETLIMIT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_limit)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((40))))
: {
1953 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1954 int old_limit;
1955
1956 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1956)
;
1957 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1958 V_pf_limits(pf_limits)[pl->index].zone == NULL((void *)0)) {
1959 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1959)
;
1960 error = EINVAL22;
1961 break;
1962 }
1963 uma_zone_set_max(V_pf_limits(pf_limits)[pl->index].zone, pl->limit);
1964 old_limit = V_pf_limits(pf_limits)[pl->index].limit;
1965 V_pf_limits(pf_limits)[pl->index].limit = pl->limit;
1966 pl->limit = old_limit;
1967 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1967)
;
1968 break;
1969 }
1970
1971 case DIOCSETDEBUG((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(u_int32_t
)) & ((1 << 13) - 1)) << 16) | ((('D')) <<
8) | ((24))))
: {
1972 u_int32_t *level = (u_int32_t *)addr;
1973
1974 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1974)
;
1975 V_pf_status(pf_status).debug = *level;
1976 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1976)
;
1977 break;
1978 }
1979
1980 case DIOCCLRRULECTRS((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((38))))
: {
1981 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1982 struct pf_ruleset *ruleset = &pf_main_ruleset(pf_main_anchor).ruleset;
1983 struct pf_rule *rule;
1984
1985 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1985)
;
1986 TAILQ_FOREACH(rule,for ((rule) = (((ruleset->rules[PF_RULESET_FILTER].active.
ptr))->tqh_first); (rule); (rule) = (((rule))->entries.
tqe_next))
1987 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)for ((rule) = (((ruleset->rules[PF_RULESET_FILTER].active.
ptr))->tqh_first); (rule); (rule) = (((rule))->entries.
tqe_next))
{
1988 rule->evaluations = 0;
1989 rule->packets[0] = rule->packets[1] = 0;
1990 rule->bytes[0] = rule->bytes[1] = 0;
1991 }
1992 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 1992)
;
1993 break;
1994 }
1995
1996 case DIOCGIFSPEED((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_ifspeed)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((92))))
: {
1997 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
1998 struct pf_ifspeed ps;
1999 struct ifnet *ifp;
2000
2001 if (psp->ifname[0] != 0) {
2002 /* Can we completely trust user-land? */
2003 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ16);
2004 ifp = ifunit(ps.ifname);
2005 if (ifp != NULL((void *)0))
2006 psp->baudrate = ifp->if_baudrate;
2007 else
2008 error = EINVAL22;
2009 } else
2010 error = EINVAL22;
2011 break;
2012 }
2013
2014#ifdef ALTQ
2015 case DIOCSTARTALTQ((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((42))))
: {
2016 struct pf_altq *altq;
2017
2018 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2018)
;
2019 /* enable all altq interfaces on active list */
2020 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)for ((altq) = ((((pf_altqs_active)))->tqh_first); (altq); (
altq) = (((altq))->entries.tqe_next))
{
2021 if (altq->qname[0] == 0 && (altq->local_flags &
2022 PFALTQ_FLAG_IF_REMOVED0x01) == 0) {
2023 error = pf_enable_altq(altq);
2024 if (error != 0)
2025 break;
2026 }
2027 }
2028 if (error == 0)
2029 V_pf_altq_running = 1;
2030 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2030)
;
2031 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("altq: started\n"
)
;
2032 break;
2033 }
2034
2035 case DIOCSTOPALTQ((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((43))))
: {
2036 struct pf_altq *altq;
2037
2038 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2038)
;
2039 /* disable all altq interfaces on active list */
2040 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)for ((altq) = ((((pf_altqs_active)))->tqh_first); (altq); (
altq) = (((altq))->entries.tqe_next))
{
2041 if (altq->qname[0] == 0 && (altq->local_flags &
2042 PFALTQ_FLAG_IF_REMOVED0x01) == 0) {
2043 error = pf_disable_altq(altq);
2044 if (error != 0)
2045 break;
2046 }
2047 }
2048 if (error == 0)
2049 V_pf_altq_running = 0;
2050 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2050)
;
2051 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("altq: stopped\n"
)
;
2052 break;
2053 }
2054
2055 case DIOCADDALTQ((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_altq)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((45))))
: {
2056 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2057 struct pf_altq *altq, *a;
2058 struct ifnet *ifp;
2059
2060 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK0x0002);
2061 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2062 altq->local_flags = 0;
2063
2064 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2064)
;
2065 if (pa->ticket != V_ticket_altqs_inactive(ticket_altqs_inactive)) {
2066 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2066)
;
2067 free(altq, M_PFALTQ);
2068 error = EBUSY16;
2069 break;
2070 }
2071
2072 /*
2073 * if this is for a queue, find the discipline and
2074 * copy the necessary fields
2075 */
2076 if (altq->qname[0] != 0) {
2077 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2078 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2078)
;
2079 error = EBUSY16;
2080 free(altq, M_PFALTQ);
2081 break;
2082 }
2083 altq->altq_disc = NULL((void *)0);
2084 TAILQ_FOREACH(a, V_pf_altqs_inactive, entries)for ((a) = ((((pf_altqs_inactive)))->tqh_first); (a); (a) =
(((a))->entries.tqe_next))
{
2085 if (strncmp(a->ifname, altq->ifname,
2086 IFNAMSIZ16) == 0 && a->qname[0] == 0) {
2087 altq->altq_disc = a->altq_disc;
2088 break;
2089 }
2090 }
2091 }
2092
2093 if ((ifp = ifunit(altq->ifname)) == NULL((void *)0))
2094 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED0x01;
2095 else
2096 error = altq_add(altq);
2097
2098 if (error) {
2099 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2099)
;
2100 free(altq, M_PFALTQ);
2101 break;
2102 }
2103
2104 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries)do { ; (((altq))->entries.tqe_next) = ((void *)0); (altq)->
entries.tqe_prev = ((pf_altqs_inactive))->tqh_last; *((pf_altqs_inactive
))->tqh_last = (altq); ((pf_altqs_inactive))->tqh_last =
&(((altq))->entries.tqe_next); ; ; } while (0)
;
2105 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2106 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2106)
;
2107 break;
2108 }
2109
2110 case DIOCGETALTQS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_altq)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((47))))
: {
2111 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2112 struct pf_altq *altq;
2113
2114 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2114)
;
2115 pa->nr = 0;
2116 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)for ((altq) = ((((pf_altqs_active)))->tqh_first); (altq); (
altq) = (((altq))->entries.tqe_next))
2117 pa->nr++;
2118 pa->ticket = V_ticket_altqs_active(ticket_altqs_active);
2119 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2119)
;
2120 break;
2121 }
2122
2123 case DIOCGETALTQ((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_altq)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((48))))
: {
2124 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2125 struct pf_altq *altq;
2126 u_int32_t nr;
2127
2128 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2128)
;
2129 if (pa->ticket != V_ticket_altqs_active(ticket_altqs_active)) {
2130 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2130)
;
2131 error = EBUSY16;
2132 break;
2133 }
2134 nr = 0;
2135 altq = TAILQ_FIRST(V_pf_altqs_active)(((pf_altqs_active))->tqh_first);
2136 while ((altq != NULL((void *)0)) && (nr < pa->nr)) {
2137 altq = TAILQ_NEXT(altq, entries)((altq)->entries.tqe_next);
2138 nr++;
2139 }
2140 if (altq == NULL((void *)0)) {
2141 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2141)
;
2142 error = EBUSY16;
2143 break;
2144 }
2145 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2146 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2146)
;
2147 break;
2148 }
2149
2150 case DIOCCHANGEALTQ((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_altq)) & ((1 << 13) - 1)) << 16) | (((
'D')) << 8) | ((49))))
:
2151 /* CHANGEALTQ not supported yet! */
2152 error = ENODEV19;
2153 break;
2154
2155 case DIOCGETQSTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_qstats)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((50))))
: {
2156 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2157 struct pf_altq *altq;
2158 u_int32_t nr;
2159 int nbytes;
2160
2161 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2161)
;
2162 if (pq->ticket != V_ticket_altqs_active(ticket_altqs_active)) {
2163 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2163)
;
2164 error = EBUSY16;
2165 break;
2166 }
2167 nbytes = pq->nbytes;
2168 nr = 0;
2169 altq = TAILQ_FIRST(V_pf_altqs_active)(((pf_altqs_active))->tqh_first);
2170 while ((altq != NULL((void *)0)) && (nr < pq->nr)) {
2171 altq = TAILQ_NEXT(altq, entries)((altq)->entries.tqe_next);
2172 nr++;
2173 }
2174 if (altq == NULL((void *)0)) {
2175 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2175)
;
2176 error = EBUSY16;
2177 break;
2178 }
2179
2180 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED0x01) != 0) {
2181 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2181)
;
2182 error = ENXIO6;
2183 break;
2184 }
2185 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2185)
;
2186 error = altq_getqstats(altq, pq->buf, &nbytes);
2187 if (error == 0) {
2188 pq->scheduler = altq->scheduler;
2189 pq->nbytes = nbytes;
2190 }
2191 break;
2192 }
2193#endif /* ALTQ */
2194
2195 case DIOCBEGINADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((51))))
: {
2196 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2197
2198 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2198)
;
2199 pf_empty_pool(&V_pf_pabuf(pf_pabuf));
2200 pp->ticket = ++V_ticket_pabuf(ticket_pabuf);
2201 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2201)
;
2202 break;
2203 }
2204
2205 case DIOCADDADDR((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((52))))
: {
2206 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2207 struct pf_pooladdr *pa;
2208 struct pfi_kif *kif = NULL((void *)0);
2209
2210#ifndef INET1
2211 if (pp->af == AF_INET2) {
2212 error = EAFNOSUPPORT47;
2213 break;
2214 }
2215#endif /* INET */
2216#ifndef INET61
2217 if (pp->af == AF_INET628) {
2218 error = EAFNOSUPPORT47;
2219 break;
2220 }
2221#endif /* INET6 */
2222 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2223 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2224 pp->addr.addr.type != PF_ADDR_TABLE) {
2225 error = EINVAL22;
2226 break;
2227 }
2228 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK0x0002);
2229 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2230 if (pa->ifname[0])
2231 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK0x0002);
2232 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2232)
;
2233 if (pp->ticket != V_ticket_pabuf(ticket_pabuf)) {
2234 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2234)
;
2235 if (pa->ifname[0])
2236 free(kif, PFI_MTYPE);
2237 free(pa, M_PFRULE);
2238 error = EBUSY16;
2239 break;
2240 }
2241 if (pa->ifname[0]) {
2242 pa->kif = pfi_kif_attach(kif, pa->ifname);
2243 pfi_kif_ref(pa->kif);
2244 } else
2245 pa->kif = NULL((void *)0);
2246 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2247 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2248 if (pa->ifname[0])
2249 pfi_kif_unref(pa->kif);
2250 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2250)
;
2251 free(pa, M_PFRULE);
2252 break;
2253 }
2254 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2255 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2255)
;
2256 break;
2257 }
2258
2259 case DIOCGETADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((53))))
: {
2260 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2261 struct pf_pool *pool;
2262 struct pf_pooladdr *pa;
2263
2264 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2264)
;
2265 pp->nr = 0;
2266 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2267 pp->r_num, 0, 1, 0);
2268 if (pool == NULL((void *)0)) {
2269 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2269)
;
2270 error = EBUSY16;
2271 break;
2272 }
2273 TAILQ_FOREACH(pa, &pool->list, entries)
2274 pp->nr++;
2275 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2275)
;
2276 break;
2277 }
2278
2279 case DIOCGETADDR((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((54))))
: {
2280 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2281 struct pf_pool *pool;
2282 struct pf_pooladdr *pa;
2283 u_int32_t nr = 0;
2284
2285 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2285)
;
2286 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2287 pp->r_num, 0, 1, 1);
2288 if (pool == NULL((void *)0)) {
2289 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2289)
;
2290 error = EBUSY16;
2291 break;
2292 }
2293 pa = TAILQ_FIRST(&pool->list)((&pool->list)->tqh_first);
2294 while ((pa != NULL((void *)0)) && (nr < pp->nr)) {
2295 pa = TAILQ_NEXT(pa, entries)((pa)->entries.tqe_next);
2296 nr++;
2297 }
2298 if (pa == NULL((void *)0)) {
2299 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2299)
;
2300 error = EBUSY16;
2301 break;
2302 }
2303 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2304 pf_addr_copyout(&pp->addr.addr);
2305 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2305)
;
2306 break;
2307 }
2308
2309 case DIOCCHANGEADDR((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_pooladdr)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((55))))
: {
2310 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2311 struct pf_pool *pool;
2312 struct pf_pooladdr *oldpa = NULL((void *)0), *newpa = NULL((void *)0);
2313 struct pf_ruleset *ruleset;
2314 struct pfi_kif *kif = NULL((void *)0);
2315
2316 if (pca->action < PF_CHANGE_ADD_HEAD ||
2317 pca->action > PF_CHANGE_REMOVE) {
2318 error = EINVAL22;
2319 break;
2320 }
2321 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2322 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2323 pca->addr.addr.type != PF_ADDR_TABLE) {
2324 error = EINVAL22;
2325 break;
2326 }
2327
2328 if (pca->action != PF_CHANGE_REMOVE) {
2329#ifndef INET1
2330 if (pca->af == AF_INET2) {
2331 error = EAFNOSUPPORT47;
2332 break;
2333 }
2334#endif /* INET */
2335#ifndef INET61
2336 if (pca->af == AF_INET628) {
2337 error = EAFNOSUPPORT47;
2338 break;
2339 }
2340#endif /* INET6 */
2341 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK0x0002);
2342 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2343 if (newpa->ifname[0])
2344 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK0x0002);
2345 newpa->kif = NULL((void *)0);
2346 }
2347
2348#define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; }
2349 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2349)
;
2350 ruleset = pf_find_ruleset(pca->anchor);
2351 if (ruleset == NULL((void *)0))
2352 ERROUT(EBUSY16);
2353
2354 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2355 pca->r_num, pca->r_last, 1, 1);
2356 if (pool == NULL((void *)0))
2357 ERROUT(EBUSY16);
2358
2359 if (pca->action != PF_CHANGE_REMOVE) {
2360 if (newpa->ifname[0]) {
2361 newpa->kif = pfi_kif_attach(kif, newpa->ifname);
2362 pfi_kif_ref(newpa->kif);
2363 kif = NULL((void *)0);
2364 }
2365
2366 switch (newpa->addr.type) {
2367 case PF_ADDR_DYNIFTL:
2368 error = pfi_dynaddr_setup(&newpa->addr,
2369 pca->af);
2370 break;
2371 case PF_ADDR_TABLE:
2372 newpa->addr.p.tbl = pfr_attach_table(ruleset,
2373 newpa->addr.v.tblname);
2374 if (newpa->addr.p.tbl == NULL((void *)0))
2375 error = ENOMEM12;
2376 break;
2377 }
2378 if (error)
2379 goto DIOCCHANGEADDR_error;
2380 }
2381
2382 switch (pca->action) {
2383 case PF_CHANGE_ADD_HEAD:
2384 oldpa = TAILQ_FIRST(&pool->list)((&pool->list)->tqh_first);
2385 break;
2386 case PF_CHANGE_ADD_TAIL:
2387 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2388 break;
2389 default:
2390 oldpa = TAILQ_FIRST(&pool->list)((&pool->list)->tqh_first);
2391 for (int i = 0; oldpa && i < pca->nr; i++)
2392 oldpa = TAILQ_NEXT(oldpa, entries)((oldpa)->entries.tqe_next);
2393
2394 if (oldpa == NULL((void *)0))
2395 ERROUT(EINVAL22);
2396 }
2397
2398 if (pca->action == PF_CHANGE_REMOVE) {
2399 TAILQ_REMOVE(&pool->list, oldpa, entries);
2400 switch (oldpa->addr.type) {
2401 case PF_ADDR_DYNIFTL:
2402 pfi_dynaddr_remove(oldpa->addr.p.dyn);
2403 break;
2404 case PF_ADDR_TABLE:
2405 pfr_detach_table(oldpa->addr.p.tbl);
2406 break;
2407 }
2408 if (oldpa->kif)
2409 pfi_kif_unref(oldpa->kif);
2410 free(oldpa, M_PFRULE);
2411 } else {
2412 if (oldpa == NULL((void *)0))
2413 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2414 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2415 pca->action == PF_CHANGE_ADD_BEFORE)
2416 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2417 else
2418 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2419 newpa, entries);
2420 }
2421
2422 pool->cur = TAILQ_FIRST(&pool->list)((&pool->list)->tqh_first);
2423 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
2424 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2424)
;
2425 break;
2426
2427#undef ERROUT
2428DIOCCHANGEADDR_error:
2429 if (newpa->kif)
2430 pfi_kif_unref(newpa->kif);
2431 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2431)
;
2432 if (newpa != NULL((void *)0))
2433 free(newpa, M_PFRULE);
2434 if (kif != NULL((void *)0))
2435 free(kif, PFI_MTYPE);
2436 break;
2437 }
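
Editor's note: the DIOCCHANGEADDR handler above funnels every failure path through a local ERROUT() macro and the DIOCCHANGEADDR_error label, so the write lock is dropped and the half-built pool address is freed in exactly one place. A minimal sketch of that idiom follows (label renamed, validation reduced to a placeholder; it mirrors the shape of the code above and is not a drop-in replacement):

    #define ERROUT(x)   { error = (x); goto out; }

    static int
    changeaddr_sketch(struct pf_pooladdr *newpa, struct pfi_kif *kif)
    {
        int error = 0;

        PF_RULES_WLOCK();
        if (1 /* placeholder: ruleset or ticket lookup failed */)
            ERROUT(EBUSY);
        /* ... on success: attach kif, link newpa into the pool ... */
    out:
        PF_RULES_WUNLOCK();
        if (error) {
            if (newpa != NULL)
                free(newpa, M_PFRULE);
            if (kif != NULL)
                free(kif, PFI_MTYPE);
        }
        return (error);
    }
    #undef ERROUT
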
2438
2439 case DIOCGETRULESETS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_ruleset)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((58))))
: {
2440 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2441 struct pf_ruleset *ruleset;
2442 struct pf_anchor *anchor;
2443
2444 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2444)
;
2445 pr->path[sizeof(pr->path) - 1] = 0;
2446 if ((ruleset = pf_find_ruleset(pr->path)) == NULL((void *)0)) {
2447 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2447)
;
2448 error = ENOENT2;
2449 break;
2450 }
2451 pr->nr = 0;
2452 if (ruleset->anchor == NULL((void *)0)) {
2453 /* XXX kludge for pf_main_ruleset */
2454 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
2455 if (anchor->parent == NULL((void *)0))
2456 pr->nr++;
2457 } else {
2458 RB_FOREACH(anchor, pf_anchor_node,
2459 &ruleset->anchor->children)
2460 pr->nr++;
2461 }
2462 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2462)
;
2463 break;
2464 }
2465
2466 case DIOCGETRULESET((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_ruleset)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((59))))
: {
2467 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2468 struct pf_ruleset *ruleset;
2469 struct pf_anchor *anchor;
2470 u_int32_t nr = 0;
2471
2472 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2472)
;
2473 pr->path[sizeof(pr->path) - 1] = 0;
2474 if ((ruleset = pf_find_ruleset(pr->path)) == NULL((void *)0)) {
2475 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2475)
;
2476 error = ENOENT2;
2477 break;
2478 }
2479 pr->name[0] = 0;
2480 if (ruleset->anchor == NULL((void *)0)) {
2481 /* XXX kludge for pf_main_ruleset */
2482 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors)
2483 if (anchor->parent == NULL((void *)0) && nr++ == pr->nr) {
2484 strlcpy(pr->name, anchor->name,
2485 sizeof(pr->name));
2486 break;
2487 }
2488 } else {
2489 RB_FOREACH(anchor, pf_anchor_node,
2490 &ruleset->anchor->children)
2491 if (nr++ == pr->nr) {
2492 strlcpy(pr->name, anchor->name,
2493 sizeof(pr->name));
2494 break;
2495 }
2496 }
2497 if (!pr->name[0])
2498 error = EBUSY16;
2499 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2499)
;
2500 break;
2501 }
2502
2503 case DIOCRCLRTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((60))))
: {
2504 struct pfioc_table *io = (struct pfioc_table *)addr;
2505
2506 if (io->pfrio_esize != 0) {
2507 error = ENODEV19;
2508 break;
2509 }
2510 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2510)
;
2511 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2512 io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2513 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2513)
;
2514 break;
2515 }
2516
2517 case DIOCRADDTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((61))))
: {
2518 struct pfioc_table *io = (struct pfioc_table *)addr;
2519 struct pfr_table *pfrts;
2520 size_t totlen;
2521
2522 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2523 error = ENODEV19;
2524 break;
2525 }
2526 totlen = io->pfrio_size * sizeof(struct pfr_table);
2527 pfrts = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2528 error = copyin(io->pfrio_buffer, pfrts, totlen);
2529 if (error) {
2530 free(pfrts, M_TEMP);
2531 break;
2532 }
2533 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2533)
;
2534 error = pfr_add_tables(pfrts, io->pfrio_size,
2535 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2536 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2536)
;
2537 free(pfrts, M_TEMP);
2538 break;
2539 }
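
Editor's note: DIOCRADDTABLES and the table ioctls that follow share one shape: reject a mismatched element size, copy the user array into a temporary M_TEMP buffer, take the rules lock around the pfr_*() call, then free the buffer (the address variants further below also copy the buffer back when PFR_FLAG_FEEDBACK is set). A compressed sketch of that shape; the handler pointer is a hypothetical stand-in for the specific pfr_*() function:

    static int
    table_ioctl_sketch(struct pfioc_table *io,
        int (*handler)(struct pfr_table *, int, int *, int))
    {
        struct pfr_table *pfrts;
        size_t totlen;
        int error;

        if (io->pfrio_esize != sizeof(struct pfr_table))
            return (ENODEV);    /* userland built against another layout */
        totlen = io->pfrio_size * sizeof(struct pfr_table);
        pfrts = malloc(totlen, M_TEMP, M_WAITOK);
        if ((error = copyin(io->pfrio_buffer, pfrts, totlen)) != 0) {
            free(pfrts, M_TEMP);
            return (error);
        }
        PF_RULES_WLOCK();
        error = handler(pfrts, io->pfrio_size, &io->pfrio_nadd,
            io->pfrio_flags | PFR_FLAG_USERIOCTL);
        PF_RULES_WUNLOCK();
        free(pfrts, M_TEMP);
        return (error);
    }
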
2540
2541 case DIOCRDELTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((62))))
: {
2542 struct pfioc_table *io = (struct pfioc_table *)addr;
2543 struct pfr_table *pfrts;
2544 size_t totlen;
2545
2546 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2547 error = ENODEV19;
2548 break;
2549 }
2550 totlen = io->pfrio_size * sizeof(struct pfr_table);
2551 pfrts = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2552 error = copyin(io->pfrio_buffer, pfrts, totlen);
2553 if (error) {
2554 free(pfrts, M_TEMP);
2555 break;
2556 }
2557 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2557)
;
2558 error = pfr_del_tables(pfrts, io->pfrio_size,
2559 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2560 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2560)
;
2561 free(pfrts, M_TEMP);
2562 break;
2563 }
2564
2565 case DIOCRGETTABLES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((63))))
: {
2566 struct pfioc_table *io = (struct pfioc_table *)addr;
2567 struct pfr_table *pfrts;
2568 size_t totlen;
2569
2570 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2571 error = ENODEV19;
2572 break;
2573 }
2574 totlen = io->pfrio_size * sizeof(struct pfr_table);
2575 pfrts = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2576 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2576)
;
2577 error = pfr_get_tables(&io->pfrio_table, pfrts,
2578 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2579 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2579)
;
2580 if (error == 0)
2581 error = copyout(pfrts, io->pfrio_buffer, totlen);
2582 free(pfrts, M_TEMP);
2583 break;
2584 }
2585
2586 case DIOCRGETTSTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((64))))
: {
2587 struct pfioc_table *io = (struct pfioc_table *)addr;
2588 struct pfr_tstats *pfrtstats;
2589 size_t totlen;
2590
2591 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2592 error = ENODEV19;
2593 break;
2594 }
2595 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
2596 pfrtstats = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2597 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2597)
;
2598 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
2599 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2600 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2600)
;
2601 if (error == 0)
2602 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
2603 free(pfrtstats, M_TEMP);
2604 break;
2605 }
2606
2607 case DIOCRCLRTSTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((65))))
: {
2608 struct pfioc_table *io = (struct pfioc_table *)addr;
2609 struct pfr_table *pfrts;
2610 size_t totlen;
2611
2612 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2613 error = ENODEV19;
2614 break;
2615 }
2616 totlen = io->pfrio_size * sizeof(struct pfr_table);
2617 pfrts = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2618 error = copyin(io->pfrio_buffer, pfrts, totlen);
2619 if (error) {
2620 free(pfrts, M_TEMP);
2621 break;
2622 }
2623 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2623)
;
2624 error = pfr_clr_tstats(pfrts, io->pfrio_size,
2625 &io->pfrio_nzeropfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2626 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2626)
;
2627 free(pfrts, M_TEMP);
2628 break;
2629 }
2630
2631 case DIOCRSETTFLAGS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((74))))
: {
2632 struct pfioc_table *io = (struct pfioc_table *)addr;
2633 struct pfr_table *pfrts;
2634 size_t totlen;
2635
2636 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2637 error = ENODEV19;
2638 break;
2639 }
2640 totlen = io->pfrio_size * sizeof(struct pfr_table);
2641 pfrts = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2642 error = copyin(io->pfrio_buffer, pfrts, totlen);
2643 if (error) {
2644 free(pfrts, M_TEMP);
2645 break;
2646 }
2647 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2647)
;
2648 error = pfr_set_tflags(pfrts, io->pfrio_size,
2649 io->pfrio_setflagpfrio_size2, io->pfrio_clrflagpfrio_nadd, &io->pfrio_nchange,
2650 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2651 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2651)
;
2652 free(pfrts, M_TEMP);
2653 break;
2654 }
2655
2656 case DIOCRCLRADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((66))))
: {
2657 struct pfioc_table *io = (struct pfioc_table *)addr;
2658
2659 if (io->pfrio_esize != 0) {
2660 error = ENODEV19;
2661 break;
2662 }
2663 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2663)
;
2664 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2665 io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2666 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2666)
;
2667 break;
2668 }
2669
2670 case DIOCRADDADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((67))))
: {
2671 struct pfioc_table *io = (struct pfioc_table *)addr;
2672 struct pfr_addr *pfras;
2673 size_t totlen;
2674
2675 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2676 error = ENODEV19;
2677 break;
2678 }
2679 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2680 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2681 error = copyin(io->pfrio_buffer, pfras, totlen);
2682 if (error) {
2683 free(pfras, M_TEMP);
2684 break;
2685 }
2686 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2686)
;
2687 error = pfr_add_addrs(&io->pfrio_table, pfras,
2688 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2689 PFR_FLAG_USERIOCTL0x10000000);
2690 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2690)
;
2691 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK0x00000004)
2692 error = copyout(pfras, io->pfrio_buffer, totlen);
2693 free(pfras, M_TEMP);
2694 break;
2695 }
2696
2697 case DIOCRDELADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((68))))
: {
2698 struct pfioc_table *io = (struct pfioc_table *)addr;
2699 struct pfr_addr *pfras;
2700 size_t totlen;
2701
2702 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2703 error = ENODEV19;
2704 break;
2705 }
2706 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2707 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2708 error = copyin(io->pfrio_buffer, pfras, totlen);
2709 if (error) {
2710 free(pfras, M_TEMP);
2711 break;
2712 }
2713 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2713)
;
2714 error = pfr_del_addrs(&io->pfrio_table, pfras,
2715 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2716 PFR_FLAG_USERIOCTL0x10000000);
2717 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2717)
;
2718 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK0x00000004)
2719 error = copyout(pfras, io->pfrio_buffer, totlen);
2720 free(pfras, M_TEMP);
2721 break;
2722 }
2723
2724 case DIOCRSETADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((69))))
: {
2725 struct pfioc_table *io = (struct pfioc_table *)addr;
2726 struct pfr_addr *pfras;
2727 size_t totlen, count;
2728
2729 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2730 error = ENODEV19;
2731 break;
2732 }
2733 count = max(io->pfrio_size, io->pfrio_size2);
2734 totlen = count * sizeof(struct pfr_addr);
2735 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2736 error = copyin(io->pfrio_buffer, pfras, totlen);
2737 if (error) {
2738 free(pfras, M_TEMP);
2739 break;
2740 }
2741 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2741)
;
2742 error = pfr_set_addrs(&io->pfrio_table, pfras,
2743 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2744 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2745 PFR_FLAG_USERIOCTL0x10000000, 0);
2746 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2746)
;
2747 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK0x00000004)
2748 error = copyout(pfras, io->pfrio_buffer, totlen);
2749 free(pfras, M_TEMP);
2750 break;
2751 }
2752
2753 case DIOCRGETADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((70))))
: {
2754 struct pfioc_table *io = (struct pfioc_table *)addr;
2755 struct pfr_addr *pfras;
2756 size_t totlen;
2757
2758 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2759 error = ENODEV19;
2760 break;
2761 }
2762 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2763 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2764 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2764)
;
2765 error = pfr_get_addrs(&io->pfrio_table, pfras,
2766 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2767 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2767)
;
2768 if (error == 0)
2769 error = copyout(pfras, io->pfrio_buffer, totlen);
2770 free(pfras, M_TEMP);
2771 break;
2772 }
2773
2774 case DIOCRGETASTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((71))))
: {
2775 struct pfioc_table *io = (struct pfioc_table *)addr;
2776 struct pfr_astats *pfrastats;
2777 size_t totlen;
2778
2779 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2780 error = ENODEV19;
2781 break;
2782 }
2783 totlen = io->pfrio_size * sizeof(struct pfr_astats);
2784 pfrastats = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2785 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2785)
;
2786 error = pfr_get_astats(&io->pfrio_table, pfrastats,
2787 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2788 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2788)
;
2789 if (error == 0)
2790 error = copyout(pfrastats, io->pfrio_buffer, totlen);
2791 free(pfrastats, M_TEMP);
2792 break;
2793 }
2794
2795 case DIOCRCLRASTATS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((72))))
: {
2796 struct pfioc_table *io = (struct pfioc_table *)addr;
2797 struct pfr_addr *pfras;
2798 size_t totlen;
2799
2800 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2801 error = ENODEV19;
2802 break;
2803 }
2804 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2805 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2806 error = copyin(io->pfrio_buffer, pfras, totlen);
2807 if (error) {
2808 free(pfras, M_TEMP);
2809 break;
2810 }
2811 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2811)
;
2812 error = pfr_clr_astats(&io->pfrio_table, pfras,
2813 io->pfrio_size, &io->pfrio_nzeropfrio_nadd, io->pfrio_flags |
2814 PFR_FLAG_USERIOCTL0x10000000);
2815 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2815)
;
2816 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK0x00000004)
2817 error = copyout(pfras, io->pfrio_buffer, totlen);
2818 free(pfras, M_TEMP);
2819 break;
2820 }
2821
2822 case DIOCRTSTADDRS((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((73))))
: {
2823 struct pfioc_table *io = (struct pfioc_table *)addr;
2824 struct pfr_addr *pfras;
2825 size_t totlen;
2826
2827 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2828 error = ENODEV19;
2829 break;
2830 }
2831 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2832 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2833 error = copyin(io->pfrio_buffer, pfras, totlen);
2834 if (error) {
2835 free(pfras, M_TEMP);
2836 break;
2837 }
2838 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2838)
;
2839 error = pfr_tst_addrs(&io->pfrio_table, pfras,
2840 io->pfrio_size, &io->pfrio_nmatchpfrio_nadd, io->pfrio_flags |
2841 PFR_FLAG_USERIOCTL0x10000000);
2842 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2842)
;
2843 if (error == 0)
2844 error = copyout(pfras, io->pfrio_buffer, totlen);
2845 free(pfras, M_TEMP);
2846 break;
2847 }
2848
2849 case DIOCRINADEFINE((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_table)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((77))))
: {
2850 struct pfioc_table *io = (struct pfioc_table *)addr;
2851 struct pfr_addr *pfras;
2852 size_t totlen;
2853
2854 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2855 error = ENODEV19;
2856 break;
2857 }
2858 totlen = io->pfrio_size * sizeof(struct pfr_addr);
2859 pfras = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2860 error = copyin(io->pfrio_buffer, pfras, totlen);
2861 if (error) {
2862 free(pfras, M_TEMP);
2863 break;
2864 }
2865 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2865)
;
2866 error = pfr_ina_define(&io->pfrio_table, pfras,
2867 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddrpfrio_size2,
2868 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2869 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2869)
;
2870 free(pfras, M_TEMP);
2871 break;
2872 }
2873
2874 case DIOCOSFPADD((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_osfp_ioctl)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((79))))
: {
2875 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2876 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2876)
;
2877 error = pf_osfp_add(io);
2878 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2878)
;
2879 break;
2880 }
2881
2882 case DIOCOSFPGET((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pf_osfp_ioctl)) & ((1 << 13) - 1)) << 16) | (
(('D')) << 8) | ((80))))
: {
2883 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2884 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2884)
;
2885 error = pf_osfp_get(io);
2886 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2886)
;
2887 break;
2888 }
2889
2890 case DIOCXBEGIN((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_trans)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((81))))
: {
2891 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2892 struct pfioc_trans_e *ioes, *ioe;
2893 size_t totlen;
2894 int i;
2895
2896 if (io->esize != sizeof(*ioe)) {
2897 error = ENODEV19;
2898 break;
2899 }
2900 totlen = sizeof(struct pfioc_trans_e) * io->size;
2901 ioes = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2902 error = copyin(io->array, ioes, totlen);
2903 if (error) {
2904 free(ioes, M_TEMP);
2905 break;
2906 }
2907 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2907)
;
2908 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
2909 switch (ioe->rs_num) {
2910#ifdef ALTQ
2911 case PF_RULESET_ALTQ(PF_RULESET_MAX):
2912 if (ioe->anchor[0]) {
2913 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2913)
;
2914 free(ioes, M_TEMP);
2915 error = EINVAL22;
2916 goto fail;
2917 }
2918 if ((error = pf_begin_altq(&ioe->ticket))) {
2919 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2919)
;
2920 free(ioes, M_TEMP);
2921 goto fail;
2922 }
2923 break;
2924#endif /* ALTQ */
2925 case PF_RULESET_TABLE(PF_RULESET_MAX+1):
2926 {
2927 struct pfr_table table;
2928
2929 bzero(&table, sizeof(table));
2930 strlcpy(table.pfrt_anchor, ioe->anchor,
2931 sizeof(table.pfrt_anchor));
2932 if ((error = pfr_ina_begin(&table,
2933 &ioe->ticket, NULL((void *)0), 0))) {
2934 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2934)
;
2935 free(ioes, M_TEMP);
2936 goto fail;
2937 }
2938 break;
2939 }
2940 default:
2941 if ((error = pf_begin_rules(&ioe->ticket,
2942 ioe->rs_num, ioe->anchor))) {
2943 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2943)
;
2944 free(ioes, M_TEMP);
2945 goto fail;
2946 }
2947 break;
2948 }
2949 }
2950 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2950)
;
2951 error = copyout(ioes, io->array, totlen);
2952 free(ioes, M_TEMP);
2953 break;
2954 }
2955
2956 case DIOCXROLLBACK((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_trans)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((83))))
: {
2957 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2958 struct pfioc_trans_e *ioe, *ioes;
2959 size_t totlen;
2960 int i;
2961
2962 if (io->esize != sizeof(*ioe)) {
2963 error = ENODEV19;
2964 break;
2965 }
2966 totlen = sizeof(struct pfioc_trans_e) * io->size;
2967 ioes = malloc(totlen, M_TEMP, M_WAITOK0x0002);
2968 error = copyin(io->array, ioes, totlen);
2969 if (error) {
2970 free(ioes, M_TEMP);
2971 break;
2972 }
2973 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2973)
;
2974 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
2975 switch (ioe->rs_num) {
2976#ifdef ALTQ
2977 case PF_RULESET_ALTQ(PF_RULESET_MAX):
2978 if (ioe->anchor[0]) {
2979 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2979)
;
2980 free(ioes, M_TEMP);
2981 error = EINVAL22;
2982 goto fail;
2983 }
2984 if ((error = pf_rollback_altq(ioe->ticket))) {
2985 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 2985)
;
2986 free(ioes, M_TEMP);
2987 goto fail; /* really bad */
2988 }
2989 break;
2990#endif /* ALTQ */
2991 case PF_RULESET_TABLE(PF_RULESET_MAX+1):
2992 {
2993 struct pfr_table table;
2994
2995 bzero(&table, sizeof(table));
2996 strlcpy(table.pfrt_anchor, ioe->anchor,
2997 sizeof(table.pfrt_anchor));
2998 if ((error = pfr_ina_rollback(&table,
2999 ioe->ticket, NULL((void *)0), 0))) {
3000 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3000)
;
3001 free(ioes, M_TEMP);
3002 goto fail; /* really bad */
3003 }
3004 break;
3005 }
3006 default:
3007 if ((error = pf_rollback_rules(ioe->ticket,
3008 ioe->rs_num, ioe->anchor))) {
3009 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3009)
;
3010 free(ioes, M_TEMP);
3011 goto fail; /* really bad */
3012 }
3013 break;
3014 }
3015 }
3016 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3016)
;
3017 free(ioes, M_TEMP);
3018 break;
3019 }
3020
3021 case DIOCXCOMMIT((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_trans)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((82))))
: {
3022 struct pfioc_trans *io = (struct pfioc_trans *)addr;
3023 struct pfioc_trans_e *ioe, *ioes;
3024 struct pf_ruleset *rs;
3025 size_t totlen;
3026 int i;
3027
3028 if (io->esize != sizeof(*ioe)) {
3029 error = ENODEV19;
3030 break;
3031 }
3032 totlen = sizeof(struct pfioc_trans_e) * io->size;
3033 ioes = malloc(totlen, M_TEMP, M_WAITOK0x0002);
3034 error = copyin(io->array, ioes, totlen);
3035 if (error) {
3036 free(ioes, M_TEMP);
3037 break;
3038 }
3039 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3039)
;
3040 /* First makes sure everything will succeed. */
3041 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3042 switch (ioe->rs_num) {
3043#ifdef ALTQ
3044 case PF_RULESET_ALTQ(PF_RULESET_MAX):
3045 if (ioe->anchor[0]) {
3046 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3046)
;
3047 free(ioes, M_TEMP);
3048 error = EINVAL22;
3049 goto fail;
3050 }
3051 if (!V_altqs_inactive_open(altqs_inactive_open) || ioe->ticket !=
3052 V_ticket_altqs_inactive(ticket_altqs_inactive)) {
3053 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3053)
;
3054 free(ioes, M_TEMP);
3055 error = EBUSY16;
3056 goto fail;
3057 }
3058 break;
3059#endif /* ALTQ */
3060 case PF_RULESET_TABLE(PF_RULESET_MAX+1):
3061 rs = pf_find_ruleset(ioe->anchor);
3062 if (rs == NULL((void *)0) || !rs->topen || ioe->ticket !=
3063 rs->tticket) {
3064 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3064)
;
3065 free(ioes, M_TEMP);
3066 error = EBUSY16;
3067 goto fail;
3068 }
3069 break;
3070 default:
3071 if (ioe->rs_num < 0 || ioe->rs_num >=
3072 PF_RULESET_MAX) {
3073 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3073)
;
3074 free(ioes, M_TEMP);
3075 error = EINVAL22;
3076 goto fail;
3077 }
3078 rs = pf_find_ruleset(ioe->anchor);
3079 if (rs == NULL((void *)0) ||
3080 !rs->rules[ioe->rs_num].inactive.open ||
3081 rs->rules[ioe->rs_num].inactive.ticket !=
3082 ioe->ticket) {
3083 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3083)
;
3084 free(ioes, M_TEMP);
3085 error = EBUSY16;
3086 goto fail;
3087 }
3088 break;
3089 }
3090 }
3091 /* Now do the commit - no errors should happen here. */
3092 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
3093 switch (ioe->rs_num) {
3094#ifdef ALTQ
3095 case PF_RULESET_ALTQ(PF_RULESET_MAX):
3096 if ((error = pf_commit_altq(ioe->ticket))) {
3097 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3097)
;
3098 free(ioes, M_TEMP);
3099 goto fail; /* really bad */
3100 }
3101 break;
3102#endif /* ALTQ */
3103 case PF_RULESET_TABLE(PF_RULESET_MAX+1):
3104 {
3105 struct pfr_table table;
3106
3107 bzero(&table, sizeof(table));
3108 strlcpy(table.pfrt_anchor, ioe->anchor,
3109 sizeof(table.pfrt_anchor));
3110 if ((error = pfr_ina_commit(&table,
3111 ioe->ticket, NULL((void *)0), NULL((void *)0), 0))) {
3112 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3112)
;
3113 free(ioes, M_TEMP);
3114 goto fail; /* really bad */
3115 }
3116 break;
3117 }
3118 default:
3119 if ((error = pf_commit_rules(ioe->ticket,
3120 ioe->rs_num, ioe->anchor))) {
3121 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3121)
;
3122 free(ioes, M_TEMP);
3123 goto fail; /* really bad */
3124 }
3125 break;
3126 }
3127 }
3128 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3128)
;
3129 free(ioes, M_TEMP);
3130 break;
3131 }
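
Editor's note: DIOCXCOMMIT walks the transaction array twice while holding the write lock. The first pass only checks that every ticket still matches an open inactive ruleset; the second pass commits them, so a stale ticket fails the whole transaction before anything changes, and a failure in the second pass is the "really bad" partial-commit case the comments flag. A generic sketch of that two-pass shape, with hypothetical callback types rather than the pf functions:

    struct txn_elem;                        /* opaque element, hypothetical */
    typedef int (*txn_check_fn)(struct txn_elem *);
    typedef int (*txn_apply_fn)(struct txn_elem *);

    static int
    commit_all(struct txn_elem **elems, int n,
        txn_check_fn check, txn_apply_fn apply)
    {
        int error, i;

        for (i = 0; i < n; i++)             /* pass 1: validate only */
            if ((error = check(elems[i])) != 0)
                return (error);             /* nothing committed yet */
        for (i = 0; i < n; i++)             /* pass 2: should not fail */
            if ((error = apply(elems[i])) != 0)
                return (error);             /* partial commit */
        return (0);
    }
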
3132
3133 case DIOCGETSRCNODES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_src_nodes)) & ((1 << 13) - 1)) << 16) |
((('D')) << 8) | ((84))))
: {
3134 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3135 struct pf_srchash *sh;
3136 struct pf_src_node *n, *p, *pstore;
3137 uint32_t i, nr = 0;
3138
3139 if (psn->psn_len == 0) {
3140 for (i = 0, sh = V_pf_srchash(pf_srchash); i <= pf_srchashmask;
3141 i++, sh++) {
3142 PF_HASHROW_LOCK(sh);
3143 LIST_FOREACH(n, &sh->nodes, entry)
3144 nr++;
3145 PF_HASHROW_UNLOCK(sh);
3146 }
3147 psn->psn_len = sizeof(struct pf_src_node) * nr;
3148 break;
3149 }
3150
3151 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK0x0002);
3152 for (i = 0, sh = V_pf_srchash(pf_srchash); i <= pf_srchashmask;
3153 i++, sh++) {
3154 PF_HASHROW_LOCK(sh);
3155 LIST_FOREACH(n, &sh->nodes, entry) {
3156 int secs = time_uptime, diff;
3157
3158 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3159 break;
3160
3161 bcopy(n, p, sizeof(struct pf_src_node));
3162 if (n->rule.ptr != NULL((void *)0))
3163 p->rule.nr = n->rule.ptr->nr;
3164 p->creation = secs - p->creation;
3165 if (p->expire > secs)
3166 p->expire -= secs;
3167 else
3168 p->expire = 0;
3169
3170 /* Adjust the connection rate estimate. */
3171 diff = secs - n->conn_rate.last;
3172 if (diff >= n->conn_rate.seconds)
3173 p->conn_rate.count = 0;
3174 else
3175 p->conn_rate.count -=
3176 n->conn_rate.count * diff /
3177 n->conn_rate.seconds;
3178 p++;
3179 nr++;
3180 }
3181 PF_HASHROW_UNLOCK(sh);
3182 }
3183 error = copyout(pstore, psn->psn_src_nodespsn_u.psu_src_nodes,
3184 sizeof(struct pf_src_node) * nr);
3185 if (error) {
3186 free(pstore, M_TEMP);
3187 break;
3188 }
3189 psn->psn_len = sizeof(struct pf_src_node) * nr;
3190 free(pstore, M_TEMP);
3191 break;
3192 }
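
Editor's note: the export loop above (source lines 3170-3177) ages each source node's connection-rate counter linearly over the part of the rate window that has already elapsed; once the whole window has passed, the counter is reported as zero. A standalone illustration of that arithmetic (hypothetical helper, compilable in userland):

    #include <stdio.h>

    static unsigned int
    decayed_count(unsigned int count, int elapsed, int window)
    {
        if (elapsed >= window)
            return (0);
        return (count - count * elapsed / window);
    }

    int
    main(void)
    {
        /* 30 connections counted in a 10 s window, 4 s already elapsed */
        printf("%u\n", decayed_count(30, 4, 10));   /* prints 18 */
        return (0);
    }
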
3193
3194 case DIOCCLRSRCNODES((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((85))))
: {
3195
3196 pf_clear_srcnodes(NULL((void *)0));
3197 pf_purge_expired_src_nodes();
3198 break;
3199 }
3200
3201 case DIOCKILLSRCNODES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_src_node_kill)) & ((1 << 13) - 1)) << 16
) | ((('D')) << 8) | ((91))))
:
3202 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
3203 break;
3204
3205 case DIOCSETHOSTID((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(u_int32_t
)) & ((1 << 13) - 1)) << 16) | ((('D')) <<
8) | ((86))))
: {
3206 u_int32_t *hostid = (u_int32_t *)addr;
3207
3208 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3208)
;
3209 if (*hostid == 0)
3210 V_pf_status(pf_status).hostid = arc4random();
3211 else
3212 V_pf_status(pf_status).hostid = *hostid;
3213 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3213)
;
3214 break;
3215 }
3216
3217 case DIOCOSFPFLUSH((unsigned long) ((0x20000000) | (((0) & ((1 << 13)
- 1)) << 16) | ((('D')) << 8) | ((78))))
:
3218 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3218)
;
3219 pf_osfp_flush();
3220 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3220)
;
3221 break;
3222
3223 case DIOCIGETIFACES((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_iface)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((87))))
: {
3224 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3225 struct pfi_kif *ifstore;
3226 size_t bufsiz;
3227
3228 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
4. Taking false branch
3229 error = ENODEV19;
3230 break;
3231 }
3232
3233 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
3234 ifstore = malloc(bufsiz, M_TEMP, M_WAITOK0x0002);
3235 PF_RULES_RLOCK()__rw_rlock(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3235)
;
3236 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
3237 PF_RULES_RUNLOCK()_rw_runlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3237)
;
3238 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5. Copies out a struct with a union element with different sizes
3239 free(ifstore, M_TEMP);
3240 break;
3241 }
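
Editor's note: the copyout() above is the subject of this report (step 5, source line 3238). The handler copies whole kernel struct pfi_kif objects to userland, and the checker objects because the structure contains a union whose members are not all the same size, so whenever a smaller member is the live one the remaining union bytes are whatever the kernel last stored there. A minimal userland-style sketch of the hazard and of the usual remedy, an explicitly zeroed export record; the types are hypothetical stand-ins, not the real pfi_kif layout:

    #include <string.h>

    struct sketch_kif {                 /* hypothetical kernel object */
        char    name[16];
        union {
            void        *ptr;           /* 8 bytes on LP64 */
            unsigned int idx;           /* 4 bytes: the tail of the union
                                         * stays as stale kernel memory */
        } u;
    };

    struct sketch_kif_export {          /* what a safe export would copy out */
        char         name[16];
        unsigned int idx;
    };

    static void
    sketch_export(const struct sketch_kif *k, struct sketch_kif_export *e)
    {
        memset(e, 0, sizeof(*e));       /* no uninitialized bytes reach userland */
        memcpy(e->name, k->name, sizeof(e->name));
        e->idx = k->u.idx;
    }

Copying out a dedicated, fully initialized record also keeps any in-kernel pointers such a structure may embed out of the user-visible buffer.
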
3242
3243 case DIOCSETIFFLAG((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_iface)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((89))))
: {
3244 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3245
3246 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3246)
;
3247 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3248 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3248)
;
3249 break;
3250 }
3251
3252 case DIOCCLRIFFLAG((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct
pfioc_iface)) & ((1 << 13) - 1)) << 16) | ((
('D')) << 8) | ((90))))
: {
3253 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3254
3255 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3255)
;
3256 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3257 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3257)
;
3258 break;
3259 }
3260
3261 default:
3262 error = ENODEV19;
3263 break;
3264 }
3265 fail:
3266 if (sx_xlocked(&pf_ioctl_lock))
3267 sx_xunlock(&pf_ioctl_lock);
3268 CURVNET_RESTORE();
3269
3270 return (error);
3271}
3272
3273void
3274pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
3275{
3276 bzero(sp, sizeof(struct pfsync_state));
3277
3278 /* copy from state key */
3279 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
3280 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
3281 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
3282 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
3283 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
3284 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
3285 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
3286 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
3287 sp->proto = st->key[PF_SK_WIRE]->proto;
3288 sp->af = st->key[PF_SK_WIRE]->af;
3289
3290 /* copy from state */
3291 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
3292 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
3293 sp->creation = htonl(time_uptime - st->creation);
3294 sp->expire = pf_state_expires(st);
3295 if (sp->expire <= time_uptime)
3296 sp->expire = htonl(0);
3297 else
3298 sp->expire = htonl(sp->expire - time_uptime);
3299
3300 sp->direction = st->direction;
3301 sp->log = st->log;
3302 sp->timeout = st->timeout;
3303 sp->state_flags = st->state_flags;
3304 if (st->src_node)
3305 sp->sync_flags |= PFSYNC_FLAG_SRCNODE0x04;
3306 if (st->nat_src_node)
3307 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE0x08;
3308
3309 sp->id = st->id;
3310 sp->creatorid = st->creatorid;
3311 pf_state_peer_hton(&st->src, &sp->src);
3312 pf_state_peer_hton(&st->dst, &sp->dst);
3313
3314 if (st->rule.ptr == NULL((void *)0))
3315 sp->rule = htonl(-1)(__builtin_constant_p(-1) ? (((__uint32_t)((__uint16_t)(__builtin_constant_p
(((__uint32_t)(-1)) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)(-1)) & 0xffff)) << 8 | ((__uint16_t
)(((__uint32_t)(-1)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)(-1)) & 0xffff))) << 16) | ((__uint16_t
)(__builtin_constant_p(((__uint32_t)(-1)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(-1)) >> 16)) << 8 |
((__uint16_t)(((__uint32_t)(-1)) >> 16)) >> 8) :
__bswap16_var(((__uint32_t)(-1)) >> 16)))) : __bswap32_var
(-1))
;
3316 else
3317 sp->rule = htonl(st->rule.ptr->nr)(__builtin_constant_p(st->rule.ptr->nr) ? (((__uint32_t
)((__uint16_t)(__builtin_constant_p(((__uint32_t)(st->rule
.ptr->nr)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t
)(st->rule.ptr->nr)) & 0xffff)) << 8 | ((__uint16_t
)(((__uint32_t)(st->rule.ptr->nr)) & 0xffff)) >>
8) : __bswap16_var(((__uint32_t)(st->rule.ptr->nr)) &
0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p(
((__uint32_t)(st->rule.ptr->nr)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->rule.ptr->nr)) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)(st->rule.ptr
->nr)) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)(st->rule.ptr->nr)) >> 16)))) : __bswap32_var(st
->rule.ptr->nr))
;
3318 if (st->anchor.ptr == NULL((void *)0))
3319 sp->anchor = htonl(-1)(__builtin_constant_p(-1) ? (((__uint32_t)((__uint16_t)(__builtin_constant_p
(((__uint32_t)(-1)) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)(-1)) & 0xffff)) << 8 | ((__uint16_t
)(((__uint32_t)(-1)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)(-1)) & 0xffff))) << 16) | ((__uint16_t
)(__builtin_constant_p(((__uint32_t)(-1)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(-1)) >> 16)) << 8 |
((__uint16_t)(((__uint32_t)(-1)) >> 16)) >> 8) :
__bswap16_var(((__uint32_t)(-1)) >> 16)))) : __bswap32_var
(-1))
;
3320 else
3321 sp->anchor = htonl(st->anchor.ptr->nr)(__builtin_constant_p(st->anchor.ptr->nr) ? (((__uint32_t
)((__uint16_t)(__builtin_constant_p(((__uint32_t)(st->anchor
.ptr->nr)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t
)(st->anchor.ptr->nr)) & 0xffff)) << 8 | ((__uint16_t
)(((__uint32_t)(st->anchor.ptr->nr)) & 0xffff)) >>
8) : __bswap16_var(((__uint32_t)(st->anchor.ptr->nr)) &
0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p(
((__uint32_t)(st->anchor.ptr->nr)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->anchor.ptr->nr)) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)(st->anchor.
ptr->nr)) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)(st->anchor.ptr->nr)) >> 16)))) : __bswap32_var(
st->anchor.ptr->nr))
;
3322 if (st->nat_rule.ptr == NULL((void *)0))
3323 sp->nat_rule = htonl(-1)(__builtin_constant_p(-1) ? (((__uint32_t)((__uint16_t)(__builtin_constant_p
(((__uint32_t)(-1)) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)(-1)) & 0xffff)) << 8 | ((__uint16_t
)(((__uint32_t)(-1)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)(-1)) & 0xffff))) << 16) | ((__uint16_t
)(__builtin_constant_p(((__uint32_t)(-1)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(-1)) >> 16)) << 8 |
((__uint16_t)(((__uint32_t)(-1)) >> 16)) >> 8) :
__bswap16_var(((__uint32_t)(-1)) >> 16)))) : __bswap32_var
(-1))
;
3324 else
3325 sp->nat_rule = htonl(st->nat_rule.ptr->nr)(__builtin_constant_p(st->nat_rule.ptr->nr) ? (((__uint32_t
)((__uint16_t)(__builtin_constant_p(((__uint32_t)(st->nat_rule
.ptr->nr)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t
)(st->nat_rule.ptr->nr)) & 0xffff)) << 8 | ((
__uint16_t)(((__uint32_t)(st->nat_rule.ptr->nr)) & 0xffff
)) >> 8) : __bswap16_var(((__uint32_t)(st->nat_rule.
ptr->nr)) & 0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p
(((__uint32_t)(st->nat_rule.ptr->nr)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->nat_rule.ptr->nr)) >>
16)) << 8 | ((__uint16_t)(((__uint32_t)(st->nat_rule
.ptr->nr)) >> 16)) >> 8) : __bswap16_var(((__uint32_t
)(st->nat_rule.ptr->nr)) >> 16)))) : __bswap32_var
(st->nat_rule.ptr->nr))
;
3326
3327 pf_state_counter_hton(st->packets[0], sp->packets[0])do { sp->packets[0][0] = (__builtin_constant_p((st->packets
[0]>>32)&0xffffffff) ? (((__uint32_t)((__uint16_t)(
__builtin_constant_p(((__uint32_t)((st->packets[0]>>
32)&0xffffffff)) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((st->packets[0]>>32)&0xffffffff)
) & 0xffff)) << 8 | ((__uint16_t)(((__uint32_t)((st
->packets[0]>>32)&0xffffffff)) & 0xffff)) >>
8) : __bswap16_var(((__uint32_t)((st->packets[0]>>32
)&0xffffffff)) & 0xffff))) << 16) | ((__uint16_t
)(__builtin_constant_p(((__uint32_t)((st->packets[0]>>
32)&0xffffffff)) >> 16) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((st->packets[0]>>32)&0xffffffff)
) >> 16)) << 8 | ((__uint16_t)(((__uint32_t)((st->
packets[0]>>32)&0xffffffff)) >> 16)) >>
8) : __bswap16_var(((__uint32_t)((st->packets[0]>>32
)&0xffffffff)) >> 16)))) : __bswap32_var((st->packets
[0]>>32)&0xffffffff)); sp->packets[0][1] = (__builtin_constant_p
(st->packets[0]&0xffffffff) ? (((__uint32_t)((__uint16_t
)(__builtin_constant_p(((__uint32_t)(st->packets[0]&0xffffffff
)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t)(st
->packets[0]&0xffffffff)) & 0xffff)) << 8 | (
(__uint16_t)(((__uint32_t)(st->packets[0]&0xffffffff))
& 0xffff)) >> 8) : __bswap16_var(((__uint32_t)(st->
packets[0]&0xffffffff)) & 0xffff))) << 16) | ((
__uint16_t)(__builtin_constant_p(((__uint32_t)(st->packets
[0]&0xffffffff)) >> 16) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)(st->packets[0]&0xffffffff)) >> 16
)) << 8 | ((__uint16_t)(((__uint32_t)(st->packets[0]
&0xffffffff)) >> 16)) >> 8) : __bswap16_var((
(__uint32_t)(st->packets[0]&0xffffffff)) >> 16))
)) : __bswap32_var(st->packets[0]&0xffffffff)); } while
(0)
;
3328 pf_state_counter_hton(st->packets[1], sp->packets[1])do { sp->packets[1][0] = (__builtin_constant_p((st->packets
[1]>>32)&0xffffffff) ? (((__uint32_t)((__uint16_t)(
__builtin_constant_p(((__uint32_t)((st->packets[1]>>
32)&0xffffffff)) & 0xffff) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((st->packets[1]>>32)&0xffffffff)
) & 0xffff)) << 8 | ((__uint16_t)(((__uint32_t)((st
->packets[1]>>32)&0xffffffff)) & 0xffff)) >>
8) : __bswap16_var(((__uint32_t)((st->packets[1]>>32
)&0xffffffff)) & 0xffff))) << 16) | ((__uint16_t
)(__builtin_constant_p(((__uint32_t)((st->packets[1]>>
32)&0xffffffff)) >> 16) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)((st->packets[1]>>32)&0xffffffff)
) >> 16)) << 8 | ((__uint16_t)(((__uint32_t)((st->
packets[1]>>32)&0xffffffff)) >> 16)) >>
8) : __bswap16_var(((__uint32_t)((st->packets[1]>>32
)&0xffffffff)) >> 16)))) : __bswap32_var((st->packets
[1]>>32)&0xffffffff)); sp->packets[1][1] = (__builtin_constant_p
(st->packets[1]&0xffffffff) ? (((__uint32_t)((__uint16_t
)(__builtin_constant_p(((__uint32_t)(st->packets[1]&0xffffffff
)) & 0xffff) ? (__uint16_t)(((__uint16_t)(((__uint32_t)(st
->packets[1]&0xffffffff)) & 0xffff)) << 8 | (
(__uint16_t)(((__uint32_t)(st->packets[1]&0xffffffff))
& 0xffff)) >> 8) : __bswap16_var(((__uint32_t)(st->
packets[1]&0xffffffff)) & 0xffff))) << 16) | ((
__uint16_t)(__builtin_constant_p(((__uint32_t)(st->packets
[1]&0xffffffff)) >> 16) ? (__uint16_t)(((__uint16_t
)(((__uint32_t)(st->packets[1]&0xffffffff)) >> 16
)) << 8 | ((__uint16_t)(((__uint32_t)(st->packets[1]
&0xffffffff)) >> 16)) >> 8) : __bswap16_var((
(__uint32_t)(st->packets[1]&0xffffffff)) >> 16))
)) : __bswap32_var(st->packets[1]&0xffffffff)); } while
(0)
;
3329 pf_state_counter_hton(st->bytes[0], sp->bytes[0])do { sp->bytes[0][0] = (__builtin_constant_p((st->bytes
[0]>>32)&0xffffffff) ? (((__uint32_t)((__uint16_t)(
__builtin_constant_p(((__uint32_t)((st->bytes[0]>>32
)&0xffffffff)) & 0xffff) ? (__uint16_t)(((__uint16_t)
(((__uint32_t)((st->bytes[0]>>32)&0xffffffff)) &
0xffff)) << 8 | ((__uint16_t)(((__uint32_t)((st->bytes
[0]>>32)&0xffffffff)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)((st->bytes[0]>>32)&0xffffffff)) &
0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p(
((__uint32_t)((st->bytes[0]>>32)&0xffffffff)) >>
16) ? (__uint16_t)(((__uint16_t)(((__uint32_t)((st->bytes
[0]>>32)&0xffffffff)) >> 16)) << 8 | ((
__uint16_t)(((__uint32_t)((st->bytes[0]>>32)&0xffffffff
)) >> 16)) >> 8) : __bswap16_var(((__uint32_t)((st
->bytes[0]>>32)&0xffffffff)) >> 16)))) : __bswap32_var
((st->bytes[0]>>32)&0xffffffff)); sp->bytes[0
][1] = (__builtin_constant_p(st->bytes[0]&0xffffffff) ?
(((__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t
)(st->bytes[0]&0xffffffff)) & 0xffff) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->bytes[0]&0xffffffff)
) & 0xffff)) << 8 | ((__uint16_t)(((__uint32_t)(st->
bytes[0]&0xffffffff)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)(st->bytes[0]&0xffffffff)) & 0xffff)
)) << 16) | ((__uint16_t)(__builtin_constant_p(((__uint32_t
)(st->bytes[0]&0xffffffff)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->bytes[0]&0xffffffff)
) >> 16)) << 8 | ((__uint16_t)(((__uint32_t)(st->
bytes[0]&0xffffffff)) >> 16)) >> 8) : __bswap16_var
(((__uint32_t)(st->bytes[0]&0xffffffff)) >> 16))
)) : __bswap32_var(st->bytes[0]&0xffffffff)); } while (
0)
;
3330 pf_state_counter_hton(st->bytes[1], sp->bytes[1])do { sp->bytes[1][0] = (__builtin_constant_p((st->bytes
[1]>>32)&0xffffffff) ? (((__uint32_t)((__uint16_t)(
__builtin_constant_p(((__uint32_t)((st->bytes[1]>>32
)&0xffffffff)) & 0xffff) ? (__uint16_t)(((__uint16_t)
(((__uint32_t)((st->bytes[1]>>32)&0xffffffff)) &
0xffff)) << 8 | ((__uint16_t)(((__uint32_t)((st->bytes
[1]>>32)&0xffffffff)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)((st->bytes[1]>>32)&0xffffffff)) &
0xffff))) << 16) | ((__uint16_t)(__builtin_constant_p(
((__uint32_t)((st->bytes[1]>>32)&0xffffffff)) >>
16) ? (__uint16_t)(((__uint16_t)(((__uint32_t)((st->bytes
[1]>>32)&0xffffffff)) >> 16)) << 8 | ((
__uint16_t)(((__uint32_t)((st->bytes[1]>>32)&0xffffffff
)) >> 16)) >> 8) : __bswap16_var(((__uint32_t)((st
->bytes[1]>>32)&0xffffffff)) >> 16)))) : __bswap32_var
((st->bytes[1]>>32)&0xffffffff)); sp->bytes[1
][1] = (__builtin_constant_p(st->bytes[1]&0xffffffff) ?
(((__uint32_t)((__uint16_t)(__builtin_constant_p(((__uint32_t
)(st->bytes[1]&0xffffffff)) & 0xffff) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->bytes[1]&0xffffffff)
) & 0xffff)) << 8 | ((__uint16_t)(((__uint32_t)(st->
bytes[1]&0xffffffff)) & 0xffff)) >> 8) : __bswap16_var
(((__uint32_t)(st->bytes[1]&0xffffffff)) & 0xffff)
)) << 16) | ((__uint16_t)(__builtin_constant_p(((__uint32_t
)(st->bytes[1]&0xffffffff)) >> 16) ? (__uint16_t
)(((__uint16_t)(((__uint32_t)(st->bytes[1]&0xffffffff)
) >> 16)) << 8 | ((__uint16_t)(((__uint32_t)(st->
bytes[1]&0xffffffff)) >> 16)) >> 8) : __bswap16_var
(((__uint32_t)(st->bytes[1]&0xffffffff)) >> 16))
)) : __bswap32_var(st->bytes[1]&0xffffffff)); } while (
0)
;
3331
3332}
3333
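The pf_state_peer_hton and pf_state_counter_hton expansions above reduce to ordinary host-to-network byte-order conversions: 32-bit fields go through htonl(), 16-bit fields through htons(), and the 64-bit counters are exported as two network-order 32-bit words. The following is a minimal userland sketch of that effect, not the kernel macros themselves; the struct and field names are hypothetical stand-ins.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct demo_peer {			/* stand-in for the exported peer fields */
	uint32_t seqlo;
	uint16_t max_win;
};

int
main(void)
{
	struct demo_peer host = { 0x11223344, 0xabcd };
	struct demo_peer wire;
	uint64_t packets = 0x0102030405060708ULL;
	uint32_t wire_packets[2];

	/* Same idea as the expanded macros: convert to network byte order. */
	wire.seqlo = htonl(host.seqlo);
	wire.max_win = htons(host.max_win);

	/* 64-bit counters become two network-order 32-bit halves. */
	wire_packets[0] = htonl((uint32_t)((packets >> 32) & 0xffffffff));
	wire_packets[1] = htonl((uint32_t)(packets & 0xffffffff));

	printf("seqlo   host %#x -> wire %#x\n",
	    (unsigned)host.seqlo, (unsigned)wire.seqlo);
	printf("max_win host %#x -> wire %#x\n",
	    (unsigned)host.max_win, (unsigned)wire.max_win);
	printf("packets -> %#x %#x\n",
	    (unsigned)wire_packets[0], (unsigned)wire_packets[1]);
	return (0);
}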
3334static void
3335pf_tbladdr_copyout(struct pf_addr_wrap *aw)
3336{
3337 struct pfr_ktable *kt;
3338
3339 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type))do { } while (0);
3340
3341 kt = aw->p.tbl;
3342 if (!(kt->pfrkt_flagspfrkt_ts.pfrts_t.pfrt_flags & PFR_TFLAG_ACTIVE0x00000004) && kt->pfrkt_root != NULL((void *)0))
3343 kt = kt->pfrkt_root;
3344 aw->p.tbl = NULL((void *)0);
3345 aw->p.tblcnt = (kt->pfrkt_flagspfrkt_ts.pfrts_t.pfrt_flags & PFR_TFLAG_ACTIVE0x00000004) ?
3346 kt->pfrkt_cntpfrkt_ts.pfrts_cnt : -1;
3347}
3348
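pf_tbladdr_copyout() above rewrites a union member just before the enclosing structure is copied out: the kernel-only table pointer is cleared and the smaller tblcnt arm is stored instead. The userland sketch below shows that shape with an illustrative layout, not the real pf_addr_wrap definition, and why the two arms can differ in size on LP64 systems.

#include <stdio.h>

struct addr_wrap_demo {
	union {
		void	*tbl;		/* kernel-only pointer, 8 bytes on LP64 */
		int	 tblcnt;	/* value exported to userland, 4 bytes */
	} p;
};

int
main(void)
{
	struct addr_wrap_demo aw;

	aw.p.tbl = &aw;			/* kernel-side pointer ... */
	aw.p.tbl = NULL;		/* ... cleared before export ... */
	aw.p.tblcnt = 42;		/* ... and replaced by a count */

	/*
	 * A copyout() of the whole structure would copy the full union
	 * width, including bytes the smaller arm never wrote.
	 */
	printf("sizeof(tbl) = %zu, sizeof(tblcnt) = %zu, sizeof(union) = %zu\n",
	    sizeof(aw.p.tbl), sizeof(aw.p.tblcnt), sizeof(aw.p));
	return (0);
}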
3349/*
3350 * XXX - Check for version mismatch!
3351 */
3352static void
3353pf_clear_states(void)
3354{
3355 struct pf_state *s;
3356 u_int i;
3357
3358 for (i = 0; i <= pf_hashmask; i++) {
3359 struct pf_idhash *ih = &V_pf_idhash(pf_idhash)[i];
3360relock:
3361 PF_HASHROW_LOCK(ih)__mtx_lock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3361))
;
3362 LIST_FOREACH(s, &ih->states, entry)for ((s) = (((&ih->states))->lh_first); (s); (s) = (
((s))->entry.le_next))
{
3363 s->timeout = PFTM_PURGE;
3364 /* Don't send out individual delete messages. */
3365 s->state_flags |= PFSTATE_NOSYNC0x08;
3366 pf_unlink_state(s, PF_ENTER_LOCKED0x00000001);
3367 goto relock;
3368 }
3369 PF_HASHROW_UNLOCK(ih)__mtx_unlock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3369))
;
3370 }
3371}
3372
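pf_clear_states() above cannot keep iterating a hash row after pf_unlink_state() removes the current entry (and may drop the row lock), so it restarts the scan from the head via the relock label. A minimal userland sketch of that restart-after-removal pattern using the queue(3) LIST macros follows; locking is omitted and the node type is made up.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int		 id;
	LIST_ENTRY(node) entry;
};
LIST_HEAD(nodehead, node);

int
main(void)
{
	struct nodehead head = LIST_HEAD_INITIALIZER(head);
	struct node *n;

	for (int i = 0; i < 4; i++) {
		n = malloc(sizeof(*n));
		n->id = i;
		LIST_INSERT_HEAD(&head, n, entry);
	}

restart:				/* analogue of the "relock:" label */
	LIST_FOREACH(n, &head, entry) {
		printf("purging node %d\n", n->id);
		LIST_REMOVE(n, entry);	/* in pf: pf_unlink_state() */
		free(n);
		goto restart;		/* the iterator is now invalid */
	}
	return (0);
}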
3373static int
3374pf_clear_tables(void)
3375{
3376 struct pfioc_table io;
3377 int error;
3378
3379 bzero(&io, sizeof(io));
3380
3381 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3382 io.pfrio_flags);
3383
3384 return (error);
3385}
3386
3387static void
3388pf_clear_srcnodes(struct pf_src_node *n)
3389{
3390 struct pf_state *s;
3391 int i;
3392
3393 for (i = 0; i <= pf_hashmask; i++) {
3394 struct pf_idhash *ih = &V_pf_idhash(pf_idhash)[i];
3395
3396 PF_HASHROW_LOCK(ih)__mtx_lock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3396))
;
3397 LIST_FOREACH(s, &ih->states, entry)for ((s) = (((&ih->states))->lh_first); (s); (s) = (
((s))->entry.le_next))
{
3398 if (n == NULL((void *)0) || n == s->src_node)
3399 s->src_node = NULL((void *)0);
3400 if (n == NULL((void *)0) || n == s->nat_src_node)
3401 s->nat_src_node = NULL((void *)0);
3402 }
3403 PF_HASHROW_UNLOCK(ih)__mtx_unlock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3403))
;
3404 }
3405
3406 if (n == NULL((void *)0)) {
3407 struct pf_srchash *sh;
3408
3409 for (i = 0, sh = V_pf_srchash(pf_srchash); i <= pf_srchashmask;
3410 i++, sh++) {
3411 PF_HASHROW_LOCK(sh)__mtx_lock_flags(&((((&(sh)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3411))
;
3412 LIST_FOREACH(n, &sh->nodes, entry)for ((n) = (((&sh->nodes))->lh_first); (n); (n) = (
((n))->entry.le_next))
{
3413 n->expire = 1;
3414 n->states = 0;
3415 }
3416 PF_HASHROW_UNLOCK(sh)__mtx_unlock_flags(&((((&(sh)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3416))
;
3417 }
3418 } else {
3419 /* XXX: hash slot should already be locked here. */
3420 n->expire = 1;
3421 n->states = 0;
3422 }
3423}
3424
3425static void
3426pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
3427{
3428 struct pf_src_node_list kill;
3429
3430 LIST_INIT(&kill)do { (((&kill))->lh_first) = ((void *)0); } while (0);
3431 for (int i = 0; i <= pf_srchashmask; i++) {
3432 struct pf_srchash *sh = &V_pf_srchash(pf_srchash)[i];
3433 struct pf_src_node *sn, *tmp;
3434
3435 PF_HASHROW_LOCK(sh)__mtx_lock_flags(&((((&(sh)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3435))
;
3436 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)for ((sn) = (((&sh->nodes))->lh_first); (sn) &&
((tmp) = (((sn))->entry.le_next), 1); (sn) = (tmp))
3437 if (PF_MATCHA(psnk->psnk_src.neg,pf_match_addr(psnk->psnk_src.neg, &psnk->psnk_src.addr
.v.a.addr, &psnk->psnk_src.addr.v.a.mask, &sn->
addr, sn->af)
3438 &psnk->psnk_src.addr.v.a.addr,pf_match_addr(psnk->psnk_src.neg, &psnk->psnk_src.addr
.v.a.addr, &psnk->psnk_src.addr.v.a.mask, &sn->
addr, sn->af)
3439 &psnk->psnk_src.addr.v.a.mask,pf_match_addr(psnk->psnk_src.neg, &psnk->psnk_src.addr
.v.a.addr, &psnk->psnk_src.addr.v.a.mask, &sn->
addr, sn->af)
3440 &sn->addr, sn->af)pf_match_addr(psnk->psnk_src.neg, &psnk->psnk_src.addr
.v.a.addr, &psnk->psnk_src.addr.v.a.mask, &sn->
addr, sn->af)
&&
3441 PF_MATCHA(psnk->psnk_dst.neg,pf_match_addr(psnk->psnk_dst.neg, &psnk->psnk_dst.addr
.v.a.addr, &psnk->psnk_dst.addr.v.a.mask, &sn->
raddr, sn->af)
3442 &psnk->psnk_dst.addr.v.a.addr,pf_match_addr(psnk->psnk_dst.neg, &psnk->psnk_dst.addr
.v.a.addr, &psnk->psnk_dst.addr.v.a.mask, &sn->
raddr, sn->af)
3443 &psnk->psnk_dst.addr.v.a.mask,pf_match_addr(psnk->psnk_dst.neg, &psnk->psnk_dst.addr
.v.a.addr, &psnk->psnk_dst.addr.v.a.mask, &sn->
raddr, sn->af)
3444 &sn->raddr, sn->af)pf_match_addr(psnk->psnk_dst.neg, &psnk->psnk_dst.addr
.v.a.addr, &psnk->psnk_dst.addr.v.a.mask, &sn->
raddr, sn->af)
) {
3445 pf_unlink_src_node(sn);
3446 LIST_INSERT_HEAD(&kill, sn, entry)do { ; if (((((sn))->entry.le_next) = (((&kill))->lh_first
)) != ((void *)0)) (((&kill))->lh_first)->entry.le_prev
= &(((sn))->entry.le_next); (((&kill))->lh_first
) = (sn); (sn)->entry.le_prev = &(((&kill))->lh_first
); } while (0)
;
3447 sn->expire = 1;
3448 }
3449 PF_HASHROW_UNLOCK(sh)__mtx_unlock_flags(&((((&(sh)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3449))
;
3450 }
3451
3452 for (int i = 0; i <= pf_hashmask; i++) {
3453 struct pf_idhash *ih = &V_pf_idhash(pf_idhash)[i];
3454 struct pf_state *s;
3455
3456 PF_HASHROW_LOCK(ih)__mtx_lock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3456))
;
3457 LIST_FOREACH(s, &ih->states, entry)for ((s) = (((&ih->states))->lh_first); (s); (s) = (
((s))->entry.le_next))
{
3458 if (s->src_node && s->src_node->expire == 1)
3459 s->src_node = NULL((void *)0);
3460 if (s->nat_src_node && s->nat_src_node->expire == 1)
3461 s->nat_src_node = NULL((void *)0);
3462 }
3463 PF_HASHROW_UNLOCK(ih)__mtx_unlock_flags(&((((&(ih)->lock))))->mtx_lock
, ((0)), ("/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
), (3463))
;
3464 }
3465
3466 psnk->psnk_killed = pf_free_src_nodes(&kill);
3467}
3468
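pf_kill_srcnodes() above walks each source-node hash row with LIST_FOREACH_SAFE so that matching nodes can be unlinked and collected onto a private kill list without invalidating the iterator, and only frees them after all rows have been visited. Below is a small userland sketch of that collect-then-free pattern, assuming a BSD-style sys/queue.h that provides LIST_FOREACH_SAFE; the node type and match predicate are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct srcnode {
	int		    id;
	LIST_ENTRY(srcnode) entry;
};
LIST_HEAD(snlist, srcnode);

int
main(void)
{
	struct snlist nodes = LIST_HEAD_INITIALIZER(nodes);
	struct snlist kill = LIST_HEAD_INITIALIZER(kill);
	struct srcnode *sn, *tmp;
	int killed = 0;

	for (int i = 0; i < 6; i++) {
		sn = malloc(sizeof(*sn));
		sn->id = i;
		LIST_INSERT_HEAD(&nodes, sn, entry);
	}

	/* Move even-numbered nodes to the kill list, safely, mid-walk. */
	LIST_FOREACH_SAFE(sn, &nodes, entry, tmp)
		if (sn->id % 2 == 0) {
			LIST_REMOVE(sn, entry);
			LIST_INSERT_HEAD(&kill, sn, entry);
		}

	/* Rough analogue of pf_free_src_nodes(&kill). */
	LIST_FOREACH_SAFE(sn, &kill, entry, tmp) {
		killed++;
		free(sn);
	}
	printf("killed %d nodes\n", killed);
	return (0);
}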
3469/*
3470 * XXX - Check for version mismatch!
3471 */
3472
3473/*
3474 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3475 */
3476static int
3477shutdown_pf(void)
3478{
3479 int error = 0;
3480 u_int32_t t[5];
3481 char nn = '\0';
3482
3483 do {
3484 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3485 != 0) {
3486 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("shutdown_pf: SCRUB\n"
)
;
3487 break;
3488 }
3489 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3490 != 0) {
3491 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("shutdown_pf: FILTER\n"
)
;
3492 break; /* XXX: rollback? */
3493 }
3494 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3495 != 0) {
3496 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("shutdown_pf: NAT\n"
)
;
3497 break; /* XXX: rollback? */
3498 }
3499 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3500 != 0) {
3501 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("shutdown_pf: BINAT\n"
)
;
3502 break; /* XXX: rollback? */
3503 }
3504 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3505 != 0) {
3506 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("shutdown_pf: RDR\n"
)
;
3507 break; /* XXX: rollback? */
3508 }
3509
3510 /* XXX: these should always succeed here */
3511 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3512 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3513 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3514 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3515 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3516
3517 if ((error = pf_clear_tables()) != 0)
3518 break;
3519
3520#ifdef ALTQ
3521 if ((error = pf_begin_altq(&t[0])) != 0) {
3522 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"))if ((pf_status).debug >= (PF_DEBUG_MISC)) printf ("shutdown_pf: ALTQ\n"
)
;
3523 break;
3524 }
3525 pf_commit_altq(t[0]);
3526#endif
3527
3528 pf_clear_states();
3529
3530 pf_clear_srcnodes(NULL((void *)0));
3531
3532 /* status does not use malloc'ed memory, so no cleanup is needed */
3533 /* fingerprints and interfaces have their own cleanup code */
3534
3535 /* Free counters last as we updated them during shutdown. */
3536 counter_u64_free(V_pf_default_rule(pf_default_rule).states_cur);
3537 counter_u64_free(V_pf_default_rule(pf_default_rule).states_tot);
3538 counter_u64_free(V_pf_default_rule(pf_default_rule).src_nodes);
3539
3540 for (int i = 0; i < PFRES_MAX16; i++)
3541 counter_u64_free(V_pf_status(pf_status).counters[i]);
3542 for (int i = 0; i < LCNT_MAX7; i++)
3543 counter_u64_free(V_pf_status(pf_status).lcounters[i]);
3544 for (int i = 0; i < FCNT_MAX3; i++)
3545 counter_u64_free(V_pf_status(pf_status).fcounters[i]);
3546 for (int i = 0; i < SCNT_MAX3; i++)
3547 counter_u64_free(V_pf_status(pf_status).scounters[i]);
3548 } while(0);
3549
3550 return (error);
3551}
3552
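shutdown_pf() above opens a transaction ticket for every ruleset type with pf_begin_rules() and only commits once every begin has succeeded, bailing out of the do/while(0) block on the first failure. The sketch below mirrors that shape in userland; begin_ruleset() and commit_ruleset() are hypothetical stand-ins, not pf functions.

#include <stdio.h>
#include <stdint.h>

#define NSETS 5

static int
begin_ruleset(uint32_t *ticket, int which)
{
	*ticket = (uint32_t)(1000 + which);	/* pretend transaction id */
	return (0);				/* 0 == success, as in pf */
}

static int
commit_ruleset(uint32_t ticket, int which)
{
	printf("committed ruleset %d (ticket %u)\n", which, ticket);
	return (0);
}

int
main(void)
{
	uint32_t t[NSETS];
	int error = 0;

	do {
		int i;

		for (i = 0; i < NSETS; i++)
			if ((error = begin_ruleset(&t[i], i)) != 0)
				break;		/* bail out, as shutdown_pf does */
		if (error != 0)
			break;
		for (i = 0; i < NSETS; i++)
			commit_ruleset(t[i], i);
	} while (0);

	return (error);
}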
3553#ifdef INET1
3554static int
3555pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3556 struct inpcb *inp)
3557{
3558 int chk;
3559
3560 chk = pf_test(PF_IN, ifp, m, inp);
3561 if (chk && *m) {
3562 m_freem(*m);
3563 *m = NULL((void *)0);
3564 }
3565
3566 return (chk);
3567}
3568
3569static int
3570pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3571 struct inpcb *inp)
3572{
3573 int chk;
3574
3575 chk = pf_test(PF_OUT, ifp, m, inp);
3576 if (chk && *m) {
3577 m_freem(*m);
3578 *m = NULL((void *)0);
3579 }
3580
3581 return (chk);
3582}
3583#endif
3584
3585#ifdef INET61
3586static int
3587pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3588 struct inpcb *inp)
3589{
3590 int chk;
3591
3592 /*
3593 * In the case of loopback traffic, IPv6 uses the real interface in
3594 * order to support scoped addresses. To support stateful filtering,
3595 * we change this to lo0, as is already done for IPv4.
3596 */
3597 CURVNET_SET(ifp->if_vnet);
3598 chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP0x00020000 ? V_loif(loif) : ifp, m, inp);
3599 CURVNET_RESTORE();
3600 if (chk && *m) {
3601 m_freem(*m);
3602 *m = NULL((void *)0);
3603 }
3604 return chk;
3605}
3606
3607static int
3608pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3609 struct inpcb *inp)
3610{
3611 int chk;
3612
3613 CURVNET_SET(ifp->if_vnet);
3614 chk = pf_test6(PF_OUT, ifp, m, inp);
3615 CURVNET_RESTORE();
3616 if (chk && *m) {
3617 m_freem(*m);
3618 *m = NULL((void *)0);
3619 }
3620 return chk;
3621}
3622#endif /* INET6 */
3623
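The pf_check_in/out and pf_check6_in/out hooks above share one convention: a nonzero verdict from pf_test()/pf_test6() means the packet is dropped, and the hook itself frees the mbuf and clears the caller's pointer so nothing downstream touches it. A userland sketch of that convention follows; struct pkt and test_packet() are hypothetical stand-ins for mbufs and the pf test functions.

#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };

static int
test_packet(struct pkt *p)
{
	return (p->len > 1500);		/* pretend verdict: drop oversized */
}

static int
check_hook(struct pkt **m)
{
	int chk = test_packet(*m);

	if (chk && *m) {		/* same shape as the pf hooks */
		free(*m);
		*m = NULL;
	}
	return (chk);
}

int
main(void)
{
	struct pkt *m = malloc(sizeof(*m));

	m->len = 9000;
	printf("verdict %d, mbuf %s\n", check_hook(&m),
	    m == NULL ? "freed" : "kept");
	return (0);
}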
3624static int
3625hook_pf(void)
3626{
3627#ifdef INET1
3628 struct pfil_head *pfh_inet;
3629#endif
3630#ifdef INET61
3631 struct pfil_head *pfh_inet6;
3632#endif
3633
3634 if (V_pf_pfil_hooked(pf_pfil_hooked))
3635 return (0);
3636
3637#ifdef INET1
3638 pfh_inet = pfil_head_get(PFIL_TYPE_AF1, AF_INET2);
3639 if (pfh_inet == NULL((void *)0))
3640 return (ESRCH3); /* XXX */
3641 pfil_add_hook(pf_check_in, NULL((void *)0), PFIL_IN0x00000001 | PFIL_WAITOK0x00000004, pfh_inet);
3642 pfil_add_hook(pf_check_out, NULL((void *)0), PFIL_OUT0x00000002 | PFIL_WAITOK0x00000004, pfh_inet);
3643#endif
3644#ifdef INET61
3645 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF1, AF_INET628);
3646 if (pfh_inet6 == NULL((void *)0)) {
3647#ifdef INET1
3648 pfil_remove_hook(pf_check_in, NULL((void *)0), PFIL_IN0x00000001 | PFIL_WAITOK0x00000004,
3649 pfh_inet);
3650 pfil_remove_hook(pf_check_out, NULL((void *)0), PFIL_OUT0x00000002 | PFIL_WAITOK0x00000004,
3651 pfh_inet);
3652#endif
3653 return (ESRCH3); /* XXX */
3654 }
3655 pfil_add_hook(pf_check6_in, NULL((void *)0), PFIL_IN0x00000001 | PFIL_WAITOK0x00000004, pfh_inet6);
3656 pfil_add_hook(pf_check6_out, NULL((void *)0), PFIL_OUT0x00000002 | PFIL_WAITOK0x00000004, pfh_inet6);
3657#endif
3658
3659 V_pf_pfil_hooked(pf_pfil_hooked) = 1;
3660 return (0);
3661}
3662
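hook_pf() above registers the IPv4 hooks first and, if the IPv6 pfil head cannot be found, removes them again before returning ESRCH, so the module is never left half-hooked. Below is a userland sketch of that unwind-on-partial-failure shape; register_family() and unregister_family() are hypothetical stand-ins for pfil_add_hook() and pfil_remove_hook().

#include <stdio.h>
#include <errno.h>

static int inet_hooked, inet6_hooked;

static int
register_family(int *flag, int ok)
{
	if (!ok)
		return (ESRCH);
	*flag = 1;
	return (0);
}

static void
unregister_family(int *flag)
{
	*flag = 0;
}

static int
hook_demo(int inet6_available)
{
	int error;

	if ((error = register_family(&inet_hooked, 1)) != 0)
		return (error);
	if ((error = register_family(&inet6_hooked, inet6_available)) != 0) {
		unregister_family(&inet_hooked);	/* unwind IPv4 hooks */
		return (error);
	}
	return (0);
}

int
main(void)
{
	printf("hook_demo(0) = %d (inet hooked: %d)\n", hook_demo(0), inet_hooked);
	printf("hook_demo(1) = %d (inet hooked: %d)\n", hook_demo(1), inet_hooked);
	return (0);
}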
3663static int
3664dehook_pf(void)
3665{
3666#ifdef INET1
3667 struct pfil_head *pfh_inet;
3668#endif
3669#ifdef INET61
3670 struct pfil_head *pfh_inet6;
3671#endif
3672
3673 if (V_pf_pfil_hooked(pf_pfil_hooked) == 0)
3674 return (0);
3675
3676#ifdef INET1
3677 pfh_inet = pfil_head_get(PFIL_TYPE_AF1, AF_INET2);
3678 if (pfh_inet == NULL((void *)0))
3679 return (ESRCH3); /* XXX */
3680 pfil_remove_hook(pf_check_in, NULL((void *)0), PFIL_IN0x00000001 | PFIL_WAITOK0x00000004,
3681 pfh_inet);
3682 pfil_remove_hook(pf_check_out, NULL((void *)0), PFIL_OUT0x00000002 | PFIL_WAITOK0x00000004,
3683 pfh_inet);
3684#endif
3685#ifdef INET61
3686 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF1, AF_INET628);
3687 if (pfh_inet6 == NULL((void *)0))
3688 return (ESRCH3); /* XXX */
3689 pfil_remove_hook(pf_check6_in, NULL((void *)0), PFIL_IN0x00000001 | PFIL_WAITOK0x00000004,
3690 pfh_inet6);
3691 pfil_remove_hook(pf_check6_out, NULL((void *)0), PFIL_OUT0x00000002 | PFIL_WAITOK0x00000004,
3692 pfh_inet6);
3693#endif
3694
3695 V_pf_pfil_hooked(pf_pfil_hooked) = 0;
3696 return (0);
3697}
3698
3699static void
3700pf_load_vnet(void)
3701{
3702 VNET_ITERATOR_DECL(vnet_iter);
3703
3704 VNET_LIST_RLOCK();
3705 VNET_FOREACH(vnet_iter) {
3706 CURVNET_SET(vnet_iter);
3707 V_pf_pfil_hooked(pf_pfil_hooked) = 0;
3708 TAILQ_INIT(&V_pf_tags)do { (((&(pf_tags)))->tqh_first) = ((void *)0); (&
(pf_tags))->tqh_last = &(((&(pf_tags)))->tqh_first
); ; } while (0)
;
3709 TAILQ_INIT(&V_pf_qids)do { (((&(pf_qids)))->tqh_first) = ((void *)0); (&
(pf_qids))->tqh_last = &(((&(pf_qids)))->tqh_first
); ; } while (0)
;
3710 CURVNET_RESTORE();
3711 }
3712 VNET_LIST_RUNLOCK();
3713
3714 pfattach_vnet();
3715 V_pf_vnet_active(pf_vnet_active) = 1;
3716}
3717
3718static int
3719pf_load(void)
3720{
3721 int error;
3722
3723 rw_init(&pf_rules_lock, "pf rulesets")_rw_init_flags(&(&pf_rules_lock)->rw_lock, "pf rulesets"
, 0)
;
3724 sx_init(&pf_ioctl_lock, "pf ioctl")sx_init_flags((&pf_ioctl_lock), ("pf ioctl"), 0);
3725
3726 pf_mtag_initialize();
3727
3728 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME"pf");
3729 if (pf_dev == NULL((void *)0))
3730 return (ENOMEM12);
3731
3732 pf_end_threads = 0;
3733 error = kproc_create(pf_purge_thread, NULL((void *)0), NULL((void *)0), 0, 0, "pf purge");
3734 if (error != 0)
3735 return (error);
3736
3737 pfi_initialize();
3738
3739 return (0);
3740}
3741
3742static void
3743pf_unload_vnet(void)
3744{
3745 int error;
3746
3747 V_pf_vnet_active(pf_vnet_active) = 0;
3748 V_pf_status(pf_status).running = 0;
3749 swi_remove(V_pf_swi_cookie(pf_swi_cookie));
3750 error = dehook_pf();
3751 if (error) {
3752 /*
3753 * Should not happen!
3754 * XXX Due to error code ESRCH, kldunload will show
3755 * a message like 'No such process'.
3756 */
3757 printf("%s : pfil unregistration failed\n", __FUNCTION__);
3758 return;
3759 }
3760
3761 pf_unload_vnet_purge();
3762
3763 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3763)
;
3764 shutdown_pf();
3765 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3765)
;
3766
3767 pf_normalize_cleanup();
3768 PF_RULES_WLOCK()_rw_wlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3768)
;
3769 pfi_cleanup_vnet();
3770 PF_RULES_WUNLOCK()_rw_wunlock_cookie(&((&pf_rules_lock))->rw_lock, "/usr/src/sys/modules/pf/../../netpfil/pf/pf_ioctl.c"
, 3770)
;
3771 pfr_cleanup();
3772 pf_osfp_flush();
3773 pf_cleanup();
3774 if (IS_DEFAULT_VNET(curvnet)1)
3775 pf_mtag_cleanup();
3776}
3777
3778static int
3779pf_unload(void)
3780{
3781 int error = 0;
3782
3783 pf_end_threads = 1;
3784 while (pf_end_threads < 2) {
3785 wakeup_one(pf_purge_thread);
3786 rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0)_sleep((pf_purge_thread), &(&pf_rules_lock)->lock_object
, (0), ("pftmo"), tick_sbt * (0), 0, 0x0100)
;
3787 }
3788
3789 if (pf_dev != NULL((void *)0))
3790 destroy_dev(pf_dev);
3791
3792 pfi_cleanup();
3793
3794 rw_destroy(&pf_rules_lock)_rw_destroy(&(&pf_rules_lock)->rw_lock);
3795 sx_destroy(&pf_ioctl_lock);
3796
3797 return (error);
3798}
3799
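pf_unload() above stops the purge worker with a small handshake: it sets pf_end_threads to 1, keeps waking the worker, and sleeps until the worker bumps the flag to 2 to acknowledge (the worker side is not shown in this section). The sketch below reproduces that handshake with POSIX threads standing in for wakeup_one() and rw_sleep(); it is an analogy, not the kernel synchronization itself.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int end_threads;			/* 0 running, 1 stop requested, 2 acked */

static void *
purge_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (end_threads == 0)	/* normally: periodic purge work */
		pthread_cond_wait(&cv, &lock);
	end_threads = 2;		/* acknowledge the stop request */
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, purge_thread, NULL);

	pthread_mutex_lock(&lock);
	end_threads = 1;		/* analogue of pf_end_threads = 1 */
	while (end_threads < 2) {
		pthread_cond_broadcast(&cv);	/* analogue of wakeup_one() */
		pthread_cond_wait(&cv, &lock);	/* analogue of rw_sleep() */
	}
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	printf("purge thread acknowledged shutdown\n");
	return (0);
}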
3800static void
3801vnet_pf_init(void *unused __unused__attribute__((__unused__)))
3802{
3803
3804 pf_load_vnet();
3805}
3806VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,static struct sysinit vnet_pf_init_sys_init = { SI_SUB_PROTO_FIREWALL
, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t)vnet_pf_init
, ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysinit_set"
); __asm__(".globl " "__stop_set_sysinit_set"); static void const
* const __set_sysinit_set_sym_vnet_pf_init_sys_init __attribute__
((__section__("set_" "sysinit_set"))) __attribute__((__used__
)) = &(vnet_pf_init_sys_init)
3807 vnet_pf_init, NULL)static struct sysinit vnet_pf_init_sys_init = { SI_SUB_PROTO_FIREWALL
, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t)vnet_pf_init
, ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysinit_set"
); __asm__(".globl " "__stop_set_sysinit_set"); static void const
* const __set_sysinit_set_sym_vnet_pf_init_sys_init __attribute__
((__section__("set_" "sysinit_set"))) __attribute__((__used__
)) = &(vnet_pf_init_sys_init)
;
3808
3809static void
3810vnet_pf_uninit(const void *unused __unused__attribute__((__unused__)))
3811{
3812
3813 pf_unload_vnet();
3814}
3815VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,static struct sysinit vnet_pf_uninit_sys_uninit = { SI_SUB_PROTO_FIREWALL
, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t)vnet_pf_uninit
, ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysuninit_set"
); __asm__(".globl " "__stop_set_sysuninit_set"); static void
const * const __set_sysuninit_set_sym_vnet_pf_uninit_sys_uninit
__attribute__((__section__("set_" "sysuninit_set"))) __attribute__
((__used__)) = &(vnet_pf_uninit_sys_uninit)
3816 vnet_pf_uninit, NULL)static struct sysinit vnet_pf_uninit_sys_uninit = { SI_SUB_PROTO_FIREWALL
, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t)vnet_pf_uninit
, ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysuninit_set"
); __asm__(".globl " "__stop_set_sysuninit_set"); static void
const * const __set_sysuninit_set_sym_vnet_pf_uninit_sys_uninit
__attribute__((__section__("set_" "sysuninit_set"))) __attribute__
((__used__)) = &(vnet_pf_uninit_sys_uninit)
;
3817
3818
3819static int
3820pf_modevent(module_t mod, int type, void *data)
3821{
3822 int error = 0;
3823
3824 switch(type) {
3825 case MOD_LOAD:
3826 error = pf_load();
3827 break;
3828 case MOD_QUIESCE:
3829 /*
3830 * The module cannot be unloaded safely due to race conditions.
3831 */
3832 error = EBUSY16;
3833 break;
3834 case MOD_UNLOAD:
3835 error = pf_unload();
3836 break;
3837 default:
3838 error = EINVAL22;
3839 break;
3840 }
3841
3842 return (error);
3843}
3844
3845static moduledata_t pf_mod = {
3846 "pf",
3847 pf_modevent,
3848 0
3849};
3850
3851DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND)static struct mod_depend _pf_depend_on_kernel __attribute__((
__section__(".data"))) = { 1100122, 1100122, (((((1100122)+((
100000)-1))/(100000))*(100000)) - 1) }; static struct mod_metadata
_mod_metadata_md_pf_on_kernel = { 1, 1, &_pf_depend_on_kernel
, "kernel" }; __asm__(".globl " "__start_set_modmetadata_set"
); __asm__(".globl " "__stop_set_modmetadata_set"); static void
const * const __set_modmetadata_set_sym__mod_metadata_md_pf_on_kernel
__attribute__((__section__("set_" "modmetadata_set"))) __attribute__
((__used__)) = &(_mod_metadata_md_pf_on_kernel); static struct
mod_metadata _mod_metadata_md_pf = { 1, 2, &pf_mod, "pf"
}; __asm__(".globl " "__start_set_modmetadata_set"); __asm__
(".globl " "__stop_set_modmetadata_set"); static void const *
const __set_modmetadata_set_sym__mod_metadata_md_pf __attribute__
((__section__("set_" "modmetadata_set"))) __attribute__((__used__
)) = &(_mod_metadata_md_pf); static struct sysinit pfmodule_sys_init
= { SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, (sysinit_cfunc_t
)(sysinit_nfunc_t)module_register_init, ((void *)(&pf_mod
)) }; __asm__(".globl " "__start_set_sysinit_set"); __asm__(".globl "
"__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_pfmodule_sys_init
__attribute__((__section__("set_" "sysinit_set"))) __attribute__
((__used__)) = &(pfmodule_sys_init); struct __hack
;
3852MODULE_VERSION(pf, PF_MODVER)static struct mod_version _pf_version __attribute__((__section__
(".data"))) = { 1 }; static struct mod_metadata _mod_metadata_pf_version
= { 1, 3, &_pf_version, "pf" }; __asm__(".globl " "__start_set_modmetadata_set"
); __asm__(".globl " "__stop_set_modmetadata_set"); static void
const * const __set_modmetadata_set_sym__mod_metadata_pf_version
__attribute__((__section__("set_" "modmetadata_set"))) __attribute__
((__used__)) = &(_mod_metadata_pf_version)
;