Bug Summary

File: kern/kern_descrip.c
Warning: line 1369, column 11
Copies out a struct with uncleared padding (>= 4 bytes)
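The checker is reporting that a structure containing compiler-inserted padding is copied to userspace without the padding having been zeroed, so stale kernel stack bytes can leak to a user process. Below is a minimal sketch of this bug class and its conventional fix; the structure and names are hypothetical, not the ones flagged at line 1369:

    #include <sys/types.h>
    #include <sys/systm.h>

    /* A short followed by a long leaves 6 padding bytes on LP64. */
    struct kinfo_example {
            short   ke_type;        /* 2 bytes + 6 bytes of padding */
            long    ke_value;
    };

    static int
    example_copyout(void *uaddr)
    {
            struct kinfo_example ke;

            /*
             * Zero the whole object before filling it in, so the
             * padding holes cannot carry stale stack contents to
             * userspace via copyout().
             */
            bzero(&ke, sizeof(ke));
            ke.ke_type = 1;
            ke.ke_value = 42;
            return (copyout(&ke, uaddr, sizeof(ke)));
    }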

Annotated Source Code

1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_descrip.c 300852 2016-05-27 17:00:15Z mjg $");
39
40#include "opt_capsicum.h"
41#include "opt_compat.h"
42#include "opt_ddb.h"
43#include "opt_ktrace.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47
48#include <sys/capsicum.h>
49#include <sys/conf.h>
50#include <sys/fcntl.h>
51#include <sys/file.h>
52#include <sys/filedesc.h>
53#include <sys/filio.h>
54#include <sys/jail.h>
55#include <sys/kernel.h>
56#include <sys/limits.h>
57#include <sys/lock.h>
58#include <sys/malloc.h>
59#include <sys/mount.h>
60#include <sys/mutex.h>
61#include <sys/namei.h>
62#include <sys/selinfo.h>
63#include <sys/priv.h>
64#include <sys/proc.h>
65#include <sys/protosw.h>
66#include <sys/racct.h>
67#include <sys/resourcevar.h>
68#include <sys/sbuf.h>
69#include <sys/signalvar.h>
70#include <sys/socketvar.h>
71#include <sys/kdb.h>
72#include <sys/stat.h>
73#include <sys/sx.h>
74#include <sys/syscallsubr.h>
75#include <sys/sysctl.h>
76#include <sys/sysproto.h>
77#include <sys/unistd.h>
78#include <sys/user.h>
79#include <sys/vnode.h>
80#ifdef KTRACE
81#include <sys/ktrace.h>
82#endif
83
84#include <net/vnet.h>
85
86#include <security/audit/audit.h>
87
88#include <vm/uma.h>
89#include <vm/vm.h>
90
91#include <ddb/ddb.h>
92
93static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
94static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
95 "file desc to leader structures");
96static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
97MALLOC_DEFINE(M_FILECAPS, "filecaps", "descriptor capabilities");
98
99MALLOC_DECLARE(M_FADVISE);
100
101static uma_zone_t file_zone;
102static uma_zone_t filedesc0_zone;
103
104static int closefp(struct filedesc *fdp, int fd, struct file *fp,
105 struct thread *td, int holdleaders);
106static int fd_first_free(struct filedesc *fdp, int low, int size);
107static int fd_last_used(struct filedesc *fdp, int size);
108static void fdgrowtable(struct filedesc *fdp, int nfd);
109static void fdgrowtable_exp(struct filedesc *fdp, int nfd);
110static void fdunused(struct filedesc *fdp, int fd);
111static void fdused(struct filedesc *fdp, int fd);
112static int getmaxfd(struct thread *td);
113
114/*
115 * Each process has:
116 *
117 * - An array of open file descriptors (fd_ofiles)
118 * - An array of file flags (fd_ofileflags)
119 * - A bitmap recording which descriptors are in use (fd_map)
120 *
121 * A process starts out with NDFILE descriptors. The value of NDFILE has
122 * been selected based on the historical limit of 20 open files, and an
123 * assumption that the majority of processes, especially short-lived
124 * processes like shells, will never need more.
125 *
126 * If this initial allocation is exhausted, a larger descriptor table and
127 * map are allocated dynamically, and the pointers in the process's struct
128 * filedesc are updated to point to those. This is repeated every time
129 * the process runs out of file descriptors (provided it hasn't hit its
130 * resource limit).
131 *
132 * Since threads may hold references to individual descriptor table
133 * entries, the tables are never freed. Instead, they are placed on a
134 * linked list and freed only when the struct filedesc is released.
135 */
136#define NDFILE 20
137#define NDSLOTSIZE sizeof(NDSLOTTYPE)
138#define NDENTRIES (NDSLOTSIZE * __CHAR_BIT)
139#define NDSLOT(x) ((x) / NDENTRIES)
140#define NDBIT(x) ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
141#define NDSLOTS(x) (((x) + NDENTRIES - 1) / NDENTRIES)
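These macros implement the descriptor allocation bitmap: NDSLOTTYPE is u_long, NDENTRIES is the number of descriptors tracked per map word (64 on LP64), NDSLOT(x) selects the word holding descriptor x, NDBIT(x) the bit within that word, and NDSLOTS(x) rounds up to the number of words needed for x descriptors. A standalone userland sketch of the same arithmetic (illustrative only, not part of the source):

    #include <assert.h>

    typedef unsigned long NDSLOTTYPE;
    #define NDSLOTSIZE  sizeof(NDSLOTTYPE)
    #define NDENTRIES   (NDSLOTSIZE * 8)    /* bits per map word */
    #define NDSLOT(x)   ((x) / NDENTRIES)
    #define NDBIT(x)    ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
    #define NDSLOTS(x)  (((x) + NDENTRIES - 1) / NDENTRIES)

    int
    main(void)
    {
            NDSLOTTYPE map[NDSLOTS(128)] = { 0 };

            map[NDSLOT(70)] |= NDBIT(70);   /* fdused(): mark fd 70 used */
            assert((map[NDSLOT(70)] & NDBIT(70)) != 0);     /* fdisused() */
            map[NDSLOT(70)] &= ~NDBIT(70);  /* fdunused(): mark it free */
            assert((map[NDSLOT(70)] & NDBIT(70)) == 0);
            return (0);
    }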
142
143/*
144 * SLIST entry used to keep track of ofiles which must be reclaimed when
145 * the process exits.
146 */
147struct freetable {
148 struct fdescenttbl *ft_table;
149 SLIST_ENTRY(freetable) ft_next;
150};
151
152/*
153 * Initial allocation: a filedesc structure + the head of SLIST used to
154 * keep track of old ofiles + enough space for NDFILE descriptors.
155 */
156
157struct fdescenttbl0 {
158 int fdt_nfiles;
159 struct filedescent fdt_ofiles[NDFILE];
160};
161
162struct filedesc0 {
163 struct filedesc fd_fd;
164 SLIST_HEAD(, freetable) fd_free;
165 struct fdescenttbl0 fd_dfiles;
166 NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
167};
168
169/*
170 * Descriptor management.
171 */
172volatile int openfiles; /* actual number of open files */
173struct mtx sigio_lock; /* mtx to protect pointers to sigio */
174void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
175
176/*
177 * If low >= size, just return low. Otherwise find the first zero bit in the
178 * given bitmap, starting at low and not exceeding size - 1. Return size if
179 * not found.
180 */
181static int
182fd_first_free(struct filedesc *fdp, int low, int size)
183{
184 NDSLOTTYPE *map = fdp->fd_map;
185 NDSLOTTYPE mask;
186 int off, maxoff;
187
188 if (low >= size)
189 return (low);
190
191 off = NDSLOT(low);
192 if (low % NDENTRIES) {
193 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
194 if ((mask &= ~map[off]) != 0UL)
195 return (off * NDENTRIES + ffsl(mask) - 1);
196 ++off;
197 }
198 for (maxoff = NDSLOTS(size); off < maxoff; ++off)
199 if (map[off] != ~0UL)
200 return (off * NDENTRIES + ffsl(~map[off]) - 1);
201 return (size);
202}
203
204/*
205 * Find the highest non-zero bit in the given bitmap, starting at 0 and
206 * not exceeding size - 1. Return -1 if not found.
207 */
208static int
209fd_last_used(struct filedesc *fdp, int size)
210{
211 NDSLOTTYPE *map = fdp->fd_map;
212 NDSLOTTYPE mask;
213 int off, minoff;
214
215 off = NDSLOT(size);
216 if (size % NDENTRIES) {
217 mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
218 if ((mask &= map[off]) != 0)
219 return (off * NDENTRIES + flsl(mask) - 1);
220 --off;
221 }
222 for (minoff = NDSLOT(0); off >= minoff; --off)
223 if (map[off] != 0)
224 return (off * NDENTRIES + flsl(map[off]) - 1);
225 return (-1);
226}
227
228static int
229fdisused(struct filedesc *fdp, int fd)
230{
231
232 KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
233 ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
234
235 return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
236}
237
238/*
239 * Mark a file descriptor as used.
240 */
241static void
242fdused_init(struct filedesc *fdp, int fd)
243{
244
245 KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
246
247 fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
248}
249
250static void
251fdused(struct filedesc *fdp, int fd)
252{
253
254 FILEDESC_XLOCK_ASSERT(fdp);
255
256 fdused_init(fdp, fd);
257 if (fd > fdp->fd_lastfile)
258 fdp->fd_lastfile = fd;
259 if (fd == fdp->fd_freefile)
260 fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
261}
262
263/*
264 * Mark a file descriptor as unused.
265 */
266static void
267fdunused(struct filedesc *fdp, int fd)
268{
269
270 FILEDESC_XLOCK_ASSERT(fdp);
271
272 KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
273 KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
274 ("fd=%d is still in use", fd));
275
276 fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
277 if (fd < fdp->fd_freefile)
278 fdp->fd_freefile = fd;
279 if (fd == fdp->fd_lastfile)
280 fdp->fd_lastfile = fd_last_used(fdp, fd);
281}
282
283/*
284 * Free a file descriptor.
285 *
286 * Avoid some work if fdp is about to be destroyed.
287 */
288static inline void
289fdefree_last(struct filedescent *fde)
290{
291
292 filecaps_free(&fde->fde_caps);
293}
294
295static inline void
296fdfree(struct filedesc *fdp, int fd)
297{
298 struct filedescent *fde;
299
300 fde = &fdp->fd_ofiles[fd];
301#ifdef CAPABILITIES
302 seq_write_begin(&fde->fde_seq);
303#endif
304 fdefree_last(fde);
305 fde->fde_file = NULL;
306 fdunused(fdp, fd);
307#ifdef CAPABILITIES
308 seq_write_end(&fde->fde_seq);
309#endif
310}
311
312void
313pwd_ensure_dirs(void)
314{
315 struct filedesc *fdp;
316
317 fdp = curproc->p_fd;
318 FILEDESC_XLOCK(fdp);
319 if (fdp->fd_cdir == NULL) {
320 fdp->fd_cdir = rootvnode;
321 VREF(rootvnode);
322 }
323 if (fdp->fd_rdir == NULL) {
324 fdp->fd_rdir = rootvnode;
325 VREF(rootvnode);
326 }
327 FILEDESC_XUNLOCK(fdp);
328}
329
330/*
331 * System calls on descriptors.
332 */
333#ifndef _SYS_SYSPROTO_H_
334struct getdtablesize_args {
335 int dummy;
336};
337#endif
338/* ARGSUSED */
339int
340sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
341{
342#ifdef RACCT
343 uint64_t lim;
344#endif
345
346 td->td_retval[0] =
347 min((int)lim_cur(td, RLIMIT_NOFILE), maxfilesperproc);
348#ifdef RACCT
349 PROC_LOCK(td->td_proc);
350 lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
351 PROC_UNLOCK(td->td_proc);
352 if (lim < td->td_retval[0])
353 td->td_retval[0] = lim;
354#endif
355 return (0);
356}
357
358/*
359 * Duplicate a file descriptor to a particular value.
360 *
361 * Note: keep in mind that a potential race condition exists when closing
362 * descriptors from a shared descriptor table (via rfork).
363 */
364#ifndef _SYS_SYSPROTO_H_
365struct dup2_args {
366 u_int from;
367 u_int to;
368};
369#endif
370/* ARGSUSED */
371int
372sys_dup2(struct thread *td, struct dup2_args *uap)
373{
374
375 return (kern_dup(td, FDDUP_FIXED, 0, (int)uap->from, (int)uap->to));
376}
377
378/*
379 * Duplicate a file descriptor.
380 */
381#ifndef _SYS_SYSPROTO_H_
382struct dup_args {
383 u_int fd;
384};
385#endif
386/* ARGSUSED */
387int
388sys_dup(struct thread *td, struct dup_args *uap)
389{
390
391 return (kern_dup(td, FDDUP_NORMAL, 0, (int)uap->fd, 0));
392}
393
394/*
395 * The file control system call.
396 */
397#ifndef _SYS_SYSPROTO_H_
398struct fcntl_args {
399 int fd;
400 int cmd;
401 long arg;
402};
403#endif
404/* ARGSUSED */
405int
406sys_fcntl(struct thread *td, struct fcntl_args *uap)
407{
408
409 return (kern_fcntl_freebsd(td, uap->fd, uap->cmd, uap->arg));
410}
411
412int
413kern_fcntl_freebsd(struct thread *td, int fd, int cmd, long arg)
414{
415 struct flock fl;
416 struct __oflock ofl;
417 intptr_t arg1;
418 int error, newcmd;
419
420 error = 0;
421 newcmd = cmd;
422 switch (cmd) {
423 case F_OGETLK:
424 case F_OSETLK:
425 case F_OSETLKW:
426 /*
427 * Convert old flock structure to new.
428 */
429 error = copyin((void *)(intptr_t)arg, &ofl, sizeof(ofl));
430 fl.l_start = ofl.l_start;
431 fl.l_len = ofl.l_len;
432 fl.l_pid = ofl.l_pid;
433 fl.l_type = ofl.l_type;
434 fl.l_whence = ofl.l_whence;
435 fl.l_sysid = 0;
436
437 switch (cmd) {
438 case F_OGETLK:
439 newcmd = F_GETLK;
440 break;
441 case F_OSETLK:
442 newcmd = F_SETLK;
443 break;
444 case F_OSETLKW:
445 newcmd = F_SETLKW;
446 break;
447 }
448 arg1 = (intptr_t)&fl;
449 break;
450 case F_GETLK:
451 case F_SETLK:
452 case F_SETLKW:
453 case F_SETLK_REMOTE:
454 error = copyin((void *)(intptr_t)arg, &fl, sizeof(fl));
455 arg1 = (intptr_t)&fl;
456 break;
457 default:
458 arg1 = arg;
459 break;
460 }
461 if (error)
462 return (error);
463 error = kern_fcntl(td, fd, newcmd, arg1);
464 if (error)
465 return (error);
466 if (cmd == F_OGETLK) {
467 ofl.l_start = fl.l_start;
468 ofl.l_len = fl.l_len;
469 ofl.l_pid = fl.l_pid;
470 ofl.l_type = fl.l_type;
471 ofl.l_whence = fl.l_whence;
472 error = copyout(&ofl, (void *)(intptr_t)arg, sizeof(ofl));
473 } else if (cmd == F_GETLK) {
474 error = copyout(&fl, (void *)(intptr_t)arg, sizeof(fl));
475 }
476 return (error);
477}
478
479int
480kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
481{
482 struct filedesc *fdp;
483 struct flock *flp;
484 struct file *fp, *fp2;
485 struct filedescent *fde;
486 struct proc *p;
487 struct vnode *vp;
488 cap_rights_t rights;
489 int error, flg, tmp;
490 uint64_t bsize;
491 off_t foffset;
492
493 error = 0;
494 flg = F_POSIX;
495 p = td->td_proc;
496 fdp = p->p_fd;
497
498 switch (cmd) {
499 case F_DUPFD:
500 tmp = arg;
501 error = kern_dup(td, FDDUP_FCNTL, 0, fd, tmp);
502 break;
503
504 case F_DUPFD_CLOEXEC:
505 tmp = arg;
506 error = kern_dup(td, FDDUP_FCNTL, FDDUP_FLAG_CLOEXEC, fd, tmp);
507 break;
508
509 case F_DUP2FD:
510 tmp = arg;
511 error = kern_dup(td, FDDUP_FIXED, 0, fd, tmp);
512 break;
513
514 case F_DUP2FD_CLOEXEC:
515 tmp = arg;
516 error = kern_dup(td, FDDUP_FIXED, FDDUP_FLAG_CLOEXEC, fd, tmp);
517 break;
518
519 case F_GETFD:
520 FILEDESC_SLOCK(fdp);
521 if (fget_locked(fdp, fd) == NULL) {
522 FILEDESC_SUNLOCK(fdp);
523 error = EBADF;
524 break;
525 }
526 fde = &fdp->fd_ofiles[fd];
527 td->td_retval[0] =
528 (fde->fde_flags & UF_EXCLOSE) ? FD_CLOEXEC : 0;
529 FILEDESC_SUNLOCK(fdp);
530 break;
531
532 case F_SETFD:
533 FILEDESC_XLOCK(fdp);
534 if (fget_locked(fdp, fd) == NULL) {
535 FILEDESC_XUNLOCK(fdp);
536 error = EBADF;
537 break;
538 }
539 fde = &fdp->fd_ofiles[fd];
540 fde->fde_flags = (fde->fde_flags & ~UF_EXCLOSE) |
541 (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
542 FILEDESC_XUNLOCK(fdp);
543 break;
544
545 case F_GETFL:
546 error = fget_fcntl(td, fd,
547 cap_rights_init(&rights, CAP_FCNTL), F_GETFL, &fp);
548 if (error != 0)
549 break;
550 td->td_retval[0] = OFLAGS(fp->f_flag);
551 fdrop(fp, td);
552 break;
553
554 case F_SETFL:
555 error = fget_fcntl(td, fd,
556 cap_rights_init(&rights, CAP_FCNTL), F_SETFL, &fp);
557 if (error != 0)
558 break;
559 do {
560 tmp = flg = fp->f_flag;
561 tmp &= ~FCNTLFLAGS;
562 tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
563 } while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
564 tmp = fp->f_flag & FNONBLOCK;
565 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
566 if (error != 0) {
567 fdrop(fp, td);
568 break;
569 }
570 tmp = fp->f_flag & FASYNC;
571 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
572 if (error == 0) {
573 fdrop(fp, td);
574 break;
575 }
576 atomic_clear_int(&fp->f_flag, FNONBLOCK);
577 tmp = 0;
578 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
579 fdrop(fp, td);
580 break;
581
582 case F_GETOWN:
583 error = fget_fcntl(td, fd,
584 cap_rights_init(&rights, CAP_FCNTL), F_GETOWN, &fp);
585 if (error != 0)
586 break;
587 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
588 if (error == 0)
589 td->td_retval[0] = tmp;
590 fdrop(fp, td);
591 break;
592
593 case F_SETOWN:
594 error = fget_fcntl(td, fd,
595 cap_rights_init(&rights, CAP_FCNTL), F_SETOWN, &fp);
596 if (error != 0)
597 break;
598 tmp = arg;
599 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
600 fdrop(fp, td);
601 break;
602
603 case F_SETLK_REMOTE:
604 error = priv_check(td, PRIV_NFS_LOCKD);
605 if (error)
606 return (error);
607 flg = F_REMOTE;
608 goto do_setlk;
609
610 case F_SETLKW:
611 flg |= F_WAIT;
612 /* FALLTHROUGH F_SETLK */
613
614 case F_SETLK:
615 do_setlk:
616 cap_rights_init(&rights, CAP_FLOCK);
617 error = fget_unlocked(fdp, fd, &rights, &fp, NULL);
618 if (error != 0)
619 break;
620 if (fp->f_type != DTYPE_VNODE) {
621 error = EBADF;
622 fdrop(fp, td);
623 break;
624 }
625
626 flp = (struct flock *)arg;
627 if (flp->l_whence == SEEK_CUR) {
628 foffset = foffset_get(fp);
629 if (foffset < 0 ||
630 (flp->l_start > 0 &&
631 foffset > OFF_MAX - flp->l_start)) {
632 error = EOVERFLOW;
633 fdrop(fp, td);
634 break;
635 }
636 flp->l_start += foffset;
637 }
638
639 vp = fp->f_vnode;
640 switch (flp->l_type) {
641 case F_RDLCK:
642 if ((fp->f_flag & FREAD) == 0) {
643 error = EBADF;
644 break;
645 }
646 PROC_LOCK(p->p_leader);
647 p->p_leader->p_flag |= P_ADVLOCK;
648 PROC_UNLOCK(p->p_leader);
649 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
650 flp, flg);
651 break;
652 case F_WRLCK:
653 if ((fp->f_flag & FWRITE) == 0) {
654 error = EBADF;
655 break;
656 }
657 PROC_LOCK(p->p_leader);
658 p->p_leader->p_flag |= P_ADVLOCK;
659 PROC_UNLOCK(p->p_leader);
660 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
661 flp, flg);
662 break;
663 case F_UNLCK:
664 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
665 flp, flg);
666 break;
667 case F_UNLCKSYS:
668 /*
669 * Temporary api for testing remote lock
670 * infrastructure.
671 */
672 if (flg != F_REMOTE) {
673 error = EINVAL;
674 break;
675 }
676 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
677 F_UNLCKSYS, flp, flg);
678 break;
679 default:
680 error = EINVAL;
681 break;
682 }
683 if (error != 0 || flp->l_type == F_UNLCK ||
684 flp->l_type == F_UNLCKSYS) {
685 fdrop(fp, td);
686 break;
687 }
688
689 /*
690 * Check for a race with close.
691 *
692 * The vnode is now advisory locked (or unlocked, but this case
693 * is not really important) as the caller requested.
694 * We had to drop the filedesc lock, so we need to recheck if
695 * the descriptor is still valid, because if it was closed
696 * in the meantime we need to remove advisory lock from the
697 * vnode - close on any descriptor leading to an advisory
698 * locked vnode, removes that lock.
699 * We will return 0 on purpose in that case, as the result of
700 * successful advisory lock might have been externally visible
701 * already. This is fine - effectively we pretend to the caller
702 * that the closing thread was a bit slower and that the
703 * advisory lock succeeded before the close.
704 */
705 error = fget_unlocked(fdp, fd, &rights, &fp2, NULL);
706 if (error != 0) {
707 fdrop(fp, td);
708 break;
709 }
710 if (fp != fp2) {
711 flp->l_whence = SEEK_SET;
712 flp->l_start = 0;
713 flp->l_len = 0;
714 flp->l_type = F_UNLCK;
715 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
716 F_UNLCK, flp, F_POSIX);
717 }
718 fdrop(fp, td);
719 fdrop(fp2, td);
720 break;
721
722 case F_GETLK:
723 error = fget_unlocked(fdp, fd,
724 cap_rights_init(&rights, CAP_FLOCK), &fp, NULL);
725 if (error != 0)
726 break;
727 if (fp->f_type != DTYPE_VNODE) {
728 error = EBADF;
729 fdrop(fp, td);
730 break;
731 }
732 flp = (struct flock *)arg;
733 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
734 flp->l_type != F_UNLCK) {
735 error = EINVAL;
736 fdrop(fp, td);
737 break;
738 }
739 if (flp->l_whence == SEEK_CUR) {
740 foffset = foffset_get(fp);
741 if ((flp->l_start > 0 &&
742 foffset > OFF_MAX - flp->l_start) ||
743 (flp->l_start < 0 &&
744 foffset < OFF_MIN - flp->l_start)) {
745 error = EOVERFLOW;
746 fdrop(fp, td);
747 break;
748 }
749 flp->l_start += foffset;
750 }
751 vp = fp->f_vnode;
752 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
753 F_POSIX);
754 fdrop(fp, td);
755 break;
756
757 case F_RDAHEAD:
758 arg = arg ? 128 * 1024: 0;
759 /* FALLTHROUGH */
760 case F_READAHEAD:
761 error = fget_unlocked(fdp, fd,
762 cap_rights_init(&rights), &fp, NULL);
763 if (error != 0)
764 break;
765 if (fp->f_type != DTYPE_VNODE) {
766 fdrop(fp, td);
767 error = EBADF;
768 break;
769 }
770 vp = fp->f_vnode;
771 /*
772 * Exclusive lock synchronizes against f_seqcount reads and
773 * writes in sequential_heuristic().
774 */
775 error = vn_lock(vp, LK_EXCLUSIVE);
776 if (error != 0) {
777 fdrop(fp, td);
778 break;
779 }
780 if (arg >= 0) {
781 bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
782 fp->f_seqcount = (arg + bsize - 1) / bsize;
783 atomic_set_int(&fp->f_flag, FRDAHEAD);
784 } else {
785 atomic_clear_int(&fp->f_flag, FRDAHEAD);
786 }
787 VOP_UNLOCK(vp, 0);
788 fdrop(fp, td);
789 break;
790
791 default:
792 error = EINVAL;
793 break;
794 }
795 return (error);
796}
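A note on the SEEK_CUR handling in kern_fcntl() above: the advisory-lock layer works on absolute offsets, so a caller-relative l_start is rebased by the descriptor's current file offset (with EOVERFLOW returned if the sum would overflow off_t) before VOP_ADVLOCK() is called. From userland the interface looks like this illustrative fragment (a hypothetical helper, not from the source):

    #include <fcntl.h>
    #include <unistd.h>

    /* Lock the next 100 bytes, measured from the current offset. */
    static int
    lock_ahead(int fd)
    {
            struct flock fl = {
                    .l_type = F_WRLCK,
                    .l_whence = SEEK_CUR,   /* kernel rebases l_start */
                    .l_start = 0,
                    .l_len = 100,
            };

            /* F_SETLKW sleeps until the range becomes available. */
            return (fcntl(fd, F_SETLKW, &fl));
    }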
797
798static int
799getmaxfd(struct thread *td)
800{
801
802 return (min((int)lim_cur(td, RLIMIT_NOFILE), maxfilesperproc));
803}
804
805/*
806 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
807 */
808int
809kern_dup(struct thread *td, u_int mode, int flags, int old, int new)
810{
811 struct filedesc *fdp;
812 struct filedescent *oldfde, *newfde;
813 struct proc *p;
814 struct file *delfp;
815 int error, maxfd;
816
817 p = td->td_proc;
818 fdp = p->p_fd;
819
820 MPASS((flags & ~(FDDUP_FLAG_CLOEXEC)) == 0);
821 MPASS(mode < FDDUP_LASTMODE);
822
823 /*
824 * Verify we have a valid descriptor to dup from and possibly to
825 * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
826 * return EINVAL when the new descriptor is out of bounds.
827 */
828 if (old < 0)
829 return (EBADF);
830 if (new < 0)
831 return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
832 maxfd = getmaxfd(td);
833 if (new >= maxfd)
834 return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
835
836 error = EBADF;
837 FILEDESC_XLOCK(fdp);
838 if (fget_locked(fdp, old) == NULL)
839 goto unlock;
840 if ((mode == FDDUP_FIXED || mode == FDDUP_MUSTREPLACE) && old == new) {
841 td->td_retval[0] = new;
842 if (flags & FDDUP_FLAG_CLOEXEC)
843 fdp->fd_ofiles[new].fde_flags |= UF_EXCLOSE;
844 error = 0;
845 goto unlock;
846 }
847
848 /*
849 * If the caller specified a file descriptor, make sure the file
850 * table is large enough to hold it, and grab it. Otherwise, just
851 * allocate a new descriptor the usual way.
852 */
853 switch (mode) {
854 case FDDUP_NORMAL:
855 case FDDUP_FCNTL:
856 if ((error = fdalloc(td, new, &new)) != 0)
857 goto unlock;
858 break;
859 case FDDUP_MUSTREPLACE:
860 /* Target file descriptor must exist. */
861 if (fget_locked(fdp, new) == NULL)
862 goto unlock;
863 break;
864 case FDDUP_FIXED:
865 if (new >= fdp->fd_nfiles) {
866 /*
867 * The resource limits are here instead of e.g.
868 * fdalloc(), because the file descriptor table may be
869 * shared between processes, so we can't really use
870 * racct_add()/racct_sub(). Instead of counting the
871 * number of actually allocated descriptors, just put
872 * the limit on the size of the file descriptor table.
873 */
874#ifdef RACCT
875 if (racct_enable) {
876 PROC_LOCK(p);
877 error = racct_set(p, RACCT_NOFILE, new + 1);
878 PROC_UNLOCK(p);
879 if (error != 0) {
880 error = EMFILE;
881 goto unlock;
882 }
883 }
884#endif
885 fdgrowtable_exp(fdp, new + 1);
886 }
887 if (!fdisused(fdp, new))
888 fdused(fdp, new);
889 break;
890 default:
891 KASSERT(0, ("%s unsupported mode %d", __func__, mode));
892 }
893
894 KASSERT(old != new, ("new fd is same as old"));
895
896 oldfde = &fdp->fd_ofiles[old];
897 fhold(oldfde->fde_file);
898 newfde = &fdp->fd_ofiles[new];
899 delfp = newfde->fde_file;
900
901 /*
902 * Duplicate the source descriptor.
903 */
904#ifdef CAPABILITIES
905 seq_write_begin(&newfde->fde_seq);
906#endif
907 filecaps_free(&newfde->fde_caps);
908 memcpy(newfde, oldfde, fde_change_size);
909 filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps, true);
910 if ((flags & FDDUP_FLAG_CLOEXEC) != 0)
911 newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
912 else
913 newfde->fde_flags = oldfde->fde_flags & ~UF_EXCLOSE;
914#ifdef CAPABILITIES
915 seq_write_end(&newfde->fde_seq);
916#endif
917 td->td_retval[0] = new;
918
919 error = 0;
920
921 if (delfp != NULL) {
922 (void) closefp(fdp, new, delfp, td, 1);
923 FILEDESC_UNLOCK_ASSERT(fdp);
924 } else {
925unlock:
926 FILEDESC_XUNLOCK(fdp);
927 }
928
929 return (error);
930}
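kern_dup() above backs dup() (FDDUP_NORMAL), dup2() (FDDUP_FIXED), and fcntl()'s F_DUPFD/F_DUP2FD variants. A hypothetical userland fragment exercising the FDDUP_FIXED path, where an already-open target descriptor is closed via closefp() only after the new entry is installed, so the slot never appears unallocated:

    #include <fcntl.h>
    #include <unistd.h>

    /* Redirect stdout to a log file via the dup2()/FDDUP_FIXED path. */
    static int
    redirect_stdout(const char *path)
    {
            int fd;

            fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);
            if (fd < 0)
                    return (-1);
            if (dup2(fd, STDOUT_FILENO) < 0) {      /* closes old fd 1 */
                    close(fd);
                    return (-1);
            }
            close(fd);      /* drop the now-redundant descriptor */
            return (0);
    }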
931
932/*
933 * If sigio is on the list associated with a process or process group,
934 * disable signalling from the device, remove sigio from the list and
935 * free sigio.
936 */
937void
938funsetown(struct sigio **sigiop)
939{
940 struct sigio *sigio;
941
942 SIGIO_LOCK();
943 sigio = *sigiop;
944 if (sigio == NULL) {
945 SIGIO_UNLOCK();
946 return;
947 }
948 *(sigio->sio_myref) = NULL;
949 if ((sigio)->sio_pgid < 0) {
950 struct pgrp *pg = (sigio)->sio_pgrp;
951 PGRP_LOCK(pg);
952 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
953 sigio, sio_pgsigio);
954 PGRP_UNLOCK(pg);
955 } else {
956 struct proc *p = (sigio)->sio_proc;
957 PROC_LOCK(p);
958 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
959 sigio, sio_pgsigio);
960 PROC_UNLOCK(p);
961 }
962 SIGIO_UNLOCK();
963 crfree(sigio->sio_ucred);
964 free(sigio, M_SIGIO);
965}
966
967/*
968 * Free a list of sigio structures.
969 * We only need to lock the SIGIO_LOCK because we have made ourselves
970 * inaccessible to callers of fsetown and therefore do not need to lock
971 * the proc or pgrp struct for the list manipulation.
972 */
973void
974funsetownlst(struct sigiolst *sigiolst)
975{
976 struct proc *p;
977 struct pgrp *pg;
978 struct sigio *sigio;
979
980 sigio = SLIST_FIRST(sigiolst);
981 if (sigio == NULL)
982 return;
983 p = NULL;
984 pg = NULL;
985
986 /*
987 * Every entry of the list should belong
988 * to a single proc or pgrp.
989 */
990 if (sigio->sio_pgid < 0) {
991 pg = sigio->sio_pgrp;
992 PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
993 } else /* if (sigio->sio_pgid > 0) */ {
994 p = sigio->sio_proc;
995 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
996 }
997
998 SIGIO_LOCK();
999 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
1000 *(sigio->sio_myref) = NULL;
1001 if (pg != NULL) {
1002 KASSERT(sigio->sio_pgid < 0,
1003 ("Proc sigio in pgrp sigio list"));
1004 KASSERT(sigio->sio_pgrp == pg,
1005 ("Bogus pgrp in sigio list"));
1006 PGRP_LOCK(pg);
1007 SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
1008 sio_pgsigio);
1009 PGRP_UNLOCK(pg);
1010 } else /* if (p != NULL) */ {
1011 KASSERT(sigio->sio_pgid > 0,
1012 ("Pgrp sigio in proc sigio list"));
1013 KASSERT(sigio->sio_proc == p,
1014 ("Bogus proc in sigio list"));
1015 PROC_LOCK(p);
1016 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
1017 sio_pgsigio);
1018 PROC_UNLOCK(p);
1019 }
1020 SIGIO_UNLOCK();
1021 crfree(sigio->sio_ucred);
1022 free(sigio, M_SIGIO);
1023 SIGIO_LOCK();
1024 }
1025 SIGIO_UNLOCK();
1026}
1027
1028/*
1029 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
1030 *
1031 * After permission checking, add a sigio structure to the sigio list for
1032 * the process or process group.
1033 */
1034int
1035fsetown(pid_t pgid, struct sigio **sigiop)
1036{
1037 struct proc *proc;
1038 struct pgrp *pgrp;
1039 struct sigio *sigio;
1040 int ret;
1041
1042 if (pgid == 0) {
1043 funsetown(sigiop);
1044 return (0);
1045 }
1046
1047 ret = 0;
1048
1049 /* Allocate and fill in the new sigio out of locks. */
1050 sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
1051 sigio->sio_pgid = pgid;
1052 sigio->sio_ucred = crhold(curthread->td_ucred);
1053 sigio->sio_myref = sigiop;
1054
1055 sx_slock(&proctree_lock);
1056 if (pgid > 0) {
1057 proc = pfind(pgid);
1058 if (proc == NULL) {
1059 ret = ESRCH;
1060 goto fail;
1061 }
1062
1063 /*
1064 * Policy - Don't allow a process to FSETOWN a process
1065 * in another session.
1066 *
1067 * Remove this test to allow maximum flexibility or
1068 * restrict FSETOWN to the current process or process
1069 * group for maximum safety.
1070 */
1071 PROC_UNLOCK(proc);
1072 if (proc->p_session != curthread->td_proc->p_session) {
1073 ret = EPERM;
1074 goto fail;
1075 }
1076
1077 pgrp = NULL;
1078 } else /* if (pgid < 0) */ {
1079 pgrp = pgfind(-pgid);
1080 if (pgrp == NULL((void *)0)) {
1081 ret = ESRCH3;
1082 goto fail;
1083 }
1084 PGRP_UNLOCK(pgrp);
1085
1086 /*
1087 * Policy - Don't allow a process to FSETOWN a process
1088 * in another session.
1089 *
1090 * Remove this test to allow maximum flexibility or
1091 * restrict FSETOWN to the current process or process
1092 * group for maximum safety.
1093 */
1094 if (pgrp->pg_session != curthread(__curthread())->td_proc->p_sessionp_pgrp->pg_session) {
1095 ret = EPERM1;
1096 goto fail;
1097 }
1098
1099 proc = NULL((void *)0);
1100 }
1101 funsetown(sigiop);
1102 if (pgid > 0) {
1103 PROC_LOCK(proc);
1104 /*
1105 * Since funsetownlst() is called without the proctree
1106 * locked, we need to check for P_WEXIT.
1107 * XXX: is ESRCH correct?
1108 */
1109 if ((proc->p_flag & P_WEXIT0x02000) != 0) {
1110 PROC_UNLOCK(proc);
1111 ret = ESRCH3;
1112 goto fail;
1113 }
1114 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
1115 sigio->sio_proc = proc;
1116 PROC_UNLOCK(proc);
1117 } else {
1118 PGRP_LOCK(pgrp);
1119 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
1120 sigio->sio_pgrp = pgrp;
1121 PGRP_UNLOCK(pgrp);
1122 }
1123 sx_sunlock(&proctree_lock)__sx_sunlock(((&proctree_lock)), (((void *)0)), (0));
1124 SIGIO_LOCK();
1125 *sigiop = sigio;
1126 SIGIO_UNLOCK();
1127 return (0);
1128
1129fail:
1130 sx_sunlock(&proctree_lock)__sx_sunlock(((&proctree_lock)), (((void *)0)), (0));
1131 crfree(sigio->sio_ucred);
1132 free(sigio, M_SIGIO);
1133 return (ret);
1134}
1135
1136/*
1137 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1138 */
1139pid_t
1140fgetown(sigiop)
1141 struct sigio **sigiop;
1142{
1143 pid_t pgid;
1144
1145 SIGIO_LOCK();
1146 pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1147 SIGIO_UNLOCK();
1148 return (pgid);
1149}
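/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a character device's ioctl routine typically routes FIOSETOWN and
 * FIOGETOWN through the two helpers above, keeping the struct sigio
 * pointer in its softc.  foo_softc and sc_sigio are assumed names.
 */
static int
foo_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct foo_softc *sc = dev->si_drv1;

	switch (cmd) {
	case FIOSETOWN:
		/* Register who receives SIGIO for this descriptor. */
		return (fsetown(*(int *)data, &sc->sc_sigio));
	case FIOGETOWN:
		/* Report the owner: a pid, or -pgid for a process group. */
		*(int *)data = fgetown(&sc->sc_sigio);
		return (0);
	default:
		return (ENOTTY);
	}
}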
1150
1151/*
1152 * Function drops the filedesc lock on return.
1153 */
1154static int
1155closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1156 int holdleaders)
1157{
1158 int error;
1159
1160 FILEDESC_XLOCK_ASSERT(fdp)(void)0;
1161
1162 if (holdleaders) {
1163 if (td->td_proc->p_fdtol != NULL((void *)0)) {
1164 /*
1165 * Ask fdfree() to sleep to ensure that all relevant
1166 * process leaders can be traversed in closef().
1167 */
1168 fdp->fd_holdleaderscount++;
1169 } else {
1170 holdleaders = 0;
1171 }
1172 }
1173
1174 /*
1175 * We now hold the fp reference that used to be owned by the
1176 * descriptor array. We have to unlock the FILEDESC *AFTER*
1177 * knote_fdclose to prevent a race of the fd getting opened, a knote
1178 * added, and deleting a knote for the new fd.
1179 */
1180 knote_fdclose(td, fd);
1181
1182 /*
1183 * We need to notify mqueue if the object is of type mqueue.
1184 */
1185 if (fp->f_type == DTYPE_MQUEUE7)
1186 mq_fdclose(td, fd, fp);
1187 FILEDESC_XUNLOCK(fdp);
1188
1189 error = closef(fp, td);
1190 if (holdleaders) {
1191 FILEDESC_XLOCK(fdp);
1192 fdp->fd_holdleaderscount--;
1193 if (fdp->fd_holdleaderscount == 0 &&
1194 fdp->fd_holdleaderswakeup != 0) {
1195 fdp->fd_holdleaderswakeup = 0;
1196 wakeup(&fdp->fd_holdleaderscount);
1197 }
1198 FILEDESC_XUNLOCK(fdp);
1199 }
1200 return (error);
1201}
1202
1203/*
1204 * Close a file descriptor.
1205 */
1206#ifndef _SYS_SYSPROTO_H_
1207struct close_args {
1208 int fd;
1209};
1210#endif
1211/* ARGSUSED */
1212int
1213sys_close(struct thread *td, struct close_args *uap)
1214{
1215
1216 return (kern_close(td, uap->fd));
1217}
1218
1219int
1220kern_close(struct thread *td, int fd)
1221{
1222 struct filedesc *fdp;
1223 struct file *fp;
1224
1225 fdp = td->td_proc->p_fd;
1226
1227 AUDIT_SYSCLOSE(td, fd);
1228
1229 FILEDESC_XLOCK(fdp);
1230 if ((fp = fget_locked(fdp, fd)) == NULL) {
1231 FILEDESC_XUNLOCK(fdp);
1232 return (EBADF);
1233 }
1234 fdfree(fdp, fd);
1235
1236 /* closefp() drops the FILEDESC lock for us. */
1237 return (closefp(fdp, fd, fp, td, 1));
1238}
1239
1240/*
1241 * Close open file descriptors.
1242 */
1243#ifndef _SYS_SYSPROTO_H_
1244struct closefrom_args {
1245 int lowfd;
1246};
1247#endif
1248/* ARGSUSED */
1249int
1250sys_closefrom(struct thread *td, struct closefrom_args *uap)
1251{
1252 struct filedesc *fdp;
1253 int fd;
1254
1255 fdp = td->td_proc->p_fd;
1256 AUDIT_ARG_FD(uap->lowfd);
1257
1258 /*
1259 * Treat negative starting file descriptor values identically to
1260 * closefrom(0), which closes all files.
1261 */
1262 if (uap->lowfd < 0)
1263 uap->lowfd = 0;
1264 FILEDESC_SLOCK(fdp);
1265 for (fd = uap->lowfd; fd <= fdp->fd_lastfile; fd++) {
1266 if (fdp->fd_ofiles[fd].fde_file != NULL) {
1267 FILEDESC_SUNLOCK(fdp);
1268 (void)kern_close(td, fd);
1269 FILEDESC_SLOCK(fdp);
1270 }
1271 }
1272 FILEDESC_SUNLOCK(fdp);
1273 return (0);
1274}
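/*
 * Userland counterpart (a sketch, not part of this file): closefrom(2)
 * lands in the handler above, so a daemon can shed every inherited
 * descriptor before exec'ing a child.  Since negative values are clamped
 * to 0 above, closefrom(-1) would close all files.
 */
#include <unistd.h>

static void
drop_inherited_fds(void)
{
	closefrom(3);	/* keep stdin/stdout/stderr, close 3 and up */
}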
1275
1276#if defined(COMPAT_43)
1277/*
1278 * Return status information about a file descriptor.
1279 */
1280#ifndef _SYS_SYSPROTO_H_
1281struct ofstat_args {
1282 int fd;
1283 struct ostat *sb;
1284};
1285#endif
1286/* ARGSUSED */
1287int
1288ofstat(struct thread *td, struct ofstat_args *uap)
1289{
1290 struct ostat oub;
1291 struct stat ub;
1292 int error;
1293
1294 error = kern_fstat(td, uap->fd, &ub);
1295 if (error == 0) {
1296 cvtstat(&ub, &oub);
1297 error = copyout(&oub, uap->sb, sizeof(oub));
1298 }
1299 return (error);
1300}
1301#endif /* COMPAT_43 */
1302
1303/*
1304 * Return status information about a file descriptor.
1305 */
1306#ifndef _SYS_SYSPROTO_H_
1307struct fstat_args {
1308 int fd;
1309 struct stat *sb;
1310};
1311#endif
1312/* ARGSUSED */
1313int
1314sys_fstat(struct thread *td, struct fstat_args *uap)
1315{
1316 struct stat ub;
1317 int error;
1318
1319 error = kern_fstat(td, uap->fd, &ub);
1320 if (error == 0)
1321 error = copyout(&ub, uap->sb, sizeof(ub));
1322 return (error);
1323}
1324
1325int
1326kern_fstat(struct thread *td, int fd, struct stat *sbp)
1327{
1328 struct file *fp;
1329 cap_rights_t rights;
1330 int error;
1331
1332 AUDIT_ARG_FD(fd);
1333
1334 error = fget(td, fd, cap_rights_init(&rights, CAP_FSTAT), &fp);
1335 if (error != 0)
1336 return (error);
1337
1338 AUDIT_ARG_FILE(td->td_proc, fp);
1339
1340 error = fo_stat(fp, sbp, td->td_ucred, td);
1341 fdrop(fp, td);
1342#ifdef KTRACE1
1343 if (error == 0 && KTRPOINT(td, KTR_STRUCT))
1344 ktrstat(sbp);
1345#endif
1346 return (error);
1347}
1348
1349/*
1350 * Return status information about a file descriptor.
1351 */
1352#ifndef _SYS_SYSPROTO_H_
1353struct nfstat_args {
1354 int fd;
1355 struct nstat *sb;
1356};
1357#endif
1358/* ARGSUSED */
1359int
1360sys_nfstat(struct thread *td, struct nfstat_args *uap)
1361{
1362 struct nstat nub;
1363 struct stat ub;
1364 int error;
1365
1366 error = kern_fstat(td, uap->fd, &ub);
1367 if (error == 0) {
1
Taking true branch
1368 cvtnstat(&ub, &nub);
1369 error = copyout(&nub, uap->sb, sizeof(nub));
2
Copies out a struct with uncleared padding (>= 4 bytes)
1370 }
1371 return (error);
1372}
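/*
 * A minimal sketch of one way to address the warning above (assuming the
 * fix is made in the caller rather than inside cvtnstat()): zeroing the
 * whole destination first guarantees that compiler-inserted padding
 * between the nstat fields cannot carry stale kernel stack bytes into
 * the copyout().
 */
	struct nstat nub;

	bzero(&nub, sizeof(nub));	/* clear holes/padding first */
	cvtnstat(&ub, &nub);		/* then fill the named fields */
	error = copyout(&nub, uap->sb, sizeof(nub));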
1373
1374/*
1375 * Return pathconf information about a file descriptor.
1376 */
1377#ifndef _SYS_SYSPROTO_H_
1378struct fpathconf_args {
1379 int fd;
1380 int name;
1381};
1382#endif
1383/* ARGSUSED */
1384int
1385sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
1386{
1387 struct file *fp;
1388 struct vnode *vp;
1389 cap_rights_t rights;
1390 int error;
1391
1392 error = fget(td, uap->fd, cap_rights_init(&rights, CAP_FPATHCONF), &fp);
1393 if (error != 0)
1394 return (error);
1395
1396 if (uap->name == _PC_ASYNC_IO53) {
1397 td->td_retvaltd_uretoff.tdu_retval[0] = _POSIX_ASYNCHRONOUS_IO200112L;
1398 goto out;
1399 }
1400 vp = fp->f_vnode;
1401 if (vp != NULL((void *)0)) {
1402 vn_lock(vp, LK_SHARED | LK_RETRY);
1403 error = VOP_PATHCONF(vp, uap->name, td->td_retvaltd_uretoff.tdu_retval);
1404 VOP_UNLOCK(vp, 0);
1405 } else if (fp->f_type == DTYPE_PIPE3 || fp->f_type == DTYPE_SOCKET2) {
1406 if (uap->name != _PC_PIPE_BUF6) {
1407 error = EINVAL22;
1408 } else {
1409 td->td_retvaltd_uretoff.tdu_retval[0] = PIPE_BUF512;
1410 error = 0;
1411 }
1412 } else {
1413 error = EOPNOTSUPP45;
1414 }
1415out:
1416 fdrop(fp, td);
1417 return (error);
1418}
1419
1420/*
1421 * Initialize filecaps structure.
1422 */
1423void
1424filecaps_init(struct filecaps *fcaps)
1425{
1426
1427 bzero(fcaps, sizeof(*fcaps));
1428 fcaps->fc_nioctls = -1;
1429}
1430
1431/*
1432 * Copy filecaps structure allocating memory for ioctls array if needed.
1433 *
1434 * The last parameter indicates whether the fdtable is locked. If it is not and
1435 * ioctls are encountered, copying fails and the caller must lock the table.
1436 *
1437 * Note that if the table was not locked, the caller has to check the relevant
1438 * sequence counter to determine whether the operation was successful.
1439 */
1440int
1441filecaps_copy(const struct filecaps *src, struct filecaps *dst, bool locked)
1442{
1443 size_t size;
1444
1445 *dst = *src;
1446 if (src->fc_ioctls == NULL((void *)0))
1447 return (0);
1448 if (!locked)
1449 return (1);
1450
1451 KASSERT(src->fc_nioctls > 0,do { } while (0)
1452 ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls))do { } while (0);
1453
1454 size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1455 dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK0x0002);
1456 bcopy(src->fc_ioctls, dst->fc_ioctls, size);
1457 return (0);
1458}
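/*
 * Caller-side sketch (hypothetical, following the contract above): with
 * the table unlocked, a non-zero return means ioctls were present, and
 * the caller retries under the lock.  fdp, fde and fcaps are assumed to
 * be in scope.
 */
	if (filecaps_copy(&fde->fde_caps, &fcaps, false) != 0) {
		FILEDESC_SLOCK(fdp);
		(void)filecaps_copy(&fde->fde_caps, &fcaps, true);
		FILEDESC_SUNLOCK(fdp);
	}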
1459
1460/*
1461 * Move filecaps structure to the new place and clear the old place.
1462 */
1463void
1464filecaps_move(struct filecaps *src, struct filecaps *dst)
1465{
1466
1467 *dst = *src;
1468 bzero(src, sizeof(*src));
1469}
1470
1471/*
1472 * Fill the given filecaps structure with full rights.
1473 */
1474static void
1475filecaps_fill(struct filecaps *fcaps)
1476{
1477
1478 CAP_ALL(&fcaps->fc_rights);
1479 fcaps->fc_ioctls = NULL;
1480 fcaps->fc_nioctls = -1;
1481 fcaps->fc_fcntls = CAP_FCNTL_ALL;
1482}
1483
1484/*
1485 * Free memory allocated within filecaps structure.
1486 */
1487void
1488filecaps_free(struct filecaps *fcaps)
1489{
1490
1491 free(fcaps->fc_ioctls, M_FILECAPS);
1492 bzero(fcaps, sizeof(*fcaps));
1493}
1494
1495/*
1496 * Validate the given filecaps structure.
1497 */
1498static void
1499filecaps_validate(const struct filecaps *fcaps, const char *func)
1500{
1501
1502 KASSERT(cap_rights_is_valid(&fcaps->fc_rights),do { } while (0)
1503 ("%s: invalid rights", func))do { } while (0);
1504 KASSERT((fcaps->fc_fcntls & ~CAP_FCNTL_ALL) == 0,do { } while (0)
1505 ("%s: invalid fcntls", func))do { } while (0);
1506 KASSERT(fcaps->fc_fcntls == 0 ||do { } while (0)
1507 cap_rights_is_set(&fcaps->fc_rights, CAP_FCNTL),do { } while (0)
1508 ("%s: fcntls without CAP_FCNTL", func))do { } while (0);
1509 KASSERT(fcaps->fc_ioctls != NULL ? fcaps->fc_nioctls > 0 :do { } while (0)
1510 (fcaps->fc_nioctls == -1 || fcaps->fc_nioctls == 0),do { } while (0)
1511 ("%s: invalid ioctls", func))do { } while (0);
1512 KASSERT(fcaps->fc_nioctls == 0 ||do { } while (0)
1513 cap_rights_is_set(&fcaps->fc_rights, CAP_IOCTL),do { } while (0)
1514 ("%s: ioctls without CAP_IOCTL", func))do { } while (0);
1515}
1516
1517static void
1518fdgrowtable_exp(struct filedesc *fdp, int nfd)
1519{
1520 int nfd1;
1521
1522 FILEDESC_XLOCK_ASSERT(fdp)(void)0;
1523
1524 nfd1 = fdp->fd_nfilesfd_files->fdt_nfiles * 2;
1525 if (nfd1 < nfd)
1526 nfd1 = nfd;
1527 fdgrowtable(fdp, nfd1);
1528}
1529
1530/*
1531 * Grow the file table to accommodate (at least) nfd descriptors.
1532 */
1533static void
1534fdgrowtable(struct filedesc *fdp, int nfd)
1535{
1536 struct filedesc0 *fdp0;
1537 struct freetable *ft;
1538 struct fdescenttbl *ntable;
1539 struct fdescenttbl *otable;
1540 int nnfiles, onfiles;
1541 NDSLOTTYPEu_long *nmap, *omap;
1542
1543 /*
1544 * If lastfile is -1 this struct filedesc was just allocated and we are
1545 * growing it to accommodate the one we are going to copy from. There
1546 * is no need to have a lock on this one as it's not visible to anyone.
1547 */
1548 if (fdp->fd_lastfile != -1)
1549 FILEDESC_XLOCK_ASSERT(fdp)(void)0;
1550
1551 KASSERT(fdp->fd_nfiles > 0, ("zero-length file table"))do { } while (0);
1552
1553 /* save old values */
1554 onfiles = fdp->fd_nfilesfd_files->fdt_nfiles;
1555 otable = fdp->fd_files;
1556 omap = fdp->fd_map;
1557
1558 /* compute the size of the new table */
1559 nnfiles = NDSLOTS(nfd)(((nfd) + (sizeof(u_long) * 8) - 1) / (sizeof(u_long) * 8)) * NDENTRIES(sizeof(u_long) * 8); /* round up */
1560 if (nnfiles <= onfiles)
1561 /* the table is already large enough */
1562 return;
1563
1564 /*
1565 * Allocate a new table. We need enough space for the number of
1566 * entries, file entries themselves and the struct freetable we will use
1567 * when we decommission the table and place it on the freelist.
1568 * We place the struct freetable in the middle so we don't have
1569 * to worry about padding.
1570 */
1571 ntable = malloc(offsetof(struct fdescenttbl, fdt_ofiles) +
1572 nnfiles * sizeof(ntable->fdt_ofiles[0]) +
1573 sizeof(struct freetable),
1574 M_FILEDESC, M_ZERO0x0100 | M_WAITOK0x0002);
1575 /* copy the old data */
1576 ntable->fdt_nfiles = nnfiles;
1577 memcpy(ntable->fdt_ofiles, otable->fdt_ofiles,
1578 onfiles * sizeof(ntable->fdt_ofiles[0]));
1579
1580 /*
1581 * Allocate a new map only if the old is not large enough. It will
1582 * grow at a slower rate than the table as it can map more
1583 * entries than the table can hold.
1584 */
1585 if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
1586 nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE, M_FILEDESC,
1587 M_ZERO | M_WAITOK);
1588 /* copy over the old data and update the pointer */
1589 memcpy(nmap, omap, NDSLOTS(onfiles) * sizeof(*omap));
1590 fdp->fd_map = nmap;
1591 }
1592
1593 /*
1594 * Make sure that ntable is correctly initialized before we replace
1595 * fd_files pointer. Otherwise fget_unlocked() may see inconsistent
1596 * data.
1597 */
1598 atomic_store_rel_ptratomic_store_rel_long((volatile void *)&fdp->fd_files, (uintptr_t)ntable);
1599
1600 /*
1601 * Do not free the old file table, as some threads may still
1602 * reference entries within it. Instead, place it on a freelist
1603 * which will be processed when the struct filedesc is released.
1604 *
1605 * Note that if onfiles == NDFILE, we're dealing with the original
1606 * static allocation contained within (struct filedesc0 *)fdp,
1607 * which must not be freed.
1608 */
1609 if (onfiles > NDFILE20) {
1610 ft = (struct freetable *)&otable->fdt_ofiles[onfiles];
1611 fdp0 = (struct filedesc0 *)fdp;
1612 ft->ft_table = otable;
1613 SLIST_INSERT_HEAD(&fdp0->fd_free, ft, ft_next);
1614 }
1615 /*
1616 * The map does not have the same possibility of threads still
1617 * holding references to it. So always free it as long as it
1618 * does not reference the original static allocation.
1619 */
1620 if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
1621 free(omap, M_FILEDESC);
1622}
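/*
 * The publication discipline above, reduced to a standalone C11 sketch
 * (an analogy, not kernel code): the writer finishes building the new
 * table and only then stores the pointer with release semantics, so a
 * lockless reader that loads it with acquire semantics always observes a
 * fully-initialized table, as fget_unlocked() relies on below.
 */
#include <stdatomic.h>

struct table { int nfiles; /* ... entries ... */ };
static _Atomic(struct table *) current;

void
publish(struct table *ntable)
{
	/* Every store to *ntable happens before the pointer is visible. */
	atomic_store_explicit(&current, ntable, memory_order_release);
}

struct table *
lookup(void)
{
	return (atomic_load_explicit(&current, memory_order_acquire));
}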
1623
1624/*
1625 * Allocate a file descriptor for the process.
1626 */
1627int
1628fdalloc(struct thread *td, int minfd, int *result)
1629{
1630 struct proc *p = td->td_proc;
1631 struct filedesc *fdp = p->p_fd;
1632 int fd, maxfd, allocfd;
1633#ifdef RACCT1
1634 int error;
1635#endif
1636
1637 FILEDESC_XLOCK_ASSERT(fdp)(void)0;
1638
1639 if (fdp->fd_freefile > minfd)
1640 minfd = fdp->fd_freefile;
1641
1642 maxfd = getmaxfd(td);
1643
1644 /*
1645 * Search the bitmap for a free descriptor starting at minfd.
1646 * If none is found, grow the file table.
1647 */
1648 fd = fd_first_free(fdp, minfd, fdp->fd_nfilesfd_files->fdt_nfiles);
1649 if (fd >= maxfd)
1650 return (EMFILE24);
1651 if (fd >= fdp->fd_nfilesfd_files->fdt_nfiles) {
1652 allocfd = min(fd * 2, maxfd);
1653#ifdef RACCT1
1654 if (racct_enable) {
1655 PROC_LOCK(p);
1656 error = racct_set(p, RACCT_NOFILE, allocfd);
1657 PROC_UNLOCK(p);
1658 if (error != 0)
1659 return (EMFILE24);
1660 }
1661#endif
1662 /*
1663 * fd is already equal to first free descriptor >= minfd, so
1664 * we only need to grow the table and we are done.
1665 */
1666 fdgrowtable_exp(fdp, allocfd);
1667 }
1668
1669 /*
1670 * Perform some sanity checks, then mark the file descriptor as
1671 * used and return it to the caller.
1672 */
1673 KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),do { } while (0)
1674 ("invalid descriptor %d", fd))do { } while (0);
1675 KASSERT(!fdisused(fdp, fd),do { } while (0)
1676 ("fd_first_free() returned non-free descriptor"))do { } while (0);
1677 KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,do { } while (0)
1678 ("file descriptor isn't free"))do { } while (0);
1679 fdused(fdp, fd);
1680 *result = fd;
1681 return (0);
1682}
1683
1684/*
1685 * Allocate n file descriptors for the process.
1686 */
1687int
1688fdallocn(struct thread *td, int minfd, int *fds, int n)
1689{
1690 struct proc *p = td->td_proc;
1691 struct filedesc *fdp = p->p_fd;
1692 int i;
1693
1694 FILEDESC_XLOCK_ASSERT(fdp)(void)0;
1695
1696 for (i = 0; i < n; i++)
1697 if (fdalloc(td, 0, &fds[i]) != 0)
1698 break;
1699
1700 if (i < n) {
1701 for (i--; i >= 0; i--)
1702 fdunused(fdp, fds[i]);
1703 return (EMFILE24);
1704 }
1705
1706 return (0);
1707}
1708
1709/*
1710 * Create a new open file structure and allocate a file descriptor for the
1711 * process that refers to it. We add one reference to the file for the
1712 * descriptor table and one reference for resultfp. This is to prevent us
1713 * being preempted and the entry in the descriptor table closed after we
1714 * release the FILEDESC lock.
1715 */
1716int
1717falloc_caps(struct thread *td, struct file **resultfp, int *resultfd, int flags,
1718 struct filecaps *fcaps)
1719{
1720 struct file *fp;
1721 int error, fd;
1722
1723 error = falloc_noinstall(td, &fp);
1724 if (error)
1725 return (error); /* no reference held on error */
1726
1727 error = finstall(td, fp, &fd, flags, fcaps);
1728 if (error) {
1729 fdrop(fp, td); /* one reference (fp only) */
1730 return (error);
1731 }
1732
1733 if (resultfp != NULL((void *)0))
1734 *resultfp = fp; /* copy out result */
1735 else
1736 fdrop(fp, td); /* release local reference */
1737
1738 if (resultfd != NULL((void *)0))
1739 *resultfd = fd;
1740
1741 return (0);
1742}
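/*
 * Typical creation sequence (a sketch with assumed names: DTYPE_FOO,
 * foo_ops and fdata are hypothetical): allocate the file and slot,
 * publish type/data/ops with finit(), return the descriptor number,
 * then drop the extra reference falloc_caps() took for us.
 */
	struct file *fp;
	int error, fd;

	error = falloc_caps(td, &fp, &fd, 0, NULL);
	if (error != 0)
		return (error);
	finit(fp, FREAD | FWRITE, DTYPE_FOO, fdata, &foo_ops);
	td->td_retval[0] = fd;	/* hand the descriptor to userspace */
	fdrop(fp, td);		/* release the local reference */
	return (0);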
1743
1744/*
1745 * Create a new open file structure without allocating a file descriptor.
1746 */
1747int
1748falloc_noinstall(struct thread *td, struct file **resultfp)
1749{
1750 struct file *fp;
1751 int maxuserfiles = maxfiles - (maxfiles / 20);
1752 static struct timeval lastfail;
1753 static int curfail;
1754
1755 KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__))do { } while (0);
1756
1757 if ((openfiles >= maxuserfiles &&
1758 priv_check(td, PRIV_MAXFILES3) != 0) ||
1759 openfiles >= maxfiles) {
1760 if (ppsratecheck(&lastfail, &curfail, 1)) {
1761 printf("kern.maxfiles limit exceeded by uid %i, "
1762 "please see tuning(7).\n", td->td_ucred->cr_ruid);
1763 }
1764 return (ENFILE23);
1765 }
1766 atomic_add_int(&openfiles, 1);
1767 fp = uma_zalloc(file_zone, M_WAITOK0x0002 | M_ZERO0x0100);
1768 refcount_init(&fp->f_count, 1);
1769 fp->f_cred = crhold(td->td_ucred);
1770 fp->f_ops = &badfileops;
1771 *resultfp = fp;
1772 return (0);
1773}
1774
1775/*
1776 * Install a file in a file descriptor table.
1777 */
1778void
1779_finstall(struct filedesc *fdp, struct file *fp, int fd, int flags,
1780 struct filecaps *fcaps)
1781{
1782 struct filedescent *fde;
1783
1784 MPASS(fp != NULL)do { } while (0);
1785 if (fcaps != NULL((void *)0))
1786 filecaps_validate(fcaps, __func__);
1787 FILEDESC_XLOCK_ASSERT(fdp)(void)0;
1788
1789 fde = &fdp->fd_ofilesfd_files->fdt_ofiles[fd];
1790#ifdef CAPABILITIES1
1791 seq_write_begin(&fde->fde_seq);
1792#endif
1793 fde->fde_file = fp;
1794 fde->fde_flags = (flags & O_CLOEXEC0x00100000) != 0 ? UF_EXCLOSE0x01 : 0;
1795 if (fcaps != NULL((void *)0))
1796 filecaps_move(fcaps, &fde->fde_caps);
1797 else
1798 filecaps_fill(&fde->fde_caps);
1799#ifdef CAPABILITIES1
1800 seq_write_end(&fde->fde_seq);
1801#endif
1802}
1803
1804int
1805finstall(struct thread *td, struct file *fp, int *fd, int flags,
1806 struct filecaps *fcaps)
1807{
1808 struct filedesc *fdp = td->td_proc->p_fd;
1809 int error;
1810
1811 MPASS(fd != NULL)do { } while (0);
1812
1813 FILEDESC_XLOCK(fdp);
1814 if ((error = fdalloc(td, 0, fd))) {
1815 FILEDESC_XUNLOCK(fdp);
1816 return (error);
1817 }
1818 fhold(fp)(refcount_acquire(&(fp)->f_count));
1819 _finstall(fdp, fp, *fd, flags, fcaps);
1820 FILEDESC_XUNLOCK(fdp);
1821 return (0);
1822}
1823
1824/*
1825 * Build a new filedesc structure from another.
1826 * Copy the current, root, and jail root vnode references.
1827 *
1828 * If fdp is not NULL, return with it shared locked.
1829 */
1830struct filedesc *
1831fdinit(struct filedesc *fdp, bool prepfiles)
1832{
1833 struct filedesc0 *newfdp0;
1834 struct filedesc *newfdp;
1835
1836 newfdp0 = uma_zalloc(filedesc0_zone, M_WAITOK0x0002 | M_ZERO0x0100);
1837 newfdp = &newfdp0->fd_fd;
1838
1839 /* Create the file descriptor table. */
1840 FILEDESC_LOCK_INIT(newfdp);
1841 refcount_init(&newfdp->fd_refcnt, 1);
1842 refcount_init(&newfdp->fd_holdcnt, 1);
1843 newfdp->fd_cmask = CMASK022;
1844 newfdp->fd_map = newfdp0->fd_dmap;
1845 newfdp->fd_lastfile = -1;
1846 newfdp->fd_files = (struct fdescenttbl *)&newfdp0->fd_dfiles;
1847 newfdp->fd_files->fdt_nfiles = NDFILE20;
1848
1849 if (fdp == NULL((void *)0))
1850 return (newfdp);
1851
1852 if (prepfiles && fdp->fd_lastfile >= newfdp->fd_nfilesfd_files->fdt_nfiles)
1853 fdgrowtable(newfdp, fdp->fd_lastfile + 1);
1854
1855 FILEDESC_SLOCK(fdp);
1856 newfdp->fd_cdir = fdp->fd_cdir;
1857 if (newfdp->fd_cdir)
1858 VREF(newfdp->fd_cdir)vref(newfdp->fd_cdir);
1859 newfdp->fd_rdir = fdp->fd_rdir;
1860 if (newfdp->fd_rdir)
1861 VREF(newfdp->fd_rdir)vref(newfdp->fd_rdir);
1862 newfdp->fd_jdir = fdp->fd_jdir;
1863 if (newfdp->fd_jdir)
1864 VREF(newfdp->fd_jdir)vref(newfdp->fd_jdir);
1865
1866 if (!prepfiles) {
1867 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
1868 } else {
1869 while (fdp->fd_lastfile >= newfdp->fd_nfilesfd_files->fdt_nfiles) {
1870 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
1871 fdgrowtable(newfdp, fdp->fd_lastfile + 1);
1872 FILEDESC_SLOCK(fdp);
1873 }
1874 }
1875
1876 return (newfdp);
1877}
1878
1879static struct filedesc *
1880fdhold(struct proc *p)
1881{
1882 struct filedesc *fdp;
1883
1884 PROC_LOCK_ASSERT(p, MA_OWNED)(void)0;
1885 fdp = p->p_fd;
1886 if (fdp != NULL((void *)0))
1887 refcount_acquire(&fdp->fd_holdcnt);
1888 return (fdp);
1889}
1890
1891static void
1892fddrop(struct filedesc *fdp)
1893{
1894
1895 if (fdp->fd_holdcnt > 1) {
1896 if (refcount_release(&fdp->fd_holdcnt) == 0)
1897 return;
1898 }
1899
1900 FILEDESC_LOCK_DESTROY(fdp)sx_destroy(&(fdp)->fd_sx);
1901 uma_zfree(filedesc0_zone, fdp);
1902}
1903
1904/*
1905 * Share a filedesc structure.
1906 */
1907struct filedesc *
1908fdshare(struct filedesc *fdp)
1909{
1910
1911 refcount_acquire(&fdp->fd_refcnt);
1912 return (fdp);
1913}
1914
1915/*
1916 * Unshare a filedesc structure, if necessary by making a copy
1917 */
1918void
1919fdunshare(struct thread *td)
1920{
1921 struct filedesc *tmp;
1922 struct proc *p = td->td_proc;
1923
1924 if (p->p_fd->fd_refcnt == 1)
1925 return;
1926
1927 tmp = fdcopy(p->p_fd);
1928 fdescfree(td);
1929 p->p_fd = tmp;
1930}
1931
1932void
1933fdinstall_remapped(struct thread *td, struct filedesc *fdp)
1934{
1935
1936 fdescfree(td);
1937 td->td_proc->p_fd = fdp;
1938}
1939
1940/*
1941 * Copy a filedesc structure. Passing in a NULL pointer returns a NULL
1942 * reference; this is to ease callers, not catch errors.
1943 */
1944struct filedesc *
1945fdcopy(struct filedesc *fdp)
1946{
1947 struct filedesc *newfdp;
1948 struct filedescent *nfde, *ofde;
1949 int i;
1950
1951 MPASS(fdp != NULL)do { } while (0);
1952
1953 newfdp = fdinit(fdp, true1);
1954 /* copy all passable descriptors (i.e. not kqueue) */
1955 newfdp->fd_freefile = -1;
1956 for (i = 0; i <= fdp->fd_lastfile; ++i) {
1957 ofde = &fdp->fd_ofilesfd_files->fdt_ofiles[i];
1958 if (ofde->fde_file == NULL((void *)0) ||
1959 (ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE0x01) == 0) {
1960 if (newfdp->fd_freefile == -1)
1961 newfdp->fd_freefile = i;
1962 continue;
1963 }
1964 nfde = &newfdp->fd_ofilesfd_files->fdt_ofiles[i];
1965 *nfde = *ofde;
1966 filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true1);
1967 fhold(nfde->fde_file)(refcount_acquire(&(nfde->fde_file)->f_count));
1968 fdused_init(newfdp, i);
1969 newfdp->fd_lastfile = i;
1970 }
1971 if (newfdp->fd_freefile == -1)
1972 newfdp->fd_freefile = i;
1973 newfdp->fd_cmask = fdp->fd_cmask;
1974 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
1975 return (newfdp);
1976}
1977
1978/*
1979 * Copies a filedesc structure, while remapping all file descriptors
1980 * stored inside using a translation table.
1981 *
1982 * File descriptors are copied over to the new file descriptor table,
1983 * regardless of whether the close-on-exec flag is set.
1984 */
1985int
1986fdcopy_remapped(struct filedesc *fdp, const int *fds, size_t nfds,
1987 struct filedesc **ret)
1988{
1989 struct filedesc *newfdp;
1990 struct filedescent *nfde, *ofde;
1991 int error, i;
1992
1993 MPASS(fdp != NULL)do { } while (0);
1994
1995 newfdp = fdinit(fdp, true1);
1996 if (nfds > fdp->fd_lastfile + 1) {
1997 /* New table cannot be larger than the old one. */
1998 error = E2BIG7;
1999 goto bad;
2000 }
2001 /* Copy all passable descriptors (i.e. not kqueue). */
2002 newfdp->fd_freefile = nfds;
2003 for (i = 0; i < nfds; ++i) {
2004 if (fds[i] < 0 || fds[i] > fdp->fd_lastfile) {
2005 /* File descriptor out of bounds. */
2006 error = EBADF9;
2007 goto bad;
2008 }
2009 ofde = &fdp->fd_ofilesfd_files->fdt_ofiles[fds[i]];
2010 if (ofde->fde_file == NULL((void *)0)) {
2011 /* Unused file descriptor. */
2012 error = EBADF9;
2013 goto bad;
2014 }
2015 if ((ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE0x01) == 0) {
2016 /* File descriptor cannot be passed. */
2017 error = EINVAL22;
2018 goto bad;
2019 }
2020 nfde = &newfdp->fd_ofilesfd_files->fdt_ofiles[i];
2021 *nfde = *ofde;
2022 filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true1);
2023 fhold(nfde->fde_file)(refcount_acquire(&(nfde->fde_file)->f_count));
2024 fdused_init(newfdp, i);
2025 newfdp->fd_lastfile = i;
2026 }
2027 newfdp->fd_cmask = fdp->fd_cmask;
2028 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
2029 *ret = newfdp;
2030 return (0);
2031bad:
2032 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
2033 fdescfree_remapped(newfdp);
2034 return (error);
2035}
2036
2037/*
2038 * Clear POSIX style locks. This is only used when fdp loses a reference (i.e.,
2039 * one of the processes using it exits) and the table used to be shared.
2040 */
2041static void
2042fdclearlocks(struct thread *td)
2043{
2044 struct filedesc *fdp;
2045 struct filedesc_to_leader *fdtol;
2046 struct flock lf;
2047 struct file *fp;
2048 struct proc *p;
2049 struct vnode *vp;
2050 int i;
2051
2052 p = td->td_proc;
2053 fdp = p->p_fd;
2054 fdtol = p->p_fdtol;
2055 MPASS(fdtol != NULL)do { } while (0);
2056
2057 FILEDESC_XLOCK(fdp);
2058 KASSERT(fdtol->fdl_refcount > 0,do { } while (0)
2059 ("filedesc_to_refcount botch: fdl_refcount=%d",do { } while (0)
2060 fdtol->fdl_refcount))do { } while (0);
2061 if (fdtol->fdl_refcount == 1 &&
2062 (p->p_leader->p_flag & P_ADVLOCK0x00001) != 0) {
2063 for (i = 0; i <= fdp->fd_lastfile; i++) {
2064 fp = fdp->fd_ofilesfd_files->fdt_ofiles[i].fde_file;
2065 if (fp == NULL((void *)0) || fp->f_type != DTYPE_VNODE1)
2066 continue;
2067 fhold(fp)(refcount_acquire(&(fp)->f_count));
2068 FILEDESC_XUNLOCK(fdp);
2069 lf.l_whence = SEEK_SET0;
2070 lf.l_start = 0;
2071 lf.l_len = 0;
2072 lf.l_type = F_UNLCK2;
2073 vp = fp->f_vnode;
2074 (void) VOP_ADVLOCK(vp,
2075 (caddr_t)p->p_leader, F_UNLCK2,
2076 &lf, F_POSIX0x040);
2077 FILEDESC_XLOCK(fdp);
2078 fdrop(fp, td);
2079 }
2080 }
2081retry:
2082 if (fdtol->fdl_refcount == 1) {
2083 if (fdp->fd_holdleaderscount > 0 &&
2084 (p->p_leader->p_flag & P_ADVLOCK0x00001) != 0) {
2085 /*
2086 * close() or kern_dup() has cleared a reference
2087 * in a shared file descriptor table.
2088 */
2089 fdp->fd_holdleaderswakeup = 1;
2090 sx_sleep(&fdp->fd_holdleaderscount,
2091 FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
2092 goto retry;
2093 }
2094 if (fdtol->fdl_holdcount > 0) {
2095 /*
2096 * Ensure that fdtol->fdl_leader remains
2097 * valid in closef().
2098 */
2099 fdtol->fdl_wakeup = 1;
2100 sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
2101 "fdlhold", 0);
2102 goto retry;
2103 }
2104 }
2105 fdtol->fdl_refcount--;
2106 if (fdtol->fdl_refcount == 0 &&
2107 fdtol->fdl_holdcount == 0) {
2108 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
2109 fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
2110 } else
2111 fdtol = NULL((void *)0);
2112 p->p_fdtol = NULL((void *)0);
2113 FILEDESC_XUNLOCK(fdp);
2114 if (fdtol != NULL((void *)0))
2115 free(fdtol, M_FILEDESC_TO_LEADER);
2116}
2117
2118/*
2119 * Release a filedesc structure.
2120 */
2121static void
2122fdescfree_fds(struct thread *td, struct filedesc *fdp, bool needclose)
2123{
2124 struct filedesc0 *fdp0;
2125 struct freetable *ft, *tft;
2126 struct filedescent *fde;
2127 struct file *fp;
2128 int i;
2129
2130 for (i = 0; i <= fdp->fd_lastfile; i++) {
2131 fde = &fdp->fd_ofilesfd_files->fdt_ofiles[i];
2132 fp = fde->fde_file;
2133 if (fp != NULL((void *)0)) {
2134 fdefree_last(fde);
2135 if (needclose)
2136 (void) closef(fp, td);
2137 else
2138 fdrop(fp, td);
2139 }
2140 }
2141
2142 if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
2143 free(fdp->fd_map, M_FILEDESC);
2144 if (fdp->fd_nfilesfd_files->fdt_nfiles > NDFILE20)
2145 free(fdp->fd_files, M_FILEDESC);
2146
2147 fdp0 = (struct filedesc0 *)fdp;
2148 SLIST_FOREACH_SAFE(ft, &fdp0->fd_free, ft_next, tft)
2149 free(ft->ft_table, M_FILEDESC);
2150
2151 fddrop(fdp);
2152}
2153
2154void
2155fdescfree(struct thread *td)
2156{
2157 struct proc *p;
2158 struct filedesc *fdp;
2159 struct vnode *cdir, *jdir, *rdir;
2160
2161 p = td->td_proc;
2162 fdp = p->p_fd;
2163 MPASS(fdp != NULL)do { } while (0);
2164
2165#ifdef RACCT1
2166 if (racct_enable) {
2167 PROC_LOCK(p);
2168 racct_set(p, RACCT_NOFILE, 0);
2169 PROC_UNLOCK(p);
2170 }
2171#endif
2172
2173 if (p->p_fdtol != NULL((void *)0))
2174 fdclearlocks(td);
2175
2176 PROC_LOCK(p);
2177 p->p_fd = NULL;
2178 PROC_UNLOCK(p);
2179
2180 if (refcount_release(&fdp->fd_refcnt) == 0)
2181 return;
2182
2183 FILEDESC_XLOCK(fdp);
2184 cdir = fdp->fd_cdir;
2185 fdp->fd_cdir = NULL((void *)0);
2186 rdir = fdp->fd_rdir;
2187 fdp->fd_rdir = NULL((void *)0);
2188 jdir = fdp->fd_jdir;
2189 fdp->fd_jdir = NULL((void *)0);
2190 FILEDESC_XUNLOCK(fdp);
2191
2192 if (cdir != NULL((void *)0))
2193 vrele(cdir);
2194 if (rdir != NULL((void *)0))
2195 vrele(rdir);
2196 if (jdir != NULL((void *)0))
2197 vrele(jdir);
2198
2199 fdescfree_fds(td, fdp, 1);
2200}
2201
2202void
2203fdescfree_remapped(struct filedesc *fdp)
2204{
2205
2206 if (fdp->fd_cdir != NULL((void *)0))
2207 vrele(fdp->fd_cdir);
2208 if (fdp->fd_rdir != NULL((void *)0))
2209 vrele(fdp->fd_rdir);
2210 if (fdp->fd_jdir != NULL((void *)0))
2211 vrele(fdp->fd_jdir);
2212
2213 fdescfree_fds(curthread(__curthread()), fdp, 0);
2214}
2215
2216/*
2217 * For setugid programs, we don't want people to use that setugidness
2218 * to generate error messages which write to a file that would
2219 * otherwise be off-limits to the process. We check for filesystems where
2220 * the vnode can change out from under us after execve (like [lin]procfs).
2221 *
2222 * Since fdsetugidsafety calls this only for fd 0, 1 and 2, this check is
2223 * sufficient. We also don't check for setugidness since we know we are.
2224 */
2225static bool
2226is_unsafe(struct file *fp)
2227{
2228 struct vnode *vp;
2229
2230 if (fp->f_type != DTYPE_VNODE1)
2231 return (false0);
2232
2233 vp = fp->f_vnode;
2234 return ((vp->v_vflag & VV_PROCDEP0x0100) != 0);
2235}
2236
2237/*
2238 * Make this setguid thing safe, if at all possible.
2239 */
2240void
2241fdsetugidsafety(struct thread *td)
2242{
2243 struct filedesc *fdp;
2244 struct file *fp;
2245 int i;
2246
2247 fdp = td->td_proc->p_fd;
2248 KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"))do { } while (0);
2249 MPASS(fdp->fd_nfiles >= 3)do { } while (0);
2250 for (i = 0; i <= 2; i++) {
2251 fp = fdp->fd_ofilesfd_files->fdt_ofiles[i].fde_file;
2252 if (fp != NULL((void *)0) && is_unsafe(fp)) {
2253 FILEDESC_XLOCK(fdp);
2254 knote_fdclose(td, i);
2255 /*
2256 * NULL-out descriptor prior to close to avoid
2257 * a race while close blocks.
2258 */
2259 fdfree(fdp, i);
2260 FILEDESC_XUNLOCK(fdp);
2261 (void) closef(fp, td);
2262 }
2263 }
2264}
2265
2266/*
2267 * If a specific file object occupies a specific file descriptor, close the
2268 * file descriptor entry and drop a reference on the file object. This is a
2269 * convenience function to handle a subsequent error in a function that calls
2270 * falloc() that handles the race that another thread might have closed the
2271 * file descriptor out from under the thread creating the file object.
2272 */
2273void
2274fdclose(struct thread *td, struct file *fp, int idx)
2275{
2276 struct filedesc *fdp = td->td_proc->p_fd;
2277
2278 FILEDESC_XLOCK(fdp);
2279 if (fdp->fd_ofilesfd_files->fdt_ofiles[idx].fde_file == fp) {
2280 fdfree(fdp, idx);
2281 FILEDESC_XUNLOCK(fdp);
2282 fdrop(fp, td);
2283 } else
2284 FILEDESC_XUNLOCK(fdp);
2285}
2286
2287/*
2288 * Close any files on exec?
2289 */
2290void
2291fdcloseexec(struct thread *td)
2292{
2293 struct filedesc *fdp;
2294 struct filedescent *fde;
2295 struct file *fp;
2296 int i;
2297
2298 fdp = td->td_proc->p_fd;
2299 KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"))do { } while (0);
2300 for (i = 0; i <= fdp->fd_lastfile; i++) {
2301 fde = &fdp->fd_ofilesfd_files->fdt_ofiles[i];
2302 fp = fde->fde_file;
2303 if (fp != NULL((void *)0) && (fp->f_type == DTYPE_MQUEUE7 ||
2304 (fde->fde_flags & UF_EXCLOSE0x01))) {
2305 FILEDESC_XLOCK(fdp);
2306 fdfree(fdp, i);
2307 (void) closefp(fdp, i, fp, td, 0);
2308 FILEDESC_UNLOCK_ASSERT(fdp)(void)0;
2309 }
2310 }
2311}
2312
2313/*
2314 * It is unsafe for set[ug]id processes to be started with file
2315 * descriptors 0..2 closed, as these descriptors are given implicit
2316 * significance in the Standard C library. fdcheckstd() will create a
2317 * descriptor referencing /dev/null for each of stdin, stdout, and
2318 * stderr that is not already open.
2319 */
2320int
2321fdcheckstd(struct thread *td)
2322{
2323 struct filedesc *fdp;
2324 register_t save;
2325 int i, error, devnull;
2326
2327 fdp = td->td_proc->p_fd;
2328 KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"))do { } while (0);
2329 MPASS(fdp->fd_nfiles >= 3)do { } while (0);
2330 devnull = -1;
2331 for (i = 0; i <= 2; i++) {
2332 if (fdp->fd_ofilesfd_files->fdt_ofiles[i].fde_file != NULL((void *)0))
2333 continue;
2334
2335 save = td->td_retvaltd_uretoff.tdu_retval[0];
2336 if (devnull != -1) {
2337 error = kern_dup(td, FDDUP_FIXED, 0, devnull, i);
2338 } else {
2339 error = kern_openat(td, AT_FDCWD-100, "/dev/null",
2340 UIO_SYSSPACE, O_RDWR0x0002, 0);
2341 if (error == 0) {
2342 devnull = td->td_retvaltd_uretoff.tdu_retval[0];
2343 KASSERT(devnull == i, ("we didn't get our fd"))do { } while (0);
2344 }
2345 }
2346 td->td_retvaltd_uretoff.tdu_retval[0] = save;
2347 if (error != 0)
2348 return (error);
2349 }
2350 return (0);
2351}
2352
2353/*
2354 * Internal form of close. Decrement reference count on file structure.
2355 * Note: td may be NULL when closing a file that was being passed in a
2356 * message.
2357 *
2358 * XXXRW: Giant is not required for the caller, but often will be held; this
2359 * makes it moderately likely the Giant will be recursed in the VFS case.
2360 */
2361int
2362closef(struct file *fp, struct thread *td)
2363{
2364 struct vnode *vp;
2365 struct flock lf;
2366 struct filedesc_to_leader *fdtol;
2367 struct filedesc *fdp;
2368
2369 /*
2370 * POSIX record locking dictates that any close releases ALL
2371 * locks owned by this process. This is handled by setting
2372 * a flag in the unlock to free ONLY locks obeying POSIX
2373 * semantics, and not to free BSD-style file locks.
2374 * If the descriptor was in a message, POSIX-style locks
2375 * aren't passed with the descriptor, and the thread pointer
2376 * will be NULL. Callers should be careful only to pass a
2377 * NULL thread pointer when there really is no owning
2378 * context that might have locks, or the locks will be
2379 * leaked.
2380 */
2381 if (fp->f_type == DTYPE_VNODE1 && td != NULL((void *)0)) {
2382 vp = fp->f_vnode;
2383 if ((td->td_proc->p_leader->p_flag & P_ADVLOCK0x00001) != 0) {
2384 lf.l_whence = SEEK_SET0;
2385 lf.l_start = 0;
2386 lf.l_len = 0;
2387 lf.l_type = F_UNLCK2;
2388 (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
2389 F_UNLCK2, &lf, F_POSIX0x040);
2390 }
2391 fdtol = td->td_proc->p_fdtol;
2392 if (fdtol != NULL((void *)0)) {
2393 /*
2394 * Handle special case where file descriptor table is
2395 * shared between multiple process leaders.
2396 */
2397 fdp = td->td_proc->p_fd;
2398 FILEDESC_XLOCK(fdp);
2399 for (fdtol = fdtol->fdl_next;
2400 fdtol != td->td_proc->p_fdtol;
2401 fdtol = fdtol->fdl_next) {
2402 if ((fdtol->fdl_leader->p_flag &
2403 P_ADVLOCK0x00001) == 0)
2404 continue;
2405 fdtol->fdl_holdcount++;
2406 FILEDESC_XUNLOCK(fdp);
2407 lf.l_whence = SEEK_SET0;
2408 lf.l_start = 0;
2409 lf.l_len = 0;
2410 lf.l_type = F_UNLCK2;
2411 vp = fp->f_vnode;
2412 (void) VOP_ADVLOCK(vp,
2413 (caddr_t)fdtol->fdl_leader, F_UNLCK2, &lf,
2414 F_POSIX0x040);
2415 FILEDESC_XLOCK(fdp);
2416 fdtol->fdl_holdcount--;
2417 if (fdtol->fdl_holdcount == 0 &&
2418 fdtol->fdl_wakeup != 0) {
2419 fdtol->fdl_wakeup = 0;
2420 wakeup(fdtol);
2421 }
2422 }
2423 FILEDESC_XUNLOCK(fdp);
2424 }
2425 }
2426 return (fdrop(fp, td));
2427}
2428
2429/*
2430 * Initialize the file pointer with the specified properties.
2431 *
2432 * The ops are set with release semantics to be certain that the flags, type,
2433 * and data are visible when ops is. This is to prevent ops methods from being
2434 * called with bad data.
2435 */
2436void
2437finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
2438{
2439 fp->f_data = data;
2440 fp->f_flag = flag;
2441 fp->f_type = type;
2442 atomic_store_rel_ptratomic_store_rel_long((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
2443}
2444
2445int
2446fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
2447 struct file **fpp, seq_t *seqp)
2448{
2449#ifdef CAPABILITIES1
2450 struct filedescent *fde;
2451#endif
2452 struct fdescenttbl *fdt;
2453 struct file *fp;
2454 u_int count;
2455#ifdef CAPABILITIES1
2456 seq_t seq;
2457 cap_rights_t haverights;
2458 int error;
2459#endif
2460
2461 fdt = fdp->fd_files;
2462 if ((u_int)fd >= fdt->fdt_nfiles)
2463 return (EBADF9);
2464 /*
2465 * Fetch the descriptor locklessly. We avoid fdrop() races by
2466 * never raising a refcount above 0. To accomplish this we have
2467 * to use a cmpset loop rather than an atomic_add. The descriptor
2468 * must be re-verified once we acquire a reference to be certain
2469 * that the identity is still correct and we did not lose a race
2470 * due to preemption.
2471 */
2472 for (;;) {
2473#ifdef CAPABILITIES1
2474 seq = seq_read(fd_seq(fdt, fd)(&(fdt)->fdt_ofiles[(fd)].fde_seq));
2475 fde = &fdt->fdt_ofiles[fd];
2476 haverights = *cap_rights_fde(fde);
2477 fp = fde->fde_file;
2478 if (!seq_consistent(fd_seq(fdt, fd)(&(fdt)->fdt_ofiles[(fd)].fde_seq), seq)) {
2479 cpu_spinwait()ia32_pause();
2480 continue;
2481 }
2482#else
2483 fp = fdt->fdt_ofiles[fd].fde_file;
2484#endif
2485 if (fp == NULL((void *)0))
2486 return (EBADF9);
2487#ifdef CAPABILITIES1
2488 error = cap_check(&haverights, needrightsp);
2489 if (error != 0)
2490 return (error);
2491#endif
2492 retry:
2493 count = fp->f_count;
2494 if (count == 0) {
2495 /*
2496 * Force a reload. Another thread could have reallocated the
2497 * table before this fd was closed, so it is possible that
2498 * there is a stale fp pointer in the cached version.
2499 */
2500 fdt = *(struct fdescenttbl * volatile *)&(fdp->fd_files);
2501 continue;
2502 }
2503 /*
2504 * Use an acquire barrier to force re-reading of fdt so it is
2505 * refreshed for verification.
2506 */
2507 if (atomic_cmpset_acq_intatomic_cmpset_int(&fp->f_count, count, count + 1) == 0)
2508 goto retry;
2509 fdt = fdp->fd_files;
2510#ifdef CAPABILITIES1
2511 if (seq_consistent_nomb(fd_seq(fdt, fd)(&(fdt)->fdt_ofiles[(fd)].fde_seq), seq))
2512#else
2513 if (fp == fdt->fdt_ofiles[fd].fde_file)
2514#endif
2515 break;
2516 fdrop(fp, curthread);
2517 }
2518 *fpp = fp;
2519 if (seqp != NULL((void *)0)) {
2520#ifdef CAPABILITIES1
2521 *seqp = seq;
2522#endif
2523 }
2524 return (0);
2525}
2526
2527/*
2528 * Extract the file pointer associated with the specified descriptor for the
2529 * current user process.
2530 *
2531 * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
2532 * returned.
2533 *
2534 * File's rights will be checked against the capability rights mask.
2535 *
2536 * If an error occurred the non-zero error is returned and *fpp is set to
2537 * NULL. Otherwise *fpp is held and set and zero is returned. Caller is
2538 * responsible for fdrop().
2539 */
2540static __inline int
2541_fget(struct thread *td, int fd, struct file **fpp, int flags,
2542 cap_rights_t *needrightsp, seq_t *seqp)
2543{
2544 struct filedesc *fdp;
2545 struct file *fp;
2546 int error;
2547
2548 *fpp = NULL((void *)0);
2549 fdp = td->td_proc->p_fd;
2550 error = fget_unlocked(fdp, fd, needrightsp, &fp, seqp);
2551 if (error != 0)
2552 return (error);
2553 if (fp->f_ops == &badfileops) {
2554 fdrop(fp, td);
2555 return (EBADF9);
2556 }
2557
2558 /*
2559 * A failed FREAD or FWRITE check returns EBADF, as per POSIX.
2560 */
2561 error = 0;
2562 switch (flags) {
2563 case FREAD0x0001:
2564 case FWRITE0x0002:
2565 if ((fp->f_flag & flags) == 0)
2566 error = EBADF9;
2567 break;
2568 case FEXEC0x00040000:
2569 if ((fp->f_flag & (FREAD0x0001 | FEXEC0x00040000)) == 0 ||
2570 ((fp->f_flag & FWRITE0x0002) != 0))
2571 error = EBADF9;
2572 break;
2573 case 0:
2574 break;
2575 default:
2576 KASSERT(0, ("wrong flags"))do { } while (0);
2577 }
2578
2579 if (error != 0) {
2580 fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td))
: _fnoop())
;
2581 return (error);
2582 }
2583
2584 *fpp = fp;
2585 return (0);
2586}
2587
2588int
2589fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
2590{
2591
2592 return (_fget(td, fd, fpp, 0, rightsp, NULL((void *)0)));
2593}
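/*
 * Illustrative sketch, not part of kern_descrip.c: the typical
 * in-kernel calling pattern for fget()/fdrop(), assuming a 'td' and
 * 'fd' supplied by the surrounding syscall handler (the function name
 * is hypothetical).
 */
static int
example_fget_consumer(struct thread *td, int fd)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	error = fget(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
	if (error != 0)
		return (error);		/* fp was set to NULL */
	/* ... use fp; the held reference pins it across sleeps ... */
	fdrop(fp, td);			/* release our single reference */
	return (0);
}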
2594
2595int
2596fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, u_char *maxprotp,
2597 struct file **fpp)
2598{
2599 int error;
2600#ifndef CAPABILITIES1
2601 error = _fget(td, fd, fpp, 0, rightsp, NULL((void *)0));
2602 if (maxprotp != NULL((void *)0))
2603 *maxprotp = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04));
2604#else
2605 struct filedesc *fdp = td->td_proc->p_fd;
2606 seq_t seq;
2607
2608 MPASS(cap_rights_is_set(rightsp, CAP_MMAP))do { } while (0);
2609 for (;;) {
2610 error = _fget(td, fd, fpp, 0, rightsp, &seq);
2611 if (error != 0)
2612 return (error);
2613 /*
2614 * If requested, convert capability rights to access flags.
2615 */
2616 if (maxprotp != NULL((void *)0))
2617 *maxprotp = cap_rights_to_vmprot(cap_rights(fdp, fd));
2618 if (!fd_modified(fdp, fd, seq))
2619 break;
2620 fdrop(*fpp, td)(refcount_release(&(*fpp)->f_count) ? _fdrop((*fpp), (
td)) : _fnoop())
;
2621 }
2622#endif
2623 return (error);
2624}
2625
2626int
2627fget_read(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
2628{
2629
2630 return (_fget(td, fd, fpp, FREAD0x0001, rightsp, NULL((void *)0)));
2631}
2632
2633int
2634fget_write(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
2635{
2636
2637 return (_fget(td, fd, fpp, FWRITE0x0002, rightsp, NULL((void *)0)));
2638}
2639
2640int
2641fget_fcntl(struct thread *td, int fd, cap_rights_t *rightsp, int needfcntl,
2642 struct file **fpp)
2643{
2644 struct filedesc *fdp = td->td_proc->p_fd;
2645#ifndef CAPABILITIES1
2646 return (fget_unlocked(fdp, fd, rightsp, fpp, NULL((void *)0)));
2647#else
2648 int error;
2649 seq_t seq;
2650
2651 MPASS(cap_rights_is_set(rightsp, CAP_FCNTL))do { } while (0);
2652 for (;;) {
2653 error = fget_unlocked(fdp, fd, rightsp, fpp, &seq);
2654 if (error != 0)
2655 return (error);
2656 error = cap_fcntl_check(fdp, fd, needfcntl);
2657 if (!fd_modified(fdp, fd, seq))
2658 break;
2659 fdrop(*fpp, td)(refcount_release(&(*fpp)->f_count) ? _fdrop((*fpp), (
td)) : _fnoop())
;
2660 }
2661 if (error != 0) {
2662 fdrop(*fpp, td)(refcount_release(&(*fpp)->f_count) ? _fdrop((*fpp), (
td)) : _fnoop())
;
2663 *fpp = NULL((void *)0);
2664 }
2665 return (error);
2666#endif
2667}
2668
2669/*
2670 * Like fget() but loads the underlying vnode, or returns an error if the
2671 * descriptor does not represent a vnode. Note that pipes use vnodes but
2672 * never have VM objects. The returned vnode will be vref()'d.
2673 *
2674 * XXX: what about the unused flags?
2675 */
2676static __inline int
2677_fgetvp(struct thread *td, int fd, int flags, cap_rights_t *needrightsp,
2678 struct vnode **vpp)
2679{
2680 struct file *fp;
2681 int error;
2682
2683 *vpp = NULL((void *)0);
2684 error = _fget(td, fd, &fp, flags, needrightsp, NULL((void *)0));
2685 if (error != 0)
2686 return (error);
2687 if (fp->f_vnode == NULL((void *)0)) {
2688 error = EINVAL22;
2689 } else {
2690 *vpp = fp->f_vnode;
2691 vref(*vpp);
2692 }
2693 fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td))
: _fnoop())
;
2694
2695 return (error);
2696}
2697
2698int
2699fgetvp(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
2700{
2701
2702 return (_fgetvp(td, fd, 0, rightsp, vpp));
2703}
2704
2705int
2706fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
2707 struct filecaps *havecaps, struct vnode **vpp)
2708{
2709 struct filedesc *fdp;
2710 struct file *fp;
2711#ifdef CAPABILITIES1
2712 int error;
2713#endif
2714
2715 fdp = td->td_proc->p_fd;
2716 fp = fget_locked(fdp, fd);
2717 if (fp == NULL((void *)0) || fp->f_ops == &badfileops)
2718 return (EBADF9);
2719
2720#ifdef CAPABILITIES1
2721 error = cap_check(cap_rights(fdp, fd), needrightsp);
2722 if (error != 0)
2723 return (error);
2724#endif
2725
2726 if (fp->f_vnode == NULL((void *)0))
2727 return (EINVAL22);
2728
2729 *vpp = fp->f_vnode;
2730 vref(*vpp);
2731 filecaps_copy(&fdp->fd_ofilesfd_files->fdt_ofiles[fd].fde_caps, havecaps, true1);
2732
2733 return (0);
2734}
2735
2736int
2737fgetvp_read(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
2738{
2739
2740 return (_fgetvp(td, fd, FREAD0x0001, rightsp, vpp));
2741}
2742
2743int
2744fgetvp_exec(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
2745{
2746
2747 return (_fgetvp(td, fd, FEXEC0x00040000, rightsp, vpp));
2748}
2749
2750#ifdef notyet
2751int
2752fgetvp_write(struct thread *td, int fd, cap_rights_t *rightsp,
2753 struct vnode **vpp)
2754{
2755
2756 return (_fgetvp(td, fd, FWRITE0x0002, rightsp, vpp));
2757}
2758#endif
2759
2760/*
2761 * Like fget() but loads the underlying socket, or returns an error if the
2762 * descriptor does not represent a socket.
2763 *
2764 * We bump the ref count on the returned socket. XXX Also obtain the SX lock
2765 * in the future.
2766 *
2767 * Note: fgetsock() and fputsock() are deprecated, as consumers should rely
2768 * on their file descriptor reference to prevent the socket from being free'd
2769 * during use.
2770 */
2771int
2772fgetsock(struct thread *td, int fd, cap_rights_t *rightsp, struct socket **spp,
2773 u_int *fflagp)
2774{
2775 struct file *fp;
2776 int error;
2777
2778 *spp = NULL((void *)0);
2779 if (fflagp != NULL((void *)0))
2780 *fflagp = 0;
2781 if ((error = _fget(td, fd, &fp, 0, rightsp, NULL((void *)0))) != 0)
2782 return (error);
2783 if (fp->f_type != DTYPE_SOCKET2) {
2784 error = ENOTSOCK38;
2785 } else {
2786 *spp = fp->f_data;
2787 if (fflagp)
2788 *fflagp = fp->f_flag;
2789 SOCK_LOCK(*spp)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
(&(&(*spp)->so_rcv)->sb_mtx)))))->mtx_lock !=
0x00000004 || !atomic_cmpset_long(&((((((&(&(*spp
)->so_rcv)->sb_mtx))))))->mtx_lock, 0x00000004, (_tid
)))) __mtx_lock_sleep(&((((((&(&(*spp)->so_rcv
)->sb_mtx))))))->mtx_lock, _tid, (((0))), ((((void *)0)
)), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) ((((&(&(*spp)->so_rcv)->sb_mtx
)))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t
) 0); } while (0); } while (0); } while (0)
;
2790 soref(*spp)do { (void)0; ++(*spp)->so_count; } while (0);
2791 SOCK_UNLOCK(*spp)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&(&(*spp)->so_rcv)->sb_mtx)))))->lock_object
.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release
->id, (uintptr_t) ((((&(&(*spp)->so_rcv)->sb_mtx
)))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t
) 0); } while (0); } while (0); if ((((((&(&(*spp)->
so_rcv)->sb_mtx)))))->mtx_lock != _tid || !atomic_cmpset_long
(&((((((&(&(*spp)->so_rcv)->sb_mtx))))))->
mtx_lock, (_tid), 0x00000004)) __mtx_unlock_sleep(&((((((
&(&(*spp)->so_rcv)->sb_mtx))))))->mtx_lock, (
((0))), ((((void *)0))), ((0))); } while (0)
;
2792 }
2793 fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td))
: _fnoop())
;
2794
2795 return (error);
2796}
2797
2798/*
2799 * Drop the reference count on the socket and XXX release the SX lock in the
2800 * future. The last reference closes the socket.
2801 *
2802 * Note: fputsock() is deprecated; see the comment for fgetsock().
2803 */
2804void
2805fputsock(struct socket *so)
2806{
2807
2808 ACCEPT_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&accept_mtx))))->mtx_lock != 0x00000004 || !atomic_cmpset_long
(&(((((&accept_mtx)))))->mtx_lock, 0x00000004, (_tid
)))) __mtx_lock_sleep(&(((((&accept_mtx)))))->mtx_lock
, _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do
{ if (__builtin_expect((sdt_lockstat___adaptive__acquire->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire->
id, (uintptr_t) (((&accept_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
} while (0)
;
2809 SOCK_LOCK(so)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
(&(&(so)->so_rcv)->sb_mtx)))))->mtx_lock != 0x00000004
|| !atomic_cmpset_long(&((((((&(&(so)->so_rcv
)->sb_mtx))))))->mtx_lock, 0x00000004, (_tid)))) __mtx_lock_sleep
(&((((((&(&(so)->so_rcv)->sb_mtx))))))->
mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do { (
void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) ((((&(&(so)->so_rcv)->sb_mtx
)))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t
) 0); } while (0); } while (0); } while (0)
;
2810 CURVNET_SET(so->so_vnet);
2811 sorele(so)do { (void)0; (void)0; if ((so)->so_count <= 0) panic("sorele"
); if (--(so)->so_count == 0) sofree(so); else { do { uintptr_t
_tid = (uintptr_t)((__curthread())); if ((((((&(&(so
)->so_rcv)->sb_mtx)))))->lock_object.lo_data == 0) do
{ (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release
->id, (uintptr_t) ((((&(&(so)->so_rcv)->sb_mtx
)))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t
) 0); } while (0); } while (0); if ((((((&(&(so)->
so_rcv)->sb_mtx)))))->mtx_lock != _tid || !atomic_cmpset_long
(&((((((&(&(so)->so_rcv)->sb_mtx))))))->
mtx_lock, (_tid), 0x00000004)) __mtx_unlock_sleep(&((((((
&(&(so)->so_rcv)->sb_mtx))))))->mtx_lock, ((
(0))), ((((void *)0))), ((0))); } while (0); do { uintptr_t _tid
= (uintptr_t)((__curthread())); if (((((&accept_mtx))))->
lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect
((sdt_lockstat___adaptive__release->id), 0)) (*sdt_probe_func
)(sdt_lockstat___adaptive__release->id, (uintptr_t) (((&
accept_mtx))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (
uintptr_t) 0); } while (0); } while (0); if (((((&accept_mtx
))))->mtx_lock != _tid || !atomic_cmpset_long(&(((((&
accept_mtx)))))->mtx_lock, (_tid), 0x00000004)) __mtx_unlock_sleep
(&(((((&accept_mtx)))))->mtx_lock, (((0))), ((((void
*)0))), ((0))); } while (0); } } while (0)
;
2812 CURVNET_RESTORE();
2813}
2814
2815/*
2816 * Handle the last reference to a file being closed.
2817 */
2818int
2819_fdrop(struct file *fp, struct thread *td)
2820{
2821 int error;
2822
2823 if (fp->f_count != 0)
2824 panic("fdrop: count %d", fp->f_count);
2825 error = fo_close(fp, td);
2826 atomic_subtract_int(&openfiles, 1);
2827 crfree(fp->f_cred);
2828 free(fp->f_advicef_vnun.fvn_advice, M_FADVISE);
2829 uma_zfree(file_zone, fp);
2830
2831 return (error);
2832}
2833
2834/*
2835 * Apply an advisory lock on a file descriptor.
2836 *
2837 * Just attempt to get a record lock of the requested type on the entire file
2838 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2839 */
2840#ifndef _SYS_SYSPROTO_H_
2841struct flock_args {
2842 int fd;
2843 int how;
2844};
2845#endif
2846/* ARGSUSED */
2847int
2848sys_flock(struct thread *td, struct flock_args *uap)
2849{
2850 struct file *fp;
2851 struct vnode *vp;
2852 struct flock lf;
2853 cap_rights_t rights;
2854 int error;
2855
2856 error = fget(td, uap->fd, cap_rights_init(&rights, CAP_FLOCK)__cap_rights_init(0, &rights, ((1ULL << (57 + (0)))
| (0x0000000000010000ULL)), 0ULL)
, &fp);
2857 if (error != 0)
2858 return (error);
2859 if (fp->f_type != DTYPE_VNODE1) {
2860 fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td))
: _fnoop())
;
2861 return (EOPNOTSUPP45);
2862 }
2863
2864 vp = fp->f_vnode;
2865 lf.l_whence = SEEK_SET0;
2866 lf.l_start = 0;
2867 lf.l_len = 0;
2868 if (uap->how & LOCK_UN0x08) {
2869 lf.l_type = F_UNLCK2;
2870 atomic_clear_int(&fp->f_flag, FHASLOCK0x4000);
2871 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK2, &lf, F_FLOCK0x020);
2872 goto done2;
2873 }
2874 if (uap->how & LOCK_EX0x02)
2875 lf.l_type = F_WRLCK3;
2876 else if (uap->how & LOCK_SH0x01)
2877 lf.l_type = F_RDLCK1;
2878 else {
2879 error = EBADF9;
2880 goto done2;
2881 }
2882 atomic_set_int(&fp->f_flag, FHASLOCK0x4000);
2883 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK12, &lf,
2884 (uap->how & LOCK_NB0x04) ? F_FLOCK0x020 : F_FLOCK0x020 | F_WAIT0x010);
2885done2:
2886 fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td))
: _fnoop())
;
2887 return (error);
2888}
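/*
 * Illustrative sketch, not part of kern_descrip.c: the code above as
 * seen from userland. flock(2) always locks the entire file, so any
 * two callers of this function on the same file contend for one lock.
 */
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

int
lock_whole_file(const char *path)
{
	int fd;

	fd = open(path, O_RDWR);
	if (fd == -1)
		return (-1);
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {	/* kernel: F_WRLCK */
		close(fd);
		return (-1);
	}
	/* ... critical section ... */
	flock(fd, LOCK_UN);				/* kernel: F_UNLCK */
	close(fd);
	return (0);
}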
2889/*
2890 * Duplicate the specified descriptor to a free descriptor.
2891 */
2892int
2893dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
2894 int openerror, int *indxp)
2895{
2896 struct filedescent *newfde, *oldfde;
2897 struct file *fp;
2898 int error, indx;
2899
2900 KASSERT(openerror == ENODEV || openerror == ENXIO,do { } while (0)
2901 ("unexpected error %d in %s", openerror, __func__))do { } while (0);
2902
2903 /*
2904 * If the to-be-dup'd fd number is greater than the allowed number
2905 * of file descriptors, or the fd to be dup'd has already been
2906 * closed, then reject.
2907 */
2908 FILEDESC_XLOCK(fdp)(void)__sx_xlock(((&(fdp)->fd_sx)), (__curthread()), 0
, (((void *)0)), (0))
;
2909 if ((fp = fget_locked(fdp, dfd)) == NULL((void *)0)) {
2910 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
2911 return (EBADF9);
2912 }
2913
2914 error = fdalloc(td, 0, &indx);
2915 if (error != 0) {
2916 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
2917 return (error);
2918 }
2919
2920 /*
2921 * There are two cases of interest here.
2922 *
2923 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
2924 *
2925 * For ENXIO steal away the file structure from (dfd) and store it in
2926 * (indx). (dfd) is effectively closed by this operation.
2927 */
2928 switch (openerror) {
2929 case ENODEV19:
2930 /*
2931 * Check that the mode the file is being opened for is a
2932 * subset of the mode of the existing descriptor.
2933 */
2934 if (((mode & (FREAD0x0001|FWRITE0x0002)) | fp->f_flag) != fp->f_flag) {
2935 fdunused(fdp, indx);
2936 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
2937 return (EACCES13);
2938 }
2939 fhold(fp)(refcount_acquire(&(fp)->f_count));
2940 newfde = &fdp->fd_ofilesfd_files->fdt_ofiles[indx];
2941 oldfde = &fdp->fd_ofilesfd_files->fdt_ofiles[dfd];
2942#ifdef CAPABILITIES1
2943 seq_write_begin(&newfde->fde_seq);
2944#endif
2945 memcpy(newfde, oldfde, fde_change_size(__builtin_offsetof(struct filedescent, fde_seq)));
2946 filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps, true1);
2947#ifdef CAPABILITIES1
2948 seq_write_end(&newfde->fde_seq);
2949#endif
2950 break;
2951 case ENXIO6:
2952 /*
2953 * Steal away the file pointer from dfd and stuff it into indx.
2954 */
2955 newfde = &fdp->fd_ofilesfd_files->fdt_ofiles[indx];
2956 oldfde = &fdp->fd_ofilesfd_files->fdt_ofiles[dfd];
2957#ifdef CAPABILITIES1
2958 seq_write_begin(&newfde->fde_seq);
2959#endif
2960 memcpy(newfde, oldfde, fde_change_size(__builtin_offsetof(struct filedescent, fde_seq)));
2961 oldfde->fde_file = NULL((void *)0);
2962 fdunused(fdp, dfd);
2963#ifdef CAPABILITIES1
2964 seq_write_end(&newfde->fde_seq);
2965#endif
2966 break;
2967 }
2968 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
2969 *indxp = indx;
2970 return (0);
2971}
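/*
 * Illustrative sketch, not part of kern_descrip.c: the ENODEV-case
 * access check above is a bitwise subset test. OR-ing the requested
 * bits into the granted bits changes nothing exactly when the request
 * is a subset of what the descriptor already allows.
 */
static int
is_subset(int requested, int granted)
{

	return ((requested | granted) == granted);
}
/* is_subset(FREAD, FREAD | FWRITE) == 1; is_subset(FWRITE, FREAD) == 0 */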
2972
2973/*
2974 * This sysctl determines whether we will allow a process to chroot(2) if it
2975 * has a directory open:
2976 * 0: disallowed for all processes.
2977 * 1: allowed for processes that were not already chroot(2)'ed.
2978 * 2: allowed for all processes.
2979 */
2980
2981static int chroot_allow_open_directories = 1;
2982
2983SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,static struct sysctl_oid sysctl___kern_chroot_allow_open_directories
= { .oid_parent = ((&(&sysctl___kern)->oid_children
)), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind
= (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = (
&chroot_allow_open_directories), .oid_arg2 = (0), .oid_name
= ("chroot_allow_open_directories"), .oid_handler = (sysctl_handle_int
), .oid_fmt = ("I"), .oid_descr = "Allow a process to chroot(2) if it has a directory open"
}; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_chroot_allow_open_directories
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_chroot_allow_open_directories
); _Static_assert(((((0x80000000|0x40000000)) & 0xf) == 0
|| (((0x80000000|0x40000000)) & 0) == 2) && sizeof
(int) == sizeof(*(&chroot_allow_open_directories)), "compile-time assertion failed"
)
2984 &chroot_allow_open_directories, 0,static struct sysctl_oid sysctl___kern_chroot_allow_open_directories
= { .oid_parent = ((&(&sysctl___kern)->oid_children
)), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind
= (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = (
&chroot_allow_open_directories), .oid_arg2 = (0), .oid_name
= ("chroot_allow_open_directories"), .oid_handler = (sysctl_handle_int
), .oid_fmt = ("I"), .oid_descr = "Allow a process to chroot(2) if it has a directory open"
}; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_chroot_allow_open_directories
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_chroot_allow_open_directories
); _Static_assert(((((0x80000000|0x40000000)) & 0xf) == 0
|| (((0x80000000|0x40000000)) & 0) == 2) && sizeof
(int) == sizeof(*(&chroot_allow_open_directories)), "compile-time assertion failed"
)
2985 "Allow a process to chroot(2) if it has a directory open")static struct sysctl_oid sysctl___kern_chroot_allow_open_directories
= { .oid_parent = ((&(&sysctl___kern)->oid_children
)), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind
= (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = (
&chroot_allow_open_directories), .oid_arg2 = (0), .oid_name
= ("chroot_allow_open_directories"), .oid_handler = (sysctl_handle_int
), .oid_fmt = ("I"), .oid_descr = "Allow a process to chroot(2) if it has a directory open"
}; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_chroot_allow_open_directories
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_chroot_allow_open_directories
); _Static_assert(((((0x80000000|0x40000000)) & 0xf) == 0
|| (((0x80000000|0x40000000)) & 0) == 2) && sizeof
(int) == sizeof(*(&chroot_allow_open_directories)), "compile-time assertion failed"
)
;
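/*
 * Illustrative sketch, not part of kern_descrip.c: reading the knob
 * declared above from userland with sysctlbyname(3).
 */
#include <sys/types.h>
#include <sys/sysctl.h>

int
chroot_policy(void)
{
	int v;
	size_t len = sizeof(v);

	if (sysctlbyname("kern.chroot_allow_open_directories",
	    &v, &len, NULL, 0) == -1)
		return (-1);
	return (v);	/* 0, 1 or 2, as documented above */
}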
2986
2987/*
2988 * Helper for the stricter chroot(2) security setting: refuse if
2989 * any file descriptors are open directories.
2990 */
2991static int
2992chroot_refuse_vdir_fds(struct filedesc *fdp)
2993{
2994 struct vnode *vp;
2995 struct file *fp;
2996 int fd;
2997
2998 FILEDESC_LOCK_ASSERT(fdp)(void)0;
2999
3000 for (fd = 0; fd <= fdp->fd_lastfile; fd++) {
3001 fp = fget_locked(fdp, fd);
3002 if (fp == NULL((void *)0))
3003 continue;
3004 if (fp->f_type == DTYPE_VNODE1) {
3005 vp = fp->f_vnode;
3006 if (vp->v_type == VDIR)
3007 return (EPERM1);
3008 }
3009 }
3010 return (0);
3011}
3012
3013/*
3014 * Common routine for kern_chroot() and jail_attach(). The caller is
3015 * responsible for invoking priv_check() and mac_vnode_check_chroot() to
3016 * authorize this operation.
3017 */
3018int
3019pwd_chroot(struct thread *td, struct vnode *vp)
3020{
3021 struct filedesc *fdp;
3022 struct vnode *oldvp;
3023 int error;
3024
3025 fdp = td->td_proc->p_fd;
3026 FILEDESC_XLOCK(fdp)(void)__sx_xlock(((&(fdp)->fd_sx)), (__curthread()), 0
, (((void *)0)), (0))
;
3027 if (chroot_allow_open_directories == 0 ||
3028 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
3029 error = chroot_refuse_vdir_fds(fdp);
3030 if (error != 0) {
3031 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
3032 return (error);
3033 }
3034 }
3035 oldvp = fdp->fd_rdir;
3036 VREF(vp)vref(vp);
3037 fdp->fd_rdir = vp;
3038 if (fdp->fd_jdir == NULL((void *)0)) {
3039 VREF(vp)vref(vp);
3040 fdp->fd_jdir = vp;
3041 }
3042 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
3043 vrele(oldvp);
3044 return (0);
3045}
3046
3047void
3048pwd_chdir(struct thread *td, struct vnode *vp)
3049{
3050 struct filedesc *fdp;
3051 struct vnode *oldvp;
3052
3053 fdp = td->td_proc->p_fd;
3054 FILEDESC_XLOCK(fdp)(void)__sx_xlock(((&(fdp)->fd_sx)), (__curthread()), 0
, (((void *)0)), (0))
;
3055 VNASSERT(vp->v_usecount > 0, vp,do { } while (0)
3056 ("chdir to a vnode with zero usecount"))do { } while (0);
3057 oldvp = fdp->fd_cdir;
3058 fdp->fd_cdir = vp;
3059 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
3060 vrele(oldvp);
3061}
3062
3063/*
3064 * Scan all active processes and prisons to see if any of them have a current
3065 * or root directory of `olddp'. If so, replace them with the new mount point.
3066 */
3067void
3068mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
3069{
3070 struct filedesc *fdp;
3071 struct prison *pr;
3072 struct proc *p;
3073 int nrele;
3074
3075 if (vrefcnt(olddp) == 1)
3076 return;
3077 nrele = 0;
3078 sx_slock(&allproc_lock)(void)__sx_slock(((&allproc_lock)), 0, (((void *)0)), (0)
)
;
3079 FOREACH_PROC_IN_SYSTEM(p)for (((p)) = (((&allproc))->lh_first); ((p)); ((p)) = (
(((p)))->p_list.le_next))
{
3080 PROC_LOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&(p)->p_mtx))))->mtx_lock != 0x00000004 || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, 0x00000004, (
_tid)))) __mtx_lock_sleep(&(((((&(p)->p_mtx)))))->
mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do { (
void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0,
(uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); }
while (0); } while (0)
;
3081 fdp = fdhold(p);
3082 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3083 if (fdp == NULL((void *)0))
3084 continue;
3085 FILEDESC_XLOCK(fdp)(void)__sx_xlock(((&(fdp)->fd_sx)), (__curthread()), 0
, (((void *)0)), (0))
;
3086 if (fdp->fd_cdir == olddp) {
3087 vref(newdp);
3088 fdp->fd_cdir = newdp;
3089 nrele++;
3090 }
3091 if (fdp->fd_rdir == olddp) {
3092 vref(newdp);
3093 fdp->fd_rdir = newdp;
3094 nrele++;
3095 }
3096 if (fdp->fd_jdir == olddp) {
3097 vref(newdp);
3098 fdp->fd_jdir = newdp;
3099 nrele++;
3100 }
3101 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
3102 fddrop(fdp);
3103 }
3104 sx_sunlock(&allproc_lock)__sx_sunlock(((&allproc_lock)), (((void *)0)), (0));
3105 if (rootvnode == olddp) {
3106 vref(newdp);
3107 rootvnode = newdp;
3108 nrele++;
3109 }
3110 mtx_lock(&prison0.pr_mtx)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&prison0.pr_mtx))))->mtx_lock != 0x00000004 || !atomic_cmpset_long
(&(((((&prison0.pr_mtx)))))->mtx_lock, 0x00000004,
(_tid)))) __mtx_lock_sleep(&(((((&prison0.pr_mtx))))
)->mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do
{ (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) (((&prison0.pr_mtx))), (uintptr_t) 0
, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); }
while (0); } while (0)
;
3111 if (prison0.pr_root == olddp) {
3112 vref(newdp);
3113 prison0.pr_root = newdp;
3114 nrele++;
3115 }
3116 mtx_unlock(&prison0.pr_mtx)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
prison0.pr_mtx))))->lock_object.lo_data == 0) do { (void)0
; do { if (__builtin_expect((sdt_lockstat___adaptive__release
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release
->id, (uintptr_t) (((&prison0.pr_mtx))), (uintptr_t) 0
, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); }
while (0); if (((((&prison0.pr_mtx))))->mtx_lock != _tid
|| !atomic_cmpset_long(&(((((&prison0.pr_mtx)))))->
mtx_lock, (_tid), 0x00000004)) __mtx_unlock_sleep(&(((((&
prison0.pr_mtx)))))->mtx_lock, (((0))), ((((void *)0))), (
(0))); } while (0)
;
3117 sx_slock(&allprison_lock)(void)__sx_slock(((&allprison_lock)), 0, (((void *)0)), (
0))
;
3118 TAILQ_FOREACH(pr, &allprison, pr_list)for ((pr) = (((&allprison))->tqh_first); (pr); (pr) = (
((pr))->pr_list.tqe_next))
{
3119 mtx_lock(&pr->pr_mtx)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&pr->pr_mtx))))->mtx_lock != 0x00000004 || !atomic_cmpset_long
(&(((((&pr->pr_mtx)))))->mtx_lock, 0x00000004, (
_tid)))) __mtx_lock_sleep(&(((((&pr->pr_mtx)))))->
mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do { (
void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) (((&pr->pr_mtx))), (uintptr_t) 0,
(uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); }
while (0); } while (0)
;
3120 if (pr->pr_root == olddp) {
3121 vref(newdp);
3122 pr->pr_root = newdp;
3123 nrele++;
3124 }
3125 mtx_unlock(&pr->pr_mtx)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
pr->pr_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&pr->pr_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&pr->pr_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&pr->pr_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&pr->pr_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3126 }
3127 sx_sunlock(&allprison_lock)__sx_sunlock(((&allprison_lock)), (((void *)0)), (0));
3128 while (nrele--)
3129 vrele(olddp);
3130}
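/*
 * Illustrative sketch, not part of kern_descrip.c: the 'nrele'
 * discipline used above. vrele() may sleep, so while locks are held
 * the releases are merely counted, and the deferred vrele() calls run
 * only after everything has been unlocked. The lock itself is omitted
 * here for brevity, and the function name is hypothetical.
 */
static void
replace_refs(struct vnode **slots, int n, struct vnode *oldvp,
    struct vnode *newvp)
{
	int i, nrele;

	nrele = 0;
	/* Phase 1: normally under a lock -- swap pointers and count. */
	for (i = 0; i < n; i++) {
		if (slots[i] == oldvp) {
			vref(newvp);
			slots[i] = newvp;
			nrele++;
		}
	}
	/* Phase 2: after unlocking, drop the deferred references. */
	while (nrele--)
		vrele(oldvp);
}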
3131
3132struct filedesc_to_leader *
3133filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
3134{
3135 struct filedesc_to_leader *fdtol;
3136
3137 fdtol = malloc(sizeof(struct filedesc_to_leader),
3138 M_FILEDESC_TO_LEADER, M_WAITOK0x0002);
3139 fdtol->fdl_refcount = 1;
3140 fdtol->fdl_holdcount = 0;
3141 fdtol->fdl_wakeup = 0;
3142 fdtol->fdl_leader = leader;
3143 if (old != NULL((void *)0)) {
3144 FILEDESC_XLOCK(fdp)(void)__sx_xlock(((&(fdp)->fd_sx)), (__curthread()), 0
, (((void *)0)), (0))
;
3145 fdtol->fdl_next = old->fdl_next;
3146 fdtol->fdl_prev = old;
3147 old->fdl_next = fdtol;
3148 fdtol->fdl_next->fdl_prev = fdtol;
3149 FILEDESC_XUNLOCK(fdp)__sx_xunlock(((&(fdp)->fd_sx)), (__curthread()), (((void
*)0)), (0))
;
3150 } else {
3151 fdtol->fdl_next = fdtol;
3152 fdtol->fdl_prev = fdtol;
3153 }
3154 return (fdtol);
3155}
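/*
 * Illustrative sketch, not part of kern_descrip.c: the circular
 * doubly-linked list discipline used above. A lone node points at
 * itself, so insertion after an existing node needs no NULL checks.
 */
struct ring { struct ring *next, *prev; };

static void
ring_init(struct ring *n)
{

	n->next = n->prev = n;
}

static void
ring_insert_after(struct ring *old, struct ring *n)
{

	n->next = old->next;
	n->prev = old;
	old->next = n;
	n->next->prev = n;
}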
3156
3157static int
3158sysctl_kern_proc_nfds(SYSCTL_HANDLER_ARGSstruct sysctl_oid *oidp, void *arg1, intmax_t arg2, struct sysctl_req
*req
)
3159{
3160 struct filedesc *fdp;
3161 int i, count, slots;
3162
3163 if (*(int *)arg1 != 0)
3164 return (EINVAL22);
3165
3166 fdp = curproc((__curthread())->td_proc)->p_fd;
3167 count = 0;
3168 FILEDESC_SLOCK(fdp)(void)__sx_slock(((&(fdp)->fd_sx)), 0, (((void *)0)), (
0))
;
3169 slots = NDSLOTS(fdp->fd_lastfile + 1)(((fdp->fd_lastfile + 1) + (sizeof(u_long) * 8) - 1) / (sizeof
(u_long) * 8))
;
3170 for (i = 0; i < slots; i++)
3171 count += bitcountl(fdp->fd_map[i])__bitcount64((unsigned long)((u_long)(fdp->fd_map[i])));
3172 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
3173
3174 return (SYSCTL_OUT(req, &count, sizeof(count))(req->oldfunc)(req, &count, sizeof(count)));
3175}
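/*
 * Illustrative sketch, not part of kern_descrip.c: the handler above
 * counts open descriptors by popcounting the fd allocation bitmap one
 * u_long slot at a time. The same idea with a compiler builtin:
 */
static int
count_set_bits(const unsigned long *map, int slots)
{
	int i, count;

	count = 0;
	for (i = 0; i < slots; i++)
		count += __builtin_popcountl(map[i]);
	return (count);
}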
3176
3177static SYSCTL_NODE(_kern_proc, KERN_PROC_NFDS, nfds,struct sysctl_oid sysctl___kern_proc_nfds = { .oid_parent = (
(&(&sysctl___kern_proc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (43), .oid_kind = (1|(0x80000000
|0x00040000)), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), .oid_name
= ("nfds"), .oid_handler = (sysctl_kern_proc_nfds), .oid_fmt
= ("N"), .oid_descr = "Number of open file descriptors" }; __asm__
(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"
); static void const * const __set_sysctl_set_sym_sysctl___kern_proc_nfds
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_proc_nfds); _Static_assert
(((0x80000000|0x00040000) & 0xf) == 0 || ((0x80000000|0x00040000
) & 0) == 1, "compile-time assertion failed")
3178 CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_nfds,struct sysctl_oid sysctl___kern_proc_nfds = { .oid_parent = (
(&(&sysctl___kern_proc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (43), .oid_kind = (1|(0x80000000
|0x00040000)), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), .oid_name
= ("nfds"), .oid_handler = (sysctl_kern_proc_nfds), .oid_fmt
= ("N"), .oid_descr = "Number of open file descriptors" }; __asm__
(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"
); static void const * const __set_sysctl_set_sym_sysctl___kern_proc_nfds
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_proc_nfds); _Static_assert
(((0x80000000|0x00040000) & 0xf) == 0 || ((0x80000000|0x00040000
) & 0) == 1, "compile-time assertion failed")
3179 "Number of open file descriptors")struct sysctl_oid sysctl___kern_proc_nfds = { .oid_parent = (
(&(&sysctl___kern_proc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (43), .oid_kind = (1|(0x80000000
|0x00040000)), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), .oid_name
= ("nfds"), .oid_handler = (sysctl_kern_proc_nfds), .oid_fmt
= ("N"), .oid_descr = "Number of open file descriptors" }; __asm__
(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"
); static void const * const __set_sysctl_set_sym_sysctl___kern_proc_nfds
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_proc_nfds); _Static_assert
(((0x80000000|0x00040000) & 0xf) == 0 || ((0x80000000|0x00040000
) & 0) == 1, "compile-time assertion failed")
;
3180
3181/*
3182 * Export the file structures of all open files in the system.
3183 */
3184static int
3185sysctl_kern_file(SYSCTL_HANDLER_ARGSstruct sysctl_oid *oidp, void *arg1, intmax_t arg2, struct sysctl_req
*req
)
3186{
3187 struct xfile xf;
3188 struct filedesc *fdp;
3189 struct file *fp;
3190 struct proc *p;
3191 int error, n;
3192
3193 error = sysctl_wire_old_buffer(req, 0);
3194 if (error != 0)
3195 return (error);
3196 if (req->oldptr == NULL((void *)0)) {
3197 n = 0;
3198 sx_slock(&allproc_lock)(void)__sx_slock(((&allproc_lock)), 0, (((void *)0)), (0)
)
;
3199 FOREACH_PROC_IN_SYSTEM(p)for (((p)) = (((&allproc))->lh_first); ((p)); ((p)) = (
(((p)))->p_list.le_next))
{
3200 PROC_LOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&(p)->p_mtx))))->mtx_lock != 0x00000004 || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, 0x00000004, (
_tid)))) __mtx_lock_sleep(&(((((&(p)->p_mtx)))))->
mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do { (
void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0,
(uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); }
while (0); } while (0)
;
3201 if (p->p_state == PRS_NEW) {
3202 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3203 continue;
3204 }
3205 fdp = fdhold(p);
3206 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3207 if (fdp == NULL((void *)0))
3208 continue;
3209 /* This overestimates for sparse tables. */
3210 if (fdp->fd_lastfile > 0)
3211 n += fdp->fd_lastfile;
3212 fddrop(fdp);
3213 }
3214 sx_sunlock(&allproc_lock)__sx_sunlock(((&allproc_lock)), (((void *)0)), (0));
3215 return (SYSCTL_OUT(req, 0, n * sizeof(xf))(req->oldfunc)(req, 0, n * sizeof(xf)));
3216 }
3217 error = 0;
3218 bzero(&xf, sizeof(xf));
3219 xf.xf_size = sizeof(xf);
3220 sx_slock(&allproc_lock)(void)__sx_slock(((&allproc_lock)), 0, (((void *)0)), (0)
)
;
3221 FOREACH_PROC_IN_SYSTEM(p)for (((p)) = (((&allproc))->lh_first); ((p)); ((p)) = (
(((p)))->p_list.le_next))
{
3222 PROC_LOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((
&(p)->p_mtx))))->mtx_lock != 0x00000004 || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, 0x00000004, (
_tid)))) __mtx_lock_sleep(&(((((&(p)->p_mtx)))))->
mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do { (
void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire
->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire
->id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0,
(uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); }
while (0); } while (0)
;
3223 if (p->p_state == PRS_NEW) {
3224 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3225 continue;
3226 }
3227 if (p_cansee(req->td, p) != 0) {
3228 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3229 continue;
3230 }
3231 xf.xf_pid = p->p_pid;
3232 xf.xf_uid = p->p_ucred->cr_uid;
3233 fdp = fdhold(p);
3234 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3235 if (fdp == NULL((void *)0))
3236 continue;
3237 FILEDESC_SLOCK(fdp)(void)__sx_slock(((&(fdp)->fd_sx)), 0, (((void *)0)), (
0))
;
3238 for (n = 0; fdp->fd_refcnt > 0 && n <= fdp->fd_lastfile; ++n) {
3239 if ((fp = fdp->fd_ofilesfd_files->fdt_ofiles[n].fde_file) == NULL((void *)0))
3240 continue;
3241 xf.xf_fd = n;
3242 xf.xf_file = fp;
3243 xf.xf_data = fp->f_data;
3244 xf.xf_vnode = fp->f_vnode;
3245 xf.xf_type = fp->f_type;
3246 xf.xf_count = fp->f_count;
3247 xf.xf_msgcount = 0;
3248 xf.xf_offset = foffset_get(fp);
3249 xf.xf_flag = fp->f_flag;
3250 error = SYSCTL_OUT(req, &xf, sizeof(xf))(req->oldfunc)(req, &xf, sizeof(xf));
3251 if (error)
3252 break;
3253 }
3254 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
3255 fddrop(fdp);
3256 if (error)
3257 break;
3258 }
3259 sx_sunlock(&allproc_lock)__sx_sunlock(((&allproc_lock)), (((void *)0)), (0));
3260 return (error);
3261}
3262
3263SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,static struct sysctl_oid sysctl___kern_file = { .oid_parent =
((&(&sysctl___kern)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (15), .oid_kind = ((5|0x80000000
|0x00040000)), .oid_arg1 = (0), .oid_arg2 = (0), .oid_name = (
"file"), .oid_handler = (sysctl_kern_file), .oid_fmt = ("S,xfile"
), .oid_descr = "Entire file table" }; __asm__(".globl " "__start_set_sysctl_set"
); __asm__(".globl " "__stop_set_sysctl_set"); static void const
* const __set_sysctl_set_sym_sysctl___kern_file __attribute__
((__section__("set_" "sysctl_set"))) __attribute__((__used__)
) = &(sysctl___kern_file); _Static_assert(((5|0x80000000|
0x00040000) & 0xf) != 0, "compile-time assertion failed")
3264 0, 0, sysctl_kern_file, "S,xfile", "Entire file table")static struct sysctl_oid sysctl___kern_file = { .oid_parent =
((&(&sysctl___kern)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (15), .oid_kind = ((5|0x80000000
|0x00040000)), .oid_arg1 = (0), .oid_arg2 = (0), .oid_name = (
"file"), .oid_handler = (sysctl_kern_file), .oid_fmt = ("S,xfile"
), .oid_descr = "Entire file table" }; __asm__(".globl " "__start_set_sysctl_set"
); __asm__(".globl " "__stop_set_sysctl_set"); static void const
* const __set_sysctl_set_sym_sysctl___kern_file __attribute__
((__section__("set_" "sysctl_set"))) __attribute__((__used__)
) = &(sysctl___kern_file); _Static_assert(((5|0x80000000|
0x00040000) & 0xf) != 0, "compile-time assertion failed")
;
3265
3266#ifdef KINFO_FILE_SIZE1392
3267CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE)_Static_assert(sizeof(struct kinfo_file) == 1392, "compile-time assertion failed"
)
;
3268#endif
3269
3270static int
3271xlate_fflags(int fflags)
3272{
3273 static const struct {
3274 int fflag;
3275 int kf_fflag;
3276 } fflags_table[] = {
3277 { FAPPEND0x0008, KF_FLAG_APPEND0x00000004 },
3278 { FASYNC0x0040, KF_FLAG_ASYNC0x00000008 },
3279 { FFSYNC0x0080, KF_FLAG_FSYNC0x00000010 },
3280 { FHASLOCK0x4000, KF_FLAG_HASLOCK0x00000080 },
3281 { FNONBLOCK0x0004, KF_FLAG_NONBLOCK0x00000020 },
3282 { FREAD0x0001, KF_FLAG_READ0x00000001 },
3283 { FWRITE0x0002, KF_FLAG_WRITE0x00000002 },
3284 { O_CREAT0x0200, KF_FLAG_CREAT0x00000800 },
3285 { O_DIRECT0x00010000, KF_FLAG_DIRECT0x00000040 },
3286 { O_EXCL0x0800, KF_FLAG_EXCL0x00002000 },
3287 { O_EXEC0x00040000, KF_FLAG_EXEC0x00004000 },
3288 { O_EXLOCK0x0020, KF_FLAG_EXLOCK0x00000200 },
3289 { O_NOFOLLOW0x0100, KF_FLAG_NOFOLLOW0x00000400 },
3290 { O_SHLOCK0x0010, KF_FLAG_SHLOCK0x00000100 },
3291 { O_TRUNC0x0400, KF_FLAG_TRUNC0x00001000 }
3292 };
3293 unsigned int i;
3294 int kflags;
3295
3296 kflags = 0;
3297 for (i = 0; i < nitems(fflags_table)(sizeof((fflags_table)) / sizeof((fflags_table)[0])); i++)
3298 if (fflags & fflags_table[i].fflag)
3299 kflags |= fflags_table[i].kf_fflag;
3300 return (kflags);
3301}
3302
3303/* Trim unused data from kf_path by truncating the structure size. */
3304static void
3305pack_kinfo(struct kinfo_file *kif)
3306{
3307
3308 kif->kf_structsize = offsetof(struct kinfo_file, kf_path)__builtin_offsetof(struct kinfo_file, kf_path) +
3309 strlen(kif->kf_path) + 1;
3310 kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t))((((kif->kf_structsize)+((sizeof(uint64_t))-1))/(sizeof(uint64_t
)))*(sizeof(uint64_t)))
;
3311}
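/*
 * Illustrative sketch, not part of kern_descrip.c: the rounding step
 * above (roundup()/roundup2()), i.e. rounding a size up to a
 * power-of-two multiple. With an 8-byte alignment, a 13-byte record
 * becomes 16 bytes.
 */
#include <stddef.h>

static size_t
round_up_pow2(size_t n, size_t align)	/* 'align' must be a power of two */
{

	return ((n + align - 1) & ~(align - 1));
}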
3312
3313static void
3314export_file_to_kinfo(struct file *fp, int fd, cap_rights_t *rightsp,
3315 struct kinfo_file *kif, struct filedesc *fdp, int flags)
3316{
3317 int error;
3318
3319 bzero(kif, sizeof(*kif));
3320
3321 /* Set a default type to allow for empty fill_kinfo() methods. */
3322 kif->kf_type = KF_TYPE_UNKNOWN255;
3323 kif->kf_flags = xlate_fflags(fp->f_flag);
3324 if (rightsp != NULL((void *)0))
3325 kif->kf_cap_rights = *rightsp;
3326 else
3327 cap_rights_init(&kif->kf_cap_rights)__cap_rights_init(0, &kif->kf_cap_rights, 0ULL);
3328 kif->kf_fd = fd;
3329 kif->kf_ref_count = fp->f_count;
3330 kif->kf_offset = foffset_get(fp);
3331
3332 /*
3333 * This may drop the filedesc lock, so 'fp' cannot be
3334 * accessed after this call.
3335 */
3336 error = fo_fill_kinfo(fp, kif, fdp);
3337 if (error == 0)
3338 kif->kf_status |= KF_ATTR_VALID0x0001;
3339 if ((flags & KERN_FILEDESC_PACK_KINFO0x00000001U) != 0)
3340 pack_kinfo(kif);
3341 else
3342 kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t))(((sizeof(*kif))+((sizeof(uint64_t))-1))&(~((sizeof(uint64_t
))-1)))
;
3343}
3344
3345static void
3346export_vnode_to_kinfo(struct vnode *vp, int fd, int fflags,
3347 struct kinfo_file *kif, int flags)
3348{
3349 int error;
3350
3351 bzero(kif, sizeof(*kif));
3352
3353 kif->kf_type = KF_TYPE_VNODE1;
3354 error = vn_fill_kinfo_vnode(vp, kif);
3355 if (error == 0)
3356 kif->kf_status |= KF_ATTR_VALID0x0001;
3357 kif->kf_flags = xlate_fflags(fflags);
3358 cap_rights_init(&kif->kf_cap_rights)__cap_rights_init(0, &kif->kf_cap_rights, 0ULL);
3359 kif->kf_fd = fd;
3360 kif->kf_ref_count = -1;
3361 kif->kf_offset = -1;
3362 if ((flags & KERN_FILEDESC_PACK_KINFO0x00000001U) != 0)
3363 pack_kinfo(kif);
3364 else
3365 kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t))(((sizeof(*kif))+((sizeof(uint64_t))-1))&(~((sizeof(uint64_t
))-1)))
;
3366 vrele(vp);
3367}
3368
3369struct export_fd_buf {
3370 struct filedesc *fdp;
3371 struct sbuf *sb;
3372 ssize_t remainder;
3373 struct kinfo_file kif;
3374 int flags;
3375};
3376
3377static int
3378export_kinfo_to_sb(struct export_fd_buf *efbuf)
3379{
3380 struct kinfo_file *kif;
3381
3382 kif = &efbuf->kif;
3383 if (efbuf->remainder != -1) {
3384 if (efbuf->remainder < kif->kf_structsize) {
3385 /* Terminate export. */
3386 efbuf->remainder = 0;
3387 return (0);
3388 }
3389 efbuf->remainder -= kif->kf_structsize;
3390 }
3391 return (sbuf_bcat(efbuf->sb, kif, kif->kf_structsize) == 0 ? 0 : ENOMEM12);
3392}
3393
3394static int
3395export_file_to_sb(struct file *fp, int fd, cap_rights_t *rightsp,
3396 struct export_fd_buf *efbuf)
3397{
3398 int error;
3399
3400 if (efbuf->remainder == 0)
3401 return (0);
3402 export_file_to_kinfo(fp, fd, rightsp, &efbuf->kif, efbuf->fdp,
3403 efbuf->flags);
3404 FILEDESC_SUNLOCK(efbuf->fdp)__sx_sunlock(((&(efbuf->fdp)->fd_sx)), (((void *)0)
), (0))
;
3405 error = export_kinfo_to_sb(efbuf);
3406 FILEDESC_SLOCK(efbuf->fdp)(void)__sx_slock(((&(efbuf->fdp)->fd_sx)), 0, (((void
*)0)), (0))
;
3407 return (error);
3408}
3409
3410static int
3411export_vnode_to_sb(struct vnode *vp, int fd, int fflags,
3412 struct export_fd_buf *efbuf)
3413{
3414 int error;
3415
3416 if (efbuf->remainder == 0)
3417 return (0);
3418 if (efbuf->fdp != NULL((void *)0))
3419 FILEDESC_SUNLOCK(efbuf->fdp)__sx_sunlock(((&(efbuf->fdp)->fd_sx)), (((void *)0)
), (0))
;
3420 export_vnode_to_kinfo(vp, fd, fflags, &efbuf->kif, efbuf->flags);
3421 error = export_kinfo_to_sb(efbuf);
3422 if (efbuf->fdp != NULL((void *)0))
3423 FILEDESC_SLOCK(efbuf->fdp)(void)__sx_slock(((&(efbuf->fdp)->fd_sx)), 0, (((void
*)0)), (0))
;
3424 return (error);
3425}
3426
3427/*
3428 * Store a process's file descriptor information in an sbuf.
3429 *
3430 * Takes a locked proc as an argument and returns with the proc unlocked.
3431 */
3432int
3433kern_proc_filedesc_out(struct proc *p, struct sbuf *sb, ssize_t maxlen,
3434 int flags)
3435{
3436 struct file *fp;
3437 struct filedesc *fdp;
3438 struct export_fd_buf *efbuf;
3439 struct vnode *cttyvp, *textvp, *tracevp;
3440 int error, i;
3441 cap_rights_t rights;
3442
3443 PROC_LOCK_ASSERT(p, MA_OWNED)(void)0;
3444
3445 /* ktrace vnode */
3446 tracevp = p->p_tracevp;
3447 if (tracevp != NULL((void *)0))
3448 vref(tracevp);
3449 /* text vnode */
3450 textvp = p->p_textvp;
3451 if (textvp != NULL((void *)0))
3452 vref(textvp);
3453 /* Controlling tty. */
3454 cttyvp = NULL((void *)0);
3455 if (p->p_pgrp != NULL((void *)0) && p->p_pgrp->pg_session != NULL((void *)0)) {
3456 cttyvp = p->p_pgrp->pg_session->s_ttyvp;
3457 if (cttyvp != NULL((void *)0))
3458 vref(cttyvp);
3459 }
3460 fdp = fdhold(p);
3461 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3462 efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK0x0002);
3463 efbuf->fdp = NULL((void *)0);
3464 efbuf->sb = sb;
3465 efbuf->remainder = maxlen;
3466 efbuf->flags = flags;
3467 if (tracevp != NULL((void *)0))
3468 export_vnode_to_sb(tracevp, KF_FD_TYPE_TRACE-4, FREAD0x0001 | FWRITE0x0002,
3469 efbuf);
3470 if (textvp != NULL((void *)0))
3471 export_vnode_to_sb(textvp, KF_FD_TYPE_TEXT-5, FREAD0x0001, efbuf);
3472 if (cttyvp != NULL((void *)0))
3473 export_vnode_to_sb(cttyvp, KF_FD_TYPE_CTTY-6, FREAD0x0001 | FWRITE0x0002,
3474 efbuf);
3475 error = 0;
3476 if (fdp == NULL((void *)0))
3477 goto fail;
3478 efbuf->fdp = fdp;
3479 FILEDESC_SLOCK(fdp)(void)__sx_slock(((&(fdp)->fd_sx)), 0, (((void *)0)), (
0))
;
3480 /* working directory */
3481 if (fdp->fd_cdir != NULL((void *)0)) {
3482 vref(fdp->fd_cdir);
3483 export_vnode_to_sb(fdp->fd_cdir, KF_FD_TYPE_CWD-1, FREAD0x0001, efbuf);
3484 }
3485 /* root directory */
3486 if (fdp->fd_rdir != NULL((void *)0)) {
3487 vref(fdp->fd_rdir);
3488 export_vnode_to_sb(fdp->fd_rdir, KF_FD_TYPE_ROOT-2, FREAD0x0001, efbuf);
3489 }
3490 /* jail directory */
3491 if (fdp->fd_jdir != NULL((void *)0)) {
3492 vref(fdp->fd_jdir);
3493 export_vnode_to_sb(fdp->fd_jdir, KF_FD_TYPE_JAIL-3, FREAD0x0001, efbuf);
3494 }
3495 for (i = 0; fdp->fd_refcnt > 0 && i <= fdp->fd_lastfile; i++) {
3496 if ((fp = fdp->fd_ofilesfd_files->fdt_ofiles[i].fde_file) == NULL((void *)0))
3497 continue;
3498#ifdef CAPABILITIES1
3499 rights = *cap_rights(fdp, i);
3500#else /* !CAPABILITIES */
3501 cap_rights_init(&rights)__cap_rights_init(0, &rights, 0ULL);
3502#endif
3503 /*
3504 * Create the sysctl entry. It is OK to drop the filedesc
3505 * lock inside export_file_to_sb(), as we will re-validate
3506 * and re-evaluate the file's properties when the loop
3507 * continues.
3508 */
3509 error = export_file_to_sb(fp, i, &rights, efbuf);
3510 if (error != 0 || efbuf->remainder == 0)
3511 break;
3512 }
3513 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
3514 fddrop(fdp);
3515fail:
3516 free(efbuf, M_TEMP);
3517 return (error);
3518}
3519
3520#define FILEDESC_SBUF_SIZE(sizeof(struct kinfo_file) * 5) (sizeof(struct kinfo_file) * 5)
3521
3522/*
3523 * Get per-process file descriptors for use by procstat(1), et al.
3524 */
3525static int
3526sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGSstruct sysctl_oid *oidp, void *arg1, intmax_t arg2, struct sysctl_req
*req
)
3527{
3528 struct sbuf sb;
3529 struct proc *p;
3530 ssize_t maxlen;
3531 int error, error2, *name;
3532
3533 name = (int *)arg1;
3534
3535 sbuf_new_for_sysctl(&sb, NULL((void *)0), FILEDESC_SBUF_SIZE(sizeof(struct kinfo_file) * 5), req);
3536 sbuf_clear_flags(&sb, SBUF_INCLUDENUL0x00000002);
3537 error = pget((pid_t)name[0], PGET_CANDEBUG0x00004 | PGET_NOTWEXIT0x00010, &p);
3538 if (error != 0) {
3539 sbuf_delete(&sb);
3540 return (error);
3541 }
3542 maxlen = req->oldptr != NULL((void *)0) ? req->oldlen : -1;
3543 error = kern_proc_filedesc_out(p, &sb, maxlen,
3544 KERN_FILEDESC_PACK_KINFO0x00000001U);
3545 error2 = sbuf_finish(&sb);
3546 sbuf_delete(&sb);
3547 return (error != 0 ? error : error2);
3548}
3549
3550#ifdef KINFO_OFILE_SIZE1328
3551CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE)_Static_assert(sizeof(struct kinfo_ofile) == 1328, "compile-time assertion failed"
)
;
3552#endif
3553
3554#ifdef COMPAT_FREEBSD71
3555static void
3556kinfo_to_okinfo(struct kinfo_file *kif, struct kinfo_ofile *okif)
3557{
3558
3559 okif->kf_structsize = sizeof(*okif);
3560 okif->kf_type = kif->kf_type;
3561 okif->kf_fd = kif->kf_fd;
3562 okif->kf_ref_count = kif->kf_ref_count;
3563 okif->kf_flags = kif->kf_flags & (KF_FLAG_READ0x00000001 | KF_FLAG_WRITE0x00000002 |
3564 KF_FLAG_APPEND0x00000004 | KF_FLAG_ASYNC0x00000008 | KF_FLAG_FSYNC0x00000010 | KF_FLAG_NONBLOCK0x00000020 |
3565 KF_FLAG_DIRECT0x00000040 | KF_FLAG_HASLOCK0x00000080);
3566 okif->kf_offset = kif->kf_offset;
3567 okif->kf_vnode_type = kif->kf_vnode_type;
3568 okif->kf_sock_domain = kif->kf_sock_domain;
3569 okif->kf_sock_type = kif->kf_sock_type;
3570 okif->kf_sock_protocol = kif->kf_sock_protocol;
3571 strlcpy(okif->kf_path, kif->kf_path, sizeof(okif->kf_path));
3572 okif->kf_sa_local = kif->kf_sa_local;
3573 okif->kf_sa_peer = kif->kf_sa_peer;
3574}
3575
3576static int
3577export_vnode_for_osysctl(struct vnode *vp, int type, struct kinfo_file *kif,
3578 struct kinfo_ofile *okif, struct filedesc *fdp, struct sysctl_req *req)
3579{
3580 int error;
3581
3582 vref(vp);
3583 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
3584 export_vnode_to_kinfo(vp, type, 0, kif, KERN_FILEDESC_PACK_KINFO0x00000001U);
3585 kinfo_to_okinfo(kif, okif);
3586 error = SYSCTL_OUT(req, okif, sizeof(*okif))(req->oldfunc)(req, okif, sizeof(*okif));
3587 FILEDESC_SLOCK(fdp)(void)__sx_slock(((&(fdp)->fd_sx)), 0, (((void *)0)), (
0))
;
3588 return (error);
3589}
3590
3591/*
3592 * Get per-process file descriptors for use by procstat(1), et al.
3593 */
3594static int
3595sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGSstruct sysctl_oid *oidp, void *arg1, intmax_t arg2, struct sysctl_req
*req
)
3596{
3597 struct kinfo_ofile *okif;
3598 struct kinfo_file *kif;
3599 struct filedesc *fdp;
3600 int error, i, *name;
3601 struct file *fp;
3602 struct proc *p;
3603
3604 name = (int *)arg1;
3605 error = pget((pid_t)name[0], PGET_CANDEBUG0x00004 | PGET_NOTWEXIT0x00010, &p);
3606 if (error != 0)
3607 return (error);
3608 fdp = fdhold(p);
3609 PROC_UNLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&
(p)->p_mtx))))->lock_object.lo_data == 0) do { (void)0;
do { if (__builtin_expect((sdt_lockstat___adaptive__release->
id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->
id, (uintptr_t) (((&(p)->p_mtx))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((((&(p)->p_mtx))))->mtx_lock != _tid || !atomic_cmpset_long
(&(((((&(p)->p_mtx)))))->mtx_lock, (_tid), 0x00000004
)) __mtx_unlock_sleep(&(((((&(p)->p_mtx)))))->mtx_lock
, (((0))), ((((void *)0))), ((0))); } while (0)
;
3610 if (fdp == NULL((void *)0))
3611 return (ENOENT2);
3612 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK0x0002);
3613 okif = malloc(sizeof(*okif), M_TEMP, M_WAITOK0x0002);
3614 FILEDESC_SLOCK(fdp)(void)__sx_slock(((&(fdp)->fd_sx)), 0, (((void *)0)), (
0))
;
3615 if (fdp->fd_cdir != NULL((void *)0))
3616 export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD-1, kif,
3617 okif, fdp, req);
3618 if (fdp->fd_rdir != NULL((void *)0))
3619 export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT-2, kif,
3620 okif, fdp, req);
3621 if (fdp->fd_jdir != NULL((void *)0))
3622 export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL-3, kif,
3623 okif, fdp, req);
3624 for (i = 0; fdp->fd_refcnt > 0 && i <= fdp->fd_lastfile; i++) {
3625 if ((fp = fdp->fd_ofilesfd_files->fdt_ofiles[i].fde_file) == NULL((void *)0))
3626 continue;
3627 export_file_to_kinfo(fp, i, NULL((void *)0), kif, fdp,
3628 KERN_FILEDESC_PACK_KINFO0x00000001U);
3629 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
3630 kinfo_to_okinfo(kif, okif);
3631 error = SYSCTL_OUT(req, okif, sizeof(*okif))(req->oldfunc)(req, okif, sizeof(*okif));
3632 FILEDESC_SLOCK(fdp)(void)__sx_slock(((&(fdp)->fd_sx)), 0, (((void *)0)), (
0))
;
3633 if (error)
3634 break;
3635 }
3636 FILEDESC_SUNLOCK(fdp)__sx_sunlock(((&(fdp)->fd_sx)), (((void *)0)), (0));
3637 fddrop(fdp);
3638 free(kif, M_TEMP);
3639 free(okif, M_TEMP);
3640 return (0);
3641}
3642
3643static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc,struct sysctl_oid sysctl___kern_proc_ofiledesc = { .oid_parent
= ((&(&sysctl___kern_proc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (14), .oid_kind = (1|(0x80000000
|0x00040000)), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), .oid_name
= ("ofiledesc"), .oid_handler = (sysctl_kern_proc_ofiledesc)
, .oid_fmt = ("N"), .oid_descr = "Process ofiledesc entries" }
; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_proc_ofiledesc
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_proc_ofiledesc); _Static_assert
(((0x80000000|0x00040000) & 0xf) == 0 || ((0x80000000|0x00040000
) & 0) == 1, "compile-time assertion failed")
3644 CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_ofiledesc,struct sysctl_oid sysctl___kern_proc_ofiledesc = { .oid_parent
= ((&(&sysctl___kern_proc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (14), .oid_kind = (1|(0x80000000
|0x00040000)), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), .oid_name
= ("ofiledesc"), .oid_handler = (sysctl_kern_proc_ofiledesc)
, .oid_fmt = ("N"), .oid_descr = "Process ofiledesc entries" }
; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_proc_ofiledesc
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_proc_ofiledesc); _Static_assert
(((0x80000000|0x00040000) & 0xf) == 0 || ((0x80000000|0x00040000
) & 0) == 1, "compile-time assertion failed")
3645 "Process ofiledesc entries")struct sysctl_oid sysctl___kern_proc_ofiledesc = { .oid_parent
= ((&(&sysctl___kern_proc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = (14), .oid_kind = (1|(0x80000000
|0x00040000)), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), .oid_name
= ("ofiledesc"), .oid_handler = (sysctl_kern_proc_ofiledesc)
, .oid_fmt = ("N"), .oid_descr = "Process ofiledesc entries" }
; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_proc_ofiledesc
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_proc_ofiledesc); _Static_assert
(((0x80000000|0x00040000) & 0xf) == 0 || ((0x80000000|0x00040000
) & 0) == 1, "compile-time assertion failed")
;
3646#endif /* COMPAT_FREEBSD7 */
3647
3648int
3649vntype_to_kinfo(int vtype)
3650{
3651 struct {
3652 int vtype;
3653 int kf_vtype;
3654 } vtypes_table[] = {
3655 { VBAD, KF_VTYPE_VBAD },
3656 { VBLK, KF_VTYPE_VBLK },
3657 { VCHR, KF_VTYPE_VCHR },
3658 { VDIR, KF_VTYPE_VDIR },
3659 { VFIFO, KF_VTYPE_VFIFO },
3660 { VLNK, KF_VTYPE_VLNK },
3661 { VNON, KF_VTYPE_VNON },
3662 { VREG, KF_VTYPE_VREG },
3663 { VSOCK, KF_VTYPE_VSOCK }
3664 };
3665 unsigned int i;
3666
3667 /*
3668 * Perform vtype translation.
3669 */
3670 for (i = 0; i < nitems(vtypes_table); i++)
3671 if (vtypes_table[i].vtype == vtype)
3672 return (vtypes_table[i].kf_vtype);
3673
3674 return (KF_VTYPE_UNKNOWN);
3675}
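
Editorial aside: vntype_to_kinfo() above is a plain table-driven translation. A self-contained userland sketch of the same technique, using hypothetical type codes rather than the kernel's VNON/KF_VTYPE_* constants:

#include <stdio.h>

/* Hypothetical type codes, standing in for the kernel's constants. */
enum { IN_REG = 10, IN_DIR = 20 };
enum { OUT_REG = 1, OUT_DIR = 2, OUT_UNKNOWN = 255 };

static int
translate(int in)
{
	static const struct {
		int in;
		int out;
	} table[] = {
		{ IN_REG, OUT_REG },
		{ IN_DIR, OUT_DIR },
	};
	unsigned int i;

	/* Linear scan; fine for a table this small. */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].in == in)
			return (table[i].out);
	return (OUT_UNKNOWN);
}

int
main(void)
{
	/* Prints "1 2 255": unknown inputs fall through to the sentinel. */
	printf("%d %d %d\n", translate(IN_REG), translate(IN_DIR),
	    translate(99));
	return (0);
}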
3676
3677static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc,
3678 CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_filedesc,
3679 "Process filedesc entries");
3680
3681/*
3682 * Store a process's current working directory information in an sbuf.
3683 *
3684 * Takes a locked proc as argument and returns with the proc unlocked.
3685 */
3686int
3687kern_proc_cwd_out(struct proc *p, struct sbuf *sb, ssize_t maxlen)
3688{
3689 struct filedesc *fdp;
3690 struct export_fd_buf *efbuf;
3691 int error;
3692
3693 PROC_LOCK_ASSERT(p, MA_OWNED);
3694
3695 fdp = fdhold(p);
3696 PROC_UNLOCK(p);
3697 if (fdp == NULL)
3698 return (EINVAL);
3699
3700 efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
3701 efbuf->fdp = fdp;
3702 efbuf->sb = sb;
3703 efbuf->remainder = maxlen;
3704
3705 FILEDESC_SLOCK(fdp);
3706 if (fdp->fd_cdir == NULL)
3707 error = EINVAL;
3708 else {
3709 vref(fdp->fd_cdir);
3710 error = export_vnode_to_sb(fdp->fd_cdir, KF_FD_TYPE_CWD,
3711 FREAD, efbuf);
3712 }
3713 FILEDESC_SUNLOCK(fdp);
3714 fddrop(fdp);
3715 free(efbuf, M_TEMP);
3716 return (error);
3717}
3718
3719/*
3720 * Get per-process current working directory.
3721 */
3722static int
3723sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
3724{
3725 struct sbuf sb;
3726 struct proc *p;
3727 ssize_t maxlen;
3728 int error, error2, *name;
3729
3730 name = (int *)arg1;
3731
3732 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file), req);
3733 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
3734 error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
3735 if (error != 0) {
3736 sbuf_delete(&sb);
3737 return (error);
3738 }
3739 maxlen = req->oldptr != NULL ? req->oldlen : -1;
3740 error = kern_proc_cwd_out(p, &sb, maxlen);
3741 error2 = sbuf_finish(&sb);
3742 sbuf_delete(&sb);
3743 return (error != 0 ? error : error2);
3744}
3745
3746static SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD|CTLFLAG_MPSAFE,
3747 sysctl_kern_proc_cwd, "Process current working directory");
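
Editorial aside: a minimal userland sketch of querying this node. The MIB constants and struct kinfo_file are the real FreeBSD interfaces; error handling is abbreviated, and the variable-length packing of the record is glossed over by reading into a full-sized, zeroed struct:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct kinfo_file kf;
	size_t len = sizeof(kf);
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_CWD, (int)getpid() };

	memset(&kf, 0, sizeof(kf));
	/* The handler above emits one kinfo_file record for the cwd vnode. */
	if (sysctl(mib, 4, &kf, &len, NULL, 0) == -1) {
		perror("sysctl");
		return (1);
	}
	printf("cwd: %s\n", kf.kf_path);
	return (0);
}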
3748
3749#ifdef DDB
3750/*
3751 * For the purposes of debugging, generate a human-readable string for the
3752 * file type.
3753 */
3754static const char *
3755file_type_to_name(short type)
3756{
3757
3758 switch (type) {
3759 case 0:
3760 return ("zero");
3761 case DTYPE_VNODE:
3762 return ("vnod");
3763 case DTYPE_SOCKET:
3764 return ("sock");
3765 case DTYPE_PIPE:
3766 return ("pipe");
3767 case DTYPE_FIFO:
3768 return ("fifo");
3769 case DTYPE_KQUEUE:
3770 return ("kque");
3771 case DTYPE_CRYPTO:
3772 return ("crpt");
3773 case DTYPE_MQUEUE:
3774 return ("mque");
3775 case DTYPE_SHM:
3776 return ("shm");
3777 case DTYPE_SEM:
3778 return ("ksem");
3779 default:
3780 return ("unkn");
3781 }
3782}
3783
3784/*
3785 * For the purposes of debugging, identify a process (if any, perhaps one of
3786 * many) that references the passed file in its file descriptor array. Return
3787 * NULL if none.
3788 */
3789static struct proc *
3790file_to_first_proc(struct file *fp)
3791{
3792 struct filedesc *fdp;
3793 struct proc *p;
3794 int n;
3795
3796 FOREACH_PROC_IN_SYSTEM(p) {
3797 if (p->p_state == PRS_NEW)
3798 continue;
3799 fdp = p->p_fd;
3800 if (fdp == NULL)
3801 continue;
3802 for (n = 0; n <= fdp->fd_lastfile; n++) {
3803 if (fp == fdp->fd_ofiles[n].fde_file)
3804 return (p);
3805 }
3806 }
3807 return (NULL);
3808}
3809
3810static void
3811db_print_file(struct file *fp, int header)
3812{
3813 struct proc *p;
3814
3815 if (header)
3816 db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n",
3817 "File", "Type", "Data", "Flag", "GCFl", "Count",
3818 "MCount", "Vnode", "FPID", "FCmd");
3819 p = file_to_first_proc(fp);
3820 db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp,
3821 file_type_to_name(fp->f_type), fp->f_data, fp->f_flag,
3822 0, fp->f_count, 0, fp->f_vnode,
3823 p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
3824}
3825
3826DB_SHOW_COMMAND(file, db_show_file)
3827{
3828 struct file *fp;
3829
3830 if (!have_addr) {
3831 db_printf("usage: show file <addr>\n");
3832 return;
3833 }
3834 fp = (struct file *)addr;
3835 db_print_file(fp, 1);
3836}
3837
3838DB_SHOW_COMMAND(files, db_show_files)
3839{
3840 struct filedesc *fdp;
3841 struct file *fp;
3842 struct proc *p;
3843 int header;
3844 int n;
3845
3846 header = 1;
3847 FOREACH_PROC_IN_SYSTEM(p) {
3848 if (p->p_state == PRS_NEW)
3849 continue;
3850 if ((fdp = p->p_fd) == NULL)
3851 continue;
3852 for (n = 0; n <= fdp->fd_lastfile; ++n) {
3853 if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
3854 continue;
3855 db_print_file(fp, header);
3856 header = 0;
3857 }
3858 }
3859}
3860#endif
3861
3862SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
3863 &maxfilesperproc, 0, "Maximum files allowed open per process");
3864
3865SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
3866 &maxfiles, 0, "Maximum number of files");
3867
3868SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
3869 __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files");
3870
3871/* ARGSUSED*/
3872static void
3873filelistinit(void *dummy)
3874{
3875
3876 file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
3877 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
3878 filedesc0_zone = uma_zcreate("filedesc0", sizeof(struct filedesc0),
3879 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
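	/*
	 * Editorial annotation: UMA_ZONE_NOFREE keeps struct file storage
	 * type-stable (slabs are never returned to the VM system), so
	 * lockless readers of fields such as f_count can never dereference
	 * unmapped memory.
	 */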
3880 mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
3881}
3882SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
3883
3884/*-------------------------------------------------------------------*/
3885
3886static int
3887badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
3888 int flags, struct thread *td)
3889{
3890
3891 return (EBADF);
3892}
3893
3894static int
3895badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
3896 struct thread *td)
3897{
3898
3899 return (EINVAL);
3900}
3901
3902static int
3903badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
3904 struct thread *td)
3905{
3906
3907 return (EBADF);
3908}
3909
3910static int
3911badfo_poll(struct file *fp, int events, struct ucred *active_cred,
3912 struct thread *td)
3913{
3914
3915 return (0);
3916}
3917
3918static int
3919badfo_kqfilter(struct file *fp, struct knote *kn)
3920{
3921
3922 return (EBADF);
3923}
3924
3925static int
3926badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
3927 struct thread *td)
3928{
3929
3930 return (EBADF);
3931}
3932
3933static int
3934badfo_close(struct file *fp, struct thread *td)
3935{
3936
3937 return (0);
3938}
3939
3940static int
3941badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
3942 struct thread *td)
3943{
3944
3945 return (EBADF);
3946}
3947
3948static int
3949badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
3950 struct thread *td)
3951{
3952
3953 return (EBADF);
3954}
3955
3956static int
3957badfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
3958 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
3959 struct thread *td)
3960{
3961
3962 return (EBADF);
3963}
3964
3965static int
3966badfo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
3967{
3968
3969 return (0);
3970}
3971
3972struct fileops badfileops = {
3973 .fo_read = badfo_readwrite,
3974 .fo_write = badfo_readwrite,
3975 .fo_truncate = badfo_truncate,
3976 .fo_ioctl = badfo_ioctl,
3977 .fo_poll = badfo_poll,
3978 .fo_kqfilter = badfo_kqfilter,
3979 .fo_stat = badfo_stat,
3980 .fo_close = badfo_close,
3981 .fo_chmod = badfo_chmod,
3982 .fo_chown = badfo_chown,
3983 .fo_sendfile = badfo_sendfile,
3984 .fo_fill_kinfo = badfo_fill_kinfo,
3985};
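
Editorial aside: badfileops is the safe default operations vector. A freshly allocated struct file points at it until the owning subsystem installs real operations, so a half-constructed file fails with EBADF rather than calling through null pointers. A hedged sketch of the pattern (simplified, not the literal allocator code; finit() is the real initialization helper):

	/* At allocation time the file is deliberately unusable. */
	fp->f_ops = &badfileops;

	/* Later, once the backing object exists, real ops are installed,
	 * e.g. for a vnode-backed file: */
	finit(fp, FREAD | FWRITE, DTYPE_VNODE, data, &vnops);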
3986
3987int
3988invfo_rdwr(struct file *fp, struct uio *uio, struct ucred *active_cred,
3989 int flags, struct thread *td)
3990{
3991
3992 return (EOPNOTSUPP);
3993}
3994
3995int
3996invfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
3997 struct thread *td)
3998{
3999
4000 return (EINVAL);
4001}
4002
4003int
4004invfo_ioctl(struct file *fp, u_long com, void *data,
4005 struct ucred *active_cred, struct thread *td)
4006{
4007
4008 return (ENOTTY);
4009}
4010
4011int
4012invfo_poll(struct file *fp, int events, struct ucred *active_cred,
4013 struct thread *td)
4014{
4015
4016 return (poll_no_poll(events));
4017}
4018
4019int
4020invfo_kqfilter(struct file *fp, struct knote *kn)
4021{
4022
4023 return (EINVAL);
4024}
4025
4026int
4027invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
4028 struct thread *td)
4029{
4030
4031 return (EINVAL);
4032}
4033
4034int
4035invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
4036 struct thread *td)
4037{
4038
4039 return (EINVAL);
4040}
4041
4042int
4043invfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
4044 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
4045 struct thread *td)
4046{
4047
4048 return (EINVAL);
4049}
4050
4051/*-------------------------------------------------------------------*/
4052
4053/*
4054 * File Descriptor pseudo-device driver (/dev/fd/).
4055 *
4056 * Opening minor device N dup()s the file (if any) connected to file
4057 * descriptor N belonging to the calling process. Note that this driver
4058 * consists of only the ``open()'' routine, because all subsequent
4059 * references to this file go directly to the other driver.
4060 *
4061 * XXX: we could give this one a cloning event handler if necessary.
4062 */
4063
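
Editorial aside: the dup-on-open behavior described above can be observed from userland. A minimal sketch (error handling abbreviated; note that this driver creates only units 0-2 in fildesc_drvinit() below, and the open mode must be compatible with the original descriptor's):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int dupfd;

	/* Opening /dev/fd/0 behaves like dup(0) for the calling process. */
	dupfd = open("/dev/fd/0", O_RDONLY);
	if (dupfd == -1) {
		perror("open");
		return (1);
	}
	printf("stdin duplicated as fd %d\n", dupfd);
	return (0);
}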
4064/* ARGSUSED */
4065static int
4066fdopen(struct cdev *dev, int mode, int type, struct thread *td)
4067{
4068
4069 /*
4070 * XXX Kludge: set curthread->td_dupfd to contain the value of the
4071 * file descriptor being sought for duplication. The error
4072 * return ensures that the vnode for this device will be released
4073 * by vn_open. Open will detect this special error and take the
4074 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
4075 * will simply report the error.
4076 */
4077 td->td_dupfd = dev2unit(dev);
4078 return (ENODEV);
4079}
4080
4081static struct cdevsw fildesc_cdevsw = {
4082 .d_version = D_VERSION,
4083 .d_open = fdopen,
4084 .d_name = "FD",
4085};
4086
4087static void
4088fildesc_drvinit(void *unused)
4089{
4090 struct cdev *dev;
4091
4092 dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
4093 UID_ROOT, GID_WHEEL, 0666, "fd/0");
4094 make_dev_alias(dev, "stdin");
4095 dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
4096 UID_ROOT, GID_WHEEL, 0666, "fd/1");
4097 make_dev_alias(dev, "stdout");
4098 dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
4099 UID_ROOT, GID_WHEEL, 0666, "fd/2");
4100 make_dev_alias(dev, "stderr");
4101}
4102
4103SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);