Bug Summary

File: kern/kern_sig.c
Warning: line 1229, column 11
Copies out a struct with a union element with different sizes
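
To make the warning class concrete, here is a minimal, hypothetical sketch of the pattern the alpha.security.KernelMemoryDisclosure checker (enabled on the analyzer command line below) reports. It is not the code actually flagged at kern_sig.c:1229; the struct, function, and field names are invented for illustration.

/*
 * Hypothetical sketch only -- not the code flagged at kern_sig.c:1229.
 * A stack-allocated struct contains a union whose members have
 * different sizes.  Writing only the smaller member and then copying
 * the whole struct to userspace discloses the union's remaining,
 * uninitialized kernel stack bytes.
 */
#include <sys/param.h>
#include <sys/systm.h>

struct example_report {                 /* hypothetical struct */
        int     kind;
        union {
                int     small;          /* 4 bytes */
                char    big[64];        /* 64 bytes: member sizes differ */
        } u;
};

static int
example_copyout(void *uaddr)
{
        struct example_report r;        /* stack memory, not zeroed */

        /* bzero(&r, sizeof(r)); here would avoid the leak */
        r.kind = 1;
        r.u.small = 42;                 /* only 4 of the union's 64 bytes written */

        /* the other 60 bytes of r.u are uninitialized kernel stack */
        return (copyout(&r, uaddr, sizeof(r)));
}

The usual remedies are to zero the whole structure before filling it in, or to copy out only the member that was actually written.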

Annotated Source Code


clang -cc1 -triple x86_64-unknown-freebsd11.2 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name kern_sig.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model static -mthread-model posix -mdisable-fp-elim -relaxed-aliasing -masm-verbose -mconstructor-aliases -ffreestanding -mcode-model kernel -target-cpu x86-64 -target-feature -mmx -target-feature -sse -target-feature -aes -target-feature -avx -disable-red-zone -no-implicit-float -dwarf-column-info -debugger-tuning=gdb -nostdsysteminc -nobuiltininc -resource-dir /root/kernel-uninitialized-memory-checker/build/lib/clang/8.0.0 -include opt_global.h -I . -I /root/freebsd/sys -I /root/freebsd/sys/contrib/ck/include -I /root/freebsd/sys/contrib/libfdt -D _KERNEL -D HAVE_KERNEL_OPTION_HEADERS -D __printf__=__freebsd_kprintf__ -O2 -Wno-pointer-sign -Wno-unknown-pragmas -Wno-error-tautological-compare -Wno-error-empty-body -Wno-error-parentheses-equality -Wno-error-unused-function -Wno-error-pointer-sign -Wno-error-shift-negative-value -Wno-address-of-packed-member -std=iso9899:1999 -fdebug-compilation-dir /usr/obj/root/freebsd/amd64.amd64/sys/GENERIC -ferror-limit 19 -fmessage-length 0 -fwrapv -stack-protector 1 -fobjc-runtime=gnustep -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker alpha.security.KernelMemoryDisclosure -analyzer-disable-checker core,unix,deadcode,nullability -analyzer-output=html -o /root/analyzer/2018-12-28-044519-76292-1 -x c /root/freebsd/sys/kern/kern_sig.c -faddrsig
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
37 */
38
39#include <sys/cdefs.h>
 40__FBSDID("$FreeBSD$");
41
42#include "opt_ktrace.h"
43
44#include <sys/param.h>
45#include <sys/ctype.h>
46#include <sys/systm.h>
47#include <sys/signalvar.h>
48#include <sys/vnode.h>
49#include <sys/acct.h>
50#include <sys/bus.h>
51#include <sys/capsicum.h>
52#include <sys/compressor.h>
53#include <sys/condvar.h>
54#include <sys/event.h>
55#include <sys/fcntl.h>
56#include <sys/imgact.h>
57#include <sys/kernel.h>
58#include <sys/ktr.h>
59#include <sys/ktrace.h>
60#include <sys/lock.h>
61#include <sys/malloc.h>
62#include <sys/mutex.h>
63#include <sys/refcount.h>
64#include <sys/namei.h>
65#include <sys/proc.h>
66#include <sys/procdesc.h>
67#include <sys/posix4.h>
68#include <sys/pioctl.h>
69#include <sys/racct.h>
70#include <sys/resourcevar.h>
71#include <sys/sdt.h>
72#include <sys/sbuf.h>
73#include <sys/sleepqueue.h>
74#include <sys/smp.h>
75#include <sys/stat.h>
76#include <sys/sx.h>
77#include <sys/syscallsubr.h>
78#include <sys/sysctl.h>
79#include <sys/sysent.h>
80#include <sys/syslog.h>
81#include <sys/sysproto.h>
82#include <sys/timers.h>
83#include <sys/unistd.h>
84#include <sys/wait.h>
85#include <vm/vm.h>
86#include <vm/vm_extern.h>
87#include <vm/uma.h>
88
89#include <sys/jail.h>
90
91#include <machine/cpu.h>
92
93#include <security/audit/audit.h>
94
 95#define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
96
 97SDT_PROVIDER_DECLARE(proc);
 98SDT_PROBE_DEFINE3(proc, , , signal__send,
 99 "struct thread *", "struct proc *", "int");
100SDT_PROBE_DEFINE2(proc, , , signal__clear,
101 "int", "ksiginfo_t *");
102SDT_PROBE_DEFINE3(proc, , , signal__discard,
103 "struct thread *", "struct proc *", "int");
104
105static int coredump(struct thread *);
106static int killpg1(struct thread *td, int sig, int pgid, int all,
107 ksiginfo_t *ksi);
108static int issignal(struct thread *td);
109static int sigprop(int sig);
110static void tdsigwakeup(struct thread *, int, sig_t, int);
111static int sig_suspend_threads(struct thread *, struct proc *, int);
112static int filt_sigattach(struct knote *kn);
113static void filt_sigdetach(struct knote *kn);
114static int filt_signal(struct knote *kn, long hint);
115static struct thread *sigtd(struct proc *p, int sig, int prop);
116static void sigqueue_start(void);
117
118static uma_zone_t ksiginfo_zone = NULL;
119struct filterops sig_filtops = {
120 .f_isfd = 0,
121 .f_attach = filt_sigattach,
122 .f_detach = filt_sigdetach,
123 .f_event = filt_signal,
124};
125
126static int kern_logsigexit = 1;
127SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
128 &kern_logsigexit, 0,
129 "Log processes quitting on abnormal signals to syslog(3)");
130
131static int kern_forcesigexit = 1;
132SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
133 &kern_forcesigexit, 0, "Force trap signal to be handled");
134
135static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
136 "POSIX real time signal");
137
138static int max_pending_per_proc = 128;
139SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
140 &max_pending_per_proc, 0, "Max pending signals per proc");
141
142static int preallocate_siginfo = 1024;
143SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
144 &preallocate_siginfo, 0, "Preallocated signal memory size");
145
146static int signal_overflow = 0;
147SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
148 &signal_overflow, 0, "Number of signals overflew");
149
150static int signal_alloc_fail = 0;
151SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
152 &signal_alloc_fail, 0, "signals failed to be allocated");
153
154static int kern_lognosys = 0;
155SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
156 "Log invalid syscalls");
157
158SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
159
160/*
161 * Policy -- Can ucred cr1 send SIGIO to process cr2?
162 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
163 * in the right situations.
164 */
165#define CANSIGIO(cr1, cr2) \
166 ((cr1)->cr_uid == 0 || \
167 (cr1)->cr_ruid == (cr2)->cr_ruid || \
168 (cr1)->cr_uid == (cr2)->cr_ruid || \
169 (cr1)->cr_ruid == (cr2)->cr_uid || \
170 (cr1)->cr_uid == (cr2)->cr_uid)
171
172static int sugid_coredump;
173SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
174 &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
175
176static int capmode_coredump;
177SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
178 &capmode_coredump, 0, "Allow processes in capability mode to dump core");
179
180static int do_coredump = 1;
181SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
182 &do_coredump, 0, "Enable/Disable coredumps");
183
184static int set_core_nodump_flag = 0;
185SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
186 0, "Enable setting the NODUMP flag on coredump files");
187
188static int coredump_devctl = 0;
189SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
190 0, "Generate a devctl notification when processes coredump");
191
192/*
193 * Signal properties and actions.
194 * The array below categorizes the signals and their default actions
195 * according to the following properties:
196 */
197#define SIGPROP_KILL 0x01 /* terminates process by default */
198#define SIGPROP_CORE 0x02 /* ditto and coredumps */
199#define SIGPROP_STOP 0x04 /* suspend process */
200#define SIGPROP_TTYSTOP 0x08 /* ditto, from tty */
201#define SIGPROP_IGNORE 0x10 /* ignore by default */
202#define SIGPROP_CONT 0x20 /* continue if suspended */
203#define SIGPROP_CANTMASK 0x40 /* non-maskable, catchable */
204
205static int sigproptbl[NSIG] = {
206 [SIGHUP] = SIGPROP_KILL,
207 [SIGINT] = SIGPROP_KILL,
208 [SIGQUIT] = SIGPROP_KILL | SIGPROP_CORE,
209 [SIGILL] = SIGPROP_KILL | SIGPROP_CORE,
210 [SIGTRAP] = SIGPROP_KILL | SIGPROP_CORE,
211 [SIGABRT] = SIGPROP_KILL | SIGPROP_CORE,
212 [SIGEMT] = SIGPROP_KILL | SIGPROP_CORE,
213 [SIGFPE] = SIGPROP_KILL | SIGPROP_CORE,
214 [SIGKILL] = SIGPROP_KILL,
215 [SIGBUS] = SIGPROP_KILL | SIGPROP_CORE,
216 [SIGSEGV] = SIGPROP_KILL | SIGPROP_CORE,
217 [SIGSYS] = SIGPROP_KILL | SIGPROP_CORE,
218 [SIGPIPE] = SIGPROP_KILL,
219 [SIGALRM] = SIGPROP_KILL,
220 [SIGTERM] = SIGPROP_KILL,
221 [SIGURG] = SIGPROP_IGNORE,
222 [SIGSTOP] = SIGPROP_STOP,
223 [SIGTSTP] = SIGPROP_STOP | SIGPROP_TTYSTOP,
224 [SIGCONT] = SIGPROP_IGNORE | SIGPROP_CONT,
225 [SIGCHLD] = SIGPROP_IGNORE,
226 [SIGTTIN] = SIGPROP_STOP | SIGPROP_TTYSTOP,
227 [SIGTTOU] = SIGPROP_STOP | SIGPROP_TTYSTOP,
228 [SIGIO] = SIGPROP_IGNORE,
229 [SIGXCPU] = SIGPROP_KILL,
230 [SIGXFSZ] = SIGPROP_KILL,
231 [SIGVTALRM] = SIGPROP_KILL,
232 [SIGPROF] = SIGPROP_KILL,
233 [SIGWINCH] = SIGPROP_IGNORE,
234 [SIGINFO] = SIGPROP_IGNORE,
235 [SIGUSR1] = SIGPROP_KILL,
236 [SIGUSR2] = SIGPROP_KILL,
237};
238
239static void reschedule_signals(struct proc *p, sigset_t block, int flags);
240
241static void
242sigqueue_start(void)
243{
244 ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
245 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
246 uma_prealloc(ksiginfo_zone, preallocate_siginfo);
247 p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
248 p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
249 p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
250}
251
252ksiginfo_t *
253ksiginfo_alloc(int wait)
254{
255 int flags;
256
257 flags = M_ZERO;
258 if (! wait)
259 flags |= M_NOWAIT;
260 if (ksiginfo_zone != NULL)
261 return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
262 return (NULL);
263}
264
265void
266ksiginfo_free(ksiginfo_t *ksi)
267{
268 uma_zfree(ksiginfo_zone, ksi);
269}
270
271static __inline int
272ksiginfo_tryfree(ksiginfo_t *ksi)
273{
274 if (!(ksi->ksi_flags & KSI_EXT)) {
275 uma_zfree(ksiginfo_zone, ksi);
276 return (1);
277 }
278 return (0);
279}
280
281void
282sigqueue_init(sigqueue_t *list, struct proc *p)
283{
284 SIGEMPTYSET(list->sq_signals);
285 SIGEMPTYSET(list->sq_kill);
286 SIGEMPTYSET(list->sq_ptrace);
287 TAILQ_INIT(&list->sq_list);
288 list->sq_proc = p;
289 list->sq_flags = SQ_INIT;
290}
291
292/*
293 * Get a signal's ksiginfo.
294 * Return:
295 * 0 - signal not found
296 * others - signal number
297 */
298static int
299sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
300{
301 struct proc *p = sq->sq_proc;
302 struct ksiginfo *ksi, *next;
303 int count = 0;
304
305 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
 306
307 if (!SIGISMEMBER(sq->sq_signals, signo))
308 return (0);
 309
310 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
311 count++;
312 SIGDELSET(sq->sq_ptrace, signo);
313 si->ksi_flags |= KSI_PTRACE;
314 }
315 if (SIGISMEMBER(sq->sq_kill, signo)) {
316 count++;
317 if (count == 1)
318 SIGDELSET(sq->sq_kill, signo);
319 }
 320
321 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
322 if (ksi->ksi_signo == signo) {
323 if (count == 0) {
324 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
325 ksi->ksi_sigq = NULL;
326 ksiginfo_copy(ksi, si);
327 if (ksiginfo_tryfree(ksi) && p != NULL)
328 p->p_pendingcnt--;
329 }
330 if (++count > 1)
331 break;
332 }
333 }
 334
335 if (count <= 1)
336 SIGDELSET(sq->sq_signals, signo);
337 si->ksi_signo = signo;
338 return (signo);
339}
340
341void
342sigqueue_take(ksiginfo_t *ksi)
343{
344 struct ksiginfo *kp;
345 struct proc *p;
346 sigqueue_t *sq;
347
348 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
349 return;
 350
351 p = sq->sq_proc;
352 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
353 ksi->ksi_sigq = NULL;
354 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
355 p->p_pendingcnt--;
 356
357 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
358 kp = TAILQ_NEXT(kp, ksi_link)) {
359 if (kp->ksi_signo == ksi->ksi_signo)
360 break;
361 }
362 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
363 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
364 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
365}
366
367static int
368sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
369{
370 struct proc *p = sq->sq_proc;
371 struct ksiginfo *ksi;
372 int ret = 0;
373
374 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
 375
376 /*
377 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
378 * for these signals.
379 */
380 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
381 SIGADDSET(sq->sq_kill, signo);
382 goto out_set_bit;
383 }
 384
385 /* directly insert the ksi, don't copy it */
386 if (si->ksi_flags & KSI_INS) {
387 if (si->ksi_flags & KSI_HEAD)
388 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
389 else
390 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
391 si->ksi_sigq = sq;
392 goto out_set_bit;
393 }
 394
395 if (__predict_false(ksiginfo_zone == NULL)) {
396 SIGADDSET(sq->sq_kill, signo);
397 goto out_set_bit;
398 }
 399
400 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
401 signal_overflow++;
402 ret = EAGAIN;
403 } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
404 signal_alloc_fail++;
405 ret = EAGAIN;
406 } else {
407 if (p != NULL)
408 p->p_pendingcnt++;
409 ksiginfo_copy(si, ksi);
410 ksi->ksi_signo = signo;
411 if (si->ksi_flags & KSI_HEAD)
412 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
413 else
414 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
415 ksi->ksi_sigq = sq;
416 }
 417
418 if (ret != 0) {
419 if ((si->ksi_flags & KSI_PTRACE) != 0) {
420 SIGADDSET(sq->sq_ptrace, signo);
421 ret = 0;
422 goto out_set_bit;
423 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
424 (si->ksi_flags & KSI_SIGQ) == 0) {
425 SIGADDSET(sq->sq_kill, signo);
426 ret = 0;
427 goto out_set_bit;
428 }
429 return (ret);
430 }
 431
432out_set_bit:
433 SIGADDSET(sq->sq_signals, signo);
434 return (ret);
435}
436
437void
438sigqueue_flush(sigqueue_t *sq)
439{
440 struct proc *p = sq->sq_proc;
441 ksiginfo_t *ksi;
442
443 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"))do { if (__builtin_expect((!(sq->sq_flags & 0x01)), 0)
) panic ("sigqueue not inited"); } while (0)
;
444
445 if (p != NULL((void *)0))
446 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (446))
;
447
448 while ((ksi = TAILQ_FIRST(&sq->sq_list)((&sq->sq_list)->tqh_first)) != NULL((void *)0)) {
449 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link)do { ; ; do { if ((((ksi))->ksi_link.tqe_next) != ((void *
)0) && (((ksi))->ksi_link.tqe_next)->ksi_link.tqe_prev
!= &((ksi)->ksi_link.tqe_next)) panic("Bad link elm %p next->prev != elm"
, (ksi)); } while (0); do { if (*(ksi)->ksi_link.tqe_prev !=
(ksi)) panic("Bad link elm %p prev->next != elm", (ksi));
} while (0); if (((((ksi))->ksi_link.tqe_next)) != ((void
*)0)) (((ksi))->ksi_link.tqe_next)->ksi_link.tqe_prev =
(ksi)->ksi_link.tqe_prev; else { (&sq->sq_list)->
tqh_last = (ksi)->ksi_link.tqe_prev; ; } *(ksi)->ksi_link
.tqe_prev = (((ksi))->ksi_link.tqe_next); ; ; ; } while (0
)
;
450 ksi->ksi_sigq = NULL((void *)0);
451 if (ksiginfo_tryfree(ksi) && p != NULL((void *)0))
452 p->p_pendingcnt--;
453 }
454
455 SIGEMPTYSET(sq->sq_signals)do { int __i; for (__i = 0; __i < 4; __i++) (sq->sq_signals
).__bits[__i] = 0; } while (0)
;
456 SIGEMPTYSET(sq->sq_kill)do { int __i; for (__i = 0; __i < 4; __i++) (sq->sq_kill
).__bits[__i] = 0; } while (0)
;
457 SIGEMPTYSET(sq->sq_ptrace)do { int __i; for (__i = 0; __i < 4; __i++) (sq->sq_ptrace
).__bits[__i] = 0; } while (0)
;
458}
459
460static void
461sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
462{
463 sigset_t tmp;
464 struct proc *p1, *p2;
465 ksiginfo_t *ksi, *next;
466
467 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"))do { if (__builtin_expect((!(src->sq_flags & 0x01)), 0
)) panic ("src sigqueue not inited"); } while (0)
;
468 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"))do { if (__builtin_expect((!(dst->sq_flags & 0x01)), 0
)) panic ("dst sigqueue not inited"); } while (0)
;
469 p1 = src->sq_proc;
470 p2 = dst->sq_proc;
471 /* Move siginfo to target list */
472 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next)for ((ksi) = (((&src->sq_list))->tqh_first); (ksi) &&
((next) = (((ksi))->ksi_link.tqe_next), 1); (ksi) = (next
))
{
473 if (SIGISMEMBER(*set, ksi->ksi_signo)((*set).__bits[(((ksi->ksi_info.si_signo) - 1) >> 5)
] & (1 << (((ksi->ksi_info.si_signo) - 1) & 31
)))
) {
474 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link)do { ; ; do { if ((((ksi))->ksi_link.tqe_next) != ((void *
)0) && (((ksi))->ksi_link.tqe_next)->ksi_link.tqe_prev
!= &((ksi)->ksi_link.tqe_next)) panic("Bad link elm %p next->prev != elm"
, (ksi)); } while (0); do { if (*(ksi)->ksi_link.tqe_prev !=
(ksi)) panic("Bad link elm %p prev->next != elm", (ksi));
} while (0); if (((((ksi))->ksi_link.tqe_next)) != ((void
*)0)) (((ksi))->ksi_link.tqe_next)->ksi_link.tqe_prev =
(ksi)->ksi_link.tqe_prev; else { (&src->sq_list)->
tqh_last = (ksi)->ksi_link.tqe_prev; ; } *(ksi)->ksi_link
.tqe_prev = (((ksi))->ksi_link.tqe_next); ; ; ; } while (0
)
;
475 if (p1 != NULL((void *)0))
476 p1->p_pendingcnt--;
477 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link)do { do { if (*(&dst->sq_list)->tqh_last != ((void *
)0)) panic("Bad tailq NEXT(%p->tqh_last) != NULL", (&dst
->sq_list)); } while (0); (((ksi))->ksi_link.tqe_next) =
((void *)0); (ksi)->ksi_link.tqe_prev = (&dst->sq_list
)->tqh_last; *(&dst->sq_list)->tqh_last = (ksi);
(&dst->sq_list)->tqh_last = &(((ksi))->ksi_link
.tqe_next); ; ; } while (0)
;
478 ksi->ksi_sigq = dst;
479 if (p2 != NULL((void *)0))
480 p2->p_pendingcnt++;
481 }
482 }
483
484 /* Move pending bits to target list */
485 tmp = src->sq_kill;
486 SIGSETAND(tmp, *set)do { int __i; for (__i = 0; __i < 4; __i++) (tmp).__bits[__i
] &= (*set).__bits[__i]; } while (0)
;
487 SIGSETOR(dst->sq_kill, tmp)do { int __i; for (__i = 0; __i < 4; __i++) (dst->sq_kill
).__bits[__i] |= (tmp).__bits[__i]; } while (0)
;
488 SIGSETNAND(src->sq_kill, tmp)do { int __i; for (__i = 0; __i < 4; __i++) (src->sq_kill
).__bits[__i] &= ~(tmp).__bits[__i]; } while (0)
;
489
490 tmp = src->sq_ptrace;
491 SIGSETAND(tmp, *set)do { int __i; for (__i = 0; __i < 4; __i++) (tmp).__bits[__i
] &= (*set).__bits[__i]; } while (0)
;
492 SIGSETOR(dst->sq_ptrace, tmp)do { int __i; for (__i = 0; __i < 4; __i++) (dst->sq_ptrace
).__bits[__i] |= (tmp).__bits[__i]; } while (0)
;
493 SIGSETNAND(src->sq_ptrace, tmp)do { int __i; for (__i = 0; __i < 4; __i++) (src->sq_ptrace
).__bits[__i] &= ~(tmp).__bits[__i]; } while (0)
;
494
495 tmp = src->sq_signals;
496 SIGSETAND(tmp, *set)do { int __i; for (__i = 0; __i < 4; __i++) (tmp).__bits[__i
] &= (*set).__bits[__i]; } while (0)
;
497 SIGSETOR(dst->sq_signals, tmp)do { int __i; for (__i = 0; __i < 4; __i++) (dst->sq_signals
).__bits[__i] |= (tmp).__bits[__i]; } while (0)
;
498 SIGSETNAND(src->sq_signals, tmp)do { int __i; for (__i = 0; __i < 4; __i++) (src->sq_signals
).__bits[__i] &= ~(tmp).__bits[__i]; } while (0)
;
499}
500
501#if 0
502static void
503sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
504{
505 sigset_t set;
506
507 SIGEMPTYSET(set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0)
;
508 SIGADDSET(set, signo)((set).__bits[(((signo) - 1) >> 5)] |= (1 << (((signo
) - 1) & 31)))
;
509 sigqueue_move_set(src, dst, &set);
510}
511#endif
512
513static void
514sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
515{
516 struct proc *p = sq->sq_proc;
517 ksiginfo_t *ksi, *next;
518
519 KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"))do { if (__builtin_expect((!(sq->sq_flags & 0x01)), 0)
) panic ("src sigqueue not inited"); } while (0)
;
520
521 /* Remove siginfo queue */
522 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next)for ((ksi) = (((&sq->sq_list))->tqh_first); (ksi) &&
((next) = (((ksi))->ksi_link.tqe_next), 1); (ksi) = (next
))
{
523 if (SIGISMEMBER(*set, ksi->ksi_signo)((*set).__bits[(((ksi->ksi_info.si_signo) - 1) >> 5)
] & (1 << (((ksi->ksi_info.si_signo) - 1) & 31
)))
) {
524 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link)do { ; ; do { if ((((ksi))->ksi_link.tqe_next) != ((void *
)0) && (((ksi))->ksi_link.tqe_next)->ksi_link.tqe_prev
!= &((ksi)->ksi_link.tqe_next)) panic("Bad link elm %p next->prev != elm"
, (ksi)); } while (0); do { if (*(ksi)->ksi_link.tqe_prev !=
(ksi)) panic("Bad link elm %p prev->next != elm", (ksi));
} while (0); if (((((ksi))->ksi_link.tqe_next)) != ((void
*)0)) (((ksi))->ksi_link.tqe_next)->ksi_link.tqe_prev =
(ksi)->ksi_link.tqe_prev; else { (&sq->sq_list)->
tqh_last = (ksi)->ksi_link.tqe_prev; ; } *(ksi)->ksi_link
.tqe_prev = (((ksi))->ksi_link.tqe_next); ; ; ; } while (0
)
;
525 ksi->ksi_sigq = NULL((void *)0);
526 if (ksiginfo_tryfree(ksi) && p != NULL((void *)0))
527 p->p_pendingcnt--;
528 }
529 }
530 SIGSETNAND(sq->sq_kill, *set)do { int __i; for (__i = 0; __i < 4; __i++) (sq->sq_kill
).__bits[__i] &= ~(*set).__bits[__i]; } while (0)
;
531 SIGSETNAND(sq->sq_ptrace, *set)do { int __i; for (__i = 0; __i < 4; __i++) (sq->sq_ptrace
).__bits[__i] &= ~(*set).__bits[__i]; } while (0)
;
532 SIGSETNAND(sq->sq_signals, *set)do { int __i; for (__i = 0; __i < 4; __i++) (sq->sq_signals
).__bits[__i] &= ~(*set).__bits[__i]; } while (0)
;
533}
534
535void
536sigqueue_delete(sigqueue_t *sq, int signo)
537{
538 sigset_t set;
539
540 SIGEMPTYSET(set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0)
;
541 SIGADDSET(set, signo)((set).__bits[(((signo) - 1) >> 5)] |= (1 << (((signo
) - 1) & 31)))
;
542 sigqueue_delete_set(sq, &set);
543}
544
545/* Remove a set of signals for a process */
546static void
547sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
548{
549 sigqueue_t worklist;
550 struct thread *td0;
551
552 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (552))
;
553
554 sigqueue_init(&worklist, NULL((void *)0));
555 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
556
557 FOREACH_THREAD_IN_PROC(p, td0)for (((td0)) = (((&(p)->p_threads))->tqh_first); ((
td0)); ((td0)) = ((((td0)))->td_plist.tqe_next))
558 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
559
560 sigqueue_flush(&worklist);
561}
562
563void
564sigqueue_delete_proc(struct proc *p, int signo)
565{
566 sigset_t set;
567
568 SIGEMPTYSET(set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0)
;
569 SIGADDSET(set, signo)((set).__bits[(((signo) - 1) >> 5)] |= (1 << (((signo
) - 1) & 31)))
;
570 sigqueue_delete_set_proc(p, &set);
571}
572
573static void
574sigqueue_delete_stopmask_proc(struct proc *p)
575{
576 sigset_t set;
577
578 SIGEMPTYSET(set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0)
;
579 SIGADDSET(set, SIGSTOP)((set).__bits[(((17) - 1) >> 5)] |= (1 << (((17) -
1) & 31)))
;
580 SIGADDSET(set, SIGTSTP)((set).__bits[(((18) - 1) >> 5)] |= (1 << (((18) -
1) & 31)))
;
581 SIGADDSET(set, SIGTTIN)((set).__bits[(((21) - 1) >> 5)] |= (1 << (((21) -
1) & 31)))
;
582 SIGADDSET(set, SIGTTOU)((set).__bits[(((22) - 1) >> 5)] |= (1 << (((22) -
1) & 31)))
;
583 sigqueue_delete_set_proc(p, &set);
584}
585
586/*
587 * Determine signal that should be delivered to thread td, the current
588 * thread, 0 if none. If there is a pending stop signal with default
589 * action, the process stops in issignal().
590 */
591int
592cursig(struct thread *td)
593{
594 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED)__mtx_assert(&(((&(td->td_proc)->p_mtx)))->mtx_lock
, (((0x00000004))), ("/root/freebsd/sys/kern/kern_sig.c"), (594
))
;
595 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED)__mtx_assert(&(((&td->td_proc->p_sigacts->ps_mtx
)))->mtx_lock, ((0x00000004)), ("/root/freebsd/sys/kern/kern_sig.c"
), (595))
;
596 THREAD_LOCK_ASSERT(td, MA_NOTOWNED)do { struct mtx *__m = (td)->td_lock; if (__m != &blocked_lock
) __mtx_assert(&(((__m)))->mtx_lock, (((0x00000000))),
("/root/freebsd/sys/kern/kern_sig.c"), (596)); } while (0)
;
597 return (SIGPENDING(td)((!(__sigisempty(&((td)->td_sigqueue.sq_signals))) &&
!sigsetmasked(&(td)->td_sigqueue.sq_signals, &(td
)->td_sigmask)) || (!(__sigisempty(&((td)->td_proc->
p_sigqueue.sq_signals))) && !sigsetmasked(&(td)->
td_proc->p_sigqueue.sq_signals, &(td)->td_sigmask))
)
? issignal(td) : 0);
598}
599
600/*
601 * Arrange for ast() to handle unmasked pending signals on return to user
602 * mode. This must be called whenever a signal is added to td_sigqueue or
603 * unmasked in td_sigmask.
604 */
605void
606signotify(struct thread *td)
607{
608
609 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED)__mtx_assert(&(((&(td->td_proc)->p_mtx)))->mtx_lock
, (((0x00000004))), ("/root/freebsd/sys/kern/kern_sig.c"), (609
))
;
610
611 if (SIGPENDING(td)((!(__sigisempty(&((td)->td_sigqueue.sq_signals))) &&
!sigsetmasked(&(td)->td_sigqueue.sq_signals, &(td
)->td_sigmask)) || (!(__sigisempty(&((td)->td_proc->
p_sigqueue.sq_signals))) && !sigsetmasked(&(td)->
td_proc->p_sigqueue.sq_signals, &(td)->td_sigmask))
)
) {
612 thread_lock(td)_thread_lock((td), 0, "/root/freebsd/sys/kern/kern_sig.c", 612
)
;
613 td->td_flags |= TDF_NEEDSIGCHK0x00020000 | TDF_ASTPENDING0x00000800;
614 thread_unlock(td)__mtx_unlock_spin_flags(&(((((td)->td_lock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (614))
;
615 }
616}
617
618/*
619 * Returns 1 (true) if altstack is configured for the thread, and the
620 * passed stack bottom address falls into the altstack range. Handles
621 * the 43 compat special case where the alt stack size is zero.
622 */
623int
624sigonstack(size_t sp)
625{
626 struct thread *td;
627
628 td = curthread(__curthread());
629 if ((td->td_pflags & TDP_ALTSTACK0x00000020) == 0)
630 return (0);
631#if defined(COMPAT_43)
632 if (td->td_sigstk.ss_size == 0)
633 return ((td->td_sigstk.ss_flags & SS_ONSTACK0x0001) != 0);
634#endif
635 return (sp >= (size_t)td->td_sigstk.ss_sp &&
636 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
637}
638
639static __inline int
640sigprop(int sig)
641{
642
643 if (sig > 0 && sig < nitems(sigproptbl)(sizeof((sigproptbl)) / sizeof((sigproptbl)[0])))
644 return (sigproptbl[sig]);
645 return (0);
646}
647
648int
649sig_ffs(sigset_t *set)
650{
651 int i;
652
653 for (i = 0; i < _SIG_WORDS4; i++)
654 if (set->__bits[i])
655 return (ffs(set->__bits[i])__builtin_ffs(set->__bits[i]) + (i * 32));
656 return (0);
657}
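
Editor's note: the expanded SIGADDSET/SIGISMEMBER macros above all map a signal number onto a 32-bit word index with (sig - 1) >> 5 and a bit position with (sig - 1) & 31, and sig_ffs() walks those words with __builtin_ffs(). A minimal standalone sketch of the same arithmetic (hypothetical names, not part of kern_sig.c):

#include <stdio.h>

#define MINI_SIG_WORDS 4

/* Stand-in for the kernel's sigset layout: 4 x 32-bit words. */
struct mini_sigset {
	unsigned int bits[MINI_SIG_WORDS];
};

/* Same index math as the expanded SIGADDSET macro above. */
static void
mini_sigaddset(struct mini_sigset *s, int sig)
{
	s->bits[(sig - 1) >> 5] |= 1U << ((sig - 1) & 31);
}

/* Same traversal as sig_ffs(): first set bit, 1-based, plus 32 per word. */
static int
mini_sig_ffs(const struct mini_sigset *s)
{
	for (int i = 0; i < MINI_SIG_WORDS; i++)
		if (s->bits[i] != 0)
			return (__builtin_ffs((int)s->bits[i]) + i * 32);
	return (0);
}

int
main(void)
{
	struct mini_sigset s = { { 0 } };

	mini_sigaddset(&s, 20);		/* SIGCHLD: word 0, bit 19 */
	mini_sigaddset(&s, 35);		/* word 1, bit 2 */
	printf("first set signal: %d\n", mini_sig_ffs(&s));	/* prints 20 */
	return (0);
}
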
658
659static bool
660sigact_flag_test(const struct sigaction *act, int flag)
661{
662
663 /*
664 * SA_SIGINFO is reset when signal disposition is set to
665 * ignore or default. Other flags are kept according to user
666 * settings.
667 */
668 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO0x0040 ||
669 ((__sighandler_t *)act->sa_sigaction__sigaction_u.__sa_sigaction != SIG_IGN((__sighandler_t *)1) &&
670 (__sighandler_t *)act->sa_sigaction__sigaction_u.__sa_sigaction != SIG_DFL((__sighandler_t *)0))));
671}
672
673/*
674 * kern_sigaction
675 * sigaction
676 * freebsd4_sigaction
677 * osigaction
678 */
679int
680kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
681 struct sigaction *oact, int flags)
682{
683 struct sigacts *ps;
684 struct proc *p = td->td_proc;
685
686 if (!_SIG_VALID(sig)((sig) <= 128 && (sig) > 0))
687 return (EINVAL22);
688 if (act != NULL((void *)0) && act->sa_handler__sigaction_u.__sa_handler != SIG_DFL((__sighandler_t *)0) &&
689 act->sa_handler__sigaction_u.__sa_handler != SIG_IGN((__sighandler_t *)1) && (act->sa_flags & ~(SA_ONSTACK0x0001 |
690 SA_RESTART0x0002 | SA_RESETHAND0x0004 | SA_NOCLDSTOP0x0008 | SA_NODEFER0x0010 |
691 SA_NOCLDWAIT0x0020 | SA_SIGINFO0x0040)) != 0)
692 return (EINVAL22);
693
694 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (694))
;
695 ps = p->p_sigacts;
696 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (696))
;
697 if (oact) {
698 memset(oact, 0, sizeof(*oact))__builtin_memset((oact), (0), (sizeof(*oact)));
699 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)((sig) - 1)];
700 if (SIGISMEMBER(ps->ps_sigonstack, sig)((ps->ps_sigonstack).__bits[(((sig) - 1) >> 5)] &
(1 << (((sig) - 1) & 31)))
)
701 oact->sa_flags |= SA_ONSTACK0x0001;
702 if (!SIGISMEMBER(ps->ps_sigintr, sig)((ps->ps_sigintr).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
)
703 oact->sa_flags |= SA_RESTART0x0002;
704 if (SIGISMEMBER(ps->ps_sigreset, sig)((ps->ps_sigreset).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
)
705 oact->sa_flags |= SA_RESETHAND0x0004;
706 if (SIGISMEMBER(ps->ps_signodefer, sig)((ps->ps_signodefer).__bits[(((sig) - 1) >> 5)] &
(1 << (((sig) - 1) & 31)))
)
707 oact->sa_flags |= SA_NODEFER0x0010;
708 if (SIGISMEMBER(ps->ps_siginfo, sig)((ps->ps_siginfo).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
) {
709 oact->sa_flags |= SA_SIGINFO0x0040;
710 oact->sa_sigaction__sigaction_u.__sa_sigaction =
711 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)];
712 } else
713 oact->sa_handler__sigaction_u.__sa_handler = ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)];
714 if (sig == SIGCHLD20 && ps->ps_flag & PS_NOCLDSTOP0x0002)
715 oact->sa_flags |= SA_NOCLDSTOP0x0008;
716 if (sig == SIGCHLD20 && ps->ps_flag & PS_NOCLDWAIT0x0001)
717 oact->sa_flags |= SA_NOCLDWAIT0x0020;
718 }
719 if (act) {
720 if ((sig == SIGKILL9 || sig == SIGSTOP17) &&
721 act->sa_handler__sigaction_u.__sa_handler != SIG_DFL((__sighandler_t *)0)) {
722 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (722))
;
723 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (723))
;
724 return (EINVAL22);
725 }
726
727 /*
728 * Change setting atomically.
729 */
730
731 ps->ps_catchmask[_SIG_IDX(sig)((sig) - 1)] = act->sa_mask;
732 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)])((ps->ps_catchmask[((sig) - 1)]).__bits[(((9) - 1) >>
5)] &= ~(1 << (((9) - 1) & 31))), ((ps->ps_catchmask
[((sig) - 1)]).__bits[(((17) - 1) >> 5)] &= ~(1 <<
(((17) - 1) & 31)))
;
733 if (sigact_flag_test(act, SA_SIGINFO0x0040)) {
734 ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] =
735 (__sighandler_t *)act->sa_sigaction__sigaction_u.__sa_sigaction;
736 SIGADDSET(ps->ps_siginfo, sig)((ps->ps_siginfo).__bits[(((sig) - 1) >> 5)] |= (1 <<
(((sig) - 1) & 31)))
;
737 } else {
738 ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] = act->sa_handler__sigaction_u.__sa_handler;
739 SIGDELSET(ps->ps_siginfo, sig)((ps->ps_siginfo).__bits[(((sig) - 1) >> 5)] &= ~
(1 << (((sig) - 1) & 31)))
;
740 }
741 if (!sigact_flag_test(act, SA_RESTART0x0002))
742 SIGADDSET(ps->ps_sigintr, sig)((ps->ps_sigintr).__bits[(((sig) - 1) >> 5)] |= (1 <<
(((sig) - 1) & 31)))
;
743 else
744 SIGDELSET(ps->ps_sigintr, sig)((ps->ps_sigintr).__bits[(((sig) - 1) >> 5)] &= ~
(1 << (((sig) - 1) & 31)))
;
745 if (sigact_flag_test(act, SA_ONSTACK0x0001))
746 SIGADDSET(ps->ps_sigonstack, sig)((ps->ps_sigonstack).__bits[(((sig) - 1) >> 5)] |= (
1 << (((sig) - 1) & 31)))
;
747 else
748 SIGDELSET(ps->ps_sigonstack, sig)((ps->ps_sigonstack).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
749 if (sigact_flag_test(act, SA_RESETHAND0x0004))
750 SIGADDSET(ps->ps_sigreset, sig)((ps->ps_sigreset).__bits[(((sig) - 1) >> 5)] |= (1 <<
(((sig) - 1) & 31)))
;
751 else
752 SIGDELSET(ps->ps_sigreset, sig)((ps->ps_sigreset).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
753 if (sigact_flag_test(act, SA_NODEFER0x0010))
754 SIGADDSET(ps->ps_signodefer, sig)((ps->ps_signodefer).__bits[(((sig) - 1) >> 5)] |= (
1 << (((sig) - 1) & 31)))
;
755 else
756 SIGDELSET(ps->ps_signodefer, sig)((ps->ps_signodefer).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
757 if (sig == SIGCHLD20) {
758 if (act->sa_flags & SA_NOCLDSTOP0x0008)
759 ps->ps_flag |= PS_NOCLDSTOP0x0002;
760 else
761 ps->ps_flag &= ~PS_NOCLDSTOP0x0002;
762 if (act->sa_flags & SA_NOCLDWAIT0x0020) {
763 /*
764 * Paranoia: since SA_NOCLDWAIT is implemented
765 * by reparenting the dying child to PID 1 (and
766 * trust it to reap the zombie), PID 1 itself
767 * is forbidden to set SA_NOCLDWAIT.
768 */
769 if (p->p_pid == 1)
770 ps->ps_flag &= ~PS_NOCLDWAIT0x0001;
771 else
772 ps->ps_flag |= PS_NOCLDWAIT0x0001;
773 } else
774 ps->ps_flag &= ~PS_NOCLDWAIT0x0001;
775 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)((20) - 1)] == SIG_IGN((__sighandler_t *)1))
776 ps->ps_flag |= PS_CLDSIGIGN0x0004;
777 else
778 ps->ps_flag &= ~PS_CLDSIGIGN0x0004;
779 }
780 /*
781 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
782 * and for signals set to SIG_DFL where the default is to
783 * ignore. However, don't put SIGCONT in ps_sigignore, as we
784 * have to restart the process.
785 */
786 if (ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_IGN((__sighandler_t *)1) ||
787 (sigprop(sig) & SIGPROP_IGNORE0x10 &&
788 ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_DFL((__sighandler_t *)0))) {
789 /* never to be seen again */
790 sigqueue_delete_proc(p, sig);
791 if (sig != SIGCONT19)
792 /* easier in psignal */
793 SIGADDSET(ps->ps_sigignore, sig)((ps->ps_sigignore).__bits[(((sig) - 1) >> 5)] |= (1
<< (((sig) - 1) & 31)))
;
794 SIGDELSET(ps->ps_sigcatch, sig)((ps->ps_sigcatch).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
795 } else {
796 SIGDELSET(ps->ps_sigignore, sig)((ps->ps_sigignore).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
797 if (ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_DFL((__sighandler_t *)0))
798 SIGDELSET(ps->ps_sigcatch, sig)((ps->ps_sigcatch).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
799 else
800 SIGADDSET(ps->ps_sigcatch, sig)((ps->ps_sigcatch).__bits[(((sig) - 1) >> 5)] |= (1 <<
(((sig) - 1) & 31)))
;
801 }
802#ifdef COMPAT_FREEBSD41
803 if (ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_IGN((__sighandler_t *)1) ||
804 ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_DFL((__sighandler_t *)0) ||
805 (flags & KSA_FREEBSD40x0002) == 0)
806 SIGDELSET(ps->ps_freebsd4, sig)((ps->ps_freebsd4).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
807 else
808 SIGADDSET(ps->ps_freebsd4, sig)((ps->ps_freebsd4).__bits[(((sig) - 1) >> 5)] |= (1 <<
(((sig) - 1) & 31)))
;
809#endif
810#ifdef COMPAT_43
811 if (ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_IGN((__sighandler_t *)1) ||
812 ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] == SIG_DFL((__sighandler_t *)0) ||
813 (flags & KSA_OSIGSET0x0001) == 0)
814 SIGDELSET(ps->ps_osigset, sig)((ps->ps_osigset).__bits[(((sig) - 1) >> 5)] &= ~
(1 << (((sig) - 1) & 31)))
;
815 else
816 SIGADDSET(ps->ps_osigset, sig)((ps->ps_osigset).__bits[(((sig) - 1) >> 5)] |= (1 <<
(((sig) - 1) & 31)))
;
817#endif
818 }
819 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (819))
;
820 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (820))
;
821 return (0);
822}
823
824#ifndef _SYS_SYSPROTO_H_
825struct sigaction_args {
826 int sig;
827 struct sigaction *act;
828 struct sigaction *oact;
829};
830#endif
831int
832sys_sigaction(struct thread *td, struct sigaction_args *uap)
833{
834 struct sigaction act, oact;
835 struct sigaction *actp, *oactp;
836 int error;
837
838 actp = (uap->act != NULL((void *)0)) ? &act : NULL((void *)0);
839 oactp = (uap->oact != NULL((void *)0)) ? &oact : NULL((void *)0);
840 if (actp) {
841 error = copyin(uap->act, actp, sizeof(act));
842 if (error)
843 return (error);
844 }
845 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
846 if (oactp && !error)
847 error = copyout(oactp, uap->oact, sizeof(oact));
848 return (error);
849}
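
Editor's note: kern_sigaction() above translates sa_flags into the per-signal ps_* sets (SA_SIGINFO -> ps_siginfo, !SA_RESTART -> ps_sigintr, and so on). A hedged userland sketch of the caller side of this path; the handler name and choice of SIGUSR1 are illustrative only:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative SA_SIGINFO handler; only async-signal-safe calls inside. */
static void
usr1_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)ctx;
	/* si_pid/si_uid are filled in for user-sent signals (SI_USER/SI_QUEUE). */
	if (si != NULL && sig == SIGUSR1)
		_exit(0);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = usr1_handler;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* sets ps_siginfo, clears ps_sigintr */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1) {
		perror("sigaction");
		return (1);
	}
	kill(getpid(), SIGUSR1);	/* handler runs and exits before pause() */
	pause();
	return (0);
}
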
850
851#ifdef COMPAT_FREEBSD41
852#ifndef _SYS_SYSPROTO_H_
853struct freebsd4_sigaction_args {
854 int sig;
855 struct sigaction *act;
856 struct sigaction *oact;
857};
858#endif
859int
860freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
861{
862 struct sigaction act, oact;
863 struct sigaction *actp, *oactp;
864 int error;
865
866
867 actp = (uap->act != NULL((void *)0)) ? &act : NULL((void *)0);
868 oactp = (uap->oact != NULL((void *)0)) ? &oact : NULL((void *)0);
869 if (actp) {
870 error = copyin(uap->act, actp, sizeof(act));
871 if (error)
872 return (error);
873 }
874 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD40x0002);
875 if (oactp && !error)
876 error = copyout(oactp, uap->oact, sizeof(oact));
877 return (error);
878}
879#endif /* COMPAT_FREEBSD4 */
880
881#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
882#ifndef _SYS_SYSPROTO_H_
883struct osigaction_args {
884 int signum;
885 struct osigaction *nsa;
886 struct osigaction *osa;
887};
888#endif
889int
890osigaction(struct thread *td, struct osigaction_args *uap)
891{
892 struct osigaction sa;
893 struct sigaction nsa, osa;
894 struct sigaction *nsap, *osap;
895 int error;
896
897 if (uap->signum <= 0 || uap->signum >= ONSIG32)
898 return (EINVAL22);
899
900 nsap = (uap->nsa != NULL((void *)0)) ? &nsa : NULL((void *)0);
901 osap = (uap->osa != NULL((void *)0)) ? &osa : NULL((void *)0);
902
903 if (nsap) {
904 error = copyin(uap->nsa, &sa, sizeof(sa));
905 if (error)
906 return (error);
907 nsap->sa_handler__sigaction_u.__sa_handler = sa.sa_handler__sigaction_u.__sa_handler;
908 nsap->sa_flags = sa.sa_flags;
909 OSIG2SIG(sa.sa_mask, nsap->sa_mask)do { int __i; for (__i = 0; __i < 4; __i++) (nsap->sa_mask
).__bits[__i] = 0; } while (0); (nsap->sa_mask).__bits[0] =
sa.sa_mask
;
910 }
911 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET0x0001);
912 if (osap && !error) {
913 sa.sa_handler__sigaction_u.__sa_handler = osap->sa_handler__sigaction_u.__sa_handler;
914 sa.sa_flags = osap->sa_flags;
915 SIG2OSIG(osap->sa_mask, sa.sa_mask)(sa.sa_mask = (osap->sa_mask).__bits[0]);
916 error = copyout(&sa, uap->osa, sizeof(sa));
917 }
918 return (error);
919}
920
921#if !defined(__i386__)
922/* Avoid replicating the same stub everywhere */
923int
924osigreturn(struct thread *td, struct osigreturn_args *uap)
925{
926
927 return (nosys(td, (struct nosys_args *)uap));
928}
929#endif
930#endif /* COMPAT_43 */
931
932/*
933 * Initialize signal state for process 0;
934 * set to ignore signals that are ignored by default.
935 */
936void
937siginit(struct proc *p)
938{
939 int i;
940 struct sigacts *ps;
941
942 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (942))
;
943 ps = p->p_sigacts;
944 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (944))
;
945 for (i = 1; i <= NSIG32; i++) {
946 if (sigprop(i) & SIGPROP_IGNORE0x10 && i != SIGCONT19) {
947 SIGADDSET(ps->ps_sigignore, i)((ps->ps_sigignore).__bits[(((i) - 1) >> 5)] |= (1 <<
(((i) - 1) & 31)))
;
948 }
949 }
950 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (950))
;
951 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (951))
;
952}
953
954/*
955 * Reset specified signal to the default disposition.
956 */
957static void
958sigdflt(struct sigacts *ps, int sig)
959{
960
961 mtx_assert(&ps->ps_mtx, MA_OWNED)__mtx_assert(&(((&ps->ps_mtx)))->mtx_lock, ((0x00000004
)), ("/root/freebsd/sys/kern/kern_sig.c"), (961))
;
962 SIGDELSET(ps->ps_sigcatch, sig)((ps->ps_sigcatch).__bits[(((sig) - 1) >> 5)] &=
~(1 << (((sig) - 1) & 31)))
;
963 if ((sigprop(sig) & SIGPROP_IGNORE0x10) != 0 && sig != SIGCONT19)
964 SIGADDSET(ps->ps_sigignore, sig)((ps->ps_sigignore).__bits[(((sig) - 1) >> 5)] |= (1
<< (((sig) - 1) & 31)))
;
965 ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)] = SIG_DFL((__sighandler_t *)0);
966 SIGDELSET(ps->ps_siginfo, sig)((ps->ps_siginfo).__bits[(((sig) - 1) >> 5)] &= ~
(1 << (((sig) - 1) & 31)))
;
967}
968
969/*
970 * Reset signals for an exec of the specified process.
971 */
972void
973execsigs(struct proc *p)
974{
975 sigset_t osigignore;
976 struct sigacts *ps;
977 int sig;
978 struct thread *td;
979
980 /*
981 * Reset caught signals. Held signals remain held
982 * through td_sigmask (unless they were caught,
983 * and are now ignored by default).
984 */
985 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (985))
;
986 ps = p->p_sigacts;
987 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (987))
;
988 while (SIGNOTEMPTY(ps->ps_sigcatch)(!__sigisempty(&(ps->ps_sigcatch)))) {
989 sig = sig_ffs(&ps->ps_sigcatch);
990 sigdflt(ps, sig);
991 if ((sigprop(sig) & SIGPROP_IGNORE0x10) != 0)
992 sigqueue_delete_proc(p, sig);
993 }
994
995 /*
996 * As CloudABI processes cannot modify signal handlers, fully
997 * reset all signals to their default behavior. Do ignore
998 * SIGPIPE, as it would otherwise be impossible to recover from
999 * writes to broken pipes and sockets.
1000 */
1001 if (SV_PROC_ABI(p)((p)->p_sysent->sv_flags & 0xff) == SV_ABI_CLOUDABI17) {
1002 osigignore = ps->ps_sigignore;
1003 while (SIGNOTEMPTY(osigignore)(!__sigisempty(&(osigignore)))) {
1004 sig = sig_ffs(&osigignore);
1005 SIGDELSET(osigignore, sig)((osigignore).__bits[(((sig) - 1) >> 5)] &= ~(1 <<
(((sig) - 1) & 31)))
;
1006 if (sig != SIGPIPE13)
1007 sigdflt(ps, sig);
1008 }
1009 SIGADDSET(ps->ps_sigignore, SIGPIPE)((ps->ps_sigignore).__bits[(((13) - 1) >> 5)] |= (1 <<
(((13) - 1) & 31)))
;
1010 }
1011
1012 /*
1013 * Reset stack state to the user stack.
1014 * Clear set of signals caught on the signal stack.
1015 */
1016 td = curthread(__curthread());
1017 MPASS(td->td_proc == p)do { if (__builtin_expect((!((td->td_proc == p))), 0)) panic
("Assertion %s failed at %s:%d", "td->td_proc == p", "/root/freebsd/sys/kern/kern_sig.c"
, 1017); } while (0)
;
1018 td->td_sigstk.ss_flags = SS_DISABLE0x0004;
1019 td->td_sigstk.ss_size = 0;
1020 td->td_sigstk.ss_sp = 0;
1021 td->td_pflags &= ~TDP_ALTSTACK0x00000020;
1022 /*
1023 * Reset no zombies if child dies flag as Solaris does.
1024 */
1025 ps->ps_flag &= ~(PS_NOCLDWAIT0x0001 | PS_CLDSIGIGN0x0004);
1026 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)((20) - 1)] == SIG_IGN((__sighandler_t *)1))
1027 ps->ps_sigact[_SIG_IDX(SIGCHLD)((20) - 1)] = SIG_DFL((__sighandler_t *)0);
1028 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1028))
;
1029}
1030
1031/*
1032 * kern_sigprocmask()
1033 *
1034 * Manipulate signal mask.
1035 */
1036int
1037kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1038 int flags)
1039{
1040 sigset_t new_block, oset1;
1041 struct proc *p;
1042 int error;
1043
1044 p = td->td_proc;
1045 if ((flags & SIGPROCMASK_PROC_LOCKED0x0002) != 0)
1046 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (1046))
;
1047 else
1048 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1048))
;
1049 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0__mtx_assert(&(((&p->p_sigacts->ps_mtx)))->mtx_lock
, (((flags & 0x0004) != 0 ? 0x00000004 : 0x00000000)), ("/root/freebsd/sys/kern/kern_sig.c"
), (1050))
1050 ? MA_OWNED : MA_NOTOWNED)__mtx_assert(&(((&p->p_sigacts->ps_mtx)))->mtx_lock
, (((flags & 0x0004) != 0 ? 0x00000004 : 0x00000000)), ("/root/freebsd/sys/kern/kern_sig.c"
), (1050))
;
1051 if (oset != NULL((void *)0))
1052 *oset = td->td_sigmask;
1053
1054 error = 0;
1055 if (set != NULL((void *)0)) {
1056 switch (how) {
1057 case SIG_BLOCK1:
1058 SIG_CANTMASK(*set)((*set).__bits[(((9) - 1) >> 5)] &= ~(1 << ((
(9) - 1) & 31))), ((*set).__bits[(((17) - 1) >> 5)]
&= ~(1 << (((17) - 1) & 31)))
;
1059 oset1 = td->td_sigmask;
1060 SIGSETOR(td->td_sigmask, *set)do { int __i; for (__i = 0; __i < 4; __i++) (td->td_sigmask
).__bits[__i] |= (*set).__bits[__i]; } while (0)
;
1061 new_block = td->td_sigmask;
1062 SIGSETNAND(new_block, oset1)do { int __i; for (__i = 0; __i < 4; __i++) (new_block).__bits
[__i] &= ~(oset1).__bits[__i]; } while (0)
;
1063 break;
1064 case SIG_UNBLOCK2:
1065 SIGSETNAND(td->td_sigmask, *set)do { int __i; for (__i = 0; __i < 4; __i++) (td->td_sigmask
).__bits[__i] &= ~(*set).__bits[__i]; } while (0)
;
1066 signotify(td);
1067 goto out;
1068 case SIG_SETMASK3:
1069 SIG_CANTMASK(*set)((*set).__bits[(((9) - 1) >> 5)] &= ~(1 << ((
(9) - 1) & 31))), ((*set).__bits[(((17) - 1) >> 5)]
&= ~(1 << (((17) - 1) & 31)))
;
1070 oset1 = td->td_sigmask;
1071 if (flags & SIGPROCMASK_OLD0x0001)
1072 SIGSETLO(td->td_sigmask, *set)((td->td_sigmask).__bits[0] = (*set).__bits[0]);
1073 else
1074 td->td_sigmask = *set;
1075 new_block = td->td_sigmask;
1076 SIGSETNAND(new_block, oset1)do { int __i; for (__i = 0; __i < 4; __i++) (new_block).__bits
[__i] &= ~(oset1).__bits[__i]; } while (0)
;
1077 signotify(td);
1078 break;
1079 default:
1080 error = EINVAL22;
1081 goto out;
1082 }
1083
1084 /*
1085 * The new_block set contains signals that were not previously
1086 * blocked, but are blocked now.
1087 *
1088 * In case we block any signal that was not previously blocked
1089 * for td, and process has the signal pending, try to schedule
1090 * signal delivery to some thread that does not block the
1091 * signal, possibly waking it up.
1092 */
1093 if (p->p_numthreads != 1)
1094 reschedule_signals(p, new_block, flags);
1095 }
1096
1097out:
1098 if (!(flags & SIGPROCMASK_PROC_LOCKED0x0002))
1099 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1099))
;
1100 return (error);
1101}
1102
1103#ifndef _SYS_SYSPROTO_H_
1104struct sigprocmask_args {
1105 int how;
1106 const sigset_t *set;
1107 sigset_t *oset;
1108};
1109#endif
1110int
1111sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1112{
1113 sigset_t set, oset;
1114 sigset_t *setp, *osetp;
1115 int error;
1116
1117 setp = (uap->set != NULL((void *)0)) ? &set : NULL((void *)0);
1118 osetp = (uap->oset != NULL((void *)0)) ? &oset : NULL((void *)0);
1119 if (setp) {
1120 error = copyin(uap->set, setp, sizeof(set));
1121 if (error)
1122 return (error);
1123 }
1124 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1125 if (osetp && !error) {
1126 error = copyout(osetp, uap->oset, sizeof(oset));
1127 }
1128 return (error);
1129}
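
Editor's note: a hedged userland sketch of the three `how` values dispatched in kern_sigprocmask() above (SIG_BLOCK ORs bits in, SIG_UNBLOCK clears them, SIG_SETMASK replaces the mask); the choice of SIGINT is illustrative:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	/* SIG_BLOCK: OR the set into the mask (SIGKILL/SIGSTOP are silently stripped). */
	if (sigprocmask(SIG_BLOCK, &block, &old) == -1) {
		perror("sigprocmask");
		return (1);
	}

	/* ... critical section: SIGINT stays pending instead of interrupting ... */

	/* SIG_SETMASK: restore the previous mask exactly. */
	sigprocmask(SIG_SETMASK, &old, NULL);

	/* SIG_UNBLOCK would instead clear only the named bits. */
	return (0);
}
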
1130
1131#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1132#ifndef _SYS_SYSPROTO_H_
1133struct osigprocmask_args {
1134 int how;
1135 osigset_t mask;
1136};
1137#endif
1138int
1139osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1140{
1141 sigset_t set, oset;
1142 int error;
1143
1144 OSIG2SIG(uap->mask, set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0); (set).__bits[0] = uap->mask
;
1145 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1146 SIG2OSIG(oset, td->td_retval[0])(td->td_uretoff.tdu_retval[0] = (oset).__bits[0]);
1147 return (error);
1148}
1149#endif /* COMPAT_43 */
1150
1151int
1152sys_sigwait(struct thread *td, struct sigwait_args *uap)
1153{
1154 ksiginfo_t ksi;
1155 sigset_t set;
1156 int error;
1157
1158 error = copyin(uap->set, &set, sizeof(set));
1159 if (error) {
1160 td->td_retvaltd_uretoff.tdu_retval[0] = error;
1161 return (0);
1162 }
1163
1164 error = kern_sigtimedwait(td, set, &ksi, NULL((void *)0));
1165 if (error) {
1166 if (error == EINTR4 && td->td_proc->p_osrel < P_OSREL_SIGWAIT700000)
1167 error = ERESTART(-1);
1168 if (error == ERESTART(-1))
1169 return (error);
1170 td->td_retvaltd_uretoff.tdu_retval[0] = error;
1171 return (0);
1172 }
1173
1174 error = copyout(&ksi.ksi_signoksi_info.si_signo, uap->sig, sizeof(ksi.ksi_signoksi_info.si_signo));
1175 td->td_retvaltd_uretoff.tdu_retval[0] = error;
1176 return (0);
1177}
1178
1179int
1180sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1181{
1182 struct timespec ts;
1183 struct timespec *timeout;
1184 sigset_t set;
1185 ksiginfo_t ksi;
1186 int error;
1187
1188 if (uap->timeout) {
1189 error = copyin(uap->timeout, &ts, sizeof(ts));
1190 if (error)
1191 return (error);
1192
1193 timeout = &ts;
1194 } else
1195 timeout = NULL((void *)0);
1196
1197 error = copyin(uap->set, &set, sizeof(set));
1198 if (error)
1199 return (error);
1200
1201 error = kern_sigtimedwait(td, set, &ksi, timeout);
1202 if (error)
1203 return (error);
1204
1205 if (uap->info)
1206 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1207
1208 if (error == 0)
1209 td->td_retvaltd_uretoff.tdu_retval[0] = ksi.ksi_signoksi_info.si_signo;
1210 return (error);
1211}
1212
1213int
1214sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1215{
1216 ksiginfo_t ksi;
1217 sigset_t set;
1218 int error;
1219
1220 error = copyin(uap->set, &set, sizeof(set));
1221 if (error)
1. Assuming 'error' is 0
2. Taking false branch
1222 return (error);
1223
1224 error = kern_sigtimedwait(td, set, &ksi, NULL((void *)0));
1225 if (error)
3. Assuming 'error' is 0
4. Taking false branch
1226 return (error);
1227
1228 if (uap->info)
5. Assuming the condition is true
6. Taking true branch
1229 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
7. Copies out a struct with a union element with different sizes
1230
1231 if (error == 0)
1232 td->td_retvaltd_uretoff.tdu_retval[0] = ksi.ksi_signoksi_info.si_signo;
1233 return (error);
1234}
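
Editor's note: the warning at line 1229 concerns copying a full siginfo_t, which embeds a union, out to user space: if only a member smaller than the union itself had been written, the remaining bytes would carry whatever was on the kernel stack. In this path the object is zeroed by ksiginfo_init() in kern_sigtimedwait() (line 1272), so the report is likely conservative, but the defensive pattern the checker steers toward is an explicit clear before filling. A hedged, self-contained sketch of that pattern with a hypothetical struct (not the kernel's siginfo_t):

#include <string.h>

/* Hypothetical object with a union whose members differ in size. */
struct demo_info {
	int	di_signo;
	int	di_code;
	union {
		int	small;			/* 4 bytes */
		struct {
			long	addr;
			long	len;
		}	big;			/* 16 bytes */
	}	di_reason;
};

/*
 * Filling only the small member leaves the rest of the union as whatever
 * was on the stack; copying the whole struct out would disclose it.
 */
void
fill_unsafe(struct demo_info *di, int signo)
{
	di->di_signo = signo;
	di->di_code = 1;
	di->di_reason.small = 42;	/* bytes 4..15 of the union untouched */
}

/* Defensive variant: zero the full object first, as ksiginfo_init() does. */
void
fill_safe(struct demo_info *di, int signo)
{
	memset(di, 0, sizeof(*di));
	di->di_signo = signo;
	di->di_code = 1;
	di->di_reason.small = 42;
}

int
main(void)
{
	struct demo_info a, b;

	fill_unsafe(&a, 10);
	fill_safe(&b, 10);
	return (a.di_signo == b.di_signo ? 0 : 1);
}
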
1235
1236static void
1237proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1238{
1239 struct thread *thr;
1240
1241 FOREACH_THREAD_IN_PROC(td->td_proc, thr)for (((thr)) = (((&(td->td_proc)->p_threads))->tqh_first
); ((thr)); ((thr)) = ((((thr)))->td_plist.tqe_next))
{
1242 if (thr == td)
1243 thr->td_si = *si;
1244 else
1245 thr->td_si.si_signo = 0;
1246 }
1247}
1248
1249int
1250kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1251 struct timespec *timeout)
1252{
1253 struct sigacts *ps;
1254 sigset_t saved_mask, new_block;
1255 struct proc *p;
1256 int error, sig, timo, timevalid = 0;
1257 struct timespec rts, ets, ts;
1258 struct timeval tv;
1259
1260 p = td->td_proc;
1261 error = 0;
1262 ets.tv_sec = 0;
1263 ets.tv_nsec = 0;
1264
1265 if (timeout != NULL((void *)0)) {
1266 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1267 timevalid = 1;
1268 getnanouptime(&rts);
1269 timespecadd(&rts, timeout, &ets)do { (&ets)->tv_sec = (&rts)->tv_sec + (timeout
)->tv_sec; (&ets)->tv_nsec = (&rts)->tv_nsec
+ (timeout)->tv_nsec; if ((&ets)->tv_nsec >= 1000000000L
) { (&ets)->tv_sec++; (&ets)->tv_nsec -= 1000000000L
; } } while (0)
;
1270 }
1271 }
1272 ksiginfo_init(ksi)do { __builtin_memset((ksi), 0, (sizeof(ksiginfo_t))); } while
(0)
;
1273 /* Some signals can not be waited for. */
1274 SIG_CANTMASK(waitset)((waitset).__bits[(((9) - 1) >> 5)] &= ~(1 <<
(((9) - 1) & 31))), ((waitset).__bits[(((17) - 1) >>
5)] &= ~(1 << (((17) - 1) & 31)))
;
1275 ps = p->p_sigacts;
1276 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1276))
;
1277 saved_mask = td->td_sigmask;
1278 SIGSETNAND(td->td_sigmask, waitset)do { int __i; for (__i = 0; __i < 4; __i++) (td->td_sigmask
).__bits[__i] &= ~(waitset).__bits[__i]; } while (0)
;
1279 for (;;) {
1280 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1280))
;
1281 sig = cursig(td);
1282 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1282))
;
1283 KASSERT(sig >= 0, ("sig %d", sig))do { if (__builtin_expect((!(sig >= 0)), 0)) panic ("sig %d"
, sig); } while (0)
;
1284 if (sig != 0 && SIGISMEMBER(waitset, sig)((waitset).__bits[(((sig) - 1) >> 5)] & (1 <<
(((sig) - 1) & 31)))
) {
1285 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1286 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1287 error = 0;
1288 break;
1289 }
1290 }
1291
1292 if (error != 0)
1293 break;
1294
1295 /*
1296 * POSIX says this must be checked after looking for pending
1297 * signals.
1298 */
1299 if (timeout != NULL((void *)0)) {
1300 if (!timevalid) {
1301 error = EINVAL22;
1302 break;
1303 }
1304 getnanouptime(&rts);
1305 if (timespeccmp(&rts, &ets, >=)(((&rts)->tv_sec == (&ets)->tv_sec) ? ((&rts
)->tv_nsec >= (&ets)->tv_nsec) : ((&rts)->
tv_sec >= (&ets)->tv_sec))
) {
1306 error = EAGAIN35;
1307 break;
1308 }
1309 timespecsub(&ets, &rts, &ts)do { (&ts)->tv_sec = (&ets)->tv_sec - (&rts
)->tv_sec; (&ts)->tv_nsec = (&ets)->tv_nsec -
(&rts)->tv_nsec; if ((&ts)->tv_nsec < 0) { (
&ts)->tv_sec--; (&ts)->tv_nsec += 1000000000L; }
} while (0)
;
1310 TIMESPEC_TO_TIMEVAL(&tv, &ts)do { (&tv)->tv_sec = (&ts)->tv_sec; (&tv)->
tv_usec = (&ts)->tv_nsec / 1000; } while (0)
;
1311 timo = tvtohz(&tv);
1312 } else {
1313 timo = 0;
1314 }
1315
1316 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo)_sleep((ps), &(&p->p_mtx)->lock_object, (((80) +
36)|0x100), ("sigwait"), tick_sbt * (timo), 0, 0x0100)
;
1317
1318 if (timeout != NULL((void *)0)) {
1319 if (error == ERESTART(-1)) {
1320 /* Timeout can not be restarted. */
1321 error = EINTR4;
1322 } else if (error == EAGAIN35) {
1323 /* We will calculate timeout by ourself. */
1324 error = 0;
1325 }
1326 }
1327 }
1328
1329 new_block = saved_mask;
1330 SIGSETNAND(new_block, td->td_sigmask)do { int __i; for (__i = 0; __i < 4; __i++) (new_block).__bits
[__i] &= ~(td->td_sigmask).__bits[__i]; } while (0)
;
1331 td->td_sigmask = saved_mask;
1332 /*
1333 * Fewer signals can be delivered to us, reschedule signal
1334 * notification.
1335 */
1336 if (p->p_numthreads != 1)
1337 reschedule_signals(p, new_block, 0);
1338
1339 if (error == 0) {
1340 SDT_PROBE2(proc, , , signal__clear, sig, ksi)do { if (__builtin_expect((sdt_probes_enabled), 0)) { if (__builtin_expect
((sdt_proc___signal__clear->id), 0)) (*sdt_probe_func)(sdt_proc___signal__clear
->id, (uintptr_t) sig, (uintptr_t) ksi, (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0); } } while (0)
;
1341
1342 if (ksi->ksi_codeksi_info.si_code == SI_TIMER0x10003)
1343 itimer_accept(p, ksi->ksi_timeridksi_info._reason._timer._timerid, ksi);
1344
1345#ifdef KTRACE1
1346 if (KTRPOINT(td, KTR_PSIG)(__builtin_expect(((((td))->td_proc->p_traceflag & (
1 << (5)))), 0))
) {
1347 sig_t action;
1348
1349 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1349))
;
1350 action = ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)];
1351 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1351))
;
1352 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_codeksi_info.si_code);
1353 }
1354#endif
1355 if (sig == SIGKILL9) {
1356 proc_td_siginfo_capture(td, &ksi->ksi_info);
1357 sigexit(td, sig);
1358 }
1359 }
1360 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1360))
;
1361 return (error);
1362}
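
Editor's note: a hedged userland sketch of the sigtimedwait(2) path implemented above: the signal is blocked first so it stays queued rather than being delivered, then waited for with a timeout, matching the EAGAIN-on-expiry handling in the loop; the 2-second timeout and SIGUSR1 are illustrative:

#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec to = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* Block it so delivery is deferred to the queue we wait on. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &to);
	if (sig == -1) {
		perror("sigtimedwait");	/* EAGAIN once the timeout expires */
		return (1);
	}
	printf("got signal %d from pid %ld\n", sig, (long)info.si_pid);
	return (0);
}
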
1363
1364#ifndef _SYS_SYSPROTO_H_
1365struct sigpending_args {
1366 sigset_t *set;
1367};
1368#endif
1369int
1370sys_sigpending(struct thread *td, struct sigpending_args *uap)
1371{
1372 struct proc *p = td->td_proc;
1373 sigset_t pending;
1374
1375 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1375))
;
1376 pending = p->p_sigqueue.sq_signals;
1377 SIGSETOR(pending, td->td_sigqueue.sq_signals)do { int __i; for (__i = 0; __i < 4; __i++) (pending).__bits
[__i] |= (td->td_sigqueue.sq_signals).__bits[__i]; } while
(0)
;
1378 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1378))
;
1379 return (copyout(&pending, uap->set, sizeof(sigset_t)));
1380}
1381
1382#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1383#ifndef _SYS_SYSPROTO_H_
1384struct osigpending_args {
1385 int dummy;
1386};
1387#endif
1388int
1389osigpending(struct thread *td, struct osigpending_args *uap)
1390{
1391 struct proc *p = td->td_proc;
1392 sigset_t pending;
1393
1394 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1394))
;
1395 pending = p->p_sigqueue.sq_signals;
1396 SIGSETOR(pending, td->td_sigqueue.sq_signals)do { int __i; for (__i = 0; __i < 4; __i++) (pending).__bits
[__i] |= (td->td_sigqueue.sq_signals).__bits[__i]; } while
(0)
;
1397 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1397))
;
1398 SIG2OSIG(pending, td->td_retval[0])(td->td_uretoff.tdu_retval[0] = (pending).__bits[0]);
1399 return (0);
1400}
1401#endif /* COMPAT_43 */
1402
1403#if defined(COMPAT_43)
1404/*
1405 * Generalized interface signal handler, 4.3-compatible.
1406 */
1407#ifndef _SYS_SYSPROTO_H_
1408struct osigvec_args {
1409 int signum;
1410 struct sigvec *nsv;
1411 struct sigvec *osv;
1412};
1413#endif
1414/* ARGSUSED */
1415int
1416osigvec(struct thread *td, struct osigvec_args *uap)
1417{
1418 struct sigvec vec;
1419 struct sigaction nsa, osa;
1420 struct sigaction *nsap, *osap;
1421 int error;
1422
1423 if (uap->signum <= 0 || uap->signum >= ONSIG32)
1424 return (EINVAL22);
1425 nsap = (uap->nsv != NULL((void *)0)) ? &nsa : NULL((void *)0);
1426 osap = (uap->osv != NULL((void *)0)) ? &osa : NULL((void *)0);
1427 if (nsap) {
1428 error = copyin(uap->nsv, &vec, sizeof(vec));
1429 if (error)
1430 return (error);
1431 nsap->sa_handler__sigaction_u.__sa_handler = vec.sv_handler;
1432 OSIG2SIG(vec.sv_mask, nsap->sa_mask)do { int __i; for (__i = 0; __i < 4; __i++) (nsap->sa_mask
).__bits[__i] = 0; } while (0); (nsap->sa_mask).__bits[0] =
vec.sv_mask
;
1433 nsap->sa_flags = vec.sv_flags;
1434 nsap->sa_flags ^= SA_RESTART0x0002; /* opposite of SV_INTERRUPT */
1435 }
1436 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET0x0001);
1437 if (osap && !error) {
1438 vec.sv_handler = osap->sa_handler__sigaction_u.__sa_handler;
1439 SIG2OSIG(osap->sa_mask, vec.sv_mask)(vec.sv_mask = (osap->sa_mask).__bits[0]);
1440 vec.sv_flags = osap->sa_flags;
1441 vec.sv_flags &= ~SA_NOCLDWAIT0x0020;
1442 vec.sv_flags ^= SA_RESTART0x0002;
1443 error = copyout(&vec, uap->osv, sizeof(vec));
1444 }
1445 return (error);
1446}
1447
1448#ifndef _SYS_SYSPROTO_H_
1449struct osigblock_args {
1450 int mask;
1451};
1452#endif
1453int
1454osigblock(struct thread *td, struct osigblock_args *uap)
1455{
1456 sigset_t set, oset;
1457
1458 OSIG2SIG(uap->mask, set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0); (set).__bits[0] = uap->mask
;
1459 kern_sigprocmask(td, SIG_BLOCK1, &set, &oset, 0);
1460 SIG2OSIG(oset, td->td_retval[0])(td->td_uretoff.tdu_retval[0] = (oset).__bits[0]);
1461 return (0);
1462}
1463
1464#ifndef _SYS_SYSPROTO_H_
1465struct osigsetmask_args {
1466 int mask;
1467};
1468#endif
1469int
1470osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1471{
1472 sigset_t set, oset;
1473
1474 OSIG2SIG(uap->mask, set)do { int __i; for (__i = 0; __i < 4; __i++) (set).__bits[__i
] = 0; } while (0); (set).__bits[0] = uap->mask
;
1475 kern_sigprocmask(td, SIG_SETMASK3, &set, &oset, 0);
1476 SIG2OSIG(oset, td->td_retval[0])(td->td_uretoff.tdu_retval[0] = (oset).__bits[0]);
1477 return (0);
1478}
1479#endif /* COMPAT_43 */
1480
1481/*
1482 * Suspend calling thread until signal, providing mask to be set in the
1483 * meantime.
1484 */
1485#ifndef _SYS_SYSPROTO_H_
1486struct sigsuspend_args {
1487 const sigset_t *sigmask;
1488};
1489#endif
1490/* ARGSUSED */
1491int
1492sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1493{
1494 sigset_t mask;
1495 int error;
1496
1497 error = copyin(uap->sigmask, &mask, sizeof(mask));
1498 if (error)
1499 return (error);
1500 return (kern_sigsuspend(td, mask));
1501}
1502
1503int
1504kern_sigsuspend(struct thread *td, sigset_t mask)
1505{
1506 struct proc *p = td->td_proc;
1507 int has_sig, sig;
1508
1509 /*
1510 * When returning from sigsuspend, we want
1511 * the old mask to be restored after the
1512 * signal handler has finished. Thus, we
1513 * save it here and mark the sigacts structure
1514 * to indicate this.
1515 */
1516 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1516))
;
1517 kern_sigprocmask(td, SIG_SETMASK3, &mask, &td->td_oldsigmask,
1518 SIGPROCMASK_PROC_LOCKED0x0002);
1519 td->td_pflags |= TDP_OLDMASK0x00000001;
1520
1521 /*
1522 * Process signals now. Otherwise, we can get spurious wakeup
1523 * due to signal entered process queue, but delivered to other
1524 * thread. But sigsuspend should return only on signal
1525 * delivery.
1526 */
1527 (p->p_sysent->sv_set_syscall_retval)(td, EINTR4);
1528 for (has_sig = 0; !has_sig;) {
1529 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",_sleep((&p->p_sigacts), &(&p->p_mtx)->lock_object
, (((80) + 36)|0x100), ("pause"), tick_sbt * (0), 0, 0x0100)
1530 0)_sleep((&p->p_sigacts), &(&p->p_mtx)->lock_object
, (((80) + 36)|0x100), ("pause"), tick_sbt * (0), 0, 0x0100)
== 0)
1531 /* void */;
1532 thread_suspend_check(0);
1533 mtx_lock(&p->p_sigacts->ps_mtx)__mtx_lock_flags(&((((&p->p_sigacts->ps_mtx))))
->mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (
1533))
;
1534 while ((sig = cursig(td)) != 0) {
1535 KASSERT(sig >= 0, ("sig %d", sig))do { if (__builtin_expect((!(sig >= 0)), 0)) panic ("sig %d"
, sig); } while (0)
;
1536 has_sig += postsig(sig);
1537 }
1538 mtx_unlock(&p->p_sigacts->ps_mtx)__mtx_unlock_flags(&((((&p->p_sigacts->ps_mtx))
))->mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c")
, (1538))
;
1539 }
1540 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1540))
;
1541 td->td_errno = EINTR4;
1542 td->td_pflags |= TDP_NERRNO0x08000000;
1543 return (EJUSTRETURN(-2));
1544}
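
Editor's note: the comments above stress that sigsuspend(2) installs the temporary mask and sleeps atomically, returning only after a handler has run (hence EINTR/EJUSTRETURN). A hedged userland sketch of the classic race-free wait that relies on that atomicity; names are illustrative:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_sigusr1;

static void
handler(int sig)
{
	(void)sig;
	got_sigusr1 = 1;
}

int
main(void)
{
	sigset_t block, oldmask, waitmask;

	signal(SIGUSR1, handler);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &oldmask);

	/*
	 * Atomically install a mask with SIGUSR1 unblocked and sleep;
	 * a signal arriving between the check and the sleep cannot be lost.
	 */
	waitmask = oldmask;
	sigdelset(&waitmask, SIGUSR1);
	while (!got_sigusr1)
		sigsuspend(&waitmask);	/* always returns -1 with errno == EINTR */

	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	puts("SIGUSR1 handled");
	return (0);
}
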
1545
1546#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1547/*
1548 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1549 * convention: libc stub passes mask, not pointer, to save a copyin.
1550 */
1551#ifndef _SYS_SYSPROTO_H_
1552struct osigsuspend_args {
1553 osigset_t mask;
1554};
1555#endif
1556/* ARGSUSED */
1557int
1558osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1559{
1560 sigset_t mask;
1561
1562 OSIG2SIG(uap->mask, mask)do { int __i; for (__i = 0; __i < 4; __i++) (mask).__bits[
__i] = 0; } while (0); (mask).__bits[0] = uap->mask
;
1563 return (kern_sigsuspend(td, mask));
1564}
1565#endif /* COMPAT_43 */
1566
1567#if defined(COMPAT_43)
1568#ifndef _SYS_SYSPROTO_H_
1569struct osigstack_args {
1570 struct sigstack *nss;
1571 struct sigstack *oss;
1572};
1573#endif
1574/* ARGSUSED */
1575int
1576osigstack(struct thread *td, struct osigstack_args *uap)
1577{
1578 struct sigstack nss, oss;
1579 int error = 0;
1580
1581 if (uap->nss != NULL((void *)0)) {
1582 error = copyin(uap->nss, &nss, sizeof(nss));
1583 if (error)
1584 return (error);
1585 }
1586 oss.ss_sp = td->td_sigstk.ss_sp;
1587 oss.ss_onstack = sigonstack(cpu_getstack(td)((td)->td_frame->tf_rsp));
1588 if (uap->nss != NULL((void *)0)) {
1589 td->td_sigstk.ss_sp = nss.ss_sp;
1590 td->td_sigstk.ss_size = 0;
1591 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK0x0001;
1592 td->td_pflags |= TDP_ALTSTACK0x00000020;
1593 }
1594 if (uap->oss != NULL((void *)0))
1595 error = copyout(&oss, uap->oss, sizeof(oss));
1596
1597 return (error);
1598}
1599#endif /* COMPAT_43 */
1600
1601#ifndef _SYS_SYSPROTO_H_
1602struct sigaltstack_args {
1603 stack_t *ss;
1604 stack_t *oss;
1605};
1606#endif
1607/* ARGSUSED */
1608int
1609sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1610{
1611 stack_t ss, oss;
1612 int error;
1613
1614 if (uap->ss != NULL((void *)0)) {
1615 error = copyin(uap->ss, &ss, sizeof(ss));
1616 if (error)
1617 return (error);
1618 }
1619 error = kern_sigaltstack(td, (uap->ss != NULL((void *)0)) ? &ss : NULL((void *)0),
1620 (uap->oss != NULL((void *)0)) ? &oss : NULL((void *)0));
1621 if (error)
1622 return (error);
1623 if (uap->oss != NULL((void *)0))
1624 error = copyout(&oss, uap->oss, sizeof(stack_t));
1625 return (error);
1626}
1627
1628int
1629kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1630{
1631 struct proc *p = td->td_proc;
1632 int oonstack;
1633
1634 oonstack = sigonstack(cpu_getstack(td)((td)->td_frame->tf_rsp));
1635
1636 if (oss != NULL((void *)0)) {
1637 *oss = td->td_sigstk;
1638 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK0x00000020)
1639 ? ((oonstack) ? SS_ONSTACK0x0001 : 0) : SS_DISABLE0x0004;
1640 }
1641
1642 if (ss != NULL((void *)0)) {
1643 if (oonstack)
1644 return (EPERM1);
1645 if ((ss->ss_flags & ~SS_DISABLE0x0004) != 0)
1646 return (EINVAL22);
1647 if (!(ss->ss_flags & SS_DISABLE0x0004)) {
1648 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1649 return (ENOMEM12);
1650
1651 td->td_sigstk = *ss;
1652 td->td_pflags |= TDP_ALTSTACK0x00000020;
1653 } else {
1654 td->td_pflags &= ~TDP_ALTSTACK0x00000020;
1655 }
1656 }
1657 return (0);
1658}
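
Editor's note: kern_sigaltstack() above rejects changes while already running on the alternate stack (EPERM) and enforces sv_minsigstksz (ENOMEM). A hedged userland sketch of configuring the stack that sigonstack()/TDP_ALTSTACK track, paired with an SA_ONSTACK handler; SIGSEGV is just the customary example:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
segv_handler(int sig)
{
	(void)sig;
	/* Runs on the alternate stack, so it works even after a stack overflow. */
	_Exit(2);
}

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;		/* must be >= sv_minsigstksz or the kernel returns ENOMEM */
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1) {
		perror("sigaltstack");
		return (1);
	}

	sa.sa_handler = segv_handler;
	sa.sa_flags = SA_ONSTACK;	/* this is what sets the ps_sigonstack bit above */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return (0);
}
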
1659
1660/*
1661 * Common code for kill process group/broadcast kill.
1662 * cp is calling process.
1663 */
1664static int
1665killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1666{
1667 struct proc *p;
1668 struct pgrp *pgrp;
1669 int err;
1670 int ret;
1671
1672 ret = ESRCH3;
1673 if (all) {
1674 /*
1675 * broadcast
1676 */
1677 sx_slock(&allproc_lock)(void)_sx_slock(((&allproc_lock)), 0, ("/root/freebsd/sys/kern/kern_sig.c"
), (1677))
;
1678 FOREACH_PROC_IN_SYSTEM(p)for (((p)) = (((&allproc))->lh_first); ((p)); ((p)) = (
(((p)))->p_list.le_next))
{
1679 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM0x00200 ||
1680 p == td->td_proc || p->p_state == PRS_NEW) {
1681 continue;
1682 }
1683 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1683))
;
1684 err = p_cansignal(td, p, sig);
1685 if (err == 0) {
1686 if (sig)
1687 pksignal(p, sig, ksi);
1688 ret = err;
1689 }
1690 else if (ret == ESRCH3)
1691 ret = err;
1692 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1692))
;
1693 }
1694 sx_sunlock(&allproc_lock)_sx_sunlock(((&allproc_lock)), ("/root/freebsd/sys/kern/kern_sig.c"
), (1694))
;
1695 } else {
1696 sx_slock(&proctree_lock)(void)_sx_slock(((&proctree_lock)), 0, ("/root/freebsd/sys/kern/kern_sig.c"
), (1696))
;
1697 if (pgid == 0) {
1698 /*
1699 * zero pgid means send to my process group.
1700 */
1701 pgrp = td->td_proc->p_pgrp;
1702 PGRP_LOCK(pgrp)__mtx_lock_flags(&((((&(pgrp)->pg_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1702))
;
1703 } else {
1704 pgrp = pgfind(pgid);
1705 if (pgrp == NULL((void *)0)) {
1706 sx_sunlock(&proctree_lock)_sx_sunlock(((&proctree_lock)), ("/root/freebsd/sys/kern/kern_sig.c"
), (1706))
;
1707 return (ESRCH3);
1708 }
1709 }
1710 sx_sunlock(&proctree_lock)_sx_sunlock(((&proctree_lock)), ("/root/freebsd/sys/kern/kern_sig.c"
), (1710))
;
1711 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)for ((p) = (((&pgrp->pg_members))->lh_first); (p); (
p) = (((p))->p_pglist.le_next))
{
1712 PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1712))
;
1713 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM0x00200 ||
1714 p->p_state == PRS_NEW) {
1715 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1715))
;
1716 continue;
1717 }
1718 err = p_cansignal(td, p, sig);
1719 if (err == 0) {
1720 if (sig)
1721 pksignal(p, sig, ksi);
1722 ret = err;
1723 }
1724 else if (ret == ESRCH3)
1725 ret = err;
1726 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1726))
;
1727 }
1728 PGRP_UNLOCK(pgrp)__mtx_unlock_flags(&((((&(pgrp)->pg_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1728))
;
1729 }
1730 return (ret);
1731}
1732
1733#ifndef _SYS_SYSPROTO_H_
1734struct kill_args {
1735 int pid;
1736 int signum;
1737};
1738#endif
1739/* ARGSUSED */
1740int
1741sys_kill(struct thread *td, struct kill_args *uap)
1742{
1743 ksiginfo_t ksi;
1744 struct proc *p;
1745 int error;
1746
1747 /*
1748 * A process in capability mode can send signals only to himself.
1749 * The main rationale behind this is that abort(3) is implemented as
1750 * kill(getpid(), SIGABRT).
1751 */
1752 if (IN_CAPABILITY_MODE(td)(((td)->td_ucred->cr_flags & 0x00000001) != 0) && uap->pid != td->td_proc->p_pid)
1753 return (ECAPMODE94);
1754
1755 AUDIT_ARG_SIGNUM(uap->signum)do { if ((__builtin_expect((((__curthread()))->td_pflags &
0x01000000), 0))) audit_arg_signum((uap->signum)); } while
(0)
;
1756 AUDIT_ARG_PID(uap->pid)do { if ((__builtin_expect((((__curthread()))->td_pflags &
0x01000000), 0))) audit_arg_pid((uap->pid)); } while (0)
;
1757 if ((u_int)uap->signum > _SIG_MAXSIG128)
1758 return (EINVAL22);
1759
1760 ksiginfo_init(&ksi)do { __builtin_memset((&ksi), 0, (sizeof(ksiginfo_t))); }
while(0)
;
1761 ksi.ksi_signoksi_info.si_signo = uap->signum;
1762 ksi.ksi_codeksi_info.si_code = SI_USER0x10001;
1763 ksi.ksi_pidksi_info.si_pid = td->td_proc->p_pid;
1764 ksi.ksi_uidksi_info.si_uid = td->td_ucred->cr_ruid;
1765
1766 if (uap->pid > 0) {
1767 /* kill single process */
1768 if ((p = pfind_any(uap->pid)) == NULL((void *)0))
1769 return (ESRCH3);
1770 AUDIT_ARG_PROCESS(p)do { if ((__builtin_expect((((__curthread()))->td_pflags &
0x01000000), 0))) audit_arg_process((p)); } while (0)
;
1771 error = p_cansignal(td, p, uap->signum);
1772 if (error == 0 && uap->signum)
1773 pksignal(p, uap->signum, &ksi);
1774 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (1774))
;
1775 return (error);
1776 }
1777 switch (uap->pid) {
1778 case -1: /* broadcast signal */
1779 return (killpg1(td, uap->signum, 0, 1, &ksi));
1780 case 0: /* signal own process group */
1781 return (killpg1(td, uap->signum, 0, 0, &ksi));
1782 default: /* negative explicit process group */
1783 return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
1784 }
1785 /* NOTREACHED */
1786}
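
Editor's note: the pid dispatch at the bottom of sys_kill() implements the usual kill(2) conventions, and a signal number of 0 only runs the p_cansignal() check without delivering anything. A hedged userland sketch of the four cases, using signal 0 so nothing is actually sent:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	if (kill(getpid(), 0) == -1)		/* pid > 0: a single process */
		perror("kill(pid, 0)");
	if (kill(0, 0) == -1)			/* pid == 0: caller's own process group */
		perror("kill(0, 0)");
	if (kill(-getpgrp(), 0) == -1)		/* pid < -1: explicit process group */
		perror("kill(-pgid, 0)");
	/* kill(-1, sig) would broadcast to every process the caller may signal. */
	return (0);
}
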
1787
1788int
1789sys_pdkill(struct thread *td, struct pdkill_args *uap)
1790{
1791 struct proc *p;
1792 int error;
1793
1794     AUDIT_ARG_SIGNUM(uap->signum);
1795     AUDIT_ARG_FD(uap->fd);
1796     if ((u_int)uap->signum > _SIG_MAXSIG)
1797         return (EINVAL);
1798
1799     error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1800     if (error)
1801         return (error);
1802     AUDIT_ARG_PROCESS(p);
1803     error = p_cansignal(td, p, uap->signum);
1804     if (error == 0 && uap->signum)
1805         kern_psignal(p, uap->signum);
1806     PROC_UNLOCK(p);
1807 return (error);
1808}
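sys_pdkill() is the kernel half of pdkill(2), which names the target by process descriptor instead of pid and therefore remains usable under capability mode. A hedged userland sketch of the usual pdfork(2)/pdkill(2) pairing (illustrative only):

#include <sys/procdesc.h>
#include <signal.h>
#include <unistd.h>

int
run_and_terminate(void)
{
    int pd;
    pid_t pid;

    pid = pdfork(&pd, 0);          /* pd now refers to the child process */
    if (pid == -1)
        return (-1);
    if (pid == 0) {                /* child: wait until signalled */
        pause();
        _exit(0);
    }
    return (pdkill(pd, SIGTERM));  /* enters sys_pdkill() above */
}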
1809
1810#if defined(COMPAT_43)
1811#ifndef _SYS_SYSPROTO_H_
1812struct okillpg_args {
1813 int pgid;
1814 int signum;
1815};
1816#endif
1817/* ARGSUSED */
1818int
1819okillpg(struct thread *td, struct okillpg_args *uap)
1820{
1821 ksiginfo_t ksi;
1822
1823     AUDIT_ARG_SIGNUM(uap->signum);
1824     AUDIT_ARG_PID(uap->pgid);
1825     if ((u_int)uap->signum > _SIG_MAXSIG)
1826         return (EINVAL);
1827
1828     ksiginfo_init(&ksi);
1829     ksi.ksi_signo = uap->signum;
1830     ksi.ksi_code = SI_USER;
1831     ksi.ksi_pid = td->td_proc->p_pid;
1832     ksi.ksi_uid = td->td_ucred->cr_ruid;
1833 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1834}
1835#endif /* COMPAT_43 */
1836
1837#ifndef _SYS_SYSPROTO_H_
1838struct sigqueue_args {
1839 pid_t pid;
1840 int signum;
1841 /* union sigval */ void *value;
1842};
1843#endif
1844int
1845sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1846{
1847 union sigval sv;
1848
1849 sv.sival_ptr = uap->value;
1850
1851 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
1852}
1853
1854int
1855kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value)
1856{
1857 ksiginfo_t ksi;
1858 struct proc *p;
1859 int error;
1860
1861     if ((u_int)signum > _SIG_MAXSIG)
1862         return (EINVAL);
1863
1864     /*
1865      * Specification says sigqueue can only send signal to
1866      * single process.
1867      */
1868     if (pid <= 0)
1869         return (EINVAL);
1870
1871     if ((p = pfind_any(pid)) == NULL)
1872         return (ESRCH);
1873     error = p_cansignal(td, p, signum);
1874     if (error == 0 && signum != 0) {
1875         ksiginfo_init(&ksi);
1876         ksi.ksi_flags = KSI_SIGQ;
1877         ksi.ksi_signo = signum;
1878         ksi.ksi_code = SI_QUEUE;
1879         ksi.ksi_pid = td->td_proc->p_pid;
1880         ksi.ksi_uid = td->td_ucred->cr_ruid;
1881         ksi.ksi_value = *value;
1882         error = pksignal(p, ksi.ksi_signo, &ksi);
1883     }
1884     PROC_UNLOCK(p);
1885 return (error);
1886}
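kern_sigqueue() queues a KSI_SIGQ ksiginfo carrying the caller's union sigval, which the receiver observes as si_value with si_code == SI_QUEUE. A small userland sketch of the matching sigqueue(2)/sigaction(2) usage (illustrative; the handler and value are arbitrary):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_value;

static void
handler(int sig, siginfo_t *si, void *ctx)
{
    got_value = si->si_value.sival_int;    /* value queued by the sender */
}

int
main(void)
{
    struct sigaction sa;
    union sigval sv;

    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);

    sv.sival_int = 42;
    sigqueue(getpid(), SIGUSR1, sv);       /* enters kern_sigqueue() above */
    /* the signal to self is delivered on return from the syscall */
    printf("queued value was %d\n", (int)got_value);
    return (0);
}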
1887
1888/*
1889 * Send a signal to a process group.
1890 */
1891void
1892gsignal(int pgid, int sig, ksiginfo_t *ksi)
1893{
1894 struct pgrp *pgrp;
1895
1896 if (pgid != 0) {
1897         sx_slock(&proctree_lock);
1898         pgrp = pgfind(pgid);
1899         sx_sunlock(&proctree_lock);
1900         if (pgrp != NULL) {
1901             pgsignal(pgrp, sig, 0, ksi);
1902             PGRP_UNLOCK(pgrp);
1903         }
1904 }
1905}
1906
1907/*
1908  * Send a signal to a process group.  If checkctty is 1,
1909  * limit to members that have a controlling terminal.
1910 */
1911void
1912pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1913{
1914 struct proc *p;
1915
1916 if (pgrp) {
1917         PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1918         LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1919             PROC_LOCK(p);
1920             if (p->p_state == PRS_NORMAL &&
1921                 (checkctty == 0 || p->p_flag & P_CONTROLT))
1922                 pksignal(p, sig, ksi);
1923             PROC_UNLOCK(p);
1924 }
1925 }
1926}
1927
1928
1929/*
1930 * Recalculate the signal mask and reset the signal disposition after
1931 * usermode frame for delivery is formed. Should be called after
1932 * mach-specific routine, because sysent->sv_sendsig() needs correct
1933 * ps_siginfo and signal mask.
1934 */
1935static void
1936postsig_done(int sig, struct thread *td, struct sigacts *ps)
1937{
1938 sigset_t mask;
1939
1940     mtx_assert(&ps->ps_mtx, MA_OWNED);
1941     td->td_ru.ru_nsignals++;
1942     mask = ps->ps_catchmask[_SIG_IDX(sig)];
1943     if (!SIGISMEMBER(ps->ps_signodefer, sig))
1944         SIGADDSET(mask, sig);
1945     kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1946         SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1947     if (SIGISMEMBER(ps->ps_sigreset, sig))
1948         sigdflt(ps, sig);
1949}
1950
1951
1952/*
1953 * Send a signal caused by a trap to the current thread. If it will be
1954 * caught immediately, deliver it with correct code. Otherwise, post it
1955 * normally.
1956 */
1957void
1958trapsignal(struct thread *td, ksiginfo_t *ksi)
1959{
1960 struct sigacts *ps;
1961 struct proc *p;
1962 int sig;
1963 int code;
1964
1965 p = td->td_proc;
1966     sig = ksi->ksi_signo;
1967     code = ksi->ksi_code;
1968     KASSERT(_SIG_VALID(sig), ("invalid signal"));
1969
1970     PROC_LOCK(p);
1971     ps = p->p_sigacts;
1972     mtx_lock(&ps->ps_mtx);
1973     if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1974         !SIGISMEMBER(td->td_sigmask, sig)) {
1975 #ifdef KTRACE
1976         if (KTRPOINT(curthread, KTR_PSIG))
1977             ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1978                 &td->td_sigmask, code);
1979 #endif
1980         (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
1981             ksi, &td->td_sigmask);
1982         postsig_done(sig, td, ps);
1983         mtx_unlock(&ps->ps_mtx);
1984     } else {
1985         /*
1986          * Avoid a possible infinite loop if the thread is
1987          * masking the signal or the process is ignoring the
1988          * signal.
1989          */
1990         if (kern_forcesigexit &&
1991             (SIGISMEMBER(td->td_sigmask, sig) ||
1992             ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
1993             SIGDELSET(td->td_sigmask, sig);
1994             SIGDELSET(ps->ps_sigcatch, sig);
1995             SIGDELSET(ps->ps_sigignore, sig);
1996             ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1997         }
1998         mtx_unlock(&ps->ps_mtx);
1999         p->p_code = code;    /* XXX for core dump/debugger */
2000         p->p_sig = sig;      /* XXX to verify code */
2001         tdsendsignal(p, td, sig, ksi);
2002     }
2003     PROC_UNLOCK(p);
2004}
2005
2006static struct thread *
2007sigtd(struct proc *p, int sig, int prop)
2008{
2009 struct thread *td, *signal_td;
2010
2011     PROC_LOCK_ASSERT(p, MA_OWNED);
2012
2013     /*
2014      * Check if current thread can handle the signal without
2015      * switching context to another thread.
2016      */
2017     if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
2018         return (curthread);
2019     signal_td = NULL;
2020     FOREACH_THREAD_IN_PROC(p, td) {
2021         if (!SIGISMEMBER(td->td_sigmask, sig)) {
2022             signal_td = td;
2023             break;
2024         }
2025     }
2026     if (signal_td == NULL)
2027         signal_td = FIRST_THREAD_IN_PROC(p);
2028     return (signal_td);
2029}
2030
2031/*
2032 * Send the signal to the process. If the signal has an action, the action
2033 * is usually performed by the target process rather than the caller; we add
2034 * the signal to the set of pending signals for the process.
2035 *
2036 * Exceptions:
2037 * o When a stop signal is sent to a sleeping process that takes the
2038 * default action, the process is stopped without awakening it.
2039 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2040 * regardless of the signal action (eg, blocked or ignored).
2041 *
2042 * Other ignored signals are discarded immediately.
2043 *
2044 * NB: This function may be entered from the debugger via the "kill" DDB
2045 * command. There is little that can be done to mitigate the possibly messy
2046 * side effects of this unwise possibility.
2047 */
2048void
2049kern_psignal(struct proc *p, int sig)
2050{
2051 ksiginfo_t ksi;
2052
2053     ksiginfo_init(&ksi);
2054     ksi.ksi_signo = sig;
2055     ksi.ksi_code = SI_KERNEL;
2056     (void) tdsendsignal(p, NULL, sig, &ksi);
2057}
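kern_psignal() and pksignal() both funnel into tdsendsignal(), which asserts that the process lock is held, so in-kernel callers follow the lock/signal/unlock pattern also visible in killproc() further down. A minimal sketch (hypothetical helper; pfind() returns the process locked):

/* Hypothetical helper: post SIGTERM to a process found by pid. */
static int
signal_pid_sketch(pid_t pid)
{
    struct proc *p;

    if ((p = pfind(pid)) == NULL)    /* returns with PROC_LOCK(p) held */
        return (ESRCH);
    kern_psignal(p, SIGTERM);        /* builds an SI_KERNEL ksiginfo */
    PROC_UNLOCK(p);
    return (0);
}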
2058
2059int
2060pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2061{
2062
2063     return (tdsendsignal(p, NULL, sig, ksi));
2064}
2065
2066/* Utility function for finding a thread to send signal event to. */
2067int
2068sigev_findtd(struct proc *p ,struct sigevent *sigev, struct thread **ttd)
2069{
2070 struct thread *td;
2071
2072     if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2073         td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2074         if (td == NULL)
2075             return (ESRCH);
2076         *ttd = td;
2077     } else {
2078         *ttd = NULL;
2079         PROC_LOCK(p);
2080     }
2081 return (0);
2082}
2083
2084void
2085tdsignal(struct thread *td, int sig)
2086{
2087 ksiginfo_t ksi;
2088
2089     ksiginfo_init(&ksi);
2090     ksi.ksi_signo = sig;
2091     ksi.ksi_code = SI_KERNEL;
2092 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2093}
2094
2095void
2096tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2097{
2098
2099 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2100}
2101
2102int
2103tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2104{
2105 sig_t action;
2106 sigqueue_t *sigqueue;
2107 int prop;
2108 struct sigacts *ps;
2109 int intrval;
2110 int ret = 0;
2111 int wakeup_swapper;
2112
2113     MPASS(td == NULL || p == td->td_proc);
2114     PROC_LOCK_ASSERT(p, MA_OWNED);
2115
2116     if (!_SIG_VALID(sig))
2117         panic("%s(): invalid signal %d", __func__, sig);
2118
2119     KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2120
2121     /*
2122      * IEEE Std 1003.1-2001: return success when killing a zombie.
2123      */
2124     if (p->p_state == PRS_ZOMBIE) {
2125         if (ksi && (ksi->ksi_flags & KSI_INS))
2126             ksiginfo_tryfree(ksi);
2127         return (ret);
2128     }
2129
2130     ps = p->p_sigacts;
2131     KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2132     prop = sigprop(sig);
2133
2134     if (td == NULL) {
2135         td = sigtd(p, sig, prop);
2136         sigqueue = &p->p_sigqueue;
2137     } else
2138         sigqueue = &td->td_sigqueue;
2139
2140     SDT_PROBE3(proc, , , signal__send, td, p, sig);
2141
2142     /*
2143      * If the signal is being ignored,
2144      * then we forget about it immediately.
2145      * (Note: we don't set SIGCONT in ps_sigignore,
2146      * and if it is set to SIG_IGN,
2147      * action will be SIG_DFL here.)
2148      */
2149     mtx_lock(&ps->ps_mtx);
2150     if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2151         SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2152
2153         mtx_unlock(&ps->ps_mtx);
2154         if (ksi && (ksi->ksi_flags & KSI_INS))
2155             ksiginfo_tryfree(ksi);
2156         return (ret);
2157     }
2158     if (SIGISMEMBER(td->td_sigmask, sig))
2159         action = SIG_HOLD;
2160     else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2161         action = SIG_CATCH;
2162     else
2163         action = SIG_DFL;
2164     if (SIGISMEMBER(ps->ps_sigintr, sig))
2165         intrval = EINTR;
2166     else
2167         intrval = ERESTART;
2168     mtx_unlock(&ps->ps_mtx);
2169
2170     if (prop & SIGPROP_CONT)
2171         sigqueue_delete_stopmask_proc(p);
2172     else if (prop & SIGPROP_STOP) {
2173         /*
2174          * If sending a tty stop signal to a member of an orphaned
2175          * process group, discard the signal here if the action
2176          * is default; don't stop the process below if sleeping,
2177          * and don't clear any pending SIGCONT.
2178          */
2179         if ((prop & SIGPROP_TTYSTOP) &&
2180             (p->p_pgrp->pg_jobc == 0) &&
2181             (action == SIG_DFL)) {
2182             if (ksi && (ksi->ksi_flags & KSI_INS))
2183                 ksiginfo_tryfree(ksi);
2184             return (ret);
2185         }
2186         sigqueue_delete_proc(p, SIGCONT);
2187         if (p->p_flag & P_CONTINUED) {
2188             p->p_flag &= ~P_CONTINUED;
2189             PROC_LOCK(p->p_pptr);
2190             sigqueue_take(p->p_ksi);
2191             PROC_UNLOCK(p->p_pptr);
2192         }
2193     }
2194
2195     ret = sigqueue_add(sigqueue, sig, ksi);
2196     if (ret != 0)
2197         return (ret);
2198     signotify(td);
2199     /*
2200      * Defer further processing for signals which are held,
2201      * except that stopped processes must be continued by SIGCONT.
2202      */
2203     if (action == SIG_HOLD &&
2204         !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2205         return (ret);
2206
2207     /* SIGKILL: Remove procfs STOPEVENTs. */
2208     if (sig == SIGKILL) {
2209         /* from procfs_ioctl.c: PIOCBIC */
2210         p->p_stops = 0;
2211         /* from procfs_ioctl.c: PIOCCONT */
2212         p->p_step = 0;
2213         wakeup(&p->p_step);
2214     }
2215 /*
2216 * Some signals have a process-wide effect and a per-thread
2217 * component. Most processing occurs when the process next
2218 * tries to cross the user boundary, however there are some
2219 * times when processing needs to be done immediately, such as
2220 * waking up threads so that they can cross the user boundary.
2221 * We try to do the per-process part here.
2222 */
2223 if (P_SHOULDSTOP(p)((p)->p_flag & (0x20000|0x80000|0x40000))) {
2224 KASSERT(!(p->p_flag & P_WEXIT),do { if (__builtin_expect((!(!(p->p_flag & 0x02000))),
0)) panic ("signal to stopped but exiting process"); } while
(0)
2225 ("signal to stopped but exiting process"))do { if (__builtin_expect((!(!(p->p_flag & 0x02000))),
0)) panic ("signal to stopped but exiting process"); } while
(0)
;
2226 if (sig == SIGKILL9) {
2227 /*
2228 * If traced process is already stopped,
2229 * then no further action is necessary.
2230 */
2231 if (p->p_flag & P_TRACED0x00800)
2232 goto out;
2233 /*
2234 * SIGKILL sets process running.
2235 * It will die elsewhere.
2236 * All threads must be restarted.
2237 */
2238 p->p_flag &= ~P_STOPPED_SIG0x20000;
2239 goto runfast;
2240 }
2241
2242 if (prop & SIGPROP_CONT0x20) {
2243 /*
2244 * If traced process is already stopped,
2245 * then no further action is necessary.
2246 */
2247 if (p->p_flag & P_TRACED0x00800)
2248 goto out;
2249 /*
2250 * If SIGCONT is default (or ignored), we continue the
2251 * process but don't leave the signal in sigqueue as
2252 * it has no further action. If SIGCONT is held, we
2253 * continue the process and leave the signal in
2254 * sigqueue. If the process catches SIGCONT, let it
2255 * handle the signal itself. If it isn't waiting on
2256 * an event, it goes back to run state.
2257 * Otherwise, process goes back to sleep state.
2258 */
2259 p->p_flag &= ~P_STOPPED_SIG0x20000;
2260 PROC_SLOCK(p)__mtx_lock_spin_flags(&((((&(p)->p_slock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2260))
;
2261 if (p->p_numthreads == p->p_suspcount) {
2262 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2262
))
;
2263 p->p_flag |= P_CONTINUED0x10000;
2264 p->p_xsig = SIGCONT19;
2265 PROC_LOCK(p->p_pptr)__mtx_lock_flags(&((((&(p->p_pptr)->p_mtx))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2265
))
;
2266 childproc_continued(p);
2267 PROC_UNLOCK(p->p_pptr)__mtx_unlock_flags(&((((&(p->p_pptr)->p_mtx))))
->mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (
2267))
;
2268 PROC_SLOCK(p)__mtx_lock_spin_flags(&((((&(p)->p_slock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2268))
;
2269 }
2270 if (action == SIG_DFL((__sighandler_t *)0)) {
2271 thread_unsuspend(p);
2272 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2272
))
;
2273 sigqueue_delete(sigqueue, sig);
2274 goto out;
2275 }
2276 if (action == SIG_CATCH((__sighandler_t *)2)) {
2277 /*
2278 * The process wants to catch it so it needs
2279 * to run at least one thread, but which one?
2280 */
2281 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2281
))
;
2282 goto runfast;
2283 }
2284 /*
2285 * The signal is not ignored or caught.
2286 */
2287 thread_unsuspend(p);
2288 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2288
))
;
2289 goto out;
2290 }
2291
2292         if (prop & SIGPROP_STOP) {
2293             /*
2294              * If the traced process is already stopped,
2295              * then no further action is necessary.
2296              */
2297             if (p->p_flag & P_TRACED)
2298                 goto out;
2299             /*
2300              * Already stopped, no need to stop again
2301              * (if we did, the shell could get confused).
2302              * Just make sure the signal STOP bit is set.
2303              */
2304             p->p_flag |= P_STOPPED_SIG;
2305             sigqueue_delete(sigqueue, sig);
2306             goto out;
2307         }
2308
2309 /*
2310 * All other kinds of signals:
2311 * If a thread is sleeping interruptibly, simulate a
2312 * wakeup so that when it is continued it will be made
2313 * runnable and can look at the signal. However, don't make
2314 * the PROCESS runnable, leave it stopped.
2315 * It may run a bit until it hits a thread_suspend_check().
2316 */
2317 wakeup_swapper = 0;
2318 PROC_SLOCK(p)__mtx_lock_spin_flags(&((((&(p)->p_slock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2318))
;
2319 thread_lock(td)_thread_lock((td), 0, "/root/freebsd/sys/kern/kern_sig.c", 2319
)
;
2320 if (TD_ON_SLEEPQ(td)((td)->td_wchan != ((void *)0)) && (td->td_flags & TDF_SINTR0x00000008))
2321 wakeup_swapper = sleepq_abort(td, intrval);
2322 thread_unlock(td)__mtx_unlock_spin_flags(&(((((td)->td_lock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2322))
;
2323 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2323
))
;
2324 if (wakeup_swapper)
2325 kick_proc0();
2326 goto out;
2327 /*
2328 * Mutexes are short lived. Threads waiting on them will
2329 * hit thread_suspend_check() soon.
2330 */
2331 } else if (p->p_state == PRS_NORMAL) {
2332 if (p->p_flag & P_TRACED0x00800 || action == SIG_CATCH((__sighandler_t *)2)) {
2333 tdsigwakeup(td, sig, action, intrval);
2334 goto out;
2335 }
2336
2337 MPASS(action == SIG_DFL)do { if (__builtin_expect((!((action == ((__sighandler_t *)0)
))), 0)) panic ("Assertion %s failed at %s:%d", "action == SIG_DFL"
, "/root/freebsd/sys/kern/kern_sig.c", 2337); } while (0)
;
2338
2339 if (prop & SIGPROP_STOP0x04) {
2340 if (p->p_flag & (P_PPWAIT0x00010|P_WEXIT0x02000))
2341 goto out;
2342 p->p_flag |= P_STOPPED_SIG0x20000;
2343 p->p_xsig = sig;
2344 PROC_SLOCK(p)__mtx_lock_spin_flags(&((((&(p)->p_slock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2344))
;
2345 wakeup_swapper = sig_suspend_threads(td, p, 1);
2346 if (p->p_numthreads == p->p_suspcount) {
2347 /*
2348              * Only a thread sending a signal to another
2349              * process can reach here; if a thread sends a
2350              * signal to its own process, p_numthreads should
2351              * never equal p_suspcount, because the thread
2352              * does not suspend itself here.
2353 */
2354 thread_stopped(p);
2355 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2355
))
;
2356 sigqueue_delete_proc(p, p->p_xsig);
2357 } else
2358 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2358
))
;
2359 if (wakeup_swapper)
2360 kick_proc0();
2361 goto out;
2362 }
2363 } else {
2364         /* Not in "NORMAL" state.  Discard the signal. */
2365 sigqueue_delete(sigqueue, sig);
2366 goto out;
2367 }
2368
2369 /*
2370 * The process is not stopped so we need to apply the signal to all the
2371 * running threads.
2372 */
2373runfast:
2374 tdsigwakeup(td, sig, action, intrval);
2375     PROC_SLOCK(p);
2376     thread_unsuspend(p);
2377     PROC_SUNLOCK(p);
2378 out:
2379     /* If we jump here, proc slock should not be owned. */
2380     PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2381 return (ret);
2382}
2383
2384/*
2385 * The force of a signal has been directed against a single
2386 * thread. We need to see what we can do about knocking it
2387 * out of any sleep it may be in etc.
2388 */
2389static void
2390tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2391{
2392 struct proc *p = td->td_proc;
2393 int prop;
2394 int wakeup_swapper;
2395
2396 wakeup_swapper = 0;
2397     PROC_LOCK_ASSERT(p, MA_OWNED);
2398     prop = sigprop(sig);
2399
2400     PROC_SLOCK(p);
2401     thread_lock(td);
2402     /*
2403      * Bring the priority of a thread up if we want it to get
2404      * killed in this lifetime.  Be careful to avoid bumping the
2405      * priority of the idle thread, since we still allow to signal
2406      * kernel processes.
2407      */
2408     if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2409         td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2410         sched_prio(td, PUSER);
2411 if (TD_ON_SLEEPQ(td)((td)->td_wchan != ((void *)0))) {
2412 /*
2413 * If thread is sleeping uninterruptibly
2414 * we can't interrupt the sleep... the signal will
2415 * be noticed when the process returns through
2416 * trap() or syscall().
2417 */
2418 if ((td->td_flags & TDF_SINTR0x00000008) == 0)
2419 goto out;
2420 /*
2421 * If SIGCONT is default (or ignored) and process is
2422 * asleep, we are finished; the process should not
2423 * be awakened.
2424 */
2425 if ((prop & SIGPROP_CONT0x20) && action == SIG_DFL((__sighandler_t *)0)) {
2426 thread_unlock(td)__mtx_unlock_spin_flags(&(((((td)->td_lock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2426))
;
2427 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2427
))
;
2428 sigqueue_delete(&p->p_sigqueue, sig);
2429 /*
2430 * It may be on either list in this state.
2431 * Remove from both for now.
2432 */
2433 sigqueue_delete(&td->td_sigqueue, sig);
2434 return;
2435 }
2436
2437 /*
2438 * Don't awaken a sleeping thread for SIGSTOP if the
2439 * STOP signal is deferred.
2440 */
2441 if ((prop & SIGPROP_STOP0x04) != 0 && (td->td_flags & (TDF_SBDRY0x00002000 |
2442 TDF_SERESTART0x00080000 | TDF_SEINTR0x00200000)) == TDF_SBDRY0x00002000)
2443 goto out;
2444
2445 /*
2446 * Give low priority threads a better chance to run.
2447 */
2448 if (td->td_priority > PUSER((120)) && !TD_IS_IDLETHREAD(td)((td)->td_flags & 0x00000020))
2449 sched_prio(td, PUSER((120)));
2450
2451 wakeup_swapper = sleepq_abort(td, intrval);
2452 } else {
2453 /*
2454 * Other states do nothing with the signal immediately,
2455 * other than kicking ourselves if we are running.
2456 * It will either never be noticed, or noticed very soon.
2457 */
2458#ifdef SMP1
2459 if (TD_IS_RUNNING(td)((td)->td_state == TDS_RUNNING) && td != curthread(__curthread()))
2460 forward_signal(td);
2461#endif
2462 }
2463out:
2464     PROC_SUNLOCK(p);
2465     thread_unlock(td);
2466 if (wakeup_swapper)
2467 kick_proc0();
2468}
2469
2470static int
2471sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2472{
2473 struct thread *td2;
2474 int wakeup_swapper;
2475
2476     PROC_LOCK_ASSERT(p, MA_OWNED);
2477     PROC_SLOCK_ASSERT(p, MA_OWNED);
2478     MPASS(sending || td == curthread);
2479
2480 wakeup_swapper = 0;
2481 FOREACH_THREAD_IN_PROC(p, td2)for (((td2)) = (((&(p)->p_threads))->tqh_first); ((
td2)); ((td2)) = ((((td2)))->td_plist.tqe_next))
{
2482 thread_lock(td2)_thread_lock((td2), 0, "/root/freebsd/sys/kern/kern_sig.c", 2482
)
;
2483 td2->td_flags |= TDF_ASTPENDING0x00000800 | TDF_NEEDSUSPCHK0x00008000;
2484 if ((TD_IS_SLEEPING(td2)((td2)->td_inhibitors & 0x0002) || TD_IS_SWAPPED(td2)((td2)->td_inhibitors & 0x0004)) &&
2485 (td2->td_flags & TDF_SINTR0x00000008)) {
2486 if (td2->td_flags & TDF_SBDRY0x00002000) {
2487 /*
2488 * Once a thread is asleep with
2489 * TDF_SBDRY and without TDF_SERESTART
2490 * or TDF_SEINTR set, it should never
2491 * become suspended due to this check.
2492 */
2493 KASSERT(!TD_IS_SUSPENDED(td2),do { if (__builtin_expect((!(!((td2)->td_inhibitors & 0x0001
))), 0)) panic ("thread with deferred stops suspended"); } while
(0)
2494 ("thread with deferred stops suspended"))do { if (__builtin_expect((!(!((td2)->td_inhibitors & 0x0001
))), 0)) panic ("thread with deferred stops suspended"); } while
(0)
;
2495 if (TD_SBDRY_INTR(td2)(((td2)->td_flags & (0x00200000 | 0x00080000)) != 0))
2496 wakeup_swapper |= sleepq_abort(td2,
2497 TD_SBDRY_ERRNO(td2)(((td2)->td_flags & 0x00200000) != 0 ? 4 : (-1)));
2498 } else if (!TD_IS_SUSPENDED(td2)((td2)->td_inhibitors & 0x0001)) {
2499 thread_suspend_one(td2);
2500 }
2501 } else if (!TD_IS_SUSPENDED(td2)((td2)->td_inhibitors & 0x0001)) {
2502 if (sending || td != td2)
2503 td2->td_flags |= TDF_ASTPENDING0x00000800;
2504#ifdef SMP1
2505 if (TD_IS_RUNNING(td2)((td2)->td_state == TDS_RUNNING) && td2 != td)
2506 forward_signal(td2);
2507#endif
2508 }
2509 thread_unlock(td2)__mtx_unlock_spin_flags(&(((((td2)->td_lock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2509))
;
2510 }
2511 return (wakeup_swapper);
2512}
2513
2514/*
2515 * Stop the process for an event deemed interesting to the debugger. If si is
2516 * non-NULL, this is a signal exchange; the new signal requested by the
2517 * debugger will be returned for handling. If si is NULL, this is some other
2518 * type of interesting event. The debugger may request a signal be delivered in
2519 * that case as well, however it will be deferred until it can be handled.
2520 */
2521int
2522ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2523{
2524 struct proc *p = td->td_proc;
2525 struct thread *td2;
2526 ksiginfo_t ksi;
2527 int prop;
2528
2529     PROC_LOCK_ASSERT(p, MA_OWNED);
2530     KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2531     WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2532         &p->p_mtx.lock_object, "Stopping for traced signal");
2533
2534 td->td_xsig = sig;
2535
2536 if (si == NULL((void *)0) || (si->ksi_flags & KSI_PTRACE0x20) == 0) {
2537 td->td_dbgflags |= TDB_XSIG0x00000002;
2538 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",(void)0
2539 td->td_tid, p->p_pid, td->td_dbgflags, sig)(void)0;
2540 PROC_SLOCK(p)__mtx_lock_spin_flags(&((((&(p)->p_slock))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2540))
;
2541 while ((p->p_flag & P_TRACED0x00800) && (td->td_dbgflags & TDB_XSIG0x00000002)) {
2542 if (P_KILLED(p)((p)->p_flag & 0x08000)) {
2543 /*
2544 * Ensure that, if we've been PT_KILLed, the
2545 * exit status reflects that. Another thread
2546 * may also be in ptracestop(), having just
2547 * received the SIGKILL, but this thread was
2548 * unsuspended first.
2549 */
2550 td->td_dbgflags &= ~TDB_XSIG0x00000002;
2551 td->td_xsig = SIGKILL9;
2552 p->p_ptevents = 0;
2553 break;
2554 }
2555 if (p->p_flag & P_SINGLE_EXIT0x00400 &&
2556 !(td->td_dbgflags & TDB_EXIT0x00000400)) {
2557 /*
2558 * Ignore ptrace stops except for thread exit
2559 * events when the process exits.
2560 */
2561 td->td_dbgflags &= ~TDB_XSIG0x00000002;
2562 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2562
))
;
2563 return (0);
2564 }
2565
2566 /*
2567 * Make wait(2) work. Ensure that right after the
2568 * attach, the thread which was decided to become the
2569 * leader of attach gets reported to the waiter.
2570 * Otherwise, just avoid overwriting another thread's
2571 * assignment to p_xthread. If another thread has
2572 * already set p_xthread, the current thread will get
2573 * a chance to report itself upon the next iteration.
2574 */
2575 if ((td->td_dbgflags & TDB_FSTP0x00001000) != 0 ||
2576 ((p->p_flag2 & P2_PTRACE_FSTP0x00000010) == 0 &&
2577 p->p_xthread == NULL((void *)0))) {
2578 p->p_xsig = sig;
2579 p->p_xthread = td;
2580 td->td_dbgflags &= ~TDB_FSTP0x00001000;
2581 p->p_flag2 &= ~P2_PTRACE_FSTP0x00000010;
2582 p->p_flag |= P_STOPPED_SIG0x20000 | P_STOPPED_TRACE0x40000;
2583 sig_suspend_threads(td, p, 0);
2584 }
2585 if ((td->td_dbgflags & TDB_STOPATFORK0x00000080) != 0) {
2586 td->td_dbgflags &= ~TDB_STOPATFORK0x00000080;
2587 }
2588stopme:
2589 thread_suspend_switch(td, p);
2590 if (p->p_xthread == td)
2591 p->p_xthread = NULL((void *)0);
2592 if (!(p->p_flag & P_TRACED0x00800))
2593 break;
2594 if (td->td_dbgflags & TDB_SUSPEND0x00000001) {
2595 if (p->p_flag & P_SINGLE_EXIT0x00400)
2596 break;
2597 goto stopme;
2598 }
2599 }
2600 PROC_SUNLOCK(p)__mtx_unlock_spin_flags(&((((&(p)->p_slock))))->
mtx_lock, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2600
))
;
2601 }
2602
2603 if (si != NULL((void *)0) && sig == td->td_xsig) {
2604 /* Parent wants us to take the original signal unchanged. */
2605 si->ksi_flags |= KSI_HEAD0x10;
2606 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2607 si->ksi_signoksi_info.si_signo = 0;
2608 } else if (td->td_xsig != 0) {
2609 /*
2610 * If parent wants us to take a new signal, then it will leave
2611 * it in td->td_xsig; otherwise we just look for signals again.
2612 */
2613 ksiginfo_init(&ksi)do { __builtin_memset((&ksi), 0, (sizeof(ksiginfo_t))); }
while(0)
;
2614 ksi.ksi_signoksi_info.si_signo = td->td_xsig;
2615 ksi.ksi_flags |= KSI_PTRACE0x20;
2616 prop = sigprop(td->td_xsig);
2617 td2 = sigtd(p, td->td_xsig, prop);
2618 tdsendsignal(p, td2, td->td_xsig, &ksi);
2619 if (td != td2)
2620 return (0);
2621 }
2622
2623 return (td->td_xsig);
2624}
2625
2626static void
2627reschedule_signals(struct proc *p, sigset_t block, int flags)
2628{
2629 struct sigacts *ps;
2630 struct thread *td;
2631 int sig;
2632
2633 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (2633))
;
2634 ps = p->p_sigacts;
2635 mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ?__mtx_assert(&(((&ps->ps_mtx)))->mtx_lock, (((flags
& 0x0004) != 0 ? 0x00000004 : 0x00000000)), ("/root/freebsd/sys/kern/kern_sig.c"
), (2636))
2636 MA_OWNED : MA_NOTOWNED)__mtx_assert(&(((&ps->ps_mtx)))->mtx_lock, (((flags
& 0x0004) != 0 ? 0x00000004 : 0x00000000)), ("/root/freebsd/sys/kern/kern_sig.c"
), (2636))
;
2637 if (SIGISEMPTY(p->p_siglist)(__sigisempty(&(p->p_sigqueue.sq_signals))))
2638 return;
2639 SIGSETAND(block, p->p_siglist)do { int __i; for (__i = 0; __i < 4; __i++) (block).__bits
[__i] &= (p->p_sigqueue.sq_signals).__bits[__i]; } while
(0)
;
2640 while ((sig = sig_ffs(&block)) != 0) {
2641 SIGDELSET(block, sig)((block).__bits[(((sig) - 1) >> 5)] &= ~(1 <<
(((sig) - 1) & 31)))
;
2642 td = sigtd(p, sig, 0);
2643 signotify(td);
2644 if (!(flags & SIGPROCMASK_PS_LOCKED0x0004))
2645 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2645))
;
2646 if (p->p_flag & P_TRACED0x00800 ||
2647 (SIGISMEMBER(ps->ps_sigcatch, sig)((ps->ps_sigcatch).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
&&
2648 !SIGISMEMBER(td->td_sigmask, sig)((td->td_sigmask).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
))
2649 tdsigwakeup(td, sig, SIG_CATCH((__sighandler_t *)2),
2650 (SIGISMEMBER(ps->ps_sigintr, sig)((ps->ps_sigintr).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
? EINTR4 :
2651 ERESTART(-1)));
2652 if (!(flags & SIGPROCMASK_PS_LOCKED0x0004))
2653 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2653))
;
2654 }
2655}
2656
2657void
2658tdsigcleanup(struct thread *td)
2659{
2660 struct proc *p;
2661 sigset_t unblocked;
2662
2663 p = td->td_proc;
2664 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (2664))
;
2665
2666 sigqueue_flush(&td->td_sigqueue);
2667 if (p->p_numthreads == 1)
2668 return;
2669
2670 /*
2671 * Since we cannot handle signals, notify signal post code
2672 * about this by filling the sigmask.
2673 *
2674 * Also, if needed, wake up thread(s) that do not block the
2675 * same signals as the exiting thread, since the thread might
2676 * have been selected for delivery and woken up.
2677 */
2678     SIGFILLSET(unblocked);
2679     SIGSETNAND(unblocked, td->td_sigmask);
2680     SIGFILLSET(td->td_sigmask);
2681 reschedule_signals(p, unblocked, 0);
2682
2683}
2684
2685static int
2686sigdeferstop_curr_flags(int cflags)
2687{
2688
2689     MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
2690         (cflags & TDF_SBDRY) != 0);
2691     return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
2692}
2693
2694/*
2695 * Defer the delivery of SIGSTOP for the current thread, according to
2696 * the requested mode. Returns previous flags, which must be restored
2697 * by sigallowstop().
2698 *
2699 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
2700 * cleared by the current thread, which allow the lock-less read-only
2701 * accesses below.
2702 */
2703int
2704sigdeferstop_impl(int mode)
2705{
2706 struct thread *td;
2707 int cflags, nflags;
2708
2709     td = curthread;
2710     cflags = sigdeferstop_curr_flags(td->td_flags);
2711     switch (mode) {
2712     case SIGDEFERSTOP_NOP:
2713         nflags = cflags;
2714         break;
2715     case SIGDEFERSTOP_OFF:
2716         nflags = 0;
2717         break;
2718     case SIGDEFERSTOP_SILENT:
2719         nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
2720         break;
2721     case SIGDEFERSTOP_EINTR:
2722         nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
2723         break;
2724     case SIGDEFERSTOP_ERESTART:
2725         nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
2726         break;
2727     default:
2728         panic("sigdeferstop: invalid mode %x", mode);
2729         break;
2730     }
2731     if (cflags == nflags)
2732         return (SIGDEFERSTOP_VAL_NCHG);
2733     thread_lock(td);
2734     td->td_flags = (td->td_flags & ~cflags) | nflags;
2735     thread_unlock(td);
2736 return (cflags);
2737}
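The contract spelled out above is that the value returned here must be handed back to sigallowstop(). A sketch of the usual bracket around an operation that should not be suspended by a stop signal midway (the VOP_READ() call is just a placeholder for the protected operation):

/* Defer stop signals while resources are held, then restore the saved mode. */
static int
read_nostop_sketch(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
    int error, stop;

    stop = sigdeferstop(SIGDEFERSTOP_SILENT);   /* save previous TDF_* flags */
    error = VOP_READ(vp, uio, 0, cred);         /* placeholder operation */
    sigallowstop(stop);                         /* restore the saved mode */
    return (error);
}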
2738
2739/*
2740 * Restores the STOP handling mode, typically permitting the delivery
2741 * of SIGSTOP for the current thread. This does not immediately
2742 * suspend if a stop was posted. Instead, the thread will suspend
2743 * either via ast() or a subsequent interruptible sleep.
2744 */
2745void
2746sigallowstop_impl(int prev)
2747{
2748 struct thread *td;
2749 int cflags;
2750
2751     KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
2752     KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
2753         ("sigallowstop: incorrect previous mode %x", prev));
2754     td = curthread;
2755     cflags = sigdeferstop_curr_flags(td->td_flags);
2756     if (cflags != prev) {
2757         thread_lock(td);
2758         td->td_flags = (td->td_flags & ~cflags) | prev;
2759         thread_unlock(td);
2760     }
2761}
2762
2763/*
2764 * If the current process has received a signal (should be caught or cause
2765 * termination, should interrupt current syscall), return the signal number.
2766 * Stop signals with default action are processed immediately, then cleared;
2767 * they aren't returned. This is checked after each entry to the system for
2768 * a syscall or trap (though this can usually be done without calling issignal
2769 * by checking the pending signal masks in cursig.) The normal call
2770 * sequence is
2771 *
2772 * while (sig = cursig(curthread))
2773 * postsig(sig);
2774 */
2775static int
2776issignal(struct thread *td)
2777{
2778 struct proc *p;
2779 struct sigacts *ps;
2780 struct sigqueue *queue;
2781 sigset_t sigpending;
2782 ksiginfo_t ksi;
2783 int prop, sig, traced;
2784
2785 p = td->td_proc;
2786 ps = p->p_sigacts;
2787     mtx_assert(&ps->ps_mtx, MA_OWNED);
2788     PROC_LOCK_ASSERT(p, MA_OWNED);
2789     for (;;) {
2790         traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
2791
2792         sigpending = td->td_sigqueue.sq_signals;
2793         SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2794         SIGSETNAND(sigpending, td->td_sigmask);
2795
2796         if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
2797             (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2798             SIG_STOPSIGMASK(sigpending);
2799         if (SIGISEMPTY(sigpending))    /* no signal to send */
2800             return (0);
2801 if ((p->p_flag & (P_TRACED0x00800 | P_PPTRACE0x80000000)) == P_TRACED0x00800 &&
2802 (p->p_flag2 & P2_PTRACE_FSTP0x00000010) != 0 &&
2803 SIGISMEMBER(sigpending, SIGSTOP)((sigpending).__bits[(((17) - 1) >> 5)] & (1 <<
(((17) - 1) & 31)))
) {
2804 /*
2805 * If debugger just attached, always consume
2806 * SIGSTOP from ptrace(PT_ATTACH) first, to
2807 * execute the debugger attach ritual in
2808 * order.
2809 */
2810 sig = SIGSTOP17;
2811 td->td_dbgflags |= TDB_FSTP0x00001000;
2812 } else {
2813 sig = sig_ffs(&sigpending);
2814 }
2815
2816 if (p->p_stops & S_SIG0x00000002) {
2817 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2817))
;
2818 stopevent(p, S_SIG0x00000002, sig);
2819 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2819))
;
2820 }
2821
2822 /*
2823 * We should see pending but ignored signals
2824 * only if P_TRACED was on when they were posted.
2825 */
2826         if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
2827 sigqueue_delete(&td->td_sigqueue, sig);
2828 sigqueue_delete(&p->p_sigqueue, sig);
2829 continue;
2830 }
2831 if ((p->p_flag & (P_TRACED0x00800 | P_PPTRACE0x80000000)) == P_TRACED0x00800) {
2832 /*
2833 * If traced, always stop.
2834 * Remove old signal from queue before the stop.
2835 * XXX shrug off debugger, it causes siginfo to
2836 * be thrown away.
2837 */
2838 queue = &td->td_sigqueue;
2839 ksiginfo_init(&ksi)do { __builtin_memset((&ksi), 0, (sizeof(ksiginfo_t))); }
while(0)
;
2840 if (sigqueue_get(queue, sig, &ksi) == 0) {
2841 queue = &p->p_sigqueue;
2842 sigqueue_get(queue, sig, &ksi);
2843 }
2844 td->td_si = ksi.ksi_info;
2845
2846 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2846))
;
2847 sig = ptracestop(td, sig, &ksi);
2848 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (2848))
;
2849
2850 td->td_si.si_signo = 0;
2851
2852 /*
2853 * Keep looking if the debugger discarded or
2854 * replaced the signal.
2855 */
2856 if (sig == 0)
2857 continue;
2858
2859 /*
2860 * If the signal became masked, re-queue it.
2861 */
2862 if (SIGISMEMBER(td->td_sigmask, sig)((td->td_sigmask).__bits[(((sig) - 1) >> 5)] & (
1 << (((sig) - 1) & 31)))
) {
2863 ksi.ksi_flags |= KSI_HEAD0x10;
2864 sigqueue_add(&p->p_sigqueue, sig, &ksi);
2865 continue;
2866 }
2867
2868 /*
2869 * If the traced bit got turned off, requeue
2870 * the signal and go back up to the top to
2871 * rescan signals. This ensures that p_sig*
2872 * and p_sigact are consistent.
2873 */
2874 if ((p->p_flag & P_TRACED0x00800) == 0) {
2875 ksi.ksi_flags |= KSI_HEAD0x10;
2876 sigqueue_add(queue, sig, &ksi);
2877 continue;
2878 }
2879 }
2880
2881 prop = sigprop(sig);
2882
2883 /*
2884 * Decide whether the signal should be returned.
2885 * Return the signal's number, or fall through
2886 * to clear it from the pending mask.
2887 */
2888 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)((sig) - 1)]) {
2889
2890 case (intptr_t)SIG_DFL((__sighandler_t *)0):
2891 /*
2892 * Don't take default actions on system processes.
2893 */
2894 if (p->p_pid <= 1) {
2895#ifdef DIAGNOSTIC
2896 /*
2897 * Are you sure you want to ignore SIGSEGV
2898 * in init? XXX
2899 */
2900 printf("Process (pid %lu) got signal %d\n",
2901 (u_long)p->p_pid, sig);
2902#endif
2903 break; /* == ignore */
2904 }
2905 /*
2906 * If there is a pending stop signal to process with
2907 * default action, stop here, then clear the signal.
2908 * Traced or exiting processes should ignore stops.
2909 * Additionally, a member of an orphaned process group
2910 * should ignore tty stops.
2911 */
2912             if (prop & SIGPROP_STOP) {
2913                 if (p->p_flag &
2914                     (P_TRACED | P_WEXIT | P_SINGLE_EXIT) ||
2915                     (p->p_pgrp->pg_jobc == 0 &&
2916                     prop & SIGPROP_TTYSTOP))
2917                     break;    /* == ignore */
2918                 if (TD_SBDRY_INTR(td)) {
2919                     KASSERT((td->td_flags & TDF_SBDRY) != 0,
2920                         ("lost TDF_SBDRY"));
2921                     return (-1);
2922                 }
2923                 mtx_unlock(&ps->ps_mtx);
2924                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2925                     &p->p_mtx.lock_object, "Catching SIGSTOP");
2926                 sigqueue_delete(&td->td_sigqueue, sig);
2927                 sigqueue_delete(&p->p_sigqueue, sig);
2928                 p->p_flag |= P_STOPPED_SIG;
2929                 p->p_xsig = sig;
2930                 PROC_SLOCK(p);
2931                 sig_suspend_threads(td, p, 0);
2932                 thread_suspend_switch(td, p);
2933                 PROC_SUNLOCK(p);
2934                 mtx_lock(&ps->ps_mtx);
2935                 goto next;
2936 } else if (prop & SIGPROP_IGNORE0x10) {
2937 /*
2938 * Except for SIGCONT, shouldn't get here.
2939 * Default action is to ignore; drop it.
2940 */
2941 break; /* == ignore */
2942 } else
2943 return (sig);
2944 /*NOTREACHED*/
2945
2946 case (intptr_t)SIG_IGN((__sighandler_t *)1):
2947 /*
2948 * Masking above should prevent us ever trying
2949 * to take action on an ignored signal other
2950 * than SIGCONT, unless process is traced.
2951 */
2952 if ((prop & SIGPROP_CONT0x20) == 0 &&
2953 (p->p_flag & P_TRACED0x00800) == 0)
2954 printf("issignal\n");
2955 break; /* == ignore */
2956
2957 default:
2958 /*
2959 * This signal has an action, let
2960 * postsig() process it.
2961 */
2962 return (sig);
2963 }
2964 sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
2965 sigqueue_delete(&p->p_sigqueue, sig);
2966next:;
2967 }
2968 /* NOTREACHED */
2969}
2970
2971void
2972thread_stopped(struct proc *p)
2973{
2974 int n;
2975
2976     PROC_LOCK_ASSERT(p, MA_OWNED);
2977     PROC_SLOCK_ASSERT(p, MA_OWNED);
2978     n = p->p_suspcount;
2979     if (p == curproc)
2980         n++;
2981     if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2982         PROC_SUNLOCK(p);
2983         p->p_flag &= ~P_WAITED;
2984         PROC_LOCK(p->p_pptr);
2985         childproc_stopped(p, (p->p_flag & P_TRACED) ?
2986             CLD_TRAPPED : CLD_STOPPED);
2987         PROC_UNLOCK(p->p_pptr);
2988         PROC_SLOCK(p);
2989 }
2990}
2991
2992/*
2993 * Take the action for the specified signal
2994 * from the current set of pending signals.
2995 */
2996int
2997postsig(int sig)
2998{
2999 struct thread *td;
3000 struct proc *p;
3001 struct sigacts *ps;
3002 sig_t action;
3003 ksiginfo_t ksi;
3004 sigset_t returnmask;
3005
3006     KASSERT(sig != 0, ("postsig"));
3007
3008     td = curthread;
3009     p = td->td_proc;
3010     PROC_LOCK_ASSERT(p, MA_OWNED);
3011     ps = p->p_sigacts;
3012     mtx_assert(&ps->ps_mtx, MA_OWNED);
3013     ksiginfo_init(&ksi);
3014 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3015 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3016 return (0);
3017 ksi.ksi_signoksi_info.si_signo = sig;
3018 if (ksi.ksi_codeksi_info.si_code == SI_TIMER0x10003)
3019 itimer_accept(p, ksi.ksi_timeridksi_info._reason._timer._timerid, &ksi);
3020 action = ps->ps_sigact[_SIG_IDX(sig)((sig) - 1)];
3021#ifdef KTRACE1
3022 if (KTRPOINT(td, KTR_PSIG)(__builtin_expect(((((td))->td_proc->p_traceflag & (
1 << (5)))), 0))
)
3023 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK0x00000001 ?
3024 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_codeksi_info.si_code);
3025#endif
3026 if ((p->p_stops & S_SIG0x00000002) != 0) {
3027 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (3027))
;
3028 stopevent(p, S_SIG0x00000002, sig);
3029 mtx_lock(&ps->ps_mtx)__mtx_lock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (3029))
;
3030 }
3031
3032 if (action == SIG_DFL((__sighandler_t *)0)) {
3033 /*
3034 * Default action, where the default is to kill
3035 * the process. (Other cases were ignored above.)
3036 */
3037 mtx_unlock(&ps->ps_mtx)__mtx_unlock_flags(&((((&ps->ps_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/kern/kern_sig.c"), (3037))
;
3038 proc_td_siginfo_capture(td, &ksi.ksi_info);
3039 sigexit(td, sig);
3040 /* NOTREACHED */
3041 } else {
3042 /*
3043 * If we get here, the signal must be caught.
3044 */
3045 KASSERT(action != SIG_IGN, ("postsig action %p", action))do { if (__builtin_expect((!(action != ((__sighandler_t *)1))
), 0)) panic ("postsig action %p", action); } while (0)
;
3046 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),do { if (__builtin_expect((!(!((td->td_sigmask).__bits[(((
sig) - 1) >> 5)] & (1 << (((sig) - 1) & 31
))))), 0)) panic ("postsig action: blocked sig %d", sig); } while
(0)
3047 ("postsig action: blocked sig %d", sig))do { if (__builtin_expect((!(!((td->td_sigmask).__bits[(((
sig) - 1) >> 5)] & (1 << (((sig) - 1) & 31
))))), 0)) panic ("postsig action: blocked sig %d", sig); } while
(0)
;
3048
3049 /*
3050 * Set the new mask value and also defer further
3051 * occurrences of this signal.
3052 *
3053 * Special case: user has done a sigsuspend. Here the
3054 * current mask is not of interest, but rather the
3055 * mask from before the sigsuspend is what we want
3056 * restored after the signal processing is completed.
3057 */
3058 if (td->td_pflags & TDP_OLDMASK0x00000001) {
3059 returnmask = td->td_oldsigmask;
3060 td->td_pflags &= ~TDP_OLDMASK0x00000001;
3061 } else
3062 returnmask = td->td_sigmask;
3063
3064 if (p->p_sig == sig) {
3065 p->p_code = 0;
3066 p->p_sig = 0;
3067 }
3068 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3069 postsig_done(sig, td, ps);
3070 }
3071 return (1);
3072}
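postsig() asserts that both the proc lock and ps_mtx are held, so the delivery loop sketched in the issignal() comment runs with both locks, roughly as on the return path to user mode (a sketch of the caller side, not the verbatim MD code):

/* Sketch: drain unmasked pending signals on the way back to user mode. */
static void
deliver_pending_sketch(struct thread *td)
{
    struct proc *p = td->td_proc;
    int sig;

    PROC_LOCK(p);
    mtx_lock(&p->p_sigacts->ps_mtx);
    while ((sig = cursig(td)) != 0)
        postsig(sig);                  /* may not return for fatal signals */
    mtx_unlock(&p->p_sigacts->ps_mtx);
    PROC_UNLOCK(p);
}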
3073
3074void
3075proc_wkilled(struct proc *p)
3076{
3077
3078     PROC_LOCK_ASSERT(p, MA_OWNED);
3079     if ((p->p_flag & P_WKILLED) == 0) {
3080         p->p_flag |= P_WKILLED;
3081         /*
3082          * Notify swapper that there is a process to swap in.
3083          * The notification is racy, at worst it would take 10
3084          * seconds for the swapper process to notice.
3085          */
3086         if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
3087             wakeup(&proc0);
3088     }
3089}
3090
3091/*
3092 * Kill the current process for stated reason.
3093 */
3094void
3095killproc(struct proc *p, char *why)
3096{
3097
3098 PROC_LOCK_ASSERT(p, MA_OWNED)__mtx_assert(&(((&(p)->p_mtx)))->mtx_lock, (((0x00000004
))), ("/root/freebsd/sys/kern/kern_sig.c"), (3098))
;
3099 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,(void)0
3100 p->p_comm)(void)0;
3101 log(LOG_ERR3, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid,
3102 p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why);
3103 proc_wkilled(p);
3104 kern_psignal(p, SIGKILL);
3105}
3106
3107/*
3108 * Force the current process to exit with the specified signal, dumping core
3109 * if appropriate. We bypass the normal tests for masked and caught signals,
3110 * allowing unrecoverable failures to terminate the process without changing
3111 * signal state. Mark the accounting record with the signal termination.
3112 * If dumping core, save the signal number for the debugger. Calls exit and
3113 * does not return.
3114 */
3115void
3116sigexit(struct thread *td, int sig)
3117{
3118 struct proc *p = td->td_proc;
3119
3120 PROC_LOCK_ASSERT(p, MA_OWNED);
3121 p->p_acflag |= AXSIG;
3122 /*
3123 * We must be single-threading to generate a core dump. This
3124 * ensures that the registers in the core file are up-to-date.
3125 * Also, the ELF dump handler assumes that the thread list doesn't
3126 * change out from under it.
3127 *
3128 * XXX If another thread attempts to single-thread before us
3129 * (e.g. via fork()), we won't get a dump at all.
3130 */
3131 if ((sigprop(sig) & SIGPROP_CORE) &&
3132 thread_single(p, SINGLE_NO_EXIT) == 0) {
3133 p->p_sig = sig;
3134 /*
3135 * Log signals which would cause core dumps
3136 * (Log as LOG_INFO to appease those who don't want
3137 * these messages.)
3138 * XXX : Todo, as well as euid, write out ruid too
3139 * Note that coredump() drops proc lock.
3140 */
3141 if (coredump(td) == 0)
3142 sig |= WCOREFLAG;
3143 if (kern_logsigexit)
3144 log(LOG_INFO,
3145 "pid %d (%s), uid %d: exited on signal %d%s\n",
3146 p->p_pid, p->p_comm,
3147 td->td_ucred ? td->td_ucred->cr_uid : -1,
3148 sig &~ WCOREFLAG,
3149 sig & WCOREFLAG ? " (core dumped)" : "");
3150 } else
3151 PROC_UNLOCK(p);
3152 exit1(td, 0, sig);
3153 /* NOTREACHED */
3154}
3155
3156/*
3157 * Send a queued SIGCHLD to the parent when the child process's
3158 * state changes.
3159 */
3160static void
3161sigparent(struct proc *p, int reason, int status)
3162{
3163 PROC_LOCK_ASSERT(p, MA_OWNED);
3164 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3165
3166 if (p->p_ksi != NULL) {
3167 p->p_ksi->ksi_signo = SIGCHLD;
3168 p->p_ksi->ksi_code = reason;
3169 p->p_ksi->ksi_status = status;
3170 p->p_ksi->ksi_pid = p->p_pid;
3171 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3172 if (KSI_ONQ(p->p_ksi))
3173 return;
3174 }
3175 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3176}
3177
3178static void
3179childproc_jobstate(struct proc *p, int reason, int sig)
3180{
3181 struct sigacts *ps;
3182
3183 PROC_LOCK_ASSERT(p, MA_OWNED);
3184 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3185
3186 /*
3187 * Wake up the parent sleeping in kern_wait() and also send
3188 * SIGCHLD to it.  SIGCHLD alone does not guarantee that the
3189 * parent will wake up, because the parent may have masked
3190 * the signal.
3191 */
3192 p->p_pptr->p_flag |= P_STATCHILD;
3193 wakeup(p->p_pptr);
3194
3195 ps = p->p_pptr->p_sigacts;
3196 mtx_lock(&ps->ps_mtx);
3197 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3198 mtx_unlock(&ps->ps_mtx);
3199 sigparent(p, reason, sig);
3200 } else
3201 mtx_unlock(&ps->ps_mtx);
3202}
3203
3204void
3205childproc_stopped(struct proc *p, int reason)
3206{
3207
3208 childproc_jobstate(p, reason, p->p_xsig);
3209}
3210
3211void
3212childproc_continued(struct proc *p)
3213{
3214 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3215}
3216
3217void
3218childproc_exited(struct proc *p)
3219{
3220 int reason, status;
3221
3222 if (WCOREDUMP(p->p_xsig)) {
3223 reason = CLD_DUMPED;
3224 status = WTERMSIG(p->p_xsig);
3225 } else if (WIFSIGNALED(p->p_xsig)) {
3226 reason = CLD_KILLED;
3227 status = WTERMSIG(p->p_xsig);
3228 } else {
3229 reason = CLD_EXITED;
3230 status = p->p_xexit;
3231 }
3232 /*
3233 * XXX avoid calling wakeup(p->p_pptr), the work is
3234 * done in exit1().
3235 */
3236 sigparent(p, reason, status);
3237}
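Aside: childproc_exited() chooses CLD_DUMPED/CLD_KILLED/CLD_EXITED from the same encoded p_xsig that a parent later unpacks with the wait(2) status macros. A small parent-side sketch of that decoding (hypothetical example, not part of kern_sig.c, illustration only):

/*
 * Hypothetical parent-side decoding of the exit status that
 * childproc_exited() classifies above; illustration only.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t pid;

	if ((pid = fork()) == 0) {
		raise(SIGABRT);		/* child dies from an uncaught signal */
		_exit(1);
	}
	if (waitpid(pid, &status, 0) != pid)
		return (1);

	if (WIFSIGNALED(status))
		printf("killed by signal %d%s\n", WTERMSIG(status),
		    WCOREDUMP(status) ? " (core dumped)" : "");
	else if (WIFEXITED(status))
		printf("exited with status %d\n", WEXITSTATUS(status));
	return (0);
}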
3238
3239#define MAX_NUM_CORE_FILES 100000
3240#ifndef NUM_CORE_FILES
3241#define NUM_CORE_FILES 5
3242#endif
3243CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3244static int num_cores = NUM_CORE_FILES;
3245
3246static int
3247sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3249 int error;
3250 int new_val;
3251
3252 new_val = num_cores;
3253 error = sysctl_handle_int(oidp, &new_val, 0, req);
3254 if (error != 0 || req->newptr == NULL)
3255 return (error);
3256 if (new_val > MAX_NUM_CORE_FILES)
3257 new_val = MAX_NUM_CORE_FILES;
3258 if (new_val < 0)
3259 new_val = 0;
3260 num_cores = new_val;
3261 return (0);
3262}
3263SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
3264 0, sizeof(int), sysctl_debug_num_cores_check, "I",
3265 "Maximum number of generated process corefiles while using index format");
3266
3267#define GZIP_SUFFIX ".gz"
3268#define ZSTD_SUFFIX ".zst"
3269
3270int compress_user_cores = 0;
3271
3272static int
3273sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3274{
3275 int error, val;
3276
3277 val = compress_user_cores;
3278 error = sysctl_handle_int(oidp, &val, 0, req);
3279 if (error != 0 || req->newptr == NULL)
3280 return (error);
3281 if (val != 0 && !compressor_avail(val))
3282 return (EINVAL);
3283 compress_user_cores = val;
3284 return (error);
3285}
3286SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores, CTLTYPE_INT | CTLFLAG_RWTUN,
3287 0, sizeof(int), sysctl_compress_user_cores, "I",
3288 "Enable compression of user corefiles ("
3289 __XSTRING(COMPRESS_GZIP) " = gzip, "
3290 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3291
3292int compress_user_cores_level = 6;
3293SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3294 &compress_user_cores_level, 0,
3295 "Corefile compression level");
3296
3297/*
3298 * Protect the access to corefilename[] by allproc_lock.
3299 */
3300#define corefilename_lock allproc_lock
3301
3302static char corefilename[MAXPATHLEN] = {"%N.core"};
3303TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3304
3305static int
3306sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3307{
3308 int error;
3309
3310 sx_xlock(&corefilename_lock);
3311 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3312 req);
3313 sx_xunlock(&corefilename_lock);
3314
3315 return (error);
3316}
3317SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3318 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3319 "Process corefile name format string");
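Aside: the kern.corefile template registered above can be read or changed from userland with sysctlbyname(3) or the sysctl(8) utility. A hedged sketch (hypothetical example, not part of kern_sig.c; assumes sufficient privilege for the set, error handling trimmed):

/*
 * Hypothetical userland sketch: inspect and update kern.corefile.
 * With the template below, a crash of pid 1234 running "myprog" as
 * uid 1001 would be written to /cores/1001/myprog-1234.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char cur[1024];
	size_t len = sizeof(cur);
	const char *tmpl = "/cores/%U/%N-%P";

	if (sysctlbyname("kern.corefile", cur, &len, NULL, 0) == 0)
		printf("current template: %s\n", cur);

	/* Writing the template requires sufficient privilege. */
	if (sysctlbyname("kern.corefile", NULL, NULL, tmpl,
	    strlen(tmpl) + 1) != 0)
		perror("sysctlbyname(kern.corefile)");
	return (0);
}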
3320
3321static void
3322vnode_close_locked(struct thread *td, struct vnode *vp)
3323{
3324
3325 VOP_UNLOCK(vp, 0);
3326 vn_close(vp, FWRITE, td->td_ucred, td);
3327}
3328
3329/*
3330 * If the core format has a %I in it, then we need to check
3331 * for existing corefiles before defining a name.
3332 * To do this we iterate over 0..ncores to find a
3333 * non-existing core file name to use. If all core files are
3334 * already used, we choose the oldest one.
3335 */
3336static int
3337corefile_open_last(struct thread *td, char *name, int indexpos,
3338 int indexlen, int ncores, struct vnode **vpp)
3339{
3340 struct vnode *oldvp, *nextvp, *vp;
3341 struct vattr vattr;
3342 struct nameidata nd;
3343 int error, i, flags, oflags, cmode;
3344 char ch;
3345 struct timespec lasttime;
3346
3347 nextvp = oldvp = NULL;
3348 cmode = S_IRUSR | S_IWUSR;
3349 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3350 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3351
3352 for (i = 0; i < ncores; i++) {
3353 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3354
3355 ch = name[indexpos + indexlen];
3356 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3357 i);
3358 name[indexpos + indexlen] = ch;
3359
3360 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3361 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3362 NULL);
3363 if (error != 0)
3364 break;
3365
3366 vp = nd.ni_vp;
3367 NDFREE(&nd, NDF_ONLY_PNBUF);
3368 if ((flags & O_CREAT) == O_CREAT) {
3369 nextvp = vp;
3370 break;
3371 }
3372
3373 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3374 if (error != 0) {
3375 vnode_close_locked(td, vp);
3376 break;
3377 }
3378
3379 if (oldvp == NULL ||
3380 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3381 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3382 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3383 if (oldvp != NULL)
3384 vnode_close_locked(td, oldvp);
3385 oldvp = vp;
3386 lasttime = vattr.va_mtime;
3387 } else {
3388 vnode_close_locked(td, vp);
3389 }
3390 }
3391
3392 if (oldvp != NULL) {
3393 if (nextvp == NULL)
3394 nextvp = oldvp;
3395 else
3396 vnode_close_locked(td, oldvp);
3397 }
3398 if (error != 0) {
3399 if (nextvp != NULL)
3400 vnode_close_locked(td, oldvp);
3401 } else {
3402 *vpp = nextvp;
3403 }
3404
3405 return (error);
3406}
3407
3408/*
3409 * corefile_open(comm, uid, pid, td, compress, vpp, namep)
3410 * Expand the name described in corefilename, using comm, uid, and pid,
3411 * and open/create the core file.
3412 * corefilename is a printf-like string, with three format specifiers:
3413 * %N name of process ("name")
3414 * %P process id (pid)
3415 * %U user id (uid)
3416 * For example, "%N.core" is the default; they can be disabled completely
3417 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3418 * This is controlled by the sysctl variable kern.corefile (see above).
3419 */
3420static int
3421corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3422 int compress, struct vnode **vpp, char **namep)
3423{
3424 struct sbuf sb;
3425 struct nameidata nd;
3426 const char *format;
3427 char *hostname, *name;
3428 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3429
3430 hostname = NULL;
3431 format = corefilename;
3432 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3433 indexlen = 0;
3434 indexpos = -1;
3435 ncores = num_cores;
3436 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3437 sx_slock(&corefilename_lock);
3438 for (i = 0; format[i] != '\0'; i++) {
3439 switch (format[i]) {
3440 case '%': /* Format character */
3441 i++;
3442 switch (format[i]) {
3443 case '%':
3444 sbuf_putc(&sb, '%');
3445 break;
3446 case 'H': /* hostname */
3447 if (hostname == NULL) {
3448 hostname = malloc(MAXHOSTNAMELEN,
3449 M_TEMP, M_WAITOK);
3450 }
3451 getcredhostname(td->td_ucred, hostname,
3452 MAXHOSTNAMELEN);
3453 sbuf_printf(&sb, "%s", hostname);
3454 break;
3455 case 'I': /* autoincrementing index */
3456 if (indexpos != -1) {
3457 sbuf_printf(&sb, "%%I");
3458 break;
3459 }
3460
3461 indexpos = sbuf_len(&sb);
3462 sbuf_printf(&sb, "%u", ncores - 1);
3463 indexlen = sbuf_len(&sb) - indexpos;
3464 break;
3465 case 'N': /* process name */
3466 sbuf_printf(&sb, "%s", comm);
3467 break;
3468 case 'P': /* process id */
3469 sbuf_printf(&sb, "%u", pid);
3470 break;
3471 case 'U': /* user id */
3472 sbuf_printf(&sb, "%u", uid);
3473 break;
3474 default:
3475 log(LOG_ERR,
3476 "Unknown format character %c in "
3477 "corename `%s'\n", format[i], format);
3478 break;
3479 }
3480 break;
3481 default:
3482 sbuf_putc(&sb, format[i]);
3483 break;
3484 }
3485 }
3486 sx_sunlock(&corefilename_lock);
3487 free(hostname, M_TEMP);
3488 if (compress == COMPRESS_GZIP)
3489 sbuf_printf(&sb, GZIP_SUFFIX);
3490 else if (compress == COMPRESS_ZSTD)
3491 sbuf_printf(&sb, ZSTD_SUFFIX);
3492 if (sbuf_error(&sb) != 0) {
3493 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3494 "long\n", (long)pid, comm, (u_long)uid);
3495 sbuf_delete(&sb);
3496 free(name, M_TEMP);
3497 return (ENOMEM);
3498 }
3499 sbuf_finish(&sb);
3500 sbuf_delete(&sb);
3501
3502 if (indexpos != -1) {
3503 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
3504 vpp);
3505 if (error != 0) {
3506 log(LOG_ERR,
3507 "pid %d (%s), uid (%u): Path `%s' failed "
3508 "on initial open test, error = %d\n",
3509 pid, comm, uid, name, error);
3510 }
3511 } else {
3512 cmode = S_IRUSR | S_IWUSR;
3513 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3514 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3515 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3516
3517 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td);
3518 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3519 NULL);
3520 if (error == 0) {
3521 *vpp = nd.ni_vp;
3522 NDFREE(&nd, NDF_ONLY_PNBUF);
3523 }
3524 }
3525
3526 if (error != 0) {
3527#ifdef AUDIT
3528 audit_proc_coredump(td, name, error);
3529#endif
3530 free(name, M_TEMP);
3531 return (error);
3532 }
3533 *namep = name;
3534 return (0);
3535}
3536
3537/*
3538 * Dump a process' core. The main routine does some
3539 * policy checking, and creates the name of the coredump;
3540 * then it passes on a vnode and a size limit to the process-specific
3541 * coredump routine if there is one; if there _is not_ one, it returns
3542 * ENOSYS; otherwise it returns the error from the process-specific routine.
3543 */
3544
3545static int
3546coredump(struct thread *td)
3547{
3548 struct proc *p = td->td_proc;
3549 struct ucred *cred = td->td_ucred;
3550 struct vnode *vp;
3551 struct flock lf;
3552 struct vattr vattr;
3553 int error, error1, locked;
3554 char *name; /* name of corefile */
3555 void *rl_cookie;
3556 off_t limit;
3557 char *fullpath, *freepath = NULL;
3558 struct sbuf *sb;
3559
3560 PROC_LOCK_ASSERT(p, MA_OWNED);
3561 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3562 _STOPEVENT(p, S_CORE, 0);
3563
3564 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
3565 (p->p_flag2 & P2_NOTRACE) != 0) {
3566 PROC_UNLOCK(p);
3567 return (EFAULT);
3568 }
3569
3570 /*
3571 * Note that the bulk of limit checking is done after
3572 * the corefile is created. The exception is if the limit
3573 * for corefiles is 0, in which case we don't bother
3574 * creating the corefile at all. This layout means that
3575 * a corefile is truncated instead of not being created,
3576 * if it is larger than the limit.
3577 */
3578 limit = (off_t)lim_cur(td, RLIMIT_CORE);
3579 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3580 PROC_UNLOCK(p);
3581 return (EFBIG);
3582 }
3583 PROC_UNLOCK(p);
3584
3585 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
3586 compress_user_cores, &vp, &name);
3587 if (error != 0)
3588 return (error);
3589
3590 /*
3591 * Don't dump to non-regular files or files with links.
3592 * Do not dump into system files.
3593 */
3594 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
3595 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0) {
3596 VOP_UNLOCK(vp, 0);
3597 error = EFAULT;
3598 goto out;
3599 }
3600
3601 VOP_UNLOCK(vp, 0);
3602
3603 /* Postpone other writers, including core dumps of other processes. */
3604 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
3605
3606 lf.l_whence = SEEK_SET;
3607 lf.l_start = 0;
3608 lf.l_len = 0;
3609 lf.l_type = F_WRLCK;
3610 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3611
3612 VATTR_NULL(&vattr);
3613 vattr.va_size = 0;
3614 if (set_core_nodump_flag)
3615 vattr.va_flags = UF_NODUMP;
3616 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3617 VOP_SETATTR(vp, &vattr, cred);
3618 VOP_UNLOCK(vp, 0);
3619 PROC_LOCK(p);
3620 p->p_acflag |= ACORE;
3621 PROC_UNLOCK(p);
3622
3623 if (p->p_sysent->sv_coredump != NULL) {
3624 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
3625 } else {
3626 error = ENOSYS;
3627 }
3628
3629 if (locked) {
3630 lf.l_type = F_UNLCK;
3631 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3632 }
3633 vn_rangelock_unlock(vp, rl_cookie);
3634
3635 /*
3636 * Notify the userland helper that a process triggered a core dump.
3637 * This allows the helper to run an automated debugging session.
3638 */
3639 if (error != 0 || coredump_devctl == 0)
3640 goto out;
3641 sb = sbuf_new_auto();
3642 if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
3643 goto out2;
3644 sbuf_printf(sb, "comm=\"");
3645 devctl_safe_quote_sb(sb, fullpath);
3646 free(freepath, M_TEMP);
3647 sbuf_printf(sb, "\" core=\"");
3648
3649 /*
3650 * We can't look up the core file vp directly. When we're replacing a core,
3651 * and at other random times, we flush the name cache, so the lookup will fail.
3652 * Instead, if the path of the core is relative, add the current dir in front of it.
3653 */
3654 if (name[0] != '/') {
3655 fullpath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3656 if (kern___getcwd(td, fullpath, UIO_SYSSPACE, MAXPATHLEN, MAXPATHLEN) != 0) {
3657 free(fullpath, M_TEMP);
3658 goto out2;
3659 }
3660 devctl_safe_quote_sb(sb, fullpath);
3661 free(fullpath, M_TEMP);
3662 sbuf_putc(sb, '/');
3663 }
3664 devctl_safe_quote_sb(sb, name);
3665 sbuf_printf(sb, "\"");
3666 if (sbuf_finish(sb) == 0)
3667 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
3668out2:
3669 sbuf_delete(sb);
3670out:
3671 error1 = vn_close(vp, FWRITE0x0002, cred, td);
3672 if (error == 0)
3673 error = error1;
3674#ifdef AUDIT
3675 audit_proc_coredump(td, name, error);
3676#endif
3677 free(name, M_TEMP);
3678 return (error);
3679}
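Aside: the limit check near the top of coredump() means a soft RLIMIT_CORE of 0 makes the function bail out with EFBIG before any file is created, so a process can opt out of dumping entirely. A hedged userland sketch of that knob (hypothetical example, not part of kern_sig.c, illustration only):

/*
 * Hypothetical sketch: disable core dumps for the current process by
 * dropping the soft RLIMIT_CORE to 0, the case coredump() rejects early.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_CORE, &rl) != 0) {
		perror("getrlimit");
		return (1);
	}
	rl.rlim_cur = 0;			/* leave the hard limit alone */
	if (setrlimit(RLIMIT_CORE, &rl) != 0) {
		perror("setrlimit");
		return (1);
	}
	printf("core dumps disabled (hard limit %jd)\n",
	    (intmax_t)rl.rlim_max);
	return (0);
}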
3680
3681/*
3682 * Nonexistent system call-- signal process (may want to handle it). Flag
3683 * error in case process won't see signal immediately (blocked or ignored).
3684 */
3685#ifndef _SYS_SYSPROTO_H_
3686struct nosys_args {
3687 int dummy;
3688};
3689#endif
3690/* ARGSUSED */
3691int
3692nosys(struct thread *td, struct nosys_args *args)
3693{
3694 struct proc *p;
3695
3696 p = td->td_proc;
3697
3698 PROC_LOCK(p);
3699 tdsignal(td, SIGSYS);
3700 PROC_UNLOCK(p);
3701 if (kern_lognosys == 1 || kern_lognosys == 3) {
3702 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3703 td->td_sa.code);
3704 }
3705 if (kern_lognosys == 2 || kern_lognosys == 3) {
3706 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
3707 td->td_sa.code);
3708 }
3709 return (ENOSYS);
3710}
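Aside: nosys() both posts SIGSYS to the calling thread and returns ENOSYS, so userland sees two effects from one bogus system call. A sketch that observes both (hypothetical example, not part of kern_sig.c; it assumes an out-of-range syscall number is routed to nosys, and installs a handler because the default SIGSYS action would terminate the process):

/*
 * Hypothetical sketch: trigger nosys() with an intentionally bogus
 * syscall number and observe SIGSYS plus ENOSYS; illustration only.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigsys;

static void
sigsys_handler(int sig)
{
	got_sigsys = 1;
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_handler = sigsys_handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSYS, &sa, NULL);

	errno = 0;
	if (syscall(100000) == -1 && errno == ENOSYS)	/* bogus number */
		printf("got ENOSYS, SIGSYS handler ran: %d\n",
		    (int)got_sigsys);
	return (0);
}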
3711
3712/*
3713 * Send a SIGIO or SIGURG signal to a process or process group using stored
3714 * credentials rather than those of the current process.
3715 */
3716void
3717pgsigio(struct sigio **sigiop, int sig, int checkctty)
3718{
3719 ksiginfo_t ksi;
3720 struct sigio *sigio;
3721
3722 ksiginfo_init(&ksi);
3723 ksi.ksi_signo = sig;
3724 ksi.ksi_code = SI_KERNEL;
3725
3726 SIGIO_LOCK();
3727 sigio = *sigiop;
3728 if (sigio == NULL) {
3729 SIGIO_UNLOCK();
3730 return;
3731 }
3732 if (sigio->sio_pgid > 0) {
3733 PROC_LOCK(sigio->sio_proc);
3734 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3735 kern_psignal(sigio->sio_proc, sig);
3736 PROC_UNLOCK(sigio->sio_proc);
3737 } else if (sigio->sio_pgid < 0) {
3738 struct proc *p;
3739
3740 PGRP_LOCK(sigio->sio_pgrp);
3741 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3742 PROC_LOCK(p);
3743 if (p->p_state == PRS_NORMAL &&
3744 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3745 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3746 kern_psignal(p, sig);
3747 PROC_UNLOCK(p);
3748 }
3749 PGRP_UNLOCK(sigio->sio_pgrp);
3750 }
3751 SIGIO_UNLOCK();
3752}
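Aside: pgsigio() is the delivery half of SIGIO-based asynchronous I/O; userland arms it per descriptor with F_SETOWN and O_ASYNC. A hedged sketch of that setup (hypothetical example, not part of kern_sig.c; assumes the descriptor type supports async notification):

/*
 * Hypothetical sketch: register for SIGIO on a descriptor so that the
 * kernel's pgsigio() path above has a recipient; illustration only.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t io_ready;

static void
sigio_handler(int sig)
{
	io_ready = 1;
}

int
main(void)
{
	struct sigaction sa;
	int fd = STDIN_FILENO;

	sa.sa_handler = sigio_handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGIO, &sa, NULL);

	/* Name this process as the SIGIO owner and enable async mode. */
	if (fcntl(fd, F_SETOWN, getpid()) == -1 ||
	    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC) == -1) {
		perror("fcntl");
		return (1);
	}
	pause();				/* woken by SIGIO */
	printf("io_ready=%d\n", (int)io_ready);
	return (0);
}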
3753
3754static int
3755filt_sigattach(struct knote *kn)
3756{
3757 struct proc *p = curproc;
3758
3759 kn->kn_ptr.p_proc = p;
3760 kn->kn_flags |= EV_CLEAR; /* automatically set */
3761
3762 knlist_add(p->p_klist, kn, 0);
3763
3764 return (0);
3765}
3766
3767static void
3768filt_sigdetach(struct knote *kn)
3769{
3770 struct proc *p = kn->kn_ptr.p_proc;
3771
3772 knlist_remove(p->p_klist, kn, 0);
3773}
3774
3775/*
3776 * signal knotes are shared with proc knotes, so we apply a mask to
3777 * the hint in order to differentiate them from process hints. This
3778 * could be avoided by using a signal-specific knote list, but probably
3779 * isn't worth the trouble.
3780 */
3781static int
3782filt_signal(struct knote *kn, long hint)
3783{
3784
3785 if (hint & NOTE_SIGNAL) {
3786 hint &= ~NOTE_SIGNAL;
3787
3788 if (kn->kn_id == hint)
3789 kn->kn_data++;
3790 }
3791 return (kn->kn_data != 0);
3792}
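Aside: filt_sigattach()/filt_signal() above implement the EVFILT_SIGNAL kqueue filter; the event's data field counts deliveries since the last fetch, and the filter observes signals even when they are ignored. A minimal consumer sketch (hypothetical example, not part of kern_sig.c, illustration only):

/*
 * Hypothetical sketch: count SIGUSR1 occurrences via EVFILT_SIGNAL,
 * the filter implemented by filt_signal() above; illustration only.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent change, event;
	int kq;

	if ((kq = kqueue()) == -1)
		return (1);

	/* The filter still fires for ignored signals. */
	signal(SIGUSR1, SIG_IGN);
	EV_SET(&change, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) == -1)
		return (1);

	kill(getpid(), SIGUSR1);
	kill(getpid(), SIGUSR1);

	if (kevent(kq, NULL, 0, &event, 1, NULL) == 1)
		printf("signal %d seen %d time(s)\n", (int)event.ident,
		    (int)event.data);
	return (0);
}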
3793
3794struct sigacts *
3795sigacts_alloc(void)
3796{
3797 struct sigacts *ps;
3798
3799 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
3800 refcount_init(&ps->ps_refcnt, 1);
3801 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
3802 return (ps);
3803}
3804
3805void
3806sigacts_free(struct sigacts *ps)
3807{
3808
3809 if (refcount_release(&ps->ps_refcnt) == 0)
3810 return;
3811 mtx_destroy(&ps->ps_mtx);
3812 free(ps, M_SUBPROC);
3813}
3814
3815struct sigacts *
3816sigacts_hold(struct sigacts *ps)
3817{
3818
3819 refcount_acquire(&ps->ps_refcnt);
3820 return (ps);
3821}
3822
3823void
3824sigacts_copy(struct sigacts *dest, struct sigacts *src)
3825{
3826
3827 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
3828 mtx_lock(&src->ps_mtx);
3829 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
3830 mtx_unlock(&src->ps_mtx);
3831}
3832
3833int
3834sigacts_shared(struct sigacts *ps)
3835{
3836
3837 return (ps->ps_refcnt > 1);
3838}