Bug Summary

File: kern/kern_resource.c
Warning: line 319, column 11
Copies out a struct with untouched element(s): prio
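
The finding is on the RTP_LOOKUP path of sys_rtprio_thread(): the on-stack struct rtprio rtp declared at line 291 is written only by pri_to_rtp() (line 317) before the copyout() at line 319, and pri_to_rtp() never assigns rtp->prio when the thread's priority class falls into the empty default arm of its switch, so the two bytes of rtp.prio that reach user space are whatever the kernel stack happened to hold. A small, hypothetical user-space model of that behaviour (plain C, illustrative names only, not kernel code):

/* model_untouched_copyout.c - a struct "copied out" after a partial fill
 * still carries the bytes the stack held before; here they are forced to a
 * recognizable 0xa5 pattern instead of being truly uninitialized. */
#include <stdio.h>
#include <string.h>

struct rtprio_model {		/* stand-in for struct rtprio */
	unsigned short type;
	unsigned short prio;
};

int
main(void)
{
	unsigned char stale[sizeof(struct rtprio_model)];
	struct rtprio_model rtp;

	memset(stale, 0xa5, sizeof(stale));	/* pretend-stale stack bytes */
	memcpy(&rtp, stale, sizeof(rtp));	/* model the dirty stack slot */
	rtp.type = 3;		/* like pri_to_rtp()'s default arm: prio untouched */
	printf("copied out: type=%hu prio=0x%hx\n", rtp.type, rtp.prio);
	return (0);
}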

Annotated Source Code

1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_resource.c 296162 2016-02-28 17:52:33Z kib $");
39
40#include "opt_compat.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysproto.h>
45#include <sys/file.h>
46#include <sys/kernel.h>
47#include <sys/lock.h>
48#include <sys/malloc.h>
49#include <sys/mutex.h>
50#include <sys/priv.h>
51#include <sys/proc.h>
52#include <sys/refcount.h>
53#include <sys/racct.h>
54#include <sys/resourcevar.h>
55#include <sys/rwlock.h>
56#include <sys/sched.h>
57#include <sys/sx.h>
58#include <sys/syscallsubr.h>
59#include <sys/sysctl.h>
60#include <sys/sysent.h>
61#include <sys/time.h>
62#include <sys/umtx.h>
63
64#include <vm/vm.h>
65#include <vm/vm_param.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h>
68
69
70static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
71static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
72#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
73static struct rwlock uihashtbl_lock;
74static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
75static u_long uihash; /* size of hash table - 1 */
76
77static void calcru1(struct proc *p, struct rusage_ext *ruxp,
78 struct timeval *up, struct timeval *sp);
79static int donice(struct thread *td, struct proc *chgp, int n);
80static struct uidinfo *uilookup(uid_t uid);
81static void ruxagg_locked(struct rusage_ext *rux, struct thread *td);
82
83/*
84 * Resource controls and accounting.
85 */
86#ifndef _SYS_SYSPROTO_H_
87struct getpriority_args {
88 int which;
89 int who;
90};
91#endif
92int
93sys_getpriority(struct thread *td, register struct getpriority_args *uap)
94{
95 struct proc *p;
96 struct pgrp *pg;
97 int error, low;
98
99 error = 0;
100 low = PRIO_MAX + 1;
101 switch (uap->which) {
102
103 case PRIO_PROCESS:
104 if (uap->who == 0)
105 low = td->td_proc->p_nice;
106 else {
107 p = pfind(uap->who);
108 if (p == NULL)
109 break;
110 if (p_cansee(td, p) == 0)
111 low = p->p_nice;
112 PROC_UNLOCK(p);
113 }
114 break;
115
116 case PRIO_PGRP:
117 sx_slock(&proctree_lock);
118 if (uap->who == 0) {
119 pg = td->td_proc->p_pgrp;
120 PGRP_LOCK(pg);
121 } else {
122 pg = pgfind(uap->who);
123 if (pg == NULL) {
124 sx_sunlock(&proctree_lock);
125 break;
126 }
127 }
128 sx_sunlock(&proctree_lock);
129 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
130 PROC_LOCK(p);
131 if (p->p_state == PRS_NORMAL &&
132 p_cansee(td, p) == 0) {
133 if (p->p_nice < low)
134 low = p->p_nice;
135 }
136 PROC_UNLOCK(p);
137 }
138 PGRP_UNLOCK(pg);
139 break;
140
141 case PRIO_USER:
142 if (uap->who == 0)
143 uap->who = td->td_ucred->cr_uid;
144 sx_slock(&allproc_lock);
145 FOREACH_PROC_IN_SYSTEM(p) {
146 PROC_LOCK(p);
147 if (p->p_state == PRS_NORMAL &&
148 p_cansee(td, p) == 0 &&
149 p->p_ucred->cr_uid == uap->who) {
150 if (p->p_nice < low)
151 low = p->p_nice;
152 }
153 PROC_UNLOCK(p);
154 }
155 sx_sunlock(&allproc_lock);
156 break;
157
158 default:
159 error = EINVAL;
160 break;
161 }
162 if (low == PRIO_MAX + 1 && error == 0)
163 error = ESRCH;
164 td->td_retval[0] = low;
165 return (error);
166}
167
168#ifndef _SYS_SYSPROTO_H_
169struct setpriority_args {
170 int which;
171 int who;
172 int prio;
173};
174#endif
175int
176sys_setpriority(struct thread *td, struct setpriority_args *uap)
177{
178 struct proc *curp, *p;
179 struct pgrp *pg;
180 int found = 0, error = 0;
181
182 curp = td->td_proc;
183 switch (uap->which) {
184 case PRIO_PROCESS:
185 if (uap->who == 0) {
186 PROC_LOCK(curp);
187 error = donice(td, curp, uap->prio);
188 PROC_UNLOCK(curp);
189 } else {
190 p = pfind(uap->who);
191 if (p == NULL)
192 break;
193 error = p_cansee(td, p);
194 if (error == 0)
195 error = donice(td, p, uap->prio);
196 PROC_UNLOCK(p);
197 }
198 found++;
199 break;
200
201 case PRIO_PGRP:
202 sx_slock(&proctree_lock);
203 if (uap->who == 0) {
204 pg = curp->p_pgrp;
205 PGRP_LOCK(pg);
206 } else {
207 pg = pgfind(uap->who);
208 if (pg == NULL) {
209 sx_sunlock(&proctree_lock);
210 break;
211 }
212 }
213 sx_sunlock(&proctree_lock);
214 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
215 PROC_LOCK(p);
216 if (p->p_state == PRS_NORMAL &&
217 p_cansee(td, p) == 0) {
218 error = donice(td, p, uap->prio);
219 found++;
220 }
221 PROC_UNLOCK(p);
222 }
223 PGRP_UNLOCK(pg);
224 break;
225
226 case PRIO_USER:
227 if (uap->who == 0)
228 uap->who = td->td_ucred->cr_uid;
229 sx_slock(&allproc_lock);
230 FOREACH_PROC_IN_SYSTEM(p) {
231 PROC_LOCK(p);
232 if (p->p_state == PRS_NORMAL &&
233 p->p_ucred->cr_uid == uap->who &&
234 p_cansee(td, p) == 0) {
235 error = donice(td, p, uap->prio);
236 found++;
237 }
238 PROC_UNLOCK(p);
239 }
240 sx_sunlock(&allproc_lock);
241 break;
242
243 default:
244 error = EINVAL;
245 break;
246 }
247 if (found == 0 && error == 0)
248 error = ESRCH;
249 return (error);
250}
251
252/*
253 * Set "nice" for a (whole) process.
254 */
255static int
256donice(struct thread *td, struct proc *p, int n)
257{
258 int error;
259
260 PROC_LOCK_ASSERT(p, MA_OWNED);
261 if ((error = p_cansched(td, p)))
262 return (error);
263 if (n > PRIO_MAX)
264 n = PRIO_MAX;
265 if (n < PRIO_MIN)
266 n = PRIO_MIN;
267 if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
268 return (EACCES);
269 sched_nice(p, n);
270 return (0);
271}
272
273static int unprivileged_idprio;
274SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
275 &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");
276
277/*
278 * Set realtime priority for LWP.
279 */
280#ifndef _SYS_SYSPROTO_H_
281struct rtprio_thread_args {
282 int function;
283 lwpid_t lwpid;
284 struct rtprio *rtp;
285};
286#endif
287int
288sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
289{
290 struct proc *p;
291 struct rtprio rtp;
292 struct thread *td1;
293 int cierror, error;
294
295 /* Perform copyin before acquiring locks if needed. */
296 if (uap->function == RTP_SET)
1
Taking false branch
297 cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
298 else
299 cierror = 0;
300
301 if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
2
Taking false branch
302 p = td->td_proc;
303 td1 = td;
304 PROC_LOCK(p);
305 } else {
306 /* Only look up thread in current process */
307 td1 = tdfind(uap->lwpid, curproc->p_pid);
308 if (td1 == NULL)
3
Assuming 'td1' is not equal to null
4
Taking false branch
309 return (ESRCH);
310 p = td1->td_proc;
311 }
312
313 switch (uap->function) {
5
Control jumps to 'case 0:' at line 314
314 case RTP_LOOKUP:
315 if ((error = p_cansee(td, p)))
6
Assuming 'error' is zero
7
Taking false branch
316 break;
317 pri_to_rtp(td1, &rtp);
318 PROC_UNLOCK(p);
319 return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
8
Copies out a struct with untouched element(s): prio
320 case RTP_SET:
321 if ((error = p_cansched(td, p)) || (error = cierror))
322 break;
323
324 /* Disallow setting rtprio in most cases if not superuser. */
325
326 /*
327 * Realtime priority has to be restricted for reasons which
328 * should be obvious. However, for idleprio processes, there is
329 * a potential for system deadlock if an idleprio process gains
330 * a lock on a resource that other processes need (and the
331 * idleprio process can't run due to a CPU-bound normal
332 * process). Fix me! XXX
333 *
334 * This problem is not only related to idleprio process.
335 * A user level program can obtain a file lock and hold it
336 * indefinitely. Additionally, without idleprio processes it is
337 * still conceivable that a program with low priority will never
338 * get to run. In short, allowing this feature might make it
339 * easier to lock a resource indefinitely, but it is not the
340 * only thing that makes it possible.
341 */
342 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
343 (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
344 unprivileged_idprio == 0)) {
345 error = priv_check(td, PRIV_SCHED_RTPRIO);
346 if (error)
347 break;
348 }
349 error = rtp_to_pri(&rtp, td1);
350 break;
351 default:
352 error = EINVAL;
353 break;
354 }
355 PROC_UNLOCK(p);
356 return (error);
357}
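
One way the copyout() at line 319 could be made safe is to clear rtp before the lookup path fills it, so no field is ever copied out untouched. The following is only a sketch of that idea against the RTP_LOOKUP case above, not the fix FreeBSD actually shipped:

	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		bzero(&rtp, sizeof(rtp));	/* hypothetical: rtp.prio has a defined
						   value even if pri_to_rtp() skips it */
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));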
358
359/*
360 * Set realtime priority.
361 */
362#ifndef _SYS_SYSPROTO_H_
363struct rtprio_args {
364 int function;
365 pid_t pid;
366 struct rtprio *rtp;
367};
368#endif
369int
370sys_rtprio(struct thread *td, register struct rtprio_args *uap)
371{
372 struct proc *p;
373 struct thread *tdp;
374 struct rtprio rtp;
375 int cierror, error;
376
377 /* Perform copyin before acquiring locks if needed. */
378 if (uap->function == RTP_SET)
379 cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
380 else
381 cierror = 0;
382
383 if (uap->pid == 0) {
384 p = td->td_proc;
385 PROC_LOCK(p);
386 } else {
387 p = pfind(uap->pid);
388 if (p == NULL)
389 return (ESRCH);
390 }
391
392 switch (uap->function) {
393 case RTP_LOOKUP:
394 if ((error = p_cansee(td, p)))
395 break;
396 /*
397 * Return OUR priority if no pid specified,
398 * or if one is, report the highest priority
399 * in the process. There isn't much more you can do as
400 * there is only room to return a single priority.
401 * Note: specifying our own pid is not the same
402 * as leaving it zero.
403 */
404 if (uap->pid == 0) {
405 pri_to_rtp(td, &rtp);
406 } else {
407 struct rtprio rtp2;
408
409 rtp.type = RTP_PRIO_IDLE;
410 rtp.prio = RTP_PRIO_MAX;
411 FOREACH_THREAD_IN_PROC(p, tdp) {
412 pri_to_rtp(tdp, &rtp2);
413 if (rtp2.type < rtp.type ||
414 (rtp2.type == rtp.type &&
415 rtp2.prio < rtp.prio)) {
416 rtp.type = rtp2.type;
417 rtp.prio = rtp2.prio;
418 }
419 }
420 }
421 PROC_UNLOCK(p);
422 return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
423 case RTP_SET:
424 if ((error = p_cansched(td, p)) || (error = cierror))
425 break;
426
427 /*
428 * Disallow setting rtprio in most cases if not superuser.
429 * See the comment in sys_rtprio_thread about idprio
430 * threads holding a lock.
431 */
432 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
433 (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
434 !unprivileged_idprio)) {
435 error = priv_check(td, PRIV_SCHED_RTPRIO);
436 if (error)
437 break;
438 }
439
440 /*
441 * If we are setting our own priority, set just our
442 * thread but if we are doing another process,
443 * do all the threads on that process. If we
444 * specify our own pid we do the latter.
445 */
446 if (uap->pid == 0) {
447 error = rtp_to_pri(&rtp, td);
448 } else {
449 FOREACH_THREAD_IN_PROC(p, td) {
450 if ((error = rtp_to_pri(&rtp, td)) != 0)
451 break;
452 }
453 }
454 break;
455 default:
456 error = EINVAL;
457 break;
458 }
459 PROC_UNLOCK(p);
460 return (error);
461}
462
463int
464rtp_to_pri(struct rtprio *rtp, struct thread *td)
465{
466 u_char newpri, oldclass, oldpri;
467
468 switch (RTP_PRIO_BASE(rtp->type)) {
469 case RTP_PRIO_REALTIME:
470 if (rtp->prio > RTP_PRIO_MAX)
471 return (EINVAL);
472 newpri = PRI_MIN_REALTIME + rtp->prio;
473 break;
474 case RTP_PRIO_NORMAL:
475 if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
476 return (EINVAL);
477 newpri = PRI_MIN_TIMESHARE + rtp->prio;
478 break;
479 case RTP_PRIO_IDLE:
480 if (rtp->prio > RTP_PRIO_MAX)
481 return (EINVAL);
482 newpri = PRI_MIN_IDLE + rtp->prio;
483 break;
484 default:
485 return (EINVAL);
486 }
487
488 thread_lock(td);
489 oldclass = td->td_pri_class;
490 sched_class(td, rtp->type); /* XXX fix */
491 oldpri = td->td_user_pri;
492 sched_user_prio(td, newpri);
493 if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
494 td->td_pri_class != RTP_PRIO_NORMAL))
495 sched_prio(td, td->td_user_pri);
496 if (TD_ON_UPILOCK(td) && oldpri != newpri) {
497 critical_enter();
498 thread_unlock(td);
499 umtx_pi_adjust(td, oldpri);
500 critical_exit();
501 } else
502 thread_unlock(td);
503 return (0);
504}
505
506void
507pri_to_rtp(struct thread *td, struct rtprio *rtp)
508{
509
510 thread_lock(td);
511 switch (PRI_BASE(td->td_pri_class)) {
512 case PRI_REALTIME:
513 rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
514 break;
515 case PRI_TIMESHARE:
516 rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
517 break;
518 case PRI_IDLE:
519 rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
520 break;
521 default:
522 break;
523 }
524 rtp->type = td->td_pri_class;
525 thread_unlock(td);
526}
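
Alternatively the field could be given a defined value inside pri_to_rtp() itself, whose default arm above sets only rtp->type. A hedged sketch of that variant (an assumed hardening, not the upstream change):

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		rtp->prio = 0;		/* hypothetical: never leave prio untouched */
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}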
527
528#if defined(COMPAT_43)
529#ifndef _SYS_SYSPROTO_H_
530struct osetrlimit_args {
531 u_int which;
532 struct orlimit *rlp;
533};
534#endif
535int
536osetrlimit(struct thread *td, register struct osetrlimit_args *uap)
537{
538 struct orlimit olim;
539 struct rlimit lim;
540 int error;
541
542 if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
543 return (error);
544 lim.rlim_cur = olim.rlim_cur;
545 lim.rlim_max = olim.rlim_max;
546 error = kern_setrlimit(td, uap->which, &lim);
547 return (error);
548}
549
550#ifndef _SYS_SYSPROTO_H_
551struct ogetrlimit_args {
552 u_int which;
553 struct orlimit *rlp;
554};
555#endif
556int
557ogetrlimit(struct thread *td, register struct ogetrlimit_args *uap)
558{
559 struct orlimit olim;
560 struct rlimit rl;
561 int error;
562
563 if (uap->which >= RLIM_NLIMITS)
564 return (EINVAL);
565 lim_rlimit(td, uap->which, &rl);
566
567 /*
568 * XXX would be more correct to convert only RLIM_INFINITY to the
569 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
570 * values. Most 64->32 and 32->16 conversions, including not
571 * unimportant ones of uids are even more broken than what we
572 * do here (they blindly truncate). We don't do this correctly
573 * here since we have little experience with EOVERFLOW yet.
574 * Elsewhere, getuid() can't fail...
575 */
576 olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
577 olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
578 error = copyout(&olim, uap->rlp, sizeof(olim));
579 return (error);
580}
581#endif /* COMPAT_43 */
582
583#ifndef _SYS_SYSPROTO_H_
584struct __setrlimit_args {
585 u_int which;
586 struct rlimit *rlp;
587};
588#endif
589int
590sys_setrlimit(struct thread *td, register struct __setrlimit_args *uap)
591{
592 struct rlimit alim;
593 int error;
594
595 if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
596 return (error);
597 error = kern_setrlimit(td, uap->which, &alim);
598 return (error);
599}
600
601static void
602lim_cb(void *arg)
603{
604 struct rlimit rlim;
605 struct thread *td;
606 struct proc *p;
607
608 p = arg;
609 PROC_LOCK_ASSERT(p, MA_OWNED);
610 /*
611 * Check if the process exceeds its cpu resource allocation. If
612 * it reaches the max, arrange to kill the process in ast().
613 */
614 if (p->p_cpulimit == RLIM_INFINITY)
615 return;
616 PROC_STATLOCK(p);
617 FOREACH_THREAD_IN_PROC(p, td) {
618 ruxagg(p, td);
619 }
620 PROC_STATUNLOCK(p);
621 if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
622 lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
623 if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
624 killproc(p, "exceeded maximum CPU limit");
625 } else {
626 if (p->p_cpulimit < rlim.rlim_max)
627 p->p_cpulimit += 5;
628 kern_psignal(p, SIGXCPU);
629 }
630 }
631 if ((p->p_flag & P_WEXIT) == 0)
632 callout_reset_sbt(&p->p_limco, SBT_1S, 0,
633 lim_cb, p, C_PREL(1));
634}
635
636int
637kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
638{
639
640 return (kern_proc_setrlimit(td, td->td_proc, which, limp));
641}
642
643int
644kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
645 struct rlimit *limp)
646{
647 struct plimit *newlim, *oldlim;
648 register struct rlimit *alimp;
649 struct rlimit oldssiz;
650 int error;
651
652 if (which >= RLIM_NLIMITS)
653 return (EINVAL);
654
655 /*
656 * Preserve historical bugs by treating negative limits as unsigned.
657 */
658 if (limp->rlim_cur < 0)
659 limp->rlim_cur = RLIM_INFINITY;
660 if (limp->rlim_max < 0)
661 limp->rlim_max = RLIM_INFINITY;
662
663 oldssiz.rlim_cur = 0;
664 newlim = lim_alloc();
665 PROC_LOCK(p);
666 oldlim = p->p_limit;
667 alimp = &oldlim->pl_rlimit[which];
668 if (limp->rlim_cur > alimp->rlim_max ||
669 limp->rlim_max > alimp->rlim_max)
670 if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
671 PROC_UNLOCK(p);
672 lim_free(newlim);
673 return (error);
674 }
675 if (limp->rlim_cur > limp->rlim_max)
676 limp->rlim_cur = limp->rlim_max;
677 lim_copy(newlim, oldlim);
678 alimp = &newlim->pl_rlimit[which];
679
680 switch (which) {
681
682 case RLIMIT_CPU:
683 if (limp->rlim_cur != RLIM_INFINITY &&
684 p->p_cpulimit == RLIM_INFINITY)
685 callout_reset_sbt(&p->p_limco, SBT_1S, 0,
686 lim_cb, p, C_PREL(1));
687 p->p_cpulimit = limp->rlim_cur;
688 break;
689 case RLIMIT_DATA:
690 if (limp->rlim_cur > maxdsiz)
691 limp->rlim_cur = maxdsiz;
692 if (limp->rlim_max > maxdsiz)
693 limp->rlim_max = maxdsiz;
694 break;
695
696 case RLIMIT_STACK:
697 if (limp->rlim_cur > maxssiz)
698 limp->rlim_cur = maxssiz;
699 if (limp->rlim_max > maxssiz)
700 limp->rlim_max = maxssiz;
701 oldssiz = *alimp;
702 if (p->p_sysent->sv_fixlimit != NULL)
703 p->p_sysent->sv_fixlimit(&oldssiz,
704 RLIMIT_STACK);
705 break;
706
707 case RLIMIT_NOFILE:
708 if (limp->rlim_cur > maxfilesperproc)
709 limp->rlim_cur = maxfilesperproc;
710 if (limp->rlim_max > maxfilesperproc)
711 limp->rlim_max = maxfilesperproc;
712 break;
713
714 case RLIMIT_NPROC:
715 if (limp->rlim_cur > maxprocperuid)
716 limp->rlim_cur = maxprocperuid;
717 if (limp->rlim_max > maxprocperuid)
718 limp->rlim_max = maxprocperuid;
719 if (limp->rlim_cur < 1)
720 limp->rlim_cur = 1;
721 if (limp->rlim_max < 1)
722 limp->rlim_max = 1;
723 break;
724 }
725 if (p->p_sysent->sv_fixlimit != NULL)
726 p->p_sysent->sv_fixlimit(limp, which);
727 *alimp = *limp;
728 p->p_limit = newlim;
729 PROC_UPDATE_COW(p);
730 PROC_UNLOCK(p);
731 lim_free(oldlim);
732
733 if (which == RLIMIT_STACK &&
734 /*
735 * Skip calls from exec_new_vmspace(), done when stack is
736 * not mapped yet.
737 */
738 (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
739 /*
740 * Stack is allocated to the max at exec time with only
741 * "rlim_cur" bytes accessible. If stack limit is going
742 * up make more accessible, if going down make inaccessible.
743 */
744 if (limp->rlim_cur != oldssiz.rlim_cur) {
745 vm_offset_t addr;
746 vm_size_t size;
747 vm_prot_t prot;
748
749 if (limp->rlim_cur > oldssiz.rlim_cur) {
750 prot = p->p_sysent->sv_stackprot;
751 size = limp->rlim_cur - oldssiz.rlim_cur;
752 addr = p->p_sysent->sv_usrstack -
753 limp->rlim_cur;
754 } else {
755 prot = VM_PROT_NONE;
756 size = oldssiz.rlim_cur - limp->rlim_cur;
757 addr = p->p_sysent->sv_usrstack -
758 oldssiz.rlim_cur;
759 }
760 addr = trunc_page(addr);
761 size = round_page(size);
762 (void)vm_map_protect(&p->p_vmspace->vm_map,
763 addr, addr + size, prot, FALSE);
764 }
765 }
766
767 return (0);
768}
769
770#ifndef _SYS_SYSPROTO_H_
771struct __getrlimit_args {
772 u_int which;
773 struct rlimit *rlp;
774};
775#endif
776/* ARGSUSED */
777int
778sys_getrlimit(struct thread *td, register struct __getrlimit_args *uap)
779{
780 struct rlimit rlim;
781 int error;
782
783 if (uap->which >= RLIM_NLIMITS)
784 return (EINVAL);
785 lim_rlimit(td, uap->which, &rlim);
786 error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
787 return (error);
788}
789
790/*
791 * Transform the running time and tick information for children of proc p
792 * into user and system time usage.
793 */
794void
795calccru(struct proc *p, struct timeval *up, struct timeval *sp)
796{
797
798 PROC_LOCK_ASSERT(p, MA_OWNED);
799 calcru1(p, &p->p_crux, up, sp);
800}
801
802/*
803 * Transform the running time and tick information in proc p into user
804 * and system time usage. If appropriate, include the current time slice
805 * on this CPU.
806 */
807void
808calcru(struct proc *p, struct timeval *up, struct timeval *sp)
809{
810 struct thread *td;
811 uint64_t runtime, u;
812
813 PROC_LOCK_ASSERT(p, MA_OWNED);
814 PROC_STATLOCK_ASSERT(p, MA_OWNED);
815 /*
816 * If we are getting stats for the current process, then add in the
817 * stats that this thread has accumulated in its current time slice.
818 * We reset the thread and CPU state as if we had performed a context
819 * switch right here.
820 */
821 td = curthread;
822 if (td->td_proc == p) {
823 u = cpu_ticks();
824 runtime = u - PCPU_GET(switchtime);
825 td->td_runtime += runtime;
826 td->td_incruntime += runtime;
827 PCPU_SET(switchtime, u);
828 }
829 /* Make sure the per-thread stats are current. */
830 FOREACH_THREAD_IN_PROC(p, td) {
831 if (td->td_incruntime == 0)
832 continue;
833 ruxagg(p, td);
834 }
835 calcru1(p, &p->p_rux, up, sp);
836}
837
838/* Collect resource usage for a single thread. */
839void
840rufetchtd(struct thread *td, struct rusage *ru)
841{
842 struct proc *p;
843 uint64_t runtime, u;
844
845 p = td->td_proc;
846 PROC_STATLOCK_ASSERT(p, MA_OWNED);
847 THREAD_LOCK_ASSERT(td, MA_OWNED);
848 /*
849 * If we are getting stats for the current thread, then add in the
850 * stats that this thread has accumulated in its current time slice.
851 * We reset the thread and CPU state as if we had performed a context
852 * switch right here.
853 */
854 if (td == curthread) {
855 u = cpu_ticks();
856 runtime = u - PCPU_GET(switchtime);
857 td->td_runtime += runtime;
858 td->td_incruntime += runtime;
859 PCPU_SET(switchtime, u);
860 }
861 ruxagg(p, td);
862 *ru = td->td_ru;
863 calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
864}
865
866static void
867calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
868 struct timeval *sp)
869{
870 /* {user, system, interrupt, total} {ticks, usec}: */
871 uint64_t ut, uu, st, su, it, tt, tu;
872
873 ut = ruxp->rux_uticks;
874 st = ruxp->rux_sticks;
875 it = ruxp->rux_iticks;
876 tt = ut + st + it;
877 if (tt == 0) {
878 /* Avoid divide by zero */
879 st = 1;
880 tt = 1;
881 }
882 tu = cputick2usec(ruxp->rux_runtime);
883 if ((int64_t)tu < 0) {
884 /* XXX: this should be an assert /phk */
885 printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
886 (intmax_t)tu, p->p_pid, p->p_comm);
887 tu = ruxp->rux_tu;
888 }
889
890 if (tu >= ruxp->rux_tu) {
891 /*
892 * The normal case, time increased.
893 * Enforce monotonicity of bucketed numbers.
894 */
895 uu = (tu * ut) / tt;
896 if (uu < ruxp->rux_uu)
897 uu = ruxp->rux_uu;
898 su = (tu * st) / tt;
899 if (su < ruxp->rux_su)
900 su = ruxp->rux_su;
901 } else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
902 /*
903 * When we calibrate the cputicker, it is not uncommon to
904 * see the presumably fixed frequency increase slightly over
905 * time as a result of thermal stabilization and NTP
906 * discipline (of the reference clock). We therefore ignore
907 * a bit of backwards slop because we expect to catch up
908 * shortly. We use a 3 microsecond limit to catch low
909 * counts and a 1% limit for high counts.
910 */
911 uu = ruxp->rux_uu;
912 su = ruxp->rux_su;
913 tu = ruxp->rux_tu;
914 } else { /* tu < ruxp->rux_tu */
915 /*
916 * What happened here was likely that a laptop, which ran at
917 * a reduced clock frequency at boot, kicked into high gear.
918 * The wisdom of spamming this message in that case is
919 * dubious, but it might also be indicative of something
920 * serious, so lets keep it and hope laptops can be made
921 * more truthful about their CPU speed via ACPI.
922 */
923 printf("calcru: runtime went backwards from %ju usec "
924 "to %ju usec for pid %d (%s)\n",
925 (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
926 p->p_pid, p->p_comm);
927 uu = (tu * ut) / tt;
928 su = (tu * st) / tt;
929 }
930
931 ruxp->rux_uu = uu;
932 ruxp->rux_su = su;
933 ruxp->rux_tu = tu;
934
935 up->tv_sec = uu / 1000000;
936 up->tv_usec = uu % 1000000;
937 sp->tv_sec = su / 1000000;
938 sp->tv_usec = su % 1000000;
939}
940
941#ifndef _SYS_SYSPROTO_H_
942struct getrusage_args {
943 int who;
944 struct rusage *rusage;
945};
946#endif
947int
948sys_getrusage(register struct thread *td, register struct getrusage_args *uap)
949{
950 struct rusage ru;
951 int error;
952
953 error = kern_getrusage(td, uap->who, &ru);
954 if (error == 0)
955 error = copyout(&ru, uap->rusage, sizeof(struct rusage));
956 return (error);
957}
958
959int
960kern_getrusage(struct thread *td, int who, struct rusage *rup)
961{
962 struct proc *p;
963 int error;
964
965 error = 0;
966 p = td->td_proc;
967 PROC_LOCK(p);
968 switch (who) {
969 case RUSAGE_SELF:
970 rufetchcalc(p, rup, &rup->ru_utime,
971 &rup->ru_stime);
972 break;
973
974 case RUSAGE_CHILDREN:
975 *rup = p->p_stats->p_cru;
976 calccru(p, &rup->ru_utime, &rup->ru_stime);
977 break;
978
979 case RUSAGE_THREAD:
980 PROC_STATLOCK(p);
981 thread_lock(td);
982 rufetchtd(td, rup);
983 thread_unlock(td);
984 PROC_STATUNLOCK(p);
985 break;
986
987 default:
988 error = EINVAL;
989 }
990 PROC_UNLOCK(p);
991 return (error);
992}
993
994void
995rucollect(struct rusage *ru, struct rusage *ru2)
996{
997 long *ip, *ip2;
998 int i;
999
1000 if (ru->ru_maxrss < ru2->ru_maxrss)
1001 ru->ru_maxrss = ru2->ru_maxrss;
1002 ip = &ru->ru_first;
1003 ip2 = &ru2->ru_first;
1004 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
1005 *ip++ += *ip2++;
1006}
1007
1008void
1009ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
1010 struct rusage_ext *rux2)
1011{
1012
1013 rux->rux_runtime += rux2->rux_runtime;
1014 rux->rux_uticks += rux2->rux_uticks;
1015 rux->rux_sticks += rux2->rux_sticks;
1016 rux->rux_iticks += rux2->rux_iticks;
1017 rux->rux_uu += rux2->rux_uu;
1018 rux->rux_su += rux2->rux_su;
1019 rux->rux_tu += rux2->rux_tu;
1020 rucollect(ru, ru2);
1021}
1022
1023/*
1024 * Aggregate tick counts into the proc's rusage_ext.
1025 */
1026static void
1027ruxagg_locked(struct rusage_ext *rux, struct thread *td)
1028{
1029
1030 THREAD_LOCK_ASSERT(td, MA_OWNED)do { struct mtx *__m = (td)->td_lock; if (__m != &blocked_lock
) (void)0; } while (0)
;
1031 PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED)(void)0;
1032 rux->rux_runtime += td->td_incruntime;
1033 rux->rux_uticks += td->td_uticks;
1034 rux->rux_sticks += td->td_sticks;
1035 rux->rux_iticks += td->td_iticks;
1036}
1037
1038void
1039ruxagg(struct proc *p, struct thread *td)
1040{
1041
1042 thread_lock(td)thread_lock_flags_((td), 0, "/usr/src/sys/kern/kern_resource.c"
, 1042)
;
1043 ruxagg_locked(&p->p_rux, td);
1044 ruxagg_locked(&td->td_rux, td);
1045 td->td_incruntime = 0;
1046 td->td_uticks = 0;
1047 td->td_iticks = 0;
1048 td->td_sticks = 0;
1049 thread_unlock(td)do { if ((((((((td)->td_lock)))))->lock_object.lo_data !=
0)) (((((td)->td_lock))))->lock_object.lo_data--; else
{ do { (void)0; do { if (__builtin_expect((sdt_lockstat___spin__release
->id), 0)) (*sdt_probe_func)(sdt_lockstat___spin__release->
id, (uintptr_t) ((((td)->td_lock))), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
atomic_store_rel_long(&((((((td)->td_lock)))))->mtx_lock
, 0x00000004); } spinlock_exit(); } while (0)
;
1050}
1051
1052/*
1053 * Update the rusage_ext structure and fetch a valid aggregate rusage
1054 * for proc p if storage for one is supplied.
1055 */
1056void
1057rufetch(struct proc *p, struct rusage *ru)
1058{
1059 struct thread *td;
1060
1061 PROC_STATLOCK_ASSERT(p, MA_OWNED)(void)0;
1062
1063 *ru = p->p_ru;
1064 if (p->p_numthreads > 0) {
1065 FOREACH_THREAD_IN_PROC(p, td)for (((td)) = (((&(p)->p_threads))->tqh_first); ((td
)); ((td)) = ((((td)))->td_plist.tqe_next))
{
1066 ruxagg(p, td);
1067 rucollect(ru, &td->td_ru);
1068 }
1069 }
1070}
1071
1072/*
1073 * Atomically perform a rufetch and a calcru together.
1074 * Consumers can safely assume that calcru is executed only once
1075 * rufetch has completed.
1076 */
1077void
1078rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
1079 struct timeval *sp)
1080{
1081
1082 PROC_STATLOCK(p)do { uintptr_t _tid = (uintptr_t)((__curthread())); spinlock_enter
(); if ((((((&(p)->p_statmtx))))->mtx_lock != 0x00000004
|| !atomic_cmpset_long(&(((((&(p)->p_statmtx)))))
->mtx_lock, 0x00000004, (_tid)))) { if (((((&(p)->p_statmtx
))))->mtx_lock == _tid) ((((&(p)->p_statmtx))))->
lock_object.lo_data++; else _mtx_lock_spin_cookie(&(((((&
(p)->p_statmtx)))))->mtx_lock, _tid, (((0))), ((((void *
)0))), ((0))); } else do { (void)0; do { if (__builtin_expect
((sdt_lockstat___spin__acquire->id), 0)) (*sdt_probe_func)
(sdt_lockstat___spin__acquire->id, (uintptr_t) (((&(p)
->p_statmtx))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0
, (uintptr_t) 0); } while (0); } while (0); } while (0)
;
1083 rufetch(p, ru);
1084 calcru(p, up, sp);
1085 PROC_STATUNLOCK(p)do { if (((((((&(p)->p_statmtx)))))->lock_object.lo_data
!= 0)) ((((&(p)->p_statmtx))))->lock_object.lo_data
--; else { do { (void)0; do { if (__builtin_expect((sdt_lockstat___spin__release
->id), 0)) (*sdt_probe_func)(sdt_lockstat___spin__release->
id, (uintptr_t) (((&(p)->p_statmtx))), (uintptr_t) 0, (
uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while
(0); atomic_store_rel_long(&(((((&(p)->p_statmtx)
))))->mtx_lock, 0x00000004); } spinlock_exit(); } while (0
)
;
1086}
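
The per-process stat spin lock taken here is what makes the fetch and the calcru() conversion a single consistent snapshot; callers are expected to hold the process lock across the call, as the RUSAGE_SELF case of kern_getrusage() above does. A minimal, hypothetical caller sketch (not part of this file, error handling elided):

/*
 * Hypothetical sketch: take a consistent rusage snapshot of a process.
 * Mirrors the RUSAGE_SELF path above.
 */
static void
example_rusage_snapshot(struct proc *p, struct rusage *ru)
{

	PROC_LOCK(p);
	rufetchcalc(p, ru, &ru->ru_utime, &ru->ru_stime);
	PROC_UNLOCK(p);
}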
1087
1088/*
1089 * Allocate a new resource limits structure and initialize its
1090 * reference count.
1091 */
1092struct plimit *
1093lim_alloc()
1094{
1095 struct plimit *limp;
1096
1097 limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK0x0002);
1098 refcount_init(&limp->pl_refcnt, 1);
1099 return (limp);
1100}
1101
1102struct plimit *
1103lim_hold(struct plimit *limp)
1104{
1105
1106 refcount_acquire(&limp->pl_refcnt);
1107 return (limp);
1108}
1109
1110void
1111lim_fork(struct proc *p1, struct proc *p2)
1112{
1113
1114 PROC_LOCK_ASSERT(p1, MA_OWNED)(void)0;
1115 PROC_LOCK_ASSERT(p2, MA_OWNED)(void)0;
1116
1117 p2->p_limit = lim_hold(p1->p_limit);
1118 callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0)_callout_init_lock((&p2->p_limco), ((&p2->p_mtx
) != ((void *)0)) ? &(&p2->p_mtx)->lock_object :
((void *)0), (0))
;
1119 if (p1->p_cpulimit != RLIM_INFINITY((rlim_t)(((__uint64_t)1 << 63) - 1)))
1120 callout_reset_sbt(&p2->p_limco, SBT_1S, 0,callout_reset_sbt_on((&p2->p_limco), (((sbintime_t)1 <<
32)), (0), (lim_cb), (p2), -1, ((((1) + 1) << 1)))
1121 lim_cb, p2, C_PREL(1))callout_reset_sbt_on((&p2->p_limco), (((sbintime_t)1 <<
32)), (0), (lim_cb), (p2), -1, ((((1) + 1) << 1)))
;
1122}
1123
1124void
1125lim_free(struct plimit *limp)
1126{
1127
1128 if (refcount_release(&limp->pl_refcnt))
1129 free((void *)limp, M_PLIMIT);
1130}
1131
1132/*
1133 * Make a copy of the plimit structure.
1134 * We share these structures copy-on-write after fork.
1135 */
1136void
1137lim_copy(struct plimit *dst, struct plimit *src)
1138{
1139
1140 KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"))do { } while (0);
1141 bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
1142}
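
Since plimit structures are shared copy-on-write after fork, a writer is expected to build a private table with lim_alloc()/lim_copy(), modify that copy, publish it, and drop its reference to the old table with lim_free(). The sketch below illustrates that discipline under those assumptions; it is a simplification, not the actual kern_setrlimit() code, and example_set_limit is a hypothetical name.

/*
 * Hypothetical sketch of a copy-on-write limit update built on
 * lim_alloc()/lim_copy()/lim_free().  Validation of the new limit and
 * resource-specific side effects are elided.
 */
static void
example_set_limit(struct proc *p, int which, const struct rlimit *newrlp)
{
	struct plimit *newlim, *oldlim;

	newlim = lim_alloc();			/* private copy, refcount 1 */
	PROC_LOCK(p);
	oldlim = p->p_limit;
	lim_copy(newlim, oldlim);		/* duplicate the shared table */
	newlim->pl_rlimit[which] = *newrlp;	/* apply the change */
	p->p_limit = newlim;			/* publish the private copy */
	PROC_UNLOCK(p);
	lim_free(oldlim);			/* drop reference to the old table */
}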
1143
1144/*
1145 * Return the hard limit for a particular system resource. The
1146 * which parameter specifies the index into the rlimit array.
1147 */
1148rlim_t
1149lim_max(struct thread *td, int which)
1150{
1151 struct rlimit rl;
1152
1153 lim_rlimit(td, which, &rl);
1154 return (rl.rlim_max);
1155}
1156
1157rlim_t
1158lim_max_proc(struct proc *p, int which)
1159{
1160 struct rlimit rl;
1161
1162 lim_rlimit_proc(p, which, &rl);
1163 return (rl.rlim_max);
1164}
1165
1166/*
1167 * Return the current (soft) limit for a particular system resource.
1168 * The which parameter specifies the index into the rlimit array.
1169 */
1170rlim_t
1171lim_cur(struct thread *td, int which)
1172{
1173 struct rlimit rl;
1174
1175 lim_rlimit(td, which, &rl);
1176 return (rl.rlim_cur);
1177}
1178
1179rlim_t
1180lim_cur_proc(struct proc *p, int which)
1181{
1182 struct rlimit rl;
1183
1184 lim_rlimit_proc(p, which, &rl);
1185 return (rl.rlim_cur);
1186}
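
These accessors are how other subsystems consult a limit without reaching into the plimit structure directly. A hypothetical consumer checking the soft data-size limit might look like the sketch below (the function name and the policy are illustrative only; the kernel environment and headers are assumed):

/*
 * Hypothetical consumer sketch: fail a growth request that would
 * exceed the caller's soft RLIMIT_DATA.
 */
static int
example_check_datasize(struct thread *td, rlim_t newsize)
{

	if (newsize > lim_cur(td, RLIMIT_DATA))
		return (ENOMEM);
	return (0);
}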
1187
1188/*
1189 * Return a copy of the entire rlimit structure for the system limit
1190 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
1191 */
1192void
1193lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
1194{
1195 struct proc *p = td->td_proc;
1196
1197 MPASS(td == curthread)do { } while (0);
1198 KASSERT(which >= 0 && which < RLIM_NLIMITS,do { } while (0)
1199 ("request for invalid resource limit"))do { } while (0);
1200 *rlp = td->td_limit->pl_rlimit[which];
1201 if (p->p_sysent->sv_fixlimit != NULL((void *)0))
1202 p->p_sysent->sv_fixlimit(rlp, which);
1203}
1204
1205void
1206lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
1207{
1208
1209 PROC_LOCK_ASSERT(p, MA_OWNED)(void)0;
1210 KASSERT(which >= 0 && which < RLIM_NLIMITS,do { } while (0)
1211 ("request for invalid resource limit"))do { } while (0);
1212 *rlp = p->p_limit->pl_rlimit[which];
1213 if (p->p_sysent->sv_fixlimit != NULL((void *)0))
1214 p->p_sysent->sv_fixlimit(rlp, which);
1215}
1216
1217void
1218uihashinit()
1219{
1220
1221 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
1222 rw_init(&uihashtbl_lock, "uidinfo hash")_rw_init_flags(&(&uihashtbl_lock)->rw_lock, "uidinfo hash"
, 0)
;
1223}
1224
1225/*
1226 * Look up a uidinfo struct for the given uid.
1227 * uihashtbl_lock must be held.
1228 * The refcount on the returned uidinfo struct is increased.
1229 */
1230static struct uidinfo *
1231uilookup(uid_t uid)
1232{
1233 struct uihashhead *uipp;
1234 struct uidinfo *uip;
1235
1236 rw_assert(&uihashtbl_lock, RA_LOCKED);
1237 uipp = UIHASH(uid)(&uihashtbl[(uid) & uihash]);
1238 LIST_FOREACH(uip, uipp, ui_hash)for ((uip) = (((uipp))->lh_first); (uip); (uip) = (((uip))
->ui_hash.le_next))
1239 if (uip->ui_uid == uid) {
1240 uihold(uip);
1241 break;
1242 }
1243
1244 return (uip);
1245}
1246
1247/*
1248 * Find or allocate a struct uidinfo for a particular uid.
1249 * Returns with the uidinfo struct referenced.
1250 * uifree() should be called once the caller releases its reference.
1251 */
1252struct uidinfo *
1253uifind(uid_t uid)
1254{
1255 struct uidinfo *new_uip, *uip;
1256
1257 rw_rlock(&uihashtbl_lock)__rw_rlock(&((&uihashtbl_lock))->rw_lock, ((void *
)0), 0)
;
1258 uip = uilookup(uid);
1259 rw_runlock(&uihashtbl_lock)_rw_runlock_cookie(&((&uihashtbl_lock))->rw_lock, (
(void *)0), 0)
;
1260 if (uip != NULL((void *)0))
1261 return (uip);
1262
1263 new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK0x0002 | M_ZERO0x0100);
1264 racct_create(&new_uip->ui_racct);
1265 refcount_init(&new_uip->ui_ref, 1);
1266 new_uip->ui_uid = uid;
1267 mtx_init(&new_uip->ui_vmsize_mtx, "ui_vmsize", NULL, MTX_DEF)_mtx_init(&(&new_uip->ui_vmsize_mtx)->mtx_lock,
"ui_vmsize", ((void *)0), 0x00000000)
;
1268
1269 rw_wlock(&uihashtbl_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((&
uihashtbl_lock))->rw_lock != ((0) << 4 | 0x01) || !atomic_cmpset_long
(&(((&uihashtbl_lock)))->rw_lock, ((0) << 4 |
0x01), (_tid))) __rw_wlock_hard(&(((&uihashtbl_lock)
))->rw_lock, _tid, (((void *)0)), (0)); else do { (void)0;
do { if (__builtin_expect((sdt_lockstat___rw__acquire->id
), 0)) (*sdt_probe_func)(sdt_lockstat___rw__acquire->id, (
uintptr_t) (&uihashtbl_lock), (uintptr_t) 0, (uintptr_t) 0
, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while
(0)
;
1270 /*
1271 * There's a chance someone created our uidinfo while we
1272 * were in malloc and not holding the lock, so we have to
1273 * make sure we don't insert a duplicate uidinfo.
1274 */
1275 if ((uip = uilookup(uid)) == NULL((void *)0)) {
1276 LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash)do { ; if (((((new_uip))->ui_hash.le_next) = ((((&uihashtbl
[(uid) & uihash])))->lh_first)) != ((void *)0)) ((((&
uihashtbl[(uid) & uihash])))->lh_first)->ui_hash.le_prev
= &(((new_uip))->ui_hash.le_next); ((((&uihashtbl
[(uid) & uihash])))->lh_first) = (new_uip); (new_uip)->
ui_hash.le_prev = &((((&uihashtbl[(uid) & uihash]
)))->lh_first); } while (0)
;
1277 rw_wunlock(&uihashtbl_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((&
uihashtbl_lock))->lock_object.lo_data) ((&uihashtbl_lock
))->lock_object.lo_data--; else { do { (void)0; do { if (__builtin_expect
((sdt_lockstat___rw__release->id), 0)) (*sdt_probe_func)(sdt_lockstat___rw__release
->id, (uintptr_t) (&uihashtbl_lock), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((&uihashtbl_lock))->rw_lock != _tid || !atomic_cmpset_long
(&(((&uihashtbl_lock)))->rw_lock, (_tid), ((0) <<
4 | 0x01))) __rw_wunlock_hard(&(((&uihashtbl_lock)))
->rw_lock, _tid, (((void *)0)), (0)); } } while (0)
;
1278 uip = new_uip;
1279 } else {
1280 rw_wunlock(&uihashtbl_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((&
uihashtbl_lock))->lock_object.lo_data) ((&uihashtbl_lock
))->lock_object.lo_data--; else { do { (void)0; do { if (__builtin_expect
((sdt_lockstat___rw__release->id), 0)) (*sdt_probe_func)(sdt_lockstat___rw__release
->id, (uintptr_t) (&uihashtbl_lock), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((&uihashtbl_lock))->rw_lock != _tid || !atomic_cmpset_long
(&(((&uihashtbl_lock)))->rw_lock, (_tid), ((0) <<
4 | 0x01))) __rw_wunlock_hard(&(((&uihashtbl_lock)))
->rw_lock, _tid, (((void *)0)), (0)); } } while (0)
;
1281 racct_destroy(&new_uip->ui_racct);
1282 mtx_destroy(&new_uip->ui_vmsize_mtx)_mtx_destroy(&(&new_uip->ui_vmsize_mtx)->mtx_lock
)
;
1283 free(new_uip, M_UIDINFO);
1284 }
1285 return (uip);
1286}
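
uifind() uses the classic "look up under the read lock, allocate with no lock held, then re-check under the write lock" pattern, so the sleeping allocation never happens inside the hash lock, at the cost of occasionally discarding a freshly built entry. The same shape in standalone, userland C, as a sketch with entirely hypothetical names (the kernel version additionally manages an atomic refcount and racct state):

/*
 * Standalone sketch of the lookup/allocate/re-check pattern used by
 * uifind(), with a pthread rwlock standing in for uihashtbl_lock.
 */
#include <pthread.h>
#include <stdlib.h>

struct entry { int id; struct entry *next; };

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct entry *cache_head;

static struct entry *
cache_lookup(int id)		/* caller holds cache_lock */
{
	struct entry *e;

	for (e = cache_head; e != NULL; e = e->next)
		if (e->id == id)
			return (e);
	return (NULL);
}

struct entry *
cache_find(int id)
{
	struct entry *e, *new_e;

	pthread_rwlock_rdlock(&cache_lock);
	e = cache_lookup(id);
	pthread_rwlock_unlock(&cache_lock);
	if (e != NULL)
		return (e);

	new_e = calloc(1, sizeof(*new_e));	/* may block; no lock held */
	if (new_e == NULL)
		return (NULL);
	new_e->id = id;

	pthread_rwlock_wrlock(&cache_lock);
	e = cache_lookup(id);			/* did someone race us? */
	if (e == NULL) {
		new_e->next = cache_head;	/* no: insert ours */
		cache_head = new_e;
		e = new_e;
	} else {
		free(new_e);			/* yes: discard ours */
	}
	pthread_rwlock_unlock(&cache_lock);
	return (e);
}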
1287
1288/*
1289 * Place another refcount on a uidinfo struct.
1290 */
1291void
1292uihold(struct uidinfo *uip)
1293{
1294
1295 refcount_acquire(&uip->ui_ref);
1296}
1297
1298/*-
1299 * Since uidinfo structs have a long lifetime, we use an
1300 * opportunistic refcounting scheme to avoid locking the lookup hash
1301 * for each release.
1302 *
1303 * If the refcount hits 0, we need to free the structure,
1304 * which means we need to lock the hash.
1305 * Optimal case:
1306 *   The count is greater than one, so a single atomic
1307 *   compare-and-swap lowers it and we return without the hash lock.
1308 * Suboptimal case:
1309 *   This may be the last reference, so take the hash write lock and
1310 *   release the reference under it; unlink and free the structure
1311 *   only if the count really dropped to zero.
1312 */
1313void
1314uifree(struct uidinfo *uip)
1315{
1316 int old;
1317
1318 /* Prepare for optimal case. */
1319 old = uip->ui_ref;
1320 if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
1321 return;
1322
1323 /* Prepare for suboptimal case. */
1324 rw_wlock(&uihashtbl_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((&
uihashtbl_lock))->rw_lock != ((0) << 4 | 0x01) || !atomic_cmpset_long
(&(((&uihashtbl_lock)))->rw_lock, ((0) << 4 |
0x01), (_tid))) __rw_wlock_hard(&(((&uihashtbl_lock)
))->rw_lock, _tid, (((void *)0)), (0)); else do { (void)0;
do { if (__builtin_expect((sdt_lockstat___rw__acquire->id
), 0)) (*sdt_probe_func)(sdt_lockstat___rw__acquire->id, (
uintptr_t) (&uihashtbl_lock), (uintptr_t) 0, (uintptr_t) 0
, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while
(0)
;
1325 if (refcount_release(&uip->ui_ref) == 0) {
1326 rw_wunlock(&uihashtbl_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((&
uihashtbl_lock))->lock_object.lo_data) ((&uihashtbl_lock
))->lock_object.lo_data--; else { do { (void)0; do { if (__builtin_expect
((sdt_lockstat___rw__release->id), 0)) (*sdt_probe_func)(sdt_lockstat___rw__release
->id, (uintptr_t) (&uihashtbl_lock), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((&uihashtbl_lock))->rw_lock != _tid || !atomic_cmpset_long
(&(((&uihashtbl_lock)))->rw_lock, (_tid), ((0) <<
4 | 0x01))) __rw_wunlock_hard(&(((&uihashtbl_lock)))
->rw_lock, _tid, (((void *)0)), (0)); } } while (0)
;
1327 return;
1328 }
1329
1330 racct_destroy(&uip->ui_racct);
1331 LIST_REMOVE(uip, ui_hash)do { ; ; ; ; if ((((uip))->ui_hash.le_next) != ((void *)0)
) (((uip))->ui_hash.le_next)->ui_hash.le_prev = (uip)->
ui_hash.le_prev; *(uip)->ui_hash.le_prev = (((uip))->ui_hash
.le_next); ; ; } while (0)
;
1332 rw_wunlock(&uihashtbl_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((&
uihashtbl_lock))->lock_object.lo_data) ((&uihashtbl_lock
))->lock_object.lo_data--; else { do { (void)0; do { if (__builtin_expect
((sdt_lockstat___rw__release->id), 0)) (*sdt_probe_func)(sdt_lockstat___rw__release
->id, (uintptr_t) (&uihashtbl_lock), (uintptr_t) 0, (uintptr_t
) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0);
if (((&uihashtbl_lock))->rw_lock != _tid || !atomic_cmpset_long
(&(((&uihashtbl_lock)))->rw_lock, (_tid), ((0) <<
4 | 0x01))) __rw_wunlock_hard(&(((&uihashtbl_lock)))
->rw_lock, _tid, (((void *)0)), (0)); } } while (0)
;
1333
1334 if (uip->ui_sbsize != 0)
1335 printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
1336 uip->ui_uid, uip->ui_sbsize);
1337 if (uip->ui_proccnt != 0)
1338 printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
1339 uip->ui_uid, uip->ui_proccnt);
1340 if (uip->ui_vmsize != 0)
1341 printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
1342 uip->ui_uid, (unsigned long long)uip->ui_vmsize);
1343 mtx_destroy(&uip->ui_vmsize_mtx)_mtx_destroy(&(&uip->ui_vmsize_mtx)->mtx_lock);
1344 free(uip, M_UIDINFO);
1345}
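
The fast path of uifree() is a single optimistic compare-and-swap that drops one reference as long as the count stays above one; only a possibly-final release pays for the hash write lock. The same idea expressed with portable C11 atomics, as a standalone sketch: teardown_locked() is a hypothetical stand-in for "take the hash lock, re-check, unlink and free".

/*
 * Standalone sketch of the opportunistic-release scheme behind
 * uifree(), using C11 atomics.
 */
#include <stdatomic.h>

struct obj {
	_Atomic unsigned int refs;
	/* ... payload ... */
};

void teardown_locked(struct obj *o);	/* slow path, defined elsewhere */

void
obj_release(struct obj *o)
{
	unsigned int old = atomic_load(&o->refs);

	/* Fast path: not the last reference, drop it without any lock. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(&o->refs, &old, old - 1))
			return;
		/* 'old' was refreshed by the failed CAS; retry. */
	}

	/* Possibly the last reference: take the expensive, locked path. */
	teardown_locked(o);
}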
1346
1347#ifdef RACCT1
1348void
1349ui_racct_foreach(void (*callback)(struct racct *racct,
1350 void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
1351 void *arg2, void *arg3)
1352{
1353 struct uidinfo *uip;
1354 struct uihashhead *uih;
1355
1356 rw_rlock(&uihashtbl_lock)__rw_rlock(&((&uihashtbl_lock))->rw_lock, ((void *
)0), 0)
;
1357 if (pre != NULL((void *)0))
1358 (pre)();
1359 for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
1360 LIST_FOREACH(uip, uih, ui_hash)for ((uip) = (((uih))->lh_first); (uip); (uip) = (((uip))->
ui_hash.le_next))
{
1361 (callback)(uip->ui_racct, arg2, arg3);
1362 }
1363 }
1364 if (post != NULL((void *)0))
1365 (post)();
1366 rw_runlock(&uihashtbl_lock)_rw_runlock_cookie(&((&uihashtbl_lock))->rw_lock, (
(void *)0), 0)
;
1367}
1368#endif
1369
1370static inline int
1371chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max, const char *name)
1372{
1373
1374 /* Don't allow them to exceed max, but allow subtraction. */
1375 if (diff > 0 && max != 0) {
1376 if (atomic_fetchadd_long(limit, (long)diff) + diff > max) {
1377 atomic_subtract_long(limit, (long)diff);
1378 return (0);
1379 }
1380 } else {
1381 atomic_add_long(limit, (long)diff);
1382 if (*limit < 0)
1383 printf("negative %s for uid = %d\n", name, uip->ui_uid);
1384 }
1385 return (1);
1386}
1387
1388/*
1389 * Change the count associated with number of processes
1390 * a given user is using. When 'max' is 0, don't enforce a limit
1391 */
1392int
1393chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
1394{
1395
1396 return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
1397}
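
chglimit() charges the counter with atomic_fetchadd_long() and backs the addition out if the new total would exceed max, so a caller only has to test the return value; decrements, and any change when max is 0, always succeed. A hypothetical charge/release pair, loosely patterned on per-uid process accounting (the function name and 'maxprocperuid' are illustrative; the kernel environment is assumed):

/*
 * Hypothetical sketch of charging and releasing the per-uid process
 * count.  Real callers also hold the appropriate locks.
 */
static int
example_charge_proc(struct uidinfo *uip, rlim_t maxprocperuid)
{

	if (chgproccnt(uip, 1, maxprocperuid) == 0)
		return (EAGAIN);	/* would exceed the per-uid limit */

	/* ... create the process; if that fails, return the slot: ... */
	/* chgproccnt(uip, -1, 0); */
	return (0);
}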
1398
1399/*
1400 * Change the total socket buffer size a user has used.
1401 */
1402int
1403chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
1404{
1405 int diff, rv;
1406
1407 diff = to - *hiwat;
1408 if (diff > 0 && max == 0) {
1409 rv = 0;
1410 } else {
1411 rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
1412 if (rv != 0)
1413 *hiwat = to;
1414 }
1415 return (rv);
1416}
1417
1418/*
1419 * Change the count associated with the number of pseudo-terminals
1420 * a given user is using.  When 'max' is 0, don't enforce a limit.
1421 */
1422int
1423chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
1424{
1425
1426 return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
1427}
1428
1429int
1430chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
1431{
1432
1433 return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
1434}
1435
1436int
1437chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
1438{
1439
1440 return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
1441}