Bug Summary

File: kern/sys_process.c
Warning: line 651, column 11
Copies out a struct with uncleared padding (>= 4 bytes)
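
The flagged statement is the copyout() for PT_LWPINFO at line 651: kern_ptrace() fills the fields of r.pl individually, so compiler-inserted padding inside the on-stack union keeps whatever bytes were previously on the kernel stack, and copyout() then discloses them to user space. Below is a minimal userspace sketch of the bug class (a hypothetical demo, not the kernel code and not the committed fix): struct demo has padding between its members, memcpy() stands in for copyout(), and zeroing the whole struct first is what keeps the padding bytes from leaking.

    #include <stdio.h>
    #include <string.h>

    /*
     * A struct with 4 bytes of compiler padding between 'flag' and
     * 'value' on common LP64 ABIs.  Assigning the named fields does
     * not touch the padding bytes.
     */
    struct demo {
            int     flag;           /* offset 0 */
            long    value;          /* offset 8; bytes 4-7 are padding */
    };

    int
    main(void)
    {
            struct demo d;
            unsigned char out[sizeof(d)];

            memset(&d, 0, sizeof(d));       /* clears data and padding */
            d.flag = 1;
            d.value = 2;
            memcpy(out, &d, sizeof(d));     /* stands in for copyout() */

            for (size_t i = 0; i < sizeof(out); i++)
                    printf("%02x ", out[i]);
            printf("\n");
            return (0);
    }

Without the memset(), bytes 4 through 7 of out would hold indeterminate stack contents; the analogous kernel-side mitigation is to zero r.pl (or the whole union) before populating it.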

Annotated Source Code

1/*-
2 * Copyright (c) 1994, Sean Eric Fagan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Sean Eric Fagan.
16 * 4. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: releng/11.0/sys/kern/sys_process.c 303197 2016-07-22 17:34:58Z kib $");
34
35#include "opt_compat.h"
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/lock.h>
40#include <sys/mutex.h>
41#include <sys/syscallsubr.h>
42#include <sys/sysent.h>
43#include <sys/sysproto.h>
44#include <sys/pioctl.h>
45#include <sys/priv.h>
46#include <sys/proc.h>
47#include <sys/vnode.h>
48#include <sys/ptrace.h>
49#include <sys/rwlock.h>
50#include <sys/sx.h>
51#include <sys/malloc.h>
52#include <sys/signalvar.h>
53
54#include <machine/reg.h>
55
56#include <security/audit/audit.h>
57
58#include <vm/vm.h>
59#include <vm/pmap.h>
60#include <vm/vm_extern.h>
61#include <vm/vm_map.h>
62#include <vm/vm_kern.h>
63#include <vm/vm_object.h>
64#include <vm/vm_page.h>
65#include <vm/vm_param.h>
66
67#ifdef COMPAT_FREEBSD32
68#include <sys/procfs.h>
69#include <compat/freebsd32/freebsd32_signal.h>
70
71struct ptrace_io_desc32 {
72 int piod_op;
73 uint32_t piod_offs;
74 uint32_t piod_addr;
75 uint32_t piod_len;
76};
77
78struct ptrace_vm_entry32 {
79 int pve_entry;
80 int pve_timestamp;
81 uint32_t pve_start;
82 uint32_t pve_end;
83 uint32_t pve_offset;
84 u_int pve_prot;
85 u_int pve_pathlen;
86 int32_t pve_fileid;
87 u_int pve_fsid;
88 uint32_t pve_path;
89};
90
91struct ptrace_lwpinfo32 {
92 lwpid_t pl_lwpid; /* LWP described. */
93 int pl_event; /* Event that stopped the LWP. */
94 int pl_flags; /* LWP flags. */
95 sigset_t pl_sigmask; /* LWP signal mask */
96 sigset_t pl_siglist; /* LWP pending signal */
97 struct siginfo32 pl_siginfo; /* siginfo for signal */
98 char pl_tdname[MAXCOMLEN + 1]; /* LWP name. */
99 pid_t pl_child_pid; /* New child pid */
100 u_int pl_syscall_code;
101 u_int pl_syscall_narg;
102};
103
104#endif
105
106/*
107 * Functions implemented using PROC_ACTION():
108 *
109 * proc_read_regs(proc, regs)
110 * Get the current user-visible register set from the process
111 * and copy it into the regs structure (<machine/reg.h>).
112 * The process is stopped at the time read_regs is called.
113 *
114 * proc_write_regs(proc, regs)
115 * Update the current register set from the passed in regs
116 * structure. Take care to avoid clobbering special CPU
117 * registers or privileged bits in the PSL.
118 * Depending on the architecture this may have fix-up work to do,
119 * especially if the IAR or PCW are modified.
120 * The process is stopped at the time write_regs is called.
121 *
122 * proc_read_fpregs, proc_write_fpregs
123 * deal with the floating point register set, otherwise as above.
124 *
125 * proc_read_dbregs, proc_write_dbregs
126 * deal with the processor debug register set, otherwise as above.
127 *
128 * proc_sstep(proc)
129 * Arrange for the process to trap after executing a single instruction.
130 */
131
132#define PROC_ACTION(action) do { \
133 int error; \
134 \
135 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \
136 if ((td->td_proc->p_flag & P_INMEM) == 0) \
137 error = EIO; \
138 else \
139 error = (action); \
140 return (error); \
141} while(0)
142
143int
144proc_read_regs(struct thread *td, struct reg *regs)
145{
146
147 PROC_ACTION(fill_regs(td, regs));
148}
149
150int
151proc_write_regs(struct thread *td, struct reg *regs)
152{
153
154 PROC_ACTION(set_regs(td, regs));
155}
156
157int
158proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
159{
160
161 PROC_ACTION(fill_dbregs(td, dbregs));
162}
163
164int
165proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
166{
167
168 PROC_ACTION(set_dbregs(td, dbregs));
169}
170
171/*
172 * Ptrace doesn't support fpregs at all, and there are no security holes
173 * or translations for fpregs, so we can just copy them.
174 */
175int
176proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
177{
178
179 PROC_ACTION(fill_fpregs(td, fpregs));
180}
181
182int
183proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
184{
185
186 PROC_ACTION(set_fpregs(td, fpregs));
187}
188
189#ifdef COMPAT_FREEBSD32
190/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
191int
192proc_read_regs32(struct thread *td, struct reg32 *regs32)
193{
194
195 PROC_ACTION(fill_regs32(td, regs32));
196}
197
198int
199proc_write_regs32(struct thread *td, struct reg32 *regs32)
200{
201
202 PROC_ACTION(set_regs32(td, regs32));
203}
204
205int
206proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
207{
208
209 PROC_ACTION(fill_dbregs32(td, dbregs32));
210}
211
212int
213proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
214{
215
216 PROC_ACTION(set_dbregs32(td, dbregs32));
217}
218
219int
220proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
221{
222
223 PROC_ACTION(fill_fpregs32(td, fpregs32));
224}
225
226int
227proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
228{
229
230 PROC_ACTION(set_fpregs32(td, fpregs32));
231}
232#endif
233
234int
235proc_sstep(struct thread *td)
236{
237
238 PROC_ACTION(ptrace_single_step(td));
239}
240
241int
242proc_rwmem(struct proc *p, struct uio *uio)
243{
244 vm_map_t map;
245 vm_offset_t pageno; /* page number */
246 vm_prot_t reqprot;
247 int error, fault_flags, page_offset, writing;
248
249 /*
250 * Assert that someone has locked this vmspace. (Should be
251 * curthread but we can't assert that.) This keeps the process
252 * from exiting out from under us until this operation completes.
253 */
254 PROC_ASSERT_HELD(p);
255 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
256
257 /*
258 * The map we want...
259 */
260 map = &p->p_vmspace->vm_map;
261
262 /*
263 * If we are writing, then we request vm_fault() to create a private
264 * copy of each page. Since these copies will not be writeable by the
265 * process, we must explicitly request that they be dirtied.
266 */
267 writing = uio->uio_rw == UIO_WRITE;
268 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
269 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
270
271 /*
272 * Only map in one page at a time. We don't have to, but it
273 * makes things easier. This way is trivial - right?
274 */
275 do {
276 vm_offset_t uva;
277 u_int len;
278 vm_page_t m;
279
280 uva = (vm_offset_t)uio->uio_offset;
281
282 /*
283 * Get the page number of this segment.
284 */
285 pageno = trunc_page(uva);
286 page_offset = uva - pageno;
287
288 /*
289 * How many bytes to copy
290 */
291 len = min(PAGE_SIZE - page_offset, uio->uio_resid);
292
293 /*
294 * Fault and hold the page on behalf of the process.
295 */
296 error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
297 if (error != KERN_SUCCESS) {
298 if (error == KERN_RESOURCE_SHORTAGE)
299 error = ENOMEM;
300 else
301 error = EFAULT;
302 break;
303 }
304
305 /*
306 * Now do the i/o move.
307 */
308 error = uiomove_fromphys(&m, page_offset, len, uio);
309
310 /* Make the I-cache coherent for breakpoints. */
311 if (writing && error == 0) {
312 vm_map_lock_read(map);
313 if (vm_map_check_protection(map, pageno, pageno +
314 PAGE_SIZE, VM_PROT_EXECUTE))
315 vm_sync_icache(map, uva, len);
316 vm_map_unlock_read(map);
317 }
318
319 /*
320 * Release the page.
321 */
322 vm_page_lock(m);
323 vm_page_unhold(m);
324 vm_page_unlock(m);
325
326 } while (error == 0 && uio->uio_resid > 0);
327
328 return (error);
329}
330
331static ssize_t
332proc_iop(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
333 size_t len, enum uio_rw rw)
334{
335 struct iovec iov;
336 struct uio uio;
337 ssize_t slen;
338 int error;
339
340 MPASS(len < SSIZE_MAX);
341 slen = (ssize_t)len;
342
343 iov.iov_base = (caddr_t)buf;
344 iov.iov_len = len;
345 uio.uio_iov = &iov;
346 uio.uio_iovcnt = 1;
347 uio.uio_offset = va;
348 uio.uio_resid = slen;
349 uio.uio_segflg = UIO_SYSSPACE;
350 uio.uio_rw = rw;
351 uio.uio_td = td;
352 error = proc_rwmem(p, &uio);
353 if (uio.uio_resid == slen)
354 return (-1);
355 return (slen - uio.uio_resid);
356}
357
358ssize_t
359proc_readmem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
360 size_t len)
361{
362
363 return (proc_iop(td, p, va, buf, len, UIO_READ));
364}
365
366ssize_t
367proc_writemem(struct thread *td, struct proc *p, vm_offset_t va, void *buf,
368 size_t len)
369{
370
371 return (proc_iop(td, p, va, buf, len, UIO_WRITE));
372}
373
374static int
375ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
376{
377 struct vattr vattr;
378 vm_map_t map;
379 vm_map_entry_t entry;
380 vm_object_t obj, tobj, lobj;
381 struct vmspace *vm;
382 struct vnode *vp;
383 char *freepath, *fullpath;
384 u_int pathlen;
385 int error, index;
386
387 error = 0;
388 obj = NULL;
389
390 vm = vmspace_acquire_ref(p);
391 map = &vm->vm_map;
392 vm_map_lock_read(map);
393
394 do {
395 entry = map->header.next;
396 index = 0;
397 while (index < pve->pve_entry && entry != &map->header) {
398 entry = entry->next;
399 index++;
400 }
401 if (index != pve->pve_entry) {
402 error = EINVAL;
403 break;
404 }
405 while (entry != &map->header &&
406 (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
407 entry = entry->next;
408 index++;
409 }
410 if (entry == &map->header) {
411 error = ENOENT;
412 break;
413 }
414
415 /* We got an entry. */
416 pve->pve_entry = index + 1;
417 pve->pve_timestamp = map->timestamp;
418 pve->pve_start = entry->start;
419 pve->pve_end = entry->end - 1;
420 pve->pve_offset = entry->offset;
421 pve->pve_prot = entry->protection;
422
423 /* Backing object's path needed? */
424 if (pve->pve_pathlen == 0)
425 break;
426
427 pathlen = pve->pve_pathlen;
428 pve->pve_pathlen = 0;
429
430 obj = entry->object.vm_object;
431 if (obj != NULL)
432 VM_OBJECT_RLOCK(obj);
433 } while (0);
434
435 vm_map_unlock_read(map);
436
437 pve->pve_fsid = VNOVAL;
438 pve->pve_fileid = VNOVAL;
439
440 if (error == 0 && obj != NULL) {
441 lobj = obj;
442 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
443 if (tobj != obj)
444 VM_OBJECT_RLOCK(tobj);
445 if (lobj != obj)
446 VM_OBJECT_RUNLOCK(lobj);
447 lobj = tobj;
448 pve->pve_offset += tobj->backing_object_offset;
449 }
450 vp = vm_object_vnode(lobj);
451 if (vp != NULL)
452 vref(vp);
453 if (lobj != obj)
454 VM_OBJECT_RUNLOCK(lobj);
455 VM_OBJECT_RUNLOCK(obj);
456
457 if (vp != NULL) {
458 freepath = NULL;
459 fullpath = NULL;
460 vn_fullpath(td, vp, &fullpath, &freepath);
461 vn_lock(vp, LK_SHARED | LK_RETRY);
462 if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
463 pve->pve_fileid = vattr.va_fileid;
464 pve->pve_fsid = vattr.va_fsid;
465 }
466 vput(vp);
467
468 if (fullpath != NULL) {
469 pve->pve_pathlen = strlen(fullpath) + 1;
470 if (pve->pve_pathlen <= pathlen) {
471 error = copyout(fullpath, pve->pve_path,
472 pve->pve_pathlen);
473 } else
474 error = ENAMETOOLONG;
475 }
476 if (freepath != NULL)
477 free(freepath, M_TEMP);
478 }
479 }
480 vmspace_free(vm);
481 if (error == 0)
482 CTR3(KTR_PTRACE, "PT_VM_ENTRY: pid %d, entry %d, start %p",
483 p->p_pid, pve->pve_entry, pve->pve_start);
484
485 return (error);
486}
487
488#ifdef COMPAT_FREEBSD32
489static int
490ptrace_vm_entry32(struct thread *td, struct proc *p,
491 struct ptrace_vm_entry32 *pve32)
492{
493 struct ptrace_vm_entry pve;
494 int error;
495
496 pve.pve_entry = pve32->pve_entry;
497 pve.pve_pathlen = pve32->pve_pathlen;
498 pve.pve_path = (void *)(uintptr_t)pve32->pve_path;
499
500 error = ptrace_vm_entry(td, p, &pve);
501 if (error == 0) {
502 pve32->pve_entry = pve.pve_entry;
503 pve32->pve_timestamp = pve.pve_timestamp;
504 pve32->pve_start = pve.pve_start;
505 pve32->pve_end = pve.pve_end;
506 pve32->pve_offset = pve.pve_offset;
507 pve32->pve_prot = pve.pve_prot;
508 pve32->pve_fileid = pve.pve_fileid;
509 pve32->pve_fsid = pve.pve_fsid;
510 }
511
512 pve32->pve_pathlen = pve.pve_pathlen;
513 return (error);
514}
515
516static void
517ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
518 struct ptrace_lwpinfo32 *pl32)
519{
520
521 pl32->pl_lwpid = pl->pl_lwpid;
522 pl32->pl_event = pl->pl_event;
523 pl32->pl_flags = pl->pl_flags;
524 pl32->pl_sigmask = pl->pl_sigmask;
525 pl32->pl_siglist = pl->pl_siglist;
526 siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
527 strcpy(pl32->pl_tdname, pl->pl_tdname);
528 pl32->pl_child_pid = pl->pl_child_pid;
529 pl32->pl_syscall_code = pl->pl_syscall_code;
530 pl32->pl_syscall_narg = pl->pl_syscall_narg;
531}
532#endif /* COMPAT_FREEBSD32 */
533
534/*
535 * Process debugging system call.
536 */
537#ifndef _SYS_SYSPROTO_H_
538struct ptrace_args {
539 int req;
540 pid_t pid;
541 caddr_t addr;
542 int data;
543};
544#endif
545
546#ifdef COMPAT_FREEBSD32
547/*
548 * This CPP subterfuge is to try and reduce the number of ifdefs in
549 * the body of the code.
550 * COPYIN(uap->addr, &r.reg, sizeof r.reg);
551 * becomes either:
552 * copyin(uap->addr, &r.reg, sizeof r.reg);
553 * or
554 * copyin(uap->addr, &r.reg32, sizeof r.reg32);
555 * .. except this is done at runtime.
556 */
557#define COPYIN(u, k, s) wrap32 ? \
558 copyin(u, k ## 32, s ## 32) : \
559 copyin(u, k, s)
560#define COPYOUT(k, u, s) wrap32 ? \
561 copyout(k ## 32, u, s ## 32) : \
562 copyout(k, u, s)
563#else
564#define COPYIN(u, k, s) copyin(u, k, s)
565#define COPYOUT(k, u, s) copyout(k, u, s)
566#endif
567int
568sys_ptrace(struct thread *td, struct ptrace_args *uap)
569{
570 /*
571 * XXX this obfuscation is to reduce stack usage, but the register
572 * structs may be too large to put on the stack anyway.
573 */
574 union {
575 struct ptrace_io_desc piod;
576 struct ptrace_lwpinfo pl;
577 struct ptrace_vm_entry pve;
578 struct dbreg dbreg;
579 struct fpreg fpreg;
580 struct reg reg;
581#ifdef COMPAT_FREEBSD32
582 struct dbreg32 dbreg32;
583 struct fpreg32 fpreg32;
584 struct reg32 reg32;
585 struct ptrace_io_desc32 piod32;
586 struct ptrace_lwpinfo32 pl32;
587 struct ptrace_vm_entry32 pve32;
588#endif
589 } r;
590 void *addr;
591 int error = 0;
592#ifdef COMPAT_FREEBSD32
593 int wrap32 = 0;
594
595 if (SV_CURPROC_FLAG(SV_ILP32))
1
Taking false branch
596 wrap32 = 1;
597#endif
598 AUDIT_ARG_PID(uap->pid);
599 AUDIT_ARG_CMD(uap->req);
600 AUDIT_ARG_VALUE(uap->data);
601 addr = &r;
602 switch (uap->req) {
2
Control jumps to 'case 13:' at line 606
603 case PT_GETREGS:
604 case PT_GETFPREGS:
605 case PT_GETDBREGS:
606 case PT_LWPINFO:
607 break;
3
Execution continues on line 627
608 case PT_SETREGS:
609 error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
610 break;
611 case PT_SETFPREGS:
612 error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
613 break;
614 case PT_SETDBREGS:
615 error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
616 break;
617 case PT_IO:
618 error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
619 break;
620 case PT_VM_ENTRY:
621 error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
622 break;
623 default:
624 addr = uap->addr;
625 break;
626 }
627 if (error)
4
Taking false branch
628 return (error);
629
630 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
631 if (error)
5
Assuming 'error' is 0
6
Taking false branch
632 return (error);
633
634 switch (uap->req) {
7
Control jumps to 'case 13:' at line 650
635 case PT_VM_ENTRY:
636 error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
637 break;
638 case PT_IO:
639 error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
640 break;
641 case PT_GETREGS:
642 error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
643 break;
644 case PT_GETFPREGS:
645 error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
646 break;
647 case PT_GETDBREGS:
648 error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
649 break;
650 case PT_LWPINFO:
651 error = copyout(&r.pl, uap->addr, uap->data);
8
Copies out a struct with uncleared padding (>= 4 bytes)
652 break;
653 }
654
655 return (error);
656}
657#undef COPYIN
658#undef COPYOUT
659
660#ifdef COMPAT_FREEBSD32
661/*
662 * PROC_READ(regs, td2, addr);
663 * becomes either:
664 * proc_read_regs(td2, addr);
665 * or
666 * proc_read_regs32(td2, addr);
667 * .. except this is done at runtime. There is an additional
668 * complication in that PROC_WRITE disallows 32 bit consumers
669 * from writing to 64 bit address space targets.
670 */
671#define PROC_READ(w, t, a) wrap32 ? \
672 proc_read_ ## w ## 32(t, a) : \
673 proc_read_ ## w (t, a)
674#define PROC_WRITE(w, t, a) wrap32 ? \
675 (safe ? proc_write_ ## w ## 32(t, a) : EINVAL) : \
676 proc_write_ ## w (t, a)
677#else
678#define PROC_READ(w, t, a) proc_read_ ## w (t, a)
679#define PROC_WRITE(w, t, a) proc_write_ ## w (t, a)
680#endif
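
As with COPYIN/COPYOUT above (an illustrative expansion, not part of the source), PROC_WRITE(regs, td2, addr) preprocesses to

    wrap32 ?
        (safe ? proc_write_regs32(td2, addr) : EINVAL) :
        proc_write_regs(td2, addr);

and safe is only set when the target process is itself 32-bit, which is how a 32-bit debugger is kept from writing register state into a 64-bit address-space target.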
681
682int
683kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
684{
685 struct iovec iov;
686 struct uio uio;
687 struct proc *curp, *p, *pp;
688 struct thread *td2 = NULL, *td3;
689 struct ptrace_io_desc *piod = NULL;
690 struct ptrace_lwpinfo *pl;
691 int error, num, tmp;
692 int proctree_locked = 0;
693 lwpid_t tid = 0, *buf;
694#ifdef COMPAT_FREEBSD32
695 int wrap32 = 0, safe = 0;
696 struct ptrace_io_desc32 *piod32 = NULL;
697 struct ptrace_lwpinfo32 *pl32 = NULL;
698 struct ptrace_lwpinfo plr;
699#endif
700
701 curp = td->td_proc;
702
703 /* Lock proctree before locking the process. */
704 switch (req) {
705 case PT_TRACE_ME:
706 case PT_ATTACH:
707 case PT_STEP:
708 case PT_CONTINUE:
709 case PT_TO_SCE:
710 case PT_TO_SCX:
711 case PT_SYSCALL:
712 case PT_FOLLOW_FORK:
713 case PT_LWP_EVENTS:
714 case PT_DETACH:
715 sx_xlock(&proctree_lock);
716 proctree_locked = 1;
717 break;
718 default:
719 break;
720 }
721
722 if (req == PT_TRACE_ME) {
723 p = td->td_proc;
724 PROC_LOCK(p);
725 } else {
726 if (pid <= PID_MAX) {
727 if ((p = pfind(pid)) == NULL) {
728 if (proctree_locked)
729 sx_xunlock(&proctree_lock);
730 return (ESRCH);
731 }
732 } else {
733 td2 = tdfind(pid, -1);
734 if (td2 == NULL) {
735 if (proctree_locked)
736 sx_xunlock(&proctree_lock);
737 return (ESRCH);
738 }
739 p = td2->td_proc;
740 tid = pid;
741 pid = p->p_pid;
742 }
743 }
744 AUDIT_ARG_PROCESS(p);
745
746 if ((p->p_flag & P_WEXIT) != 0) {
747 error = ESRCH;
748 goto fail;
749 }
750 if ((error = p_cansee(td, p)) != 0)
751 goto fail;
752
753 if ((error = p_candebug(td, p)) != 0)
754 goto fail;
755
756 /*
757 * System processes can't be debugged.
758 */
759 if ((p->p_flag & P_SYSTEM) != 0) {
760 error = EINVAL;
761 goto fail;
762 }
763
764 if (tid == 0) {
765 if ((p->p_flag & P_STOPPED_TRACE) != 0) {
766 KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
767 td2 = p->p_xthread;
768 } else {
769 td2 = FIRST_THREAD_IN_PROC(p);
770 }
771 tid = td2->td_tid;
772 }
773
774#ifdef COMPAT_FREEBSD32
775 /*
776 * Test if we're a 32 bit client and what the target is.
777 * Set the wrap controls accordingly.
778 */
779 if (SV_CURPROC_FLAG(SV_ILP32)) {
780 if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
781 safe = 1;
782 wrap32 = 1;
783 }
784#endif
785 /*
786 * Permissions check
787 */
788 switch (req) {
789 case PT_TRACE_ME:
790 /*
791 * Always legal, when there is a parent process which
792 * could trace us. Otherwise, reject.
793 */
794 if ((p->p_flag & P_TRACED) != 0) {
795 error = EBUSY;
796 goto fail;
797 }
798 if (p->p_pptr == initproc) {
799 error = EPERM;
800 goto fail;
801 }
802 break;
803
804 case PT_ATTACH:
805 /* Self */
806 if (p == td->td_proc) {
807 error = EINVAL;
808 goto fail;
809 }
810
811 /* Already traced */
812 if (p->p_flag & P_TRACED) {
813 error = EBUSY;
814 goto fail;
815 }
816
817 /* Can't trace an ancestor if you're being traced. */
818 if (curp->p_flag & P_TRACED) {
819 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
820 if (pp == p) {
821 error = EINVAL;
822 goto fail;
823 }
824 }
825 }
826
827
828 /* OK */
829 break;
830
831 case PT_CLEARSTEP:
832 /* Allow thread to clear single step for itself */
833 if (td->td_tid == tid)
834 break;
835
836 /* FALLTHROUGH */
837 default:
838 /* not being traced... */
839 if ((p->p_flag & P_TRACED) == 0) {
840 error = EPERM;
841 goto fail;
842 }
843
844 /* not being traced by YOU */
845 if (p->p_pptr != td->td_proc) {
846 error = EBUSY;
847 goto fail;
848 }
849
850 /* not currently stopped */
851 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
852 p->p_suspcount != p->p_numthreads ||
853 (p->p_flag & P_WAITED) == 0) {
854 error = EBUSY;
855 goto fail;
856 }
857
858 if ((p->p_flag & P_STOPPED_TRACE) == 0) {
859 static int count = 0;
860 if (count++ == 0)
861 printf("P_STOPPED_TRACE not set.\n");
862 }
863
864 /* OK */
865 break;
866 }
867
868 /* Keep this process around until we finish this request. */
869 _PHOLD(p);
870
871#ifdef FIX_SSTEP
872 /*
873 * Single step fixup ala procfs
874 */
875 FIX_SSTEP(td2);
876#endif
877
878 /*
879 * Actually do the requests
880 */
881
882 td->td_retval[0] = 0;
883
884 switch (req) {
885 case PT_TRACE_ME:
886 /* set my trace flag and "owner" so it can read/write me */
887 p->p_flag |= P_TRACED;
888 if (p->p_flag & P_PPWAIT)
889 p->p_flag |= P_PPTRACE;
890 p->p_oppid = p->p_pptr->p_pid;
891 CTR1(KTR_PTRACE, "PT_TRACE_ME: pid %d", p->p_pid);
892 break;
893
894 case PT_ATTACH:
895 /* security check done above */
896 /*
897 * It would be nice if the tracing relationship was separate
898 * from the parent relationship but that would require
899 * another set of links in the proc struct or for "wait"
900 * to scan the entire proc table. To make life easier,
901 * we just re-parent the process we're trying to trace.
902 * The old parent is remembered so we can put things back
903 * on a "detach".
904 */
905 p->p_flag |= P_TRACED;
906 p->p_oppid = p->p_pptr->p_pid;
907 if (p->p_pptr != td->td_proc) {
908 proc_reparent(p, td->td_proc);
909 }
910 data = SIGSTOP;
911 CTR2(KTR_PTRACE, "PT_ATTACH: pid %d, oppid %d", p->p_pid,
912 p->p_oppid);
913 goto sendsig; /* in PT_CONTINUE below */
914
915 case PT_CLEARSTEP:
916 CTR2(KTR_PTRACE, "PT_CLEARSTEP: tid %d (pid %d)", td2->td_tid,
917 p->p_pid);
918 error = ptrace_clear_single_step(td2);
919 break;
920
921 case PT_SETSTEP:
922 CTR2(KTR_PTRACE, "PT_SETSTEP: tid %d (pid %d)", td2->td_tid,
923 p->p_pid);
924 error = ptrace_single_step(td2);
925 break;
926
927 case PT_SUSPEND:
928 CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
929 p->p_pid);
930 td2->td_dbgflags |= TDB_SUSPEND;
931 thread_lock(td2);
932 td2->td_flags |= TDF_NEEDSUSPCHK;
933 thread_unlock(td2);
934 break;
935
936 case PT_RESUME:
937 CTR2(KTR_PTRACE, "PT_RESUME: tid %d (pid %d)", td2->td_tid,
938 p->p_pid);
939 td2->td_dbgflags &= ~TDB_SUSPEND;
940 break;
941
942 case PT_FOLLOW_FORK:
943 CTR3(KTR_PTRACE, "PT_FOLLOW_FORK: pid %d %s -> %s", p->p_pid,
944 p->p_flag & P_FOLLOWFORK ? "enabled" : "disabled",
945 data ? "enabled" : "disabled");
946 if (data)
947 p->p_flag |= P_FOLLOWFORK;
948 else
949 p->p_flag &= ~P_FOLLOWFORK;
950 break;
951
952 case PT_LWP_EVENTS:
953 CTR3(KTR_PTRACE, "PT_LWP_EVENTS: pid %d %s -> %s", p->p_pid,
954 p->p_flag2 & P2_LWP_EVENTS ? "enabled" : "disabled",
955 data ? "enabled" : "disabled");
956 if (data)
957 p->p_flag2 |= P2_LWP_EVENTS;
958 else
959 p->p_flag2 &= ~P2_LWP_EVENTS;
960 break;
961
962 case PT_STEP:
963 case PT_CONTINUE:
964 case PT_TO_SCE:
965 case PT_TO_SCX:
966 case PT_SYSCALL:
967 case PT_DETACH:
968 /* Zero means do not send any signal */
969 if (data < 0 || data > _SIG_MAXSIG) {
970 error = EINVAL;
971 break;
972 }
973
974 switch (req) {
975 case PT_STEP:
976 CTR2(KTR_PTRACE, "PT_STEP: tid %d (pid %d)",
977 td2->td_tid, p->p_pid);
978 error = ptrace_single_step(td2);
979 if (error)
980 goto out;
981 break;
982 case PT_CONTINUE:
983 case PT_TO_SCE:
984 case PT_TO_SCX:
985 case PT_SYSCALL:
986 if (addr != (void *)1) {
987 error = ptrace_set_pc(td2,
988 (u_long)(uintfptr_t)addr);
989 if (error)
990 goto out;
991 }
992 switch (req) {
993 case PT_TO_SCE:
994 p->p_stops |= S_PT_SCE;
995 CTR4(KTR_PTRACE,
996 "PT_TO_SCE: pid %d, stops = %#x, PC = %#lx, sig = %d",
997 p->p_pid, p->p_stops,
998 (u_long)(uintfptr_t)addr, data);
999 break;
1000 case PT_TO_SCX:
1001 p->p_stops |= S_PT_SCX;
1002 CTR4(KTR_PTRACE,
1003 "PT_TO_SCX: pid %d, stops = %#x, PC = %#lx, sig = %d",
1004 p->p_pid, p->p_stops,
1005 (u_long)(uintfptr_t)addr, data);
1006 break;
1007 case PT_SYSCALL:
1008 p->p_stops |= S_PT_SCE | S_PT_SCX;
1009 CTR4(KTR_PTRACE,
1010 "PT_SYSCALL: pid %d, stops = %#x, PC = %#lx, sig = %d",
1011 p->p_pid, p->p_stops,
1012 (u_long)(uintfptr_t)addr, data);
1013 break;
1014 case PT_CONTINUE:
1015 CTR3(KTR_PTRACE,
1016 "PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
1017 p->p_pid, (u_long)(uintfptr_t)addr, data);
1018 break;
1019 }
1020 break;
1021 case PT_DETACH:
1022 /*
1023 * Reset the process parent.
1024 *
1025 * NB: This clears P_TRACED before reparenting
1026 * a detached process back to its original
1027 * parent. Otherwise the debugee will be set
1028 * as an orphan of the debugger.
1029 */
1030 p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);
1031 if (p->p_oppid != p->p_pptr->p_pid) {
1032 PROC_LOCK(p->p_pptr);
1033 sigqueue_take(p->p_ksi);
1034 PROC_UNLOCK(p->p_pptr);
1035
1036 pp = proc_realparent(p);
1037 proc_reparent(p, pp);
1038 if (pp == initproc)
1039 p->p_sigparent = SIGCHLD;
1040 CTR3(KTR_PTRACE,
1041 "PT_DETACH: pid %d reparented to pid %d, sig %d",
1042 p->p_pid, pp->p_pid, data);
1043 } else
1044 CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
1045 p->p_pid, data);
1046 p->p_oppid = 0;
1047 p->p_stops = 0;
1048
1049 /* should we send SIGCHLD? */
1050 /* childproc_continued(p); */
1051 break;
1052 }
1053
1054 sendsig:
1055 if (proctree_locked) {
1056 sx_xunlock(&proctree_lock);
1057 proctree_locked = 0;
1058 }
1059 p->p_xsig = data;
1060 p->p_xthread = NULL;
1061 if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
1062 /* deliver or queue signal */
1063 td2->td_dbgflags &= ~TDB_XSIG;
1064 td2->td_xsig = data;
1065
1066 if (req == PT_DETACH) {
1067 FOREACH_THREAD_IN_PROC(p, td3)
1068 td3->td_dbgflags &= ~TDB_SUSPEND;
1069 }
1070 /*
1071 * unsuspend all threads, to not let a thread run,
1072 * you should use PT_SUSPEND to suspend it before
1073 * continuing process.
1074 */
1075 PROC_SLOCK(p);
1076 p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
1077 thread_unsuspend(p);
1078 PROC_SUNLOCK(p);
1079 if (req == PT_ATTACH)
1080 kern_psignal(p, data);
1081 } else {
1082 if (data)
1083 kern_psignal(p, data);
1084 }
1085 break;
1086
1087 case PT_WRITE_I:
1088 case PT_WRITE_D:
1089 td2->td_dbgflags |= TDB_USERWR;
1090 PROC_UNLOCK(p);
1091 error = 0;
1092 if (proc_writemem(td, p, (off_t)(uintptr_t)addr, &data,
1093 sizeof(int)) != sizeof(int))
1094 error = ENOMEM;
1095 else
1096 CTR3(KTR_PTRACE, "PT_WRITE: pid %d: %p <= %#x",
1097 p->p_pid, addr, data);
1098 PROC_LOCK(p);
1099 break;
1100
1101 case PT_READ_I:
1102 case PT_READ_D:
1103 PROC_UNLOCK(p);
1104 error = tmp = 0;
1105 if (proc_readmem(td, p, (off_t)(uintptr_t)addr, &tmp,
1106 sizeof(int)) != sizeof(int))
1107 error = ENOMEM;
1108 else
1109 CTR3(KTR_PTRACE, "PT_READ: pid %d: %p >= %#x",
1110 p->p_pid, addr, tmp);
1111 td->td_retval[0] = tmp;
1112 PROC_LOCK(p);
1113 break;
1114
1115 case PT_IO:
1116#ifdef COMPAT_FREEBSD32
1117 if (wrap32) {
1118 piod32 = addr;
1119 iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
1120 iov.iov_len = piod32->piod_len;
1121 uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
1122 uio.uio_resid = piod32->piod_len;
1123 } else
1124#endif
1125 {
1126 piod = addr;
1127 iov.iov_base = piod->piod_addr;
1128 iov.iov_len = piod->piod_len;
1129 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
1130 uio.uio_resid = piod->piod_len;
1131 }
1132 uio.uio_iov = &iov;
1133 uio.uio_iovcnt = 1;
1134 uio.uio_segflg = UIO_USERSPACE;
1135 uio.uio_td = td;
1136#ifdef COMPAT_FREEBSD32
1137 tmp = wrap32 ? piod32->piod_op : piod->piod_op;
1138#else
1139 tmp = piod->piod_op;
1140#endif
1141 switch (tmp) {
1142 case PIOD_READ_D:
1143 case PIOD_READ_I:
1144 CTR3(KTR_PTRACE, "PT_IO: pid %d: READ (%p, %#x)",
1145 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
1146 uio.uio_rw = UIO_READ;
1147 break;
1148 case PIOD_WRITE_D:
1149 case PIOD_WRITE_I:
1150 CTR3(KTR_PTRACE, "PT_IO: pid %d: WRITE (%p, %#x)",
1151 p->p_pid, (uintptr_t)uio.uio_offset, uio.uio_resid);
1152 td2->td_dbgflags |= TDB_USERWR;
1153 uio.uio_rw = UIO_WRITE;
1154 break;
1155 default:
1156 error = EINVAL;
1157 goto out;
1158 }
1159 PROC_UNLOCK(p);
1160 error = proc_rwmem(p, &uio);
1161#ifdef COMPAT_FREEBSD32
1162 if (wrap32)
1163 piod32->piod_len -= uio.uio_resid;
1164 else
1165#endif
1166 piod->piod_len -= uio.uio_resid;
1167 PROC_LOCK(p);
1168 break;
1169
1170 case PT_KILL:
1171 CTR1(KTR_PTRACE, "PT_KILL: pid %d", p->p_pid);
1172 data = SIGKILL;
1173 goto sendsig; /* in PT_CONTINUE above */
1174
1175 case PT_SETREGS:
1176 CTR2(KTR_PTRACE, "PT_SETREGS: tid %d (pid %d)", td2->td_tid,
1177 p->p_pid);
1178 td2->td_dbgflags |= TDB_USERWR;
1179 error = PROC_WRITE(regs, td2, addr);
1180 break;
1181
1182 case PT_GETREGS:
1183 CTR2(KTR_PTRACE, "PT_GETREGS: tid %d (pid %d)", td2->td_tid,
1184 p->p_pid);
1185 error = PROC_READ(regs, td2, addr);
1186 break;
1187
1188 case PT_SETFPREGS:
1189 CTR2(KTR_PTRACE, "PT_SETFPREGS: tid %d (pid %d)", td2->td_tid,
1190 p->p_pid);
1191 td2->td_dbgflags |= TDB_USERWR;
1192 error = PROC_WRITE(fpregs, td2, addr);
1193 break;
1194
1195 case PT_GETFPREGS:
1196 CTR2(KTR_PTRACE, "PT_GETFPREGS: tid %d (pid %d)", td2->td_tid,
1197 p->p_pid);
1198 error = PROC_READ(fpregs, td2, addr);
1199 break;
1200
1201 case PT_SETDBREGS:
1202 CTR2(KTR_PTRACE, "PT_SETDBREGS: tid %d (pid %d)", td2->td_tid,
1203 p->p_pid);
1204 td2->td_dbgflags |= TDB_USERWR;
1205 error = PROC_WRITE(dbregs, td2, addr);
1206 break;
1207
1208 case PT_GETDBREGS:
1209 CTR2(KTR_PTRACE, "PT_GETDBREGS: tid %d (pid %d)", td2->td_tid,
1210 p->p_pid);
1211 error = PROC_READ(dbregs, td2, addr);
1212 break;
1213
1214 case PT_LWPINFO:
1215 if (data <= 0 ||
1216#ifdef COMPAT_FREEBSD32
1217 (!wrap32 && data > sizeof(*pl)) ||
1218 (wrap32 && data > sizeof(*pl32))) {
1219#else
1220 data > sizeof(*pl)) {
1221#endif
1222 error = EINVAL;
1223 break;
1224 }
1225#ifdef COMPAT_FREEBSD32
1226 if (wrap32) {
1227 pl = &plr;
1228 pl32 = addr;
1229 } else
1230#endif
1231 pl = addr;
1232 pl->pl_lwpid = td2->td_tid;
1233 pl->pl_event = PL_EVENT_NONE;
1234 pl->pl_flags = 0;
1235 if (td2->td_dbgflags & TDB_XSIG) {
1236 pl->pl_event = PL_EVENT_SIGNAL;
1237 if (td2->td_dbgksi.ksi_signo != 0 &&
1238#ifdef COMPAT_FREEBSD32
1239 ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
1240 pl_siginfo) + sizeof(pl->pl_siginfo)) ||
1241 (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
1242 pl_siginfo) + sizeof(struct siginfo32)))
1243#else
1244 data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
1245 + sizeof(pl->pl_siginfo)
1246#endif
1247 ) {
1248 pl->pl_flags |= PL_FLAG_SI;
1249 pl->pl_siginfo = td2->td_dbgksi.ksi_info;
1250 }
1251 }
1252 if ((pl->pl_flags & PL_FLAG_SI) == 0)
1253 bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
1254 if (td2->td_dbgflags & TDB_SCE)
1255 pl->pl_flags |= PL_FLAG_SCE;
1256 else if (td2->td_dbgflags & TDB_SCX)
1257 pl->pl_flags |= PL_FLAG_SCX;
1258 if (td2->td_dbgflags & TDB_EXEC)
1259 pl->pl_flags |= PL_FLAG_EXEC;
1260 if (td2->td_dbgflags & TDB_FORK) {
1261 pl->pl_flags |= PL_FLAG_FORKED;
1262 pl->pl_child_pid = td2->td_dbg_forked;
1263 }
1264 if (td2->td_dbgflags & TDB_CHILD)
1265 pl->pl_flags |= PL_FLAG_CHILD;
1266 if (td2->td_dbgflags & TDB_BORN)
1267 pl->pl_flags |= PL_FLAG_BORN;
1268 if (td2->td_dbgflags & TDB_EXIT)
1269 pl->pl_flags |= PL_FLAG_EXITED;
1270 pl->pl_sigmask = td2->td_sigmask;
1271 pl->pl_siglist = td2->td_siglist;
1272 strcpy(pl->pl_tdname, td2->td_name);
1273 if ((td2->td_dbgflags & (TDB_SCE | TDB_SCX)) != 0) {
1274 pl->pl_syscall_code = td2->td_dbg_sc_code;
1275 pl->pl_syscall_narg = td2->td_dbg_sc_narg;
1276 } else {
1277 pl->pl_syscall_code = 0;
1278 pl->pl_syscall_narg = 0;
1279 }
1280#ifdef COMPAT_FREEBSD32
1281 if (wrap32)
1282 ptrace_lwpinfo_to32(pl, pl32);
1283#endif
1284 CTR6(KTR_PTRACE,
1285 "PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
1286 td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
1287 pl->pl_child_pid, pl->pl_syscall_code);
1288 break;
1289
1290 case PT_GETNUMLWPS:
1291 CTR2(KTR_PTRACE, "PT_GETNUMLWPS: pid %d: %d threads", p->p_pid,
1292 p->p_numthreads);
1293 td->td_retval[0] = p->p_numthreads;
1294 break;
1295
1296 case PT_GETLWPLIST:
1297 CTR3(KTR_PTRACE, "PT_GETLWPLIST: pid %d: data %d, actual %d",
1298 p->p_pid, data, p->p_numthreads);
1299 if (data <= 0) {
1300 error = EINVAL;
1301 break;
1302 }
1303 num = imin(p->p_numthreads, data);
1304 PROC_UNLOCK(p);
1305 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
1306 tmp = 0;
1307 PROC_LOCK(p);
1308 FOREACH_THREAD_IN_PROC(p, td2) {
1309 if (tmp >= num)
1310 break;
1311 buf[tmp++] = td2->td_tid;
1312 }
1313 PROC_UNLOCK(p);
1314 error = copyout(buf, addr, tmp * sizeof(lwpid_t));
1315 free(buf, M_TEMP);
1316 if (!error)
1317 td->td_retval[0] = tmp;
1318 PROC_LOCK(p);
1319 break;
1320
1321 case PT_VM_TIMESTAMP:
1322 CTR2(KTR_PTRACE, "PT_VM_TIMESTAMP: pid %d: timestamp %d",
1323 p->p_pid, p->p_vmspace->vm_map.timestamp);
1324 td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
1325 break;
1326
1327 case PT_VM_ENTRY:
1328 PROC_UNLOCK(p);
1329#ifdef COMPAT_FREEBSD32
1330 if (wrap32)
1331 error = ptrace_vm_entry32(td, p, addr);
1332 else
1333#endif
1334 error = ptrace_vm_entry(td, p, addr);
1335 PROC_LOCK(p);
1336 break;
1337
1338 default:
1339#ifdef __HAVE_PTRACE_MACHDEP
1340 if (req >= PT_FIRSTMACH) {
1341 PROC_UNLOCK(p);
1342 error = cpu_ptrace(td2, req, addr, data);
1343 PROC_LOCK(p);
1344 } else
1345#endif
1346 /* Unknown request. */
1347 error = EINVAL;
1348 break;
1349 }
1350
1351out:
1352 /* Drop our hold on this process now that the request has completed. */
1353 _PRELE(p);
1354fail:
1355 PROC_UNLOCK(p);
1356 if (proctree_locked)
1357 sx_xunlock(&proctree_lock);
1358 return (error);
1359}
1360#undef PROC_READ
1361#undef PROC_WRITE
1362
1363/*
1364 * Stop a process because of a debugging event;
1365 * stay stopped until p->p_step is cleared
1366 * (cleared by PIOCCONT in procfs).
1367 */
1368void
1369stopevent(struct proc *p, unsigned int event, unsigned int val)
1370{
1371
1372 PROC_LOCK_ASSERT(p, MA_OWNED);
1373 p->p_step = 1;
1374 CTR3(KTR_PTRACE, "stopevent: pid %d event %u val %u", p->p_pid, event,
1375 val);
1376 do {
1377 if (event != S_EXIT)
1378 p->p_xsig = val;
1379 p->p_xthread = NULL;
1380 p->p_stype = event; /* Which event caused the stop? */
1381 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */
1382 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
1383 } while (p->p_step);
1384}