Bug Summary

File: dev/hwpmc/hwpmc_mod.c
Warning: line 3710, column 12
Copies out a struct with untouched element(s): pm_name, pm_class, pm_enabled
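
The checker reports a copyout(9) of a structure whose pm_name, pm_class and pm_enabled members can reach the copy without ever being written, so whatever the kernel stack happened to hold at those offsets is disclosed to user space. Below is a minimal sketch of the pattern and the conventional fix; the structure and function names are hypothetical stand-ins, not the driver's real PMC-info type:

    #include <sys/param.h>
    #include <sys/systm.h>          /* copyout(9), memset() */

    struct pmc_info_sketch {        /* hypothetical reduced stand-in */
            char    pm_name[64];
            int     pm_class;
            int     pm_enabled;
            int     pm_rowindex;
    };

    static int
    copyout_info_sketch(void *uaddr, int ri)
    {
            struct pmc_info_sketch p;

            /* Fix: zero the whole struct so no byte reaches user space
             * uninitialized, whatever code path fills it in. */
            memset(&p, 0, sizeof(p));
            p.pm_rowindex = ri;
            /* pm_name, pm_class and pm_enabled are only written on some
             * paths in the real driver; the memset keeps the others safe. */
            return (copyout(&p, uaddr, sizeof(p)));
    }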

Annotated Source Code

clang -cc1 -triple x86_64-unknown-freebsd11.2 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name hwpmc_mod.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model static -mthread-model posix -mdisable-fp-elim -relaxed-aliasing -masm-verbose -mconstructor-aliases -ffreestanding -mcode-model kernel -target-cpu x86-64 -target-feature -mmx -target-feature -sse -target-feature -aes -target-feature -avx -disable-red-zone -no-implicit-float -dwarf-column-info -debugger-tuning=gdb -nostdsysteminc -nobuiltininc -resource-dir /root/kernel-uninitialized-memory-checker/build/lib/clang/8.0.0 -include /usr/obj/root/freebsd/amd64.amd64/sys/GENERIC/opt_global.h -D _KERNEL -D KLD_MODULE -D KLD_TIED -D HAVE_KERNEL_OPTION_HEADERS -I . -I /root/freebsd/sys -I /root/freebsd/sys/contrib/ck/include -I /usr/obj/root/freebsd/amd64.amd64/sys/GENERIC -D __printf__=__freebsd_kprintf__ -O2 -Wno-pointer-sign -Wno-unknown-pragmas -Wno-error-tautological-compare -Wno-error-empty-body -Wno-error-parentheses-equality -Wno-error-unused-function -Wno-error-pointer-sign -Wno-error-shift-negative-value -Wno-address-of-packed-member -std=iso9899:1999 -fdebug-compilation-dir /usr/obj/root/freebsd/amd64.amd64/sys/GENERIC/modules/root/freebsd/sys/modules/hwpmc -ferror-limit 19 -fmessage-length 0 -fwrapv -stack-protector 1 -fobjc-runtime=gnustep -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker alpha.security.KernelMemoryDisclosure -analyzer-disable-checker core,unix,deadcode,nullability -analyzer-output=html -o /root/analyzer/2018-12-28-044519-76292-1 -x c /root/freebsd/sys/dev/hwpmc/hwpmc_mod.c -faddrsig
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2003-2008 Joseph Koshy
5 * Copyright (c) 2007 The FreeBSD Foundation
6 * Copyright (c) 2018 Matthew Macy
7 * All rights reserved.
8 *
9 * Portions of this software were developed by A. Joseph Koshy under
10 * sponsorship from the FreeBSD Foundation and Google, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39 #include <sys/domainset.h>
40 #include <sys/eventhandler.h>
41 #include <sys/gtaskqueue.h>
42 #include <sys/jail.h>
43 #include <sys/kernel.h>
44 #include <sys/kthread.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/module.h>
49 #include <sys/mount.h>
50 #include <sys/mutex.h>
51 #include <sys/pmc.h>
52 #include <sys/pmckern.h>
53 #include <sys/pmclog.h>
54 #include <sys/priv.h>
55 #include <sys/proc.h>
56 #include <sys/queue.h>
57 #include <sys/resourcevar.h>
58 #include <sys/rwlock.h>
59 #include <sys/sched.h>
60 #include <sys/signalvar.h>
61 #include <sys/smp.h>
62 #include <sys/sx.h>
63 #include <sys/sysctl.h>
64 #include <sys/sysent.h>
65 #include <sys/syslog.h>
66 #include <sys/systm.h>
67 #include <sys/vnode.h>
68
69 #include <sys/linker.h>         /* needs to be after <sys/malloc.h> */
70
71 #include <machine/atomic.h>
72 #include <machine/md_var.h>
73
74 #include <vm/vm.h>
75 #include <vm/vm_extern.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79
80 #include "hwpmc_soft.h"
81
82 #define PMC_EPOCH_ENTER() struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt, &pmc_et)
83 #define PMC_EPOCH_EXIT()  epoch_exit_preempt(global_epoch_preempt, &pmc_et)
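
Note that PMC_EPOCH_ENTER() declares the pmc_et tracker that PMC_EPOCH_EXIT() later names, so the two macros must be paired in the same scope. An illustrative pairing (not code from this file):

    static void
    epoch_usage_sketch(void)
    {
            PMC_EPOCH_ENTER();      /* declares pmc_et, enters preemptible epoch */
            /* ... read an epoch-protected list such as pmc_ss_owners ... */
            PMC_EPOCH_EXIT();       /* exits using the same pmc_et tracker */
    }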
84
85 /*
86 * Types
87 */
88
89 enum pmc_flags {
90     PMC_FLAG_NONE     = 0x00, /* do nothing */
91     PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
92     PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
93     PMC_FLAG_NOWAIT   = 0x04, /* do not wait for mallocs */
94 };
95
96 /*
97 * The offset in sysent where the syscall is allocated.
98 */
99
100 static int pmc_syscall_num = NO_SYSCALL;
101 struct pmc_cpu **pmc_pcpu;          /* per-cpu state */
102 pmc_value_t *pmc_pcpu_saved;        /* saved PMC values: CSW handling */
103
104 #define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
105
106 struct mtx_pool *pmc_mtxpool;
107 static int *pmc_pmcdisp;            /* PMC row dispositions */
108
109 #define PMC_ROW_DISP_IS_FREE(R)         (pmc_pmcdisp[(R)] == 0)
110 #define PMC_ROW_DISP_IS_THREAD(R)       (pmc_pmcdisp[(R)] > 0)
111 #define PMC_ROW_DISP_IS_STANDALONE(R)   (pmc_pmcdisp[(R)] < 0)
112
113 #define PMC_MARK_ROW_FREE(R) do { \
114     pmc_pmcdisp[(R)] = 0; \
115 } while (0)
116
117 #define PMC_MARK_ROW_STANDALONE(R) do { \
118     KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
119         __LINE__)); \
120     atomic_add_int(&pmc_pmcdisp[(R)], -1); \
121     KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \
122         ("[pmc,%d] row disposition error", __LINE__)); \
123 } while (0)
124
125 #define PMC_UNMARK_ROW_STANDALONE(R) do { \
126     atomic_add_int(&pmc_pmcdisp[(R)], 1); \
127     KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
128         __LINE__)); \
129 } while (0)
130
131 #define PMC_MARK_ROW_THREAD(R) do { \
132     KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
133         __LINE__)); \
134     atomic_add_int(&pmc_pmcdisp[(R)], 1); \
135 } while (0)
136
137 #define PMC_UNMARK_ROW_THREAD(R) do { \
138     atomic_add_int(&pmc_pmcdisp[(R)], -1); \
139     KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
140         __LINE__)); \
141 } while (0)
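
Taken together, these macros encode a sign convention for pmc_pmcdisp[]: zero means the row is free, a positive count means thread-mode (per-process) use, and a negative count means standalone (system-wide) use, bounded by the number of active CPUs; the KASSERTs panic if the two modes ever mix on one row. A hypothetical claim/release pairing for a row index ri (illustrative only, not code from this file):

    static void
    row_disposition_sketch(int ri)
    {
            PMC_MARK_ROW_THREAD(ri);    /* asserts the row is not standalone */
            /* ... bind a process-mode PMC to row ri ... */
            PMC_UNMARK_ROW_THREAD(ri);  /* asserts the count never goes negative */
    }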
142
143
144 /* various event handlers */
145 static eventhandler_tag pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag,
146     pmc_kld_unload_tag;
147
148 /* Module statistics */
149 struct pmc_driverstats pmc_stats;
150
151
152 /* Machine/processor dependent operations */
153 static struct pmc_mdep *md;
154
155 /*
156 * Hash tables mapping owner processes and target threads to PMCs.
157 */
158
159 struct mtx pmc_processhash_mtx;     /* spin mutex */
160 static u_long pmc_processhashmask;
161 static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
162
163 /*
164 * Hash table of PMC owner descriptors. This table is protected by
165 * the shared PMC "sx" lock.
166 */
167
168 static u_long pmc_ownerhashmask;
169 static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
170
171 /*
172 * List of PMC owners with system-wide sampling PMCs.
173 */
174
175 static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners;
176
177 /*
178 * List of free thread entries. This is protected by the spin
179 * mutex.
180 */
181 static struct mtx pmc_threadfreelist_mtx;   /* spin mutex */
182 static LIST_HEAD(, pmc_thread) pmc_threadfreelist;
183 static int pmc_threadfreelist_entries = 0;
184 #define THREADENTRY_SIZE \
185     (sizeof(struct pmc_thread) + (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))
186
187 /*
188 * Task to free thread descriptors
189 */
190 static struct grouptask free_gtask;
191
192 /*
193 * A map of row indices to classdep structures.
194 */
195 static struct pmc_classdep **pmc_rowindex_to_classdep;
196
197 /*
198 * Prototypes
199 */
200
201 #ifdef HWPMC_DEBUG
202 static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
203 static int pmc_debugflags_parse(char *newstr, char *fence);
204 #endif
205
206 static int load(struct module *module, int cmd, void *arg);
207 static int pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf);
208 static void pmc_add_thread_descriptors_from_proc(struct proc *p,
209     struct pmc_process *pp);
210 static int pmc_attach_process(struct proc *p, struct pmc *pm);
211 static struct pmc *pmc_allocate_pmc_descriptor(void);
212 static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
213 static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
214 static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
215     int cpu);
216 static int pmc_can_attach(struct pmc *pm, struct proc *p);
217 static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf);
218 static void pmc_cleanup(void);
219 static int pmc_detach_process(struct proc *p, struct pmc *pm);
220 static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
221     int flags);
222 static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
223 static void pmc_destroy_pmc_descriptor(struct pmc *pm);
224 static void pmc_destroy_process_descriptor(struct pmc_process *pp);
225 static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
226 static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
227 static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
228     pmc_id_t pmc);
229 static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
230     uint32_t mode);
231 static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp,
232     struct thread *td, uint32_t mode);
233 static void pmc_force_context_switch(void);
234 static void pmc_link_target_process(struct pmc *pm,
235     struct pmc_process *pp);
236 static void pmc_log_all_process_mappings(struct pmc_owner *po);
237 static void pmc_log_kernel_mappings(struct pmc *pm);
238 static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
239 static void pmc_maybe_remove_owner(struct pmc_owner *po);
240 static void pmc_process_csw_in(struct thread *td);
241 static void pmc_process_csw_out(struct thread *td);
242 static void pmc_process_exit(void *arg, struct proc *p);
243 static void pmc_process_fork(void *arg, struct proc *p1,
244     struct proc *p2, int n);
245 static void pmc_process_samples(int cpu, ring_type_t soft);
246 static void pmc_release_pmc_descriptor(struct pmc *pmc);
247 static void pmc_process_thread_add(struct thread *td);
248 static void pmc_process_thread_delete(struct thread *td);
249 static void pmc_process_thread_userret(struct thread *td);
250 static void pmc_remove_owner(struct pmc_owner *po);
251 static void pmc_remove_process_descriptor(struct pmc_process *pp);
252 static void pmc_restore_cpu_binding(struct pmc_binding *pb);
253 static void pmc_save_cpu_binding(struct pmc_binding *pb);
254 static void pmc_select_cpu(int cpu);
255 static int pmc_start(struct pmc *pm);
256 static int pmc_stop(struct pmc *pm);
257 static int pmc_syscall_handler(struct thread *td, void *syscall_args);
258 static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void);
259 static void pmc_thread_descriptor_pool_drain(void);
260 static void pmc_thread_descriptor_pool_free(struct pmc_thread *pt);
261 static void pmc_unlink_target_process(struct pmc *pmc,
262     struct pmc_process *pp);
263 static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
264 static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
265 static struct pmc_mdep *pmc_generic_cpu_initialize(void);
266 static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
267 static void pmc_post_callchain_callback(void);
268 static void pmc_process_threadcreate(struct thread *td);
269 static void pmc_process_threadexit(struct thread *td);
270 static void pmc_process_proccreate(struct proc *p);
271 static void pmc_process_allproc(struct pmc *pm);
272
273 /*
274 * Kernel tunables and sysctl(8) interface.
275 */
276
277 SYSCTL_DECL(_kern_hwpmc);
278 SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW, 0, "HWPMC stats");
279
280
281 /* Stats. */
282 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW,
283     &pmc_stats.pm_intr_ignored, "# of interrupts ignored");
284 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW,
285     &pmc_stats.pm_intr_processed, "# of interrupts processed");
286 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW,
287     &pmc_stats.pm_intr_bufferfull, "# of interrupts where buffer was full");
288 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW,
289     &pmc_stats.pm_syscalls, "# of syscalls");
290 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW,
291     &pmc_stats.pm_syscall_errors, "# of syscall_errors");
292 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW,
293     &pmc_stats.pm_buffer_requests, "# of buffer requests");
294 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed, CTLFLAG_RW,
295     &pmc_stats.pm_buffer_requests_failed, "# of buffer requests which failed");
296 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW,
297     &pmc_stats.pm_log_sweeps, "# of ?");
298 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, merges, CTLFLAG_RW,
299     &pmc_stats.pm_merges, "# of times kernel stack was found for user trace");
300 SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, overwrites, CTLFLAG_RW,
301     &pmc_stats.pm_overwrites, "# of times a sample was overwritten before being logged");
302
303 static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
304 SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
305     &pmc_callchaindepth, 0, "depth of call chain records");
307char pmc_cpuid[64];
308SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD,static struct sysctl_oid sysctl___kern_hwpmc_cpuid = { .oid_parent
= ((&(&sysctl___kern_hwpmc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (3|(0x80000000
)), .oid_arg1 = (pmc_cpuid), .oid_arg2 = (0), .oid_name = ("cpuid"
), .oid_handler = (sysctl_handle_string), .oid_fmt = ("A"), .
oid_descr = "cpu version string", .oid_label = (((void *)0)),
}; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_hwpmc_cpuid
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_hwpmc_cpuid); _Static_assert
(((0x80000000) & 0xf) == 0 || ((0x80000000) & 0xf) ==
3, "compile-time assertion failed")
309 pmc_cpuid, 0, "cpu version string")static struct sysctl_oid sysctl___kern_hwpmc_cpuid = { .oid_parent
= ((&(&sysctl___kern_hwpmc)->oid_children)), .oid_children
= { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (3|(0x80000000
)), .oid_arg1 = (pmc_cpuid), .oid_arg2 = (0), .oid_name = ("cpuid"
), .oid_handler = (sysctl_handle_string), .oid_fmt = ("A"), .
oid_descr = "cpu version string", .oid_label = (((void *)0)),
}; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl "
"__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_hwpmc_cpuid
__attribute__((__section__("set_" "sysctl_set"))) __attribute__
((__used__)) = &(sysctl___kern_hwpmc_cpuid); _Static_assert
(((0x80000000) & 0xf) == 0 || ((0x80000000) & 0xf) ==
3, "compile-time assertion failed")
;
310 #ifdef HWPMC_DEBUG
311 struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
312 char pmc_debugstr[PMC_DEBUG_STRSIZE];
313 TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
314     sizeof(pmc_debugstr));
315 SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
316     CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
317     0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
318 #endif
319
320
321 /*
322 * kern.hwpmc.hashsize -- determines the number of rows in the
323 * hash table used to look up threads
324 */
325
326 static int pmc_hashsize = PMC_HASH_SIZE;
327 SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
328     &pmc_hashsize, 0, "rows in hash tables");
329
330 /*
331 * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
332 */
333
334 static int pmc_nsamples = PMC_NSAMPLES;
335 SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
336     &pmc_nsamples, 0, "number of PC samples per CPU");
337
338 static uint64_t pmc_sample_mask = PMC_NSAMPLES-1;
339
340 /*
341 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
342 */
343
344 static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
345 SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
346     &pmc_mtxpool_size, 0, "size of spin mutex pool");
347
348
349 /*
350 * kern.hwpmc.threadfreelist_entries -- number of free entries
351 */
352
353 SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD,
354     &pmc_threadfreelist_entries, 0, "number of available thread entries");
355
356
357 /*
358 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
359 */
360
361 static int pmc_threadfreelist_max = PMC_THREADLIST_MAX;
362 SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW,
363     &pmc_threadfreelist_max, 0,
364     "maximum number of available thread entries before freeing some");
365
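Both sysctls above are plain integers, and threadfreelist_max is writable (CTLFLAG_RW). As a minimal user-space sketch assuming only the OID names shown (the value 256 is an arbitrary example), the limit can be read and raised with sysctlbyname(3):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
            int cur, newmax = 256;
            size_t len = sizeof(cur);

            /* Read the current limit through sysctl_handle_int. */
            if (sysctlbyname("kern.hwpmc.threadfreelist_max", &cur, &len,
                NULL, 0) == -1) {
                    perror("read");
                    return (1);
            }
            printf("threadfreelist_max = %d\n", cur);

            /* CTLFLAG_RW permits a (privileged) write of a new limit. */
            if (sysctlbyname("kern.hwpmc.threadfreelist_max", NULL, NULL,
                &newmax, sizeof(newmax)) == -1)
                    perror("write");
            return (0);
    }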
366
367/*
368 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
369 * allocate system-wide PMCs.
370 *
371 * Allowing unprivileged processes to allocate system PMCs is convenient
372 * if system-wide measurements need to be taken concurrently with other
373 * per-process measurements. This feature is turned off by default.
374 */
375
376static int pmc_unprivileged_syspmcs = 0;
377SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
378    &pmc_unprivileged_syspmcs, 0,
379    "allow unprivileged process to allocate system PMCs");
380
381/*
382 * Hash function. Discard the lower 2 bits of the pointer since
383 * these are always zero for our uses. The hash multiplier is
384 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
385 */
386
387#if LONG_BIT == 64
388#define _PMC_HM 11400714819323198486u
389#elif LONG_BIT == 32
390#define _PMC_HM 2654435769u
391#else
392#error Must know the size of 'long' to compile
393#endif
394
395#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
396
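The multiplier is the 64-bit Fibonacci-hashing constant, i.e. round(2^64 / phi). A self-contained sketch of the same computation (assuming LONG_BIT == 64; the 16-bucket table is an arbitrary example):

    #include <stdio.h>

    #define _PMC_HM           11400714819323198486u
    #define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))

    int
    main(void)
    {
            static int obj;                 /* stand-in for a kernel object */
            unsigned long mask = 16 - 1;    /* power-of-two bucket count - 1 */

            /* The low 2 bits are discarded: aligned pointers keep them zero. */
            printf("bucket = %lu\n", PMC_HASH_PTR(&obj, mask));
            return (0);
    }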
397/*
398 * Syscall structures
399 */
400
401/* The `sysent' for the new syscall */
402static struct sysent pmc_sysent = {
403 .sy_narg = 2,
404 .sy_call = pmc_syscall_handler,
405};
406
407static struct syscall_module_data pmc_syscall_mod = {
408 .chainevh = load,
409 .chainarg = NULL,
410 .offset = &pmc_syscall_num,
411 .new_sysent = &pmc_sysent,
412 .old_sysent = { .sy_narg = 0, .sy_call = NULL },
413 .flags = SY_THR_STATIC_KLD,
414};
415
416static moduledata_t pmc_mod = {
417 .name = PMC_MODULE_NAME,
418 .evhand = syscall_module_handler,
419 .priv = &pmc_syscall_mod,
420};
421
422#ifdef EARLY_AP_STARTUP
423DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY);
424#else
425DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
426#endif
427MODULE_VERSION(pmc, PMC_VERSION);
428
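Because .offset points at pmc_syscall_num, the syscall slot is assigned when the module loads rather than fixed at compile time. A user-space sketch of the conventional discovery sequence, assuming the module registers under the name "hwpmc" (PMC_MODULE_NAME above; verify with kldstat -v on a live system):

    #include <sys/param.h>
    #include <sys/module.h>
    #include <stdio.h>

    int
    main(void)
    {
            struct module_stat ms;
            int modid;

            if ((modid = modfind("hwpmc")) == -1) {
                    perror("modfind");
                    return (1);
            }
            ms.version = sizeof(ms);
            if (modstat(modid, &ms) == -1) {
                    perror("modstat");
                    return (1);
            }
            /* Syscall modules export their assigned slot in data.intval;
             * syscall(ms.data.intval, op, arg) then reaches
             * pmc_syscall_handler with its two arguments (sy_narg == 2). */
            printf("pmc syscall number = %d\n", ms.data.intval);
            return (0);
    }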
429#ifdef HWPMC_DEBUG
430enum pmc_dbgparse_state {
431 PMCDS_WS, /* in whitespace */
432 PMCDS_MAJOR, /* seen a major keyword */
433 PMCDS_MINOR
434};
435
436static int
437pmc_debugflags_parse(char *newstr, char *fence)
438{
439 char c, *p, *q;
440 struct pmc_debugflags *tmpflags;
441 int error, found, *newbits, tmp;
442 size_t kwlen;
443
444 tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);
445
446 p = newstr;
447 error = 0;
448
449 for (; p < fence && (c = *p); p++) {
450
451 /* skip white space */
452 if (c == ' ' || c == '\t')
453 continue;
454
455 /* look for a keyword followed by "=" */
456 for (q = p; p < fence && (c = *p) && c != '='; p++)
457 ;
458 if (c != '=') {
459 error = EINVAL;
460 goto done;
461 }
462
463 kwlen = p - q;
464 newbits = NULL;
465
466 /* lookup flag group name */
467#define DBG_SET_FLAG_MAJ(S,F) \
468 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
469 newbits = &tmpflags->pdb_ ## F;
470
471 DBG_SET_FLAG_MAJ("cpu", CPU);
472 DBG_SET_FLAG_MAJ("csw", CSW);
473 DBG_SET_FLAG_MAJ("logging", LOG);
474 DBG_SET_FLAG_MAJ("module", MOD);
475 DBG_SET_FLAG_MAJ("md", MDP);
476 DBG_SET_FLAG_MAJ("owner", OWN);
477 DBG_SET_FLAG_MAJ("pmc", PMC);
478 DBG_SET_FLAG_MAJ("process", PRC);
479 DBG_SET_FLAG_MAJ("sampling", SAM);
480
481 if (newbits == NULL) {
482 error = EINVAL;
483 goto done;
484 }
485
486 p++; /* skip the '=' */
487
488 /* Now parse the individual flags */
489 tmp = 0;
490 newflag:
491 for (q = p; p < fence && (c = *p); p++)
492 if (c == ' ' || c == '\t' || c == ',')
493 break;
494
495 /* p == fence or c == ws or c == "," or c == 0 */
496
497 if ((kwlen = p - q) == 0) {
498 *newbits = tmp;
499 continue;
500 }
501
502 found = 0;
503#define DBG_SET_FLAG_MIN(S,F) \
504 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
505 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
506
507 /* a '*' denotes all possible flags in the group */
508 if (kwlen == 1 && *q == '*')
509 tmp = found = ~0;
510 /* look for individual flag names */
511 DBG_SET_FLAG_MIN("allocaterow", ALR);
512 DBG_SET_FLAG_MIN("allocate", ALL);
513 DBG_SET_FLAG_MIN("attach", ATT);
514 DBG_SET_FLAG_MIN("bind", BND);
515 DBG_SET_FLAG_MIN("config", CFG);
516 DBG_SET_FLAG_MIN("exec", EXC);
517 DBG_SET_FLAG_MIN("exit", EXT);
518 DBG_SET_FLAG_MIN("find", FND);
519 DBG_SET_FLAG_MIN("flush", FLS);
520 DBG_SET_FLAG_MIN("fork", FRK);
521 DBG_SET_FLAG_MIN("getbuf", GTB);
522 DBG_SET_FLAG_MIN("hook", PMH);
523 DBG_SET_FLAG_MIN("init", INI);
524 DBG_SET_FLAG_MIN("intr", INT);
525 DBG_SET_FLAG_MIN("linktarget", TLK);
526 DBG_SET_FLAG_MIN("mayberemove", OMR);
527 DBG_SET_FLAG_MIN("ops", OPS);
528 DBG_SET_FLAG_MIN("read", REA);
529 DBG_SET_FLAG_MIN("register", REG);
530 DBG_SET_FLAG_MIN("release", REL);
531 DBG_SET_FLAG_MIN("remove", ORM);
532 DBG_SET_FLAG_MIN("sample", SAM);
533 DBG_SET_FLAG_MIN("scheduleio", SIO);
534 DBG_SET_FLAG_MIN("select", SEL);
535 DBG_SET_FLAG_MIN("signal", SIG);
536 DBG_SET_FLAG_MIN("swi", SWI);
537 DBG_SET_FLAG_MIN("swo", SWO);
538 DBG_SET_FLAG_MIN("start", STA);
539 DBG_SET_FLAG_MIN("stop", STO);
540 DBG_SET_FLAG_MIN("syscall", PMS);
541 DBG_SET_FLAG_MIN("unlinktarget", TUL);
542 DBG_SET_FLAG_MIN("write", WRI);
543 if (found == 0) {
544 /* unrecognized flag name */
545 error = EINVAL;
546 goto done;
547 }
548
549 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
550 *newbits = tmp;
551 continue;
552 }
553
554 p++;
555 goto newflag;
556 }
557
558 /* save the new flag set */
559 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
560
561 done:
562 free(tmpflags, M_PMC);
563 return error;
564}
565
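The parser above accepts a whitespace-separated list of major=minor[,minor...] groups, where a minor of '*' turns on every flag in that group. Assuming the string handler that follows is wired up as the kern.hwpmc.debugflags sysctl (only present in HWPMC_DEBUG kernels), an input such as

    cpu=* process=fork,exec logging=*

enables all 'cpu' flags, the 'fork' and 'exec' flags of the 'process' group, and all 'logging' flags.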
566static int
567pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
568{
569 char *fence, *newstr;
570 int error;
571 unsigned int n;
572
573 (void) arg1; (void) arg2; /* unused parameters */
574
575 n = sizeof(pmc_debugstr);
576 newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
577 (void) strlcpy(newstr, pmc_debugstr, n);
578
579 error = sysctl_handle_string(oidp, newstr, n, req);
580
581 /* if there is a new string, parse and copy it */
582 if (error == 0 && req->newptr != NULL) {
583 fence = newstr + (n < req->newlen ? n : req->newlen + 1);
584 if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
585 (void) strlcpy(pmc_debugstr, newstr,
586 sizeof(pmc_debugstr));
587 }
588
589 free(newstr, M_PMC);
590
591 return error;
592}
593#endif
594
595/*
596 * Map a row index to a classdep structure and return the adjusted row
597 * index for the PMC class index.
598 */
599static struct pmc_classdep *
600pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
601{
602 struct pmc_classdep *pcd;
603
604 (void) md;
605
606 KASSERT(ri >= 0 && ri < md->pmd_npmc,
607 ("[pmc,%d] illegal row-index %d", __LINE__, ri));
608
609 pcd = pmc_rowindex_to_classdep[ri];
610
611 KASSERT(pcd != NULL,
612 ("[pmc,%d] ri %d null pcd", __LINE__, ri));
613
614 *adjri = ri - pcd->pcd_ri;
615
616 KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
617 ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));
618
619 return (pcd);
620}
621
622/*
623 * Concurrency Control
624 *
625 * The driver manages the following data structures:
626 *
627 * - target process descriptors, one per target process
628 * - owner process descriptors (and attached lists), one per owner process
629 * - lookup hash tables for owner and target processes
630 * - PMC descriptors (and attached lists)
631 * - per-cpu hardware state
632 * - the 'hook' variable through which the kernel calls into
633 * this module
634 * - the machine hardware state (managed by the MD layer)
635 *
636 * These data structures are accessed from:
637 *
638 * - thread context-switch code
639 * - interrupt handlers (possibly on multiple cpus)
640 * - kernel threads on multiple cpus running on behalf of user
641 * processes doing system calls
642 * - this driver's private kernel threads
643 *
644 * = Locks and Locking strategy =
645 *
646 * The driver uses four locking strategies for its operation:
647 *
648 * - The global SX lock "pmc_sx" is used to protect internal
649 * data structures.
650 *
651 * Calls into the module by syscall() start with this lock being
652 * held in exclusive mode. Depending on the requested operation,
653 * the lock may be downgraded to 'shared' mode to allow more
654 * concurrent readers into the module. Calls into the module from
655 * other parts of the kernel acquire the lock in shared mode.
656 *
657 * This SX lock is held in exclusive mode for any operations that
658 * modify the linkages between the driver's internal data structures.
659 *
660 * The 'pmc_hook' function pointer is also protected by this lock.
661 * It is only examined with the sx lock held in exclusive mode. The
662 * kernel module is allowed to be unloaded only with the sx lock held
663 * in exclusive mode. In normal syscall handling, after acquiring the
664 * pmc_sx lock we first check that 'pmc_hook' is non-null before
665 * proceeding. This prevents races between the thread unloading the module
666 * and other threads seeking to use the module.
667 *
668 * - Lookups of target process structures and owner process structures
669 * cannot use the global "pmc_sx" SX lock because these lookups need
670 * to happen during context switches and in other critical sections
671 * where sleeping is not allowed. We protect these lookup tables
672 * with their own private spin-mutexes, "pmc_processhash_mtx" and
673 * "pmc_ownerhash_mtx".
674 *
675 * - Interrupt handlers work in a lock-free manner. At interrupt
676 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
677 * when the PMC was started. If this pointer is NULL, the interrupt
678 * is ignored after updating driver statistics. We ensure that this
679 * pointer is set (using an atomic operation if necessary) before the
680 * PMC hardware is started. Conversely, this pointer is unset atomically
681 * only after the PMC hardware is stopped.
682 *
683 * We ensure that everything needed for the operation of an
684 * interrupt handler is available without it needing to acquire any
685 * locks. We also ensure that a PMC's software state is destroyed only
686 * after the PMC is taken off hardware (on all CPUs).
687 *
688 * - Context-switch handling with process-private PMCs needs more
689 * care.
690 *
691 * A given process may be the target of multiple PMCs. For example,
692 * PMCATTACH and PMCDETACH may be requested by a process on one CPU
693 * while the target process is running on another. A PMC could also
694 * be getting released because its owner is exiting. We tackle
695 * these situations in the following manner:
696 *
697 * - each target process structure 'pmc_process' has an array
698 * of 'struct pmc *' pointers, one for each hardware PMC.
699 *
700 * - At context switch IN time, each "target" PMC in RUNNING state
701 * gets started on hardware and a pointer to each PMC is copied into
702 * the per-cpu phw array. The 'runcount' for the PMC is
703 * incremented.
704 *
705 * - At context switch OUT time, all process-virtual PMCs are stopped
706 * on hardware. The saved value is added to the PMC's value field
707 * only if the PMC is in a non-deleted state (the PMC's state could
708 * have changed during the current time slice).
709 *
710 * Note that in between a switch IN on a processor and a switch
711 * OUT, the PMC could have been released on another CPU. Therefore,
712 * context switch OUT always looks at the hardware state to turn
713 * OFF PMCs and will update a PMC's saved value only if reachable
714 * from the target process record.
715 *
716 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
717 * be attached to many processes at the time of the call and could
718 * be active on multiple CPUs).
719 *
720 * We prevent further scheduling of the PMC by marking it as in
721 * state 'DELETED'. If the runcount of the PMC is non-zero then
722 * this PMC is currently running on a CPU somewhere. The thread
723 * doing the PMCRELEASE operation waits by repeatedly doing a
724 * pause() until the runcount comes to zero.
725 *
726 * The contents of a PMC descriptor (struct pmc) are protected using
727 * a spin-mutex. In order to save space, we use a mutex pool.
728 *
729 * In terms of lock types used by witness(4), we use:
730 * - Type "pmc-sx", used by the global SX lock.
731 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
732 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
733 * - Type "pmc-leaf", used for all other spin mutexes.
734 */
735
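The PMCRELEASE wait described above reduces to a mark-then-drain loop. A minimal user-space sketch of that protocol (illustrative type and names, not the driver's code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    /* User-space analogue of the DELETED-state/runcount protocol above. */
    struct pmc_sketch {
            atomic_bool deleted;    /* plays the role of state 'DELETED' */
            atomic_long runcount;   /* ++ at switch IN, -- at switch OUT */
    };

    static void
    release_wait(struct pmc_sketch *pm)
    {
            /* Prevent further scheduling of the PMC ... */
            atomic_store(&pm->deleted, true);
            /* ... then wait until every CPU has switched it out. */
            while (atomic_load(&pm->runcount) > 0)
                    usleep(1000);   /* the driver sleeps via pause(9) */
    }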
736/*
737 * save the cpu binding of the current kthread
738 */
739
740static void
741pmc_save_cpu_binding(struct pmc_binding *pb)
742{
743 PMCDBG0(CPU,BND,2, "save-cpu");
744 thread_lock(curthread);
745 pb->pb_bound = sched_is_bound(curthread);
746 pb->pb_cpu = curthread->td_oncpu;
747 thread_unlock(curthread);
748 PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
749}
750
751/*
752 * restore the cpu binding of the current thread
753 */
754
755static void
756pmc_restore_cpu_binding(struct pmc_binding *pb)
757{
758 PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
759 curthread->td_oncpu, pb->pb_cpu);
760 thread_lock(curthread);
761 if (pb->pb_bound)
762 sched_bind(curthread, pb->pb_cpu);
763 else
764 sched_unbind(curthread);
765 thread_unlock(curthread);
766 PMCDBG0(CPU,BND,2, "restore-cpu done");
767}
768
769/*
770 * move execution over to the specified cpu and bind it there.
771 */
772
773static void
774pmc_select_cpu(int cpu)
775{
776 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
777 ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
778
779 /* Never move to an inactive CPU. */
780 KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
781 "CPU %d", __LINE__, cpu));
782
783 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu);
784 thread_lock(curthread);
785 sched_bind(curthread, cpu);
786 thread_unlock(curthread);
787
788 KASSERT(curthread->td_oncpu == cpu,
789 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
790 cpu, curthread->td_oncpu));
791
792 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
793}
794
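Taken together, the helpers above bracket a temporary CPU migration. A typical call site follows this save/bind/restore shape (a sketch of the pattern only, not a verbatim call site):

    struct pmc_binding pb;

    pmc_save_cpu_binding(&pb);      /* remember the current binding */
    pmc_select_cpu(cpu);            /* migrate to and pin on 'cpu' */
    /* ... program or read this CPU's PMC hardware ... */
    pmc_restore_cpu_binding(&pb);   /* drop the temporary binding */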
795/*
796 * Force a context switch.
797 *
798 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
799 * guaranteed to force a context switch.
800 */
801
802static void
803pmc_force_context_switch(void)
804{
805
806 pause("pmcctx", 1);
807}
808
809uint64_t
810pmc_rdtsc(void)
811{
812#if defined(__i386__) || defined(__amd64__)
813 if (__predict_true(amd_feature & AMDID_RDTSCP))
814 return rdtscp();
815 else
816 return rdtsc();
817#else
818 return get_cyclecount();
819#endif
820}
821
822/*
823 * Get the file name for an executable. This is a simple wrapper
824 * around vn_fullpath(9).
825 */
826
827static void
828pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
829{
830
831 *fullpath = "unknown";
832 *freepath = NULL;
833 vn_fullpath(curthread, v, fullpath, freepath);
834}
835
836/*
837 * remove a process owning PMCs
838 */
839
840void
841pmc_remove_owner(struct pmc_owner *po)
842{
843 struct pmc *pm, *tmp;
844
845 sx_assert(&pmc_sx, SX_XLOCKED);
846
847 PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po);
848
849 /* Remove descriptor from the owner hash table */
850 LIST_REMOVE(po, po_next);
851
852 /* release all owned PMC descriptors */
853 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
854 PMCDBG1(OWN,ORM,2, "pmc=%p", pm);
855 KASSERT(pm->pm_owner == po,
856 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
857
858 pmc_release_pmc_descriptor(pm); /* will unlink from the list */
859 pmc_destroy_pmc_descriptor(pm);
860 }
861
862 KASSERT(po->po_sscount == 0,
863 ("[pmc,%d] SS count not zero", __LINE__));
864 KASSERT(LIST_EMPTY(&po->po_pmcs),
865 ("[pmc,%d] PMC list not empty", __LINE__));
866
867 /* de-configure the log file if present */
868 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
869 pmclog_deconfigure_log(po);
870}
871
872/*
873 * remove an owner process record if all conditions are met.
874 */
875
876static void
877pmc_maybe_remove_owner(struct pmc_owner *po)
878{
879
880 PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po);
881
882 /*
883 * Remove owner record if
884 * - this process does not own any PMCs
885 * - this process has not allocated a system-wide sampling buffer
886 */
887
888 if (LIST_EMPTY(&po->po_pmcs) &&
889 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
890 pmc_remove_owner(po);
891 pmc_destroy_owner_descriptor(po);
892 }
893}
894
895/*
896 * Add an association between a target process and a PMC.
897 */
898
899static void
900pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
901{
902 int ri;
903 struct pmc_target *pt;
904#ifdef INVARIANTS
905 struct pmc_thread *pt_td;
906#endif
907
908 sx_assert(&pmc_sx, SX_XLOCKED);
909
910 KASSERT(pm != NULL && pp != NULL,
911 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
912 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
913 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
914 __LINE__, pm, pp->pp_proc->p_pid));
915 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
916 ("[pmc,%d] Illegal reference count %d for process record %p",
917 __LINE__, pp->pp_refcnt, (void *) pp));
918
919 ri = PMC_TO_ROWINDEX(pm);
920
921 PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
922 pm, ri, pp);
923
924#ifdef HWPMC_DEBUG
925 LIST_FOREACH(pt, &pm->pm_targets, pt_next)
926 if (pt->pt_process == pp)
927 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
928 __LINE__, pp, pm));
929#endif
930
931 pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
932 pt->pt_process = pp;
933
934 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
935
936 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
937 (uintptr_t)pm);
938
939 if (pm->pm_owner->po_owner == pp->pp_proc)
940 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
941
942 /*
943 * Initialize the per-process values at this row index.
944 */
945 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
946 pm->pm_sc.pm_reloadcount : 0;
947
948 pp->pp_refcnt++;
949
950#ifdef INVARIANTS
951 /* Confirm that the per-thread values at this row index are cleared. */
952 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
953 mtx_lock_spin(pp->pp_tdslock);
954 LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) {
955 KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0,
956 ("[pmc,%d] pt_pmcval not cleared for pid=%d at "
957 "ri=%d", __LINE__, pp->pp_proc->p_pid, ri));
958 }
959 mtx_unlock_spin(pp->pp_tdslock);
960 }
961#endif
962}
963
964/*
965 * Removes the association between a target process and a PMC.
966 */
967
968static void
969pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
970{
971 int ri;
972 struct proc *p;
973 struct pmc_target *ptgt;
974 struct pmc_thread *pt;
975
976 sx_assert(&pmc_sx, SX_XLOCKED);
977
978 KASSERT(pm != NULL && pp != NULL,
979 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
980
981 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
982 ("[pmc,%d] Illegal ref count %d on process record %p",
983 __LINE__, pp->pp_refcnt, (void *) pp));
984
985 ri = PMC_TO_ROWINDEX(pm);
986
987 PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
988 pm, ri, pp);
989
990 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
991 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
992 ri, pm, pp->pp_pmcs[ri].pp_pmc));
993
994 pp->pp_pmcs[ri].pp_pmc = NULL;
995 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
996
997 /* Clear the per-thread values at this row index. */
998 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
999 mtx_lock_spin(pp->pp_tdslock);
1000 LIST_FOREACH(pt, &pp->pp_tds, pt_next)
1001 pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t) 0;
1002 mtx_unlock_spin(pp->pp_tdslock);
1003 }
1004
1005 /* Remove owner-specific flags */
1006 if (pm->pm_owner->po_owner == pp->pp_proc) {
1007 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
1008 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
1009 }
1010
1011 pp->pp_refcnt--;
1012
1013 /* Remove the target process from the PMC structure */
1014 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
1015 if (ptgt->pt_process == pp)
1016 break;
1017
1018 KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
1019 "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
1020
1021 LIST_REMOVE(ptgt, pt_next);
1022 free(ptgt, M_PMC);
1023
1024 /* if the PMC now lacks targets, send the owner a SIGIO */
1025 if (LIST_EMPTY(&pm->pm_targets)) {
1026 p = pm->pm_owner->po_owner;
1027 PROC_LOCK(p);
1028 kern_psignal(p, SIGIO);
1029 PROC_UNLOCK(p);
1030
1031 PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p,
1032 SIGIO);
1033 }
1034}
1035
1036/*
1037 * Check if PMC 'pm' may be attached to target process 't'.
1038 */
1039
1040static int
1041pmc_can_attach(struct pmc *pm, struct proc *t)
1042{
1043 struct proc *o; /* pmc owner */
1044 struct ucred *oc, *tc; /* owner, target credentials */
1045 int decline_attach, i;
1046
1047 /*
1048 * A PMC's owner can always attach that PMC to itself.
1049 */
1050
1051 if ((o = pm->pm_owner->po_owner) == t)
1052 return 0;
1053
1054 PROC_LOCK(o);
1055 oc = o->p_ucred;
1056 crhold(oc);
1057 PROC_UNLOCK(o);
1058
1059 PROC_LOCK(t);
1060 tc = t->p_ucred;
1061 crhold(tc);
1062 PROC_UNLOCK(t);
1063
1064 /*
1065 * The effective uid of the PMC owner should match at least one
1066 * of the {effective,real,saved} uids of the target process.
1067 */
1068
1069 decline_attach = oc->cr_uid != tc->cr_uid &&
1070 oc->cr_uid != tc->cr_svuid &&
1071 oc->cr_uid != tc->cr_ruid;
1072
1073 /*
1074 * Every one of the target's group ids must be in the owner's
1075 * group list.
1076 */
1077 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
1078 decline_attach = !groupmember(tc->cr_groups[i], oc);
1079
1080 /* check the real and saved gids too */
1081 if (decline_attach == 0)
1082 decline_attach = !groupmember(tc->cr_rgid, oc) ||
1083 !groupmember(tc->cr_svgid, oc);
1084
1085 crfree(tc);
1086 crfree(oc);
1087
1088 return !decline_attach;
1089}
1090
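Summarizing the checks above: the attach is permitted when the owner's effective uid matches one of the target's three uids and the owner is a member of every group the target belongs to. The uid half, restated as a self-contained predicate (hypothetical flattened credentials, for illustration only):

    #include <stdbool.h>
    #include <sys/types.h>

    /* Hypothetical stand-in for the relevant struct ucred fields. */
    struct cred_ids {
            uid_t euid, ruid, svuid;
    };

    /* Mirrors the decline_attach uid expression in pmc_can_attach(). */
    static bool
    owner_uid_matches_target(const struct cred_ids *o, const struct cred_ids *t)
    {
            return (o->euid == t->euid || o->euid == t->svuid ||
                o->euid == t->ruid);
    }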
1091/*
1092 * Attach a process to a PMC.
1093 */
1094
1095static int
1096pmc_attach_one_process(struct proc *p, struct pmc *pm)
1097{
1098 int ri, error;
1099 char *fullpath, *freepath;
1100 struct pmc_process *pp;
1101
1102 sx_assert(&pmc_sx, SX_XLOCKED);
1103
1104 PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
1105 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1106
1107 /*
1108 * Locate the process descriptor corresponding to process 'p',
1109 * allocating space as needed.
1110 *
1111 * Verify that rowindex 'pm_rowindex' is free in the process
1112 * descriptor.
1113 *
1114 * If not, allocate space for a descriptor and link the
1115 * process descriptor and PMC.
1116 */
1117 ri = PMC_TO_ROWINDEX(pm);
1118
1119 /* mark process as using HWPMCs */
1120 PROC_LOCK(p);
1121 p->p_flag |= P_HWPMC;
1122 PROC_UNLOCK(p);
1123
1124 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) {
1125 error = ENOMEM;
1126 goto fail;
1127 }
1128
1129 if (pp->pp_pmcs[ri].pp_pmc == pm) { /* already present at slot [ri] */
1130 error = EEXIST;
1131 goto fail;
1132 }
1133
1134 if (pp->pp_pmcs[ri].pp_pmc != NULL) {
1135 error = EBUSY;
1136 goto fail;
1137 }
1138
1139 pmc_link_target_process(pm, pp);
1140
1141 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
1142 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
1143 pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
1144
1145 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
1146
1147 /* issue an attach event to a configured log file */
1148 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
1149 if (p->p_flag & P_KPROC) {
1150 fullpath = kernelname;
1151 freepath = NULL;
1152 } else {
1153 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
1154 pmclog_process_pmcattach(pm, p->p_pid, fullpath);
1155 }
1156 free(freepath, M_TEMP);
1157 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1158 pmc_log_process_mappings(pm->pm_owner, p);
1159 }
1160
1161 return (0);
1162 fail:
1163 PROC_LOCK(p);
1164 p->p_flag &= ~P_HWPMC;
1165 PROC_UNLOCK(p);
1166 return (error);
1167}
1168
1169/*
1170 * Attach a process and optionally its children
1171 */
1172
1173static int
1174pmc_attach_process(struct proc *p, struct pmc *pm)
1175{
1176 int error;
1177 struct proc *top;
1178
1179 sx_assert(&pmc_sx, SX_XLOCKED);
1180
1181 PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
1182 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1183
1184
1185 /*
1186 * If this PMC successfully allowed a GETMSR operation
1187 * in the past, disallow further ATTACHes.
1188 */
1189
1190 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
1191 return EPERM;
1192
1193 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1194 return pmc_attach_one_process(p, pm);
1195
1196 /*
1197 * Traverse all child processes, attaching them to
1198 * this PMC.
1199 */
1200
1201 sx_slock(&proctree_lock);
1202
1203 top = p;
1204
1205 for (;;) {
1206 if ((error = pmc_attach_one_process(p, pm)) != 0)
1207 break;
1208 if (!LIST_EMPTY(&p->p_children))
1209 p = LIST_FIRST(&p->p_children);
1210 else for (;;) {
1211 if (p == top)
1212 goto done;
1213 if (LIST_NEXT(p, p_sibling)) {
1214 p = LIST_NEXT(p, p_sibling);
1215 break;
1216 }
1217 p = p->p_pptr;
1218 }
1219 }
1220
1221 if (error)
1222 (void) pmc_detach_process(top, pm);
1223
1224 done:
1225 sx_sunlock(&proctree_lock);
1226 return error;
1227}
1228
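The for (;;) loop above is an iterative preorder walk over the proc tree rooted at 'top', driven only by the p_children, p_sibling and p_pptr links (no explicit stack). The same control shape in a self-contained form (hypothetical node type, for illustration):

    #include <stddef.h>

    struct node {
            struct node *first_child;   /* plays the role of p_children */
            struct node *next_sibling;  /* plays the role of p_sibling  */
            struct node *parent;        /* plays the role of p_pptr     */
    };

    /* Visit 'top' and every descendant, mirroring the loop above. */
    static void
    walk_preorder(struct node *top, void (*visit)(struct node *))
    {
            struct node *p = top;

            for (;;) {
                    visit(p);
                    if (p->first_child != NULL)
                            p = p->first_child;
                    else for (;;) {
                            if (p == top)
                                    return;         /* whole subtree done */
                            if (p->next_sibling != NULL) {
                                    p = p->next_sibling;
                                    break;
                            }
                            p = p->parent;          /* climb back up */
                    }
            }
    }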
1229/*
1230 * Detach a process from a PMC. If there are no other PMCs tracking
1231 * this process, remove the process structure from its hash table. If
1232 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
1233 */
1234
1235static int
1236pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
1237{
1238 int ri;
1239 struct pmc_process *pp;
1240
1241 sx_assert(&pmc_sx, SX_XLOCKED);
1242
1243 KASSERT(pm != NULL,
1244 ("[pmc,%d] null pm pointer", __LINE__));
1245
1246 ri = PMC_TO_ROWINDEX(pm);
1247
1248 PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
1249 pm, ri, p, p->p_pid, p->p_comm, flags);
1250
1251 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
1252 return ESRCH;
1253
1254 if (pp->pp_pmcs[ri].pp_pmc != pm)
1255 return EINVAL;
1256
1257 pmc_unlink_target_process(pm, pp);
1258
1259 /* Issue a detach entry if a log file is configured */
1260 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
1261 pmclog_process_pmcdetach(pm, p->p_pid);
1262
1263 /*
1264 * If there are no PMCs targeting this process, we remove its
1265 * descriptor from the target hash table and unset the P_HWPMC
1266 * flag in the struct proc.
1267 */
1268 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
1269 ("[pmc,%d] Illegal refcnt %d for process struct %p",
1270 __LINE__, pp->pp_refcnt, pp));
1271
1272 if (pp->pp_refcnt != 0) /* still a target of some PMC */
1273 return 0;
1274
1275 pmc_remove_process_descriptor(pp);
1276
1277 if (flags & PMC_FLAG_REMOVE)
1278 pmc_destroy_process_descriptor(pp);
1279
1280 PROC_LOCK(p);
1281 p->p_flag &= ~P_HWPMC;
1282 PROC_UNLOCK(p);
1283
1284 return 0;
1285}
1286
1287/*
1288 * Detach a process and optionally its descendants from a PMC.
1289 */
1290
1291static int
1292pmc_detach_process(struct proc *p, struct pmc *pm)
1293{
1294 struct proc *top;
1295
1296 sx_assert(&pmc_sx, SX_XLOCKED);
1297
1298 PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
1299 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
1300
1301 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
1302 return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1303
1304 /*
1305 * Traverse all children, detaching them from this PMC. We
1306 * ignore errors since we could be detaching a PMC from a
1307 * partially attached proc tree.
1308 */
1309
1310 sx_slock(&proctree_lock);
1311
1312 top = p;
1313
1314 for (;;) {
1315 (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
1316
1317 if (!LIST_EMPTY(&p->p_children))
1318 p = LIST_FIRST(&p->p_children);
1319 else for (;;) {
1320 if (p == top)
1321 goto done;
1322 if (LIST_NEXT(p, p_sibling)) {
1323 p = LIST_NEXT(p, p_sibling);
1324 break;
1325 }
1326 p = p->p_pptr;
1327 }
1328 }
1329
1330 done:
1331 sx_sunlock(&proctree_lock);
1332
1333 if (LIST_EMPTY(&pm->pm_targets))
1334 pm->pm_flags &= ~PMC_F_ATTACH_DONE;
1335
1336 return 0;
1337}
1338
1339
1340/*
1341 * Thread context switch IN
1342 */
1343
1344static void
1345pmc_process_csw_in(struct thread *td)
1346{
1347 int cpu;
1348 unsigned int adjri, ri;
1349 struct pmc *pm;
1350 struct proc *p;
1351 struct pmc_cpu *pc;
1352 struct pmc_hw *phw;
1353 pmc_value_t newvalue;
1354 struct pmc_process *pp;
1355 struct pmc_thread *pt;
1356 struct pmc_classdep *pcd;
1357
1358 p = td->td_proc;
1359 pt = NULL;
1360 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
1361 return;
1362
1363 KASSERT(pp->pp_proc == td->td_proc,
1364 ("[pmc,%d] not my thread state", __LINE__));
1365
1366 critical_enter(); /* no preemption from this point */
1367
1368 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1369
1370 PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1371 p->p_pid, p->p_comm, pp);
1372
1373 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1374 ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
1375
1376 pc = pmc_pcpu[cpu];
1377
1378 for (ri = 0; ri < md->pmd_npmc; ri++) {
1379
1380 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
1381 continue;
1382
1383 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
1384 ("[pmc,%d] Target PMC in non-virtual mode (%d)",
1385 __LINE__, PMC_TO_MODE(pm)));
1386
1387 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1388 ("[pmc,%d] Row index mismatch pmc %d != ri %d",
1389 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1390
1391 /*
1392 * Only PMCs that are marked as 'RUNNING' need
1393 * be placed on hardware.
1394 */
1395
1396 if (pm->pm_state != PMC_STATE_RUNNING)
1397 continue;
1398
1399 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
1400 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
1401 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
1402
1403 /* increment PMC runcount */
1404 counter_u64_add(pm->pm_runcount, 1);
1405
1406 /* configure the HWPMC we are going to use. */
1407 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1408 pcd->pcd_config_pmc(cpu, adjri, pm);
1409
1410 phw = pc->pc_hwpmcs[ri];
1411
1412 KASSERT(phw != NULL,
1413 ("[pmc,%d] null hw pointer", __LINE__));
1414
1415 KASSERT(phw->phw_pmc == pm,
1416 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
1417 phw->phw_pmc, pm));
1418
1419 /*
1420 * Write out saved value and start the PMC.
1421 *
1422 * Sampling PMCs use a per-thread value, while
1423 * counting mode PMCs use a per-pmc value that is
1424 * inherited across descendants.
1425 */
1426 if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
1427 if (pt == NULL)
1428 pt = pmc_find_thread_descriptor(pp, td,
1429 PMC_FLAG_NONE);
1430
1431 KASSERT(pt != NULL,
1432 ("[pmc,%d] No thread found for td=%p", __LINE__,
1433 td));
1434
1435 mtx_pool_lock_spin(pmc_mtxpool, pm);
1436
1437 /*
1438 * If we have a thread descriptor, use the per-thread
1439 * counter in the descriptor. If not, we will use
1440 * a per-process counter.
1441 *
1442 * TODO: Remove the per-process "safety net" once
1443 * we have thoroughly tested that we don't hit the
1444 * above assert.
1445 */
1446 if (pt != NULL) {
1447 if (pt->pt_pmcs[ri].pt_pmcval > 0)
1448 newvalue = pt->pt_pmcs[ri].pt_pmcval;
1449 else
1450 newvalue = pm->pm_sc.pm_reloadcount;
1451 } else {
1452 /*
1453 * Use the saved value calculated after the most
1454 * recent time a thread using the shared counter
1455 * switched out. Reset the saved count in case
1456 * another thread from this process switches in
1457 * before any threads switch out.
1458 */
1459
1460 newvalue = pp->pp_pmcs[ri].pp_pmcval;
1461 pp->pp_pmcs[ri].pp_pmcval =
1462 pm->pm_sc.pm_reloadcount;
1463 }
1464 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1465 KASSERT(newvalue > 0 && newvalue <=
1466 pm->pm_sc.pm_reloadcount,
1467 ("[pmc,%d] pmcval outside of expected range cpu=%d "
1468 "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__,
1469 cpu, ri, newvalue, pm->pm_sc.pm_reloadcount));
1470 } else {
1471 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
1472 ("[pmc,%d] illegal mode=%d", __LINE__,
1473 PMC_TO_MODE(pm)));
1474 mtx_pool_lock_spin(pmc_mtxpool, pm);
1475 newvalue = PMC_PCPU_SAVED(cpu, ri) =
1476 pm->pm_gv.pm_savedvalue;
1477 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1478 }
1479
1480 PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
1481
1482 pcd->pcd_write_pmc(cpu, adjri, newvalue);
1483
1484 /* If a sampling mode PMC, reset stalled state. */
1485 if (PMC_TO_MODE(pm) == PMC_MODE_TS)
1486 pm->pm_pcpu_state[cpu].pps_stalled = 0;
1487
1488 /* Indicate that we desire this to run. */
1489 pm->pm_pcpu_state[cpu].pps_cpustate = 1;
1490
1491 /* Start the PMC. */
1492 pcd->pcd_start_pmc(cpu, adjri);
1493 }
1494
1495 /*
1496 * perform any other architecture/cpu dependent thread
1497 * switch-in actions.
1498 */
1499
1500 (void) (*md->pmd_switch_in)(pc, pp);
1501
1502 critical_exit();
1503
1504}
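/*
 * Annotation: the macro expansions visible above show how a pmc_id_t
 * packs its row index and mode: (pm_id & 0xFF) and
 * ((pm_id & 0xFF000) >> 12).  A minimal userland sketch of the same
 * decoding; the macro names here are illustrative, not the kernel's:
 */
#include <stdint.h>
#include <stdio.h>

#define ID_TO_ROWINDEX(id)	((id) & 0xFF)
#define ID_TO_MODE(id)		(((id) & 0xFF000) >> 12)

int
main(void)
{
	uint32_t id = 0x3007;	/* hypothetical id: mode 3, row index 7 */

	printf("ri=%u mode=%u\n", ID_TO_ROWINDEX(id), ID_TO_MODE(id));
	return (0);
}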
1505
1506/*
1507 * Thread context switch OUT.
1508 */
1509
1510static void
1511pmc_process_csw_out(struct thread *td)
1512{
1513 int cpu;
1514 int64_t tmp;
1515 struct pmc *pm;
1516 struct proc *p;
1517 enum pmc_mode mode;
1518 struct pmc_cpu *pc;
1519 pmc_value_t newvalue;
1520 unsigned int adjri, ri;
1521 struct pmc_process *pp;
1522 struct pmc_thread *pt = NULL;
1523 struct pmc_classdep *pcd;
1524
1525
1526 /*
1527 * Locate our process descriptor; this may be NULL if
1528 * this process is exiting and we have already removed
1529 * the process from the target process table.
1530 *
1531 * Note that due to kernel preemption, multiple
1532 * context switches may happen while the process is
1533 * exiting.
1534 *
1535 * Note also that if the target process cannot be
1536 * found we still need to deconfigure any PMCs that
1537 * are currently running on hardware.
1538 */
1539
1540 p = td->td_proc;
1541 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
1542
1543 /*
1544 * save PMCs
1545 */
1546
1547 critical_enter();
1548
1549 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
1550
1551 PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
1552 p->p_pid, p->p_comm, pp);
1553
1554 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1555 ("[pmc,%d weird CPU id %d", __LINE__, cpu));
1556
1557 pc = pmc_pcpu[cpu];
1558
1559 /*
1560 * When a PMC gets unlinked from a target PMC, it will
1561 * be removed from the target's pp_pmc[] array.
1562 *
1563 * However, on a MP system, the target could have been
1564 * executing on another CPU at the time of the unlink.
1565 * So, at context switch OUT time, we need to look at
1566 * the hardware to determine if a PMC is scheduled on
1567 * it.
1568 */
1569
1570 for (ri = 0; ri < md->pmd_npmc; ri++) {
1571
1572 pcd = pmc_ri_to_classdep(md, ri, &adjri);
1573 pm = NULL;
1574 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
1575
1576 if (pm == NULL) /* nothing at this row index */
1577 continue;
1578
1579 mode = PMC_TO_MODE(pm);
1580 if (!PMC_IS_VIRTUAL_MODE(mode))
1581 continue; /* not a process virtual PMC */
1582
1583 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1584 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1585 __LINE__, PMC_TO_ROWINDEX(pm), ri));
1586
1587 /*
1588 * Change desired state, and then stop if not stalled.
1589 * This two-step dance should avoid race conditions where
1590 * an interrupt re-enables the PMC after this code has
1591 * already checked the pm_stalled flag.
1592 */
1593 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
1594 if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
1595 pcd->pcd_stop_pmc(cpu, adjri);
1596
1597 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
1598 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
1599 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
1600
1601 /* reduce this PMC's runcount */
1602 counter_u64_add(pm->pm_runcount, -1);
1603
1604 /*
1605 * If this PMC is associated with this process,
1606 * save the reading.
1607 */
1608
1609 if (pm->pm_state != PMC_STATE_DELETED && pp != NULL &&
1610 pp->pp_pmcs[ri].pp_pmc != NULL) {
1611 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1612 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1613 pm, ri, pp->pp_pmcs[ri].pp_pmc));
1614
1615 KASSERT(pp->pp_refcnt > 0,
1616 ("[pmc,%d] pp refcnt = %d", __LINE__,
1617 pp->pp_refcnt));
1618
1619 pcd->pcd_read_pmc(cpu, adjri, &newvalue);
1620
1621 if (mode == PMC_MODE_TS) {
1622 PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)",
1623 cpu, ri, newvalue);
1624
1625 if (pt == NULL)
1626 pt = pmc_find_thread_descriptor(pp, td,
1627 PMC_FLAG_NONE);
1628
1629 KASSERT(pt != NULL,
1630 ("[pmc,%d] No thread found for td=%p",
1631 __LINE__, td));
1632
1633 mtx_pool_lock_spin(pmc_mtxpool, pm);
1634
1635 /*
1636 * If we have a thread descriptor, save the
1637 * per-thread counter in the descriptor. If not,
1638 * we will update the per-process counter.
1639 *
1640 * TODO: Remove the per-process "safety net"
1641 * once we have thoroughly tested that we
1642 * don't hit the above assert.
1643 */
1644 if (pt != NULL)
1645 pt->pt_pmcs[ri].pt_pmcval = newvalue;
1646 else {
1647 /*
1648 * For sampling process-virtual PMCs,
1649 * newvalue is the number of events to
1650 * be seen until the next sampling
1651 * interrupt. We can just add the events
1652 * left from this invocation to the
1653 * counter, then adjust in case we
1654 * overflow our range.
1655 *
1656 * (Recall that we reload the counter
1657 * every time we use it.)
1658 */
1659 pp->pp_pmcs[ri].pp_pmcval += newvalue;
1660 if (pp->pp_pmcs[ri].pp_pmcval >
1661 pm->pm_sc.pm_reloadcount)
1662 pp->pp_pmcs[ri].pp_pmcval -=
1663 pm->pm_sc.pm_reloadcount;
1664 }
1665 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1666 } else {
1667 tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
1668
1669 PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)",
1670 cpu, ri, tmp);
1671
1672 /*
1673 * For counting process-virtual PMCs,
1674 * we expect the count to be
1675 * increasing monotonically, modulo a 64
1676 * bit wraparound.
1677 */
1678 KASSERT(tmp >= 0,
1679 ("[pmc,%d] negative increment cpu=%d "
1680 "ri=%d newvalue=%jx saved=%jx "
1681 "incr=%jx", __LINE__, cpu, ri,
1682 newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
1683
1684 mtx_pool_lock_spin(pmc_mtxpool, pm);
1685 pm->pm_gv.pm_savedvalue += tmp;
1686 pp->pp_pmcs[ri].pp_pmcval += tmp;
1687 mtx_pool_unlock_spin(pmc_mtxpool, pm);
1688
1689 if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1690 pmclog_process_proccsw(pm, pp, tmp, td);
1691 }
1692 }
1693
1694 /* mark hardware as free */
1695 pcd->pcd_config_pmc(cpu, adjri, NULL);
1696 }
1697
1698 /*
1699 * perform any other architecture/cpu dependent thread
1700 * switch out functions.
1701 */
1702
1703 (void) (*md->pmd_switch_out)(pc, pp);
1704
1705 critical_exit();
1706}
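/*
 * Annotation: a standalone sketch of the sampling-PMC save step above
 * (lines 1659-1663): the events still pending are added to the saved
 * per-process count and folded back into the [1, reloadcount] range,
 * since the counter is reloaded on every use.  Assumes a plain
 * uint64_t counter; the kernel uses pmc_value_t.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
save_sampling_count(uint64_t pmcval, uint64_t newvalue, uint64_t reloadcount)
{
	pmcval += newvalue;		/* add events left from this run */
	if (pmcval > reloadcount)	/* fold back into range */
		pmcval -= reloadcount;
	return (pmcval);
}

int
main(void)
{
	/* 300 saved + 900 pending folds to 200 with a reload count of 1000. */
	assert(save_sampling_count(300, 900, 1000) == 200);
	return (0);
}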
1707
1708/*
1709 * A new thread for a process.
1710 */
1711static void
1712pmc_process_thread_add(struct thread *td)
1713{
1714 struct pmc_process *pmc;
1715
1716 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
1717 if (pmc != NULL)
1718 pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE);
1719}
1720
1721/*
1722 * A thread delete for a process.
1723 */
1724static void
1725pmc_process_thread_delete(struct thread *td)
1726{
1727 struct pmc_process *pmc;
1728
1729 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
1730 if (pmc != NULL)
1731 pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc,
1732 td, PMC_FLAG_REMOVE));
1733}
1734
1735/*
1736 * A userret() call for a thread.
1737 */
1738static void
1739pmc_process_thread_userret(struct thread *td)
1740{
1741 sched_pin();
1742 pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame);
1743 sched_unpin();
1744}
1745
1746/*
1747 * A mapping change for a process.
1748 */
1749
1750static void
1751pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1752{
1753 int ri;
1754 pid_t pid;
1755 char *fullpath, *freepath;
1756 const struct pmc *pm;
1757 struct pmc_owner *po;
1758 const struct pmc_process *pp;
1759
1760 freepath = fullpath = NULL;
1761 MPASS(!in_epoch(global_epoch_preempt));
1762 pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
1763
1764 pid = td->td_proc->p_pid;
1765
1766 PMC_EPOCH_ENTER();
1767 /* Inform owners of all system-wide sampling PMCs. */
1768 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1769 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1770 pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
1771
1772 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1773 goto done;
1774
1775 /*
1776 * Inform sampling PMC owners tracking this process.
1777 */
1778 for (ri = 0; ri < md->pmd_npmc; ri++)
1779 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1780 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1781 pmclog_process_map_in(pm->pm_owner,
1782 pid, pkm->pm_address, fullpath);
1783
1784 done:
1785 if (freepath)
1786 free(freepath, M_TEMP);
1787 PMC_EPOCH_EXIT();
1788}
1789
1790
1791/*
1792 * Log an munmap request.
1793 */
1794
1795static void
1796pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1797{
1798 int ri;
1799 pid_t pid;
1800 struct pmc_owner *po;
1801 const struct pmc *pm;
1802 const struct pmc_process *pp;
1803
1804 pid = td->td_proc->p_pid;
1805
1806 PMC_EPOCH_ENTER();
1807 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
1808 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1809 pmclog_process_map_out(po, pid, pkm->pm_address,
1810 pkm->pm_address + pkm->pm_size);
1811 PMC_EPOCH_EXIT();
1812
1813 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1814 return;
1815
1816 for (ri = 0; ri < md->pmd_npmc; ri++)
1817 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1818 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
1819 pmclog_process_map_out(pm->pm_owner, pid,
1820 pkm->pm_address, pkm->pm_address + pkm->pm_size);
1821}
1822
1823/*
1824 * Log mapping information about the kernel.
1825 */
1826
1827static void
1828pmc_log_kernel_mappings(struct pmc *pm)
1829{
1830 struct pmc_owner *po;
1831 struct pmckern_map_in *km, *kmbase;
1832
1833 MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
1834 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1835 ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1836 __LINE__, (void *) pm));
1837
1838 po = pm->pm_owner;
1839
1840 if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
1841 return;
1842 if (PMC_TO_MODE(pm) == PMC_MODE_SS)
1843 pmc_process_allproc(pm);
1844 /*
1845 * Log the current set of kernel modules.
1846 */
1847 kmbase = linker_hwpmc_list_objects();
1848 for (km = kmbase; km->pm_file != NULL; km++) {
1849 PMCDBG2(LOG,REG,1,"%s %p", (char *) km->pm_file,
1850 (void *) km->pm_address);
1851 pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
1852 km->pm_file);
1853 }
1854 free(kmbase, M_LINKER);
1855
1856 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1857}
1858
1859/*
1860 * Log the mappings for a single process.
1861 */
1862
1863static void
1864pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1865{
1866 vm_map_t map;
1867 struct vnode *vp;
1868 struct vmspace *vm;
1869 vm_map_entry_t entry;
1870 vm_offset_t last_end;
1871 u_int last_timestamp;
1872 struct vnode *last_vp;
1873 vm_offset_t start_addr;
1874 vm_object_t obj, lobj, tobj;
1875 char *fullpath, *freepath;
1876
1877 last_vp = NULL;
1878 last_end = (vm_offset_t) 0;
1879 fullpath = freepath = NULL;
1880
1881 if ((vm = vmspace_acquire_ref(p)) == NULL)
1882 return;
1883
1884 map = &vm->vm_map;
1885 vm_map_lock_read(map);
1886
1887 for (entry = map->header.next; entry != &map->header; entry = entry->next) {
1888
1889 if (entry == NULL) {
1890 PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
1891 "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
1892 break;
1893 }
1894
1895 /*
1896 * We only care about executable map entries.
1897 */
1898 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
1899 !(entry->protection & VM_PROT_EXECUTE) ||
1900 (entry->object.vm_object == NULL)) {
1901 continue;
1902 }
1903
1904 obj = entry->object.vm_object;
1905 VM_OBJECT_RLOCK(obj);
1906
1907 /*
1908 * Walk the backing_object list to find the base
1909 * (non-shadowed) vm_object.
1910 */
1911 for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
1912 if (tobj != obj)
1913 VM_OBJECT_RLOCK(tobj);
1914 if (lobj != obj)
1915 VM_OBJECT_RUNLOCK(lobj);
1916 lobj = tobj;
1917 }
1918
1919 /*
1920 * At this point lobj is the base vm_object and it is locked.
1921 */
1922 if (lobj == NULL) {
1923 PMCDBG3(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
1924 "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
1925 VM_OBJECT_RUNLOCK(obj);
1926 continue;
1927 }
1928
1929 vp = vm_object_vnode(lobj);
1930 if (vp == NULL) {
1931 if (lobj != obj)
1932 VM_OBJECT_RUNLOCK(lobj);
1933 VM_OBJECT_RUNLOCK(obj);
1934 continue;
1935 }
1936
1937 /*
1938 * Skip contiguous regions that point to the same
1939 * vnode, so we don't emit redundant MAP-IN
1940 * directives.
1941 */
1942 if (entry->start == last_end && vp == last_vp) {
1943 last_end = entry->end;
1944 if (lobj != obj)
1945 VM_OBJECT_RUNLOCK(lobj);
1946 VM_OBJECT_RUNLOCK(obj);
1947 continue;
1948 }
1949
1950 /*
1951 * We don't want to keep the proc's vm_map or this
1952 * vm_object locked while we walk the pathname, since
1953 * vn_fullpath() can sleep. However, if we drop the
1954 * lock, it's possible for concurrent activity to
1955 * modify the vm_map list. To protect against this,
1956 * we save the vm_map timestamp before we release the
1957 * lock, and check it after we reacquire the lock
1958 * below.
1959 */
1960 start_addr = entry->start;
1961 last_end = entry->end;
1962 last_timestamp = map->timestamp;
1963 vm_map_unlock_read(map);
1964
1965 vref(vp);
1966 if (lobj != obj)
1967 VM_OBJECT_RUNLOCK(lobj);
1968
1969 VM_OBJECT_RUNLOCK(obj);
1970
1971 freepath = NULL;
1972 pmc_getfilename(vp, &fullpath, &freepath);
1973 last_vp = vp;
1974
1975 vrele(vp);
1976
1977 vp = NULL;
1978 pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
1979 if (freepath)
1980 free(freepath, M_TEMP);
1981
1982 vm_map_lock_read(map);
1983
1984 /*
1985 * If our saved timestamp doesn't match, this means
1986 * that the vm_map was modified out from under us and
1987 * we can't trust our current "entry" pointer. Do a
1988 * new lookup for this entry. If there is no entry
1989 * for this address range, vm_map_lookup_entry() will
1990 * return the previous one, so we always want to go to
1991 * entry->next on the next loop iteration.
1992 *
1993 * There is an edge condition here that can occur if
1994 * there is no entry at or before this address. In
1995 * this situation, vm_map_lookup_entry returns
1996 * &map->header, which would cause our loop to abort
1997 * without processing the rest of the map. However,
1998 * in practice this will never happen for process
1999 * vm_map. This is because the executable's text
2000 * segment is the first mapping in the proc's address
2001 * space, and this mapping is never removed until the
2002 * process exits, so there will always be a non-header
2003 * entry at or before the requested address for
2004 * vm_map_lookup_entry to return.
2005 */
2006 if (map->timestamp != last_timestamp)
2007 vm_map_lookup_entry(map, last_end - 1, &entry);
2008 }
2009
2010 vm_map_unlock_read(map);
2011 vmspace_free(vm);
2012 return;
2013}
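/*
 * Annotation: the walk above drops the vm_map read lock around the
 * sleepable vn_fullpath() work and uses map->timestamp to detect
 * concurrent modification.  A generic compilable sketch of that
 * save/drop/recheck pattern; the types and helpers below are
 * hypothetical stand-ins, not the kernel's:
 */
#include <stdio.h>

struct map { unsigned int timestamp; };	/* bumped on every modification */

static void lock_read(struct map *m)   { (void)m; /* acquire reader lock */ }
static void unlock_read(struct map *m) { (void)m; /* release reader lock */ }
static void relookup(struct map *m)    { (void)m; /* vm_map_lookup_entry() */ }

static void
walk_one(struct map *m)
{
	unsigned int saved;

	lock_read(m);
	saved = m->timestamp;	/* remember the version before sleeping */
	unlock_read(m);

	/* ... sleepable work (vn_fullpath() in the code above) ... */

	lock_read(m);
	if (m->timestamp != saved)	/* map changed: cursor is stale */
		relookup(m);
	unlock_read(m);
}

int
main(void)
{
	struct map m = { 0 };

	walk_one(&m);
	printf("done\n");
	return (0);
}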
2014
2015/*
2016 * Log mappings for all processes in the system.
2017 */
2018
2019static void
2020pmc_log_all_process_mappings(struct pmc_owner *po)
2021{
2022 struct proc *p, *top;
2023
2024 sx_assert(&pmc_sx, SX_XLOCKED);
2025
2026 if ((p = pfind(1)) == NULL)
2027 panic("[pmc,%d] Cannot find init", __LINE__);
2028
2029 PROC_UNLOCK(p);
2030
2031 sx_slock(&proctree_lock);
2032
2033 top = p;
2034
2035 for (;;) {
2036 pmc_log_process_mappings(po, p);
2037 if (!LIST_EMPTY(&p->p_children))
2038 p = LIST_FIRST(&p->p_children);
2039 else for (;;) {
2040 if (p == top)
2041 goto done;
2042 if (LIST_NEXT(p, p_sibling)) {
2043 p = LIST_NEXT(p, p_sibling);
2044 break;
2045 }
2046 p = p->p_pptr;
2047 }
2048 }
2049 done:
2050 sx_sunlock(&proctree_lock);
2051}
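/*
 * Annotation: the loop above is a stackless preorder walk of the
 * process tree rooted at init: descend to the first child, otherwise
 * advance to a sibling, otherwise climb back to the parent.  The same
 * shape on a toy tree (node type and names are illustrative only):
 */
#include <stddef.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent, *child, *sibling;
};

static void
preorder(struct node *top)
{
	struct node *n = top;

	for (;;) {
		printf("%s\n", n->name);	/* visit */
		if (n->child != NULL)
			n = n->child;
		else for (;;) {
			if (n == top)
				return;
			if (n->sibling != NULL) {
				n = n->sibling;
				break;
			}
			n = n->parent;
		}
	}
}

int
main(void)
{
	struct node c2 = { "c2", NULL, NULL, NULL };
	struct node c1 = { "c1", NULL, NULL, &c2 };
	struct node root = { "init", NULL, &c1, NULL };

	c1.parent = c2.parent = &root;
	preorder(&root);	/* prints init, c1, c2 */
	return (0);
}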
2052
2053/*
2054 * The 'hook' invoked from the kernel proper
2055 */
2056
2057
2058#ifdef HWPMC_DEBUG
2059const char *pmc_hooknames[] = {
2060 /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
2061 "",
2062 "EXEC",
2063 "CSW-IN",
2064 "CSW-OUT",
2065 "SAMPLE",
2066 "UNUSED1",
2067 "UNUSED2",
2068 "MMAP",
2069 "MUNMAP",
2070 "CALLCHAIN-NMI",
2071 "CALLCHAIN-SOFT",
2072 "SOFTSAMPLING",
2073 "THR-CREATE",
2074 "THR-EXIT",
2075 "THR-USERRET",
2076 "THR-CREATE-LOG",
2077 "THR-EXIT-LOG",
2078 "PROC-CREATE-LOG"
2079};
2080#endif
2081
2082static int
2083pmc_hook_handler(struct thread *td, int function, void *arg)
2084{
2085 int cpu;
2086
2087 PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
2088 pmc_hooknames[function], arg);
2089
2090 switch (function)
2091 {
2092
2093 /*
2094 * Process exec()
2095 */
2096
2097 case PMC_FN_PROCESS_EXEC:
2098 {
2099 char *fullpath, *freepath;
2100 unsigned int ri;
2101 int is_using_hwpmcs;
2102 struct pmc *pm;
2103 struct proc *p;
2104 struct pmc_owner *po;
2105 struct pmc_process *pp;
2106 struct pmckern_procexec *pk;
2107
2108 sx_assert(&pmc_sx, SX_XLOCKED);
2109
2110 p = td->td_proc;
2111 pmc_getfilename(p->p_textvp, &fullpath, &freepath);
2112
2113 pk = (struct pmckern_procexec *) arg;
2114
2115 PMC_EPOCH_ENTER();
2116 /* Inform owners of SS mode PMCs of the exec event. */
2117 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
2118 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2119 pmclog_process_procexec(po, PMC_ID_INVALID,
2120 p->p_pid, pk->pm_entryaddr, fullpath);
2121 PMC_EPOCH_EXIT();
2122
2123 PROC_LOCK(p);
2124 is_using_hwpmcs = p->p_flag & P_HWPMC;
2125 PROC_UNLOCK(p);
2126
2127 if (!is_using_hwpmcs) {
2128 if (freepath)
2129 free(freepath, M_TEMP);
2130 break;
2131 }
2132
2133 /*
2134 * PMCs are not inherited across an exec(): remove any
2135 * PMCs that this process is the owner of.
2136 */
2137
2138 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
2139 pmc_remove_owner(po);
2140 pmc_destroy_owner_descriptor(po);
2141 }
2142
2143 /*
2144 * If the process being exec'ed is not the target of any
2145 * PMC, we are done.
2146 */
2147 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
2148 if (freepath)
2149 free(freepath, M_TEMP);
2150 break;
2151 }
2152
2153 /*
2154 * Log the exec event to all monitoring owners. Skip
2155 * owners who have already received the event because
2156 * they had system sampling PMCs active.
2157 */
2158 for (ri = 0; ri < md->pmd_npmc; ri++)
2159 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
2160 po = pm->pm_owner;
2161 if (po->po_sscount == 0 &&
2162 po->po_flags & PMC_PO_OWNS_LOGFILE)
2163 pmclog_process_procexec(po, pm->pm_id,
2164 p->p_pid, pk->pm_entryaddr,
2165 fullpath);
2166 }
2167
2168 if (freepath)
2169 free(freepath, M_TEMP);
2170
2171
2172 PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
2173 p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
2174
2175 if (pk->pm_credentialschanged == 0) /* no change */
2176 break;
2177
2178 /*
2179 * If the newly exec()'ed process has a different credential
2180 * than before, allow it to be the target of a PMC only if
2181 * the PMC's owner has sufficient privilege.
2182 */
2183
2184 for (ri = 0; ri < md->pmd_npmc; ri++)
2185 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
2186 if (pmc_can_attach(pm, td->td_proc) != 0)
2187 pmc_detach_one_process(td->td_proc,
2188 pm, PMC_FLAG_NONE);
2189
2190 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
2191 ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
2192 pp->pp_refcnt, pp));
2193
2194 /*
2195 * If this process is no longer the target of any
2196 * PMCs, we can remove the process entry and free
2197 * up space.
2198 */
2199
2200 if (pp->pp_refcnt == 0) {
2201 pmc_remove_process_descriptor(pp);
2202 pmc_destroy_process_descriptor(pp);
2203 break;
2204 }
2205
2206 }
2207 break;
2208
2209 case PMC_FN_CSW_IN:
2210 pmc_process_csw_in(td);
2211 break;
2212
2213 case PMC_FN_CSW_OUT:
2214 pmc_process_csw_out(td);
2215 break;
2216
2217 /*
2218 * Process accumulated PC samples.
2219 *
2220 * This function is expected to be called by hardclock() for
2221 * each CPU that has accumulated PC samples.
2222 *
2223 * This function is to be executed on the CPU whose samples
2224 * are being processed.
2225 */
2226 case PMC_FN_DO_SAMPLES:
2227
2228 /*
2229 * Clear the cpu specific bit in the CPU mask before
2230 * do the rest of the processing. If the NMI handler
2231 * gets invoked after the "atomic_clear_int()" call
2232 * below but before "pmc_process_samples()" gets
2233 * around to processing the interrupt, then we will
2234 * come back here at the next hardclock() tick (and
2235 * may find nothing to do if "pmc_process_samples()"
2236 * had already processed the interrupt). We don't
2237 * lose the interrupt sample.
2238 */
2239 DPCPU_SET(pmc_sampled, 0);
2240 cpu = PCPU_GET(cpuid);
2241 pmc_process_samples(cpu, PMC_HR);
2242 pmc_process_samples(cpu, PMC_SR);
2243 pmc_process_samples(cpu, PMC_UR);
2244 break;
2245
2246 case PMC_FN_MMAP:
2247 pmc_process_mmap(td, (struct pmckern_map_in *) arg);
2248 break;
2249
2250 case PMC_FN_MUNMAP:
2251 MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
2252 pmc_process_munmap(td, (struct pmckern_map_out *) arg);
2253 break;
2254
2255 case PMC_FN_PROC_CREATE_LOG:
2256 pmc_process_proccreate((struct proc *)arg);
2257 break;
2258
2259 case PMC_FN_USER_CALLCHAIN:
2260 /*
2261 * Record a call chain.
2262 */
2263 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2264 __LINE__));
2265
2266 pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
2267 (struct trapframe *) arg);
2268
2269 KASSERT(td->td_pinned == 1,
2270 ("[pmc,%d] invalid td_pinned value", __LINE__));
2271 sched_unpin(); /* Can migrate safely now. */
2272
2273 td->td_pflags &= ~TDP_CALLCHAIN;
2274 break;
2275
2276 case PMC_FN_USER_CALLCHAIN_SOFT:
2277 /*
2278 * Record a call chain.
2279 */
2280 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2281 __LINE__));
2282
2283 cpu = PCPU_GET(cpuid);
2284 pmc_capture_user_callchain(cpu, PMC_SR,
2285 (struct trapframe *) arg);
2286
2287 KASSERT(td->td_pinned == 1,
2288 ("[pmc,%d] invalid td_pinned value", __LINE__));
2289
2290 sched_unpin(); /* Can migrate safely now. */
2291
2292 td->td_pflags &= ~TDP_CALLCHAIN;
2293 break;
2294
2295 case PMC_FN_SOFT_SAMPLING:
2296 /*
2297 * Call soft PMC sampling intr.
2298 */
2299 pmc_soft_intr((struct pmckern_soft *) arg);
2300 break;
2301
2302 case PMC_FN_THR_CREATE:
2303 pmc_process_thread_add(td);
2304 pmc_process_threadcreate(td);
2305 break;
2306
2307 case PMC_FN_THR_CREATE_LOG:
2308 pmc_process_threadcreate(td);
2309 break;
2310
2311 case PMC_FN_THR_EXIT:
2312 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2313 __LINE__));
2314 pmc_process_thread_delete(td);
2315 pmc_process_threadexit(td);
2316 break;
2317 case PMC_FN_THR_EXIT_LOG:
2318 pmc_process_threadexit(td);
2319 break;
2320 case PMC_FN_THR_USERRET:
2321 KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2322 __LINE__));
2323 pmc_process_thread_userret(td);
2324 break;
2325
2326 default:
2327#ifdef HWPMC_DEBUG
2328 KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
2329#endif
2330 break;
2331
2332 }
2333
2334 return 0;
2335}
2336
2337/*
2338 * allocate a 'struct pmc_owner' descriptor in the owner hash table.
2339 */
2340
2341static struct pmc_owner *
2342pmc_allocate_owner_descriptor(struct proc *p)
2343{
2344 uint32_t hindex;
2345 struct pmc_owner *po;
2346 struct pmc_ownerhash *poh;
2347
2348 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2349 poh = &pmc_ownerhash[hindex];
2350
2351 /* allocate space for N pointers and one descriptor struct */
2352 po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK0x0002|M_ZERO0x0100);
2353 po->po_owner = p;
2354 LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
2355
2356 TAILQ_INIT(&po->po_logbuffers);
2357 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
2358
2359 PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
2360 p, p->p_pid, p->p_comm, po);
2361
2362 return po;
2363}
2364
2365static void
2366pmc_destroy_owner_descriptor(struct pmc_owner *po)
2367{
2368
2369 PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
2370 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
2371
2372 mtx_destroy(&po->po_mtx);
2373 free(po, M_PMC);
2374}
2375
2376/*
2377 * Allocate a thread descriptor from the free pool.
2378 *
2379 * NOTE: This *can* return NULL.
2380 */
2381static struct pmc_thread *
2382pmc_thread_descriptor_pool_alloc(void)
2383{
2384 struct pmc_thread *pt;
2385
2386 mtx_lock_spin(&pmc_threadfreelist_mtx);
2387 if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2388 LIST_REMOVE(pt, pt_next);
2389 pmc_threadfreelist_entries--;
2390 }
2391 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2392
2393 return (pt);
2394}
2395
2396/*
2397 * Add a thread descriptor to the free pool. We use this instead of free()
2398 * to maintain a cache of free entries. Additionally, we can safely call
2399 * this function when we cannot call free(), such as in a critical section.
2400 *
2401 */
2402static void
2403pmc_thread_descriptor_pool_free(struct pmc_thread *pt)
2404{
2405
2406 if (pt == NULL)
2407 return;
2408
2409 memset(pt, 0, THREADENTRY_SIZE);
2410 mtx_lock_spin(&pmc_threadfreelist_mtx);
2411 LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next);
2412 pmc_threadfreelist_entries++;
2413 if (pmc_threadfreelist_entries > pmc_threadfreelist_max)
2414 GROUPTASK_ENQUEUE(&free_gtask);
2415 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2416}
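/*
 * Annotation: the alloc/free pair above is a bounded free-list cache:
 * frees push entries onto a list under a spin lock, and once the list
 * exceeds a high-water mark the trimming is deferred to a task that
 * may safely call free().  A minimal single-threaded sketch of the
 * same idea (locking and the deferred task elided; names hypothetical):
 */
#include <stdlib.h>

struct item { struct item *next; };

static struct item *freelist;
static int nfree, nfree_max = 64;

static struct item *
cache_alloc(void)
{
	struct item *it = freelist;

	if (it != NULL) {		/* reuse a cached entry */
		freelist = it->next;
		nfree--;
		return (it);
	}
	return (malloc(sizeof(*it)));	/* fall back to the allocator */
}

static void
cache_free(struct item *it)
{
	it->next = freelist;		/* cache instead of freeing */
	freelist = it;
	if (++nfree > nfree_max) {
		/* Over the limit: the code above enqueues a task here so
		 * the excess can be free()d in a sleepable context. */
	}
}

int
main(void)
{
	struct item *it = cache_alloc();

	cache_free(it);			/* goes back onto the cache */
	free(cache_alloc());		/* and is handed out again */
	return (0);
}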
2417
2418/*
2419 * A callout to manage the free list.
2420 */
2421static void
2422 pmc_thread_descriptor_pool_free_task(void *arg __unused)
2423{
2424 struct pmc_thread *pt;
2425 LIST_HEAD(, pmc_thread) tmplist;
2426 int delta;
2427
2428 LIST_INIT(&tmplist);
2429 /* Determine what changes, if any, we need to make. */
2430 mtx_lock_spin(&pmc_threadfreelist_mtx);
2431 delta = pmc_threadfreelist_entries - pmc_threadfreelist_max;
2432 while (delta > 0 &&
2433 (pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2434 delta--;
2435 LIST_REMOVE(pt, pt_next);
2436 LIST_INSERT_HEAD(&tmplist, pt, pt_next);
2437 }
2438 mtx_unlock_spin(&pmc_threadfreelist_mtx);
2439
2440 /* If there are entries to free, free them. */
2441 while (!LIST_EMPTY(&tmplist)) {
2442 pt = LIST_FIRST(&tmplist);
2443 LIST_REMOVE(pt, pt_next);
2444 free(pt, M_PMC);
2445 }
2446}
2447
2448/*
2449 * Drain the thread free pool, freeing all allocations.
2450 */
2451static void
2452pmc_thread_descriptor_pool_drain()
2453{
2454 struct pmc_thread *pt, *next;
2455
2456 LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) {
2457 LIST_REMOVE(pt, pt_next);
2458 free(pt, M_PMC);
2459 }
2460}
2461
2462/*
2463 * find the descriptor corresponding to thread 'td', adding or removing it
2464 * as specified by 'mode'.
2465 *
2466 * Note that this supports additional mode flags in addition to those
2467 * supported by pmc_find_process_descriptor():
2468 * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs.
2469 * This makes it safe to call while holding certain other locks.
2470 */
2471
2472static struct pmc_thread *
2473pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td,
2474 uint32_t mode)
2475{
2476 struct pmc_thread *pt = NULL, *ptnew = NULL;
2477 int wait_flag;
2478
2479 KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__));
2480
2481 /*
2482 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to
2483 * acquiring the lock.
2484 */
2485 if (mode & PMC_FLAG_ALLOCATE) {
2486 if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) {
2487 wait_flag = M_WAITOK;
2488 if ((mode & PMC_FLAG_NOWAIT) || in_epoch(global_epoch_preempt))
2489 wait_flag = M_NOWAIT;
2490
2491 ptnew = malloc(THREADENTRY_SIZE, M_PMC,
2492 wait_flag|M_ZERO);
2493 }
2494 }
2495
2496 mtx_lock_spin(pp->pp_tdslock);
2497
2498 LIST_FOREACH(pt, &pp->pp_tds, pt_next)
2499 if (pt->pt_td == td)
2500 break;
2501
2502 if ((mode & PMC_FLAG_REMOVE) && pt != NULL)
2503 LIST_REMOVE(pt, pt_next);
2504
2505 if ((mode & PMC_FLAG_ALLOCATE) && pt == NULL && ptnew != NULL) {
2506 pt = ptnew;
2507 ptnew = NULL((void *)0);
2508 pt->pt_td = td;
2509 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next);
2510 }
2511
2512 mtx_unlock_spin(pp->pp_tdslock);
2513
2514 if (ptnew != NULL) {
2515 free(ptnew, M_PMC);
2516 }
2517
2518 return pt;
2519}
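/*
 * Annotation: the lookup above pre-allocates before taking the spin
 * lock, inserts only if the search missed, and discards the unused
 * pre-allocation afterwards, because malloc() may sleep and so cannot
 * be called while the spin lock is held.  A compilable sketch of that
 * shape (the lock itself is elided; the list and key are hypothetical):
 */
#include <stdlib.h>
#include <string.h>

struct td_desc { int key; struct td_desc *next; };

static struct td_desc *
find_or_add(struct td_desc **head, int key)
{
	struct td_desc *d, *dnew;

	dnew = malloc(sizeof(*dnew));	/* may sleep: do it before locking */

	/* --- spin lock would be held from here ... --- */
	for (d = *head; d != NULL; d = d->next)
		if (d->key == key)
			break;
	if (d == NULL && dnew != NULL) {
		memset(dnew, 0, sizeof(*dnew));
		dnew->key = key;
		dnew->next = *head;
		*head = dnew;
		d = dnew;
		dnew = NULL;		/* now owned by the list */
	}
	/* --- ... to here --- */

	free(dnew);	/* entry already existed: discard the spare */
	return (d);
}

int
main(void)
{
	struct td_desc *head = NULL;

	return (find_or_add(&head, 7) == find_or_add(&head, 7) ? 0 : 1);
}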
2520
2521/*
2522 * Try to add thread descriptors for each thread in a process.
2523 */
2524
2525static void
2526pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp)
2527{
2528 struct thread *curtd;
2529 struct pmc_thread **tdlist;
2530 int i, tdcnt, tdlistsz;
2531
2532 KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked",
2533 __LINE__));
2534 tdcnt = 32;
2535 restart:
2536 tdlistsz = roundup2(tdcnt, 32);
2537
2538 tdcnt = 0;
2539 tdlist = malloc(sizeof(struct pmc_thread*) * tdlistsz, M_TEMP, M_WAITOK0x0002);
2540
2541 PROC_LOCK(p);
2542 FOREACH_THREAD_IN_PROC(p, curtd)
2543 tdcnt++;
2544 if (tdcnt >= tdlistsz) {
2545 PROC_UNLOCK(p);
2546 free(tdlist, M_TEMP);
2547 goto restart;
2548 }
2549 /*
2550 * Try to add each thread to the list without sleeping. If unable,
2551 * add to a queue to retry after dropping the process lock.
2552 */
2553 tdcnt = 0;
2554 FOREACH_THREAD_IN_PROC(p, curtd) {
2555 tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd,
2556 PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT);
2557 if (tdlist[tdcnt] == NULL) {
2558 PROC_UNLOCK(p);
2559 for (i = 0; i <= tdcnt; i++)
2560 pmc_thread_descriptor_pool_free(tdlist[i]);
2561 free(tdlist, M_TEMP);
2562 goto restart;
2563 }
2564 tdcnt++;
2565 }
2566 PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock
, ((0)), ("/root/freebsd/sys/dev/hwpmc/hwpmc_mod.c"), (2566))
;
2567 free(tdlist, M_TEMP);
2568}
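
[editor's note] The restart loop above is a count-then-allocate pattern: the
thread count sizes tdlist before PROC_LOCK is taken, and is re-taken under
the lock; if threads appeared in between (tdcnt >= tdlistsz) or a
PMC_FLAG_NOWAIT descriptor allocation fails, everything is unwound and the
whole pass restarts.  Note the unwind bound at line 2559:

	for (i = 0; i <= tdcnt; i++)	/* '<=', not '<' */
		pmc_thread_descriptor_pool_free(tdlist[i]);

so pmc_thread_descriptor_pool_free() is also handed tdlist[tdcnt], the NULL
slot whose allocation just failed.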
2569
2570/*
2571 * find the descriptor corresponding to process 'p', adding or removing it
2572 * as specified by 'mode'.
2573 */
2574
2575static struct pmc_process *
2576pmc_find_process_descriptor(struct proc *p, uint32_t mode)
2577{
2578 uint32_t hindex;
2579 struct pmc_process *pp, *ppnew;
2580 struct pmc_processhash *pph;
2581
2582	hindex = PMC_HASH_PTR(p, pmc_processhashmask);
2583	pph = &pmc_processhash[hindex];
2584
2585	ppnew = NULL;
2586
2587	/*
2588	 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
2589	 * cannot call malloc(9) once we hold a spin lock.
2590	 */
2591	if (mode & PMC_FLAG_ALLOCATE)
2592		ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
2593		    sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
2594
2595	mtx_lock_spin(&pmc_processhash_mtx);
2596	LIST_FOREACH(pp, pph, pp_next)
2597	    if (pp->pp_proc == p)
2598		    break;
2599
2600	if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
2601		LIST_REMOVE(pp, pp_next);
2602
2603	if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
2604	    ppnew != NULL) {
2605		ppnew->pp_proc = p;
2606		LIST_INIT(&ppnew->pp_tds);
2607		ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew);
2608		LIST_INSERT_HEAD(pph, ppnew, pp_next);
2609		mtx_unlock_spin(&pmc_processhash_mtx);
2610		pp = ppnew;
2611		ppnew = NULL;
2612
2613		/* Add thread descriptors for this process' current threads. */
2614		pmc_add_thread_descriptors_from_proc(p, pp);
2615	}
2616	else
2617		mtx_unlock_spin(&pmc_processhash_mtx);
2618
2619	if (ppnew != NULL)
2620		free(ppnew, M_PMC);
2621
2622	return pp;
2623}
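
[editor's note] The function above follows the usual preallocate-then-lock
shape: malloc(9) with M_WAITOK may sleep, so the allocation happens before
the spin lock is taken, and the preallocated block is freed if the lookup
finds an existing entry.  A minimal stand-alone sketch of the pattern
(hypothetical names, not part of this source):

	struct entry *new, *e;

	new = malloc(sizeof(*new), M_TAG, M_WAITOK | M_ZERO);	/* may sleep */
	mtx_lock_spin(&hash_mtx);		/* no malloc(9) from here on */
	e = lookup_locked(key);
	if (e == NULL) {
		insert_locked(new);
		e = new;
		new = NULL;			/* preallocation consumed */
	}
	mtx_unlock_spin(&hash_mtx);
	if (new != NULL)
		free(new, M_TAG);		/* entry already existed */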
2624
2625/*
2626 * remove a process descriptor from the process hash table.
2627 */
2628
2629static void
2630pmc_remove_process_descriptor(struct pmc_process *pp)
2631{
2632	KASSERT(pp->pp_refcnt == 0,
2633	    ("[pmc,%d] Removing process descriptor %p with count %d",
2634		__LINE__, pp, pp->pp_refcnt));
2635
2636	mtx_lock_spin(&pmc_processhash_mtx);
2637	LIST_REMOVE(pp, pp_next);
2638	mtx_unlock_spin(&pmc_processhash_mtx);
2639}
2640
2641/*
2642 * destroy a process descriptor.
2643 */
2644
2645static void
2646pmc_destroy_process_descriptor(struct pmc_process *pp)
2647{
2648 struct pmc_thread *pmc_td;
2649
2650	while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) {
2651		LIST_REMOVE(pmc_td, pt_next);
2652		pmc_thread_descriptor_pool_free(pmc_td);
2653	}
2654	free(pp, M_PMC);
2655}
2656
2657
2658/*
2659 * find an owner descriptor corresponding to proc 'p'
2660 */
2661
2662static struct pmc_owner *
2663pmc_find_owner_descriptor(struct proc *p)
2664{
2665 uint32_t hindex;
2666 struct pmc_owner *po;
2667 struct pmc_ownerhash *poh;
2668
2669	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2670	poh = &pmc_ownerhash[hindex];
2671
2672	po = NULL;
2673	LIST_FOREACH(po, poh, po_next)
2674	    if (po->po_owner == p)
2675		    break;
2676
2677	PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2678	    "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2679
2680	return po;
2681}
2682
2683/*
2684 * pmc_allocate_pmc_descriptor
2685 *
2686 * Allocate a pmc descriptor and initialize its
2687 * fields.
2688 */
2689
2690static struct pmc *
2691pmc_allocate_pmc_descriptor(void)
2692{
2693 struct pmc *pmc;
2694
2695	pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
2696	pmc->pm_runcount = counter_u64_alloc(M_WAITOK);
2697	pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state)*mp_ncpus, M_PMC, M_WAITOK|M_ZERO);
2698 PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2699
2700 return pmc;
2701}
2702
2703/*
2704 * Destroy a pmc descriptor.
2705 */
2706
2707static void
2708pmc_destroy_pmc_descriptor(struct pmc *pm)
2709{
2710
2711	KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2712	    pm->pm_state == PMC_STATE_FREE,
2713	    ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2714	KASSERT(LIST_EMPTY(&pm->pm_targets),
2715	    ("[pmc,%d] destroying pmc with targets", __LINE__));
2716	KASSERT(pm->pm_owner == NULL,
2717	    ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2718	KASSERT(counter_u64_fetch(pm->pm_runcount) == 0,
2719	    ("[pmc,%d] pmc has non-zero run count %ld", __LINE__,
2720	    (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2721
2722 counter_u64_free(pm->pm_runcount);
2723 free(pm->pm_pcpu_state, M_PMC);
2724 free(pm, M_PMC);
2725}
2726
2727static void
2728pmc_wait_for_pmc_idle(struct pmc *pm)
2729{
2730#ifdef INVARIANTS
2731	volatile int maxloop;
2732
2733	maxloop = 100 * pmc_cpu_max();
2734#endif
2735	/*
2736	 * Loop (with a forced context switch) till the PMC's runcount
2737	 * comes down to zero.
2738	 */
2739	pmclog_flush(pm->pm_owner, 1);
2740	while (counter_u64_fetch(pm->pm_runcount) > 0) {
2741		pmclog_flush(pm->pm_owner, 1);
2742#ifdef INVARIANTS
2743		maxloop--;
2744		KASSERT(maxloop > 0,
2745		    ("[pmc,%d] (ri%d, rc%ld) waiting too long for "
2746			"pmc to be free", __LINE__,
2747			PMC_TO_ROWINDEX(pm), (unsigned long)counter_u64_fetch(pm->pm_runcount)));
2748#endif
2749		pmc_force_context_switch();
2750	}
2751}
2752
2753/*
2754 * This function does the following things:
2755 *
2756 * - detaches the PMC from hardware
2757 * - unlinks all target threads that were attached to it
2758 * - removes the PMC from its owner's list
2759 * - destroys the PMC private mutex
2760 *
2761 * Once this function completes, the given pmc pointer can be freed by
2762 * calling pmc_destroy_pmc_descriptor().
2763 */
2764
2765static void
2766pmc_release_pmc_descriptor(struct pmc *pm)
2767{
2768 enum pmc_mode mode;
2769 struct pmc_hw *phw;
2770 u_int adjri, ri, cpu;
2771 struct pmc_owner *po;
2772 struct pmc_binding pb;
2773 struct pmc_process *pp;
2774 struct pmc_classdep *pcd;
2775 struct pmc_target *ptgt, *tmp;
2776
2777	sx_assert(&pmc_sx, SX_XLOCKED);
2778
2779	KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2780
2781	ri = PMC_TO_ROWINDEX(pm);
2782	pcd = pmc_ri_to_classdep(md, ri, &adjri);
2783	mode = PMC_TO_MODE(pm);
2784
2785	PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2786	    mode);
2787
2788	/*
2789	 * First, we take the PMC off hardware.
2790	 */
2791	cpu = 0;
2792	if (PMC_IS_SYSTEM_MODE(mode)) {
2793
2794		/*
2795		 * A system mode PMC runs on a specific CPU. Switch
2796		 * to this CPU and turn hardware off.
2797		 */
2798		pmc_save_cpu_binding(&pb);
2799
2800		cpu = PMC_TO_CPU(pm);
2801
2802		pmc_select_cpu(cpu);
2803
2804		/* switch off non-stalled CPUs */
2805		pm->pm_pcpu_state[cpu].pps_cpustate = 0;
2806		if (pm->pm_state == PMC_STATE_RUNNING &&
2807		    pm->pm_pcpu_state[cpu].pps_stalled == 0) {
2808
2809			phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2810
2811			KASSERT(phw->phw_pmc == pm,
2812			    ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2813				__LINE__, ri, phw->phw_pmc, pm));
2814			PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2815
2816			critical_enter();
2817			pcd->pcd_stop_pmc(cpu, adjri);
2818			critical_exit();
2819		}
2820
2821		PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2822
2823		critical_enter();
2824		pcd->pcd_config_pmc(cpu, adjri, NULL);
2825		critical_exit();
2826
2827		/* adjust the global and process count of SS mode PMCs */
2828		if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2829			po = pm->pm_owner;
2830			po->po_sscount--;
2831			if (po->po_sscount == 0) {
2832				atomic_subtract_rel_int(&pmc_ss_count, 1);
2833				CK_LIST_REMOVE(po, po_ssnext);
2834				epoch_wait_preempt(global_epoch_preempt);
2835			}
2836		}
2837
2838		pm->pm_state = PMC_STATE_DELETED;
2839
2840		pmc_restore_cpu_binding(&pb);
2841
2842		/*
2843		 * We could have references to this PMC structure in
2844		 * the per-cpu sample queues. Wait for the queue to
2845		 * drain.
2846		 */
2847		pmc_wait_for_pmc_idle(pm);
2848
2849	} else if (PMC_IS_VIRTUAL_MODE(mode)) {
2850
2851		/*
2852		 * A virtual PMC could be running on multiple CPUs at
2853		 * a given instant.
2854		 *
2855		 * By marking its state as DELETED, we ensure that
2856		 * this PMC is never further scheduled on hardware.
2857		 *
2858		 * Then we wait till all CPUs are done with this PMC.
2859		 */
2860		pm->pm_state = PMC_STATE_DELETED;
2861
2862
2863		/* Wait for the PMCs runcount to come to zero. */
2864		pmc_wait_for_pmc_idle(pm);
2865
2866		/*
2867		 * At this point the PMC is off all CPUs and cannot be
2868		 * freshly scheduled onto a CPU. It is now safe to
2869		 * unlink all targets from this PMC. If a
2870		 * process-record's refcount falls to zero, we remove
2871		 * it from the hash table. The module-wide SX lock
2872		 * protects us from races.
2873		 */
2874		LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2875			pp = ptgt->pt_process;
2876			pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2877
2878			PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2879
2880			/*
2881			 * If the target process record shows that no
2882			 * PMCs are attached to it, reclaim its space.
2883			 */
2884
2885			if (pp->pp_refcnt == 0) {
2886				pmc_remove_process_descriptor(pp);
2887				pmc_destroy_process_descriptor(pp);
2888			}
2889		}
2890
2891		cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
2892
2893	}
2894
2895	/*
2896	 * Release any MD resources
2897	 */
2898	(void) pcd->pcd_release_pmc(cpu, adjri, pm);
2899
2900	/*
2901	 * Update row disposition
2902	 */
2903
2904	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2905		PMC_UNMARK_ROW_STANDALONE(ri);
2906	else
2907		PMC_UNMARK_ROW_THREAD(ri);
2908
2909	/* unlink from the owner's list */
2910	if (pm->pm_owner) {
2911		LIST_REMOVE(pm, pm_next);
2912		pm->pm_owner = NULL;
2913	}
2914}
2915
2916/*
2917 * Register an owner and a pmc.
2918 */
2919
2920static int
2921pmc_register_owner(struct proc *p, struct pmc *pmc)
2922{
2923 struct pmc_owner *po;
2924
2925	sx_assert(&pmc_sx, SX_XLOCKED);
2926
2927	if ((po = pmc_find_owner_descriptor(p)) == NULL)
2928		if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2929			return ENOMEM;
2930
2931	KASSERT(pmc->pm_owner == NULL,
2932	    ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2933	pmc->pm_owner = po;
2934
2935	LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2936
2937	PROC_LOCK(p);
2938	p->p_flag |= P_HWPMC;
2939	PROC_UNLOCK(p);
2940
2941	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
2942		pmclog_process_pmcallocate(pmc);
2943
2944	PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2945	    po, pmc);
2946
2947	return 0;
2948}
2949
2950/*
2951 * Return the current row disposition:
2952 * == 0 => FREE
2953 * > 0 => PROCESS MODE
2954 * < 0 => SYSTEM MODE
2955 */
2956
2957int
2958pmc_getrowdisp(int ri)
2959{
2960 return pmc_pmcdisp[ri];
2961}
2962
2963/*
2964 * Check if a PMC at row index 'ri' can be allocated to the current
2965 * process.
2966 *
2967 * Allocation can fail if:
2968 * - the current process is already being profiled by a PMC at index 'ri',
2969 * attached to it via OP_PMCATTACH.
2970 * - the current process has already allocated a PMC at index 'ri'
2971 * via OP_ALLOCATE.
2972 */
2973
2974static int
2975pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2976{
2977 enum pmc_mode mode;
2978 struct pmc *pm;
2979 struct pmc_owner *po;
2980 struct pmc_process *pp;
2981
2982 PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2983 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2984
2985 /*
2986 * We shouldn't have already allocated a process-mode PMC at
2987 * row index 'ri'.
2988 *
2989 * We shouldn't have allocated a system-wide PMC on the same
2990 * CPU and same RI.
2991 */
2992	if ((po = pmc_find_owner_descriptor(p)) != NULL)
2993		LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2994		    if (PMC_TO_ROWINDEX(pm) == ri) {
2995			    mode = PMC_TO_MODE(pm);
2996			    if (PMC_IS_VIRTUAL_MODE(mode))
2997				    return EEXIST;
2998			    if (PMC_IS_SYSTEM_MODE(mode) &&
2999				(int) PMC_TO_CPU(pm) == cpu)
3000				    return EEXIST;
3001		    }
3002	    }
3003
3004	/*
3005	 * We also shouldn't be the target of any PMC at this index
3006	 * since otherwise a PMC_ATTACH to ourselves will fail.
3007	 */
3008	if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
3009		if (pp->pp_pmcs[ri].pp_pmc)
3010			return EEXIST;
3011
3012	PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
3013	    p, p->p_pid, p->p_comm, ri);
3014
3015	return 0;
3016}
3017
3018/*
3019 * Check if a given PMC at row index 'ri' can be currently used in
3020 * mode 'mode'.
3021 */
3022
3023static int
3024pmc_can_allocate_row(int ri, enum pmc_mode mode)
3025{
3026 enum pmc_disp disp;
3027
3028	sx_assert(&pmc_sx, SX_XLOCKED);
3029
3030	PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
3031
3032	if (PMC_IS_SYSTEM_MODE(mode))
3033		disp = PMC_DISP_STANDALONE;
3034	else
3035		disp = PMC_DISP_THREAD;
3036
3037	/*
3038	 * check disposition for PMC row 'ri':
3039	 *
3040	 * Expected disposition		Row-disposition		Result
3041	 *
3042	 * STANDALONE			STANDALONE or FREE	proceed
3043	 * STANDALONE			THREAD			fail
3044	 * THREAD			THREAD or FREE		proceed
3045	 * THREAD			STANDALONE		fail
3046	 */
3047
3048	if (!PMC_ROW_DISP_IS_FREE(ri) &&
3049	    !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
3050	    !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
3051		return EBUSY;
3052
3053	/*
3054	 * All OK
3055	 */
3056
3057	PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
3058
3059	return 0;
3060
3061}
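
[editor's note] The three PMC_ROW_DISP_IS_* tests above read pmc_pmcdisp[ri],
a signed per-row reference count.  From the MARK/UNMARK macro expansions
elsewhere in this report, the encoding matches the comment before
pmc_getrowdisp():

	/*
	 * pmc_pmcdisp[ri] == 0   row is FREE
	 * pmc_pmcdisp[ri] >  0   +1 per THREAD (process-mode) allocation
	 * pmc_pmcdisp[ri] <  0   -1 per STANDALONE (system-mode) allocation,
	 *                        bounded below by -pmc_cpu_max_active()
	 */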
3062
3063/*
3064 * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
3065 */
3066
3067static struct pmc *
3068pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
3069{
3070 struct pmc *pm;
3071
3072	KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
3073	    ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
3074		PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
3075
3076	LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3077	    if (pm->pm_id == pmcid)
3078		    return pm;
3079
3080	return NULL;
3081}
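
[editor's note] From the accessor macros expanded throughout this report
(PMC_ID_TO_ROWINDEX, PMC_TO_MODE, PMC_TO_CPU, PMC_ID_MAKE_ID), a pmc_id_t
packs four fields; a sketch of the layout implied by the masks and shifts:

	/*
	 *  31         20 19       12 11      8 7          0
	 * +-------------+-----------+----------+-----------+
	 * |  cpu (12b)  | mode (8b) | class(4b)| row index |
	 * +-------------+-----------+----------+-----------+
	 *
	 * row index:  (id)         & 0xFF
	 * class:      ((id) >> 8)  & 0xF
	 * mode:       ((id) >> 12) & 0xFF
	 * cpu:        ((id) >> 20) & 0xFFF
	 */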
3082
3083static int
3084pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
3085{
3086
3087 struct pmc *pm, *opm;
3088 struct pmc_owner *po;
3089 struct pmc_process *pp;
3090
3091 PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid);
3092	if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc)
3093		return (EINVAL);
3094
3095	if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) {
3096		/*
3097		 * In case of PMC_F_DESCENDANTS child processes we will not find
3098		 * the current process in the owners hash list. Find the owner
3099		 * process first and from there lookup the po.
3100		 */
3101		if ((pp = pmc_find_process_descriptor(curthread->td_proc,
3102		    PMC_FLAG_NONE)) == NULL) {
3103			return ESRCH;
3104		} else {
3105			opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc;
3106			if (opm == NULL)
3107				return ESRCH;
3108			if ((opm->pm_flags & (PMC_F_ATTACHED_TO_OWNER|
3109			    PMC_F_DESCENDANTS)) != (PMC_F_ATTACHED_TO_OWNER|
3110			    PMC_F_DESCENDANTS))
3111				return ESRCH;
3112			po = opm->pm_owner;
3113		}
3114	}
3115
3116	if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
3117		return EINVAL;
3118
3119	PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
3120
3121	*pmc = pm;
3122	return 0;
3123}
3124
3125/*
3126 * Start a PMC.
3127 */
3128
3129static int
3130pmc_start(struct pmc *pm)
3131{
3132 enum pmc_mode mode;
3133 struct pmc_owner *po;
3134 struct pmc_binding pb;
3135 struct pmc_classdep *pcd;
3136 int adjri, error, cpu, ri;
3137
3138	KASSERT(pm != NULL,
3139	    ("[pmc,%d] null pm", __LINE__));
3140
3141	mode = PMC_TO_MODE(pm);
3142	ri = PMC_TO_ROWINDEX(pm);
3143	pcd = pmc_ri_to_classdep(md, ri, &adjri);
3144
3145	error = 0;
3146
3147	PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
3148
3149	po = pm->pm_owner;
3150
3151	/*
3152	 * Disallow PMCSTART if a logfile is required but has not been
3153	 * configured yet.
3154	 */
3155	if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
3156	    (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
3157		return (EDOOFUS);	/* programming error */
3158
3159	/*
3160	 * If this is a sampling mode PMC, log mapping information for
3161	 * the kernel modules that are currently loaded.
3162	 */
3163	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3164	    pmc_log_kernel_mappings(pm);
3165
3166	if (PMC_IS_VIRTUAL_MODE(mode)) {
3167
3168		/*
3169		 * If a PMCATTACH has never been done on this PMC,
3170		 * attach it to its owner process.
3171		 */
3172
3173		if (LIST_EMPTY(&pm->pm_targets))
3174			error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
3175			    pmc_attach_process(po->po_owner, pm);
3176
3177		/*
3178		 * If the PMC is attached to its owner, then force a context
3179		 * switch to ensure that the MD state gets set correctly.
3180		 */
3181
3182		if (error == 0) {
3183			pm->pm_state = PMC_STATE_RUNNING;
3184			if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
3185				pmc_force_context_switch();
3186		}
3187
3188		return (error);
3189	}
3190
3191
3192	/*
3193	 * A system-wide PMC.
3194	 *
3195	 * Add the owner to the global list if this is a system-wide
3196	 * sampling PMC.
3197	 */
3198
3199	if (mode == PMC_MODE_SS) {
3200		/*
3201		 * Log mapping information for all existing processes in the
3202		 * system.  Subsequent mappings are logged as they happen;
3203		 * see pmc_process_mmap().
3204		 */
3205		if (po->po_logprocmaps == 0) {
3206			pmc_log_all_process_mappings(po);
3207			po->po_logprocmaps = 1;
3208		}
3209		po->po_sscount++;
3210		if (po->po_sscount == 1) {
3211			atomic_add_rel_int(&pmc_ss_count, 1);
3212			CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
3213			PMCDBG1(PMC,OPS,1, "po=%p in global list", po);
3214		}
3215	}
3216
3217	/*
3218	 * Move to the CPU associated with this
3219	 * PMC, and start the hardware.
3220	 */
3221
3222	pmc_save_cpu_binding(&pb);
3223
3224	cpu = PMC_TO_CPU(pm);
3225
3226	if (!pmc_cpu_is_active(cpu))
3227		return (ENXIO);
3228
3229	pmc_select_cpu(cpu);
3230
3231	/*
3232	 * global PMCs are configured at allocation time
3233	 * so write out the initial value and start the PMC.
3234	 */
3235
3236	pm->pm_state = PMC_STATE_RUNNING;
3237
3238	critical_enter();
3239	if ((error = pcd->pcd_write_pmc(cpu, adjri,
3240	     PMC_IS_SAMPLING_MODE(mode) ?
3241	     pm->pm_sc.pm_reloadcount :
3242	     pm->pm_sc.pm_initial)) == 0) {
3243		/* If a sampling mode PMC, reset stalled state. */
3244		if (PMC_IS_SAMPLING_MODE(mode))
3245			pm->pm_pcpu_state[cpu].pps_stalled = 0;
3246
3247		/* Indicate that we desire this to run. Start it. */
3248		pm->pm_pcpu_state[cpu].pps_cpustate = 1;
3249		error = pcd->pcd_start_pmc(cpu, adjri);
3250	}
3251	critical_exit();
3252
3253	pmc_restore_cpu_binding(&pb);
3254
3255	return (error);
3256}
3257
3258/*
3259 * Stop a PMC.
3260 */
3261
3262static int
3263pmc_stop(struct pmc *pm)
3264{
3265 struct pmc_owner *po;
3266 struct pmc_binding pb;
3267 struct pmc_classdep *pcd;
3268 int adjri, cpu, error, ri;
3269
3270	KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
3271
3272	PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
3273	    PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
3274
3275	pm->pm_state = PMC_STATE_STOPPED;
3276
3277	/*
3278	 * If the PMC is a virtual mode one, changing the state to
3279	 * non-RUNNING is enough to ensure that the PMC never gets
3280	 * scheduled.
3281	 *
3282	 * If this PMC is current running on a CPU, then it will
3283	 * handled correctly at the time its target process is context
3284	 * switched out.
3285	 */
3286
3287	if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3288		return 0;
3289
3290	/*
3291	 * A system-mode PMC.  Move to the CPU associated with
3292	 * this PMC, and stop the hardware.  We update the
3293	 * 'initial count' so that a subsequent PMCSTART will
3294	 * resume counting from the current hardware count.
3295	 */
3296
3297	pmc_save_cpu_binding(&pb);
3298
3299	cpu = PMC_TO_CPU(pm);
3300
3301	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
3302	    ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
3303
3304	if (!pmc_cpu_is_active(cpu))
3305		return ENXIO;
3306
3307	pmc_select_cpu(cpu);
3308
3309	ri = PMC_TO_ROWINDEX(pm);
3310	pcd = pmc_ri_to_classdep(md, ri, &adjri);
3311
3312	pm->pm_pcpu_state[cpu].pps_cpustate = 0;
3313	critical_enter();
3314	if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
3315		error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
3316	critical_exit();
3317
3318	pmc_restore_cpu_binding(&pb);
3319
3320	po = pm->pm_owner;
3321
3322	/* remove this owner from the global list of SS PMC owners */
3323	if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
3324		po->po_sscount--;
3325		if (po->po_sscount == 0) {
3326			atomic_subtract_rel_int(&pmc_ss_count, 1);
3327			CK_LIST_REMOVE(po, po_ssnext);
3328			epoch_wait_preempt(global_epoch_preempt);
3329			PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po);
3330		}
3331	}
3332
3333	return (error);
3334}
3335
3336static struct pmc_classdep *
3337pmc_class_to_classdep(enum pmc_class class)
3338{
3339 int n;
3340
3341 for (n = 0; n < md->pmd_nclass; n++)
3342 if (md->pmd_classdep[n].pcd_class == class)
3343 return (&md->pmd_classdep[n]);
3344	return (NULL);
3345}
3346
3347#if defined(HWPMC_DEBUG) && defined(KTR)
3348static const char *pmc_op_to_name[] = {
3349#undef __PMC_OP
3350#define	__PMC_OP(N, D)	#N ,
3351	__PMC_OPS()
3352	NULL
3353};
3354#endif
3355
3356/*
3357 * The syscall interface
3358 */
3359
3360#define	PMC_GET_SX_XLOCK(...) do {		\
3361	sx_xlock(&pmc_sx);			\
3362	if (pmc_hook == NULL) {			\
3363		sx_xunlock(&pmc_sx);		\
3364		return __VA_ARGS__;		\
3365	}					\
3366} while (0)
3367
3368#define	PMC_DOWNGRADE_SX() do {			\
3369	sx_downgrade(&pmc_sx);			\
3370	is_sx_downgraded = 1;			\
3371} while (0)
3373static int
3374pmc_syscall_handler(struct thread *td, void *syscall_args)
3375{
3376 int error, is_sx_downgraded, op;
3377 struct pmc_syscall_args *c;
3378 void *pmclog_proc_handle;
3379 void *arg;
3380
3381 c = (struct pmc_syscall_args *)syscall_args;
3382 op = c->pmop_code;
3383 arg = c->pmop_data;
3384 /* PMC isn't set up yet */
3385	if (pmc_hook == NULL)
		[1] Assuming 'pmc_hook' is not equal to NULL
		[2] Taking false branch
3386		return (EINVAL);
3387	if (op == PMC_OP_CONFIGURELOG) {
		[3] Assuming 'op' is not equal to PMC_OP_CONFIGURELOG
		[4] Taking false branch
3388 /*
3389 * We cannot create the logging process inside
3390 * pmclog_configure_log() because there is a LOR
3391 * between pmc_sx and process structure locks.
3392 * Instead, pre-create the process and ignite the loop
3393 * if everything is fine, otherwise direct the process
3394 * to exit.
3395 */
3396 error = pmclog_proc_create(td, &pmclog_proc_handle);
3397 if (error != 0)
3398 goto done_syscall;
3399 }
3400
3401	PMC_GET_SX_XLOCK(ENOSYS);
		[5] Assuming 'pmc_hook' is not equal to NULL
		[6] Taking false branch
		[7] Loop condition is false. Exiting loop
3402 is_sx_downgraded = 0;
3403 PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
3404 pmc_op_to_name[op], arg);
3405
3406 error = 0;
3407 counter_u64_add(pmc_stats.pm_syscalls, 1);
3408
3409 switch (op) {
		[8] Control jumps to 'case PMC_OP_GETPMCINFO:' at line 3628
3410
3411
3412 /*
3413 * Configure a log file.
3414 *
3415 * XXX This OP will be reworked.
3416 */
3417
3418 case PMC_OP_CONFIGURELOG:
3419 {
3420 struct proc *p;
3421 struct pmc *pm;
3422 struct pmc_owner *po;
3423 struct pmc_op_configurelog cl;
3424
3425 if ((error = copyin(arg, &cl, sizeof(cl))) != 0) {
3426			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3427			break;
3428		}
3429
3430		/* mark this process as owning a log file */
3431		p = td->td_proc;
3432		if ((po = pmc_find_owner_descriptor(p)) == NULL)
3433			if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
3434				pmclog_proc_ignite(pmclog_proc_handle, NULL);
3435				error = ENOMEM;
3436				break;
3437			}
3438
3439		/*
3440		 * If a valid fd was passed in, try to configure that,
3441		 * otherwise if 'fd' was less than zero and there was
3442		 * a log file configured, flush its buffers and
3443		 * de-configure it.
3444		 */
3445		if (cl.pm_logfd >= 0) {
3446			error = pmclog_configure_log(md, po, cl.pm_logfd);
3447			pmclog_proc_ignite(pmclog_proc_handle, error == 0 ?
3448			    po : NULL);
3449		} else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
3450			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3451			error = pmclog_close(po);
3452			if (error == 0) {
3453				LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3454				    if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3455					pm->pm_state == PMC_STATE_RUNNING)
3456					    pmc_stop(pm);
3457				error = pmclog_deconfigure_log(po);
3458			}
3459		} else {
3460			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3461			error = EINVAL;
3462 }
3463 }
3464 break;
3465
3466 /*
3467 * Flush a log file.
3468 */
3469
3470 case PMC_OP_FLUSHLOG:
3471 {
3472 struct pmc_owner *po;
3473
3474		sx_assert(&pmc_sx, SX_XLOCKED);
3475
3476		if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3477			error = EINVAL;
3478 break;
3479 }
3480
3481 error = pmclog_flush(po, 0);
3482 }
3483 break;
3484
3485 /*
3486 * Close a log file.
3487 */
3488
3489 case PMC_OP_CLOSELOG:
3490 {
3491 struct pmc_owner *po;
3492
3493		sx_assert(&pmc_sx, SX_XLOCKED);
3494
3495		if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3496			error = EINVAL;
3497 break;
3498 }
3499
3500 error = pmclog_close(po);
3501 }
3502 break;
3503
3504 /*
3505 * Retrieve hardware configuration.
3506 */
3507
3508 case PMC_OP_GETCPUINFO: /* CPU information */
3509 {
3510 struct pmc_op_getcpuinfo gci;
3511 struct pmc_classinfo *pci;
3512 struct pmc_classdep *pcd;
3513 int cl;
3514
3515 gci.pm_cputype = md->pmd_cputype;
3516 gci.pm_ncpu = pmc_cpu_max();
3517 gci.pm_npmc = md->pmd_npmc;
3518 gci.pm_nclass = md->pmd_nclass;
3519 pci = gci.pm_classes;
3520 pcd = md->pmd_classdep;
3521 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
3522 pci->pm_caps = pcd->pcd_caps;
3523 pci->pm_class = pcd->pcd_class;
3524 pci->pm_width = pcd->pcd_width;
3525 pci->pm_num = pcd->pcd_num;
3526 }
3527 error = copyout(&gci, arg, sizeof(gci));
3528 }
3529 break;
3530
3531 /*
3532 * Retrieve soft events list.
3533 */
3534 case PMC_OP_GETDYNEVENTINFO:
3535 {
3536 enum pmc_class cl;
3537 enum pmc_event ev;
3538 struct pmc_op_getdyneventinfo *gei;
3539 struct pmc_dyn_event_descr dev;
3540 struct pmc_soft *ps;
3541 uint32_t nevent;
3542
3543		sx_assert(&pmc_sx, SX_LOCKED);
3544
3545 gei = (struct pmc_op_getdyneventinfo *) arg;
3546
3547 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
3548 break;
3549
3550 /* Only SOFT class is dynamic. */
3551 if (cl != PMC_CLASS_SOFT) {
3552			error = EINVAL;
3553 break;
3554 }
3555
3556 nevent = 0;
3557		for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) {
3558			ps = pmc_soft_ev_acquire(ev);
3559			if (ps == NULL)
3560				continue;
3561			bcopy(&ps->ps_ev, &dev, sizeof(dev));
3562 pmc_soft_ev_release(ps);
3563
3564 error = copyout(&dev,
3565 &gei->pm_events[nevent],
3566 sizeof(struct pmc_dyn_event_descr));
3567 if (error != 0)
3568 break;
3569 nevent++;
3570 }
3571 if (error != 0)
3572 break;
3573
3574 error = copyout(&nevent, &gei->pm_nevent,
3575 sizeof(nevent));
3576 }
3577 break;
3578
3579 /*
3580 * Get module statistics
3581 */
3582
3583 case PMC_OP_GETDRIVERSTATS:
3584 {
3585 struct pmc_op_getdriverstats gms;
3586#define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field)
3587 CFETCH(gms, pmc_stats, pm_intr_ignored);
3588 CFETCH(gms, pmc_stats, pm_intr_processed);
3589 CFETCH(gms, pmc_stats, pm_intr_bufferfull);
3590 CFETCH(gms, pmc_stats, pm_syscalls);
3591 CFETCH(gms, pmc_stats, pm_syscall_errors);
3592 CFETCH(gms, pmc_stats, pm_buffer_requests);
3593 CFETCH(gms, pmc_stats, pm_buffer_requests_failed);
3594 CFETCH(gms, pmc_stats, pm_log_sweeps);
3595#undef CFETCH
3596 error = copyout(&gms, arg, sizeof(gms));
3597 }
3598 break;
3599
3600
3601 /*
3602 * Retrieve module version number
3603 */
3604
3605 case PMC_OP_GETMODULEVERSION:
3606 {
3607 uint32_t cv, modv;
3608
3609 /* retrieve the client's idea of the ABI version */
3610 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
3611 break;
3612 /* don't service clients newer than our driver */
3613		modv = PMC_VERSION;
3614		if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
3615			error = EPROGMISMATCH;
3616 break;
3617 }
3618 error = copyout(&modv, arg, sizeof(int));
3619 }
3620 break;
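
[editor's note] The expansion recorded by the analyzer shows
PMC_VERSION == (0x09 << 24) | (0x03 << 16) | 0x0000, i.e. driver version
9.3.0, with the major number in bits 31-24, minor in bits 23-16 and patch
in the low 16 bits.  Masking both sides with 0xFFFF0000 above therefore
compares only major.minor: a client built against 9.2 (0x0902xxxx) is
accepted, while one built against 9.4 or later (>= 0x09040000) gets
EPROGMISMATCH.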
3621
3622
3623 /*
3624 * Retrieve the state of all the PMCs on a given
3625 * CPU.
3626 */
3627
3628 case PMC_OP_GETPMCINFO:
3629 {
3630 int ari;
3631 struct pmc *pm;
3632 size_t pmcinfo_size;
3633 uint32_t cpu, n, npmc;
3634 struct pmc_owner *po;
3635 struct pmc_binding pb;
3636 struct pmc_classdep *pcd;
3637 struct pmc_info *p, *pmcinfo;
3638 struct pmc_op_getpmcinfo *gpi;
3639
3640		PMC_DOWNGRADE_SX();
			[9] Loop condition is false. Exiting loop
3641
3642		gpi = (struct pmc_op_getpmcinfo *) arg;
3643
3644		if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
			[10] Assuming the condition is false
			[11] Taking false branch
3645			break;
3646
3647		if (cpu >= pmc_cpu_max()) {
			[12] Assuming the condition is false
			[13] Taking false branch
3648			error = EINVAL;
3649			break;
3650		}
3651
3652		if (!pmc_cpu_is_active(cpu)) {
			[14] Assuming the condition is false
			[15] Taking false branch
3653			error = ENXIO;
3654			break;
3655		}
3656
3657		/* switch to CPU 'cpu' */
3658		pmc_save_cpu_binding(&pb);
3659		pmc_select_cpu(cpu);
3660
3661		npmc = md->pmd_npmc;
3662
3663		pmcinfo_size = npmc * sizeof(struct pmc_info);
3664		pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
3665
3666		p = pmcinfo;
3667
3668		for (n = 0; n < md->pmd_npmc; n++, p++) {
			[16] Assuming the condition is true
			[17] Loop condition is true. Entering loop body
			[35] Assuming the condition is false
			[36] Loop condition is false. Execution continues on line 3706
3669
3670			pcd = pmc_ri_to_classdep(md, n, &ari);
3671
3672			KASSERT(pcd != NULL,
3673			    ("[pmc,%d] null pcd ri=%d", __LINE__, n));
			[18] Assuming 'pcd' is equal to null
			[19] Assuming the condition is false
			[20] Taking false branch
			[21] Loop condition is false. Exiting loop
3674
3675			if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
			[22] Assuming the condition is false
			[23] Taking false branch
3676				break;
3677
3678			if (PMC_ROW_DISP_IS_STANDALONE(n))
			[24] Assuming the condition is false
			[25] Taking false branch
3679				p->pm_rowdisp = PMC_DISP_STANDALONE;
3680			else if (PMC_ROW_DISP_IS_THREAD(n))
			[26] Assuming the condition is false
			[27] Taking false branch
3681				p->pm_rowdisp = PMC_DISP_THREAD;
3682			else
3683				p->pm_rowdisp = PMC_DISP_FREE;
3684
3685			p->pm_ownerpid = -1;
3686
3687			if (pm == NULL)	/* no PMC associated */
			[28] Assuming 'pm' is not equal to NULL
			[29] Taking false branch
3688				continue;
3689
3690			po = pm->pm_owner;
3691
3692			KASSERT(po->po_owner != NULL,
3693			    ("[pmc,%d] pmc_owner had a null proc pointer",
3694				__LINE__));
			[30] Assuming the condition is false
			[31] Assuming the condition is false
			[32] Taking false branch
			[33] Loop condition is false. Exiting loop
3695
3696			p->pm_ownerpid = po->po_owner->p_pid;
3697			p->pm_mode = PMC_TO_MODE(pm);
3698			p->pm_event = pm->pm_event;
3699			p->pm_flags = pm->pm_flags;
3700
3701			if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			[34] Assuming the condition is true
3702				p->pm_reloadcount =
3703				    pm->pm_sc.pm_reloadcount;
3704		}
3705
3706		pmc_restore_cpu_binding(&pb);
3707
3708		/* now copy out the PMC info collected */
3709		if (error == 0)
			[37] Taking true branch
3710			error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
			[38] Copies out a struct with untouched element(s): pm_name, pm_class, pm_enabled
3711
3712		free(pmcinfo, M_PMC);
3713	}
3714 break;
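
[editor's note] The warning at line 3710 (step 38) is a kernel memory
disclosure: pmcinfo is allocated at line 3664 with plain M_WAITOK, and on
the path the checker models, the indirect pcd_describe() call never writes
pm_name, pm_class or pm_enabled, so those struct pmc_info fields still hold
stale heap bytes when copyout() hands the whole array to userland.  A
minimal fix sketch (not necessarily the committed patch) is to request
zeroed memory:

	pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK | M_ZERO);

or, equivalently, to bzero(pmcinfo, pmcinfo_size) before the describe loop,
so any field a describe hook skips is copied out as zeroes rather than
uninitialized kernel heap.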
3715
3716
3717 /*
3718 * Set the administrative state of a PMC. I.e. whether
3719 * the PMC is to be used or not.
3720 */
3721
3722 case PMC_OP_PMCADMIN:
3723 {
3724 int cpu, ri;
3725 enum pmc_state request;
3726 struct pmc_cpu *pc;
3727 struct pmc_hw *phw;
3728 struct pmc_op_pmcadmin pma;
3729 struct pmc_binding pb;
3730
3731		sx_assert(&pmc_sx, SX_XLOCKED);
3732
3733		KASSERT(td == curthread,
3734		    ("[pmc,%d] td != curthread", __LINE__));
3735
3736		error = priv_check(td, PRIV_PMC_MANAGE);
3737		if (error)
3738			break;
3739
3740		if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
3741			break;
3742
3743		cpu = pma.pm_cpu;
3744
3745		if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
3746			error = EINVAL;
3747			break;
3748		}
3749
3750		if (!pmc_cpu_is_active(cpu)) {
3751			error = ENXIO;
3752			break;
3753		}
3754
3755		request = pma.pm_state;
3756
3757		if (request != PMC_STATE_DISABLED &&
3758		    request != PMC_STATE_FREE) {
3759			error = EINVAL;
3760			break;
3761		}
3762
3763		ri = pma.pm_pmc; /* pmc id == row index */
3764		if (ri < 0 || ri >= (int) md->pmd_npmc) {
3765			error = EINVAL;
3766			break;
3767		}
3768
3769		/*
3770		 * We can't disable a PMC with a row-index allocated
3771		 * for process virtual PMCs.
3772		 */
3773
3774		if (PMC_ROW_DISP_IS_THREAD(ri) &&
3775		    request == PMC_STATE_DISABLED) {
3776			error = EBUSY;
3777			break;
3778		}
3779
3780		/*
3781		 * otherwise, this PMC on this CPU is either free or
3782		 * in system-wide mode.
3783		 */
3784
3785		pmc_save_cpu_binding(&pb);
3786		pmc_select_cpu(cpu);
3787
3788		pc  = pmc_pcpu[cpu];
3789		phw = pc->pc_hwpmcs[ri];
3790
3791		/*
3792		 * XXX do we need some kind of 'forced' disable?
3793		 */
3794
3795		if (phw->phw_pmc == NULL) {
3796			if (request == PMC_STATE_DISABLED &&
3797			    (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
3798				phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
3799				PMC_MARK_ROW_STANDALONE(ri);
3800			} else if (request == PMC_STATE_FREE &&
3801			    (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
3802				phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
3803				PMC_UNMARK_ROW_STANDALONE(ri);
3804			}
3805			/* other cases are a no-op */
3806		} else
3807			error = EBUSY;
3808
3809 pmc_restore_cpu_binding(&pb);
3810 }
3811 break;
3812
3813
3814 /*
3815 * Allocate a PMC.
3816 */
3817
3818 case PMC_OP_PMCALLOCATE:
3819 {
3820 int adjri, n;
3821 u_int cpu;
3822 uint32_t caps;
3823 struct pmc *pmc;
3824 enum pmc_mode mode;
3825 struct pmc_hw *phw;
3826 struct pmc_binding pb;
3827 struct pmc_classdep *pcd;
3828 struct pmc_op_pmcallocate pa;
3829
3830 if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
3831 break;
3832
3833 caps = pa.pm_caps;
3834 mode = pa.pm_mode;
3835 cpu = pa.pm_cpu;
3836
3837 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
3838 mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
3839		    (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
3840			error = EINVAL;
3841 break;
3842 }
3843
3844 /*
3845 * Virtual PMCs should only ask for a default CPU.
3846 * System mode PMCs need to specify a non-default CPU.
3847 */
3848
3849		if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
3850		    (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
3851			error = EINVAL;
3852 break;
3853 }
3854
3855 /*
3856 * Check that an inactive CPU is not being asked for.
3857 */
3858
3859		if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
3860			error = ENXIO;
3861 break;
3862 }
3863
3864 /*
3865 * Refuse an allocation for a system-wide PMC if this
3866 * process has been jailed, or if this process lacks
3867 * super-user credentials and the sysctl tunable
3868 * 'security.bsd.unprivileged_syspmcs' is zero.
3869 */
3870
3871		if (PMC_IS_SYSTEM_MODE(mode)) {
3872			if (jailed(curthread->td_ucred)) {
3873				error = EPERM;
3874				break;
3875			}
3876			if (!pmc_unprivileged_syspmcs) {
3877				error = priv_check(curthread,
3878				    PRIV_PMC_SYSTEM);
3879 if (error)
3880 break;
3881 }
3882 }
3883
3884 /*
3885 * Look for valid values for 'pm_flags'
3886 */
3887
3888		if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3889		    PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN |
3890		    PMC_F_USERCALLCHAIN)) != 0) {
3891			error = EINVAL;
3892 break;
3893 }
3894
3895 /* PMC_F_USERCALLCHAIN is only valid with PMC_F_CALLCHAIN */
3896		if ((pa.pm_flags & (PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) ==
3897		    PMC_F_USERCALLCHAIN) {
3898			error = EINVAL;
3899 break;
3900 }
3901
3902 /* PMC_F_USERCALLCHAIN is only valid for sampling mode */
3903		if (pa.pm_flags & PMC_F_USERCALLCHAIN &&
3904		    mode != PMC_MODE_TS && mode != PMC_MODE_SS) {
3905			error = EINVAL;
3906 break;
3907 }
3908
3909 /* process logging options are not allowed for system PMCs */
3910		if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
3911		    (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
3912			error = EINVAL;
3913 break;
3914 }
3915
3916 /*
3917 * All sampling mode PMCs need to be able to interrupt the
3918 * CPU.
3919 */
3920		if (PMC_IS_SAMPLING_MODE(mode))
3921 caps |= PMC_CAP_INTERRUPT;
3922
3923 /* A valid class specifier should have been passed in. */
3924 pcd = pmc_class_to_classdep(pa.pm_class);
3925		if (pcd == NULL) {
3926			error = EINVAL;
3927 break;
3928 }
3929
3930 /* The requested PMC capabilities should be feasible. */
3931 if ((pcd->pcd_caps & caps) != caps) {
3932			error = EOPNOTSUPP;
3933 break;
3934 }
3935
3936 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
3937 pa.pm_ev, caps, mode, cpu);
3938
3939 pmc = pmc_allocate_pmc_descriptor();
3940		pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
3941		    PMC_ID_INVALID);
3942 pmc->pm_event = pa.pm_ev;
3943 pmc->pm_state = PMC_STATE_FREE;
3944 pmc->pm_caps = caps;
3945 pmc->pm_flags = pa.pm_flags;
3946
3947 /* XXX set lower bound on sampling for process counters */
3948		if (PMC_IS_SAMPLING_MODE(mode)) {
3949			/*
3950			 * Don't permit requested sample rate to be less than 1000
3951			 */
3952			if (pa.pm_count < 1000)
3953				log(LOG_WARNING,
3954				    "pmcallocate: passed sample rate %ju - setting to 1000\n",
3955				    (uintmax_t)pa.pm_count);
3956			pmc->pm_sc.pm_reloadcount = MAX(1000, pa.pm_count);
3957 } else
3958 pmc->pm_sc.pm_initial = pa.pm_count;
3959
3960 /* switch thread to CPU 'cpu' */
3961 pmc_save_cpu_binding(&pb);
3962
3963#define	PMC_IS_SHAREABLE_PMC(cpu, n)				\
3964	(pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state &		\
3965	 PMC_PHW_FLAG_IS_SHAREABLE)
3966#define	PMC_IS_UNALLOCATED(cpu, n)				\
3967	(pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3968
3969		if (PMC_IS_SYSTEM_MODE(mode)) {
3970			pmc_select_cpu(cpu);
3971			for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) {
3972				pcd = pmc_ri_to_classdep(md, n, &adjri);
3973				if (pmc_can_allocate_row(n, mode) == 0 &&
3974				    pmc_can_allocate_rowindex(
3975					    curthread->td_proc, n, cpu) == 0 &&
3976				    (PMC_IS_UNALLOCATED(cpu, n) ||
3977				     PMC_IS_SHAREABLE_PMC(cpu, n)) &&
3978				    pcd->pcd_allocate_pmc(cpu, adjri, pmc,
3979					&pa) == 0)
3980					break;
3981			}
3982		} else {
3983			/* Process virtual mode */
3984			for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) {
3985				pcd = pmc_ri_to_classdep(md, n, &adjri);
3986				if (pmc_can_allocate_row(n, mode) == 0 &&
3987				    pmc_can_allocate_rowindex(
3988					    curthread->td_proc, n,
3989					    PMC_CPU_ANY) == 0 &&
3990				    pcd->pcd_allocate_pmc(curthread->td_oncpu,
3991					adjri, pmc, &pa) == 0)
3992					break;
3993			}
3994		}
3995
3996#undef PMC_IS_UNALLOCATED
3997#undef PMC_IS_SHAREABLE_PMC
3998
3999 pmc_restore_cpu_binding(&pb);
4000
4001 if (n == (int) md->pmd_npmc) {
4002 pmc_destroy_pmc_descriptor(pmc);
4003			pmc = NULL;
4004			error = EINVAL;
4005 break;
4006 }
4007
4008 /* Fill in the correct value in the ID field */
4009		pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
4010
4011 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
4012 pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
4013
4014 /* Process mode PMCs with logging enabled need log files */
4015		if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
4016			pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
4017
4018		/* All system mode sampling PMCs require a log file */
4019		if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
4020			pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
4021
4022 /*
4023 * Configure global pmc's immediately
4024 */
4025
4026		if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
4027
4028 pmc_save_cpu_binding(&pb);
4029 pmc_select_cpu(cpu);
4030
4031 phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
4032 pcd = pmc_ri_to_classdep(md, n, &adjri);
4033
4034 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
4035 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
4036 (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
4037 pmc_destroy_pmc_descriptor(pmc);
4038 pmc = NULL;
4039 pmc_restore_cpu_binding(&pb);
4040 error = EPERM;
4041 break;
4042 }
4043
4044 pmc_restore_cpu_binding(&pb);
4045 }
4046
4047 pmc->pm_state = PMC_STATE_ALLOCATED;
4048 pmc->pm_class = pa.pm_class;
4049
4050 /*
4051 * mark row disposition
4052 */
4053
4054 if (PMC_IS_SYSTEM_MODE(mode))
4055 PMC_MARK_ROW_STANDALONE(n);
4056 else
4057 PMC_MARK_ROW_THREAD(n);
4058
4059 /*
4060 * Register this PMC with the current thread as its owner.
4061 */
4062
4063 if ((error =
4064 pmc_register_owner(curthread->td_proc, pmc)) != 0) {
4065 pmc_release_pmc_descriptor(pmc);
4066 pmc_destroy_pmc_descriptor(pmc);
4067 pmc = NULL;
4068 break;
4069 }
4070
4071
4072 /*
4073 * Return the allocated index.
4074 */
4075
4076 pa.pm_pmcid = pmc->pm_id;
4077
4078 error = copyout(&pa, arg, sizeof(pa));
4079 }
4080 break;
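
The PMC_ID_MAKE_ID() packing used in this handler, together with the PMC_TO_CPU()/PMC_TO_MODE()/PMC_TO_ROWINDEX() extractors the later operations rely on, reduces to the shift-and-mask layout visible in the expansions: cpu in bits 31..20, mode in bits 19..12, class in bits 11..8 and row index in bits 7..0. A minimal standalone sketch of that layout (the demo_* names are illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pmc_id_t;

    /*
     * Sketch of the id layout: cpu in bits 31..20, mode in bits
     * 19..12, class in bits 11..8, row index in bits 7..0.
     */
    static pmc_id_t
    demo_make_id(uint32_t cpu, uint32_t mode, uint32_t class, uint32_t ri)
    {
    	return (((cpu & 0xFFF) << 20) | ((mode & 0xFF) << 12) |
    	    ((class & 0xF) << 8) | (ri & 0xFF));
    }

    static uint32_t demo_to_cpu(pmc_id_t id)      { return ((id & 0xFFF00000) >> 20); }
    static uint32_t demo_to_mode(pmc_id_t id)     { return ((id & 0xFF000) >> 12); }
    static uint32_t demo_to_rowindex(pmc_id_t id) { return (id & 0xFF); }

    int
    main(void)
    {
    	pmc_id_t id = demo_make_id(2, 1, 3, 7);

    	printf("cpu=%u mode=%u ri=%u\n", demo_to_cpu(id),
    	    demo_to_mode(id), demo_to_rowindex(id));
    	return (0);
    }
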
4081
4082
4083 /*
4084 * Attach a PMC to a process.
4085 */
4086
4087 case PMC_OP_PMCATTACH:
4088 {
4089 struct pmc *pm;
4090 struct proc *p;
4091 struct pmc_op_pmcattach a;
4092
4093 sx_assert(&pmc_sx, SX_XLOCKED);
4094
4095 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4096 break;
4097
4098 if (a.pm_pid < 0) {
4099 error = EINVAL;
4100 break;
4101 } else if (a.pm_pid == 0)
4102 a.pm_pid = td->td_proc->p_pid;
4103
4104 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4105 break;
4106
4107 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
4108 error = EINVAL;
4109 break;
4110 }
4111
4112 /* PMCs may be (re)attached only when allocated or stopped */
4113 if (pm->pm_state == PMC_STATE_RUNNING) {
4114 error = EBUSY;
4115 break;
4116 } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
4117 pm->pm_state != PMC_STATE_STOPPED) {
4118 error = EINVAL;
4119 break;
4120 }
4121
4122 /* lookup pid */
4123 if ((p = pfind(a.pm_pid)) == NULL) {
4124 error = ESRCH;
4125 break;
4126 }
4127
4128 /*
4129 * Ignore processes that are working on exiting.
4130 */
4131 if (p->p_flag & P_WEXIT) {
4132 error = ESRCH;
4133 PROC_UNLOCK(p); /* pfind() returns a locked process */
4134 break;
4135 }
4136
4137 /*
4138 * we are allowed to attach a PMC to a process if
4139 * we can debug it.
4140 */
4141 error = p_candebug(curthread, p);
4142
4143 PROC_UNLOCK(p);
4144
4145 if (error == 0)
4146 error = pmc_attach_process(p, pm);
4147 }
4148 break;
4149
4150
4151 /*
4152 * Detach an attached PMC from a process.
4153 */
4154
4155 case PMC_OP_PMCDETACH:
4156 {
4157 struct pmc *pm;
4158 struct proc *p;
4159 struct pmc_op_pmcattach a;
4160
4161 if ((error = copyin(arg, &a, sizeof(a))) != 0)
4162 break;
4163
4164 if (a.pm_pid < 0) {
4165 error = EINVAL;
4166 break;
4167 } else if (a.pm_pid == 0)
4168 a.pm_pid = td->td_proc->p_pid;
4169
4170 if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
4171 break;
4172
4173 if ((p = pfind(a.pm_pid)) == NULL) {
4174 error = ESRCH;
4175 break;
4176 }
4177
4178 /*
4179 * Treat processes that are in the process of exiting
4180 * as if they were not present.
4181 */
4182
4183 if (p->p_flag & P_WEXIT)
4184 error = ESRCH;
4185
4186 PROC_UNLOCK(p); /* pfind() returns a locked process */
4187
4188 if (error == 0)
4189 error = pmc_detach_process(p, pm);
4190 }
4191 break;
4192
4193
4194 /*
4195 * Retrieve the MSR number associated with the counter
4196 * 'pmc_id'. This allows processes to directly use RDPMC
4197 * instructions to read their PMCs, without the overhead of a
4198 * system call.
4199 */
4200
4201 case PMC_OP_PMCGETMSR:
4202 {
4203 int adjri, ri;
4204 struct pmc *pm;
4205 struct pmc_target *pt;
4206 struct pmc_op_getmsr gm;
4207 struct pmc_classdep *pcd;
4208
4209 PMC_DOWNGRADE_SX();
4210
4211 if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
4212 break;
4213
4214 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
4215 break;
4216
4217 /*
4218 * The allocated PMC has to be a process virtual PMC,
4219 * i.e., of type MODE_T[CS]. Global PMCs can only be
4220 * read using the PMCREAD operation since they may be
4221 * allocated on a different CPU than the one we could
4222 * be running on at the time of the RDPMC instruction.
4223 *
4224 * The GETMSR operation is not allowed for PMCs that
4225 * are inherited across processes.
4226 */
4227
4228 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
4229 (pm->pm_flags & PMC_F_DESCENDANTS)) {
4230 error = EINVAL;
4231 break;
4232 }
4233
4234 /*
4235 * It only makes sense to use a RDPMC (or its
4236 * equivalent instruction on non-x86 architectures) on
4237 * a process that has allocated and attached a PMC to
4238 * itself. Conversely the PMC is only allowed to have
4239 * one process attached to it -- its owner.
4240 */
4241
4242 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
4243 LIST_NEXT(pt, pt_next) != NULL ||
4244 pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
4245 error = EINVAL;
4246 break;
4247 }
4248
4249 ri = PMC_TO_ROWINDEX(pm);
4250 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4251
4252 /* PMC class has no 'GETMSR' support */
4253 if (pcd->pcd_get_msr == NULL) {
4254 error = ENOSYS;
4255 break;
4256 }
4257
4258 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0)
4259 break;
4260
4261 if ((error = copyout(&gm, arg, sizeof(gm))) < 0)
4262 break;
4263
4264 /*
4265 * Mark our process as using MSRs. Update machine
4266 * state using a forced context switch.
4267 */
4268
4269 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
4270 pmc_force_context_switch();
4271
4272 }
4273 break;
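
Once PMC_OP_PMCGETMSR succeeds, the owner process can sample its own counter with RDPMC instead of a system call, which is the whole point of this operation. A hedged userland sketch, assuming an x86-64 target; demo_rdpmc() is illustrative and not part of libpmc:

    #include <stdint.h>

    /*
     * Read a counter directly with RDPMC, given the counter number
     * returned in gm.pm_msr by PMC_OP_PMCGETMSR. On x86-64 the
     * counter index goes in %ecx and the 64-bit value comes back
     * in %edx:%eax.
     */
    static inline uint64_t
    demo_rdpmc(uint32_t counter)
    {
    	uint32_t lo, hi;

    	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
    	return (((uint64_t)hi << 32) | lo);
    }
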
4274
4275 /*
4276 * Release an allocated PMC
4277 */
4278
4279 case PMC_OP_PMCRELEASE:
4280 {
4281 pmc_id_t pmcid;
4282 struct pmc *pm;
4283 struct pmc_owner *po;
4284 struct pmc_op_simple sp;
4285
4286 /*
4287 * Find PMC pointer for the named PMC.
4288 *
4289 * Use pmc_release_pmc_descriptor() to switch off the
4290 * PMC, remove all its target threads, and remove the
4291 * PMC from its owner's list.
4292 *
4293 * Remove the owner record if this is the last PMC
4294 * owned.
4295 *
4296 * Free up space.
4297 */
4298
4299 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4300 break;
4301
4302 pmcid = sp.pm_pmcid;
4303
4304 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4305 break;
4306
4307 po = pm->pm_owner;
4308 pmc_release_pmc_descriptor(pm);
4309 pmc_maybe_remove_owner(po);
4310 pmc_destroy_pmc_descriptor(pm);
4311 }
4312 break;
4313
4314
4315 /*
4316 * Read and/or write a PMC.
4317 */
4318
4319 case PMC_OP_PMCRW:
4320 {
4321 int adjri;
4322 struct pmc *pm;
4323 uint32_t cpu, ri;
4324 pmc_value_t oldvalue;
4325 struct pmc_binding pb;
4326 struct pmc_op_pmcrw prw;
4327 struct pmc_classdep *pcd;
4328 struct pmc_op_pmcrw *pprw;
4329
4330 PMC_DOWNGRADE_SX();
4331
4332 if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
4333 break;
4334
4335 ri = 0;
4336 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
4337 prw.pm_flags);
4338
4339 /* must have at least one flag set */
4340 if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
4341 error = EINVAL;
4342 break;
4343 }
4344
4345 /* locate pmc descriptor */
4346 if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
4347 break;
4348
4349 /* Can't read a PMC that hasn't been started. */
4350 if (pm->pm_state != PMC_STATE_ALLOCATED &&
4351 pm->pm_state != PMC_STATE_STOPPED &&
4352 pm->pm_state != PMC_STATE_RUNNING) {
4353 error = EINVAL;
4354 break;
4355 }
4356
4357 /* writing a new value is allowed only for 'STOPPED' pmcs */
4358 if (pm->pm_state == PMC_STATE_RUNNING &&
4359 (prw.pm_flags & PMC_F_NEWVALUE)) {
4360 error = EBUSY;
4361 break;
4362 }
4363
4364 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
4365
4366 /*
4367 * If this PMC is attached to its owner (i.e.,
4368 * the process requesting this operation) and
4369 * is running, then attempt to get an
4370 * upto-date reading from hardware for a READ.
4371 * Writes are only allowed when the PMC is
4372 * stopped, so only update the saved value
4373 * field.
4374 *
4375 * If the PMC is not running, or is not
4376 * attached to its owner, read/write to the
4377 * savedvalue field.
4378 */
4379
4380 ri = PMC_TO_ROWINDEX(pm);
4381 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4382
4383 mtx_pool_lock_spin(pmc_mtxpool, pm);
4384 cpu = curthread->td_oncpu;
4385
4386 if (prw.pm_flags & PMC_F_OLDVALUE) {
4387 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
4388 (pm->pm_state == PMC_STATE_RUNNING))
4389 error = (*pcd->pcd_read_pmc)(cpu, adjri,
4390 &oldvalue);
4391 else
4392 oldvalue = pm->pm_gv.pm_savedvalue;
4393 }
4394 if (prw.pm_flags & PMC_F_NEWVALUE)
4395 pm->pm_gv.pm_savedvalue = prw.pm_value;
4396
4397 mtx_pool_unlock_spin(pmc_mtxpool, pm);
4398
4399 } else { /* System mode PMCs */
4400 cpu = PMC_TO_CPU(pm);
4401 ri = PMC_TO_ROWINDEX(pm);
4402 pcd = pmc_ri_to_classdep(md, ri, &adjri);
4403
4404 if (!pmc_cpu_is_active(cpu)) {
4405 error = ENXIO;
4406 break;
4407 }
4408
4409 /* move this thread to CPU 'cpu' */
4410 pmc_save_cpu_binding(&pb);
4411 pmc_select_cpu(cpu);
4412
4413 critical_enter();
4414 /* save old value */
4415 if (prw.pm_flags & PMC_F_OLDVALUE)
4416 if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
4417 &oldvalue)))
4418 goto error;
4419 /* write out new value */
4420 if (prw.pm_flags & PMC_F_NEWVALUE)
4421 error = (*pcd->pcd_write_pmc)(cpu, adjri,
4422 prw.pm_value);
4423 error:
4424 critical_exit();
4425 pmc_restore_cpu_binding(&pb);
4426 if (error)
4427 break;
4428 }
4429
4430 pprw = (struct pmc_op_pmcrw *) arg;
4431
4432#ifdef HWPMC_DEBUG
4433 if (prw.pm_flags & PMC_F_NEWVALUE)
4434 PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
4435 ri, prw.pm_value, oldvalue);
4436 else if (prw.pm_flags & PMC_F_OLDVALUE)
4437 PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
4438#endif
4439
4440 /* return old value if requested */
4441 if (prw.pm_flags & PMC_F_OLDVALUE)
4442 if ((error = copyout(&oldvalue, &pprw->pm_value,
4443 sizeof(prw.pm_value))))
4444 break;
4445
4446 }
4447 break;
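
From the owner's side, the PMC_OP_PMCRW handler above is driven by a small request structure carrying an id, the OLDVALUE/NEWVALUE flags, and a value field that is both input and output. A sketch under the flag values shown in this listing (PMC_F_NEWVALUE == 0x10, PMC_F_OLDVALUE == 0x20); the demo_* names and the syscall stub are stand-ins for the real libpmc/syscall plumbing:

    #include <stdint.h>

    /* Request layout following the 'prw' usage above (illustrative). */
    struct demo_pmc_op_pmcrw {
    	uint32_t pm_pmcid;	/* PMC id to operate on */
    	uint32_t pm_flags;	/* PMC_F_OLDVALUE and/or PMC_F_NEWVALUE */
    	uint64_t pm_value;	/* in: new value; out: previous value */
    };

    #define DEMO_PMC_F_NEWVALUE	0x00000010	/* values from this listing */
    #define DEMO_PMC_F_OLDVALUE	0x00000020

    /* Hypothetical transport into the PMC_OP_PMCRW handler above. */
    int demo_pmc_op(int op, void *arg);

    /* Reset a stopped counting PMC to zero and fetch its old value. */
    static int
    demo_pmc_swap(uint32_t pmcid, uint64_t *oldp)
    {
    	struct demo_pmc_op_pmcrw prw;
    	int error;

    	prw.pm_pmcid = pmcid;
    	prw.pm_flags = DEMO_PMC_F_OLDVALUE | DEMO_PMC_F_NEWVALUE;
    	prw.pm_value = 0;
    	if ((error = demo_pmc_op(/* PMC_OP_PMCRW */ 0, &prw)) == 0)
    		*oldp = prw.pm_value;
    	return (error);
    }
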
4448
4449
4450 /*
4451 * Set the sampling rate for a sampling mode PMC and the
4452 * initial count for a counting mode PMC.
4453 */
4454
4455 case PMC_OP_PMCSETCOUNT:
4456 {
4457 struct pmc *pm;
4458 struct pmc_op_pmcsetcount sc;
4459
4460 PMC_DOWNGRADE_SX();
4461
4462 if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
4463 break;
4464
4465 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
4466 break;
4467
4468 if (pm->pm_state == PMC_STATE_RUNNING) {
4469 error = EBUSY;
4470 break;
4471 }
4472
4473 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
4474 /*
4475 * Don't permit requested sample rate to be less than 1000
4476 */
4477 if (sc.pm_count < 1000)
4478 log(LOG_WARNING,
4479 "pmcsetcount: passed sample rate %ju - setting to 1000\n",
4480 (uintmax_t)sc.pm_count);
4481 pm->pm_sc.pm_reloadcount = MAX(1000, sc.pm_count);
4482 } else
4483 pm->pm_sc.pm_initial = sc.pm_count;
4484 }
4485 break;
4486
4487
4488 /*
4489 * Start a PMC.
4490 */
4491
4492 case PMC_OP_PMCSTART:
4493 {
4494 pmc_id_t pmcid;
4495 struct pmc *pm;
4496 struct pmc_op_simple sp;
4497
4498 sx_assert(&pmc_sx, SX_XLOCKED);
4499
4500 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4501 break;
4502
4503 pmcid = sp.pm_pmcid;
4504
4505 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4506 break;
4507
4508 KASSERT(pmcid == pm->pm_id,
4509 ("[pmc,%d] pmcid %x != id %x", __LINE__,
4510 pm->pm_id, pmcid));
4511
4512 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
4513 break;
4514 else if (pm->pm_state != PMC_STATE_STOPPED &&
4515 pm->pm_state != PMC_STATE_ALLOCATED) {
4516 error = EINVAL;
4517 break;
4518 }
4519
4520 error = pmc_start(pm);
4521 }
4522 break;
4523
4524
4525 /*
4526 * Stop a PMC.
4527 */
4528
4529 case PMC_OP_PMCSTOP:
4530 {
4531 pmc_id_t pmcid;
4532 struct pmc *pm;
4533 struct pmc_op_simple sp;
4534
4535 PMC_DOWNGRADE_SX();
4536
4537 if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4538 break;
4539
4540 pmcid = sp.pm_pmcid;
4541
4542 /*
4543 * Mark the PMC as inactive and invoke the MD stop
4544 * routines if needed.
4545 */
4546
4547 if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4548 break;
4549
4550 KASSERT(pmcid == pm->pm_id,
4551 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
4552 pm->pm_id, pmcid));
4553
4554 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
4555 break;
4556 else if (pm->pm_state != PMC_STATE_RUNNING) {
4557 error = EINVAL;
4558 break;
4559 }
4560
4561 error = pmc_stop(pm);
4562 }
4563 break;
4564
4565
4566 /*
4567 * Write a user supplied value to the log file.
4568 */
4569
4570 case PMC_OP_WRITELOG:
4571 {
4572 struct pmc_op_writelog wl;
4573 struct pmc_owner *po;
4574
4575 PMC_DOWNGRADE_SX();
4576
4577 if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
4578 break;
4579
4580 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
4581 error = EINVAL;
4582 break;
4583 }
4584
4585 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
4586 error = EINVAL;
4587 break;
4588 }
4589
4590 error = pmclog_process_userlog(po, &wl);
4591 }
4592 break;
4593
4594
4595 default:
4596 error = EINVAL;
4597 break;
4598 }
4599
4600 if (is_sx_downgraded)
4601 sx_sunlock(&pmc_sx);
4602 else
4603 sx_xunlock(&pmc_sx);
4604done_syscall:
4605 if (error)
4606 counter_u64_add(pmc_stats.pm_syscall_errors, 1);
4607
4608 return (error);
4609}
4610
4611/*
4612 * Helper functions
4613 */
4614
4615
4616/*
4617 * Mark the thread as needing callchain capture and post an AST. The
4618 * actual callchain capture will be done in a context where it is safe
4619 * to take page faults.
4620 */
4621
4622static void
4623pmc_post_callchain_callback(void)
4624{
4625 struct thread *td;
4626
4627 td = curthread;
4628
4629 /*
4630 * If there are multiple PMCs for the same interrupt, ignore new posts.
4631 */
4632 if (td->td_pflags & TDP_CALLCHAIN)
4633 return;
4634
4635 /*
4636 * Mark this thread as needing callchain capture.
4637 * `td->td_pflags' will be safe to touch because this thread
4638 * was in user space when it was interrupted.
4639 */
4640 td->td_pflags |= TDP_CALLCHAIN;
4641
4642 /*
4643 * Don't let this thread migrate between CPUs until callchain
4644 * capture completes.
4645 */
4646 sched_pin();
4647
4648 return;
4649}
4650
4651/*
4652 * Find a free slot in the per-cpu array of samples and capture the
4653 * current callchain there. If a sample was successfully added, a bit
4654 * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
4655 * needs to be invoked from the clock handler.
4656 *
4657 * This function is meant to be called from an NMI handler. It cannot
4658 * use any of the locking primitives supplied by the OS.
4659 */
4660
4661static int
4662pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf)
4663{
4664 int error, cpu, callchaindepth, inuserspace;
4665 struct thread *td;
4666 struct pmc_sample *ps;
4667 struct pmc_samplebuffer *psb;
4668
4669 error = 0;
4670
4671 /*
4672 * Allocate space for a sample buffer.
4673 */
4674 cpu = curcpu;
4675 psb = pmc_pcpu[cpu]->pc_sb[ring];
4676 inuserspace = TRAPF_USERMODE(tf);
4677 ps = PMC_PROD_SAMPLE(psb);
4678 if (psb->ps_considx != psb->ps_prodidx &&
4679 ps->ps_nsamples) { /* in use, reader hasn't caught up */
4680 pm->pm_pcpu_state[cpu].pps_stalled = 1;
4681 counter_u64_add(pmc_stats.pm_intr_bufferfull, 1);
4682 PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
4683 cpu, pm, (void *) tf, inuserspace,
4684 (int) (psb->ps_prodidx & pmc_sample_mask),
4685 (int) (psb->ps_considx & pmc_sample_mask));
4686 callchaindepth = 1;
4687 error = ENOMEM;
4688 goto done;
4689 }
4690
4691 /* Fill in entry. */
4692 PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
4693 (void *) tf, inuserspace,
4694 (int) (psb->ps_prodidx & pmc_sample_mask),
4695 (int) (psb->ps_considx & pmc_sample_mask));
4696
4697 td = curthread;
4698 ps->ps_pmc = pm;
4699 ps->ps_td = td;
4700 ps->ps_pid = td->td_proc->p_pid;
4701 ps->ps_tid = td->td_tid;
4702 ps->ps_tsc = pmc_rdtsc();
4703 ps->ps_ticks = ticks;
4704 ps->ps_cpu = cpu;
4705 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
4706
4707 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
4708 pmc_callchaindepth : 1;
4709
4710 MPASS(ps->ps_pc != NULL);
4711 if (callchaindepth == 1)
4712 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
4713 else {
4714 /*
4715 * Kernel stack traversals can be done immediately,
4716 * while we defer to an AST for user space traversals.
4717 */
4718 if (!inuserspace) {
4719 callchaindepth =
4720 pmc_save_kernel_callchain(ps->ps_pc,
4721 callchaindepth, tf);
4722 } else {
4723 pmc_post_callchain_callback();
4724 callchaindepth = PMC_USER_CALLCHAIN_PENDING;
4725 }
4726 }
4727
4728 ps->ps_nsamples = callchaindepth; /* mark entry as in use */
4729 if (ring == PMC_UR) {
4730 ps->ps_nsamples_actual = callchaindepth; /* mark entry as in use */
4731 ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING;
4732 } else
4733 ps->ps_nsamples = callchaindepth; /* mark entry as in use */
4734
4735 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
4736 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4737 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4738
4739 counter_u64_add(pm->pm_runcount, 1); /* hold onto PMC */
4740 /* increment write pointer */
4741 psb->ps_prodidx++;
4742 done:
4743 /* mark CPU as needing processing */
4744 if (callchaindepth != PMC_USER_CALLCHAIN_PENDING)
4745 DPCPU_SET(pmc_sampled, 1);
4746
4747 return (error);
4748}
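
pmc_add_sample() never takes a lock: the producer index only moves forward at NMI time, the consumer index only moves forward in thread context, and both are masked into a power-of-two array. A minimal sketch of that indexing discipline, assuming a power-of-two DEMO_NSAMPLES (the driver sizes this via pmc_nsamples/pmc_sample_mask); the demo_* names are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_NSAMPLES	64			/* must be a power of two */
    #define DEMO_MASK	(DEMO_NSAMPLES - 1)

    struct demo_sample {
    	uint16_t nsamples;			/* 0 means the slot is free */
    };

    struct demo_samplebuffer {
    	uint64_t prodidx;			/* advanced only by the producer (NMI) */
    	uint64_t considx;			/* advanced only by the sweep code */
    	struct demo_sample samples[DEMO_NSAMPLES];
    };

    /*
     * Producer side: the same "reader hasn't caught up" test as in
     * pmc_add_sample(); the indices grow without bound and are masked
     * into the array, so with one producer and one consumer per CPU no
     * locking is required.
     */
    static struct demo_sample *
    demo_produce(struct demo_samplebuffer *b)
    {
    	struct demo_sample *s = &b->samples[b->prodidx & DEMO_MASK];

    	if (b->considx != b->prodidx && s->nsamples != 0)
    		return (NULL);			/* buffer full */
    	b->prodidx++;				/* claim the slot */
    	return (s);
    }
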
4749
4750/*
4751 * Interrupt processing.
4752 *
4753 * This function is meant to be called from an NMI handler. It cannot
4754 * use any of the locking primitives supplied by the OS.
4755 */
4756
4757int
4758pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf)
4759{
4760 struct thread *td;
4761
4762 td = curthread;
4763 if ((pm->pm_flags & PMC_F_USERCALLCHAIN) &&
4764 (td->td_proc->p_flag & P_KPROC) == 0 &&
4765 !TRAPF_USERMODE(tf)) {
4766 atomic_add_int(&td->td_pmcpend, 1);
4767 return (pmc_add_sample(PMC_UR, pm, tf));
4768 }
4769 return (pmc_add_sample(ring, pm, tf));
4770}
4771
4772/*
4773 * Capture a user call chain. This function will be called from ast()
4774 * before control returns to userland and before the process gets
4775 * rescheduled.
4776 */
4777
4778static void
4779pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
4780{
4781 struct pmc *pm;
4782 struct thread *td;
4783 struct pmc_sample *ps;
4784 struct pmc_samplebuffer *psb;
4785 uint64_t considx, prodidx;
4786 int nsamples, nrecords, pass, iter;
4787#ifdef INVARIANTS
4788 int ncallchains;
4789 int nfree;
4790 int start_ticks = ticks;
4791#endif
4792 psb = pmc_pcpu[cpu]->pc_sb[ring];
4793 td = curthread;
4794
4795 KASSERT(td->td_pflags & TDP_CALLCHAIN,
4796 ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
4797 __LINE__));
4798
4799#ifdef INVARIANTS
4800 ncallchains = 0;
4801 nfree = 0;
4802#endif
4803 nrecords = INT_MAX;
4804 pass = 0;
4805 restart:
4806 if (ring == PMC_UR)
4807 nrecords = atomic_readandclear_32(&td->td_pmcpend);
4808
4809 for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx;
4810 considx < prodidx && iter < pmc_nsamples; considx++, iter++) {
4811 ps = PMC_CONS_SAMPLE_OFF(psb, considx);
4812
4813 /*
4814 * Iterate through all deferred callchain requests.
4815 * Walk from the current read pointer to the current
4816 * write pointer.
4817 */
4818
4819#ifdef INVARIANTS
4820 if (ps->ps_nsamples == PMC_SAMPLE_FREE) {
4821 nfree++;
4822 continue;
4823 }
4824
4825 if ((ps->ps_pmc == NULL) ||
4826 (ps->ps_pmc->pm_state != PMC_STATE_RUNNING))
4827 nfree++;
4828#endif
4829 if (ps->ps_td != td ||
4830 ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING ||
4831 ps->ps_pmc->pm_state != PMC_STATE_RUNNING)
4832 continue;
4833
4834 KASSERT(ps->ps_cpu == cpu,
4835 ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
4836 ps->ps_cpu, PCPU_GET(cpuid)));
4837
4838 pm = ps->ps_pmc;
4839
4840 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
4841 ("[pmc,%d] Retrieving callchain for PMC that doesn't "
4842 "want it", __LINE__));
4843
4844 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4845 ("[pmc,%d] runcount %ld", __LINE__, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4846
4847 if (ring == PMC_UR) {
4848 nsamples = ps->ps_nsamples_actual;
4849 counter_u64_add(pmc_stats.pm_merges, 1);
4850 } else
4851 nsamples = 0;
4852
4853 /*
4854 * Retrieve the callchain and mark the sample buffer
4855 * as 'processable' by the timer tick sweep code.
4856 */
4857
4858#ifdef INVARIANTS
4859 ncallchains++;
4860#endif
4861
4862 if (__predict_true(nsamples < pmc_callchaindepth - 1))
4863 nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples,
4864 pmc_callchaindepth - nsamples - 1, tf);
4865
4866 /*
4867 * We have to prevent hardclock from potentially overwriting
4868 * this sample between when we read the value and when we set
4869 * it.
4870 */
4871 spinlock_enter();
4872 /*
4873 * Verify that the sample hasn't been dropped in the meantime
4874 */
4875 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
4876 ps->ps_nsamples = nsamples;
4877 /*
4878 * If we couldn't get a sample, simply drop the reference
4879 */
4880 if (nsamples == 0)
4881 counter_u64_add(pm->pm_runcount, -1);
4882 }
4883 spinlock_exit();
4884 if (nrecords-- == 1)
4885 break;
4886 }
4887 if (__predict_false(ring == PMC_UR && td->td_pmcpend)) {
4888 if (pass == 0) {
4889 pass = 1;
4890 goto restart;
4891 }
4892 /* only collect samples for this part once */
4893 td->td_pmcpend = 0;
4894 }
4895
4896#ifdef INVARIANTS
4897 if ((ticks - start_ticks) > hz)
4898 log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks));
4899#endif
4900
4901 /* mark CPU as needing processing */
4902 DPCPU_SET(pmc_sampled, 1);
4903}
4904
4905/*
4906 * Process saved PC samples.
4907 */
4908
4909static void
4910pmc_process_samples(int cpu, ring_type_t ring)
4911{
4912 struct pmc *pm;
4913 int adjri, n;
4914 struct thread *td;
4915 struct pmc_owner *po;
4916 struct pmc_sample *ps;
4917 struct pmc_classdep *pcd;
4918 struct pmc_samplebuffer *psb;
4919 uint64_t delta;
4920
4921 KASSERT(PCPU_GET(cpuid) == cpu,
4922 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4923 PCPU_GET(cpuid), cpu));
4924
4925 psb = pmc_pcpu[cpu]->pc_sb[ring];
4926 delta = psb->ps_prodidx - psb->ps_considx;
4927 MPASS(delta <= pmc_nsamples);
4928 MPASS(psb->ps_considx <= psb->ps_prodidx);
4929 for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) {
4930 ps = PMC_CONS_SAMPLE(psb);
4931
4932 if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE))
4933 continue;
4934 pm = ps->ps_pmc;
4935 /* skip non-running samples */
4936 if (pm->pm_state != PMC_STATE_RUNNING)
4937 goto entrydone;
4938
4939 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4940 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4941 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4942
4943 po = pm->pm_owner;
4944
4945 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4946 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4947 pm, PMC_TO_MODE(pm)));
4948
4949
4950 /* If there is a pending AST, wait for it to complete. */
4951 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
4952 /* If we have been waiting more than 1 tick to
4953 * collect a callchain for this record, then
4954 * drop it and move on.
4955 */
4956 if (ticks - ps->ps_ticks > 1) {
4957 /*
4958 * track how often we hit this as it will
4959 * preferentially lose user samples
4960 * for long running system calls
4961 */
4962 counter_u64_add(pmc_stats.pm_overwrites, 1);
4963 goto entrydone;
4964 }
4965 /* Need a rescan at a later time. */
4966 DPCPU_SET(pmc_sampled, 1);
4967 break;
4968 }
4969
4970 PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4971 pm, ps->ps_nsamples, ps->ps_flags,
4972 (int) (psb->ps_prodidx & pmc_sample_mask),
4973 (int) (psb->ps_considx & pmc_sample_mask));
4974
4975 /*
4976 * If this is a process-mode PMC that is attached to
4977 * its owner, and if the PC is in user mode, update
4978 * profiling statistics like timer-based profiling
4979 * would have done.
4980 *
4981 * Otherwise, this is either a sampling-mode PMC that
4982 * is attached to a different process than its owner,
4983 * or a system-wide sampling PMC. Dispatch a log
4984 * entry to the PMC's owner process.
4985 */
4986 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
4987 if (ps->ps_flags & PMC_CC_F_USERSPACE) {
4988 td = FIRST_THREAD_IN_PROC(po->po_owner);
4989 addupc_intr(td, ps->ps_pc[0], 1);
4990 }
4991 } else
4992 pmclog_process_callchain(pm, ps);
4993
4994 entrydone:
4995 ps->ps_nsamples = 0; /* mark entry as free */
4996 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4997 ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm,
4998 (unsigned long)counter_u64_fetch(pm->pm_runcount)));
4999
5000 counter_u64_add(pm->pm_runcount, -1);
5001 }
5002
5003 counter_u64_add(pmc_stats.pm_log_sweeps, 1);
5004
5005 /* Do not re-enable stalled PMCs if we failed to process any samples */
5006 if (n == 0)
5007 return;
5008
5009 /*
5010 * Restart any stalled sampling PMCs on this CPU.
5011 *
5012 * If the NMI handler sets the pm_stalled field of a PMC after
5013 * the check below, we'll end up processing the stalled PMC at
5014 * the next hardclock tick.
5015 */
5016 for (n = 0; n < md->pmd_npmc; n++) {
5017 pcd = pmc_ri_to_classdep(md, n, &adjri);
5018 KASSERT(pcd != NULL,
5019 ("[pmc,%d] null pcd ri=%d", __LINE__, n));
5020 (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
5021
5022 if (pm == NULL || /* !cfg'ed */
5023 pm->pm_state != PMC_STATE_RUNNING || /* !active */
5024 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
5025 !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */
5026 !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */
5027 continue;
5028
5029 pm->pm_pcpu_state[cpu].pps_stalled = 0;
5030 (*pcd->pcd_start_pmc)(cpu, adjri);
5031 }
5032}
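
Taken together, pmc_add_sample(), pmc_capture_user_callchain() and pmc_process_samples() drive each slot through a small state machine keyed off ps_nsamples. A sketch distilled from the sentinel values visible in this listing (PMC_SAMPLE_FREE == 0, PMC_USER_CALLCHAIN_PENDING == 0xFFFF); the demo_* names are illustrative:

    #include <stdint.h>

    /*
     * Distilled view of ps_nsamples as used above:
     *   0       -> slot is free
     *   0xFFFF  -> user callchain capture is pending (deferred to an AST)
     *   other   -> number of valid PC entries; ready to log
     */
    #define DEMO_SAMPLE_FREE	((uint16_t)0)
    #define DEMO_PENDING	((uint16_t)0xFFFF)

    static const char *
    demo_sample_state(uint16_t nsamples)
    {
    	if (nsamples == DEMO_SAMPLE_FREE)
    		return ("free");
    	if (nsamples == DEMO_PENDING)
    		return ("awaiting user callchain");
    	return ("ready to process");
    }
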
5033
5034/*
5035 * Event handlers.
5036 */
5037
5038/*
5039 * Handle a process exit.
5040 *
5041 * Remove this process from all hash tables. If this process
5042 * owned any PMCs, turn off those PMCs and deallocate them,
5043 * removing any associations with target processes.
5044 *
5045 * This function will be called by the last 'thread' of a
5046 * process.
5047 *
5048 * XXX This eventhandler gets called early in the exit process.
5049 * Consider using a 'hook' invocation from thread_exit() or equivalent
5050 * spot. Another negative is that kse_exit doesn't seem to call
5051 * exit1() [??].
5052 *
5053 */
5054
5055static void
5056 pmc_process_exit(void *arg __unused, struct proc *p)
5057{
5058 struct pmc *pm;
5059 int adjri, cpu;
5060 unsigned int ri;
5061 int is_using_hwpmcs;
5062 struct pmc_owner *po;
5063 struct pmc_process *pp;
5064 struct pmc_classdep *pcd;
5065 pmc_value_t newvalue, tmp;
5066
5067 PROC_LOCK(p);
5068 is_using_hwpmcs = p->p_flag & P_HWPMC;
5069 PROC_UNLOCK(p);
5070
5071 /*
5072 * Log a sysexit event to all SS PMC owners.
5073 */
5074 PMC_EPOCH_ENTER();
5075 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5076 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5077 pmclog_process_sysexit(po, p->p_pid);
5078 PMC_EPOCH_EXIT();
5079
5080 if (!is_using_hwpmcs)
5081 return;
5082
5083 PMC_GET_SX_XLOCK();
5084 PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
5085 p->p_comm);
5086
5087 /*
5088 * Since this code is invoked by the last thread in an exiting
5089 * process, we would have context switched IN at some prior
5090 * point. However, with PREEMPTION, kernel mode context
5091 * switches may happen any time, so we want to disable a
5092 * context switch OUT till we get any PMCs targeting this
5093 * process off the hardware.
5094 *
5095 * We also need to atomically remove this process'
5096 * entry from our target process hash table, using
5097 * PMC_FLAG_REMOVE.
5098 */
5099 PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
5100 p->p_comm);
5101
5102 critical_enter(); /* no preemption */
5103
5104 cpu = curthread->td_oncpu;
5105
5106 if ((pp = pmc_find_process_descriptor(p,
5107 PMC_FLAG_REMOVE)) != NULL) {
5108
5109 PMCDBG2(PRC,EXT,2,
5110 "process-exit proc=%p pmc-process=%p", p, pp);
5111
5112 /*
5113 * The exiting process could be the target of
5114 * some PMCs which will be running on the
5115 * currently executing CPU.
5116 *
5117 * We need to turn these PMCs off like we
5118 * would do at context switch OUT time.
5119 */
5120 for (ri = 0; ri < md->pmd_npmc; ri++) {
5121
5122 /*
5123 * Pick up the pmc pointer from hardware
5124 * state similar to the CSW_OUT code.
5125 */
5126 pm = NULL;
5127
5128 pcd = pmc_ri_to_classdep(md, ri, &adjri);
5129
5130 (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
5131
5132 PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
5133
5134 if (pm == NULL ||
5135 !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
5136 continue;
5137
5138 PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
5139 "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
5140 pm, pm->pm_state);
5141
5142 KASSERT(PMC_TO_ROWINDEX(pm) == ri,
5143 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
5144 __LINE__, PMC_TO_ROWINDEX(pm), ri));
5145
5146 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
5147 ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
5148 __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
5149
5150 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
5151 ("[pmc,%d] bad runcount ri %d rc %ld",
5152 __LINE__, ri, (unsigned long)counter_u64_fetch(pm->pm_runcount)));
5153
5154 /*
5155 * Change desired state, and then stop if not
5156 * stalled. This two-step dance should avoid
5157 * race conditions where an interrupt re-enables
5158 * the PMC after this code has already checked
5159 * the pm_stalled flag.
5160 */
5161 if (pm->pm_pcpu_state[cpu].pps_cpustate) {
5162 pm->pm_pcpu_state[cpu].pps_cpustate = 0;
5163 if (!pm->pm_pcpu_state[cpu].pps_stalled) {
5164 (void) pcd->pcd_stop_pmc(cpu, adjri);
5165
5166 if (PMC_TO_MODE(pm) == PMC_MODE_TC) {
5167 pcd->pcd_read_pmc(cpu, adjri,
5168 &newvalue);
5169 tmp = newvalue -
5170 PMC_PCPU_SAVED(cpu,ri);
5171
5172 mtx_pool_lock_spin(pmc_mtxpool,
5173 pm);
5174 pm->pm_gv.pm_savedvalue += tmp;
5175 pp->pp_pmcs[ri].pp_pmcval +=
5176 tmp;
5177 mtx_pool_unlock_spin(
5178 pmc_mtxpool, pm);
5179 }
5180 }
5181 }
5182
5183 KASSERT((int64_t) counter_u64_fetch(pm->pm_runcount) > 0,
5184 ("[pmc,%d] runcount is %d", __LINE__, ri));
5185
5186 counter_u64_add(pm->pm_runcount, -1);
5187
5188 (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
5189 }
5190
5191 /*
5192 * Inform the MD layer of this pseudo "context switch
5193 * out"
5194 */
5195 (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
5196
5197 critical_exit(); /* ok to be pre-empted now */
5198
5199 /*
5200 * Unlink this process from the PMCs that are
5201 * targeting it. This will send a signal to
5202 * all PMC owners whose PMCs are orphaned.
5203 *
5204 * Log PMC value at exit time if requested.
5205 */
5206 for (ri = 0; ri < md->pmd_npmc; ri++)
5207 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
5208 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
5209 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
5210 pmclog_process_procexit(pm, pp);
5211 pmc_unlink_target_process(pm, pp);
5212 }
5213 free(pp, M_PMC);
5214
5215 } else
5216 critical_exit(); /* pp == NULL */
5217
5218
5219 /*
5220 * If the process owned PMCs, free them up and free up
5221 * memory.
5222 */
5223 if ((po = pmc_find_owner_descriptor(p)) != NULL) {
5224 pmc_remove_owner(po);
5225 pmc_destroy_owner_descriptor(po);
5226 }
5227
5228 sx_xunlock(&pmc_sx);
5229}
5230
5231/*
5232 * Handle a process fork.
5233 *
5234 * If the parent process 'p1' is under HWPMC monitoring, then copy
5235 * over any attached PMCs that have 'do_descendants' semantics.
5236 */
5237
5238static void
5239 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
5240 int flags)
5241{
5242 int is_using_hwpmcs;
5243 unsigned int ri;
5244 uint32_t do_descendants;
5245 struct pmc *pm;
5246 struct pmc_owner *po;
5247 struct pmc_process *ppnew, *ppold;
5248
5249 (void) flags; /* unused parameter */
5250
5251 PROC_LOCK(p1);
5252 is_using_hwpmcs = p1->p_flag & P_HWPMC;
5253 PROC_UNLOCK(p1);
5254
5255 /*
5256 * If there are system-wide sampling PMCs active, we need to
5257 * log all fork events to their owner's logs.
5258 */
5259 PMC_EPOCH_ENTER()struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt
, &pmc_et)
;
5260 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)for ((po) = (__typeof__(*((&((&pmc_ss_owners))->clh_first
))))ck_pr_md_load_ptr(((&((&pmc_ss_owners))->clh_first
))); (po) && (ck_pr_fence_load(), 1); (po) = (__typeof__
(*((&((po))->po_ssnext.cle_next))))ck_pr_md_load_ptr((
(&((po))->po_ssnext.cle_next))))
5261 if (po->po_flags & PMC_PO_OWNS_LOGFILE0x00000001) {
5262 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
5263 pmclog_process_proccreate(po, newproc, 1);
5264 }
5265 PMC_EPOCH_EXIT();
5266
5267 if (!is_using_hwpmcs)
5268 return;
5269
5270 PMC_GET_SX_XLOCK();
5271 PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
5272 p1->p_pid, p1->p_comm, newproc);
5273
5274 /*
5275 * If the parent process (curthread->td_proc) is a
5276 * target of any PMCs, look for PMCs that are to be
5277 * inherited, and link these into the new process
5278 * descriptor.
5279 */
5280 if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
5281 PMC_FLAG_NONE)) == NULL)
5282 goto done; /* nothing to do */
5283
5284 do_descendants = 0;
5285 for (ri = 0; ri < md->pmd_npmc; ri++)
5286 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
5287 do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
5288 if (do_descendants == 0) /* nothing to do */
5289 goto done;
5290
5291 /*
5292 * Now mark the new process as being tracked by this driver.
5293 */
5294 PROC_LOCK(newproc);
5295 newproc->p_flag |= P_HWPMC;
5296 PROC_UNLOCK(newproc);
5297
5298 /* allocate a descriptor for the new process */
5299 if ((ppnew = pmc_find_process_descriptor(newproc,
5300 PMC_FLAG_ALLOCATE)) == NULL)
5301 goto done;
5302
5303 /*
5304 * Run through all PMCs that were targeting the old process
5305 * and which specified F_DESCENDANTS and attach them to the
5306 * new process.
5307 *
5308 * Log the fork event to all owners of PMCs attached to this
5309 * process, if not already logged.
5310 */
5311 for (ri = 0; ri < md->pmd_npmc; ri++)
5312 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
5313 (pm->pm_flags & PMC_F_DESCENDANTS)) {
5314 pmc_link_target_process(pm, ppnew);
5315 po = pm->pm_owner;
5316 if (po->po_sscount == 0 &&
5317 po->po_flags & PMC_PO_OWNS_LOGFILE)
5318 pmclog_process_procfork(po, p1->p_pid,
5319 newproc->p_pid);
5320 }
5321
5322 done:
5323 sx_xunlock(&pmc_sx);
5324}
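
From userland, the inheritance that pmc_process_fork() implements is requested at allocation time via PMC_F_DESCENDANTS. A hedged sketch against pmc(3), assuming the five-argument pmc_allocate() of this era (newer trees add a sample-count argument) and an "instructions" event spec that varies by CPU:

#include <sys/types.h>
#include <pmc.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
	pmc_id_t pmcid;

	if (pmc_init() < 0)
		err(1, "pmc_init");
	/* Counting-mode process PMC; children inherit it at fork(). */
	if (pmc_allocate("instructions", PMC_MODE_TC, PMC_F_DESCENDANTS,
	    PMC_CPU_ANY, &pmcid) < 0)
		err(1, "pmc_allocate");
	if (pmc_attach(pmcid, getpid()) < 0)
		err(1, "pmc_attach");
	if (pmc_start(pmcid) < 0)
		err(1, "pmc_start");
	/* fork()/exec a workload here; hwpmc copies the PMC to the child. */
	return (0);
}
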
5325
5326static void
5327pmc_process_threadcreate(struct thread *td)
5328{
5329 struct pmc_owner *po;
5330
5331 PMC_EPOCH_ENTER();
5332 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5333 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5334 pmclog_process_threadcreate(po, td, 1);
5335 PMC_EPOCH_EXIT();
5336}
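
The PMC_EPOCH_ENTER()/PMC_EPOCH_EXIT() pairs above (per their expansions, epoch_enter_preempt()/epoch_exit_preempt() on the preemptible global epoch) let the CK_LIST walk over pmc_ss_owners run locklessly: unlinked nodes are not freed until all readers drain. A minimal sketch of that read-side pattern with a hypothetical pmc_demo list:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/epoch.h>

#include <ck_queue.h>

struct pmc_demo_node {
	int				val;
	CK_LIST_ENTRY(pmc_demo_node)	link;
};

static CK_LIST_HEAD(, pmc_demo_node) pmc_demo_list;

static void
pmc_demo_list_init(void)
{
	CK_LIST_INIT(&pmc_demo_list);
}

static int
pmc_demo_sum(void)
{
	struct epoch_tracker et;
	struct pmc_demo_node *n;
	int sum = 0;

	/* Readers take no lock; frees are deferred until readers drain. */
	epoch_enter_preempt(global_epoch_preempt, &et);
	CK_LIST_FOREACH(n, &pmc_demo_list, link)
		sum += n->val;
	epoch_exit_preempt(global_epoch_preempt, &et);
	return (sum);
}
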
5337
5338static void
5339pmc_process_threadexit(struct thread *td)
5340{
5341 struct pmc_owner *po;
5342
5343 PMC_EPOCH_ENTER();
5344 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5345 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5346 pmclog_process_threadexit(po, td);
5347 PMC_EPOCH_EXIT();
5348}
5349
5350static void
5351pmc_process_proccreate(struct proc *p)
5352{
5353 struct pmc_owner *po;
5354
5355 PMC_EPOCH_ENTER();
5356 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5357 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5358 pmclog_process_proccreate(po, p, 1 /* sync */);
5359 PMC_EPOCH_EXIT();
5360}
5361
5362static void
5363pmc_process_allproc(struct pmc *pm)
5364{
5365 struct pmc_owner *po;
5366 struct thread *td;
5367 struct proc *p;
5368
5369 po = pm->pm_owner;
5370 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
5371 return;
5372 sx_slock(&allproc_lock);
5373 FOREACH_PROC_IN_SYSTEM(p) {
5374 pmclog_process_proccreate(po, p, 0 /* sync */);
5375 PROC_LOCK(p);
5376 FOREACH_THREAD_IN_PROC(p, td)
5377 pmclog_process_threadcreate(po, td, 0 /* sync */);
5378 PROC_UNLOCK(p);
5379 }
5380 sx_sunlock(&allproc_lock);
5381 pmclog_flush(po, 0);
5382}
5383
5384static void
5385pmc_kld_load(void *arg __unused, linker_file_t lf)
5386{
5387 struct pmc_owner *po;
5388
5389 /*
5390 * Notify owners of system sampling PMCs about KLD operations.
5391 */
5392 PMC_EPOCH_ENTER();
5393 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5394 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5395 pmclog_process_map_in(po, (pid_t) -1,
5396 (uintfptr_t) lf->address, lf->filename);
5397 PMC_EPOCH_EXIT();
5398
5399 /*
5400 * TODO: Notify owners of (all) process-sampling PMCs too.
5401 */
5402}
5403
5404static void
5405pmc_kld_unload(void *arg __unused, const char *filename __unused,
5406 caddr_t address, size_t size)
5407{
5408 struct pmc_owner *po;
5409
5410 PMC_EPOCH_ENTER();
5411 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
5412 if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5413 pmclog_process_map_out(po, (pid_t) -1,
5414 (uintfptr_t) address, (uintfptr_t) address + size);
5415 PMC_EPOCH_EXIT();
5416
5417 /*
5418 * TODO: Notify owners of process-sampling PMCs.
5419 */
5420}
5421
5422/*
5423 * initialization
5424 */
5425static const char *
5426pmc_name_of_pmcclass(enum pmc_class class)
5427{
5428
5429 switch (class) {
5430#undef __PMC_CLASS
5431#define __PMC_CLASS(S,V,D) \
5432 case PMC_CLASS_##S: \
5433 return #S;
5434 __PMC_CLASSES();
5435 default:
5436 return ("<unknown>");
5437 }
5438}
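
pmc_name_of_pmcclass() is generated with the X-macro technique: __PMC_CLASSES() is a list of __PMC_CLASS(S,V,D) invocations, and the local redefinition above stamps out one case per class, so adding a class to the central list updates this function automatically. A self-contained illustration of the trick with a hypothetical COLOR_LIST():

#include <stdio.h>

#define COLOR_LIST()	\
	COLOR(RED)	\
	COLOR(GREEN)	\
	COLOR(BLUE)

enum color {
#define COLOR(name)	COLOR_ ## name,
	COLOR_LIST()
#undef COLOR
};

static const char *
color_name(enum color c)
{
	switch (c) {
#define COLOR(name)	case COLOR_ ## name: return (#name);
	COLOR_LIST()
#undef COLOR
	default:
		return ("<unknown>");
	}
}

int
main(void)
{
	printf("%s\n", color_name(COLOR_GREEN));	/* prints GREEN */
	return (0);
}
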
5439
5440/*
5441 * Base class initializer: allocate structure and set default classes.
5442 */
5443struct pmc_mdep *
5444pmc_mdep_alloc(int nclasses)
5445{
5446 struct pmc_mdep *md;
5447 int n;
5448
5449 /* SOFT + md classes */
5450 n = 1 + nclasses;
5451 md = malloc(sizeof(struct pmc_mdep) + n *
5452 sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
5453 md->pmd_nclass = n;
5454
5455 /* Add base class. */
5456 pmc_soft_initialize(md);
5457 return md;
5458}
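
pmc_mdep_alloc() makes a single allocation sized for the descriptor plus n trailing struct pmc_classdep slots, which md->pmd_classdep then indexes; header and array live in one M_ZERO'd block released by the single free() in pmc_mdep_free(). A userland sketch of the same trailing-array layout (struct hdr and struct slot are hypothetical):

#include <stdlib.h>

struct slot {
	int	id;
};

struct hdr {
	int		nslots;
	struct slot	slots[];	/* flexible array member */
};

static struct hdr *
hdr_alloc(int n)
{
	struct hdr *h;

	/* One zeroed allocation covers the header and all n slots. */
	h = calloc(1, sizeof(*h) + n * sizeof(struct slot));
	if (h != NULL)
		h->nslots = n;
	return (h);	/* a single free(h) releases everything */
}
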
5459
5460void
5461pmc_mdep_free(struct pmc_mdep *md)
5462{
5463 pmc_soft_finalize(md);
5464 free(md, M_PMC);
5465}
5466
5467static int
5468generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
5469{
5470 (void) pc; (void) pp;
5471
5472 return (0);
5473}
5474
5475static int
5476generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
5477{
5478 (void) pc; (void) pp;
5479
5480 return (0);
5481}
5482
5483static struct pmc_mdep *
5484pmc_generic_cpu_initialize(void)
5485{
5486 struct pmc_mdep *md;
5487
5488 md = pmc_mdep_alloc(0);
5489
5490 md->pmd_cputype = PMC_CPU_GENERIC;
5491
5492 md->pmd_pcpu_init = NULL;
5493 md->pmd_pcpu_fini = NULL;
5494 md->pmd_switch_in = generic_switch_in;
5495 md->pmd_switch_out = generic_switch_out;
5496
5497 return (md);
5498}
5499
5500static void
5501pmc_generic_cpu_finalize(struct pmc_mdep *md)
5502{
5503 (void) md;
5504}
5505
5506
5507static int
5508pmc_initialize(void)
5509{
5510 int c, cpu, error, n, ri;
5511 unsigned int maxcpu, domain;
5512 struct pcpu *pc;
5513 struct pmc_binding pb;
5514 struct pmc_sample *ps;
5515 struct pmc_classdep *pcd;
5516 struct pmc_samplebuffer *sb;
5517
5518 md = NULL;
5519 error = 0;
5520
5521 pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK);
5522 pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK);
5523 pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK);
5524 pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK);
5525 pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK);
5526 pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK);
5527 pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK);
5528 pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK);
5529 pmc_stats.pm_merges = counter_u64_alloc(M_WAITOK);
5530 pmc_stats.pm_overwrites = counter_u64_alloc(M_WAITOK);
5531
5532#ifdef HWPMC_DEBUG
5533 /* parse debug flags first */
5534 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
5535 pmc_debugstr, sizeof(pmc_debugstr)))
5536 pmc_debugflags_parse(pmc_debugstr,
5537 pmc_debugstr+strlen(pmc_debugstr));
5538#endif
5539
5540 PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
5541
5542 /* check kernel version */
5543 if (pmc_kernel_version != PMC_VERSION) {
5544 if (pmc_kernel_version == 0)
5545 printf("hwpmc: this kernel has not been compiled with "
5546 "'options HWPMC_HOOKS'.\n");
5547 else
5548 printf("hwpmc: kernel version (0x%x) does not match "
5549 "module version (0x%x).\n", pmc_kernel_version,
5550 PMC_VERSION);
5551 return EPROGMISMATCH;
5552 }
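
The expansion of PMC_VERSION visible above, (0x09 << 24 | 0x03 << 16 | 0x0000), shows the encoding the check relies on: major in bits 31-24, minor in bits 23-16, patch in the low 16 bits, so one inequality catches any kernel/module skew. A small illustration of unpacking that encoding (the DEMO_ name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Matches the expansion seen above: major 9, minor 3, patch 0. */
#define DEMO_PMC_VERSION	(0x09 << 24 | 0x03 << 16 | 0x0000)

int
main(void)
{
	uint32_t v = DEMO_PMC_VERSION;

	printf("major=%u minor=%u patch=%u\n",
	    (v >> 24) & 0xff, (v >> 16) & 0xff, v & 0xffff);
	return (0);
}
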
5553
5554 /*
5555 * check sysctl parameters
5556 */
5557
5558 if (pmc_hashsize <= 0) {
5559 (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
5560 "greater than zero.\n", pmc_hashsize);
5561 pmc_hashsize = PMC_HASH_SIZE;
5562 }
5563
5564 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
5565 (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
5566 "range.\n", pmc_nsamples);
5567 pmc_nsamples = PMC_NSAMPLES;
5568 }
5569 pmc_sample_mask = pmc_nsamples-1;
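	/*
	 * NB: pmc_sample_mask is a valid ring-index mask only when
	 * pmc_nsamples is a power of two (the PMC_NSAMPLES default is);
	 * the range check above does not appear to round a hand-set
	 * tunable.
	 */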
5570
5571 if (pmc_callchaindepth <= 0 ||
5572 pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
5573 (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
5574 "range - using %d.\n", pmc_callchaindepth,
5575 PMC_CALLCHAIN_DEPTH_MAX);
5576 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX;
5577 }
5578
5579 md = pmc_md_initialize();
5580 if (md == NULL) {
5581 /* Default to generic CPU. */
5582 md = pmc_generic_cpu_initialize();
5583 if (md == NULL)
5584 return (ENOSYS);
5585 }
5586
5587 KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
5588 ("[pmc,%d] no classes or pmcs", __LINE__));
5589
5590 /* Compute the map from row-indices to classdep pointers. */
5591 pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
5592 md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);
5593
5594 for (n = 0; n < md->pmd_npmc; n++)
5595 pmc_rowindex_to_classdep[n] = NULL;
5596 for (ri = c = 0; c < md->pmd_nclass; c++) {
5597 pcd = &md->pmd_classdep[c];
5598 for (n = 0; n < pcd->pcd_num; n++, ri++)
5599 pmc_rowindex_to_classdep[ri] = pcd;
5600 }
5601
5602 KASSERT(ri == md->pmd_npmc,
5603 ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
5604 ri, md->pmd_npmc));
5605
5606 maxcpu = pmc_cpu_max();
5607
5608 /* allocate space for the per-cpu array */
5609 pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
5610 M_WAITOK|M_ZERO);
5611
5612 /* per-cpu 'saved values' for managing process-mode PMCs */
5613 pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
5614 M_PMC, M_WAITOK);
5615
5616 /* Perform CPU-dependent initialization. */
5617 pmc_save_cpu_binding(&pb);
5618 error = 0;
5619 for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
5620 if (!pmc_cpu_is_active(cpu))
5621 continue;
5622 pmc_select_cpu(cpu);
5623 pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
5624 md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
5625 M_WAITOK|M_ZERO);
5626 if (md->pmd_pcpu_init)
5627 error = md->pmd_pcpu_init(md, cpu);
5628 for (n = 0; error == 0 && n < md->pmd_nclass; n++)
5629 error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
5630 }
5631 pmc_restore_cpu_binding(&pb);
5632
5633 if (error)
5634 return (error);
5635
5636 /* allocate space for the sample array */
5637 for (cpu = 0; cpu < maxcpu; cpu++) {
5638 if (!pmc_cpu_is_active(cpu))
5639 continue;
5640 pc = pcpu_find(cpu);
5641 domain = pc->pc_domain;
5642 sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
5643 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
5644 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5645
5646 KASSERT(pmc_pcpu[cpu] != NULL,
5647 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
5648
5649 sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
5650 pmc_nsamples * sizeof(uintptr_t), M_PMC,
5651 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5652
5653 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5654 ps->ps_pc = sb->ps_callchains +
5655 (n * pmc_callchaindepth);
5656
5657 pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
5658
5659 sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
5660 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
5661 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5662
5663 sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
5664 pmc_nsamples * sizeof(uintptr_t), M_PMC,
5665 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5666 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5667 ps->ps_pc = sb->ps_callchains +
5668 (n * pmc_callchaindepth);
5669
5670 pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
5671
5672 sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
5673 pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
5674 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5675 sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
5676 pmc_nsamples * sizeof(uintptr_t), M_PMC,
5677 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5678 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5679 ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth;
5680
5681 pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb;
5682 }
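
Each of the three per-CPU buffers (PMC_HR, PMC_SR, PMC_UR) is carved identically: one flat ps_callchains block of pmc_callchaindepth * pmc_nsamples slots, with sample n's ps_pc aimed at its own depth-sized slice, so a single free releases every callchain. A userland sketch of that carving (all names hypothetical):

#include <stdint.h>
#include <stdlib.h>

struct sample {
	uintptr_t *pc;		/* this sample's callchain slice */
};

/*
 * One flat callchain block for all samples; sample n points at its own
 * depth-sized slice, as pmc_initialize() does with ps_callchains above.
 */
static uintptr_t *
carve_callchains(struct sample *samples, size_t nsamples, size_t depth)
{
	uintptr_t *chains;
	size_t n;

	chains = calloc(nsamples * depth, sizeof(uintptr_t));
	if (chains == NULL)
		return (NULL);
	for (n = 0; n < nsamples; n++)
		samples[n].pc = chains + n * depth;
	return (chains);	/* free this one block to release all slices */
}
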
5683
5684 /* allocate space for the row disposition array */
5685 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
5686 M_PMC, M_WAITOK|M_ZERO);
5687
5688 /* mark all PMCs as available */
5689 for (n = 0; n < (int) md->pmd_npmc; n++)
5690 PMC_MARK_ROW_FREE(n);
5691
5692 /* allocate owner and process hash tables */
5693 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
5694 &pmc_ownerhashmask);
5695
5696 pmc_processhash = hashinit(pmc_hashsize, M_PMC,
5697 &pmc_processhashmask);
5698 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
5699 MTX_SPIN);
5700
5701 CK_LIST_INIT(&pmc_ss_owners);
5702 pmc_ss_count = 0;
5703
5704 /* allocate a pool of spin mutexes */
5705 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
5706 MTX_SPIN);
5707
5708 PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
5709 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
5710 pmc_processhash, pmc_processhashmask);
5711
5712 /* Initialize a spin mutex for the thread free list. */
5713 mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf",
5714 MTX_SPIN);
5715
5716 /*
5717 * Initialize the task to monitor the thread free list.
5718 * This task will also handle the initial population of the list.
5719 */
5720 taskqgroup_config_gtask_init(NULL, &free_gtask, pmc_thread_descriptor_pool_free_task, "thread descriptor pool free task");
5721
5722 /* register process {exit,fork,exec} handlers */
5723 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
5724 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
5725 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
5726 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
5727
5728 /* register kld event handlers */
5729 pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load,
5730 NULL, EVENTHANDLER_PRI_ANY);
5731 pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload,
5732 NULL, EVENTHANDLER_PRI_ANY);
5733
5734 /* initialize logging */
5735 pmclog_initialize();
5736
5737 /* set hook functions */
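	/*
	 * Order matters below: pmc_intr is stored first and wmb() fences
	 * the stores, so any consumer that observes a non-NULL pmc_hook
	 * also sees the final pmc_intr value.
	 */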
5738 pmc_intr = md->pmd_intr;
5739 wmb();
5740 pmc_hook = pmc_hook_handler;
5741
5742 if (error == 0) {
5743 printf(PMC_MODULE_NAME ":");
5744 for (n = 0; n < (int) md->pmd_nclass; n++) {
5745 pcd = &md->pmd_classdep[n];
5746 printf(" %s/%d/%d/0x%b",
5747 pmc_name_of_pmcclass(pcd->pcd_class),
5748 pcd->pcd_num,
5749 pcd->pcd_width,
5750 pcd->pcd_caps,
5751 "\20"
5752 "\1INT\2USR\3SYS\4EDG\5THR"
5753 "\6REA\7WRI\10INV\11QUA\12PRC"
5754 "\13TAG\14CSC");
5755 }
5756 printf("\n");
5757 }
5758
5759 return (error);
5760}
5761
5762/* prepare to be unloaded */
5763static void
5764pmc_cleanup(void)
5765{
5766 int c, cpu;
5767 unsigned int maxcpu;
5768 struct pmc_ownerhash *ph;
5769 struct pmc_owner *po, *tmp;
5770 struct pmc_binding pb;
5771#ifdef HWPMC_DEBUG
5772 struct pmc_processhash *prh;
5773#endif
5774
5775 PMCDBG0(MOD,INI,0, "cleanup");
5776
5777 /* switch off sampling */
5778 CPU_FOREACH(cpu)
5779 DPCPU_ID_SET(cpu, pmc_sampled, 0);
5780 pmc_intr = NULL;
5781
5782 sx_xlock(&pmc_sx);
5783 if (pmc_hook == NULL) { /* being unloaded already */
5784 sx_xunlock(&pmc_sx);
5785 return;
5786 }
5787
5788 pmc_hook = NULL; /* prevent new threads from entering module */
5789
5790 /* deregister event handlers */
5791 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
5792 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
5793 EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag);
5794 EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag);
5795
5796 /* send SIGBUS to all owner threads, free up allocations */
5797 if (pmc_ownerhash)
5798 for (ph = pmc_ownerhash;
5799 ph <= &pmc_ownerhash[pmc_ownerhashmask];
5800 ph++) {
5801 LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
5802 pmc_remove_owner(po);
5803
5804 /* send SIGBUS to owner processes */
5805 PMCDBG3(MOD,INI,2, "cleanup signal proc=%p "
5806 "(%d, %s)", po->po_owner,
5807 po->po_owner->p_pid,
5808 po->po_owner->p_comm);
5809
5810 PROC_LOCK(po->po_owner);
5811 kern_psignal(po->po_owner, SIGBUS);
5812 PROC_UNLOCK(po->po_owner);
5813
5814 pmc_destroy_owner_descriptor(po);
5815 }
5816 }
5817
5818 /* reclaim allocated data structures */
5819 mtx_destroy(&pmc_threadfreelist_mtx);
5820 pmc_thread_descriptor_pool_drain();
5821
5822 if (pmc_mtxpool)
5823 mtx_pool_destroy(&pmc_mtxpool);
5824
5825 mtx_destroy(&pmc_processhash_mtx);
5826 taskqgroup_config_gtask_deinit(&free_gtask);
5827 if (pmc_processhash) {
5828#ifdef HWPMC_DEBUG
5829 struct pmc_process *pp;
5830
5831 PMCDBG0(MOD,INI,3, "destroy process hash");
5832 for (prh = pmc_processhash;
5833 prh <= &pmc_processhash[pmc_processhashmask];
5834 prh++)
5835 LIST_FOREACH(pp, prh, pp_next)
5836 PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
5837#endif
5838
5839 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
5840 pmc_processhash = NULL;
5841 }
5842
5843 if (pmc_ownerhash) {
5844 PMCDBG0(MOD,INI,3, "destroy owner hash");
5845 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
5846 pmc_ownerhash = NULL;
5847 }
5848
5849 KASSERT(CK_LIST_EMPTY(&pmc_ss_owners),
5850 ("[pmc,%d] Global SS owner list not empty", __LINE__));
5851 KASSERT(pmc_ss_count == 0,
5852 ("[pmc,%d] Global SS count not empty", __LINE__));
5853
5854 /* do processor and pmc-class dependent cleanup */
5855 maxcpu = pmc_cpu_max();
5856
5857 PMCDBG0(MOD,INI,3, "md cleanup");
5858 if (md) {
5859 pmc_save_cpu_binding(&pb);
5860 for (cpu = 0; cpu < maxcpu; cpu++) {
5861 PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
5862 cpu, pmc_pcpu[cpu]);
5863 if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
5864 continue;
5865 pmc_select_cpu(cpu);
5866 for (c = 0; c < md->pmd_nclass; c++)
5867 md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
5868 if (md->pmd_pcpu_fini)
5869 md->pmd_pcpu_fini(md, cpu);
5870 }
5871
5872 if (md->pmd_cputype == PMC_CPU_GENERIC)
5873 pmc_generic_cpu_finalize(md);
5874 else
5875 pmc_md_finalize(md);
5876
5877 pmc_mdep_free(md);
5878 md = NULL;
5879 pmc_restore_cpu_binding(&pb);
5880 }
5881
5882 /* Free per-cpu descriptors. */
5883 for (cpu = 0; cpu < maxcpu; cpu++) {
5884 if (!pmc_cpu_is_active(cpu))
5885 continue;
5886 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
5887 ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
5888 cpu));
5889 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
5890 ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
5891 cpu));
5892 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL,
5893 ("[pmc,%d] Null userret cpu sample buffer cpu=%d", __LINE__,
5894 cpu));
5895 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
5896 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
5897 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
5898 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
5899 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC);
5900 free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC);
5901 free_domain(pmc_pcpu[cpu], M_PMC);
5902 }
5903
5904 free(pmc_pcpu, M_PMC);
5905 pmc_pcpu = NULL;
5906
5907 free(pmc_pcpu_saved, M_PMC);
5908 pmc_pcpu_saved = NULL;
5909
5910 if (pmc_pmcdisp) {
5911 free(pmc_pmcdisp, M_PMC);
5912 pmc_pmcdisp = NULL;
5913 }
5914
5915 if (pmc_rowindex_to_classdep) {
5916 free(pmc_rowindex_to_classdep, M_PMC);
5917 pmc_rowindex_to_classdep = NULL;
5918 }
5919
5920 pmclog_shutdown();
5921 counter_u64_free(pmc_stats.pm_intr_ignored);
5922 counter_u64_free(pmc_stats.pm_intr_processed);
5923 counter_u64_free(pmc_stats.pm_intr_bufferfull);
5924 counter_u64_free(pmc_stats.pm_syscalls);
5925 counter_u64_free(pmc_stats.pm_syscall_errors);
5926 counter_u64_free(pmc_stats.pm_buffer_requests);
5927 counter_u64_free(pmc_stats.pm_buffer_requests_failed);
5928 counter_u64_free(pmc_stats.pm_log_sweeps);
5929 counter_u64_free(pmc_stats.pm_merges);
5930 counter_u64_free(pmc_stats.pm_overwrites);
5931 sx_xunlock(&pmc_sx); /* we are done */
5932}
5933
5934/*
5935 * The function called at load/unload.
5936 */
5937
5938static int
5939load (struct module *module __unused, int cmd, void *arg __unused)
5940{
5941 int error;
5942
5943 error = 0;
5944
5945 switch (cmd) {
5946 case MOD_LOAD :
5947 /* initialize the subsystem */
5948 error = pmc_initialize();
5949 if (error != 0)
5950 break;
5951 PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d",
5952 pmc_syscall_num, pmc_cpu_max());
5953 break;
5954
5955
5956 case MOD_UNLOAD :
5957 case MOD_SHUTDOWN:
5958 pmc_cleanup();
5959 PMCDBG0(MOD,INI,1, "unloaded");
5960 break;
5961
5962 default :
5963 error = EINVAL; /* XXX should panic(9) */
5964 break;
5965 }
5966
5967 return error;
5968}
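
A handler of load()'s shape is normally wired to the kernel through the standard moduledata_t/DECLARE_MODULE(9) boilerplate; a hedged sketch, with the module name, subsystem, and order chosen for illustration rather than taken from this file:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>

/* Hypothetical wiring; the real file's name and ordering may differ. */
static moduledata_t pmc_demo_mod = {
	"pmc_demo",		/* module name */
	load,			/* the event handler defined above */
	NULL			/* extra data */
};

DECLARE_MODULE(pmc_demo, pmc_demo_mod, SI_SUB_SMP, SI_ORDER_ANY);
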