File: bsd/kern/kern_time.c
Warning: line 264, column 12: Copies out a struct with uncleared padding (>= 4 bytes)
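The flagged statement is the copyout() of a stack-allocated struct user64_timeval in adjtime() (file line 264 below). The struct is filled field by field but never cleared as a whole, so the compiler-inserted padding still holds stale kernel stack bytes when the full sizeof(user_atv) is copied to user space. gettimeofday() (lines 134 and 139) and getitimer() (lines 398 and 406) in this same file already avoid this by zero-initializing or bzero()ing the struct first. Below is a minimal sketch of the problem and the usual fix; the struct layout and the function names are illustrative assumptions (chosen to match the ">= 4 bytes" in the warning), and the kernel's copyout()/user_addr_t declarations are assumed to be in scope.

/* Layout assumed for illustration: 8-byte tv_sec + 4-byte tv_usec leaves
 * 4 bytes of tail padding on LP64. */
struct padded_timeval {
    int64_t tv_sec;
    int32_t tv_usec;
    /* 4 bytes of implicit tail padding */
};

/* What the analyzer flags at line 264: the padding bytes still hold stale
 * kernel stack data when the whole struct is copied out. */
static int
copyout_olddelta_leaky(user_addr_t dst, int64_t sec, int32_t usec)
{
    struct padded_timeval out;                  /* padding left uninitialized */
    out.tv_sec = sec;
    out.tv_usec = usec;
    return copyout(&out, dst, sizeof(out));     /* copies the uncleared padding */
}

/* The usual fix, and the pattern gettimeofday()/getitimer() already use:
 * clear the whole object before filling it. */
static int
copyout_olddelta_fixed(user_addr_t dst, int64_t sec, int32_t usec)
{
    struct padded_timeval out = {};             /* or bzero(&out, sizeof(out)) */
    out.tv_sec = sec;
    out.tv_usec = usec;
    return copyout(&out, dst, sizeof(out));
}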
1 | /*
2 |  * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 |  *
4 |  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 |  *
6 |  * This file contains Original Code and/or Modifications of Original Code
7 |  * as defined in and that are subject to the Apple Public Source License
8 |  * Version 2.0 (the 'License'). You may not use this file except in
9 |  * compliance with the License. The rights granted to you under the License
10 |  * may not be used to create, or enable the creation or redistribution of,
11 |  * unlawful or unlicensed copies of an Apple operating system, or to
12 |  * circumvent, violate, or enable the circumvention or violation of, any
13 |  * terms of an Apple operating system software license agreement.
14 |  *
15 |  * Please obtain a copy of the License at
16 |  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 |  *
18 |  * The Original Code and all software distributed under the License are
19 |  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 |  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 |  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 |  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 |  * Please see the License for the specific language governing rights and
24 |  * limitations under the License.
25 |  *
26 |  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 |  */
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 | /*
30 |  * Copyright (c) 1982, 1986, 1989, 1993
31 |  *      The Regents of the University of California.  All rights reserved.
32 |  *
33 |  * Redistribution and use in source and binary forms, with or without
34 |  * modification, are permitted provided that the following conditions
35 |  * are met:
36 |  * 1. Redistributions of source code must retain the above copyright
37 |  *    notice, this list of conditions and the following disclaimer.
38 |  * 2. Redistributions in binary form must reproduce the above copyright
39 |  *    notice, this list of conditions and the following disclaimer in the
40 |  *    documentation and/or other materials provided with the distribution.
41 |  * 3. All advertising materials mentioning features or use of this software
42 |  *    must display the following acknowledgement:
43 |  *      This product includes software developed by the University of
44 |  *      California, Berkeley and its contributors.
45 |  * 4. Neither the name of the University nor the names of its contributors
46 |  *    may be used to endorse or promote products derived from this software
47 |  *    without specific prior written permission.
48 |  *
49 |  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 |  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 |  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 |  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 |  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 |  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 |  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 |  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 |  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 |  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 |  * SUCH DAMAGE.
60 |  *
61 |  *      @(#)kern_time.c 8.4 (Berkeley) 5/26/95
62 |  */
63 | /*
64 |  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 |  * support for mandatory and extensible security protections.  This notice
66 |  * is included in support of clause 2.2 (b) of the Apple Public License,
67 |  * Version 2.0.
68 |  */
69 |
70 | #include <sys/param.h>
71 | #include <sys/resourcevar.h>
72 | #include <sys/kernel.h>
73 | #include <sys/systm.h>
74 | #include <sys/proc_internal.h>
75 | #include <sys/kauth.h>
76 | #include <sys/vnode.h>
77 | #include <sys/time.h>
78 | #include <sys/priv.h>
79 |
80 | #include <sys/mount_internal.h>
81 | #include <sys/sysproto.h>
82 | #include <sys/signalvar.h>
83 | #include <sys/protosw.h> /* for net_uptime2timeval() */
84 |
85 | #include <kern/clock.h>
86 | #include <kern/task.h>
87 | #include <kern/thread_call.h>
88 | #if CONFIG_MACF
89 | #include <security/mac_framework.h>
90 | #endif
91 |
92 | #define HZ      100     /* XXX */
93 |
94 | /* simple lock used to access timezone, tz structure */
95 | lck_spin_t * tz_slock;
96 | lck_grp_t * tz_slock_grp;
97 | lck_attr_t * tz_slock_attr;
98 | lck_grp_attr_t *tz_slock_grp_attr;
99 |
100 | static void setthetime(
101 |     struct timeval *tv);
102 |
103 | void time_zone_slock_init(void);
104 |
105 | /*
106 |  * Time of day and interval timer support.
107 |  *
108 |  * These routines provide the kernel entry points to get and set
109 |  * the time-of-day and per-process interval timers.  Subroutines
110 |  * here provide support for adding and subtracting timeval structures
111 |  * and decrementing interval timers, optionally reloading the interval
112 |  * timers when they expire.
113 |  */
114 | /* ARGSUSED */
115 | int
116 | gettimeofday(
117 |     struct proc *p,
118 |     struct gettimeofday_args *uap,
119 |     __unused int32_t *retval)
120 | {
121 |     int error = 0;
122 |     struct timezone ltz; /* local copy */
123 |     clock_sec_t secs;
124 |     clock_usec_t usecs;
125 |     uint64_t mach_time;
126 |
127 |     if (uap->tp || uap->mach_absolute_time) {
128 |         clock_gettimeofday_and_absolute_time(&secs, &usecs, &mach_time);
129 |     }
130 |
131 |     if (uap->tp) {
132 |         /* Casting secs through a uint32_t to match arm64 commpage */
133 |         if (IS_64BIT_PROCESS(p)) {
134 |             struct user64_timeval user_atv = {};
135 |             user_atv.tv_sec = (uint32_t)secs;
136 |             user_atv.tv_usec = usecs;
137 |             error = copyout(&user_atv, uap->tp, sizeof(user_atv));
138 |         } else {
139 |             struct user32_timeval user_atv = {};
140 |             user_atv.tv_sec = (uint32_t)secs;
141 |             user_atv.tv_usec = usecs;
142 |             error = copyout(&user_atv, uap->tp, sizeof(user_atv));
143 |         }
144 |         if (error) {
145 |             return error;
146 |         }
147 |     }
148 |
149 |     if (uap->tzp) {
150 |         lck_spin_lock(tz_slock);
151 |         ltz = tz;
152 |         lck_spin_unlock(tz_slock);
153 |
154 |         error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp), sizeof(tz));
155 |     }
156 |
157 |     if (error == 0 && uap->mach_absolute_time) {
158 |         error = copyout(&mach_time, uap->mach_absolute_time, sizeof(mach_time));
159 |     }
160 |
161 |     return error;
162 | }
163 |
164 | /*
165 |  * XXX Y2038 bug because of setthetime() argument
166 |  */
167 | /* ARGSUSED */
168 | int
169 | settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused int32_t *retval)
170 | {
171 |     struct timeval atv;
172 |     struct timezone atz;
173 |     int error;
174 |
175 |     bzero(&atv, sizeof(atv));
176 |
177 | #if CONFIG_MACF
178 |     error = mac_system_check_settime(kauth_cred_get());
179 |     if (error)
180 |         return (error);
181 | #endif
182 |     if ((error = suser(kauth_cred_get(), &p->p_acflag)))
183 |         return (error);
184 |     /* Verify all parameters before changing time */
185 |     if (uap->tv) {
186 |         if (IS_64BIT_PROCESS(p)) {
187 |             struct user64_timeval user_atv;
188 |             error = copyin(uap->tv, &user_atv, sizeof(user_atv));
189 |             atv.tv_sec = user_atv.tv_sec;
190 |             atv.tv_usec = user_atv.tv_usec;
191 |         } else {
192 |             struct user32_timeval user_atv;
193 |             error = copyin(uap->tv, &user_atv, sizeof(user_atv));
194 |             atv.tv_sec = user_atv.tv_sec;
195 |             atv.tv_usec = user_atv.tv_usec;
196 |         }
197 |         if (error)
198 |             return (error);
199 |     }
200 |     if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz))))
201 |         return (error);
202 |     if (uap->tv) {
203 |         timevalfix(&atv);
204 |         if (atv.tv_sec < 0 || (atv.tv_sec == 0 && atv.tv_usec < 0))
205 |             return (EPERM);
206 |         setthetime(&atv);
207 |     }
208 |     if (uap->tzp) {
209 |         lck_spin_lock(tz_slock);
210 |         tz = atz;
211 |         lck_spin_unlock(tz_slock);
212 |     }
213 |     return (0);
214 | }
215 |
216 | static void
217 | setthetime(
218 |     struct timeval *tv)
219 | {
220 |     clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
221 | }
222 |
223 | /*
224 |  * XXX Y2038 bug because of clock_adjtime() first argument
225 |  */
226 | /* ARGSUSED */
227 | int
228 | adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval)
229 | {
230 |     struct timeval atv;
231 |     int error;
232 |
233 | #if CONFIG_MACF
234 |     error = mac_system_check_settime(kauth_cred_get());
235 |     if (error)
236 |         return (error);
237 | #endif
238 |     if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0)))
239 |         return (error);
240 |     if (IS_64BIT_PROCESS(p)) {
241 |         struct user64_timeval user_atv;
242 |         error = copyin(uap->delta, &user_atv, sizeof(user_atv));
243 |         atv.tv_sec = user_atv.tv_sec;
244 |         atv.tv_usec = user_atv.tv_usec;
245 |     } else {
246 |         struct user32_timeval user_atv;
247 |         error = copyin(uap->delta, &user_atv, sizeof(user_atv));
248 |         atv.tv_sec = user_atv.tv_sec;
249 |         atv.tv_usec = user_atv.tv_usec;
250 |     }
251 |     if (error)
252 |         return (error);
253 |
254 |     /*
255 |      * Compute the total correction and the rate at which to apply it.
256 |      */
257 |     clock_adjtime(&atv.tv_sec, &atv.tv_usec);
258 |
259 |     if (uap->olddelta) {
260 |         if (IS_64BIT_PROCESS(p)) {
261 |             struct user64_timeval user_atv;
262 |             user_atv.tv_sec = atv.tv_sec;
263 |             user_atv.tv_usec = atv.tv_usec;
264 |             error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
    |                     ^ warning: Copies out a struct with uncleared padding (>= 4 bytes)
265 |         } else {
266 |             struct user32_timeval user_atv;
267 |             user_atv.tv_sec = atv.tv_sec;
268 |             user_atv.tv_usec = atv.tv_usec;
269 |             error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
270 |         }
271 |     }
272 |
273 |     return (0);
274 | }
275 |
276 | /*
277 |  * Verify the calendar value.  If negative,
278 |  * reset to zero (the epoch).
279 |  */
280 | void
281 | inittodr(
282 |     __unused time_t base)
283 | {
284 |     struct timeval tv;
285 |
286 |     /*
287 |      * Assertion:
288 |      * The calendar has already been
289 |      * set up from the platform clock.
290 |      *
291 |      * The value returned by microtime()
292 |      * is gotten from the calendar.
293 |      */
294 |     microtime(&tv);
295 |
296 |     if (tv.tv_sec < 0 || tv.tv_usec < 0) {
297 |         printf("WARNING: preposterous time in Real Time Clock");
298 |         tv.tv_sec = 0;  /* the UNIX epoch */
299 |         tv.tv_usec = 0;
300 |         setthetime(&tv);
301 |         printf(" -- CHECK AND RESET THE DATE!\n");
302 |     }
303 | }
304 |
305 | time_t
306 | boottime_sec(void)
307 | {
308 |     clock_sec_t secs;
309 |     clock_nsec_t nanosecs;
310 |
311 |     clock_get_boottime_nanotime(&secs, &nanosecs);
312 |     return (secs);
313 | }
314 |
315 | void
316 | boottime_timeval(struct timeval *tv)
317 | {
318 |     clock_sec_t secs;
319 |     clock_usec_t microsecs;
320 |
321 |     clock_get_boottime_microtime(&secs, &microsecs);
322 |
323 |     tv->tv_sec = secs;
324 |     tv->tv_usec = microsecs;
325 | }
326 |
327 | /*
328 |  * Get value of an interval timer.  The process virtual and
329 |  * profiling virtual time timers are kept internally in the
330 |  * way they are specified externally: in time until they expire.
331 |  *
332 |  * The real time interval timer expiration time (p_rtime)
333 |  * is kept as an absolute time rather than as a delta, so that
334 |  * it is easy to keep periodic real-time signals from drifting.
335 |  *
336 |  * The real time timer is processed by a callout routine.
337 |  * Since a callout may be delayed in real time due to
338 |  * other processing in the system, it is possible for the real
339 |  * time callout routine (realitexpire, given below), to be delayed
340 |  * in real time past when it is supposed to occur.  It does not
341 |  * suffice, therefore, to reload the real time .it_value from the
342 |  * real time .it_interval.  Rather, we compute the next time in
343 |  * absolute time when the timer should go off.
344 |  *
345 |  * Returns:     0                       Success
346 |  *              EINVAL                  Invalid argument
347 |  *      copyout:EFAULT                  Bad address
348 |  */
349 | /* ARGSUSED */
350 | int
351 | getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval)
352 | {
353 |     struct itimerval aitv;
354 |
355 |     if (uap->which > ITIMER_PROF)
356 |         return(EINVAL);
357 |
358 |     bzero(&aitv, sizeof(aitv));
359 |
360 |     proc_spinlock(p);
361 |     switch (uap->which) {
362 |
363 |     case ITIMER_REAL:
364 |         /*
365 |          * If time for real time timer has passed return 0,
366 |          * else return difference between current time and
367 |          * time for the timer to go off.
368 |          */
369 |         aitv = p->p_realtimer;
370 |         if (timerisset(&p->p_rtime)) {
371 |             struct timeval now;
372 |
373 |             microuptime(&now);
374 |             if (timercmp(&p->p_rtime, &now, <))
375 |                 timerclear(&aitv.it_value);
376 |             else {
377 |                 aitv.it_value = p->p_rtime;
378 |                 timevalsub(&aitv.it_value, &now);
379 |             }
380 |         }
381 |         else
382 |             timerclear(&aitv.it_value);
383 |         break;
384 |
385 |     case ITIMER_VIRTUAL:
386 |         aitv = p->p_vtimer_user;
387 |         break;
388 |
389 |     case ITIMER_PROF:
390 |         aitv = p->p_vtimer_prof;
391 |         break;
392 |     }
393 |
394 |     proc_spinunlock(p);
395 |
396 |     if (IS_64BIT_PROCESS(p)) {
397 |         struct user64_itimerval user_itv;
398 |         bzero(&user_itv, sizeof (user_itv));
399 |         user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
400 |         user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
401 |         user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
402 |         user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
403 |         return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
404 |     } else {
405 |         struct user32_itimerval user_itv;
406 |         bzero(&user_itv, sizeof (user_itv));
407 |         user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
408 |         user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
409 |         user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
410 |         user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
411 |         return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
412 |     }
413 | }
414 |
415 | /*
416 |  * Returns:     0                       Success
417 |  *              EINVAL                  Invalid argument
418 |  *      copyin:EFAULT                   Bad address
419 |  *      getitimer:EINVAL                Invalid argument
420 |  *      getitimer:EFAULT                Bad address
421 |  */
422 | /* ARGSUSED */
423 | int
424 | setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval)
425 | {
426 |     struct itimerval aitv;
427 |     user_addr_t itvp;
428 |     int error;
429 |
430 |     bzero(&aitv, sizeof(aitv));
431 |
432 |     if (uap->which > ITIMER_PROF)
433 |         return (EINVAL);
434 |     if ((itvp = uap->itv)) {
435 |         if (IS_64BIT_PROCESS(p)) {
436 |             struct user64_itimerval user_itv;
437 |             if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv))))
438 |                 return (error);
439 |             aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
440 |             aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
441 |             aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
442 |             aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
443 |         } else {
444 |             struct user32_itimerval user_itv;
445 |             if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv))))
446 |                 return (error);
447 |             aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
448 |             aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
449 |             aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
450 |             aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
451 |         }
452 |     }
453 |     if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval)))
454 |         return (error);
455 |     if (itvp == 0)
456 |         return (0);
457 |     if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
458 |         return (EINVAL);
459 |
460 |     switch (uap->which) {
461 |
462 |     case ITIMER_REAL:
463 |         proc_spinlock(p);
464 |         if (timerisset(&aitv.it_value)) {
465 |             microuptime(&p->p_rtime);
466 |             timevaladd(&p->p_rtime, &aitv.it_value);
467 |             p->p_realtimer = aitv;
468 |             if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL,
469 |                 tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL))
470 |                 p->p_ractive++;
471 |         } else {
472 |             timerclear(&p->p_rtime);
473 |             p->p_realtimer = aitv;
474 |             if (thread_call_cancel(p->p_rcall))
475 |                 p->p_ractive--;
476 |         }
477 |         proc_spinunlock(p);
478 |
479 |         break;
480 |
481 |
482 |     case ITIMER_VIRTUAL:
483 |         if (timerisset(&aitv.it_value))
484 |             task_vtimer_set(p->task, TASK_VTIMER_USER);
485 |         else
486 |             task_vtimer_clear(p->task, TASK_VTIMER_USER);
487 |
488 |         proc_spinlock(p);
489 |         p->p_vtimer_user = aitv;
490 |         proc_spinunlock(p);
491 |         break;
492 |
493 |     case ITIMER_PROF:
494 |         if (timerisset(&aitv.it_value))
495 |             task_vtimer_set(p->task, TASK_VTIMER_PROF);
496 |         else
497 |             task_vtimer_clear(p->task, TASK_VTIMER_PROF);
498 |
499 |         proc_spinlock(p);
500 |         p->p_vtimer_prof = aitv;
501 |         proc_spinunlock(p);
502 |         break;
503 |     }
504 |
505 |     return (0);
506 | }
507 |
508 | /*
509 |  * Real interval timer expired:
510 |  * send process whose timer expired an alarm signal.
511 |  * If time is not set up to reload, then just return.
512 |  * Else compute next time timer should go off which is > current time.
513 |  * This is where delay in processing this timeout causes multiple
514 |  * SIGALRM calls to be compressed into one.
515 |  */
516 | void
517 | realitexpire(
518 |     struct proc *p)
519 | {
520 |     struct proc *r;
521 |     struct timeval t;
522 |
523 |     r = proc_find(p->p_pid);
524 |
525 |     proc_spinlock(p);
526 |
527 |     assert(p->p_ractive > 0);
528 |
529 |     if (--p->p_ractive > 0 || r != p) {
530 |         /*
531 |          * bail, because either proc is exiting
532 |          * or there's another active thread call
533 |          */
534 |         proc_spinunlock(p);
535 |
536 |         if (r != NULL)
537 |             proc_rele(r);
538 |         return;
539 |     }
540 |
541 |     if (!timerisset(&p->p_realtimer.it_interval)) {
542 |         /*
543 |          * p_realtimer was cleared while this call was pending,
544 |          * send one last SIGALRM, but don't re-arm
545 |          */
546 |         timerclear(&p->p_rtime);
547 |         proc_spinunlock(p);
548 |
549 |         psignal(p, SIGALRM);
550 |         proc_rele(p);
551 |         return;
552 |     }
553 |
554 |     proc_spinunlock(p);
555 |
556 |     /*
557 |      * Send the signal before re-arming the next thread call,
558 |      * so in case psignal blocks, we won't create yet another thread call.
559 |      */
560 |
561 |     psignal(p, SIGALRM);
562 |
563 |     proc_spinlock(p);
564 |
565 |     /* Should we still re-arm the next thread call? */
566 |     if (!timerisset(&p->p_realtimer.it_interval)) {
567 |         timerclear(&p->p_rtime);
568 |         proc_spinunlock(p);
569 |
570 |         proc_rele(p);
571 |         return;
572 |     }
573 |
574 |     microuptime(&t);
575 |     timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
576 |
577 |     if (timercmp(&p->p_rtime, &t, <=)) {
578 |         if ((p->p_rtime.tv_sec + 2) >= t.tv_sec) {
579 |             for (;;) {
580 |                 timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
581 |                 if (timercmp(&p->p_rtime, &t, >))
582 |                     break;
583 |             }
584 |         } else {
585 |             p->p_rtime = p->p_realtimer.it_interval;
586 |             timevaladd(&p->p_rtime, &t);
587 |         }
588 |     }
589 |
590 |     assert(p->p_rcall != NULL);
591 |
592 |     if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, tvtoabstime(&p->p_rtime), 0,
593 |         THREAD_CALL_DELAY_USER_NORMAL)) {
594 |         p->p_ractive++;
595 |     }
596 |
597 |     proc_spinunlock(p);
598 |
599 |     proc_rele(p);
600 | }
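As the comment before getitimer() (lines 332-343) explains, the real-time timer keeps its expiration (p_rtime) as an absolute time and, when the callout runs late, advances it by whole intervals (lines 577-588) rather than re-arming relative to "now", so periodic SIGALRMs do not drift. A minimal user-space-style sketch of that re-arm policy follows; the helper name is hypothetical and plain timeval arithmetic stands in for the kernel helpers:

#include <sys/time.h>

/* Hypothetical illustration (not part of kern_time.c): advance an absolute
 * deadline by whole periods until it lies after "now", so one late firing
 * does not delay every later one. The interval must be non-zero and
 * normalized, which itimerfix()/timerisset() guarantee in the code above. */
static void
rearm_no_drift(struct timeval *deadline, const struct timeval *interval,
    const struct timeval *now)
{
    do {
        deadline->tv_sec += interval->tv_sec;
        deadline->tv_usec += interval->tv_usec;
        if (deadline->tv_usec >= 1000000) {     /* single carry, like timevalfix() */
            deadline->tv_sec++;
            deadline->tv_usec -= 1000000;
        }
    } while (deadline->tv_sec < now->tv_sec ||
        (deadline->tv_sec == now->tv_sec && deadline->tv_usec <= now->tv_usec));
}

The kernel additionally rebases from the current time when the timer has fallen more than a couple of seconds behind (lines 578 and 584-586), which this sketch omits.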
601 |
602 | /*
603 |  * Called once in proc_exit to clean up after an armed or pending realitexpire
604 |  *
605 |  * This will only be called after the proc refcount is drained,
606 |  * so realitexpire cannot be currently holding a proc ref.
607 |  * i.e. it will/has gotten PROC_NULL from proc_find.
608 |  */
609 | void
610 | proc_free_realitimer(proc_t p)
611 | {
612 |     proc_spinlock(p);
613 |
614 |     assert(p->p_rcall != NULL);
615 |     assert(p->p_refcount == 0);
616 |
617 |     timerclear(&p->p_realtimer.it_interval);
618 |
619 |     if (thread_call_cancel(p->p_rcall)) {
620 |         assert(p->p_ractive > 0);
621 |         p->p_ractive--;
622 |     }
623 |
624 |     while (p->p_ractive > 0) {
625 |         proc_spinunlock(p);
626 |
627 |         delay(1);
628 |
629 |         proc_spinlock(p);
630 |     }
631 |
632 |     thread_call_t call = p->p_rcall;
633 |     p->p_rcall = NULL;
634 |
635 |     proc_spinunlock(p);
636 |
637 |     thread_call_free(call);
638 | }
639 |
640 | /*
641 |  * Check that a proposed value to load into the .it_value or
642 |  * .it_interval part of an interval timer is acceptable.
643 |  */
644 | int
645 | itimerfix(
646 |     struct timeval *tv)
647 | {
648 |
649 |     if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
650 |         tv->tv_usec < 0 || tv->tv_usec >= 1000000)
651 |         return (EINVAL);
652 |     return (0);
653 | }
654 |
655 | int
656 | timespec_is_valid(const struct timespec *ts)
657 | {
658 |     /* The INT32_MAX limit ensures the timespec is safe for clock_*() functions
659 |      * which accept 32-bit ints. */
660 |     if (ts->tv_sec < 0 || ts->tv_sec > INT32_MAX ||
661 |         ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec > NSEC_PER_SEC) {
662 |         return 0;
663 |     }
664 |     return 1;
665 | }
666 |
667 | /*
668 |  * Decrement an interval timer by a specified number
669 |  * of microseconds, which must be less than a second,
670 |  * i.e. < 1000000.  If the timer expires, then reload
671 |  * it.  In this case, carry over (usec - old value) to
672 |  * reduce the value reloaded into the timer so that
673 |  * the timer does not drift.  This routine assumes
674 |  * that it is called in a context where the timers
675 |  * on which it is operating cannot change in value.
676 |  */
677 | int
678 | itimerdecr(proc_t p,
679 |     struct itimerval *itp, int usec)
680 | {
681 |
682 |     proc_spinlock(p);
683 |
684 |     if (itp->it_value.tv_usec < usec) {
685 |         if (itp->it_value.tv_sec == 0) {
686 |             /* expired, and already in next interval */
687 |             usec -= itp->it_value.tv_usec;
688 |             goto expire;
689 |         }
690 |         itp->it_value.tv_usec += 1000000;
691 |         itp->it_value.tv_sec--;
692 |     }
693 |     itp->it_value.tv_usec -= usec;
694 |     usec = 0;
695 |     if (timerisset(&itp->it_value)) {
696 |         proc_spinunlock(p);
697 |         return (1);
698 |     }
699 |     /* expired, exactly at end of interval */
700 | expire:
701 |     if (timerisset(&itp->it_interval)) {
702 |         itp->it_value = itp->it_interval;
703 |         if (itp->it_value.tv_sec > 0) {
704 |             itp->it_value.tv_usec -= usec;
705 |             if (itp->it_value.tv_usec < 0) {
706 |                 itp->it_value.tv_usec += 1000000;
707 |                 itp->it_value.tv_sec--;
708 |             }
709 |         }
710 |     } else
711 |         itp->it_value.tv_usec = 0; /* sec is already 0 */
712 |     proc_spinunlock(p);
713 |     return (0);
714 | }
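The carry-over described in the comment above (lines 668-675) is easiest to see with numbers: suppose it_value = {0, 300}, it_interval = {1, 0}, and the caller decrements by usec = 1000. The timer expired 700 us into the tick, so after reloading from it_interval the value becomes {0, 999300} rather than a full {1, 0}, and the next expiry is not pushed late by the time already consumed. The function returns 1 while time remains and 0 on expiry. A hedged caller sketch; the helper name and the use of SIGVTALRM here are illustrative, not taken from this file:

/* Hypothetical per-tick driver for the user virtual timer. */
static void
vtimer_tick(proc_t p, int elapsed_usec)          /* elapsed_usec < 1000000 */
{
    if (itimerdecr(p, &p->p_vtimer_user, elapsed_usec) == 0) {
        /* expired (and reloaded from it_interval if periodic) */
        psignal(p, SIGVTALRM);
    }
}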
715 |
716 | /*
717 |  * Add and subtract routines for timevals.
718 |  * N.B.: subtract routine doesn't deal with
719 |  * results which are before the beginning,
720 |  * it just gets very confused in this case.
721 |  * Caveat emptor.
722 |  */
723 | void
724 | timevaladd(
725 |     struct timeval *t1,
726 |     struct timeval *t2)
727 | {
728 |
729 |     t1->tv_sec += t2->tv_sec;
730 |     t1->tv_usec += t2->tv_usec;
731 |     timevalfix(t1);
732 | }
733 | void
734 | timevalsub(
735 |     struct timeval *t1,
736 |     struct timeval *t2)
737 | {
738 |
739 |     t1->tv_sec -= t2->tv_sec;
740 |     t1->tv_usec -= t2->tv_usec;
741 |     timevalfix(t1);
742 | }
743 | void
744 | timevalfix(
745 |     struct timeval *t1)
746 | {
747 |
748 |     if (t1->tv_usec < 0) {
749 |         t1->tv_sec--;
750 |         t1->tv_usec += 1000000;
751 |     }
752 |     if (t1->tv_usec >= 1000000) {
753 |         t1->tv_sec++;
754 |         t1->tv_usec -= 1000000;
755 |     }
756 | }
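timevaladd()/timevalsub() can leave tv_usec outside [0, 1000000); timevalfix() folds exactly one second of carry or borrow, for example:

/* Illustrative values:
 *   {1, 700000} + {0, 600000} -> {1, 1300000} -> timevalfix() -> {2, 300000}
 *   {2, 100000} - {0, 300000} -> {2, -200000} -> timevalfix() -> {1, 800000}
 * Because only one second is corrected, operands are expected to be already
 * normalized (0 <= tv_usec < 1000000), which itimerfix() enforces for
 * interval-timer values.
 */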
757 |
758 | /*
759 |  * Return the best possible estimate of the time in the timeval
760 |  * to which tvp points.
761 |  */
762 | void
763 | microtime(
764 |     struct timeval *tvp)
765 | {
766 |     clock_sec_t tv_sec;
767 |     clock_usec_t tv_usec;
768 |
769 |     clock_get_calendar_microtime(&tv_sec, &tv_usec);
770 |
771 |     tvp->tv_sec = tv_sec;
772 |     tvp->tv_usec = tv_usec;
773 | }
774 |
775 | void
776 | microtime_with_abstime(
777 |     struct timeval *tvp, uint64_t *abstime)
778 | {
779 |     clock_sec_t tv_sec;
780 |     clock_usec_t tv_usec;
781 |
782 |     clock_get_calendar_absolute_and_microtime(&tv_sec, &tv_usec, abstime);
783 |
784 |     tvp->tv_sec = tv_sec;
785 |     tvp->tv_usec = tv_usec;
786 | }
787 |
788 | void
789 | microuptime(
790 |     struct timeval *tvp)
791 | {
792 |     clock_sec_t tv_sec;
793 |     clock_usec_t tv_usec;
794 |
795 |     clock_get_system_microtime(&tv_sec, &tv_usec);
796 |
797 |     tvp->tv_sec = tv_sec;
798 |     tvp->tv_usec = tv_usec;
799 | }
800 |
801 | /*
802 |  * Ditto for timespec.
803 |  */
804 | void
805 | nanotime(
806 |     struct timespec *tsp)
807 | {
808 |     clock_sec_t tv_sec;
809 |     clock_nsec_t tv_nsec;
810 |
811 |     clock_get_calendar_nanotime(&tv_sec, &tv_nsec);
812 |
813 |     tsp->tv_sec = tv_sec;
814 |     tsp->tv_nsec = tv_nsec;
815 | }
816 |
817 | void
818 | nanouptime(
819 |     struct timespec *tsp)
820 | {
821 |     clock_sec_t tv_sec;
822 |     clock_nsec_t tv_nsec;
823 |
824 |     clock_get_system_nanotime(&tv_sec, &tv_nsec);
825 |
826 |     tsp->tv_sec = tv_sec;
827 |     tsp->tv_nsec = tv_nsec;
828 | }
829 |
830 | uint64_t
831 | tvtoabstime(
832 |     struct timeval *tvp)
833 | {
834 |     uint64_t result, usresult;
835 |
836 |     clock_interval_to_absolutetime_interval(
837 |         tvp->tv_sec, NSEC_PER_SEC, &result);
838 |     clock_interval_to_absolutetime_interval(
839 |         tvp->tv_usec, NSEC_PER_USEC, &usresult);
840 |
841 |     return (result + usresult);
842 | }
843 |
844 | uint64_t
845 | tstoabstime(struct timespec *ts)
846 | {
847 |     uint64_t abstime_s, abstime_ns;
848 |     clock_interval_to_absolutetime_interval(ts->tv_sec, NSEC_PER_SEC, &abstime_s);
849 |     clock_interval_to_absolutetime_interval(ts->tv_nsec, 1, &abstime_ns);
850 |     return abstime_s + abstime_ns;
851 | }
852 |
853 | #if NETWORKING
854 | /*
855 |  * ratecheck(): simple time-based rate-limit checking.
856 |  */
857 | int
858 | ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
859 | {
860 |     struct timeval tv, delta;
861 |     int rv = 0;
862 |
863 |     net_uptime2timeval(&tv);
864 |     delta = tv;
865 |     timevalsub(&delta, lasttime);
866 |
867 |     /*
868 |      * check for 0,0 is so that the message will be seen at least once,
869 |      * even if interval is huge.
870 |      */
871 |     if (timevalcmp(&delta, mininterval, >=) ||
872 |         (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
873 |         *lasttime = tv;
874 |         rv = 1;
875 |     }
876 |
877 |     return (rv);
878 | }
879 |
880 | /*
881 |  * ppsratecheck(): packets (or events) per second limitation.
882 |  */
883 | int
884 | ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
885 | {
886 |     struct timeval tv, delta;
887 |     int rv;
888 |
889 |     net_uptime2timeval(&tv);
890 |
891 |     timersub(&tv, lasttime, &delta);
892 |
893 |     /*
894 |      * Check for 0,0 so that the message will be seen at least once.
895 |      * If more than one second has passed since the last update of
896 |      * lasttime, reset the counter.
897 |      *
898 |      * we do increment *curpps even in *curpps < maxpps case, as some may
899 |      * try to use *curpps for stat purposes as well.
900 |      */
901 |     if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
902 |         delta.tv_sec >= 1) {
903 |         *lasttime = tv;
904 |         *curpps = 0;
905 |         rv = 1;
906 |     } else if (maxpps < 0)
907 |         rv = 1;
908 |     else if (*curpps < maxpps)
909 |         rv = 1;
910 |     else
911 |         rv = 0;
912 |
913 | #if 1 /* DIAGNOSTIC? */
914 |     /* be careful about wrap-around */
915 |     if (*curpps + 1 > 0)
916 |         *curpps = *curpps + 1;
917 | #else
918 |     /*
919 |      * assume that there's not too many calls to this function.
920 |      * not sure if the assumption holds, as it depends on *caller's*
921 |      * behavior, not the behavior of this function.
922 |      * IMHO it is wrong to make assumption on the caller's behavior,
923 |      * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
924 |      */
925 |     *curpps = *curpps + 1;
926 | #endif
927 |
928 |     return (rv);
929 | }
930 | #endif /* NETWORKING */
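ratecheck() and ppsratecheck() are the classic BSD helpers for throttling log messages and per-second event budgets: ratecheck() returns 1 when at least mininterval has elapsed since *lasttime (and always on the first, zeroed call), while ppsratecheck() returns 1 as long as fewer than maxpps events have already been counted in the current second. A hedged usage sketch; the variables and limits below are illustrative, not from this file:

/* Hypothetical call site: warn at most once every 10 seconds and process at
 * most 100 events per second. Zero-initialized statics make the first call
 * always succeed, as the comments in both functions note. */
static struct timeval warn_last;
static const struct timeval warn_interval = { 10, 0 };
static struct timeval pps_last;
static int pps_count;

static void
handle_event(void)
{
    if (!ppsratecheck(&pps_last, &pps_count, 100)) {
        if (ratecheck(&warn_last, &warn_interval))
            printf("handle_event: dropping events (rate limit)\n");
        return;                          /* over the per-second budget: drop */
    }
    /* ... process the event ... */
}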
931 |
932 | void
933 | time_zone_slock_init(void)
934 | {
935 |     /* allocate lock group attribute and group */
936 |     tz_slock_grp_attr = lck_grp_attr_alloc_init();
937 |
938 |     tz_slock_grp = lck_grp_alloc_init("tzlock", tz_slock_grp_attr);
939 |
940 |     /* Allocate lock attribute */
941 |     tz_slock_attr = lck_attr_alloc_init();
942 |
943 |     /* Allocate the spin lock */
944 |     tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
945 | }