File: bsd/kern/sysv_shm.c
Warning: line 1255, column 11: Copies out a struct with uncleared padding (>= 2 bytes)
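Note: the flagged call is the copyout() in IPCS_shm_sysctl() (IPCS_SHM_ITER case,
source line 1255 below). For a 64-bit caller the kernel memcpy()s a whole
struct user_shmid_ds from shmsegs[] into a stack local and copies that local out
to user space; the named fields are all set, but compiler-inserted padding bytes
inside the struct are never cleared, so at least two bytes of kernel memory can
reach user space. Below is a minimal sketch of one common remedy -- zero the
local, then fill it field by field instead of with memcpy(). It is only an
illustration of the pattern, not the change Apple shipped, and it assumes the
padding (rather than any named field) is what the checker is reporting; the
embedded shm_perm structure may need the same field-by-field treatment
depending on its layout.

    struct user_shmid_ds out;

    bzero(&out, sizeof(out));                 /* clear padding bytes first */
    out.shm_perm     = shmsegs[cursor].u.shm_perm;
    out.shm_segsz    = shmsegs[cursor].u.shm_segsz;
    out.shm_lpid     = shmsegs[cursor].u.shm_lpid;
    out.shm_cpid     = shmsegs[cursor].u.shm_cpid;
    out.shm_nattch   = shmsegs[cursor].u.shm_nattch;
    out.shm_atime    = shmsegs[cursor].u.shm_atime;
    out.shm_dtime    = shmsegs[cursor].u.shm_dtime;
    out.shm_ctime    = shmsegs[cursor].u.shm_ctime;
    out.shm_internal = USER_ADDR_NULL;        /* kernel pointer, never exposed */
    error = copyout(&out, ipcs.u64.ipcs_data, sizeof(out));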
1 | /* | |||
2 | * Copyright (c) 2000-2007 Apple Inc. All rights reserved. | |||
3 | * | |||
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |||
5 | * | |||
6 | * This file contains Original Code and/or Modifications of Original Code | |||
7 | * as defined in and that are subject to the Apple Public Source License | |||
8 | * Version 2.0 (the 'License'). You may not use this file except in | |||
9 | * compliance with the License. The rights granted to you under the License | |||
10 | * may not be used to create, or enable the creation or redistribution of, | |||
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |||
12 | * circumvent, violate, or enable the circumvention or violation of, any | |||
13 | * terms of an Apple operating system software license agreement. | |||
14 | * | |||
15 | * Please obtain a copy of the License at | |||
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |||
17 | * | |||
18 | * The Original Code and all software distributed under the License are | |||
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |||
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |||
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |||
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |||
23 | * Please see the License for the specific language governing rights and | |||
24 | * limitations under the License. | |||
25 | * | |||
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |||
27 | */ | |||
28 | /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ | |||
29 | ||||
30 | /* | |||
31 | * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. | |||
32 | * | |||
33 | * Redistribution and use in source and binary forms, with or without | |||
34 | * modification, are permitted provided that the following conditions | |||
35 | * are met: | |||
36 | * 1. Redistributions of source code must retain the above copyright | |||
37 | * notice, this list of conditions and the following disclaimer. | |||
38 | * 2. Redistributions in binary form must reproduce the above copyright | |||
39 | * notice, this list of conditions and the following disclaimer in the | |||
40 | * documentation and/or other materials provided with the distribution. | |||
41 | * 3. All advertising materials mentioning features or use of this software | |||
42 | * must display the following acknowledgement: | |||
43 | * This product includes software developed by Adam Glass and Charles | |||
44 | * Hannum. | |||
45 | * 4. The names of the authors may not be used to endorse or promote products | |||
46 | * derived from this software without specific prior written permission. | |||
47 | * | |||
48 | * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR | |||
49 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |||
50 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |||
51 | * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |||
52 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |||
53 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
54 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
55 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
56 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |||
57 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
58 | */ | |||
59 | /* | |||
60 | * NOTICE: This file was modified by McAfee Research in 2004 to introduce | |||
61 | * support for mandatory and extensible security protections. This notice | |||
62 | * is included in support of clause 2.2 (b) of the Apple Public License, | |||
63 | * Version 2.0. | |||
64 | * Copyright (c) 2005-2006 SPARTA, Inc. | |||
65 | */ | |||
66 | ||||
67 | ||||
68 | #include <sys/appleapiopts.h> | |||
69 | #include <sys/param.h> | |||
70 | #include <sys/systm.h> | |||
71 | #include <sys/kernel.h> | |||
72 | #include <sys/shm_internal.h> | |||
73 | #include <sys/proc_internal.h> | |||
74 | #include <sys/kauth.h> | |||
75 | #include <sys/malloc.h> | |||
76 | #include <sys/mman.h> | |||
77 | #include <sys/stat.h> | |||
78 | #include <sys/sysctl.h> | |||
79 | #include <sys/ipcs.h> | |||
80 | #include <sys/sysent.h> | |||
81 | #include <sys/sysproto.h> | |||
82 | #if CONFIG_MACF1 | |||
83 | #include <security/mac_framework.h> | |||
84 | #endif | |||
85 | ||||
86 | #include <security/audit/audit.h> | |||
87 | ||||
88 | #include <mach/mach_types.h> | |||
89 | #include <mach/vm_inherit.h> | |||
90 | #include <mach/vm_map.h> | |||
91 | ||||
92 | #include <mach/mach_vm.h> | |||
93 | ||||
94 | #include <vm/vm_map.h> | |||
95 | #include <vm/vm_protos.h> | |||
96 | #include <vm/vm_kern.h> | |||
97 | ||||
98 | #include <kern/locks.h> | |||
99 | #include <os/overflow.h> | |||
100 | ||||
101 | /* Uncomment this line to see MAC debugging output. */ | |||
102 | /* #define MAC_DEBUG */ | |||
103 | #if CONFIG_MACF_DEBUG | |||
104 | #define MPRINTF(a) printf a | |||
105 | #else | |||
106 | #define MPRINTF(a) | |||
107 | #endif | |||
108 | ||||
109 | #if SYSV_SHM1 | |||
110 | static int shminit(void); | |||
111 | ||||
112 | static lck_grp_t *sysv_shm_subsys_lck_grp; | |||
113 | static lck_grp_attr_t *sysv_shm_subsys_lck_grp_attr; | |||
114 | static lck_attr_t *sysv_shm_subsys_lck_attr; | |||
115 | static lck_mtx_t sysv_shm_subsys_mutex; | |||
116 | ||||
117 | #define SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex) lck_mtx_lock(&sysv_shm_subsys_mutex) | |||
118 | #define SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex) lck_mtx_unlock(&sysv_shm_subsys_mutex) | |||
119 | ||||
120 | static int oshmctl(void *p, void *uap, void *retval); | |||
121 | static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int * retval); | |||
122 | static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int * retval); | |||
123 | static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out); | |||
124 | static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out); | |||
125 | ||||
126 | /* XXX casting to (sy_call_t *) is bogus, as usual. */ | |||
127 | static sy_call_t *shmcalls[] = { | |||
128 | (sy_call_t *)shmat, (sy_call_t *)oshmctl, | |||
129 | (sy_call_t *)shmdt, (sy_call_t *)shmget, | |||
130 | (sy_call_t *)shmctl | |||
131 | }; | |||
132 | ||||
133 | #define SHMSEG_FREE0x0200 0x0200 | |||
134 | #define SHMSEG_REMOVED0x0400 0x0400 | |||
135 | #define SHMSEG_ALLOCATED0x0800 0x0800 | |||
136 | #define SHMSEG_WANTED0x1000 0x1000 | |||
137 | ||||
138 | static int shm_last_free, shm_nused, shm_committed; | |||
139 | struct shmid_kernel *shmsegs; /* 64 bit version */ | |||
140 | static int shm_inited = 0; | |||
141 | ||||
142 | /* | |||
143 | * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes, | |||
144 | * we have to keep a list of chunks when we want to handle a shared memory | |||
145 | * segment bigger than ANON_MAX_SIZE. | |||
146 | * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes | |||
147 | * of anonymous memory. | |||
148 | */ | |||
149 | struct shm_handle { | |||
150 | void * shm_object; /* named entry for this chunk*/ | |||
151 | memory_object_size_t shm_handle_size; /* size of this chunk */ | |||
152 | struct shm_handle *shm_handle_next; /* next chunk */ | |||
153 | }; | |||
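/*
 * Reader's note (not in the original source): given the chunking described
 * above, a segment's total reserved size can be recovered by walking the
 * chunk list, e.g.
 *
 *     mach_vm_size_t total = 0;
 *     for (struct shm_handle *h = first_chunk; h != NULL; h = h->shm_handle_next)
 *             total += h->shm_handle_size;
 *
 * where first_chunk is a hypothetical local name for the head stashed in
 * shmseg->u.shm_internal; each chunk covers at most ANON_MAX_SIZE bytes.
 */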
154 | ||||
155 | struct shmmap_state { | |||
156 | mach_vm_address_t va; /* user address */ | |||
157 | int shmid; /* segment id */ | |||
158 | }; | |||
159 | ||||
160 | static void shm_deallocate_segment(struct shmid_kernel *); | |||
161 | static int shm_find_segment_by_key(key_t); | |||
162 | static struct shmid_kernel *shm_find_segment_by_shmid(int); | |||
163 | static int shm_delete_mapping(struct proc *, struct shmmap_state *, int); | |||
164 | ||||
165 | #ifdef __APPLE_API_PRIVATE | |||
166 | #define DEFAULT_SHMMAX(4 * 1024 * 1024) (4 * 1024 * 1024) | |||
167 | #define DEFAULT_SHMMIN1 1 | |||
168 | #define DEFAULT_SHMMNI32 32 | |||
169 | #define DEFAULT_SHMSEG8 8 | |||
170 | #define DEFAULT_SHMALL1024 1024 | |||
171 | ||||
172 | struct shminfo shminfo = { | |||
173 | DEFAULT_SHMMAX(4 * 1024 * 1024), | |||
174 | DEFAULT_SHMMIN1, | |||
175 | DEFAULT_SHMMNI32, | |||
176 | DEFAULT_SHMSEG8, | |||
177 | DEFAULT_SHMALL1024 | |||
178 | }; | |||
179 | ||||
180 | #define SHMID_IS_VALID(x)((x) >= 0) ((x) >= 0) | |||
181 | #define SHMID_UNALLOCATED(-1) (-1) | |||
182 | #define SHMID_SENTINEL(-2) (-2) | |||
183 | ||||
184 | #endif /* __APPLE_API_PRIVATE */ | |||
185 | ||||
186 | void sysv_shm_lock_init(void); | |||
187 | ||||
188 | static __inline__ time_t | |||
189 | sysv_shmtime(void) | |||
190 | { | |||
191 | struct timeval tv; | |||
192 | microtime(&tv); | |||
193 | return (tv.tv_sec); | |||
194 | } | |||
195 | ||||
196 | /* | |||
197 | * This conversion is safe, since if we are converting for a 32 bit process, | |||
198 | * then its value of (struct shmid_ds)->shm_segsz will never exceed 4G. | |||
199 | * | |||
200 | * NOTE: Source and target may *NOT* overlap! (target is smaller) | |||
201 | */ | |||
202 | static void | |||
203 | shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out) | |||
204 | { | |||
205 | out->shm_perm = in->shm_perm; | |||
206 | out->shm_segsz = in->shm_segsz; | |||
207 | out->shm_lpid = in->shm_lpid; | |||
208 | out->shm_cpid = in->shm_cpid; | |||
209 | out->shm_nattch = in->shm_nattch; | |||
210 | out->shm_atime = in->shm_atime; | |||
211 | out->shm_dtime = in->shm_dtime; | |||
212 | out->shm_ctime = in->shm_ctime; | |||
213 | out->shm_internal = CAST_DOWN_EXPLICIT(int,in->shm_internal)( ((int)((uintptr_t) (in->shm_internal))) ); | |||
214 | } | |||
215 | ||||
216 | /* | |||
217 | * NOTE: Source and target are permitted to overlap! (source is smaller); | |||
218 | * this works because we copy fields in order from the end of the struct to | |||
219 | * the beginning. | |||
220 | */ | |||
221 | static void | |||
222 | shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out) | |||
223 | { | |||
224 | out->shm_internal = in->shm_internal; | |||
225 | out->shm_ctime = in->shm_ctime; | |||
226 | out->shm_dtime = in->shm_dtime; | |||
227 | out->shm_atime = in->shm_atime; | |||
228 | out->shm_nattch = in->shm_nattch; | |||
229 | out->shm_cpid = in->shm_cpid; | |||
230 | out->shm_lpid = in->shm_lpid; | |||
231 | out->shm_segsz = in->shm_segsz; | |||
232 | out->shm_perm = in->shm_perm; | |||
233 | } | |||
234 | ||||
235 | ||||
236 | static int | |||
237 | shm_find_segment_by_key(key_t key) | |||
238 | { | |||
239 | int i; | |||
240 | ||||
241 | for (i = 0; i < shminfo.shmmni; i++) | |||
242 | if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED0x0800) && | |||
243 | shmsegs[i].u.shm_perm._key == key) | |||
244 | return i; | |||
245 | return -1; | |||
246 | } | |||
247 | ||||
248 | static struct shmid_kernel * | |||
249 | shm_find_segment_by_shmid(int shmid) | |||
250 | { | |||
251 | int segnum; | |||
252 | struct shmid_kernel *shmseg; | |||
253 | ||||
254 | segnum = IPCID_TO_IX(shmid)((shmid) & 0xffff); | |||
255 | if (segnum < 0 || segnum >= shminfo.shmmni) | |||
256 | return NULL((void *)0); | |||
257 | shmseg = &shmsegs[segnum]; | |||
258 | if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED0x0800 | SHMSEG_REMOVED0x0400)) | |||
259 | != SHMSEG_ALLOCATED0x0800 || | |||
260 | shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)(((shmid) >> 16) & 0xffff)) | |||
261 | return NULL((void *)0); | |||
262 | return shmseg; | |||
263 | } | |||
264 | ||||
265 | static void | |||
266 | shm_deallocate_segment(struct shmid_kernel *shmseg) | |||
267 | { | |||
268 | struct shm_handle *shm_handle, *shm_handle_next; | |||
269 | mach_vm_size_t size; | |||
270 | ||||
271 | for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal)( ((void *)((uintptr_t) (shmseg->u.shm_internal)/(sizeof(void *) < sizeof(uintptr_t) ? 0 : 1))) ); /* tunnel */ | |||
272 | shm_handle != NULL((void *)0); | |||
273 | shm_handle = shm_handle_next) { | |||
274 | shm_handle_next = shm_handle->shm_handle_next; | |||
275 | mach_memory_entry_port_release(shm_handle->shm_object); | |||
276 | FREE((caddr_t) shm_handle, M_SHM)_FREE((void *)(caddr_t) shm_handle, 29); | |||
277 | } | |||
278 | shmseg->u.shm_internal = USER_ADDR_NULL((user_addr_t) 0); /* tunnel */ | |||
279 | size = mach_vm_round_page(shmseg->u.shm_segsz)(((mach_vm_offset_t)(shmseg->u.shm_segsz) + (4096 - 1)) & ~((signed)(4096 - 1))); | |||
280 | shm_committed -= btoc(size)(((unsigned)(size)+(4096 -1))>>12); | |||
281 | shm_nused--; | |||
282 | shmseg->u.shm_perm.mode = SHMSEG_FREE0x0200; | |||
283 | #if CONFIG_MACF1 | |||
284 | /* Reset the MAC label */ | |||
285 | mac_sysvshm_label_recycle(shmseg); | |||
286 | #endif | |||
287 | } | |||
288 | ||||
289 | static int | |||
290 | shm_delete_mapping(__unused__attribute__((unused)) struct proc *p, struct shmmap_state *shmmap_s, | |||
291 | int deallocate) | |||
292 | { | |||
293 | struct shmid_kernel *shmseg; | |||
294 | int segnum, result; | |||
295 | mach_vm_size_t size; | |||
296 | ||||
297 | segnum = IPCID_TO_IX(shmmap_s->shmid)((shmmap_s->shmid) & 0xffff); | |||
298 | shmseg = &shmsegs[segnum]; | |||
299 | size = mach_vm_round_page(shmseg->u.shm_segsz)(((mach_vm_offset_t)(shmseg->u.shm_segsz) + (4096 - 1)) & ~((signed)(4096 - 1))); /* XXX done for us? */ | |||
300 | if (deallocate) { | |||
301 | result = mach_vm_deallocate(current_map(), shmmap_s->va, size); | |||
302 | if (result != KERN_SUCCESS0) | |||
303 | return EINVAL22; | |||
304 | } | |||
305 | shmmap_s->shmid = SHMID_UNALLOCATED(-1); | |||
306 | shmseg->u.shm_dtime = sysv_shmtime(); | |||
307 | if ((--shmseg->u.shm_nattch <= 0) && | |||
308 | (shmseg->u.shm_perm.mode & SHMSEG_REMOVED0x0400)) { | |||
309 | shm_deallocate_segment(shmseg); | |||
310 | shm_last_free = segnum; | |||
311 | } | |||
312 | return 0; | |||
313 | } | |||
314 | ||||
315 | int | |||
316 | shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval) | |||
317 | { | |||
318 | #if CONFIG_MACF1 | |||
319 | struct shmid_kernel *shmsegptr; | |||
320 | #endif | |||
321 | struct shmmap_state *shmmap_s; | |||
322 | int i; | |||
323 | int shmdtret = 0; | |||
324 | ||||
325 | AUDIT_ARG(svipc_addr, uap->shmaddr)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_addr (__ar, uap->shmaddr); } } while (0); | |||
326 | ||||
327 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
328 | ||||
329 | if ((shmdtret = shminit())) { | |||
330 | goto shmdt_out; | |||
331 | } | |||
332 | ||||
333 | shmmap_s = (struct shmmap_state *)p->vm_shm; | |||
334 | if (shmmap_s == NULL((void *)0)) { | |||
335 | shmdtret = EINVAL22; | |||
336 | goto shmdt_out; | |||
337 | } | |||
338 | ||||
339 | for (; shmmap_s->shmid != SHMID_SENTINEL(-2); shmmap_s++) { | |||
340 | if (SHMID_IS_VALID(shmmap_s->shmid)((shmmap_s->shmid) >= 0) && | |||
341 | shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) { | |||
342 | break; | |||
343 | } | |||
344 | } | |||
345 | ||||
346 | if (!SHMID_IS_VALID(shmmap_s->shmid)((shmmap_s->shmid) >= 0)) { | |||
347 | shmdtret = EINVAL22; | |||
348 | goto shmdt_out; | |||
349 | } | |||
350 | ||||
351 | #if CONFIG_MACF1 | |||
352 | /* | |||
353 | * XXX: It might be useful to move this into the shm_delete_mapping | |||
354 | * function | |||
355 | */ | |||
356 | shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)((shmmap_s->shmid) & 0xffff)]; | |||
357 | shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr); | |||
358 | if (shmdtret) | |||
359 | goto shmdt_out; | |||
360 | #endif | |||
361 | i = shm_delete_mapping(p, shmmap_s, 1); | |||
362 | ||||
363 | if (i == 0) | |||
364 | *retval = 0; | |||
365 | shmdtret = i; | |||
366 | shmdt_out: | |||
367 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
368 | return shmdtret; | |||
369 | } | |||
370 | ||||
371 | int | |||
372 | shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) | |||
373 | { | |||
374 | int error, i, flags; | |||
375 | struct shmid_kernel *shmseg; | |||
376 | struct shmmap_state *shmmap_s = NULL((void *)0); | |||
377 | struct shm_handle *shm_handle; | |||
378 | mach_vm_address_t attach_va; /* attach address in/out */ | |||
379 | mach_vm_size_t map_size; /* size of map entry */ | |||
380 | mach_vm_size_t mapped_size; | |||
381 | vm_prot_t prot; | |||
382 | size_t size; | |||
383 | kern_return_t rv; | |||
384 | int shmat_ret; | |||
385 | int vm_flags; | |||
386 | ||||
387 | shmat_ret = 0; | |||
388 | ||||
389 | AUDIT_ARG(svipc_id, uap->shmid)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_id (__ar, uap->shmid); } } while (0); | |||
390 | AUDIT_ARG(svipc_addr, uap->shmaddr)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_addr (__ar, uap->shmaddr); } } while (0); | |||
391 | ||||
392 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
393 | ||||
394 | if ((shmat_ret = shminit())) { | |||
395 | goto shmat_out; | |||
396 | } | |||
397 | ||||
398 | shmmap_s = (struct shmmap_state *)p->vm_shm; | |||
399 | if (shmmap_s == NULL((void *)0)) { | |||
400 | /* lazily allocate the shm map */ | |||
401 | ||||
402 | int nsegs = shminfo.shmseg; | |||
403 | if (nsegs <= 0) { | |||
404 | shmat_ret = EMFILE24; | |||
405 | goto shmat_out; | |||
406 | } | |||
407 | ||||
408 | /* +1 for the sentinel */ | |||
409 | if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)__os_warn_unused(__extension__({ __typeof(*(&size)) _tmp; _Bool _s, _t; _s = __os_warn_unused(__builtin_add_overflow(( (nsegs)), ((1)), (&_tmp))); _t = __os_warn_unused(__builtin_mul_overflow (((sizeof(struct shmmap_state))), (_tmp), ((&size)))); _s | _t; }))) { | |||
410 | shmat_ret = ENOMEM12; | |||
411 | goto shmat_out; | |||
412 | } | |||
413 | ||||
414 | MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data" ))); (shmmap_s) = (struct shmmap_state *)__MALLOC(size, 29, 0x0000 , &site); }); | |||
415 | if (shmmap_s == NULL((void *)0)) { | |||
416 | shmat_ret = ENOMEM12; | |||
417 | goto shmat_out; | |||
418 | } | |||
419 | ||||
420 | /* initialize the entries */ | |||
421 | for (i = 0; i < nsegs; i++) { | |||
422 | shmmap_s[i].shmid = SHMID_UNALLOCATED(-1); | |||
423 | } | |||
424 | shmmap_s[i].shmid = SHMID_SENTINEL(-2); | |||
425 | ||||
426 | p->vm_shm = (caddr_t)shmmap_s; | |||
427 | } | |||
428 | ||||
429 | shmseg = shm_find_segment_by_shmid(uap->shmid); | |||
430 | if (shmseg == NULL((void *)0)) { | |||
431 | shmat_ret = EINVAL22; | |||
432 | goto shmat_out; | |||
433 | } | |||
434 | ||||
435 | AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_perm (__ar, &shmseg->u.shm_perm); } } while (0); | |||
436 | error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm, | |||
437 | (uap->shmflg & SHM_RDONLY010000) ? IPC_R000400 : IPC_R000400|IPC_W000200); | |||
438 | if (error) { | |||
439 | shmat_ret = error; | |||
440 | goto shmat_out; | |||
441 | } | |||
442 | ||||
443 | #if CONFIG_MACF1 | |||
444 | error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg); | |||
445 | if (error) { | |||
446 | shmat_ret = error; | |||
447 | goto shmat_out; | |||
448 | } | |||
449 | #endif | |||
450 | ||||
451 | /* find a free slot in this process's shmmap_state array */ | |||
452 | while (SHMID_IS_VALID(shmmap_s->shmid)((shmmap_s->shmid) >= 0)) { | |||
453 | shmmap_s++; | |||
454 | } | |||
455 | if (shmmap_s->shmid != SHMID_UNALLOCATED(-1)) { | |||
456 | /* no free shmids */ | |||
457 | shmat_ret = EMFILE24; | |||
458 | goto shmat_out; | |||
459 | } | |||
460 | ||||
461 | map_size = mach_vm_round_page(shmseg->u.shm_segsz)(((mach_vm_offset_t)(shmseg->u.shm_segsz) + (4096 - 1)) & ~((signed)(4096 - 1))); | |||
462 | prot = VM_PROT_READ((vm_prot_t) 0x01); | |||
463 | if ((uap->shmflg & SHM_RDONLY010000) == 0) | |||
464 | prot |= VM_PROT_WRITE((vm_prot_t) 0x02); | |||
465 | flags = MAP_ANON0x1000 | MAP_SHARED0x0001; | |||
466 | if (uap->shmaddr) | |||
467 | flags |= MAP_FIXED0x0010; | |||
468 | ||||
469 | attach_va = (mach_vm_address_t)uap->shmaddr; | |||
470 | if (uap->shmflg & SHM_RND020000) | |||
471 | attach_va &= ~(SHMLBA4096-1); | |||
472 | else if ((attach_va & (SHMLBA4096-1)) != 0) { | |||
473 | shmat_ret = EINVAL22; | |||
474 | goto shmat_out; | |||
475 | } | |||
476 | ||||
477 | if (flags & MAP_FIXED0x0010) { | |||
478 | vm_flags = VM_FLAGS_FIXED0x0000; | |||
479 | } else { | |||
480 | vm_flags = VM_FLAGS_ANYWHERE0x0001; | |||
481 | } | |||
482 | ||||
483 | mapped_size = 0; | |||
484 | ||||
485 | /* first reserve enough space... */ | |||
486 | rv = mach_vm_map(current_map(), | |||
487 | &attach_va, | |||
488 | map_size, | |||
489 | 0, | |||
490 | vm_flags, | |||
491 | IPC_PORT_NULL((ipc_port_t) 0UL), | |||
492 | 0, | |||
493 | FALSE0, | |||
494 | VM_PROT_NONE((vm_prot_t) 0x00), | |||
495 | VM_PROT_NONE((vm_prot_t) 0x00), | |||
496 | VM_INHERIT_NONE((vm_inherit_t) 2)); | |||
497 | if (rv != KERN_SUCCESS0) { | |||
498 | goto out; | |||
499 | } | |||
500 | ||||
501 | shmmap_s->va = attach_va; | |||
502 | ||||
503 | /* ... then map the shared memory over the reserved space */ | |||
504 | for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal)( ((void *)((uintptr_t) (shmseg->u.shm_internal)/(sizeof(void *) < sizeof(uintptr_t) ? 0 : 1))) );/* tunnel */ | |||
505 | shm_handle != NULL((void *)0); | |||
506 | shm_handle = shm_handle->shm_handle_next) { | |||
507 | ||||
508 | rv = vm_map_enter_mem_object( | |||
509 | current_map(), /* process map */ | |||
510 | &attach_va, /* attach address */ | |||
511 | shm_handle->shm_handle_size, /* segment size */ | |||
512 | (mach_vm_offset_t)0, /* alignment mask */ | |||
513 | VM_FLAGS_FIXED0x0000 | VM_FLAGS_OVERWRITE0x4000, | |||
514 | shm_handle->shm_object, | |||
515 | (mach_vm_offset_t)0, | |||
516 | FALSE0, | |||
517 | prot, | |||
518 | prot, | |||
519 | VM_INHERIT_SHARE((vm_inherit_t) 0)); | |||
520 | if (rv != KERN_SUCCESS0) | |||
521 | goto out; | |||
522 | ||||
523 | mapped_size += shm_handle->shm_handle_size; | |||
524 | attach_va = attach_va + shm_handle->shm_handle_size; | |||
525 | } | |||
526 | ||||
527 | shmmap_s->shmid = uap->shmid; | |||
528 | shmseg->u.shm_lpid = p->p_pid; | |||
529 | shmseg->u.shm_atime = sysv_shmtime(); | |||
530 | shmseg->u.shm_nattch++; | |||
531 | *retval = shmmap_s->va; /* XXX return -1 on error */ | |||
532 | shmat_ret = 0; | |||
533 | goto shmat_out; | |||
534 | out: | |||
535 | if (mapped_size > 0) { | |||
536 | (void) mach_vm_deallocate(current_map(), | |||
537 | shmmap_s->va, | |||
538 | mapped_size); | |||
539 | } | |||
540 | switch (rv) { | |||
541 | case KERN_INVALID_ADDRESS1: | |||
542 | case KERN_NO_SPACE3: | |||
543 | shmat_ret = ENOMEM12; | |||
544 | break; | |||
545 | case KERN_PROTECTION_FAILURE2: | |||
546 | shmat_ret = EACCES13; | |||
547 | break; | |||
548 | default: | |||
549 | shmat_ret = EINVAL22; | |||
550 | break; | |||
551 | } | |||
552 | shmat_out: | |||
553 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
554 | return shmat_ret; | |||
555 | } | |||
556 | ||||
557 | static int | |||
558 | oshmctl(__unused__attribute__((unused)) void *p, __unused__attribute__((unused)) void *uap, __unused__attribute__((unused)) void *retval) | |||
559 | { | |||
560 | return EINVAL22; | |||
561 | } | |||
562 | ||||
563 | /* | |||
564 | * Returns: 0 Success | |||
565 | * EINVAL | |||
566 | * copyout:EFAULT | |||
567 | * copyin:EFAULT | |||
568 | * ipcperm:EPERM | |||
569 | * ipcperm:EACCES | |||
570 | */ | |||
571 | int | |||
572 | shmctl(__unused__attribute__((unused)) struct proc *p, struct shmctl_args *uap, int32_t *retval) | |||
573 | { | |||
574 | int error; | |||
575 | kauth_cred_t cred = kauth_cred_get(); | |||
576 | struct user_shmid_ds inbuf; | |||
577 | struct shmid_kernel *shmseg; | |||
578 | ||||
579 | int shmctl_ret = 0; | |||
580 | ||||
581 | AUDIT_ARG(svipc_cmd, uap->cmd)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_cmd (__ar, uap->cmd); } } while (0); | |||
582 | AUDIT_ARG(svipc_id, uap->shmid)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_id (__ar, uap->shmid); } } while (0); | |||
583 | ||||
584 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
585 | ||||
586 | if ((shmctl_ret = shminit())) { | |||
587 | goto shmctl_out; | |||
588 | } | |||
589 | ||||
590 | shmseg = shm_find_segment_by_shmid(uap->shmid); | |||
591 | if (shmseg == NULL((void *)0)) { | |||
592 | shmctl_ret = EINVAL22; | |||
593 | goto shmctl_out; | |||
594 | } | |||
595 | ||||
596 | /* XXAUDIT: This is the perms BEFORE any change by this call. This | |||
597 | * may not be what is desired. | |||
598 | */ | |||
599 | AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_perm (__ar, &shmseg->u.shm_perm); } } while (0); | |||
600 | ||||
601 | #if CONFIG_MACF1 | |||
602 | error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd); | |||
603 | if (error) { | |||
604 | shmctl_ret = error; | |||
605 | goto shmctl_out; | |||
606 | } | |||
607 | #endif | |||
608 | switch (uap->cmd) { | |||
609 | case IPC_STAT2: | |||
610 | error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R000400); | |||
611 | if (error) { | |||
612 | shmctl_ret = error; | |||
613 | goto shmctl_out; | |||
614 | } | |||
615 | ||||
616 | if (IS_64BIT_PROCESS(p)) { | |||
617 | struct user_shmid_ds shmid_ds__shmid_ds_new; | |||
618 | memcpy(&shmid_ds__shmid_ds_new, &shmseg->u, sizeof(struct user_shmid_ds)); | |||
619 | ||||
620 | /* Clear kernel reserved pointer before copying to user space */ | |||
621 | shmid_ds__shmid_ds_new.shm_internal = USER_ADDR_NULL((user_addr_t) 0); | |||
622 | ||||
623 | error = copyout(&shmid_ds__shmid_ds_new, uap->buf, sizeof(shmid_ds__shmid_ds_new)); | |||
624 | } else { | |||
625 | struct user32_shmid_ds shmid_ds32; | |||
626 | shmid_ds_64to32(&shmseg->u, &shmid_ds32); | |||
627 | ||||
628 | /* Clear kernel reserved pointer before copying to user space */ | |||
629 | shmid_ds32.shm_internal = (user32_addr_t)0; | |||
630 | ||||
631 | error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32)); | |||
632 | } | |||
633 | if (error) { | |||
634 | shmctl_ret = error; | |||
635 | goto shmctl_out; | |||
636 | } | |||
637 | break; | |||
638 | case IPC_SET1: | |||
639 | error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M010000); | |||
640 | if (error) { | |||
641 | shmctl_ret = error; | |||
642 | goto shmctl_out; | |||
643 | } | |||
644 | if (IS_64BIT_PROCESS(p)) { | |||
645 | error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds)); | |||
646 | } else { | |||
647 | struct user32_shmid_ds shmid_ds32; | |||
648 | error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32)); | |||
649 | /* convert in place; ugly, but safe */ | |||
650 | shmid_ds_32to64(&shmid_ds32, &inbuf); | |||
651 | } | |||
652 | if (error) { | |||
653 | shmctl_ret = error; | |||
654 | goto shmctl_out; | |||
655 | } | |||
656 | shmseg->u.shm_perm.uid = inbuf.shm_perm.uid; | |||
657 | shmseg->u.shm_perm.gid = inbuf.shm_perm.gid; | |||
658 | shmseg->u.shm_perm.mode = | |||
659 | (shmseg->u.shm_perm.mode & ~ACCESSPERMS(0000700|0000070|0000007)) | | |||
660 | (inbuf.shm_perm.mode & ACCESSPERMS(0000700|0000070|0000007)); | |||
661 | shmseg->u.shm_ctime = sysv_shmtime(); | |||
662 | break; | |||
663 | case IPC_RMID0: | |||
664 | error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M010000); | |||
665 | if (error) { | |||
666 | shmctl_ret = error; | |||
667 | goto shmctl_out; | |||
668 | } | |||
669 | shmseg->u.shm_perm._key = IPC_PRIVATE((key_t)0); | |||
670 | shmseg->u.shm_perm.mode |= SHMSEG_REMOVED0x0400; | |||
671 | if (shmseg->u.shm_nattch <= 0) { | |||
672 | shm_deallocate_segment(shmseg); | |||
673 | shm_last_free = IPCID_TO_IX(uap->shmid)((uap->shmid) & 0xffff); | |||
674 | } | |||
675 | break; | |||
676 | #if 0 | |||
677 | case SHM_LOCK: | |||
678 | case SHM_UNLOCK: | |||
679 | #endif | |||
680 | default: | |||
681 | shmctl_ret = EINVAL22; | |||
682 | goto shmctl_out; | |||
683 | } | |||
684 | *retval = 0; | |||
685 | shmctl_ret = 0; | |||
686 | shmctl_out: | |||
687 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
688 | return shmctl_ret; | |||
689 | } | |||
690 | ||||
691 | static int | |||
692 | shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval) | |||
693 | { | |||
694 | struct shmid_kernel *shmseg; | |||
695 | int error = 0; | |||
696 | ||||
697 | shmseg = &shmsegs[segnum]; | |||
698 | if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED0x0400) { | |||
699 | /* | |||
700 | * This segment is in the process of being allocated. Wait | |||
701 | * until it's done, and look the key up again (in case the | |||
702 | * allocation failed or it was freed). | |||
703 | */ | |||
704 | shmseg->u.shm_perm.mode |= SHMSEG_WANTED0x1000; | |||
705 | error = tsleep((caddr_t)shmseg, PLOCK36 | PCATCH0x100, "shmget", 0); | |||
706 | if (error) | |||
707 | return error; | |||
708 | return EAGAIN35; | |||
709 | } | |||
710 | ||||
711 | /* | |||
712 | * The low 9 bits of shmflg are the mode bits being requested, which | |||
713 | * are the actual mode bits desired on the segment, and not in IPC_R | |||
714 | * form; therefore it would be incorrect to call ipcperm() to validate | |||
715 | * them; instead, we AND the existing mode with the requested mode, and | |||
716 | * verify that it matches the requested mode; otherwise, we fail with | |||
717 | * EACCES (access denied). | |||
718 | */ | |||
719 | if ((shmseg->u.shm_perm.mode & mode) != mode) | |||
720 | return EACCES13; | |||
721 | ||||
722 | #if CONFIG_MACF1 | |||
723 | error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg); | |||
724 | if (error) | |||
725 | return (error); | |||
726 | #endif | |||
727 | ||||
728 | if (uap->size && uap->size > shmseg->u.shm_segsz) | |||
729 | return EINVAL22; | |||
730 | ||||
731 | if ((uap->shmflg & (IPC_CREAT001000 | IPC_EXCL002000)) == (IPC_CREAT001000 | IPC_EXCL002000)) | |||
732 | return EEXIST17; | |||
733 | ||||
734 | *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm)(((shmseg->u.shm_perm._seq) << 16L) | ((segnum) & 0xffff)); | |||
735 | return 0; | |||
736 | } | |||
737 | ||||
738 | static int | |||
739 | shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, | |||
740 | int *retval) | |||
741 | { | |||
742 | int i, segnum, shmid; | |||
743 | kauth_cred_t cred = kauth_cred_get(); | |||
744 | struct shmid_kernel *shmseg; | |||
745 | struct shm_handle *shm_handle; | |||
746 | kern_return_t kret; | |||
747 | mach_vm_size_t total_size, size, alloc_size; | |||
748 | void * mem_object; | |||
749 | struct shm_handle *shm_handle_next, **shm_handle_next_p; | |||
750 | ||||
751 | if (uap->size <= 0 || | |||
752 | uap->size < (user_size_t)shminfo.shmmin || | |||
753 | uap->size > (user_size_t)shminfo.shmmax) { | |||
754 | return EINVAL22; | |||
755 | } | |||
756 | if (shm_nused >= shminfo.shmmni) /* any shmids left? */ | |||
757 | return ENOSPC28; | |||
758 | if (mach_vm_round_page_overflow(uap->size, &total_size)) { | |||
759 | return EINVAL22; | |||
760 | } | |||
761 | if ((user_ssize_t)(shm_committed + btoc(total_size)(((unsigned)(total_size)+(4096 -1))>>12)) > shminfo.shmall) | |||
762 | return ENOMEM12; | |||
763 | if (shm_last_free < 0) { | |||
764 | for (i = 0; i < shminfo.shmmni; i++) | |||
765 | if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE0x0200) | |||
766 | break; | |||
767 | if (i == shminfo.shmmni) | |||
768 | panic("shmseg free count inconsistent")(panic)("\"shmseg free count inconsistent\"" "@" "/Users/vlad/tmp/xnu-3789.41.3/bsd/kern/sysv_shm.c" ":" "768"); | |||
769 | segnum = i; | |||
770 | } else { | |||
771 | segnum = shm_last_free; | |||
772 | shm_last_free = -1; | |||
773 | } | |||
774 | shmseg = &shmsegs[segnum]; | |||
775 | ||||
776 | /* | |||
777 | * In case we sleep in malloc(), mark the segment present but deleted | |||
778 | * so that no one else tries to create the same key. | |||
779 | * XXX but we don't release the global lock !? | |||
780 | */ | |||
781 | shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED0x0800 | SHMSEG_REMOVED0x0400; | |||
782 | shmseg->u.shm_perm._key = uap->key; | |||
783 | shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff; | |||
784 | ||||
785 | shm_handle_next_p = NULL((void *)0); | |||
786 | for (alloc_size = 0; | |||
787 | alloc_size < total_size; | |||
788 | alloc_size += size) { | |||
789 | size = MIN(total_size - alloc_size, ANON_MAX_SIZE)(((total_size - alloc_size)<(0xFFFFF000ULL))?(total_size - alloc_size):(0xFFFFF000ULL)); | |||
790 | kret = mach_make_memory_entry_64( | |||
791 | VM_MAP_NULL((vm_map_t) 0), | |||
792 | (memory_object_size_t *) &size, | |||
793 | (memory_object_offset_t) 0, | |||
794 | MAP_MEM_NAMED_CREATE0x020000 | VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)), | |||
795 | (ipc_port_t *) &mem_object, 0); | |||
796 | if (kret != KERN_SUCCESS0) | |||
797 | goto out; | |||
798 | ||||
799 | MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data" ))); (shm_handle) = (struct shm_handle *)__MALLOC(sizeof(struct shm_handle), 29, 0x0000, &site); }); | |||
800 | if (shm_handle == NULL((void *)0)) { | |||
801 | kret = KERN_NO_SPACE3; | |||
802 | mach_memory_entry_port_release(mem_object); | |||
803 | mem_object = NULL((void *)0); | |||
804 | goto out; | |||
805 | } | |||
806 | shm_handle->shm_object = mem_object; | |||
807 | shm_handle->shm_handle_size = size; | |||
808 | shm_handle->shm_handle_next = NULL((void *)0); | |||
809 | if (shm_handle_next_p == NULL((void *)0)) { | |||
810 | shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle)((user_addr_t)((uintptr_t)(shm_handle)));/* tunnel */ | |||
811 | } else { | |||
812 | *shm_handle_next_p = shm_handle; | |||
813 | } | |||
814 | shm_handle_next_p = &shm_handle->shm_handle_next; | |||
815 | } | |||
816 | ||||
817 | shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm)(((shmseg->u.shm_perm._seq) << 16L) | ((segnum) & 0xffff)); | |||
818 | ||||
819 | shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred); | |||
820 | shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred); | |||
821 | shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED0x1000) | | |||
822 | (mode & ACCESSPERMS(0000700|0000070|0000007)) | SHMSEG_ALLOCATED0x0800; | |||
823 | shmseg->u.shm_segsz = uap->size; | |||
824 | shmseg->u.shm_cpid = p->p_pid; | |||
825 | shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0; | |||
826 | shmseg->u.shm_atime = shmseg->u.shm_dtime = 0; | |||
827 | #if CONFIG_MACF1 | |||
828 | mac_sysvshm_label_associate(cred, shmseg); | |||
829 | #endif | |||
830 | shmseg->u.shm_ctime = sysv_shmtime(); | |||
831 | shm_committed += btoc(size)(((unsigned)(size)+(4096 -1))>>12); | |||
832 | shm_nused++; | |||
833 | AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_perm (__ar, &shmseg->u.shm_perm); } } while (0); | |||
834 | if (shmseg->u.shm_perm.mode & SHMSEG_WANTED0x1000) { | |||
835 | /* | |||
836 | * Somebody else wanted this key while we were asleep. Wake | |||
837 | * them up now. | |||
838 | */ | |||
839 | shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED0x1000; | |||
840 | wakeup((caddr_t)shmseg); | |||
841 | } | |||
842 | *retval = shmid; | |||
843 | AUDIT_ARG(svipc_id, shmid)do { if (__builtin_expect(audit_syscalls, 0)) { struct kaudit_record *__ar = ((struct uthread*)get_bsdthread_info(current_thread( )))->uu_ar; if (__builtin_expect(((void *)0) != (__ar), 0) ) audit_arg_svipc_id (__ar, shmid); } } while (0); | |||
844 | return 0; | |||
845 | out: | |||
846 | if (kret != KERN_SUCCESS0) { | |||
847 | for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal)( ((void *)((uintptr_t) (shmseg->u.shm_internal)/(sizeof(void *) < sizeof(uintptr_t) ? 0 : 1))) ); /* tunnel */ | |||
848 | shm_handle != NULL((void *)0); | |||
849 | shm_handle = shm_handle_next) { | |||
850 | shm_handle_next = shm_handle->shm_handle_next; | |||
851 | mach_memory_entry_port_release(shm_handle->shm_object); | |||
852 | FREE((caddr_t) shm_handle, M_SHM)_FREE((void *)(caddr_t) shm_handle, 29); | |||
853 | } | |||
854 | shmseg->u.shm_internal = USER_ADDR_NULL((user_addr_t) 0); /* tunnel */ | |||
855 | } | |||
856 | ||||
857 | switch (kret) { | |||
858 | case KERN_INVALID_ADDRESS1: | |||
859 | case KERN_NO_SPACE3: | |||
860 | return (ENOMEM12); | |||
861 | case KERN_PROTECTION_FAILURE2: | |||
862 | return (EACCES13); | |||
863 | default: | |||
864 | return (EINVAL22); | |||
865 | } | |||
866 | ||||
867 | } | |||
868 | ||||
869 | int | |||
870 | shmget(struct proc *p, struct shmget_args *uap, int32_t *retval) | |||
871 | { | |||
872 | int segnum, mode, error; | |||
873 | int shmget_ret = 0; | |||
874 | ||||
875 | /* Auditing is actually done in shmget_allocate_segment() */ | |||
876 | ||||
877 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
878 | ||||
879 | if ((shmget_ret = shminit())) { | |||
880 | goto shmget_out; | |||
881 | } | |||
882 | ||||
883 | mode = uap->shmflg & ACCESSPERMS(0000700|0000070|0000007); | |||
884 | if (uap->key != IPC_PRIVATE((key_t)0)) { | |||
885 | again: | |||
886 | segnum = shm_find_segment_by_key(uap->key); | |||
887 | if (segnum >= 0) { | |||
888 | error = shmget_existing(uap, mode, segnum, retval); | |||
889 | if (error == EAGAIN35) | |||
890 | goto again; | |||
891 | shmget_ret = error; | |||
892 | goto shmget_out; | |||
893 | } | |||
894 | if ((uap->shmflg & IPC_CREAT001000) == 0) { | |||
895 | shmget_ret = ENOENT2; | |||
896 | goto shmget_out; | |||
897 | } | |||
898 | } | |||
899 | shmget_ret = shmget_allocate_segment(p, uap, mode, retval); | |||
900 | shmget_out: | |||
901 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
902 | return shmget_ret; | |||
903 | } | |||
904 | ||||
905 | /* | |||
906 | * shmsys | |||
907 | * | |||
908 | * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl | |||
909 | * | |||
910 | * Parameters: p Process requesting the call | |||
911 | * uap User argument descriptor (see below) | |||
912 | * retval Return value of the selected shm call | |||
913 | * | |||
914 | * Indirect parameters: uap->which msg call to invoke (index in array of shm calls) | |||
915 | * uap->a2 User argument descriptor | |||
916 | * | |||
917 | * Returns: 0 Success | |||
918 | * !0 Not success | |||
919 | * | |||
920 | * Implicit returns: retval Return value of the selected shm call | |||
921 | * | |||
922 | * DEPRECATED: This interface should not be used to call the other SHM | |||
923 | * functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct | |||
924 | * usage is to call the other SHM functions directly. | |||
925 | */ | |||
926 | int | |||
927 | shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval) | |||
928 | { | |||
929 | ||||
930 | /* The routine that we are dispatching already does this */ | |||
931 | ||||
932 | if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) | |||
933 | return EINVAL22; | |||
934 | return ((*shmcalls[uap->which])(p, &uap->a2, retval)); | |||
935 | } | |||
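/*
 * Reader's note (not in the original source): with the shmcalls[] table near
 * the top of this file, uap->which dispatches as
 *     0 -> shmat, 1 -> oshmctl (always EINVAL), 2 -> shmdt,
 *     3 -> shmget, 4 -> shmctl,
 * and any larger value fails the bounds check above with EINVAL.
 */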
936 | ||||
937 | /* | |||
938 | * Return 0 on success, 1 on failure. | |||
939 | */ | |||
940 | int | |||
941 | shmfork(struct proc *p1, struct proc *p2) | |||
942 | { | |||
943 | struct shmmap_state *shmmap_s; | |||
944 | size_t size; | |||
945 | int nsegs = 0; | |||
946 | int ret = 0; | |||
947 | ||||
948 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
949 | ||||
950 | if (shminit()) { | |||
951 | ret = 1; | |||
952 | goto shmfork_out; | |||
953 | } | |||
954 | ||||
955 | struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm; | |||
956 | assert(src)((void)0); | |||
957 | ||||
958 | /* count number of shmid entries in src */ | |||
959 | for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL(-2); s++) { | |||
960 | nsegs++; | |||
961 | } | |||
962 | ||||
963 | if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)__os_warn_unused(__extension__({ __typeof(*(&size)) _tmp; _Bool _s, _t; _s = __os_warn_unused(__builtin_add_overflow(( (nsegs)), ((1)), (&_tmp))); _t = __os_warn_unused(__builtin_mul_overflow (((sizeof(struct shmmap_state))), (_tmp), ((&size)))); _s | _t; }))) { | |||
964 | ret = 1; | |||
965 | goto shmfork_out; | |||
966 | } | |||
967 | MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data" ))); (shmmap_s) = (struct shmmap_state *)__MALLOC(size, 29, 0x0000 , &site); }); | |||
968 | if (shmmap_s == NULL((void *)0)) { | |||
969 | ret = 1; | |||
970 | goto shmfork_out; | |||
971 | } | |||
972 | ||||
973 | bcopy(src, (caddr_t)shmmap_s, size); | |||
974 | p2->vm_shm = (caddr_t)shmmap_s; | |||
975 | for (; shmmap_s->shmid != SHMID_SENTINEL(-2); shmmap_s++) { | |||
976 | if (SHMID_IS_VALID(shmmap_s->shmid)((shmmap_s->shmid) >= 0)) { | |||
977 | shmsegs[IPCID_TO_IX(shmmap_s->shmid)((shmmap_s->shmid) & 0xffff)].u.shm_nattch++; | |||
978 | } | |||
979 | } | |||
980 | ||||
981 | shmfork_out: | |||
982 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
983 | return ret; | |||
984 | } | |||
985 | ||||
986 | static void | |||
987 | shmcleanup(struct proc *p, int deallocate) | |||
988 | { | |||
989 | struct shmmap_state *shmmap_s; | |||
990 | ||||
991 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
992 | ||||
993 | shmmap_s = (struct shmmap_state *)p->vm_shm; | |||
994 | for (; shmmap_s->shmid != SHMID_SENTINEL(-2); shmmap_s++) { | |||
995 | if (SHMID_IS_VALID(shmmap_s->shmid)((shmmap_s->shmid) >= 0)) { | |||
996 | /* | |||
997 | * XXX: Should the MAC framework enforce | |||
998 | * a check here as well? | |||
999 | */ | |||
1000 | shm_delete_mapping(p, shmmap_s, deallocate); | |||
1001 | } | |||
1002 | } | |||
1003 | ||||
1004 | FREE((caddr_t)p->vm_shm, M_SHM)_FREE((void *)(caddr_t)p->vm_shm, 29); | |||
1005 | p->vm_shm = NULL((void *)0); | |||
1006 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
1007 | } | |||
1008 | ||||
1009 | void | |||
1010 | shmexit(struct proc *p) | |||
1011 | { | |||
1012 | shmcleanup(p, 1); | |||
1013 | } | |||
1014 | ||||
1015 | /* | |||
1016 | * shmexec() is like shmexit(), only it doesn't delete the mappings, | |||
1017 | * since the old address space has already been destroyed and the new | |||
1018 | * one instantiated. Instead, it just does the housekeeping work we | |||
1019 | * need to do to keep the System V shared memory subsystem sane. | |||
1020 | */ | |||
1021 | __private_extern__ void | |||
1022 | shmexec(struct proc *p) | |||
1023 | { | |||
1024 | shmcleanup(p, 0); | |||
1025 | } | |||
1026 | ||||
1027 | int | |||
1028 | shminit(void) | |||
1029 | { | |||
1030 | size_t sz; | |||
1031 | int i; | |||
1032 | ||||
1033 | if (!shm_inited) { | |||
1034 | /* | |||
1035 | * we store internally 64 bit, since if we didn't, we would | |||
1036 | * be unable to represent a segment size in excess of 32 bits | |||
1037 | * with the (struct shmid_ds)->shm_segsz field; also, POSIX | |||
1038 | * dictates this field be a size_t, which is 64 bits when | |||
1039 | * running 64 bit binaries. | |||
1040 | */ | |||
1041 | if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)__os_warn_unused(__builtin_mul_overflow((shminfo.shmmni), (sizeof (struct shmid_kernel)), (&sz)))) { | |||
1042 | return ENOMEM12; | |||
1043 | } | |||
1044 | ||||
1045 | MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK)({ static vm_allocation_site_t site __attribute__((section("__DATA, __data" ))); (shmsegs) = (struct shmid_kernel *)__MALLOC(sz, 29, 0x0000 , &site); }); | |||
1046 | if (shmsegs == NULL((void *)0)) { | |||
1047 | return ENOMEM12; | |||
1048 | } | |||
1049 | for (i = 0; i < shminfo.shmmni; i++) { | |||
1050 | shmsegs[i].u.shm_perm.mode = SHMSEG_FREE0x0200; | |||
1051 | shmsegs[i].u.shm_perm._seq = 0; | |||
1052 | #if CONFIG_MACF1 | |||
1053 | mac_sysvshm_label_init(&shmsegs[i]); | |||
1054 | #endif | |||
1055 | } | |||
1056 | shm_last_free = 0; | |||
1057 | shm_nused = 0; | |||
1058 | shm_committed = 0; | |||
1059 | shm_inited = 1; | |||
1060 | } | |||
1061 | ||||
1062 | return 0; | |||
1063 | } | |||
1064 | ||||
1065 | /* Initialize the mutex governing access to the SysV shm subsystem */ | |||
1066 | __private_extern__ void | |||
1067 | sysv_shm_lock_init( void ) | |||
1068 | { | |||
1069 | ||||
1070 | sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); | |||
1071 | ||||
1072 | sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr); | |||
1073 | ||||
1074 | sysv_shm_subsys_lck_attr = lck_attr_alloc_init(); | |||
1075 | lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr); | |||
1076 | } | |||
1077 | ||||
1078 | /* (struct sysctl_oid *oidp, void *arg1, int arg2, \ | |||
1079 | struct sysctl_req *req) */ | |||
1080 | static int | |||
1081 | sysctl_shminfo(__unused__attribute__((unused)) struct sysctl_oid *oidp, void *arg1, | |||
1082 | __unused__attribute__((unused)) int arg2, struct sysctl_req *req) | |||
1083 | { | |||
1084 | int error = 0; | |||
1085 | int sysctl_shminfo_ret = 0; | |||
1086 | int64_t saved_shmmax; | |||
1087 | int64_t saved_shmmin; | |||
1088 | int64_t saved_shmseg; | |||
1089 | int64_t saved_shmmni; | |||
1090 | int64_t saved_shmall; | |||
1091 | ||||
1092 | error = SYSCTL_OUT(req, arg1, sizeof(int64_t))(req->oldfunc)(req, arg1, sizeof(int64_t)); | |||
1093 | if (error || req->newptr == USER_ADDR_NULL((user_addr_t) 0)) | |||
1094 | return(error); | |||
1095 | ||||
1096 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
1097 | ||||
1098 | /* shmmni can not be changed after SysV SHM has been initialized */ | |||
1099 | if (shm_inited && arg1 == &shminfo.shmmni) { | |||
1100 | sysctl_shminfo_ret = EPERM1; | |||
1101 | goto sysctl_shminfo_out; | |||
1102 | } | |||
1103 | saved_shmmax = shminfo.shmmax; | |||
1104 | saved_shmmin = shminfo.shmmin; | |||
1105 | saved_shmseg = shminfo.shmseg; | |||
1106 | saved_shmmni = shminfo.shmmni; | |||
1107 | saved_shmall = shminfo.shmall; | |||
1108 | ||||
1109 | if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))(req->newfunc)(req, arg1, sizeof(int64_t))) != 0) { | |||
1110 | sysctl_shminfo_ret = error; | |||
1111 | goto sysctl_shminfo_out; | |||
1112 | } | |||
1113 | ||||
1114 | if (arg1 == &shminfo.shmmax) { | |||
1115 | /* shmmax needs to be page-aligned */ | |||
1116 | if (shminfo.shmmax & PAGE_MASK_64(unsigned long long)(4096 - 1) || shminfo.shmmax < 0) { | |||
1117 | shminfo.shmmax = saved_shmmax; | |||
1118 | sysctl_shminfo_ret = EINVAL22; | |||
1119 | goto sysctl_shminfo_out; | |||
1120 | } | |||
1121 | } | |||
1122 | else if (arg1 == &shminfo.shmmin) { | |||
1123 | if (shminfo.shmmin < 0) { | |||
1124 | shminfo.shmmin = saved_shmmin; | |||
1125 | sysctl_shminfo_ret = EINVAL22; | |||
1126 | goto sysctl_shminfo_out; | |||
1127 | } | |||
1128 | } | |||
1129 | else if (arg1 == &shminfo.shmseg) { | |||
1130 | /* add a sanity check - 20847256 */ | |||
1131 | if (shminfo.shmseg > INT32_MAX2147483647 || shminfo.shmseg < 0) { | |||
1132 | shminfo.shmseg = saved_shmseg; | |||
1133 | sysctl_shminfo_ret = EINVAL22; | |||
1134 | goto sysctl_shminfo_out; | |||
1135 | } | |||
1136 | } | |||
1137 | else if (arg1 == &shminfo.shmmni) { | |||
1138 | /* add a sanity check - 20847256 */ | |||
1139 | if (shminfo.shmmni > INT32_MAX2147483647 || shminfo.shmmni < 0) { | |||
1140 | shminfo.shmmni = saved_shmmni; | |||
1141 | sysctl_shminfo_ret = EINVAL22; | |||
1142 | goto sysctl_shminfo_out; | |||
1143 | } | |||
1144 | } | |||
1145 | else if (arg1 == &shminfo.shmall) { | |||
1146 | /* add a sanity check - 20847256 */ | |||
1147 | if (shminfo.shmall > INT32_MAX2147483647 || shminfo.shmall < 0) { | |||
1148 | shminfo.shmall = saved_shmall; | |||
1149 | sysctl_shminfo_ret = EINVAL22; | |||
1150 | goto sysctl_shminfo_out; | |||
1151 | } | |||
1152 | } | |||
1153 | sysctl_shminfo_ret = 0; | |||
1154 | sysctl_shminfo_out: | |||
1155 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
1156 | return sysctl_shminfo_ret; | |||
1157 | } | |||
1158 | ||||
1159 | static int | |||
1160 | IPCS_shm_sysctl(__unused__attribute__((unused)) struct sysctl_oid *oidp, __unused__attribute__((unused)) void *arg1, | |||
1161 | __unused__attribute__((unused)) int arg2, struct sysctl_req *req) | |||
1162 | { | |||
1163 | int error; | |||
1164 | int cursor; | |||
1165 | union { | |||
1166 | struct user32_IPCS_command u32; | |||
1167 | struct user_IPCS_command u64; | |||
1168 | } ipcs; | |||
1169 | struct user32_shmid_ds shmid_ds32; /* post conversion, 32 bit version */ | |||
1170 | struct user_shmid_ds shmid_ds__shmid_ds_new; /* 64 bit version */ | |||
1171 | void *shmid_dsp; | |||
1172 | size_t ipcs_sz = sizeof(struct user_IPCS_command); | |||
1173 | size_t shmid_ds_sz = sizeof(struct user_shmid_ds); | |||
1174 | struct proc *p = current_proc(); | |||
1175 | ||||
1176 | SYSV_SHM_SUBSYS_LOCK()lck_mtx_lock(&sysv_shm_subsys_mutex); | |||
1177 | ||||
1178 | if ((error = shminit())) { | |||
| ||||
1179 | goto ipcs_shm_sysctl_out; | |||
1180 | } | |||
1181 | ||||
1182 | if (!IS_64BIT_PROCESS(p)) { | |||
1183 | ipcs_sz = sizeof(struct user32_IPCS_command); | |||
1184 | shmid_ds_sz = sizeof(struct user32_shmid_ds); | |||
1185 | } | |||
1186 | ||||
1187 | /* Copy in the command structure */ | |||
1188 | if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)(req->newfunc)(req, &ipcs, ipcs_sz)) != 0) { | |||
1189 | goto ipcs_shm_sysctl_out; | |||
1190 | } | |||
1191 | ||||
1192 | if (!IS_64BIT_PROCESS(p)) /* convert in place */ | |||
1193 | ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data)((user_addr_t)((uintptr_t)(ipcs.u32.ipcs_data))); | |||
1194 | ||||
1195 | /* Let us version this interface... */ | |||
1196 | if (ipcs.u64.ipcs_magic != IPCS_MAGIC0x00000001) { | |||
1197 | error = EINVAL22; | |||
1198 | goto ipcs_shm_sysctl_out; | |||
1199 | } | |||
1200 | ||||
1201 | switch(ipcs.u64.ipcs_op) { | |||
1202 | case IPCS_SHM_CONF0x00000001: /* Obtain global configuration data */ | |||
1203 | if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) { | |||
1204 | if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */ | |||
1205 | error = ENOMEM12; | |||
1206 | break; | |||
1207 | } | |||
1208 | error = ERANGE34; | |||
1209 | break; | |||
1210 | } | |||
1211 | error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen); | |||
1212 | break; | |||
1213 | ||||
1214 | case IPCS_SHM_ITER0x00000002: /* Iterate over existing segments */ | |||
1215 | cursor = ipcs.u64.ipcs_cursor; | |||
1216 | if (cursor < 0 || cursor >= shminfo.shmmni) { | |||
1217 | error = ERANGE34; | |||
1218 | break; | |||
1219 | } | |||
1220 | if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) { | |||
1221 | error = EINVAL22; | |||
1222 | break; | |||
1223 | } | |||
1224 | for( ; cursor < shminfo.shmmni; cursor++) { | |||
1225 | if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED0x0800) | |||
1226 | break; | |||
1227 | continue; | |||
1228 | } | |||
1229 | if (cursor == shminfo.shmmni) { | |||
1230 | error = ENOENT2; | |||
1231 | break; | |||
1232 | } | |||
1233 | ||||
1234 | shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */ | |||
1235 | ||||
1236 | /* | |||
1237 | * If necessary, convert the 64 bit kernel segment | |||
1238 | * descriptor to a 32 bit user one. | |||
1239 | */ | |||
1240 | if (!IS_64BIT_PROCESS(p)) { | |||
1241 | shmid_ds_64to32(shmid_dsp, &shmid_ds32); | |||
1242 | ||||
1243 | /* Clear kernel reserved pointer before copying to user space */ | |||
1244 | shmid_ds32.shm_internal = (user32_addr_t)0; | |||
1245 | ||||
1246 | shmid_dsp = &shmid_ds32; | |||
1247 | } else { | |||
1248 | memcpy(&shmid_ds__shmid_ds_new, shmid_dsp, sizeof(shmid_ds__shmid_ds_new)); | |||
1249 | ||||
1250 | /* Clear kernel reserved pointer before copying to user space */ | |||
1251 | shmid_ds__shmid_ds_new.shm_internal = USER_ADDR_NULL((user_addr_t) 0); | |||
1252 | ||||
1253 | shmid_dsp = &shmid_ds__shmid_ds_new; | |||
1254 | } | |||
1255 | error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen); | |||
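/*
 * Analyzer finding (line 1255, column 11): this copyout() is the flagged
 * call.  For a 64-bit caller shmid_dsp points at the stack local
 * shmid_ds__shmid_ds_new, which was populated with a whole-struct memcpy()
 * from shmsegs[]; the padding bytes inside struct user_shmid_ds are never
 * cleared, so they can carry kernel memory to user space.  See the note
 * under the warning at the top of this report for a hedged fix sketch.
 */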
| ||||
1256 | if (!error) { | |||
1257 | /* update cursor */ | |||
1258 | ipcs.u64.ipcs_cursor = cursor + 1; | |||
1259 | ||||
1260 | if (!IS_64BIT_PROCESS(p)) /* convert in place */ | |||
1261 | ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data)( ((user32_addr_t)((uintptr_t) (ipcs.u64.ipcs_data))) ); | |||
1262 | ||||
1263 | error = SYSCTL_OUT(req, &ipcs, ipcs_sz)(req->oldfunc)(req, &ipcs, ipcs_sz); | |||
1264 | } | |||
1265 | break; | |||
1266 | ||||
1267 | default: | |||
1268 | error = EINVAL22; | |||
1269 | break; | |||
1270 | } | |||
1271 | ipcs_shm_sysctl_out: | |||
1272 | SYSV_SHM_SUBSYS_UNLOCK()lck_mtx_unlock(&sysv_shm_subsys_mutex); | |||
1273 | return(error); | |||
1274 | } | |||
1275 | ||||
1276 | SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV")struct sysctl_oid_list sysctl__kern_sysv_children; struct sysctl_oid sysctl__kern_sysv = { &sysctl__kern_children, { 0 }, 42, (int)(1|(0x80000000|0x40000000) | 0x00800000 | 0x10000000|0x00400000 ), (void*)&sysctl__kern_sysv_children, (int)(0), "sysv", 0 , "N", "SYSV", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv;; | |||
1277 | ||||
1278 | SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,struct sysctl_oid sysctl__kern_sysv_shmmax = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmmax, (int)(0), "shmmax", &sysctl_shminfo , "Q", "shmmax", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmmax __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmmax | |||
1279 | &shminfo.shmmax, 0, &sysctl_shminfo ,"Q","shmmax")struct sysctl_oid sysctl__kern_sysv_shmmax = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmmax, (int)(0), "shmmax", &sysctl_shminfo , "Q", "shmmax", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmmax __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmmax; | |||
1280 | ||||
1281 | SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,struct sysctl_oid sysctl__kern_sysv_shmmin = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmmin, (int)(0), "shmmin", &sysctl_shminfo , "Q", "shmmin", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmmin __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmmin | |||
1282 | &shminfo.shmmin, 0, &sysctl_shminfo ,"Q","shmmin")struct sysctl_oid sysctl__kern_sysv_shmmin = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmmin, (int)(0), "shmmin", &sysctl_shminfo , "Q", "shmmin", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmmin __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmmin; | |||
1283 | ||||
1284 | SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,struct sysctl_oid sysctl__kern_sysv_shmmni = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmmni, (int)(0), "shmmni", &sysctl_shminfo , "Q", "shmmni", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmmni __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmmni | |||
1285 | &shminfo.shmmni, 0, &sysctl_shminfo ,"Q","shmmni")struct sysctl_oid sysctl__kern_sysv_shmmni = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmmni, (int)(0), "shmmni", &sysctl_shminfo , "Q", "shmmni", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmmni __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmmni; | |||
1286 | ||||
1287 | SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,struct sysctl_oid sysctl__kern_sysv_shmseg = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmseg, (int)(0), "shmseg", &sysctl_shminfo , "Q", "shmseg", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmseg __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmseg | |||
1288 | &shminfo.shmseg, 0, &sysctl_shminfo ,"Q","shmseg")struct sysctl_oid sysctl__kern_sysv_shmseg = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmseg, (int)(0), "shmseg", &sysctl_shminfo , "Q", "shmseg", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmseg __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmseg; | |||
1289 | ||||
1290 | SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,struct sysctl_oid sysctl__kern_sysv_shmall = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmall, (int)(0), "shmall", &sysctl_shminfo , "Q", "shmall", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmall __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmall | |||
1291 | &shminfo.shmall, 0, &sysctl_shminfo ,"Q","shmall")struct sysctl_oid sysctl__kern_sysv_shmall = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(4 | (0x80000000|0x40000000) | 0x00800000 |0x00400000), &shminfo.shmall, (int)(0), "shmall", &sysctl_shminfo , "Q", "shmall", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_shmall __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_shmall; | |||
1292 | ||||
1293 | SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS")struct sysctl_oid_list sysctl__kern_sysv_ipcs_children; struct sysctl_oid sysctl__kern_sysv_ipcs = { &sysctl__kern_sysv_children , { 0 }, (-1), (int)(1|(0x80000000|0x40000000) | 0x00800000 | 0x10000000|0x00400000), (void*)&sysctl__kern_sysv_ipcs_children , (int)(0), "ipcs", 0, "N", "SYSVIPCS", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_ipcs __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_ipcs;; | |||
1294 | ||||
1295 | SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,struct sysctl_oid sysctl__kern_sysv_ipcs_shm = { &sysctl__kern_sysv_ipcs_children , { 0 }, (-1), (int)((0x80000000|0x40000000) | 0x10000000 | 0x00800000 |0x00400000), 0, (int)(0), "shm", IPCS_shm_sysctl, "S,IPCS_shm_command" , "ipcs shm command interface", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_ipcs_shm __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_ipcs_shm | |||
1296 | 0, 0, IPCS_shm_sysctl,struct sysctl_oid sysctl__kern_sysv_ipcs_shm = { &sysctl__kern_sysv_ipcs_children , { 0 }, (-1), (int)((0x80000000|0x40000000) | 0x10000000 | 0x00800000 |0x00400000), 0, (int)(0), "shm", IPCS_shm_sysctl, "S,IPCS_shm_command" , "ipcs shm command interface", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_ipcs_shm __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_ipcs_shm | |||
1297 | "S,IPCS_shm_command",struct sysctl_oid sysctl__kern_sysv_ipcs_shm = { &sysctl__kern_sysv_ipcs_children , { 0 }, (-1), (int)((0x80000000|0x40000000) | 0x10000000 | 0x00800000 |0x00400000), 0, (int)(0), "shm", IPCS_shm_sysctl, "S,IPCS_shm_command" , "ipcs shm command interface", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_ipcs_shm __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_ipcs_shm | |||
1298 | "ipcs shm command interface")struct sysctl_oid sysctl__kern_sysv_ipcs_shm = { &sysctl__kern_sysv_ipcs_children , { 0 }, (-1), (int)((0x80000000|0x40000000) | 0x10000000 | 0x00800000 |0x00400000), 0, (int)(0), "shm", IPCS_shm_sysctl, "S,IPCS_shm_command" , "ipcs shm command interface", 1, 0 }; void const * __set___sysctl_set_sym_sysctl__kern_sysv_ipcs_shm __attribute__ ((section("__DATA,__sysctl_set"),used)) = (void *)&sysctl__kern_sysv_ipcs_shm; | |||
1299 | #endif /* SYSV_SHM */ | |||
1300 | ||||
1301 | /* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */ |