Bug Summary

File: kern/sysv_shm.c
Warning: line 1597, column 11
Copies out a struct with uncleared padding (>= 2 bytes)
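
The warning class is a kernel information leak: a structure is built in kernel memory and handed to copyout()/SYSCTL_OUT() in full, but the compiler-inserted padding between its fields (at least 2 bytes here) is never written, so stale kernel data in those padding bytes is exported to user space. Below is a minimal sketch of the general pattern and its usual remedy; the variable and field names (ds, shmseg, uaddr) are chosen for illustration and are not taken from the flagged statement at line 1597.

    struct shmid_ds ds;     /* may contain padding holes between fields */

    /* Leaky: only the named fields are written; padding keeps stale data. */
    ds.shm_perm = shmseg->u.shm_perm;
    ds.shm_segsz = shmseg->u.shm_segsz;
    error = copyout(&ds, uaddr, sizeof(ds));

    /* Remedy: clear the whole object before filling it in, then copy out. */
    bzero(&ds, sizeof(ds));
    ds.shm_perm = shmseg->u.shm_perm;
    ds.shm_segsz = shmseg->u.shm_segsz;
    error = copyout(&ds, uaddr, sizeof(ds));

memset(&ds, 0, sizeof(ds)) works equally well; the point is that every byte of the object passed to copyout() must have a defined value.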

Annotated Source Code

1/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
2/*-
3 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Adam Glass and Charles
16 * Hannum.
17 * 4. The names of the authors may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31/*-
32 * Copyright (c) 2003-2005 McAfee, Inc.
33 * All rights reserved.
34 *
35 * This software was developed for the FreeBSD Project in part by McAfee
36 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
37 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
38 * program.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62#include <sys/cdefs.h>
63__FBSDID("$FreeBSD: releng/11.0/sys/kern/sysv_shm.c 298661 2016-04-26 19:57:35Z cem $");
64
65#include "opt_compat.h"
66#include "opt_sysvipc.h"
67
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/kernel.h>
71#include <sys/limits.h>
72#include <sys/lock.h>
73#include <sys/sysctl.h>
74#include <sys/shm.h>
75#include <sys/proc.h>
76#include <sys/malloc.h>
77#include <sys/mman.h>
78#include <sys/module.h>
79#include <sys/mutex.h>
80#include <sys/racct.h>
81#include <sys/resourcevar.h>
82#include <sys/rwlock.h>
83#include <sys/stat.h>
84#include <sys/syscall.h>
85#include <sys/syscallsubr.h>
86#include <sys/sysent.h>
87#include <sys/sysproto.h>
88#include <sys/jail.h>
89
90#include <security/mac/mac_framework.h>
91
92#include <vm/vm.h>
93#include <vm/vm_param.h>
94#include <vm/pmap.h>
95#include <vm/vm_object.h>
96#include <vm/vm_map.h>
97#include <vm/vm_page.h>
98#include <vm/vm_pager.h>
99
100FEATURE(sysv_shm, "System V shared memory segments support");
101
102static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
103
104static int shmget_allocate_segment(struct thread *td,
105 struct shmget_args *uap, int mode);
106static int shmget_existing(struct thread *td, struct shmget_args *uap,
107 int mode, int segnum);
108
109#define SHMSEG_FREE 0x0200
110#define SHMSEG_REMOVED 0x0400
111#define SHMSEG_ALLOCATED 0x0800
112
113static int shm_last_free, shm_nused, shmalloced;
114vm_size_t shm_committed;
115static struct shmid_kernel *shmsegs;
116static unsigned shm_prison_slot;
117
118struct shmmap_state {
119 vm_offset_t va;
120 int shmid;
121};
122
123static void shm_deallocate_segment(struct shmid_kernel *);
124static int shm_find_segment_by_key(struct prison *, key_t);
125static struct shmid_kernel *shm_find_segment(struct prison *, int, bool);
126static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
127static void shmrealloc(void);
128static int shminit(void);
129static int sysvshm_modload(struct module *, int, void *);
130static int shmunload(void);
131static void shmexit_myhook(struct vmspace *vm);
132static void shmfork_myhook(struct proc *p1, struct proc *p2);
133static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
134static void shm_remove(struct shmid_kernel *, int);
135static struct prison *shm_find_prison(struct ucred *);
136static int shm_prison_cansee(struct prison *, struct shmid_kernel *);
137static int shm_prison_check(void *, void *);
138static int shm_prison_set(void *, void *);
139static int shm_prison_get(void *, void *);
140static int shm_prison_remove(void *, void *);
141static void shm_prison_cleanup(struct prison *);
142
143/*
144 * Tuneable values.
145 */
146#ifndef SHMMAXPGS
147#define SHMMAXPGS 131072 /* Note: sysv shared memory is swap backed. */
148#endif
149#ifndef SHMMAX
150#define SHMMAX (SHMMAXPGS*PAGE_SIZE)
151#endif
152#ifndef SHMMIN
153#define SHMMIN 1
154#endif
155#ifndef SHMMNI
156#define SHMMNI 192
157#endif
158#ifndef SHMSEG
159#define SHMSEG 128
160#endif
161#ifndef SHMALL
162#define SHMALL (SHMMAXPGS)
163#endif
164
165struct shminfo shminfo = {
166 .shmmax = SHMMAX,
167 .shmmin = SHMMIN,
168 .shmmni = SHMMNI,
169 .shmseg = SHMSEG,
170 .shmall = SHMALL
171};
172
173static int shm_use_phys;
174static int shm_allow_removed = 1;
175
176SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RWTUN, &shminfo.shmmax, 0,
177 "Maximum shared memory segment size");
178SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RWTUN, &shminfo.shmmin, 0,
179 "Minimum shared memory segment size");
180SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
181 "Number of shared memory identifiers");
182SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
183 "Number of segments per process");
184SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RWTUN, &shminfo.shmall, 0,
185 "Maximum number of pages available for shared memory");
186SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RWTUN,
187 &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
188SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RWTUN,
189 &shm_allow_removed, 0,
190 "Enable/Disable attachment to attached segments marked for removal");
191SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD |
192 CTLFLAG_MPSAFE, NULL, 0, sysctl_shmsegs, "",
193 "Current number of shared memory segments allocated");
194
195static struct sx sysvshmsx;
196#define SYSVSHM_LOCK() sx_xlock(&sysvshmsx)
197#define SYSVSHM_UNLOCK() sx_xunlock(&sysvshmsx)
198#define SYSVSHM_ASSERT_LOCKED() sx_assert(&sysvshmsx, SA_XLOCKED)
199
200static int
201shm_find_segment_by_key(struct prison *pr, key_t key)
202{
203 int i;
204
205 for (i = 0; i < shmalloced; i++)
206 if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
207 shmsegs[i].cred != NULL &&
208 shmsegs[i].cred->cr_prison == pr &&
209 shmsegs[i].u.shm_perm.key == key)
210 return (i);
211 return (-1);
212}
213
214/*
215 * Finds segment either by shmid if is_shmid is true, or by segnum if
216 * is_shmid is false.
217 */
218static struct shmid_kernel *
219shm_find_segment(struct prison *rpr, int arg, bool is_shmid)
220{
221 struct shmid_kernel *shmseg;
222 int segnum;
223
224 segnum = is_shmid ? IPCID_TO_IX(arg) : arg;
225 if (segnum < 0 || segnum >= shmalloced)
226 return (NULL);
227 shmseg = &shmsegs[segnum];
228 if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
229 (!shm_allow_removed &&
230 (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) ||
231 (is_shmid && shmseg->u.shm_perm.seq != IPCID_TO_SEQ(arg)) ||
232 shm_prison_cansee(rpr, shmseg) != 0)
233 return (NULL);
234 return (shmseg);
235}
236
237static void
238shm_deallocate_segment(struct shmid_kernel *shmseg)
239{
240 vm_size_t size;
241
242 SYSVSHM_ASSERT_LOCKED();
243
244 vm_object_deallocate(shmseg->object);
245 shmseg->object = NULL;
246 size = round_page(shmseg->u.shm_segsz);
247 shm_committed -= btoc(size);
248 shm_nused--;
249 shmseg->u.shm_perm.mode = SHMSEG_FREE;
250#ifdef MAC
251 mac_sysvshm_cleanup(shmseg);
252#endif
253 racct_sub_cred(shmseg->cred, RACCT_NSHM, 1);
254 racct_sub_cred(shmseg->cred, RACCT_SHMSIZE, size);
255 crfree(shmseg->cred);
256 shmseg->cred = NULL;
257}
258
259static int
260shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
261{
262 struct shmid_kernel *shmseg;
263 int segnum, result;
264 vm_size_t size;
265
266 SYSVSHM_ASSERT_LOCKED();
267 segnum = IPCID_TO_IX(shmmap_s->shmid);
268 KASSERT(segnum >= 0 && segnum < shmalloced,
269 ("segnum %d shmalloced %d", segnum, shmalloced));
270
271 shmseg = &shmsegs[segnum];
272 size = round_page(shmseg->u.shm_segsz);
273 result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
274 if (result != KERN_SUCCESS)
275 return (EINVAL);
276 shmmap_s->shmid = -1;
277 shmseg->u.shm_dtime = time_second;
278 if ((--shmseg->u.shm_nattch <= 0) &&
279 (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
280 shm_deallocate_segment(shmseg);
281 shm_last_free = segnum;
282 }
283 return (0);
284}
285
286static void
287shm_remove(struct shmid_kernel *shmseg, int segnum)
288{
289
290 shmseg->u.shm_perm.key = IPC_PRIVATE;
291 shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
292 if (shmseg->u.shm_nattch <= 0) {
293 shm_deallocate_segment(shmseg);
294 shm_last_free = segnum;
295 }
296}
297
298static struct prison *
299shm_find_prison(struct ucred *cred)
300{
301 struct prison *pr, *rpr;
302
303 pr = cred->cr_prison;
304 prison_lock(pr);
305 rpr = osd_jail_get(pr, shm_prison_slot);
306 prison_unlock(pr);
307 return rpr;
308}
309
310static int
311shm_prison_cansee(struct prison *rpr, struct shmid_kernel *shmseg)
312{
313
314 if (shmseg->cred == NULL ||
315 !(rpr == shmseg->cred->cr_prison ||
316 prison_ischild(rpr, shmseg->cred->cr_prison)))
317 return (EINVAL);
318 return (0);
319}
320
321static int
322kern_shmdt_locked(struct thread *td, const void *shmaddr)
323{
324 struct proc *p = td->td_proc;
325 struct shmmap_state *shmmap_s;
326#ifdef MAC
327 struct shmid_kernel *shmsegptr;
328 int error;
329#endif
330 int i;
331
332 SYSVSHM_ASSERT_LOCKED();
333 if (shm_find_prison(td->td_ucred) == NULL)
334 return (ENOSYS);
335 shmmap_s = p->p_vmspace->vm_shm;
336 if (shmmap_s == NULL)
337 return (EINVAL);
338 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
339 if (shmmap_s->shmid != -1 &&
340 shmmap_s->va == (vm_offset_t)shmaddr) {
341 break;
342 }
343 }
344 if (i == shminfo.shmseg)
345 return (EINVAL);
346#ifdef MAC
347 shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
348 error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr);
349 if (error != 0)
350 return (error);
351#endif
352 return (shm_delete_mapping(p->p_vmspace, shmmap_s));
353}
354
355#ifndef _SYS_SYSPROTO_H_
356struct shmdt_args {
357 const void *shmaddr;
358};
359#endif
360int
361sys_shmdt(struct thread *td, struct shmdt_args *uap)
362{
363 int error;
364
365 SYSVSHM_LOCK();
366 error = kern_shmdt_locked(td, uap->shmaddr);
367 SYSVSHM_UNLOCK();
368 return (error);
369}
370
371static int
372kern_shmat_locked(struct thread *td, int shmid, const void *shmaddr,
373 int shmflg)
374{
375 struct prison *rpr;
376 struct proc *p = td->td_proc;
377 struct shmid_kernel *shmseg;
378 struct shmmap_state *shmmap_s;
379 vm_offset_t attach_va;
380 vm_prot_t prot;
381 vm_size_t size;
382 int error, i, rv;
383
384 SYSVSHM_ASSERT_LOCKED();
385 rpr = shm_find_prison(td->td_ucred);
386 if (rpr == NULL)
387 return (ENOSYS);
388 shmmap_s = p->p_vmspace->vm_shm;
389 if (shmmap_s == NULL) {
390 shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state),
391 M_SHM, M_WAITOK);
392 for (i = 0; i < shminfo.shmseg; i++)
393 shmmap_s[i].shmid = -1;
394 KASSERT(p->p_vmspace->vm_shm == NULL, ("raced"));
395 p->p_vmspace->vm_shm = shmmap_s;
396 }
397 shmseg = shm_find_segment(rpr, shmid, true);
398 if (shmseg == NULL)
399 return (EINVAL);
400 error = ipcperm(td, &shmseg->u.shm_perm,
401 (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
402 if (error != 0)
403 return (error);
404#ifdef MAC
405 error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg);
406 if (error != 0)
407 return (error);
408#endif
409 for (i = 0; i < shminfo.shmseg; i++) {
410 if (shmmap_s->shmid == -1)
411 break;
412 shmmap_s++;
413 }
414 if (i >= shminfo.shmseg)
415 return (EMFILE);
416 size = round_page(shmseg->u.shm_segsz);
417 prot = VM_PROT_READ;
418 if ((shmflg & SHM_RDONLY) == 0)
419 prot |= VM_PROT_WRITE;
420 if (shmaddr != NULL) {
421 if ((shmflg & SHM_RND) != 0)
422 attach_va = rounddown2((vm_offset_t)shmaddr, SHMLBA);
423 else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0)
424 attach_va = (vm_offset_t)shmaddr;
425 else
426 return (EINVAL);
427 } else {
428 /*
429 * This is just a hint to vm_map_find() about where to
430 * put it.
431 */
432 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
433 lim_max(td, RLIMIT_DATA));
434 }
435
436 vm_object_reference(shmseg->object);
437 rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->object, 0, &attach_va,
438 size, 0, shmaddr != NULL ? VMFS_NO_SPACE : VMFS_OPTIMAL_SPACE,
439 prot, prot, MAP_INHERIT_SHARE | MAP_PREFAULT_PARTIAL);
440 if (rv != KERN_SUCCESS) {
441 vm_object_deallocate(shmseg->object);
442 return (ENOMEM);
443 }
444
445 shmmap_s->va = attach_va;
446 shmmap_s->shmid = shmid;
447 shmseg->u.shm_lpid = p->p_pid;
448 shmseg->u.shm_atime = time_second;
449 shmseg->u.shm_nattch++;
450 td->td_retval[0] = attach_va;
451 return (error);
452}
453
454int
455kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
456{
457 int error;
458
459 SYSVSHM_LOCK();
460 error = kern_shmat_locked(td, shmid, shmaddr, shmflg);
461 SYSVSHM_UNLOCK();
462 return (error);
463}
464
465#ifndef _SYS_SYSPROTO_H_
466struct shmat_args {
467 int shmid;
468 const void *shmaddr;
469 int shmflg;
470};
471#endif
472int
473sys_shmat(struct thread *td, struct shmat_args *uap)
474{
475
476 return (kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg));
477}
478
479static int
480kern_shmctl_locked(struct thread *td, int shmid, int cmd, void *buf,
481 size_t *bufsz)
482{
483 struct prison *rpr;
484 struct shmid_kernel *shmseg;
485 struct shmid_ds *shmidp;
486 struct shm_info shm_info;
487 int error;
488
489 SYSVSHM_ASSERT_LOCKED();
490
491 rpr = shm_find_prison(td->td_ucred);
492 if (rpr == NULL)
493 return (ENOSYS);
494
495 switch (cmd) {
496 /*
497 * It is possible that kern_shmctl is being called from the Linux ABI
498 * layer, in which case, we will need to implement IPC_INFO. It should
499 * be noted that other shmctl calls will be funneled through here for
500 * Linix binaries as well.
501 *
502 * NB: The Linux ABI layer will convert this data to structure(s) more
503 * consistent with the Linux ABI.
504 */
505 case IPC_INFO:
506 memcpy(buf, &shminfo, sizeof(shminfo));
507 if (bufsz)
508 *bufsz = sizeof(shminfo);
509 td->td_retval[0] = shmalloced;
510 return (0);
511 case SHM_INFO: {
512 shm_info.used_ids = shm_nused;
513 shm_info.shm_rss = 0; /*XXX where to get from ? */
514 shm_info.shm_tot = 0; /*XXX where to get from ? */
515 shm_info.shm_swp = 0; /*XXX where to get from ? */
516 shm_info.swap_attempts = 0; /*XXX where to get from ? */
517 shm_info.swap_successes = 0; /*XXX where to get from ? */
518 memcpy(buf, &shm_info, sizeof(shm_info));
519 if (bufsz != NULL)
520 *bufsz = sizeof(shm_info);
521 td->td_retval[0] = shmalloced;
522 return (0);
523 }
524 }
525 shmseg = shm_find_segment(rpr, shmid, cmd != SHM_STAT);
526 if (shmseg == NULL)
527 return (EINVAL);
528#ifdef MAC
529 error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd);
530 if (error != 0)
531 return (error);
532#endif
533 switch (cmd) {
534 case SHM_STAT:
535 case IPC_STAT:
536 shmidp = (struct shmid_ds *)buf;
537 error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
538 if (error != 0)
539 return (error);
540 memcpy(shmidp, &shmseg->u, sizeof(struct shmid_ds));
541 if (td->td_ucred->cr_prison != shmseg->cred->cr_prison)
542 shmidp->shm_perm.key = IPC_PRIVATE;
543 if (bufsz != NULL)
544 *bufsz = sizeof(struct shmid_ds);
545 if (cmd == SHM_STAT) {
546 td->td_retval[0] = IXSEQ_TO_IPCID(shmid,
547 shmseg->u.shm_perm);
548 }
549 break;
550 case IPC_SET:
551 shmidp = (struct shmid_ds *)buf;
552 error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
553 if (error != 0)
554 return (error);
555 shmseg->u.shm_perm.uid = shmidp->shm_perm.uid;
556 shmseg->u.shm_perm.gid = shmidp->shm_perm.gid;
557 shmseg->u.shm_perm.mode =
558 (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
559 (shmidp->shm_perm.mode & ACCESSPERMS);
560 shmseg->u.shm_ctime = time_second;
561 break;
562 case IPC_RMID:
563 error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
564 if (error != 0)
565 return (error);
566 shm_remove(shmseg, IPCID_TO_IX(shmid));
567 break;
568#if 0
569 case SHM_LOCK:
570 case SHM_UNLOCK:
571#endif
572 default:
573 error = EINVAL;
574 break;
575 }
576 return (error);
577}
578
579int
580kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
581{
582 int error;
583
584 SYSVSHM_LOCK();
585 error = kern_shmctl_locked(td, shmid, cmd, buf, bufsz);
586 SYSVSHM_UNLOCK();
587 return (error);
588}
589
590
591#ifndef _SYS_SYSPROTO_H_
592struct shmctl_args {
593 int shmid;
594 int cmd;
595 struct shmid_ds *buf;
596};
597#endif
598int
599sys_shmctl(struct thread *td, struct shmctl_args *uap)
600{
601 int error;
602 struct shmid_ds buf;
603 size_t bufsz;
604
605 /*
606 * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support
607 * Linux binaries. If we see the call come through the FreeBSD ABI,
608 * return an error back to the user since we do not to support this.
609 */
610 if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
611 uap->cmd == SHM_STAT)
612 return (EINVAL);
613
614 /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
615 if (uap->cmd == IPC_SET) {
616 if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
617 goto done;
618 }
619
620 error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
621 if (error)
622 goto done;
623
624 /* Cases in which we need to copyout */
625 switch (uap->cmd) {
626 case IPC_STAT:
627 error = copyout(&buf, uap->buf, bufsz);
628 break;
629 }
630
631done:
632 if (error) {
633 /* Invalidate the return value */
634 td->td_retval[0] = -1;
635 }
636 return (error);
637}
638
639
640static int
641shmget_existing(struct thread *td, struct shmget_args *uap, int mode,
642 int segnum)
643{
644 struct shmid_kernel *shmseg;
645#ifdef MAC1
646 int error;
647#endif
648
649 SYSVSHM_ASSERT_LOCKED();
650 KASSERT(segnum >= 0 && segnum < shmalloced,
651 ("segnum %d shmalloced %d", segnum, shmalloced));
652 shmseg = &shmsegs[segnum];
653 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
654 return (EEXIST);
655#ifdef MAC
656 error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, uap->shmflg);
657 if (error != 0)
658 return (error);
659#endif
660 if (uap->size != 0 && uap->size > shmseg->u.shm_segsz)
661 return (EINVAL);
662 td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
663 return (0);
664}
665
666static int
667shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
668{
669 struct ucred *cred = td->td_ucred;
670 struct shmid_kernel *shmseg;
671 vm_object_t shm_object;
672 int i, segnum;
673 size_t size;
674
675 SYSVSHM_ASSERT_LOCKED();
676
677 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
678 return (EINVAL);
679 if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
680 return (ENOSPC);
681 size = round_page(uap->size);
682 if (shm_committed + btoc(size) > shminfo.shmall)
683 return (ENOMEM);
684 if (shm_last_free < 0) {
685 shmrealloc(); /* Maybe expand the shmsegs[] array. */
686 for (i = 0; i < shmalloced; i++)
687 if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
688 break;
689 if (i == shmalloced)
690 return (ENOSPC);
691 segnum = i;
692 } else {
693 segnum = shm_last_free;
694 shm_last_free = -1;
695 }
696 KASSERT(segnum >= 0 && segnum < shmalloced,
697 ("segnum %d shmalloced %d", segnum, shmalloced));
698 shmseg = &shmsegs[segnum];
699#ifdef RACCT
700 if (racct_enable) {
701 PROC_LOCK(td->td_proc);
702 if (racct_add(td->td_proc, RACCT_NSHM, 1)) {
703 PROC_UNLOCK(td->td_proc);
704 return (ENOSPC);
705 }
706 if (racct_add(td->td_proc, RACCT_SHMSIZE, size)) {
707 racct_sub(td->td_proc, RACCT_NSHM, 1);
708 PROC_UNLOCK(td->td_proc);
709 return (ENOMEM);
710 }
711 PROC_UNLOCK(td->td_proc);
712 }
713#endif
714
715 /*
716 * We make sure that we have allocated a pager before we need
717 * to.
718 */
719 shm_object = vm_pager_allocate(shm_use_phys ? OBJT_PHYS : OBJT_SWAP,
720 0, size, VM_PROT_DEFAULT, 0, cred);
721 if (shm_object == NULL) {
722#ifdef RACCT
723 if (racct_enable) {
724 PROC_LOCK(td->td_proc);
725 racct_sub(td->td_proc, RACCT_NSHM, 1);
726 racct_sub(td->td_proc, RACCT_SHMSIZE, size);
727 PROC_UNLOCK(td->td_proc);
728 }
729#endif
730 return (ENOMEM);
731 }
732 shm_object->pg_color = 0;
733 VM_OBJECT_WLOCK(shm_object);
734 vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
735 vm_object_set_flag(shm_object, OBJ_COLORED | OBJ_NOSPLIT);
736 VM_OBJECT_WUNLOCK(shm_object);
737
738 shmseg->object = shm_object;
739 shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
740 shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
741 shmseg->u.shm_perm.mode = (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
742 shmseg->u.shm_perm.key = uap->key;
743 shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
744 shmseg->cred = crhold(cred);
745 shmseg->u.shm_segsz = uap->size;
746 shmseg->u.shm_cpid = td->td_proc->p_pid;
747 shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
748 shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
749#ifdef MAC
750 mac_sysvshm_create(cred, shmseg);
751#endif
752 shmseg->u.shm_ctime = time_second;
753 shm_committed += btoc(size);
754 shm_nused++;
755 td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
756
757 return (0);
758}
759
760#ifndef _SYS_SYSPROTO_H_
761struct shmget_args {
762 key_t key;
763 size_t size;
764 int shmflg;
765};
766#endif
767int
768sys_shmget(struct thread *td, struct shmget_args *uap)
769{
770 int segnum, mode;
771 int error;
772
773 if (shm_find_prison(td->td_ucred) == NULL)
774 return (ENOSYS);
775 mode = uap->shmflg & ACCESSPERMS;
776 SYSVSHM_LOCK();
777 if (uap->key == IPC_PRIVATE) {
778 error = shmget_allocate_segment(td, uap, mode);
779 } else {
780 segnum = shm_find_segment_by_key(td->td_ucred->cr_prison,
781 uap->key);
782 if (segnum >= 0)
783 error = shmget_existing(td, uap, mode, segnum);
784 else if ((uap->shmflg & IPC_CREAT) == 0)
785 error = ENOENT;
786 else
787 error = shmget_allocate_segment(td, uap, mode);
788 }
789 SYSVSHM_UNLOCK();
790 return (error);
791}
792
793static void
794shmfork_myhook(struct proc *p1, struct proc *p2)
795{
796 struct shmmap_state *shmmap_s;
797 size_t size;
798 int i;
799
800 SYSVSHM_LOCK();
801 size = shminfo.shmseg * sizeof(struct shmmap_state);
802 shmmap_s = malloc(size, M_SHM, M_WAITOK);
803 bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
804 p2->p_vmspace->vm_shm = shmmap_s;
805 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
806 if (shmmap_s->shmid != -1) {
807 KASSERT(IPCID_TO_IX(shmmap_s->shmid) >= 0 &&
808 IPCID_TO_IX(shmmap_s->shmid) < shmalloced,
809 ("segnum %d shmalloced %d",
810 IPCID_TO_IX(shmmap_s->shmid), shmalloced));
811 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
812 }
813 }
814 SYSVSHM_UNLOCK();
815}
816
817static void
818shmexit_myhook(struct vmspace *vm)
819{
820 struct shmmap_state *base, *shm;
821 int i;
822
823 base = vm->vm_shm;
824 if (base != NULL) {
825 vm->vm_shm = NULL;
826 SYSVSHM_LOCK();
827 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
828 if (shm->shmid != -1)
829 shm_delete_mapping(vm, shm);
830 }
831 SYSVSHM_UNLOCK();
832 free(base, M_SHM);
833 }
834}
835
836static void
837shmrealloc(void)
838{
839 struct shmid_kernel *newsegs;
840 int i;
841
842 SYSVSHM_ASSERT_LOCKED();
843
844 if (shmalloced >= shminfo.shmmni)
845 return;
846
847 newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
848 for (i = 0; i < shmalloced; i++)
849 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
850 for (; i < shminfo.shmmni; i++) {
851 newsegs[i].u.shm_perm.mode = SHMSEG_FREE;
852 newsegs[i].u.shm_perm.seq = 0;
853#ifdef MAC
854 mac_sysvshm_init(&newsegs[i]);
855#endif
856 }
857 free(shmsegs, M_SHM);
858 shmsegs = newsegs;
859 shmalloced = shminfo.shmmni;
860}
861
862static struct syscall_helper_data shm_syscalls[] = {
863 SYSCALL_INIT_HELPER(shmat),
864 SYSCALL_INIT_HELPER(shmctl),
865 SYSCALL_INIT_HELPER(shmdt),
866 SYSCALL_INIT_HELPER(shmget),
867#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
868 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
869 SYSCALL_INIT_HELPER_COMPAT(freebsd7_shmctl),
870#endif
871#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
872 SYSCALL_INIT_HELPER(shmsys),
873#endif
874 SYSCALL_INIT_LAST
875};
876
877#ifdef COMPAT_FREEBSD32
878#include <compat/freebsd32/freebsd32.h>
879#include <compat/freebsd32/freebsd32_ipc.h>
880#include <compat/freebsd32/freebsd32_proto.h>
881#include <compat/freebsd32/freebsd32_signal.h>
882#include <compat/freebsd32/freebsd32_syscall.h>
883#include <compat/freebsd32/freebsd32_util.h>
884
885static struct syscall_helper_data shm32_syscalls[] = {
886 SYSCALL32_INIT_HELPER_COMPAT(shmat),
887 SYSCALL32_INIT_HELPER_COMPAT(shmdt),
888 SYSCALL32_INIT_HELPER_COMPAT(shmget),
889 SYSCALL32_INIT_HELPER(freebsd32_shmsys),
890 SYSCALL32_INIT_HELPER(freebsd32_shmctl),
891#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
892 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
893 SYSCALL32_INIT_HELPER(freebsd7_freebsd32_shmctl),
894#endif
895 SYSCALL_INIT_LAST
896};
897#endif
898
899static int
900shminit(void)
901{
902 struct prison *pr;
903 void **rsv;
904 int i, error;
905 osd_method_t methods[PR_MAXMETHOD6] = {
906 [PR_METHOD_CHECK3] = shm_prison_check,
907 [PR_METHOD_SET2] = shm_prison_set,
908 [PR_METHOD_GET1] = shm_prison_get,
909 [PR_METHOD_REMOVE5] = shm_prison_remove,
910 };
911
912#ifndef BURN_BRIDGES
913 if (TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall)getenv_ulong(("kern.ipc.shmmaxpgs"), (&shminfo.shmall)) != 0)
914 printf("kern.ipc.shmmaxpgs is now called kern.ipc.shmall!\n");
915#endif
916 if (shminfo.shmmax == SHMMAX(131072*(1<<12))) {
917 /* Initialize shmmax dealing with possible overflow. */
918 for (i = PAGE_SIZE(1<<12); i != 0; i--) {
919 shminfo.shmmax = shminfo.shmall * i;
920 if ((shminfo.shmmax / shminfo.shmall) == (u_long)i)
921 break;
922 }
923 }
924 shmalloced = shminfo.shmmni;
925 shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK0x0002);
926 for (i = 0; i < shmalloced; i++) {
927 shmsegs[i].u.shm_perm.mode = SHMSEG_FREE0x0200;
928 shmsegs[i].u.shm_perm.seq = 0;
929#ifdef MAC1
930 mac_sysvshm_init(&shmsegs[i]);
931#endif
932 }
933 shm_last_free = 0;
934 shm_nused = 0;
935 shm_committed = 0;
936 sx_init(&sysvshmsx, "sysvshmsx")sx_init_flags((&sysvshmsx), ("sysvshmsx"), 0);
937 shmexit_hook = &shmexit_myhook;
938 shmfork_hook = &shmfork_myhook;
939
940 /* Set current prisons according to their allow.sysvipc. */
941 shm_prison_slot = osd_jail_register(NULL, methods);
942 rsv = osd_reserve(shm_prison_slot);
943 prison_lock(&prison0);
944 (void)osd_jail_set_reserved(&prison0, shm_prison_slot, rsv, &prison0);
945 prison_unlock(&prison0);
946 rsv = NULL((void *)0);
947 sx_slock(&allprison_lock);
948 TAILQ_FOREACH(pr, &allprison, pr_list) {
949 if (rsv == NULL((void *)0))
950 rsv = osd_reserve(shm_prison_slot);
951 prison_lock(pr);
952 if ((pr->pr_allow & PR_ALLOW_SYSVIPC0x0002) && pr->pr_ref > 0) {
953 (void)osd_jail_set_reserved(pr, shm_prison_slot, rsv,
954 &prison0);
955 rsv = NULL((void *)0);
956 }
957 prison_unlock(pr);
958 }
959 if (rsv != NULL((void *)0))
960 osd_free_reserved(rsv);
961 sx_sunlock(&allprison_lock);
962
963 error = syscall_helper_register(shm_syscalls, SY_THR_STATIC_KLD0x1);
964 if (error != 0)
965 return (error);
966#ifdef COMPAT_FREEBSD321
967 error = syscall32_helper_register(shm32_syscalls, SY_THR_STATIC_KLD0x1);
968 if (error != 0)
969 return (error);
970#endif
971 return (0);
972}
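The shmmax computation in shminit() (lines 916-922) scales shminfo.shmall by decreasing multipliers, starting at PAGE_SIZE, until the product stops overflowing; the wrap is detected by checking that dividing the product by shmall recovers the multiplier. A standalone sketch of the same wrap check, with a hypothetical helper name that is not part of sysv_shm.c:

	#include <sys/types.h>
	#include <limits.h>

	/* Hypothetical helper: multiply and clamp if the product wrapped. */
	static u_long
	mul_no_wrap(u_long a, u_long b)
	{
		u_long r;

		r = a * b;
		/* If the multiplication wrapped, dividing back cannot recover b. */
		if (a != 0 && r / a != b)
			return (ULONG_MAX);
		return (r);
	}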
973
974static int
975shmunload(void)
976{
977 int i;
978
979 if (shm_nused > 0)
980 return (EBUSY16);
981
982#ifdef COMPAT_FREEBSD321
983 syscall32_helper_unregister(shm32_syscalls);
984#endif
985 syscall_helper_unregister(shm_syscalls);
986 if (shm_prison_slot != 0)
987 osd_jail_deregister(shm_prison_slot)osd_deregister(1, (shm_prison_slot));
988
989 for (i = 0; i < shmalloced; i++) {
990#ifdef MAC1
991 mac_sysvshm_destroy(&shmsegs[i]);
992#endif
993 /*
994 * Objects might still be mapped into the processes'
995 * address spaces. The actual free happens on the
996 * last mapping destruction.
997 */
998 if (shmsegs[i].u.shm_perm.mode != SHMSEG_FREE0x0200)
999 vm_object_deallocate(shmsegs[i].object);
1000 }
1001 free(shmsegs, M_SHM);
1002 shmexit_hook = NULL((void *)0);
1003 shmfork_hook = NULL((void *)0);
1004 sx_destroy(&sysvshmsx);
1005 return (0);
1006}
1007
1008static int
1009sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
1010{
1011 struct shmid_kernel tshmseg;
1012 struct prison *pr, *rpr;
1013 int error, i;
1014
1015 SYSVSHM_LOCK();
1016 pr = req->td->td_ucred->cr_prison;
1017 rpr = shm_find_prison(req->td->td_ucred);
1018 error = 0;
1019 for (i = 0; i < shmalloced; i++) {
1020 if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED0x0800) == 0 ||
1021 rpr == NULL((void *)0) || shm_prison_cansee(rpr, &shmsegs[i]) != 0) {
1022 bzero(&tshmseg, sizeof(tshmseg));
1023 tshmseg.u.shm_perm.mode = SHMSEG_FREE0x0200;
1024 } else {
1025 tshmseg = shmsegs[i];
1026 if (tshmseg.cred->cr_prison != pr)
1027 tshmseg.u.shm_perm.key = IPC_PRIVATE;
1028 }
1029 error = SYSCTL_OUT(req, &tshmseg, sizeof(tshmseg));
1030 if (error != 0)
1031 break;
1032 }
1033 SYSVSHM_UNLOCK();
1034 return (error);
1035}
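sysctl_shmsegs() exports the whole shmsegs[] array, blanking any entry the caller's jail may not see (the bzero at line 1022) and hiding foreign keys behind IPC_PRIVATE. Assuming this handler is attached to the usual kern.ipc.shmsegs node (the SYSCTL_PROC glue is outside this excerpt, as in stock FreeBSD), a minimal userland size query could look like:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		size_t len = 0;

		/* NULL oldp: ask only for the size of the shmid_kernel array. */
		if (sysctlbyname("kern.ipc.shmsegs", NULL, &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return (1);
		}
		printf("kern.ipc.shmsegs: %zu bytes\n", len);
		return (0);
	}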
1036
1037static int
1038shm_prison_check(void *obj, void *data)
1039{
1040 struct prison *pr = obj;
1041 struct prison *prpr;
1042 struct vfsoptlist *opts = data;
1043 int error, jsys;
1044
1045 /*
1046 * sysvshm is a jailsys integer.
1047 * It must be "disable" if the parent jail is disabled.
1048 */
1049 error = vfs_copyopt(opts, "sysvshm", &jsys, sizeof(jsys));
1050 if (error != ENOENT2) {
1051 if (error != 0)
1052 return (error);
1053 switch (jsys) {
1054 case JAIL_SYS_DISABLE0:
1055 break;
1056 case JAIL_SYS_NEW1:
1057 case JAIL_SYS_INHERIT2:
1058 prison_lock(pr->pr_parent);
1059 prpr = osd_jail_get(pr->pr_parent, shm_prison_slot);
1060 prison_unlock(pr->pr_parent);
1061 if (prpr == NULL((void *)0))
1062 return (EPERM1);
1063 break;
1064 default:
1065 return (EINVAL22);
1066 }
1067 }
1068
1069 return (0);
1070}
1071
1072static int
1073shm_prison_set(void *obj, void *data)
1074{
1075 struct prison *pr = obj;
1076 struct prison *tpr, *orpr, *nrpr, *trpr;
1077 struct vfsoptlist *opts = data;
1078 void *rsv;
1079 int jsys, descend;
1080
1081 /*
1082 * sysvshm controls which jail is the root of the associated segments
1083 * (this jail or same as the parent), or if the feature is available
1084 * at all.
1085 */
1086 if (vfs_copyopt(opts, "sysvshm", &jsys, sizeof(jsys)) == ENOENT2)
1087 jsys = vfs_flagopt(opts, "allow.sysvipc", NULL((void *)0), 0)
1088 ? JAIL_SYS_INHERIT2
1089 : vfs_flagopt(opts, "allow.nosysvipc", NULL((void *)0), 0)
1090 ? JAIL_SYS_DISABLE0
1091 : -1;
1092 if (jsys == JAIL_SYS_DISABLE0) {
1093 prison_lock(pr);
1094 orpr = osd_jail_get(pr, shm_prison_slot);
1095 if (orpr != NULL((void *)0))
1096 osd_jail_del(pr, shm_prison_slot);
1097 prison_unlock(pr);
1098 if (orpr != NULL((void *)0)) {
1099 if (orpr == pr)
1100 shm_prison_cleanup(pr);
1101 /* Disable all child jails as well. */
1102 FOREACH_PRISON_DESCENDANT(pr, tpr, descend) {
1103 prison_lock(tpr);
1104 trpr = osd_jail_get(tpr, shm_prison_slot);
1105 if (trpr != NULL((void *)0)) {
1106 osd_jail_del(tpr, shm_prison_slot);
1107 prison_unlock(tpr);
1108 if (trpr == tpr)
1109 shm_prison_cleanup(tpr);
1110 } else {
1111 prison_unlock(tpr);
1112 descend = 0;
1113 }
1114 }
1115 }
1116 } else if (jsys != -1) {
1117 if (jsys == JAIL_SYS_NEW1)
1118 nrpr = pr;
1119 else {
1120 prison_lock(pr->pr_parent);
1121 nrpr = osd_jail_get(pr->pr_parent, shm_prison_slot);
1122 prison_unlock(pr->pr_parent);
1123 }
1124 rsv = osd_reserve(shm_prison_slot);
1125 prison_lock(pr);
1126 orpr = osd_jail_get(pr, shm_prison_slot);
1127 if (orpr != nrpr)
1128 (void)osd_jail_set_reserved(pr, shm_prison_slot, rsv,
1129 nrpr);
1130 else
1131 osd_free_reserved(rsv);
1132 prison_unlock(pr);
1133 if (orpr != nrpr) {
1134 if (orpr == pr)
1135 shm_prison_cleanup(pr);
1136 if (orpr != NULL((void *)0)) {
1137 /* Change child jails matching the old root, */
1138 FOREACH_PRISON_DESCENDANT(pr, tpr, descend) {
1139 prison_lock(tpr);
1140 trpr = osd_jail_get(tpr,
1141 shm_prison_slot);
1142 if (trpr == orpr) {
1143 (void)osd_jail_set(tpr,
1144 shm_prison_slot, nrpr);
1145 prison_unlock(tpr);
1146 if (trpr == tpr)
1147 shm_prison_cleanup(tpr);
1148 } else {
1149 prison_unlock(tpr);
1150 descend = 0;
1151 }
1152 }
1153 }
1154 }
1155 }
1156
1157 return (0);
1158}
1159
1160static int
1161shm_prison_get(void *obj, void *data)
1162{
1163 struct prison *pr = obj;
1164 struct prison *rpr;
1165 struct vfsoptlist *opts = data;
1166 int error, jsys;
1167
1168 /* Set sysvshm based on the jail's root prison. */
1169 prison_lock(pr);
1170 rpr = osd_jail_get(pr, shm_prison_slot);
1171 prison_unlock(pr);
1172 jsys = rpr == NULL((void *)0) ? JAIL_SYS_DISABLE0
1173 : rpr == pr ? JAIL_SYS_NEW1 : JAIL_SYS_INHERIT2;
1174 error = vfs_setopt(opts, "sysvshm", &jsys, sizeof(jsys));
1175 if (error == ENOENT2)
1176 error = 0;
1177 return (error);
1178}
1179
1180static int
1181shm_prison_remove(void *obj, void *data __unused)
1182{
1183 struct prison *pr = obj;
1184 struct prison *rpr;
1185
1186 SYSVSHM_LOCK();
1187 prison_lock(pr);
1188 rpr = osd_jail_get(pr, shm_prison_slot);
1189 prison_unlock(pr);
1190 if (rpr == pr)
1191 shm_prison_cleanup(pr);
1192 SYSVSHM_UNLOCK();
1193 return (0);
1194}
1195
1196static void
1197shm_prison_cleanup(struct prison *pr)
1198{
1199 struct shmid_kernel *shmseg;
1200 int i;
1201
1202 /* Remove any segments that belong to this jail. */
1203 for (i = 0; i < shmalloced; i++) {
1204 shmseg = &shmsegs[i];
1205 if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED0x0800) &&
1206 shmseg->cred != NULL((void *)0) && shmseg->cred->cr_prison == pr) {
1207 shm_remove(shmseg, i);
1208 }
1209 }
1210}
1211
1212SYSCTL_JAIL_PARAM_SYS_NODE(sysvshm, CTLFLAG_RW, "SYSV shared memory");
1213
1214#if defined(__i386__) && (defined(COMPAT_FREEBSD41) || defined(COMPAT_43))
1215struct oshmid_ds {
1216 struct ipc_perm_old shm_perm; /* operation perms */
1217 int shm_segsz; /* size of segment (bytes) */
1218 u_short shm_cpid; /* pid, creator */
1219 u_short shm_lpid; /* pid, last operation */
1220 short shm_nattch; /* no. of current attaches */
1221 time_t shm_atime; /* last attach time */
1222 time_t shm_dtime; /* last detach time */
1223 time_t shm_ctime; /* last change time */
1224 void *shm_handle; /* internal handle for shm segment */
1225};
1226
1227struct oshmctl_args {
1228 int shmid;
1229 int cmd;
1230 struct oshmid_ds *ubuf;
1231};
1232
1233static int
1234oshmctl(struct thread *td, struct oshmctl_args *uap)
1235{
1236#ifdef COMPAT_43
1237 int error = 0;
1238 struct prison *rpr;
1239 struct shmid_kernel *shmseg;
1240 struct oshmid_ds outbuf;
1241
1242 rpr = shm_find_prison(td->td_ucred);
1243 if (rpr == NULL((void *)0))
1244 return (ENOSYS78);
1245 if (uap->cmd != IPC_STAT2) {
1246 return (freebsd7_shmctl(td,
1247 (struct freebsd7_shmctl_args *)uap));
1248 }
1249 SYSVSHM_LOCK();
1250 shmseg = shm_find_segment(rpr, uap->shmid, true1);
1251 if (shmseg == NULL((void *)0)) {
1252 SYSVSHM_UNLOCK();
1253 return (EINVAL22);
1254 }
1255 error = ipcperm(td, &shmseg->u.shm_perm, IPC_R000400);
1256 if (error != 0) {
1257 SYSVSHM_UNLOCK();
1258 return (error);
1259 }
1260#ifdef MAC1
1261 error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd);
1262 if (error != 0) {
1263 SYSVSHM_UNLOCK();
1264 return (error);
1265 }
1266#endif
1267 ipcperm_new2old(&shmseg->u.shm_perm, &outbuf.shm_perm);
1268 outbuf.shm_segsz = shmseg->u.shm_segsz;
1269 outbuf.shm_cpid = shmseg->u.shm_cpid;
1270 outbuf.shm_lpid = shmseg->u.shm_lpid;
1271 outbuf.shm_nattch = shmseg->u.shm_nattch;
1272 outbuf.shm_atime = shmseg->u.shm_atime;
1273 outbuf.shm_dtime = shmseg->u.shm_dtime;
1274 outbuf.shm_ctime = shmseg->u.shm_ctime;
1275 outbuf.shm_handle = shmseg->object;
1276 SYSVSHM_UNLOCK();
1277 return (copyout(&outbuf, uap->ubuf, sizeof(outbuf)));
1278#else
1279 return (EINVAL22);
1280#endif
1281}
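oshmctl() fills struct oshmid_ds outbuf member by member and copies it out at line 1277, the same build-then-copyout pattern flagged later at line 1597. Whether anything leaks here depends on the i386 layout of the old struct, but the zero-before-fill idiom already used in sysctl_shmsegs() (line 1022) would make it safe regardless. A one-line sketch, hypothetically placed before the ipcperm_new2old() call at line 1267:

	bzero(&outbuf, sizeof(outbuf));	/* clear holes/padding before filling */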
1282
1283/* XXX casting to (sy_call_t *) is bogus, as usual. */
1284static sy_call_t *shmcalls[] = {
1285 (sy_call_t *)sys_shmat, (sy_call_t *)oshmctl,
1286 (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
1287 (sy_call_t *)freebsd7_shmctl
1288};
1289
1290#ifndef _SYS_SYSPROTO_H_
1291/* XXX actually varargs. */
1292struct shmsys_args {
1293 int which;
1294 int a2;
1295 int a3;
1296 int a4;
1297};
1298#endif
1299int
1300sys_shmsys(struct thread *td, struct shmsys_args *uap)
1301{
1302
1303 if (uap->which < 0 || uap->which >= nitems(shmcalls))
1304 return (EINVAL22);
1305 return ((*shmcalls[uap->which])(td, &uap->a2));
1306}
1307
1308#endif /* i386 && (COMPAT_FREEBSD4 || COMPAT_43) */
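sys_shmsys() is the old multiplexed entry point: uap->which indexes shmcalls[] (lines 1284-1288), so 0 dispatches to sys_shmat, 1 to oshmctl, 2 to sys_shmdt, 3 to sys_shmget and 4 to freebsd7_shmctl, with &uap->a2 reinterpreted as the target call's argument struct. A purely hypothetical userland wrapper, only meaningful for an i386 binary against a kernel built with these compat options; the syscall number 171 is taken from the helper table above:

	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <unistd.h>

	static int
	legacy_shmget(key_t key, int size, int flag)
	{
		/* 171 == shmsys; index 3 in shmcalls[] is sys_shmget. */
		return (syscall(171, 3, key, size, flag));
	}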
1309
1310#ifdef COMPAT_FREEBSD321
1311
1312int
1313freebsd32_shmsys(struct thread *td, struct freebsd32_shmsys_args *uap)
1314{
1315
1316#if defined(COMPAT_FREEBSD41) || defined(COMPAT_FREEBSD51) || \
1317 defined(COMPAT_FREEBSD61) || defined(COMPAT_FREEBSD71)
1318 switch (uap->which) {
1319 case 0: { /* shmat */
1320 struct shmat_args ap;
1321
1322 ap.shmid = uap->a2;
1323 ap.shmaddr = PTRIN(uap->a3);
1324 ap.shmflg = uap->a4;
1325 return (sysent[SYS_shmat228].sy_call(td, &ap));
1326 }
1327 case 2: { /* shmdt */
1328 struct shmdt_args ap;
1329
1330 ap.shmaddr = PTRIN(uap->a2);
1331 return (sysent[SYS_shmdt230].sy_call(td, &ap));
1332 }
1333 case 3: { /* shmget */
1334 struct shmget_args ap;
1335
1336 ap.key = uap->a2;
1337 ap.size = uap->a3;
1338 ap.shmflg = uap->a4;
1339 return (sysent[SYS_shmget231].sy_call(td, &ap));
1340 }
1341 case 4: { /* shmctl */
1342 struct freebsd7_freebsd32_shmctl_args ap;
1343
1344 ap.shmid = uap->a2;
1345 ap.cmd = uap->a3;
1346 ap.buf = PTRIN(uap->a4);
1347 return (freebsd7_freebsd32_shmctl(td, &ap));
1348 }
1349 case 1: /* oshmctl */
1350 default:
1351 return (EINVAL22);
1352 }
1353#else
1354 return (nosys(td, NULL((void *)0)));
1355#endif
1356}
1357
1358#if defined(COMPAT_FREEBSD41) || defined(COMPAT_FREEBSD51) || \
1359 defined(COMPAT_FREEBSD61) || defined(COMPAT_FREEBSD71)
1360int
1361freebsd7_freebsd32_shmctl(struct thread *td,
1362 struct freebsd7_freebsd32_shmctl_args *uap)
1363{
1364 int error;
1365 union {
1366 struct shmid_ds shmid_ds;
1367 struct shm_info shm_info;
1368 struct shminfo shminfo;
1369 } u;
1370 union {
1371 struct shmid_ds32_old shmid_ds32;
1372 struct shm_info32 shm_info32;
1373 struct shminfo32 shminfo32;
1374 } u32;
1375 size_t sz;
1376
1377 if (uap->cmd == IPC_SET1) {
1378 if ((error = copyin(uap->buf, &u32.shmid_ds32,
1379 sizeof(u32.shmid_ds32))))
1380 goto done;
1381 freebsd32_ipcperm_old_in(&u32.shmid_ds32.shm_perm,
1382 &u.shmid_ds.shm_perm);
1383 CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
1384 CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
1385 CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
1386 CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
1387 CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
1388 CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
1389 CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
1390 }
1391
1392 error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
1393 if (error)
1394 goto done;
1395
1396 /* Cases in which we need to copyout */
1397 switch (uap->cmd) {
1398 case IPC_INFO3:
1399 CP(u.shminfo, u32.shminfo32, shmmax);
1400 CP(u.shminfo, u32.shminfo32, shmmin);
1401 CP(u.shminfo, u32.shminfo32, shmmni);
1402 CP(u.shminfo, u32.shminfo32, shmseg);
1403 CP(u.shminfo, u32.shminfo32, shmall);
1404 error = copyout(&u32.shminfo32, uap->buf,
1405 sizeof(u32.shminfo32));
1406 break;
1407 case SHM_INFO14:
1408 CP(u.shm_info, u32.shm_info32, used_ids);
1409 CP(u.shm_info, u32.shm_info32, shm_rss);
1410 CP(u.shm_info, u32.shm_info32, shm_tot);
1411 CP(u.shm_info, u32.shm_info32, shm_swp);
1412 CP(u.shm_info, u32.shm_info32, swap_attempts);
1413 CP(u.shm_info, u32.shm_info32, swap_successes);
1414 error = copyout(&u32.shm_info32, uap->buf,
1415 sizeof(u32.shm_info32));
1416 break;
1417 case SHM_STAT13:
1418 case IPC_STAT2:
1419 freebsd32_ipcperm_old_out(&u.shmid_ds.shm_perm,
1420 &u32.shmid_ds32.shm_perm);
1421 if (u.shmid_ds.shm_segsz > INT32_MAX0x7fffffff)
1422 u32.shmid_ds32.shm_segsz = INT32_MAX0x7fffffff;
1423 else
1424 CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
1425 CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
1426 CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
1427 CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
1428 CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
1429 CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
1430 CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
1431 u32.shmid_ds32.shm_internal = 0;
1432 error = copyout(&u32.shmid_ds32, uap->buf,
1433 sizeof(u32.shmid_ds32));
1434 break;
1435 }
1436
1437done:
1438 if (error) {
1439 /* Invalidate the return value */
1440 td->td_retvaltd_uretoff.tdu_retval[0] = -1;
1441 }
1442 return (error);
1443}
1444#endif
1445
1446int
1447freebsd32_shmctl(struct thread *td, struct freebsd32_shmctl_args *uap)
1448{
1449 int error;
1450 union {
1451 struct shmid_ds shmid_ds;
1452 struct shm_info shm_info;
1453 struct shminfo shminfo;
1454 } u;
1455 union {
1456 struct shmid_ds32 shmid_ds32;
1457 struct shm_info32 shm_info32;
1458 struct shminfo32 shminfo32;
1459 } u32;
1460 size_t sz;
1461
1462 if (uap->cmd == IPC_SET1) {
1463 if ((error = copyin(uap->buf, &u32.shmid_ds32,
1464 sizeof(u32.shmid_ds32))))
1465 goto done;
1466 freebsd32_ipcperm_in(&u32.shmid_ds32.shm_perm,
1467 &u.shmid_ds.shm_perm);
1468 CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
1469 CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
1470 CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
1471 CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
1472 CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
1473 CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
1474 CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
1475 }
1476
1477 error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
1478 if (error)
1479 goto done;
1480
1481 /* Cases in which we need to copyout */
1482 switch (uap->cmd) {
1483 case IPC_INFO3:
1484 CP(u.shminfo, u32.shminfo32, shmmax);
1485 CP(u.shminfo, u32.shminfo32, shmmin);
1486 CP(u.shminfo, u32.shminfo32, shmmni);
1487 CP(u.shminfo, u32.shminfo32, shmseg);
1488 CP(u.shminfo, u32.shminfo32, shmall);
1489 error = copyout(&u32.shminfo32, uap->buf,
1490 sizeof(u32.shminfo32));
1491 break;
1492 case SHM_INFO14:
1493 CP(u.shm_info, u32.shm_info32, used_ids);
1494 CP(u.shm_info, u32.shm_info32, shm_rss);
1495 CP(u.shm_info, u32.shm_info32, shm_tot);
1496 CP(u.shm_info, u32.shm_info32, shm_swp);
1497 CP(u.shm_info, u32.shm_info32, swap_attempts);
1498 CP(u.shm_info, u32.shm_info32, swap_successes);
1499 error = copyout(&u32.shm_info32, uap->buf,
1500 sizeof(u32.shm_info32));
1501 break;
1502 case SHM_STAT13:
1503 case IPC_STAT2:
1504 freebsd32_ipcperm_out(&u.shmid_ds.shm_perm,
1505 &u32.shmid_ds32.shm_perm);
1506 if (u.shmid_ds.shm_segsz > INT32_MAX0x7fffffff)
1507 u32.shmid_ds32.shm_segsz = INT32_MAX0x7fffffff;
1508 else
1509 CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
1510 CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
1511 CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
1512 CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
1513 CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
1514 CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
1515 CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
1516 error = copyout(&u32.shmid_ds32, uap->buf,
1517 sizeof(u32.shmid_ds32));
1518 break;
1519 }
1520
1521done:
1522 if (error) {
1523 /* Invalidate the return value */
1524 td->td_retvaltd_uretoff.tdu_retval[0] = -1;
1525 }
1526 return (error);
1527}
1528#endif
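The 32-bit shmctl paths above build their replies the same way: u32 is filled member by member and copied out for IPC_INFO, SHM_INFO, SHM_STAT and IPC_STAT. The analyzer only flags the native freebsd7 path (line 1597), but if the 32-bit layouts contain padding the same precaution applies; a minimal, hypothetical hardening is to clear the reply union before the copyout switch:

	bzero(&u32, sizeof(u32));	/* clears any compiler-inserted padding */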
1529
1530#if defined(COMPAT_FREEBSD41) || defined(COMPAT_FREEBSD51) || \
1531 defined(COMPAT_FREEBSD61) || defined(COMPAT_FREEBSD71)
1532
1533#ifndef CP
1534#define CP(src, dst, fld) do { (dst).fld = (src).fld; } while (0)
1535#endif
1536
1537#ifndef _SYS_SYSPROTO_H_
1538struct freebsd7_shmctl_args {
1539 int shmid;
1540 int cmd;
1541 struct shmid_ds_old *buf;
1542};
1543#endif
1544int
1545freebsd7_shmctl(struct thread *td, struct freebsd7_shmctl_args *uap)
1546{
1547 int error;
1548 struct shmid_ds_old old;
1549 struct shmid_ds buf;
1550 size_t bufsz;
1551
1552 /*
1553 * The only reason IPC_INFO, SHM_INFO, SHM_STAT exist is to support
1554 * Linux binaries. If we see the call come through the FreeBSD ABI,
1555 * return an error back to the user since we do not support this.
1556 */
1557 if (uap->cmd == IPC_INFO3 || uap->cmd == SHM_INFO14 ||
1
Taking false branch
1558 uap->cmd == SHM_STAT13)
1559 return (EINVAL22);
1560
1561 /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
1562 if (uap->cmd == IPC_SET1) {
2
Taking false branch
1563 if ((error = copyin(uap->buf, &old, sizeof(old))))
1564 goto done;
1565 ipcperm_old2new(&old.shm_perm, &buf.shm_perm);
1566 CP(old, buf, shm_segsz);
1567 CP(old, buf, shm_lpid);
1568 CP(old, buf, shm_cpid);
1569 CP(old, buf, shm_nattch);
1570 CP(old, buf, shm_atime);
1571 CP(old, buf, shm_dtime);
1572 CP(old, buf, shm_ctime);
1573 }
1574
1575 error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
1576 if (error)
3
Taking false branch
1577 goto done;
1578
1579 /* Cases in which we need to copyout */
1580 switch (uap->cmd) {
4
Control jumps to 'case 2:' at line 1581
1581 case IPC_STAT2:
1582 ipcperm_new2old(&buf.shm_perm, &old.shm_perm);
1583 if (buf.shm_segsz > INT_MAX0x7fffffff)
5
Taking true branch
1584 old.shm_segsz = INT_MAX0x7fffffff;
1585 else
1586 CP(buf, old, shm_segsz);
1587 CP(buf, old, shm_lpid);
1588 CP(buf, old, shm_cpid);
1589 if (buf.shm_nattch > SHRT_MAX0x7fff)
6
Taking true branch
1590 old.shm_nattch = SHRT_MAX0x7fff;
1591 else
1592 CP(buf, old, shm_nattch);
1593 CP(buf, old, shm_atime);
1594 CP(buf, old, shm_dtime);
1595 CP(buf, old, shm_ctime);
1596 old.shm_internal = NULL;
1597 error = copyout(&old, uap->buf, sizeof(old));
7
Copies out a struct with uncleared padding (>= 2 bytes)
1598 break;
1599 }
1600
1601done:
1602 if (error) {
1603 /* Invalidate the return value */
1604 td->td_retvaltd_uretoff.tdu_retval[0] = -1;
1605 }
1606 return (error);
1607}
1608
1609#endif /* COMPAT_FREEBSD4 || COMPAT_FREEBSD5 || COMPAT_FREEBSD6 ||
1610 COMPAT_FREEBSD7 */
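The reported defect is the copyout() at line 1597: struct shmid_ds_old old lives on the kernel stack and, in the IPC_STAT case, every named member is assigned, but the padding the compiler inserts between members is never written, so at least two uninitialized bytes reach userspace. sysctl_shmsegs() above already shows the idiomatic remedy (bzero before filling, line 1022). A minimal sketch of the same change applied here, assuming the rest of freebsd7_shmctl() is unchanged:

	case IPC_STAT:
		/* Clear the whole struct, padding included, before filling it. */
		bzero(&old, sizeof(old));
		ipcperm_new2old(&buf.shm_perm, &old.shm_perm);
		/* ... member copies as at lines 1583-1596 ... */
		error = copyout(&old, uap->buf, sizeof(old));
		break;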
1611
1612static int
1613sysvshm_modload(struct module *module, int cmd, void *arg)
1614{
1615 int error = 0;
1616
1617 switch (cmd) {
1618 case MOD_LOAD:
1619 error = shminit();
1620 if (error != 0)
1621 shmunload();
1622 break;
1623 case MOD_UNLOAD:
1624 error = shmunload();
1625 break;
1626 case MOD_SHUTDOWN:
1627 break;
1628 default:
1629 error = EINVAL22;
1630 break;
1631 }
1632 return (error);
1633}
1634
1635static moduledata_t sysvshm_mod = {
1636 "sysvshm",
1637 &sysvshm_modload,
1638 NULL((void *)0)
1639};
1640
1641DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
1642MODULE_VERSION(sysvshm, 1);