/*
 * NOTE(review): static analysis flagged the MEMRANGE_GET copyout in
 * memioctl() below: it copies out struct mem_range_desc objects whose
 * padding bytes (>= 4 bytes) were never cleared, disclosing kernel
 * memory to userspace.  Zero the allocation before populating it.
 */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 */
40 | ||||
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <machine/memdev.h>
72 | ||||
73 | /* | |||
74 | * Used in /dev/mem drivers and elsewhere | |||
75 | */ | |||
76 | MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors")struct malloc_type M_MEMDESC[1] = { { ((void *)0), 877983977, "memdesc", ((void *)0) } }; static struct sysinit M_MEMDESC_init_sys_init = { SI_SUB_KMEM, SI_ORDER_THIRD, (sysinit_cfunc_t)(sysinit_nfunc_t )malloc_init, ((void *)(M_MEMDESC)) }; __asm__(".globl " "__start_set_sysinit_set" ); __asm__(".globl " "__stop_set_sysinit_set"); static void const * __set_sysinit_set_sym_M_MEMDESC_init_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(M_MEMDESC_init_sys_init); static struct sysinit M_MEMDESC_uninit_sys_uninit = { SI_SUB_KMEM, SI_ORDER_ANY, (sysinit_cfunc_t)(sysinit_nfunc_t )malloc_uninit, ((void *)(M_MEMDESC)) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * __set_sysuninit_set_sym_M_MEMDESC_uninit_sys_uninit __attribute__ ((__section__("set_" "sysuninit_set"))) __attribute__((__used__ )) = &(M_MEMDESC_uninit_sys_uninit); | |||
77 | ||||
78 | /* ARGSUSED */ | |||
79 | int | |||
80 | memrw(struct cdev *dev, struct uio *uio, int flags) | |||
81 | { | |||
82 | struct iovec *iov; | |||
83 | void *p; | |||
84 | ssize_t orig_resid; | |||
85 | u_long v, vd; | |||
86 | u_int c; | |||
87 | int error; | |||
88 | ||||
89 | error = 0; | |||
90 | orig_resid = uio->uio_resid; | |||
91 | while (uio->uio_resid > 0 && error == 0) { | |||
92 | iov = uio->uio_iov; | |||
93 | if (iov->iov_len == 0) { | |||
94 | uio->uio_iov++; | |||
95 | uio->uio_iovcnt--; | |||
96 | if (uio->uio_iovcnt < 0) | |||
97 | panic("memrw"); | |||
98 | continue; | |||
99 | } | |||
100 | v = uio->uio_offset; | |||
101 | c = ulmin(iov->iov_len, PAGE_SIZE(1<<12) - (u_int)(v & PAGE_MASK((1<<12)-1))); | |||
102 | ||||
103 | switch (dev2unit(dev)((dev)->si_drv0)) { | |||
104 | case CDEV_MINOR_KMEM1: | |||
105 | /* | |||
106 | * Since c is clamped to be less or equal than | |||
107 | * PAGE_SIZE, the uiomove() call does not | |||
108 | * access past the end of the direct map. | |||
109 | */ | |||
110 | if (v >= DMAP_MIN_ADDRESS( ((unsigned long)-1 << 47) | ((unsigned long)(((((((1<< 12)/(sizeof (pml4_entry_t)))-4)-8)/(8))*(8))) << 39) | ( (unsigned long)(0) << 30) | ((unsigned long)(0) << 21) | ((unsigned long)(0) << 12)) && | |||
111 | v < DMAP_MIN_ADDRESS( ((unsigned long)-1 << 47) | ((unsigned long)(((((((1<< 12)/(sizeof (pml4_entry_t)))-4)-8)/(8))*(8))) << 39) | ( (unsigned long)(0) << 30) | ((unsigned long)(0) << 21) | ((unsigned long)(0) << 12)) + dmaplimit) { | |||
112 | error = uiomove((void *)v, c, uio); | |||
113 | break; | |||
114 | } | |||
115 | ||||
116 | if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ? | |||
117 | VM_PROT_READ((vm_prot_t) 0x01) : VM_PROT_WRITE((vm_prot_t) 0x02))) { | |||
118 | error = EFAULT14; | |||
119 | break; | |||
120 | } | |||
121 | ||||
122 | /* | |||
123 | * If the extracted address is not accessible | |||
124 | * through the direct map, then we make a | |||
125 | * private (uncached) mapping because we can't | |||
126 | * depend on the existing kernel mapping | |||
127 | * remaining valid until the completion of | |||
128 | * uiomove(). | |||
129 | * | |||
130 | * XXX We cannot provide access to the | |||
131 | * physical page 0 mapped into KVA. | |||
132 | */ | |||
133 | v = pmap_extract(kernel_pmap(&kernel_pmap_store), v); | |||
134 | if (v == 0) { | |||
135 | error = EFAULT14; | |||
136 | break; | |||
137 | } | |||
138 | /* FALLTHROUGH */ | |||
139 | case CDEV_MINOR_MEM0: | |||
140 | if (v < dmaplimit) { | |||
141 | vd = PHYS_TO_DMAP(v)({ do { if (__builtin_expect((!(dmaplimit == 0 || (v) < dmaplimit )), 0)) panic ("physical address %#jx not covered by the DMAP" , (uintmax_t)v); } while (0); (v) | ( ((unsigned long)-1 << 47) | ((unsigned long)(((((((1<<12)/(sizeof (pml4_entry_t )))-4)-8)/(8))*(8))) << 39) | ((unsigned long)(0) << 30) | ((unsigned long)(0) << 21) | ((unsigned long)(0) << 12)); }); | |||
142 | error = uiomove((void *)vd, c, uio); | |||
143 | break; | |||
144 | } | |||
145 | if (v > cpu_getmaxphyaddr()) { | |||
146 | error = EFAULT14; | |||
147 | break; | |||
148 | } | |||
149 | p = pmap_mapdev(v, PAGE_SIZE(1<<12)); | |||
150 | error = uiomove(p, c, uio); | |||
151 | pmap_unmapdev((vm_offset_t)p, PAGE_SIZE(1<<12)); | |||
152 | break; | |||
153 | } | |||
154 | } | |||
155 | /* | |||
156 | * Don't return error if any byte was written. Read and write | |||
157 | * can return error only if no i/o was performed. | |||
158 | */ | |||
159 | if (uio->uio_resid != orig_resid) | |||
160 | error = 0; | |||
161 | return (error); | |||
162 | } | |||
163 | ||||
164 | /* | |||
165 | * allow user processes to MMAP some memory sections | |||
166 | * instead of going through read/write | |||
167 | */ | |||
168 | /* ARGSUSED */ | |||
169 | int | |||
170 | memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, | |||
171 | int prot __unused__attribute__((__unused__)), vm_memattr_t *memattr __unused__attribute__((__unused__))) | |||
172 | { | |||
173 | if (dev2unit(dev)((dev)->si_drv0) == CDEV_MINOR_MEM0) { | |||
174 | if (offset > cpu_getmaxphyaddr()) | |||
175 | return (-1); | |||
176 | *paddr = offset; | |||
177 | return (0); | |||
178 | } | |||
179 | return (-1); | |||
180 | } | |||
181 | ||||
182 | /* | |||
183 | * Operations for changing memory attributes. | |||
184 | * | |||
185 | * This is basically just an ioctl shim for mem_range_attr_get | |||
186 | * and mem_range_attr_set. | |||
187 | */ | |||
188 | /* ARGSUSED */ | |||
189 | int | |||
190 | memioctl(struct cdev *dev __unused__attribute__((__unused__)), u_long cmd, caddr_t data, int flags, | |||
191 | struct thread *td) | |||
192 | { | |||
193 | int nd, error = 0; | |||
194 | struct mem_range_op *mo = (struct mem_range_op *)data; | |||
195 | struct mem_range_desc *md; | |||
196 | ||||
197 | /* is this for us? */ | |||
198 | if ((cmd != MEMRANGE_GET((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct mem_range_op)) & ((1 << 13) - 1)) << 16) | ( (('m')) << 8) | ((50))))) && | |||
| ||||
199 | (cmd != MEMRANGE_SET((unsigned long) ((0x80000000) | (((sizeof(struct mem_range_op )) & ((1 << 13) - 1)) << 16) | ((('m')) << 8) | ((51)))))) | |||
200 | return (ENOTTY25); | |||
201 | ||||
202 | /* any chance we can handle this? */ | |||
203 | if (mem_range_softc.mr_op == NULL((void *)0)) | |||
204 | return (EOPNOTSUPP45); | |||
205 | ||||
206 | /* do we have any descriptors? */ | |||
207 | if (mem_range_softc.mr_ndesc == 0) | |||
208 | return (ENXIO6); | |||
209 | ||||
210 | switch (cmd) { | |||
211 | case MEMRANGE_GET((unsigned long) (((0x80000000|0x40000000)) | (((sizeof(struct mem_range_op)) & ((1 << 13) - 1)) << 16) | ( (('m')) << 8) | ((50)))): | |||
212 | nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc); | |||
213 | if (nd > 0) { | |||
214 | md = (struct mem_range_desc *) | |||
215 | malloc(nd * sizeof(struct mem_range_desc), | |||
216 | M_MEMDESC, M_WAITOK0x0002); | |||
217 | error = mem_range_attr_get(md, &nd); | |||
218 | if (!error) | |||
219 | error = copyout(md, mo->mo_desc, | |||
| ||||
220 | nd * sizeof(struct mem_range_desc)); | |||
221 | free(md, M_MEMDESC); | |||
222 | } | |||
223 | else | |||
224 | nd = mem_range_softc.mr_ndesc; | |||
225 | mo->mo_arg[0] = nd; | |||
226 | break; | |||
227 | ||||
228 | case MEMRANGE_SET((unsigned long) ((0x80000000) | (((sizeof(struct mem_range_op )) & ((1 << 13) - 1)) << 16) | ((('m')) << 8) | ((51)))): | |||
229 | md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc), | |||
230 | M_MEMDESC, M_WAITOK0x0002); | |||
231 | error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc)); | |||
232 | /* clamp description string */ | |||
233 | md->mr_owner[sizeof(md->mr_owner) - 1] = 0; | |||
234 | if (error == 0) | |||
235 | error = mem_range_attr_set(md, &mo->mo_arg[0]); | |||
236 | free(md, M_MEMDESC); | |||
237 | break; | |||
238 | } | |||
239 | return (error); | |||
240 | } |