1 | /* $NetBSD: uvm_glue.c,v 1.163 2016/05/22 09:10:37 maxv Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 1997 Charles D. Cranor and Washington University. |
5 | * Copyright (c) 1991, 1993, The Regents of the University of California. |
6 | * |
7 | * All rights reserved. |
8 | * |
9 | * This code is derived from software contributed to Berkeley by |
10 | * The Mach Operating System project at Carnegie-Mellon University. |
11 | * |
12 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions |
14 | * are met: |
15 | * 1. Redistributions of source code must retain the above copyright |
16 | * notice, this list of conditions and the following disclaimer. |
17 | * 2. Redistributions in binary form must reproduce the above copyright |
18 | * notice, this list of conditions and the following disclaimer in the |
19 | * documentation and/or other materials provided with the distribution. |
20 | * 3. Neither the name of the University nor the names of its contributors |
21 | * may be used to endorse or promote products derived from this software |
22 | * without specific prior written permission. |
23 | * |
24 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
25 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
28 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
29 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
30 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
31 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
32 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
33 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
34 | * SUCH DAMAGE. |
35 | * |
36 | * @(#)vm_glue.c 8.6 (Berkeley) 1/5/94 |
37 | * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp |
38 | * |
39 | * |
40 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. |
41 | * All rights reserved. |
42 | * |
43 | * Permission to use, copy, modify and distribute this software and |
44 | * its documentation is hereby granted, provided that both the copyright |
45 | * notice and this permission notice appear in all copies of the |
46 | * software, derivative works or modified versions, and any portions |
47 | * thereof, and that both notices appear in supporting documentation. |
48 | * |
49 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
50 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND |
51 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
52 | * |
53 | * Carnegie Mellon requests users of this software to return to |
54 | * |
55 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
56 | * School of Computer Science |
57 | * Carnegie Mellon University |
58 | * Pittsburgh PA 15213-3890 |
59 | * |
60 | * any improvements or extensions that they make and grant Carnegie the |
61 | * rights to redistribute these changes. |
62 | */ |
63 | |
64 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.163 2016/05/22 09:10:37 maxv Exp $");
66 | |
67 | #include "opt_kgdb.h" |
68 | #include "opt_kstack.h" |
69 | #include "opt_uvmhist.h" |
70 | |
71 | /* |
72 | * uvm_glue.c: glue functions |
73 | */ |
74 | |
75 | #include <sys/param.h> |
76 | #include <sys/kernel.h> |
77 | |
78 | #include <sys/systm.h> |
79 | #include <sys/proc.h> |
80 | #include <sys/resourcevar.h> |
81 | #include <sys/buf.h> |
82 | #include <sys/syncobj.h> |
83 | #include <sys/cpu.h> |
84 | #include <sys/atomic.h> |
85 | #include <sys/lwp.h> |
86 | |
87 | #include <uvm/uvm.h> |
88 | |
89 | /* |
90 | * uvm_kernacc: test if kernel can access a memory region. |
91 | * |
92 | * => Currently used only by /dev/kmem driver (dev/mm.c). |
93 | */ |
94 | bool |
95 | uvm_kernacc(void *addr, size_t len, vm_prot_t prot) |
96 | { |
97 | vaddr_t saddr = trunc_page((vaddr_t)addr); |
98 | vaddr_t eaddr = round_page(saddr + len); |
99 | bool rv; |
100 | |
101 | vm_map_lock_read(kernel_map); |
102 | rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot); |
103 | vm_map_unlock_read(kernel_map); |
104 | |
105 | return rv; |
106 | } |
107 | |
108 | #ifdef KGDB |
109 | /* |
110 | * Change protections on kernel pages from addr to addr+len |
111 | * (presumably so debugger can plant a breakpoint). |
112 | * |
 * We force the protection change at the pmap level.  If we were to
 * use vm_map_protect(), a change to allow writing would be applied
 * lazily, meaning we would still take a protection fault, something
 * we really don't want.  It would also fragment the kernel map
 * unnecessarily.  We cannot use pmap_protect() since it also won't
 * enforce a write-enable request.  Using pmap_enter() is the only
 * way we can ensure the change takes place properly.
120 | */ |
121 | void |
122 | uvm_chgkprot(void *addr, size_t len, int rw) |
123 | { |
124 | vm_prot_t prot; |
125 | paddr_t pa; |
126 | vaddr_t sva, eva; |
127 | |
128 | prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE; |
129 | eva = round_page((vaddr_t)addr + len); |
130 | for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) { |
131 | /* |
132 | * Extract physical address for the page. |
133 | */ |
134 | if (pmap_extract(pmap_kernel(), sva, &pa) == false) |
135 | panic("%s: invalid page" , __func__); |
136 | pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED); |
137 | } |
138 | pmap_update(pmap_kernel()); |
139 | } |
140 | #endif |
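
/*
 * Intended calling pattern for uvm_chgkprot() (illustrative only;
 * "insn" and "bkpt" are hypothetical): make the text page writable,
 * plant the breakpoint, then restore read-only protection.
 *
 *	uvm_chgkprot(insn, sizeof(bkpt), B_WRITE);
 *	memcpy(insn, &bkpt, sizeof(bkpt));
 *	uvm_chgkprot(insn, sizeof(bkpt), B_READ);
 */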
141 | |
142 | /* |
143 | * uvm_vslock: wire user memory for I/O |
144 | * |
145 | * - called from physio and sys___sysctl |
146 | * - XXXCDC: consider nuking this (or making it a macro?) |
147 | */ |
148 | |
149 | int |
150 | uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type) |
151 | { |
152 | struct vm_map *map; |
153 | vaddr_t start, end; |
154 | int error; |
155 | |
156 | map = &vs->vm_map; |
157 | start = trunc_page((vaddr_t)addr); |
158 | end = round_page((vaddr_t)addr + len); |
159 | error = uvm_fault_wire(map, start, end, access_type, 0); |
160 | return error; |
161 | } |
162 | |
163 | /* |
164 | * uvm_vsunlock: unwire user memory wired by uvm_vslock() |
165 | * |
166 | * - called from physio and sys___sysctl |
167 | * - XXXCDC: consider nuking this (or making it a macro?) |
168 | */ |
169 | |
170 | void |
171 | uvm_vsunlock(struct vmspace *vs, void *addr, size_t len) |
172 | { |
173 | uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr), |
174 | round_page((vaddr_t)addr + len)); |
175 | } |
176 | |
177 | /* |
178 | * uvm_proc_fork: fork a virtual address space |
179 | * |
180 | * - the address space is copied as per parent map's inherit values |
181 | */ |
182 | void |
183 | uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared) |
184 | { |
185 | |
186 | if (shared == true) { |
187 | p2->p_vmspace = NULL; |
188 | uvmspace_share(p1, p2); |
189 | } else { |
190 | p2->p_vmspace = uvmspace_fork(p1->p_vmspace); |
191 | } |
192 | |
193 | cpu_proc_fork(p1, p2); |
194 | } |
195 | |
196 | /* |
197 | * uvm_lwp_fork: fork a thread |
198 | * |
 * - a new PCB structure is allocated for the child process,
 *   and filled in by the MD layer
201 | * - if specified, the child gets a new user stack described by |
202 | * stack and stacksize |
203 | * - NOTE: the kernel stack may be at a different location in the child |
204 | * process, and thus addresses of automatic variables may be invalid |
205 | * after cpu_lwp_fork returns in the child process. We do nothing here |
206 | * after cpu_lwp_fork returns. |
207 | */ |
208 | void |
209 | uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize, |
210 | void (*func)(void *), void *arg) |
211 | { |
212 | |
213 | /* Fill stack with magic number. */ |
214 | kstack_setup_magic(l2); |
215 | |
216 | /* |
	 * cpu_lwp_fork() copies and updates the PCB, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first
	 * time slice and will not return here.  If this is a kernel
	 * thread, the specified entry point will be executed.
222 | */ |
223 | cpu_lwp_fork(l1, l2, stack, stacksize, func, arg); |
224 | |
	/* Mark the emap inactive for the new LWP. */
226 | l2->l_emap_gen = UVM_EMAP_INACTIVE; |
227 | } |
228 | |
229 | #ifndef USPACE_ALIGN |
230 | #define USPACE_ALIGN 0 |
231 | #endif |
232 | |
233 | static pool_cache_t uvm_uarea_cache; |
234 | #if defined(__HAVE_CPU_UAREA_ROUTINES) |
235 | static pool_cache_t uvm_uarea_system_cache; |
236 | #else |
237 | #define uvm_uarea_system_cache uvm_uarea_cache |
238 | #endif |
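
/*
 * Without MD u-area routines, the "system" cache above is just an
 * alias for the normal cache, so the *_system_* entry points below
 * degenerate to the plain variants.
 */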
239 | |
240 | static void * |
241 | uarea_poolpage_alloc(struct pool *pp, int flags) |
242 | { |
243 | #if defined(PMAP_MAP_POOLPAGE) |
244 | if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) { |
245 | struct vm_page *pg; |
246 | vaddr_t va; |
247 | |
248 | #if defined(PMAP_ALLOC_POOLPAGE) |
249 | pg = PMAP_ALLOC_POOLPAGE( |
250 | ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0)); |
251 | #else |
252 | pg = uvm_pagealloc(NULL, 0, NULL, |
253 | ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0)); |
254 | #endif |
255 | if (pg == NULL) |
256 | return NULL; |
257 | va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg)); |
258 | if (va == 0) |
259 | uvm_pagefree(pg); |
260 | return (void *)va; |
261 | } |
262 | #endif |
263 | #if defined(__HAVE_CPU_UAREA_ROUTINES) |
264 | void *va = cpu_uarea_alloc(false); |
265 | if (va) |
266 | return (void *)va; |
267 | #endif |
268 | return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz, |
269 | USPACE_ALIGN, UVM_KMF_WIRED | |
270 | ((flags & PR_WAITOK) ? UVM_KMF_WAITVA : |
271 | (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK))); |
272 | } |
273 | |
274 | static void |
275 | uarea_poolpage_free(struct pool *pp, void *addr) |
276 | { |
277 | #if defined(PMAP_MAP_POOLPAGE) |
278 | if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) { |
279 | paddr_t pa; |
280 | |
281 | pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr); |
282 | KASSERT(pa != 0); |
283 | uvm_pagefree(PHYS_TO_VM_PAGE(pa)); |
284 | return; |
285 | } |
286 | #endif |
287 | #if defined(__HAVE_CPU_UAREA_ROUTINES) |
288 | if (cpu_uarea_free(addr)) |
289 | return; |
290 | #endif |
291 | uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz, |
292 | UVM_KMF_WIRED); |
293 | } |
294 | |
295 | static struct pool_allocator uvm_uarea_allocator = { |
296 | .pa_alloc = uarea_poolpage_alloc, |
297 | .pa_free = uarea_poolpage_free, |
298 | .pa_pagesz = USPACE, |
299 | }; |
300 | |
301 | #if defined(__HAVE_CPU_UAREA_ROUTINES) |
302 | static void * |
303 | uarea_system_poolpage_alloc(struct pool *pp, int flags) |
304 | { |
305 | void * const va = cpu_uarea_alloc(true); |
306 | if (va != NULL) |
307 | return va; |
308 | |
309 | return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz, |
310 | USPACE_ALIGN, UVM_KMF_WIRED | |
311 | ((flags & PR_WAITOK) ? UVM_KMF_WAITVA : |
312 | (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK))); |
313 | } |
314 | |
315 | static void |
316 | uarea_system_poolpage_free(struct pool *pp, void *addr) |
317 | { |
318 | if (cpu_uarea_free(addr)) |
319 | return; |
320 | |
321 | uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz, |
322 | UVM_KMF_WIRED); |
323 | } |
324 | |
325 | static struct pool_allocator uvm_uarea_system_allocator = { |
326 | .pa_alloc = uarea_system_poolpage_alloc, |
327 | .pa_free = uarea_system_poolpage_free, |
328 | .pa_pagesz = USPACE, |
329 | }; |
330 | #endif /* __HAVE_CPU_UAREA_ROUTINES */ |
331 | |
332 | void |
333 | uvm_uarea_init(void) |
334 | { |
335 | int flags = PR_NOTOUCH; |
336 | |
337 | /* |
	 * Specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
341 | */ |
342 | |
343 | if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) || |
344 | (USPACE_ALIGN % USPACE) != 0) { |
345 | flags |= PR_NOALIGN; |
346 | } |
347 | |
348 | uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags, |
349 | "uarea" , &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL); |
350 | #if defined(__HAVE_CPU_UAREA_ROUTINES) |
351 | uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN, |
352 | 0, flags, "uareasys" , &uvm_uarea_system_allocator, |
353 | IPL_NONE, NULL, NULL, NULL); |
354 | #endif |
355 | } |
356 | |
357 | /* |
358 | * uvm_uarea_alloc: allocate a u-area |
359 | */ |
360 | |
361 | vaddr_t |
362 | uvm_uarea_alloc(void) |
363 | { |
364 | |
365 | return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK); |
366 | } |
367 | |
368 | vaddr_t |
369 | uvm_uarea_system_alloc(struct cpu_info *ci) |
370 | { |
371 | #ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP |
372 | if (__predict_false(ci != NULL)) |
373 | return cpu_uarea_alloc_idlelwp(ci); |
374 | #endif |
375 | |
376 | return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK); |
377 | } |
378 | |
379 | /* |
380 | * uvm_uarea_free: free a u-area |
381 | */ |
382 | |
383 | void |
384 | uvm_uarea_free(vaddr_t uaddr) |
385 | { |
386 | |
387 | pool_cache_put(uvm_uarea_cache, (void *)uaddr); |
388 | } |
389 | |
390 | void |
391 | uvm_uarea_system_free(vaddr_t uaddr) |
392 | { |
393 | |
394 | pool_cache_put(uvm_uarea_system_cache, (void *)uaddr); |
395 | } |
396 | |
397 | vaddr_t |
398 | uvm_lwp_getuarea(lwp_t *l) |
399 | { |
400 | |
401 | return (vaddr_t)l->l_addr - UAREA_PCB_OFFSET; |
402 | } |
403 | |
404 | void |
405 | uvm_lwp_setuarea(lwp_t *l, vaddr_t addr) |
406 | { |
407 | |
408 | l->l_addr = (void *)(addr + UAREA_PCB_OFFSET); |
409 | } |
410 | |
411 | /* |
412 | * uvm_proc_exit: exit a virtual address space |
413 | * |
414 | * - borrow proc0's address space because freeing the vmspace |
415 | * of the dead process may block. |
416 | */ |
417 | |
418 | void |
419 | uvm_proc_exit(struct proc *p) |
420 | { |
421 | struct lwp *l = curlwp; /* XXX */ |
422 | struct vmspace *ovm; |
423 | |
424 | KASSERT(p == l->l_proc); |
425 | ovm = p->p_vmspace; |
426 | KASSERT(ovm != NULL); |
427 | |
428 | if (__predict_false(ovm == proc0.p_vmspace)) |
429 | return; |
430 | |
431 | /* |
432 | * borrow proc0's address space. |
433 | */ |
434 | kpreempt_disable(); |
435 | pmap_deactivate(l); |
436 | p->p_vmspace = proc0.p_vmspace; |
437 | pmap_activate(l); |
438 | kpreempt_enable(); |
439 | |
440 | uvmspace_free(ovm); |
441 | } |
442 | |
443 | void |
444 | uvm_lwp_exit(struct lwp *l) |
445 | { |
446 | vaddr_t va = uvm_lwp_getuarea(l); |
447 | bool system = (l->l_flag & LW_SYSTEM) != 0; |
448 | |
449 | if (system) |
450 | uvm_uarea_system_free(va); |
451 | else |
452 | uvm_uarea_free(va); |
453 | #ifdef DIAGNOSTIC |
454 | uvm_lwp_setuarea(l, (vaddr_t)NULL); |
455 | #endif |
456 | } |
457 | |
458 | /* |
 * uvm_init_limits: init per-process VM limits
460 | * |
461 | * - called for process 0 and then inherited by all others. |
462 | */ |
463 | |
464 | void |
465 | uvm_init_limits(struct proc *p) |
466 | { |
467 | |
468 | /* |
469 | * Set up the initial limits on process VM. Set the maximum |
470 | * resident set size to be all of (reasonably) available memory. |
471 | * This causes any single, large process to start random page |
472 | * replacement once it fills memory. |
473 | */ |
474 | |
475 | p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ; |
476 | p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap; |
477 | p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ; |
478 | p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap; |
479 | p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY; |
480 | p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY; |
481 | p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN( |
482 | VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free)); |
483 | } |
484 | |
485 | /* |
486 | * uvm_scheduler: process zero main loop. |
487 | */ |
488 | |
489 | extern struct loadavg averunnable; |
490 | |
491 | void |
492 | uvm_scheduler(void) |
493 | { |
494 | lwp_t *l = curlwp; |
495 | |
496 | lwp_lock(l); |
497 | l->l_priority = PRI_VM; |
498 | l->l_class = SCHED_FIFO; |
499 | lwp_unlock(l); |
500 | |
501 | for (;;) { |
502 | sched_pstats(); |
503 | (void)kpause("uvm" , false, hz, NULL); |
504 | } |
505 | } |
506 | |