/*	$NetBSD: linux_sched.c,v 1.68 2015/07/03 02:24:28 christos Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; by Matthias Scheler.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux compatibility module. Try to deal with scheduler related syscalls.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.68 2015/07/03 02:24:28 christos Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>
#include <sys/kauth.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>

#include <sys/cpu.h>

#include <compat/linux/common/linux_types.h>
#include <compat/linux/common/linux_signal.h>
#include <compat/linux/common/linux_emuldata.h>
#include <compat/linux/common/linux_ipc.h>
#include <compat/linux/common/linux_sem.h>
#include <compat/linux/common/linux_exec.h>
#include <compat/linux/common/linux_machdep.h>

#include <compat/linux/linux_syscallargs.h>

#include <compat/linux/common/linux_sched.h>

static int linux_clone_nptl(struct lwp *, const struct linux_sys_clone_args *,
    register_t *);

/* Unlike Linux, dynamically calculate CPU mask size */
#define LINUX_CPU_MASK_SIZE (sizeof(long) * ((ncpu + LONG_BIT - 1) / LONG_BIT))
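/*
 * For example, on an LP64 machine (sizeof(long) == 8, LONG_BIT == 64)
 * with ncpu == 4, this works out to 8 * ((4 + 63) / 64) == 8 bytes,
 * i.e. a single long covering all four CPUs.
 */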

#if DEBUG_LINUX
#define DPRINTF(x) uprintf x
#else
#define DPRINTF(x)
#endif

static void
linux_child_return(void *arg)
{
	struct lwp *l = arg;
	struct proc *p = l->l_proc;
	struct linux_emuldata *led = l->l_emuldata;
	void *ctp = led->led_child_tidptr;
	int error;

	if (ctp) {
		if ((error = copyout(&p->p_pid, ctp, sizeof(p->p_pid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d, error = %d)\n",
			    __func__, ctp, p->p_pid, error);
	}
	child_return(arg);
}

int
linux_sys_clone(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct linux_emuldata *led;
	int flags, sig, error;

	/*
	 * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
	 */
	if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
		return EINVAL;

	/*
	 * A thread group implies shared signals, and shared signals
	 * imply shared VM. This matches what the Linux kernel does.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD
	    && (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
		return EINVAL;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
	    && (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
		return EINVAL;

	/*
	 * The thread group flavor is implemented totally differently.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD)
		return linux_clone_nptl(l, uap, retval);

	flags = 0;
	if (SCARG(uap, flags) & LINUX_CLONE_VM)
		flags |= FORK_SHAREVM;
	if (SCARG(uap, flags) & LINUX_CLONE_FS)
		flags |= FORK_SHARECWD;
	if (SCARG(uap, flags) & LINUX_CLONE_FILES)
		flags |= FORK_SHAREFILES;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
		flags |= FORK_SHARESIGS;
	if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
		flags |= FORK_PPWAIT;

	sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
	if (sig < 0 || sig >= LINUX__NSIG)
		return EINVAL;
	sig = linux_to_native_signo[sig];

	if (SCARG(uap, flags) & LINUX_CLONE_CHILD_SETTID) {
		led = l->l_emuldata;
		led->led_child_tidptr = SCARG(uap, child_tidptr);
	}

	/*
	 * Note that Linux does not provide a portable way of specifying
	 * the stack area; the caller must know if the stack grows up
	 * or down.  We therefore pass a stack size of 0, so that the
	 * code that makes this adjustment is a no-op.
	 */
	if ((error = fork1(l, flags, sig, SCARG(uap, stack), 0,
	    linux_child_return, NULL, retval, &p)) != 0) {
		DPRINTF(("%s: fork1: error %d\n", __func__, error));
		return error;
	}

	return 0;
}

static int
linux_clone_nptl(struct lwp *l, const struct linux_sys_clone_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct lwp *l2;
	struct linux_emuldata *led;
	void *parent_tidptr, *tls, *child_tidptr;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	lwpid_t lid;
	int flags, tnprocs, error;

	p = l->l_proc;
	flags = SCARG(uap, flags);
	parent_tidptr = SCARG(uap, parent_tidptr);
	tls = SCARG(uap, tls);
	child_tidptr = SCARG(uap, child_tidptr);

	tnprocs = atomic_inc_uint_nv(&nprocs);
	if (__predict_false(tnprocs >= maxproc) ||
	    kauth_authorize_process(l->l_cred, KAUTH_PROCESS_FORK, p,
	    KAUTH_ARG(tnprocs), NULL, NULL) != 0) {
		atomic_dec_uint(&nprocs);
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, LWP_DETACHED | LWP_PIDLID,
	    SCARG(uap, stack), 0, child_return, NULL, &l2, l->l_class);
	if (__predict_false(error)) {
		DPRINTF(("%s: lwp_create error=%d\n", __func__, error));
		atomic_dec_uint(&nprocs);
		uvm_uarea_free(uaddr);
		return error;
	}
	lid = l2->l_lid;

	/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
	if (flags & LINUX_CLONE_CHILD_CLEARTID) {
		led = l2->l_emuldata;
		led->led_clear_tid = child_tidptr;
	}

	/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
	if (flags & LINUX_CLONE_PARENT_SETTID) {
		if ((error = copyout(&lid, parent_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_PARENT_SETTID "
			    "failed (parent_tidptr = %p, tid = %d, error = %d)\n",
			    __func__, parent_tidptr, lid, error);
	}

	/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
	if (flags & LINUX_CLONE_CHILD_SETTID) {
		if ((error = copyout(&lid, child_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d, error = %d)\n",
			    __func__, child_tidptr, lid, error);
	}

	if (flags & LINUX_CLONE_SETTLS) {
		error = LINUX_LWP_SETPRIVATE(l2, tls);
		if (error) {
			DPRINTF(("%s: LINUX_LWP_SETPRIVATE %d\n", __func__,
			    error));
			lwp_exit(l2);
			return error;
		}
	}

	/*
	 * Set the new LWP running, unless the process is stopping;
	 * in that case the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	retval[0] = lid;
	retval[1] = 0;
	return 0;
}

/*
 * Linux realtime priority
 *
 * - SCHED_RR and SCHED_FIFO tasks have priorities [1,99].
 *
 * - SCHED_OTHER tasks don't have realtime priorities.
 *   In particular, sched_param::sched_priority is always 0.
 */

#define LINUX_SCHED_RTPRIO_MIN 1
#define LINUX_SCHED_RTPRIO_MAX 99
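
/*
 * A worked example of the scaling done by sched_linux2native() below,
 * assuming NetBSD's usual SCHED_PRI_MIN == 0 and SCHED_PRI_MAX == 63
 * from <sys/sched.h>: Linux rtprio 1 maps to native priority 0, rtprio
 * 99 maps to native 63, and rtprio 50 maps to (50 - 1) * 63 / 98 == 31
 * after integer truncation.  sched_native2linux() applies the inverse
 * scaling, so a round trip can drift by the truncation error.
 */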

static int
sched_linux2native(int linux_policy, struct linux_sched_param *linux_params,
    int *native_policy, struct sched_param *native_params)
{

	switch (linux_policy) {
	case LINUX_SCHED_OTHER:
		if (native_policy != NULL) {
			*native_policy = SCHED_OTHER;
		}
		break;

	case LINUX_SCHED_FIFO:
		if (native_policy != NULL) {
			*native_policy = SCHED_FIFO;
		}
		break;

	case LINUX_SCHED_RR:
		if (native_policy != NULL) {
			*native_policy = SCHED_RR;
		}
		break;

	default:
		return EINVAL;
	}

	if (linux_params != NULL) {
		int prio = linux_params->sched_priority;

		KASSERT(native_params != NULL);

		if (linux_policy == LINUX_SCHED_OTHER) {
			if (prio != 0) {
				return EINVAL;
			}
			native_params->sched_priority = PRI_NONE; /* XXX */
		} else {
			if (prio < LINUX_SCHED_RTPRIO_MIN ||
			    prio > LINUX_SCHED_RTPRIO_MAX) {
				return EINVAL;
			}
			native_params->sched_priority =
			    (prio - LINUX_SCHED_RTPRIO_MIN)
			    * (SCHED_PRI_MAX - SCHED_PRI_MIN)
			    / (LINUX_SCHED_RTPRIO_MAX - LINUX_SCHED_RTPRIO_MIN)
			    + SCHED_PRI_MIN;
		}
	}

	return 0;
}

static int
sched_native2linux(int native_policy, struct sched_param *native_params,
    int *linux_policy, struct linux_sched_param *linux_params)
{

	switch (native_policy) {
	case SCHED_OTHER:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_OTHER;
		}
		break;

	case SCHED_FIFO:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_FIFO;
		}
		break;

	case SCHED_RR:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_RR;
		}
		break;

	default:
		panic("%s: unknown policy %d\n", __func__, native_policy);
	}

	if (native_params != NULL) {
		int prio = native_params->sched_priority;

		KASSERT(prio >= SCHED_PRI_MIN);
		KASSERT(prio <= SCHED_PRI_MAX);
		KASSERT(linux_params != NULL);

		DPRINTF(("%s: native: policy %d, priority %d\n",
		    __func__, native_policy, prio));

		if (native_policy == SCHED_OTHER) {
			linux_params->sched_priority = 0;
		} else {
			linux_params->sched_priority =
			    (prio - SCHED_PRI_MIN)
			    * (LINUX_SCHED_RTPRIO_MAX - LINUX_SCHED_RTPRIO_MIN)
			    / (SCHED_PRI_MAX - SCHED_PRI_MIN)
			    + LINUX_SCHED_RTPRIO_MIN;
		}
		DPRINTF(("%s: linux: policy %d, priority %d\n", __func__,
		    linux_policy != NULL ? *linux_policy : -1,
		    linux_params->sched_priority));
	}

	return 0;
}

int
linux_sys_sched_setparam(struct lwp *l, const struct linux_sys_sched_setparam_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(const struct linux_sched_param *) sp;
	} */
	int error, policy;
	struct linux_sched_param lp;
	struct sched_param sp;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		goto out;

	/* We need the current policy in Linux terms. */
	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
	if (error)
		goto out;
	error = sched_native2linux(policy, NULL, &policy, NULL);
	if (error)
		goto out;

	error = sched_linux2native(policy, &lp, &policy, &sp);
	if (error)
		goto out;

	error = do_sched_setparam(SCARG(uap, pid), 0, policy, &sp);
	if (error)
		goto out;

out:
	return error;
}

int
linux_sys_sched_getparam(struct lwp *l, const struct linux_sys_sched_getparam_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(struct linux_sched_param *) sp;
	} */
	struct linux_sched_param lp;
	struct sched_param sp;
	int error, policy;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, &sp);
	if (error)
		goto out;
	DPRINTF(("%s: native: policy %d, priority %d\n",
	    __func__, policy, sp.sched_priority));

	error = sched_native2linux(policy, &sp, NULL, &lp);
	if (error)
		goto out;
	DPRINTF(("%s: linux: policy %d, priority %d\n",
	    __func__, policy, lp.sched_priority));

	error = copyout(&lp, SCARG(uap, sp), sizeof(lp));
	if (error)
		goto out;

out:
	return error;
}

int
linux_sys_sched_setscheduler(struct lwp *l, const struct linux_sys_sched_setscheduler_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(int) policy;
		syscallarg(const struct linux_sched_param *) sp;
	} */
	int error, policy;
	struct linux_sched_param lp;
	struct sched_param sp;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		goto out;
	DPRINTF(("%s: linux: policy %d, priority %d\n",
	    __func__, SCARG(uap, policy), lp.sched_priority));

	error = sched_linux2native(SCARG(uap, policy), &lp, &policy, &sp);
	if (error)
		goto out;
	DPRINTF(("%s: native: policy %d, priority %d\n",
	    __func__, policy, sp.sched_priority));

	error = do_sched_setparam(SCARG(uap, pid), 0, policy, &sp);
	if (error)
		goto out;

out:
	return error;
}

int
linux_sys_sched_getscheduler(struct lwp *l, const struct linux_sys_sched_getscheduler_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
	} */
	int error, policy;

	*retval = -1;

	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
	if (error)
		goto out;

	error = sched_native2linux(policy, NULL, &policy, NULL);
	if (error)
		goto out;

	*retval = policy;

out:
	return error;
}

int
linux_sys_sched_yield(struct lwp *l, const void *v, register_t *retval)
{

	yield();
	return 0;
}

int
linux_sys_sched_get_priority_max(struct lwp *l, const struct linux_sys_sched_get_priority_max_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) policy;
	} */

	switch (SCARG(uap, policy)) {
	case LINUX_SCHED_OTHER:
		*retval = 0;
		break;
	case LINUX_SCHED_FIFO:
	case LINUX_SCHED_RR:
		*retval = LINUX_SCHED_RTPRIO_MAX;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

int
linux_sys_sched_get_priority_min(struct lwp *l, const struct linux_sys_sched_get_priority_min_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) policy;
	} */

	switch (SCARG(uap, policy)) {
	case LINUX_SCHED_OTHER:
		*retval = 0;
		break;
	case LINUX_SCHED_FIFO:
	case LINUX_SCHED_RR:
		*retval = LINUX_SCHED_RTPRIO_MIN;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

int
linux_sys_exit(struct lwp *l, const struct linux_sys_exit_args *uap, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

#ifndef __m68k__
/* Present on everything but m68k */
int
linux_sys_exit_group(struct lwp *l, const struct linux_sys_exit_group_args *uap, register_t *retval)
{

	return sys_exit(l, (const void *)uap, retval);
}
#endif /* !__m68k__ */

int
linux_sys_set_tid_address(struct lwp *l, const struct linux_sys_set_tid_address_args *uap, register_t *retval)
{
	/* {
		syscallarg(int *) tid;
	} */
	struct linux_emuldata *led;

	led = (struct linux_emuldata *)l->l_emuldata;
	led->led_clear_tid = SCARG(uap, tid);
	*retval = l->l_lid;

	return 0;
}

/* ARGSUSED1 */
int
linux_sys_gettid(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

/*
 * The affinity syscalls assume that the layout of our cpu kcpuset is
 * the same as Linux's: a linear bitmask.
 */
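/*
 * For example, with 8 CPUs the mask fits in the low byte of a single
 * long: bit N set means the thread may run on CPU N, matching the
 * layout Linux's sched_getaffinity(2) returns to userland.
 */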
int
linux_sys_sched_getaffinity(struct lwp *l, const struct linux_sys_sched_getaffinity_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */
	struct lwp *t;
	kcpuset_t *kcset;
	size_t size;
	cpuid_t i;
	int error;

	size = LINUX_CPU_MASK_SIZE;
	if (SCARG(uap, len) < size)
		return EINVAL;

	/* Lock the LWP */
	t = lwp_find2(SCARG(uap, pid), l->l_lid);
	if (t == NULL)
		return ESRCH;

	/* Check the permission */
	if (kauth_authorize_process(l->l_cred,
	    KAUTH_PROCESS_SCHEDULER_GETAFFINITY, t->l_proc, NULL, NULL, NULL)) {
		mutex_exit(t->l_proc->p_lock);
		return EPERM;
	}

	kcpuset_create(&kcset, true);
	lwp_lock(t);
	if (t->l_affinity != NULL)
		kcpuset_copy(kcset, t->l_affinity);
	else {
		/*
		 * All available CPUs should be masked when affinity has not
		 * been set.
		 */
		kcpuset_zero(kcset);
		for (i = 0; i < ncpu; i++)
			kcpuset_set(kcset, i);
	}
	lwp_unlock(t);
	mutex_exit(t->l_proc->p_lock);
	error = kcpuset_copyout(kcset, (cpuset_t *)SCARG(uap, mask), size);
	kcpuset_unuse(kcset, NULL);
	*retval = size;
	return error;
}

int
linux_sys_sched_setaffinity(struct lwp *l, const struct linux_sys_sched_setaffinity_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */
	struct sys__sched_setaffinity_args ssa;
	size_t size;

	size = LINUX_CPU_MASK_SIZE;
	if (SCARG(uap, len) < size)
		return EINVAL;

	SCARG(&ssa, pid) = SCARG(uap, pid);
	SCARG(&ssa, lid) = l->l_lid;
	SCARG(&ssa, size) = size;
	SCARG(&ssa, cpuset) = (cpuset_t *)SCARG(uap, mask);

	return sys__sched_setaffinity(l, &ssa, retval);
}