Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/kern_exec.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/kern_exec.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.355.2.4 retrieving revision 1.356 diff -u -p -r1.355.2.4 -r1.356 --- src/sys/kern/kern_exec.c 2017/12/03 11:38:44 1.355.2.4 +++ src/sys/kern/kern_exec.c 2012/10/13 15:35:55 1.356 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_exec.c,v 1.355.2.4 2017/12/03 11:38:44 jdolecek Exp $ */ +/* $NetBSD: kern_exec.c,v 1.356 2012/10/13 15:35:55 christos Exp $ */ /*- * Copyright (c) 2008 The NetBSD Foundation, Inc. @@ -59,10 +59,9 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.355.2.4 2017/12/03 11:38:44 jdolecek Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.356 2012/10/13 15:35:55 christos Exp $"); #include "opt_exec.h" -#include "opt_execfmt.h" #include "opt_ktrace.h" #include "opt_modular.h" #include "opt_syscall_debug.h" @@ -75,11 +74,11 @@ __KERNEL_RCSID(0, "$NetBSD: kern_exec.c, #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -105,6 +104,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_exec.c, #include #include #include +#include #include @@ -112,51 +112,32 @@ __KERNEL_RCSID(0, "$NetBSD: kern_exec.c, #include -#ifndef MD_TOPDOWN_INIT -#ifdef __USE_TOPDOWN_VM -#define MD_TOPDOWN_INIT(epp) (epp)->ep_flags |= EXEC_TOPDOWN_VM -#else -#define MD_TOPDOWN_INIT(epp) -#endif -#endif - -struct execve_data; - -extern int user_va0_disable; - -static size_t calcargs(struct execve_data * restrict, const size_t); -static size_t calcstack(struct execve_data * restrict, const size_t); -static int copyoutargs(struct execve_data * restrict, struct lwp *, - char * const); -static int copyoutpsstrs(struct execve_data * restrict, struct proc *); -static int copyinargs(struct execve_data * restrict, char * const *, - char * const *, execve_fetch_element_t, char **); -static int copyinargstrs(struct execve_data * restrict, char * const *, - execve_fetch_element_t, char **, size_t *, void (*)(const void *, size_t)); static int exec_sigcode_map(struct proc *, const struct emul *); -#if defined(DEBUG) && !defined(DEBUG_EXEC) -#define DEBUG_EXEC -#endif #ifdef DEBUG_EXEC #define DPRINTF(a) printf a #define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \ __LINE__, (s), (a), (b)) -static void dump_vmcmds(const struct exec_package * const, size_t, int); -#define DUMPVMCMDS(p, x, e) do { dump_vmcmds((p), (x), (e)); } while (0) #else #define DPRINTF(a) #define COPYPRINTF(s, a, b) -#define DUMPVMCMDS(p, x, e) do {} while (0) #endif /* DEBUG_EXEC */ /* * DTrace SDT provider definitions */ -SDT_PROVIDER_DECLARE(proc); -SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *"); -SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *"); -SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int"); +SDT_PROBE_DEFINE(proc,,,exec, + "char *", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL); +SDT_PROBE_DEFINE(proc,,,exec_success, + "char *", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL); +SDT_PROBE_DEFINE(proc,,,exec_failure, + "int", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL); /* * Exec function switch: @@ -186,28 +167,16 @@ struct exec_entry { void syscall(void); #endif -/* NetBSD autoloadable syscalls */ -#ifdef MODULAR -#include -#endif - /* NetBSD emul struct */ 
struct emul emul_netbsd = { .e_name = "netbsd", -#ifdef EMUL_NATIVEROOT - .e_path = EMUL_NATIVEROOT, -#else .e_path = NULL, -#endif #ifndef __HAVE_MINIMAL_EMUL .e_flags = EMUL_HAS_SYS___syscall, .e_errno = NULL, .e_nosys = SYS_syscall, .e_nsysent = SYS_NSYSENT, #endif -#ifdef MODULAR - .e_sc_autoload = netbsd_syscalls_autoload, -#endif .e_sysent = sysent, #ifdef SYSCALL_DEBUG .e_syscallnames = syscallnames, @@ -260,7 +229,6 @@ struct execve_data { char *ed_resolvedpathbuf; size_t ed_ps_strings_sz; int ed_szsigcode; - size_t ed_argslen; long ed_argc; long ed_envc; }; @@ -280,14 +248,11 @@ struct spawn_exec_data { volatile uint32_t sed_refcnt; }; -static struct vm_map *exec_map; -static struct pool exec_pool; - static void * exec_pool_alloc(struct pool *pp, int flags) { - return (void *)uvm_km_alloc(exec_map, NCARGS, 0, + return (void *)uvm_km_alloc(kernel_map, NCARGS, 0, UVM_KMF_PAGEABLE | UVM_KMF_WAITVA); } @@ -295,9 +260,11 @@ static void exec_pool_free(struct pool *pp, void *addr) { - uvm_km_free(exec_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); + uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); } +static struct pool exec_pool; + static struct pool_allocator exec_palloc = { .pa_alloc = exec_pool_alloc, .pa_free = exec_pool_free, @@ -338,25 +305,14 @@ check_exec(struct lwp *l, struct exec_pa struct nameidata nd; size_t resid; -#if 1 - // grab the absolute pathbuf here before namei() trashes it. - pathbuf_copystring(pb, epp->ep_resolvedname, PATH_MAX); -#endif NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); /* first get the vnode */ if ((error = namei(&nd)) != 0) return error; epp->ep_vp = vp = nd.ni_vp; -#if 0 - /* - * XXX: can't use nd.ni_pnbuf, because although pb contains an - * absolute path, nd.ni_pnbuf does not if the path contains symlinks. - */ - /* normally this can't fail */ - error = copystr(nd.ni_pnbuf, epp->ep_resolvedname, PATH_MAX, NULL); - KASSERT(error == 0); -#endif + /* this cannot overflow as both are size PATH_MAX */ + strcpy(epp->ep_resolvedname, nd.ni_pnbuf); #ifdef DIAGNOSTIC /* paranoia (take this out once namei stuff stabilizes) */ @@ -414,10 +370,11 @@ check_exec(struct lwp *l, struct exec_pa /* * Set up default address space limits. Can be overridden * by individual exec packages. + * + * XXX probably should be all done in the exec packages. */ - epp->ep_vm_minaddr = exec_vm_minaddr(VM_MIN_ADDRESS); + epp->ep_vm_minaddr = VM_MIN_ADDRESS; epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS; - /* * set up the vmcmds for creation of the process * address space @@ -473,11 +430,6 @@ check_exec(struct lwp *l, struct exec_pa return 0; } - /* - * Reset all the fields that may have been modified by the - * loader. - */ - KASSERT(epp->ep_emul_arg == NULL); if (epp->ep_emul_root != NULL) { vrele(epp->ep_emul_root); epp->ep_emul_root = NULL; @@ -486,7 +438,6 @@ check_exec(struct lwp *l, struct exec_pa vrele(epp->ep_interp); epp->ep_interp = NULL; } - epp->ep_pax_flags = 0; /* make sure the first "interesting" error code is saved. 
*/ if (error == ENOEXEC) @@ -552,7 +503,7 @@ sys_execve(struct lwp *l, const struct s SCARG(uap, envp), execve_fetch_element); } -int +int sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap, register_t *retval) { @@ -589,9 +540,15 @@ exec_autoload(void) "exec_coff", "exec_ecoff", "compat_aoutm68k", + "compat_freebsd", + "compat_ibcs2", + "compat_linux", + "compat_linux32", "compat_netbsd32", "compat_sunos", "compat_sunos32", + "compat_svr4", + "compat_svr4_32", "compat_ultrix", NULL }; @@ -600,95 +557,32 @@ exec_autoload(void) list = (nexecs == 0 ? native : compat); for (i = 0; list[i] != NULL; i++) { - if (module_autoload(list[i], MODULE_CLASS_EXEC) != 0) { - continue; + if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) { + continue; } - yield(); + yield(); } #endif } static int -makepathbuf(struct lwp *l, const char *upath, struct pathbuf **pbp, - size_t *offs) -{ - char *path, *bp; - size_t len, tlen; - int error; - struct cwdinfo *cwdi; - - path = PNBUF_GET(); - error = copyinstr(upath, path, MAXPATHLEN, &len); - if (error) { - PNBUF_PUT(path); - DPRINTF(("%s: copyin path @%p %d\n", __func__, upath, error)); - return error; - } - - if (path[0] == '/') { - *offs = 0; - goto out; - } - - len++; - if (len + 1 >= MAXPATHLEN) - goto out; - bp = path + MAXPATHLEN - len; - memmove(bp, path, len); - *(--bp) = '/'; - - cwdi = l->l_proc->p_cwdi; - rw_enter(&cwdi->cwdi_lock, RW_READER); - error = getcwd_common(cwdi->cwdi_cdir, NULL, &bp, path, MAXPATHLEN / 2, - GETCWD_CHECK_ACCESS, l); - rw_exit(&cwdi->cwdi_lock); - - if (error) { - DPRINTF(("%s: getcwd_common path %s %d\n", __func__, path, - error)); - goto out; - } - tlen = path + MAXPATHLEN - bp; - - memmove(path, bp, tlen); - path[tlen] = '\0'; - *offs = tlen - len; -out: - *pbp = pathbuf_assimilate(path); - return 0; -} - -vaddr_t -exec_vm_minaddr(vaddr_t va_min) -{ - /* - * Increase va_min if we don't want NULL to be mappable by the - * process. - */ -#define VM_MIN_GUARD PAGE_SIZE - if (user_va0_disable && (va_min < VM_MIN_GUARD)) - return VM_MIN_GUARD; - return va_min; -} - -static int execve_loadvm(struct lwp *l, const char *path, char * const *args, char * const *envs, execve_fetch_element_t fetch_element, struct execve_data * restrict data) { - struct exec_package * const epp = &data->ed_pack; int error; struct proc *p; - char *dp; + char *dp, *sp; + size_t i, len; + struct exec_fakearg *tmpfap; u_int modgen; - size_t offs = 0; // XXX: GCC KASSERT(data != NULL); p = l->l_proc; - modgen = 0; + modgen = 0; - SDT_PROBE(proc, kernel, , exec, path, 0, 0, 0, 0); + SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0); /* * Check if we have exceeded our number of processes limit. @@ -731,103 +625,204 @@ execve_loadvm(struct lwp *l, const char * functions call check_exec() recursively - for example, * see exec_script_makecmds(). */ - if ((error = makepathbuf(l, path, &data->ed_pathbuf, &offs)) != 0) + error = pathbuf_copyin(path, &data->ed_pathbuf); + if (error) { + DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__, + path, error)); goto clrflg; + } data->ed_pathstring = pathbuf_stringcopy_get(data->ed_pathbuf); + data->ed_resolvedpathbuf = PNBUF_GET(); +#ifdef DIAGNOSTIC + strcpy(data->ed_resolvedpathbuf, "/wrong"); +#endif /* * initialize the fields of the exec package. 
*/ - epp->ep_kname = data->ed_pathstring + offs; - epp->ep_resolvedname = data->ed_resolvedpathbuf; - epp->ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); - epp->ep_hdrlen = exec_maxhdrsz; - epp->ep_hdrvalid = 0; - epp->ep_emul_arg = NULL; - epp->ep_emul_arg_free = NULL; - memset(&epp->ep_vmcmds, 0, sizeof(epp->ep_vmcmds)); - epp->ep_vap = &data->ed_attr; - epp->ep_flags = (p->p_flag & PK_32) ? EXEC_FROM32 : 0; - MD_TOPDOWN_INIT(epp); - epp->ep_emul_root = NULL; - epp->ep_interp = NULL; - epp->ep_esch = NULL; - epp->ep_pax_flags = 0; - memset(epp->ep_machine_arch, 0, sizeof(epp->ep_machine_arch)); + data->ed_pack.ep_name = path; + data->ed_pack.ep_kname = data->ed_pathstring; + data->ed_pack.ep_resolvedname = data->ed_resolvedpathbuf; + data->ed_pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); + data->ed_pack.ep_hdrlen = exec_maxhdrsz; + data->ed_pack.ep_hdrvalid = 0; + data->ed_pack.ep_emul_arg = NULL; + data->ed_pack.ep_emul_arg_free = NULL; + data->ed_pack.ep_vmcmds.evs_cnt = 0; + data->ed_pack.ep_vmcmds.evs_used = 0; + data->ed_pack.ep_vap = &data->ed_attr; + data->ed_pack.ep_flags = 0; + data->ed_pack.ep_emul_root = NULL; + data->ed_pack.ep_interp = NULL; + data->ed_pack.ep_esch = NULL; + data->ed_pack.ep_pax_flags = 0; rw_enter(&exec_lock, RW_READER); /* see if we can run it. */ - if ((error = check_exec(l, epp, data->ed_pathbuf)) != 0) { - if (error != ENOENT && error != EACCES) { - DPRINTF(("%s: check exec failed for %s, error %d\n", - __func__, epp->ep_kname, error)); + if ((error = check_exec(l, &data->ed_pack, data->ed_pathbuf)) != 0) { + if (error != ENOENT) { + DPRINTF(("%s: check exec failed %d\n", + __func__, error)); } goto freehdr; } + /* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */ + /* allocate an argument buffer */ data->ed_argp = pool_get(&exec_pool, PR_WAITOK); KASSERT(data->ed_argp != NULL); dp = data->ed_argp; + data->ed_argc = 0; - if ((error = copyinargs(data, args, envs, fetch_element, &dp)) != 0) { + /* copy the fake args list, if there's one, freeing it as we go */ + if (data->ed_pack.ep_flags & EXEC_HASARGL) { + tmpfap = data->ed_pack.ep_fa; + while (tmpfap->fa_arg != NULL) { + const char *cp; + + cp = tmpfap->fa_arg; + while (*cp) + *dp++ = *cp++; + *dp++ = '\0'; + ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg); + + kmem_free(tmpfap->fa_arg, tmpfap->fa_len); + tmpfap++; data->ed_argc++; + } + kmem_free(data->ed_pack.ep_fa, data->ed_pack.ep_fa_len); + data->ed_pack.ep_flags &= ~EXEC_HASARGL; + } + + /* Now get argv & environment */ + if (args == NULL) { + DPRINTF(("%s: null args\n", __func__)); + error = EINVAL; goto bad; } + /* 'i' will index the argp/envp element to be retrieved */ + i = 0; + if (data->ed_pack.ep_flags & EXEC_SKIPARG) + i++; - /* - * Calculate the new stack size. 
- */ + while (1) { + len = data->ed_argp + ARG_MAX - dp; + if ((error = (*fetch_element)(args, i, &sp)) != 0) { + DPRINTF(("%s: fetch_element args %d\n", + __func__, error)); + goto bad; + } + if (!sp) + break; + if ((error = copyinstr(sp, dp, len, &len)) != 0) { + DPRINTF(("%s: copyinstr args %d\n", __func__, error)); + if (error == ENAMETOOLONG) + error = E2BIG; + goto bad; + } + ktrexecarg(dp, len - 1); + dp += len; + i++; + data->ed_argc++; + } + + data->ed_envc = 0; + /* environment need not be there */ + if (envs != NULL) { + i = 0; + while (1) { + len = data->ed_argp + ARG_MAX - dp; + if ((error = (*fetch_element)(envs, i, &sp)) != 0) { + DPRINTF(("%s: fetch_element env %d\n", + __func__, error)); + goto bad; + } + if (!sp) + break; + if ((error = copyinstr(sp, dp, len, &len)) != 0) { + DPRINTF(("%s: copyinstr env %d\n", + __func__, error)); + if (error == ENAMETOOLONG) + error = E2BIG; + goto bad; + } + + ktrexecenv(dp, len - 1); + dp += len; + i++; + data->ed_envc++; + } + } + + dp = (char *) ALIGN(dp); + + data->ed_szsigcode = data->ed_pack.ep_esch->es_emul->e_esigcode - + data->ed_pack.ep_esch->es_emul->e_sigcode; #ifdef __MACHINE_STACK_GROWS_UP -/* - * copyargs() fills argc/argv/envp from the lower address even on - * __MACHINE_STACK_GROWS_UP machines. Reserve a few words just below the SP - * so that _rtld() use it. - */ +/* See big comment lower down */ #define RTLD_GAP 32 #else #define RTLD_GAP 0 #endif - const size_t argenvstrlen = (char *)ALIGN(dp) - data->ed_argp; + /* Now check if args & environ fit into new stack */ + if (data->ed_pack.ep_flags & EXEC_32) { + data->ed_ps_strings_sz = sizeof(struct ps_strings32); + len = ((data->ed_argc + data->ed_envc + 2 + + data->ed_pack.ep_esch->es_arglen) * + sizeof(int) + sizeof(int) + dp + RTLD_GAP + + data->ed_szsigcode + data->ed_ps_strings_sz + STACK_PTHREADSPACE) + - data->ed_argp; + } else { + data->ed_ps_strings_sz = sizeof(struct ps_strings); + len = ((data->ed_argc + data->ed_envc + 2 + + data->ed_pack.ep_esch->es_arglen) * + sizeof(char *) + sizeof(int) + dp + RTLD_GAP + + data->ed_szsigcode + data->ed_ps_strings_sz + STACK_PTHREADSPACE) + - data->ed_argp; + } - data->ed_argslen = calcargs(data, argenvstrlen); +#ifdef PAX_ASLR + if (pax_aslr_active(l)) + len += (cprng_fast32() % PAGE_SIZE); +#endif /* PAX_ASLR */ - const size_t len = calcstack(data, pax_aslr_stack_gap(epp) + RTLD_GAP); + /* make the stack "safely" aligned */ + len = STACK_LEN_ALIGN(len, STACK_ALIGNBYTES); - if (len > epp->ep_ssize) { + if (len > data->ed_pack.ep_ssize) { /* in effect, compare to initial limit */ DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len)); - error = ENOMEM; goto bad; } /* adjust "active stack depth" for process VSZ */ - epp->ep_ssize = len; + data->ed_pack.ep_ssize = len; return 0; bad: /* free the vmspace-creation commands, and release their references */ - kill_vmcmds(&epp->ep_vmcmds); + kill_vmcmds(&data->ed_pack.ep_vmcmds); /* kill any opened file descriptor, if necessary */ - if (epp->ep_flags & EXEC_HASFD) { - epp->ep_flags &= ~EXEC_HASFD; - fd_close(epp->ep_fd); + if (data->ed_pack.ep_flags & EXEC_HASFD) { + data->ed_pack.ep_flags &= ~EXEC_HASFD; + fd_close(data->ed_pack.ep_fd); } /* close and put the exec'd file */ - vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY); - VOP_CLOSE(epp->ep_vp, FREAD, l->l_cred); - vput(epp->ep_vp); + vn_lock(data->ed_pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); + VOP_CLOSE(data->ed_pack.ep_vp, FREAD, l->l_cred); + vput(data->ed_pack.ep_vp); pool_put(&exec_pool, data->ed_argp); freehdr: - 
kmem_free(epp->ep_hdr, epp->ep_hdrlen); - if (epp->ep_emul_root != NULL) - vrele(epp->ep_emul_root); - if (epp->ep_interp != NULL) - vrele(epp->ep_interp); + kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen); + if (data->ed_pack.ep_emul_root != NULL) + vrele(data->ed_pack.ep_emul_root); + if (data->ed_pack.ep_interp != NULL) + vrele(data->ed_pack.ep_interp); rw_exit(&exec_lock); @@ -844,264 +839,54 @@ execve_loadvm(struct lwp *l, const char goto retry; } - SDT_PROBE(proc, kernel, , exec__failure, error, 0, 0, 0, 0); - return error; -} - -static int -execve_dovmcmds(struct lwp *l, struct execve_data * restrict data) -{ - struct exec_package * const epp = &data->ed_pack; - struct proc *p = l->l_proc; - struct exec_vmcmd *base_vcp; - int error = 0; - size_t i; - - /* record proc's vnode, for use by procfs and others */ - if (p->p_textvp) - vrele(p->p_textvp); - vref(epp->ep_vp); - p->p_textvp = epp->ep_vp; - - /* create the new process's VM space by running the vmcmds */ - KASSERTMSG(epp->ep_vmcmds.evs_used != 0, "%s: no vmcmds", __func__); - -#ifdef TRACE_EXEC - DUMPVMCMDS(epp, 0, 0); -#endif - - base_vcp = NULL; - - for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) { - struct exec_vmcmd *vcp; - - vcp = &epp->ep_vmcmds.evs_cmds[i]; - if (vcp->ev_flags & VMCMD_RELATIVE) { - KASSERTMSG(base_vcp != NULL, - "%s: relative vmcmd with no base", __func__); - KASSERTMSG((vcp->ev_flags & VMCMD_BASE) == 0, - "%s: illegal base & relative vmcmd", __func__); - vcp->ev_addr += base_vcp->ev_addr; - } - error = (*vcp->ev_proc)(l, vcp); - if (error) - DUMPVMCMDS(epp, i, error); - if (vcp->ev_flags & VMCMD_BASE) - base_vcp = vcp; - } - - /* free the vmspace-creation commands, and release their references */ - kill_vmcmds(&epp->ep_vmcmds); - - vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY); - VOP_CLOSE(epp->ep_vp, FREAD, l->l_cred); - vput(epp->ep_vp); - - /* if an error happened, deallocate and punt */ - if (error != 0) { - DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); - } + SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); return error; } static void execve_free_data(struct execve_data *data) { - struct exec_package * const epp = &data->ed_pack; /* free the vmspace-creation commands, and release their references */ - kill_vmcmds(&epp->ep_vmcmds); + kill_vmcmds(&data->ed_pack.ep_vmcmds); /* kill any opened file descriptor, if necessary */ - if (epp->ep_flags & EXEC_HASFD) { - epp->ep_flags &= ~EXEC_HASFD; - fd_close(epp->ep_fd); + if (data->ed_pack.ep_flags & EXEC_HASFD) { + data->ed_pack.ep_flags &= ~EXEC_HASFD; + fd_close(data->ed_pack.ep_fd); } /* close and put the exec'd file */ - vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY); - VOP_CLOSE(epp->ep_vp, FREAD, curlwp->l_cred); - vput(epp->ep_vp); + vn_lock(data->ed_pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); + VOP_CLOSE(data->ed_pack.ep_vp, FREAD, curlwp->l_cred); + vput(data->ed_pack.ep_vp); pool_put(&exec_pool, data->ed_argp); - kmem_free(epp->ep_hdr, epp->ep_hdrlen); - if (epp->ep_emul_root != NULL) - vrele(epp->ep_emul_root); - if (epp->ep_interp != NULL) - vrele(epp->ep_interp); + kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen); + if (data->ed_pack.ep_emul_root != NULL) + vrele(data->ed_pack.ep_emul_root); + if (data->ed_pack.ep_interp != NULL) + vrele(data->ed_pack.ep_interp); pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring); pathbuf_destroy(data->ed_pathbuf); PNBUF_PUT(data->ed_resolvedpathbuf); } -static void -pathexec(struct proc *p, const char *resolvedname) -{ - KASSERT(resolvedname[0] == '/'); - - /* 
set command name & other accounting info */ - strlcpy(p->p_comm, strrchr(resolvedname, '/') + 1, sizeof(p->p_comm)); - - kmem_strfree(p->p_path); - p->p_path = kmem_strdupsize(resolvedname, NULL, KM_SLEEP); -} - -/* XXX elsewhere */ -static int -credexec(struct lwp *l, struct vattr *attr) -{ - struct proc *p = l->l_proc; - int error; - - /* - * Deal with set[ug]id. MNT_NOSUID has already been used to disable - * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked - * out additional references on the process for the moment. - */ - if ((p->p_slflag & PSL_TRACED) == 0 && - - (((attr->va_mode & S_ISUID) != 0 && - kauth_cred_geteuid(l->l_cred) != attr->va_uid) || - - ((attr->va_mode & S_ISGID) != 0 && - kauth_cred_getegid(l->l_cred) != attr->va_gid))) { - /* - * Mark the process as SUGID before we do - * anything that might block. - */ - proc_crmod_enter(); - proc_crmod_leave(NULL, NULL, true); - - /* Make sure file descriptors 0..2 are in use. */ - if ((error = fd_checkstd()) != 0) { - DPRINTF(("%s: fdcheckstd failed %d\n", - __func__, error)); - return error; - } - - /* - * Copy the credential so other references don't see our - * changes. - */ - l->l_cred = kauth_cred_copy(l->l_cred); -#ifdef KTRACE - /* - * If the persistent trace flag isn't set, turn off. - */ - if (p->p_tracep) { - mutex_enter(&ktrace_lock); - if (!(p->p_traceflag & KTRFAC_PERSISTENT)) - ktrderef(p); - mutex_exit(&ktrace_lock); - } -#endif - if (attr->va_mode & S_ISUID) - kauth_cred_seteuid(l->l_cred, attr->va_uid); - if (attr->va_mode & S_ISGID) - kauth_cred_setegid(l->l_cred, attr->va_gid); - } else { - if (kauth_cred_geteuid(l->l_cred) == - kauth_cred_getuid(l->l_cred) && - kauth_cred_getegid(l->l_cred) == - kauth_cred_getgid(l->l_cred)) - p->p_flag &= ~PK_SUGID; - } - - /* - * Copy the credential so other references don't see our changes. - * Test to see if this is necessary first, since in the common case - * we won't need a private reference. - */ - if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || - kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { - l->l_cred = kauth_cred_copy(l->l_cred); - kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); - kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); - } - - /* Update the master credentials. */ - if (l->l_cred != p->p_cred) { - kauth_cred_t ocred; - - kauth_cred_hold(l->l_cred); - mutex_enter(p->p_lock); - ocred = p->p_cred; - p->p_cred = l->l_cred; - mutex_exit(p->p_lock); - kauth_cred_free(ocred); - } - - return 0; -} - -static void -emulexec(struct lwp *l, struct exec_package *epp) -{ - struct proc *p = l->l_proc; - - /* The emulation root will usually have been found when we looked - * for the elf interpreter (or similar), if not look now. */ - if (epp->ep_esch->es_emul->e_path != NULL && - epp->ep_emul_root == NULL) - emul_find_root(l, epp); - - /* Any old emulation root got removed by fdcloseexec */ - rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); - p->p_cwdi->cwdi_edir = epp->ep_emul_root; - rw_exit(&p->p_cwdi->cwdi_lock); - epp->ep_emul_root = NULL; - if (epp->ep_interp != NULL) - vrele(epp->ep_interp); - - /* - * Call emulation specific exec hook. This can setup per-process - * p->p_emuldata or do any other per-process stuff an emulation needs. - * - * If we are executing process of different emulation than the - * original forked process, call e_proc_exit() of the old emulation - * first, then e_proc_exec() of new emulation. 
If the emulation is - * same, the exec hook code should deallocate any old emulation - * resources held previously by this process. - */ - if (p->p_emul && p->p_emul->e_proc_exit - && p->p_emul != epp->ep_esch->es_emul) - (*p->p_emul->e_proc_exit)(p); - - /* - * This is now LWP 1. - */ - /* XXX elsewhere */ - mutex_enter(p->p_lock); - p->p_nlwpid = 1; - l->l_lid = 1; - mutex_exit(p->p_lock); - - /* - * Call exec hook. Emulation code may NOT store reference to anything - * from &pack. - */ - if (epp->ep_esch->es_emul->e_proc_exec) - (*epp->ep_esch->es_emul->e_proc_exec)(p, epp); - - /* update p_emul, the old value is no longer needed */ - p->p_emul = epp->ep_esch->es_emul; - - /* ...and the same for p_execsw */ - p->p_execsw = epp->ep_esch; - -#ifdef __HAVE_SYSCALL_INTERN - (*p->p_emul->e_syscall_intern)(p); -#endif - ktremul(); -} - static int execve_runproc(struct lwp *l, struct execve_data * restrict data, bool no_local_exec_lock, bool is_spawn) { - struct exec_package * const epp = &data->ed_pack; int error = 0; struct proc *p; + size_t i; + char *stack, *dp; + const char *commandname; + struct ps_strings32 arginfo32; + struct exec_vmcmd *base_vcp; + void *aip; + struct vmspace *vm; + ksiginfo_t ksi; + ksiginfoq_t kq; /* * In case of a posix_spawn operation, the child doing the exec @@ -1109,10 +894,20 @@ execve_runproc(struct lwp *l, struct exe * will do this instead. */ KASSERT(no_local_exec_lock || rw_lock_held(&exec_lock)); - KASSERT(!no_local_exec_lock || is_spawn); KASSERT(data != NULL); + if (data == NULL) + return (EINVAL); p = l->l_proc; + if (no_local_exec_lock) + KASSERT(is_spawn); + + base_vcp = NULL; + + if (data->ed_pack.ep_flags & EXEC_32) + aip = &arginfo32; + else + aip = &data->ed_arginfo; /* Get rid of other LWPs. */ if (p->p_nlwps > 1) { @@ -1129,48 +924,232 @@ execve_runproc(struct lwp *l, struct exe /* Remove POSIX timers */ timers_free(p, TIMERS_POSIX); - /* Set the PaX flags. */ - pax_set_flags(epp, p); - /* * Do whatever is necessary to prepare the address space * for remapping. Note that this might replace the current * vmspace with another! 
*/ if (is_spawn) - uvmspace_spawn(l, epp->ep_vm_minaddr, - epp->ep_vm_maxaddr, - epp->ep_flags & EXEC_TOPDOWN_VM); + uvmspace_spawn(l, data->ed_pack.ep_vm_minaddr, + data->ed_pack.ep_vm_maxaddr); else - uvmspace_exec(l, epp->ep_vm_minaddr, - epp->ep_vm_maxaddr, - epp->ep_flags & EXEC_TOPDOWN_VM); + uvmspace_exec(l, data->ed_pack.ep_vm_minaddr, + data->ed_pack.ep_vm_maxaddr); - struct vmspace *vm; + /* record proc's vnode, for use by procfs and others */ + if (p->p_textvp) + vrele(p->p_textvp); + vref(data->ed_pack.ep_vp); + p->p_textvp = data->ed_pack.ep_vp; + + /* Now map address space */ vm = p->p_vmspace; - vm->vm_taddr = (void *)epp->ep_taddr; - vm->vm_tsize = btoc(epp->ep_tsize); - vm->vm_daddr = (void*)epp->ep_daddr; - vm->vm_dsize = btoc(epp->ep_dsize); - vm->vm_ssize = btoc(epp->ep_ssize); + vm->vm_taddr = (void *)data->ed_pack.ep_taddr; + vm->vm_tsize = btoc(data->ed_pack.ep_tsize); + vm->vm_daddr = (void*)data->ed_pack.ep_daddr; + vm->vm_dsize = btoc(data->ed_pack.ep_dsize); + vm->vm_ssize = btoc(data->ed_pack.ep_ssize); vm->vm_issize = 0; - vm->vm_maxsaddr = (void *)epp->ep_maxsaddr; - vm->vm_minsaddr = (void *)epp->ep_minsaddr; + vm->vm_maxsaddr = (void *)data->ed_pack.ep_maxsaddr; + vm->vm_minsaddr = (void *)data->ed_pack.ep_minsaddr; + +#ifdef PAX_ASLR + pax_aslr_init(l, vm); +#endif /* PAX_ASLR */ - pax_aslr_init_vm(l, vm, epp); + /* create the new process's VM space by running the vmcmds */ +#ifdef DIAGNOSTIC + if (data->ed_pack.ep_vmcmds.evs_used == 0) + panic("%s: no vmcmds", __func__); +#endif - /* Now map address space. */ - error = execve_dovmcmds(l, data); - if (error != 0) +#ifdef DEBUG_EXEC + { + size_t j; + struct exec_vmcmd *vp = &data->ed_pack.ep_vmcmds.evs_cmds[0]; + DPRINTF(("vmcmds %u\n", data->ed_pack.ep_vmcmds.evs_used)); + for (j = 0; j < data->ed_pack.ep_vmcmds.evs_used; j++) { + DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#" + PRIxVADDR"/%#"PRIxVSIZE" fd@%#" + PRIxVSIZE" prot=0%o flags=%d\n", j, + vp[j].ev_proc == vmcmd_map_pagedvn ? + "pagedvn" : + vp[j].ev_proc == vmcmd_map_readvn ? + "readvn" : + vp[j].ev_proc == vmcmd_map_zero ? + "zero" : "*unknown*", + vp[j].ev_addr, vp[j].ev_len, + vp[j].ev_offset, vp[j].ev_prot, + vp[j].ev_flags)); + } + } +#endif /* DEBUG_EXEC */ + + for (i = 0; i < data->ed_pack.ep_vmcmds.evs_used && !error; i++) { + struct exec_vmcmd *vcp; + + vcp = &data->ed_pack.ep_vmcmds.evs_cmds[i]; + if (vcp->ev_flags & VMCMD_RELATIVE) { +#ifdef DIAGNOSTIC + if (base_vcp == NULL) + panic("%s: relative vmcmd with no base", + __func__); + if (vcp->ev_flags & VMCMD_BASE) + panic("%s: illegal base & relative vmcmd", + __func__); +#endif + vcp->ev_addr += base_vcp->ev_addr; + } + error = (*vcp->ev_proc)(l, vcp); +#ifdef DEBUG_EXEC + if (error) { + size_t j; + struct exec_vmcmd *vp = + &data->ed_pack.ep_vmcmds.evs_cmds[0]; + DPRINTF(("vmcmds %zu/%u, error %d\n", i, + data->ed_pack.ep_vmcmds.evs_used, error)); + for (j = 0; j < data->ed_pack.ep_vmcmds.evs_used; j++) { + DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#" + PRIxVADDR"/%#"PRIxVSIZE" fd@%#" + PRIxVSIZE" prot=0%o flags=%d\n", j, + vp[j].ev_proc == vmcmd_map_pagedvn ? + "pagedvn" : + vp[j].ev_proc == vmcmd_map_readvn ? + "readvn" : + vp[j].ev_proc == vmcmd_map_zero ? 
+ "zero" : "*unknown*", + vp[j].ev_addr, vp[j].ev_len, + vp[j].ev_offset, vp[j].ev_prot, + vp[j].ev_flags)); + if (j == i) + DPRINTF((" ^--- failed\n")); + } + } +#endif /* DEBUG_EXEC */ + if (vcp->ev_flags & VMCMD_BASE) + base_vcp = vcp; + } + + /* free the vmspace-creation commands, and release their references */ + kill_vmcmds(&data->ed_pack.ep_vmcmds); + + vn_lock(data->ed_pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); + VOP_CLOSE(data->ed_pack.ep_vp, FREAD, l->l_cred); + vput(data->ed_pack.ep_vp); + + /* if an error happened, deallocate and punt */ + if (error) { + DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); goto exec_abort; + } - pathexec(p, epp->ep_resolvedname); + /* remember information about the process */ + data->ed_arginfo.ps_nargvstr = data->ed_argc; + data->ed_arginfo.ps_nenvstr = data->ed_envc; - char * const newstack = STACK_GROW(vm->vm_minsaddr, epp->ep_ssize); + /* set command name & other accounting info */ + commandname = strrchr(data->ed_pack.ep_resolvedname, '/'); + if (commandname != NULL) { + commandname++; + } else { + commandname = data->ed_pack.ep_resolvedname; + } + i = min(strlen(commandname), MAXCOMLEN); + (void)memcpy(p->p_comm, commandname, i); + p->p_comm[i] = '\0'; - error = copyoutargs(data, l, newstack); - if (error != 0) + dp = PNBUF_GET(); + /* + * If the path starts with /, we don't need to do any work. + * This handles the majority of the cases. + * In the future perhaps we could canonicalize it? + */ + if (data->ed_pathstring[0] == '/') + (void)strlcpy(data->ed_pack.ep_path = dp, data->ed_pathstring, + MAXPATHLEN); +#ifdef notyet + /* + * Although this works most of the time [since the entry was just + * entered in the cache] we don't use it because it theoretically + * can fail and it is not the cleanest interface, because there + * could be races. When the namei cache is re-written, this can + * be changed to use the appropriate function. + */ + else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p))) + data->ed_pack.ep_path = dp; +#endif + else { +#ifdef notyet + printf("Cannot get path for pid %d [%s] (error %d)\n", + (int)p->p_pid, p->p_comm, error); +#endif + data->ed_pack.ep_path = NULL; + PNBUF_PUT(dp); + } + + stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, + STACK_PTHREADSPACE + data->ed_ps_strings_sz + data->ed_szsigcode), + data->ed_pack.ep_ssize - (data->ed_ps_strings_sz + data->ed_szsigcode)); + +#ifdef __MACHINE_STACK_GROWS_UP + /* + * The copyargs call always copies into lower addresses + * first, moving towards higher addresses, starting with + * the stack pointer that we give. When the stack grows + * down, this puts argc/argv/envp very shallow on the + * stack, right at the first user stack pointer. + * When the stack grows up, the situation is reversed. + * + * Normally, this is no big deal. But the ld_elf.so _rtld() + * function expects to be called with a single pointer to + * a region that has a few words it can stash values into, + * followed by argc/argv/envp. When the stack grows down, + * it's easy to decrement the stack pointer a little bit to + * allocate the space for these few words and pass the new + * stack pointer to _rtld. When the stack grows up, however, + * a few words before argc is part of the signal trampoline, XXX + * so we have a problem. + * + * Instead of changing how _rtld works, we take the easy way + * out and steal 32 bytes before we call copyargs. + * This extra space was allowed for when 'pack.ep_ssize' was calculated. 
+ */ + stack += RTLD_GAP; +#endif /* __MACHINE_STACK_GROWS_UP */ + + /* Now copy argc, args & environ to new stack */ + error = (*data->ed_pack.ep_esch->es_copyargs)(l, &data->ed_pack, + &data->ed_arginfo, &stack, data->ed_argp); + + if (data->ed_pack.ep_path) { + PNBUF_PUT(data->ed_pack.ep_path); + data->ed_pack.ep_path = NULL; + } + if (error) { + DPRINTF(("%s: copyargs failed %d\n", __func__, error)); goto exec_abort; + } + /* Move the stack back to original point */ + stack = (char *)STACK_GROW(vm->vm_minsaddr, data->ed_pack.ep_ssize); + + /* fill process ps_strings info */ + p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, + STACK_PTHREADSPACE), data->ed_ps_strings_sz); + + if (data->ed_pack.ep_flags & EXEC_32) { + arginfo32.ps_argvstr = (vaddr_t)data->ed_arginfo.ps_argvstr; + arginfo32.ps_nargvstr = data->ed_arginfo.ps_nargvstr; + arginfo32.ps_envstr = (vaddr_t)data->ed_arginfo.ps_envstr; + arginfo32.ps_nenvstr = data->ed_arginfo.ps_nenvstr; + } + + /* copy out the process's ps_strings structure */ + if ((error = copyout(aip, (void *)p->p_psstrp, data->ed_ps_strings_sz)) + != 0) { + DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", + __func__, aip, (void *)p->p_psstrp, data->ed_ps_strings_sz)); + goto exec_abort; + } cwdexec(p); fd_closeexec(); /* handle close on exec */ @@ -1178,11 +1157,13 @@ execve_runproc(struct lwp *l, struct exe if (__predict_false(ktrace_on)) fd_ktrexecfd(); - execsigs(p); /* reset caught signals */ + execsigs(p); /* reset catched signals */ - mutex_enter(p->p_lock); l->l_ctxlink = NULL; /* reset ucontext link */ + + p->p_acflag &= ~AFORK; + mutex_enter(p->p_lock); p->p_flag |= PK_EXEC; mutex_exit(p->p_lock); @@ -1200,16 +1181,105 @@ execve_runproc(struct lwp *l, struct exe * exited and exec()/exit() are the only places it will be cleared. */ if ((p->p_lflag & PL_PPWAIT) != 0) { +#if 0 + lwp_t *lp; + + mutex_enter(proc_lock); + lp = p->p_vforklwp; + p->p_vforklwp = NULL; + + l->l_lwpctl = NULL; /* was on loan from blocked parent */ + p->p_lflag &= ~PL_PPWAIT; + + lp->l_pflag &= ~LP_VFORKWAIT; /* XXX */ + cv_broadcast(&lp->l_waitcv); + mutex_exit(proc_lock); +#else mutex_enter(proc_lock); l->l_lwpctl = NULL; /* was on loan from blocked parent */ p->p_lflag &= ~PL_PPWAIT; cv_broadcast(&p->p_pptr->p_waitcv); mutex_exit(proc_lock); +#endif + } + + /* + * Deal with set[ug]id. MNT_NOSUID has already been used to disable + * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked + * out additional references on the process for the moment. + */ + if ((p->p_slflag & PSL_TRACED) == 0 && + + (((data->ed_attr.va_mode & S_ISUID) != 0 && + kauth_cred_geteuid(l->l_cred) != data->ed_attr.va_uid) || + + ((data->ed_attr.va_mode & S_ISGID) != 0 && + kauth_cred_getegid(l->l_cred) != data->ed_attr.va_gid))) { + /* + * Mark the process as SUGID before we do + * anything that might block. + */ + proc_crmod_enter(); + proc_crmod_leave(NULL, NULL, true); + + /* Make sure file descriptors 0..2 are in use. */ + if ((error = fd_checkstd()) != 0) { + DPRINTF(("%s: fdcheckstd failed %d\n", + __func__, error)); + goto exec_abort; + } + + /* + * Copy the credential so other references don't see our + * changes. + */ + l->l_cred = kauth_cred_copy(l->l_cred); +#ifdef KTRACE + /* + * If the persistent trace flag isn't set, turn off. 
+ */ + if (p->p_tracep) { + mutex_enter(&ktrace_lock); + if (!(p->p_traceflag & KTRFAC_PERSISTENT)) + ktrderef(p); + mutex_exit(&ktrace_lock); + } +#endif + if (data->ed_attr.va_mode & S_ISUID) + kauth_cred_seteuid(l->l_cred, data->ed_attr.va_uid); + if (data->ed_attr.va_mode & S_ISGID) + kauth_cred_setegid(l->l_cred, data->ed_attr.va_gid); + } else { + if (kauth_cred_geteuid(l->l_cred) == + kauth_cred_getuid(l->l_cred) && + kauth_cred_getegid(l->l_cred) == + kauth_cred_getgid(l->l_cred)) + p->p_flag &= ~PK_SUGID; + } + + /* + * Copy the credential so other references don't see our changes. + * Test to see if this is necessary first, since in the common case + * we won't need a private reference. + */ + if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || + kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { + l->l_cred = kauth_cred_copy(l->l_cred); + kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); + kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); } - error = credexec(l, &data->ed_attr); - if (error) - goto exec_abort; + /* Update the master credentials. */ + if (l->l_cred != p->p_cred) { + kauth_cred_t ocred; + + kauth_cred_hold(l->l_cred); + mutex_enter(p->p_lock); + ocred = p->p_cred; + p->p_cred = l->l_cred; + mutex_exit(p->p_lock); + kauth_cred_free(ocred); + } #if defined(__HAVE_RAS) /* @@ -1220,18 +1290,12 @@ execve_runproc(struct lwp *l, struct exe doexechooks(p); - /* - * Set initial SP at the top of the stack. - * - * Note that on machines where stack grows up (e.g. hppa), SP points to - * the end of arg/env strings. Userland guesses the address of argc - * via ps_strings::ps_argvstr. - */ - - /* Setup new registers and do misc. setup. */ - (*epp->ep_esch->es_emul->e_setregs)(l, epp, (vaddr_t)newstack); - if (epp->ep_esch->es_setregs) - (*epp->ep_esch->es_setregs)(l, epp, (vaddr_t)newstack); + /* setup new registers and do misc. setup. */ + (*data->ed_pack.ep_esch->es_emul->e_setregs)(l, &data->ed_pack, + (vaddr_t)stack); + if (data->ed_pack.ep_esch->es_setregs) + (*data->ed_pack.ep_esch->es_setregs)(l, &data->ed_pack, + (vaddr_t)stack); /* Provide a consistent LWP private setting */ (void)lwp_setprivate(l, NULL); @@ -1240,7 +1304,7 @@ execve_runproc(struct lwp *l, struct exe pcu_discard_all(l); /* map the process's signal trampoline code */ - if ((error = exec_sigcode_map(p, epp->ep_esch->es_emul)) != 0) { + if ((error = exec_sigcode_map(p, data->ed_pack.ep_esch->es_emul)) != 0) { DPRINTF(("%s: map sigcode failed %d\n", __func__, error)); goto exec_abort; } @@ -1250,11 +1314,63 @@ execve_runproc(struct lwp *l, struct exe /* notify others that we exec'd */ KNOTE(&p->p_klist, NOTE_EXEC); - kmem_free(epp->ep_hdr, epp->ep_hdrlen); + kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen); + + SDT_PROBE(proc,,,exec_success, data->ed_pack.ep_name, 0, 0, 0, 0); + + /* The emulation root will usually have been found when we looked + * for the elf interpreter (or similar), if not look now. */ + if (data->ed_pack.ep_esch->es_emul->e_path != NULL && + data->ed_pack.ep_emul_root == NULL) + emul_find_root(l, &data->ed_pack); + + /* Any old emulation root got removed by fdcloseexec */ + rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); + p->p_cwdi->cwdi_edir = data->ed_pack.ep_emul_root; + rw_exit(&p->p_cwdi->cwdi_lock); + data->ed_pack.ep_emul_root = NULL; + if (data->ed_pack.ep_interp != NULL) + vrele(data->ed_pack.ep_interp); + + /* + * Call emulation specific exec hook. 
This can setup per-process + * p->p_emuldata or do any other per-process stuff an emulation needs. + * + * If we are executing process of different emulation than the + * original forked process, call e_proc_exit() of the old emulation + * first, then e_proc_exec() of new emulation. If the emulation is + * same, the exec hook code should deallocate any old emulation + * resources held previously by this process. + */ + if (p->p_emul && p->p_emul->e_proc_exit + && p->p_emul != data->ed_pack.ep_esch->es_emul) + (*p->p_emul->e_proc_exit)(p); + + /* + * This is now LWP 1. + */ + mutex_enter(p->p_lock); + p->p_nlwpid = 1; + l->l_lid = 1; + mutex_exit(p->p_lock); + + /* + * Call exec hook. Emulation code may NOT store reference to anything + * from &pack. + */ + if (data->ed_pack.ep_esch->es_emul->e_proc_exec) + (*data->ed_pack.ep_esch->es_emul->e_proc_exec)(p, &data->ed_pack); - SDT_PROBE(proc, kernel, , exec__success, epp->ep_kname, 0, 0, 0, 0); + /* update p_emul, the old value is no longer needed */ + p->p_emul = data->ed_pack.ep_esch->es_emul; + + /* ...and the same for p_execsw */ + p->p_execsw = data->ed_pack.ep_esch; - emulexec(l, epp); +#ifdef __HAVE_SYSCALL_INTERN + (*p->p_emul->e_syscall_intern)(p); +#endif + ktremul(); /* Allow new references from the debugger/procfs. */ rw_exit(&p->p_reflock); @@ -1264,21 +1380,16 @@ execve_runproc(struct lwp *l, struct exe mutex_enter(proc_lock); if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) { - ksiginfo_t ksi; - KSI_INIT_EMPTY(&ksi); ksi.ksi_signo = SIGTRAP; - ksi.ksi_code = TRAP_EXEC; ksi.ksi_lid = l->l_lid; kpsignal(p, &ksi, NULL); } if (p->p_sflag & PS_STOPEXEC) { - ksiginfoq_t kq; - KERNEL_UNLOCK_ALL(l, &l->l_biglocks); p->p_pptr->p_nstopchild++; - p->p_waited = 0; + p->p_pptr->p_waited = 0; mutex_enter(p->p_lock); ksiginfo_queue_init(&kq); sigclearall(p, &contsigmask, &kq); @@ -1300,13 +1411,11 @@ execve_runproc(struct lwp *l, struct exe pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring); pathbuf_destroy(data->ed_pathbuf); PNBUF_PUT(data->ed_resolvedpathbuf); -#ifdef TRACE_EXEC DPRINTF(("%s finished\n", __func__)); -#endif - return EJUSTRETURN; + return (EJUSTRETURN); exec_abort: - SDT_PROBE(proc, kernel, , exec__failure, error, 0, 0, 0, 0); + SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); rw_exit(&p->p_reflock); if (!no_local_exec_lock) rw_exit(&exec_lock); @@ -1323,18 +1432,18 @@ execve_runproc(struct lwp *l, struct exe uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); - exec_free_emul_arg(epp); + exec_free_emul_arg(&data->ed_pack); pool_put(&exec_pool, data->ed_argp); - kmem_free(epp->ep_hdr, epp->ep_hdrlen); - if (epp->ep_emul_root != NULL) - vrele(epp->ep_emul_root); - if (epp->ep_interp != NULL) - vrele(epp->ep_interp); + kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen); + if (data->ed_pack.ep_emul_root != NULL) + vrele(data->ed_pack.ep_emul_root); + if (data->ed_pack.ep_interp != NULL) + vrele(data->ed_pack.ep_interp); /* Acquire the sched-state mutex (exit1() will release it). */ if (!is_spawn) { mutex_enter(p->p_lock); - exit1(l, error, SIGABRT); + exit1(l, W_EXITCODE(error, SIGABRT)); } return error; @@ -1354,256 +1463,6 @@ execve1(struct lwp *l, const char *path, return error; } -static size_t -fromptrsz(const struct exec_package *epp) -{ - return (epp->ep_flags & EXEC_FROM32) ? sizeof(int) : sizeof(char *); -} - -static size_t -ptrsz(const struct exec_package *epp) -{ - return (epp->ep_flags & EXEC_32) ? 
sizeof(int) : sizeof(char *); -} - -static size_t -calcargs(struct execve_data * restrict data, const size_t argenvstrlen) -{ - struct exec_package * const epp = &data->ed_pack; - - const size_t nargenvptrs = - 1 + /* long argc */ - data->ed_argc + /* char *argv[] */ - 1 + /* \0 */ - data->ed_envc + /* char *env[] */ - 1; /* \0 */ - - return (nargenvptrs * ptrsz(epp)) /* pointers */ - + argenvstrlen /* strings */ - + epp->ep_esch->es_arglen; /* auxinfo */ -} - -static size_t -calcstack(struct execve_data * restrict data, const size_t gaplen) -{ - struct exec_package * const epp = &data->ed_pack; - - data->ed_szsigcode = epp->ep_esch->es_emul->e_esigcode - - epp->ep_esch->es_emul->e_sigcode; - - data->ed_ps_strings_sz = (epp->ep_flags & EXEC_32) ? - sizeof(struct ps_strings32) : sizeof(struct ps_strings); - - const size_t sigcode_psstr_sz = - data->ed_szsigcode + /* sigcode */ - data->ed_ps_strings_sz + /* ps_strings */ - STACK_PTHREADSPACE; /* pthread space */ - - const size_t stacklen = - data->ed_argslen + - gaplen + - sigcode_psstr_sz; - - /* make the stack "safely" aligned */ - return STACK_LEN_ALIGN(stacklen, STACK_ALIGNBYTES); -} - -static int -copyoutargs(struct execve_data * restrict data, struct lwp *l, - char * const newstack) -{ - struct exec_package * const epp = &data->ed_pack; - struct proc *p = l->l_proc; - int error; - - /* remember information about the process */ - data->ed_arginfo.ps_nargvstr = data->ed_argc; - data->ed_arginfo.ps_nenvstr = data->ed_envc; - - /* - * Allocate the stack address passed to the newly execve()'ed process. - * - * The new stack address will be set to the SP (stack pointer) register - * in setregs(). - */ - - char *newargs = STACK_ALLOC( - STACK_SHRINK(newstack, data->ed_argslen), data->ed_argslen); - - error = (*epp->ep_esch->es_copyargs)(l, epp, - &data->ed_arginfo, &newargs, data->ed_argp); - - if (error) { - DPRINTF(("%s: copyargs failed %d\n", __func__, error)); - return error; - } - - error = copyoutpsstrs(data, p); - if (error != 0) - return error; - - return 0; -} - -static int -copyoutpsstrs(struct execve_data * restrict data, struct proc *p) -{ - struct exec_package * const epp = &data->ed_pack; - struct ps_strings32 arginfo32; - void *aip; - int error; - - /* fill process ps_strings info */ - p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr, - STACK_PTHREADSPACE), data->ed_ps_strings_sz); - - if (epp->ep_flags & EXEC_32) { - aip = &arginfo32; - arginfo32.ps_argvstr = (vaddr_t)data->ed_arginfo.ps_argvstr; - arginfo32.ps_nargvstr = data->ed_arginfo.ps_nargvstr; - arginfo32.ps_envstr = (vaddr_t)data->ed_arginfo.ps_envstr; - arginfo32.ps_nenvstr = data->ed_arginfo.ps_nenvstr; - } else - aip = &data->ed_arginfo; - - /* copy out the process's ps_strings structure */ - if ((error = copyout(aip, (void *)p->p_psstrp, data->ed_ps_strings_sz)) - != 0) { - DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", - __func__, aip, (void *)p->p_psstrp, data->ed_ps_strings_sz)); - return error; - } - - return 0; -} - -static int -copyinargs(struct execve_data * restrict data, char * const *args, - char * const *envs, execve_fetch_element_t fetch_element, char **dpp) -{ - struct exec_package * const epp = &data->ed_pack; - char *dp; - size_t i; - int error; - - dp = *dpp; - - data->ed_argc = 0; - - /* copy the fake args list, if there's one, freeing it as we go */ - if (epp->ep_flags & EXEC_HASARGL) { - struct exec_fakearg *fa = epp->ep_fa; - - while (fa->fa_arg != NULL) { - const size_t maxlen = ARG_MAX - (dp - data->ed_argp); - 
size_t len; - - len = strlcpy(dp, fa->fa_arg, maxlen); - /* Count NUL into len. */ - if (len < maxlen) - len++; - else { - while (fa->fa_arg != NULL) { - kmem_free(fa->fa_arg, fa->fa_len); - fa++; - } - kmem_free(epp->ep_fa, epp->ep_fa_len); - epp->ep_flags &= ~EXEC_HASARGL; - return E2BIG; - } - ktrexecarg(fa->fa_arg, len - 1); - dp += len; - - kmem_free(fa->fa_arg, fa->fa_len); - fa++; - data->ed_argc++; - } - kmem_free(epp->ep_fa, epp->ep_fa_len); - epp->ep_flags &= ~EXEC_HASARGL; - } - - /* - * Read and count argument strings from user. - */ - - if (args == NULL) { - DPRINTF(("%s: null args\n", __func__)); - return EINVAL; - } - if (epp->ep_flags & EXEC_SKIPARG) - args = (const void *)((const char *)args + fromptrsz(epp)); - i = 0; - error = copyinargstrs(data, args, fetch_element, &dp, &i, ktr_execarg); - if (error != 0) { - DPRINTF(("%s: copyin arg %d\n", __func__, error)); - return error; - } - data->ed_argc += i; - - /* - * Read and count environment strings from user. - */ - - data->ed_envc = 0; - /* environment need not be there */ - if (envs == NULL) - goto done; - i = 0; - error = copyinargstrs(data, envs, fetch_element, &dp, &i, ktr_execenv); - if (error != 0) { - DPRINTF(("%s: copyin env %d\n", __func__, error)); - return error; - } - data->ed_envc += i; - -done: - *dpp = dp; - - return 0; -} - -static int -copyinargstrs(struct execve_data * restrict data, char * const *strs, - execve_fetch_element_t fetch_element, char **dpp, size_t *ip, - void (*ktr)(const void *, size_t)) -{ - char *dp, *sp; - size_t i; - int error; - - dp = *dpp; - - i = 0; - while (1) { - const size_t maxlen = ARG_MAX - (dp - data->ed_argp); - size_t len; - - if ((error = (*fetch_element)(strs, i, &sp)) != 0) { - return error; - } - if (!sp) - break; - if ((error = copyinstr(sp, dp, maxlen, &len)) != 0) { - if (error == ENAMETOOLONG) - error = E2BIG; - return error; - } - if (__predict_false(ktrace_on)) - (*ktr)(dp, len - 1); - dp += len; - i++; - } - - *dpp = dp; - *ip = i; - - return 0; -} - -/* - * Copy argv and env strings from kernel buffer (argp) to the new stack. - * Those strings are located just after auxinfo. - */ int copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo, char **stackp, void *argp) @@ -1618,24 +1477,14 @@ copyargs(struct lwp *l, struct exec_pack nullp = NULL; argc = arginfo->ps_nargvstr; envc = arginfo->ps_nenvstr; - - /* argc on stack is long */ - CTASSERT(sizeof(*cpp) == sizeof(argc)); - - dp = (char *)(cpp + - 1 + /* long argc */ - argc + /* char *argv[] */ - 1 + /* \0 */ - envc + /* char *env[] */ - 1) + /* \0 */ - pack->ep_esch->es_arglen; /* auxinfo */ - sp = argp; - if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) { COPYPRINTF("", cpp - 1, sizeof(argc)); return error; } + dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen); + sp = argp; + /* XXX don't copy them out, remap them! 
*/ arginfo->ps_argvstr = cpp; /* remember location of argv for later */ @@ -1769,6 +1618,30 @@ exec_remove(struct execsw *esp, int coun return 0; } +static int +sysctl_kern_usrstack(SYSCTLFN_ARGS) +{ + u_long ptr; + struct sysctlnode node = *rnode; + node.sysctl_data = &ptr; + ptr = l->l_proc->p_stackbase; + return sysctl_lookup(SYSCTLFN_CALL(&node)); +} + +static void +sysctl_kern_usrstack_setup(void) +{ + struct sysctllog *kern_usrstack_sysctllog; + + kern_usrstack_sysctllog = NULL; + sysctl_createv(&kern_usrstack_sysctllog, 0, NULL, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY, + CTLTYPE_LONG, "usrstack", + SYSCTL_DESCR("User process stack base"), + sysctl_kern_usrstack, 0, NULL, 0, + CTL_KERN, KERN_USRSTACK, CTL_EOL); +} + /* * Initialize exec structures. If init_boot is true, also does necessary * one-time initialization (it's called from main() that way). @@ -1787,15 +1660,12 @@ exec_init(int init_boot) if (init_boot) { /* do one-time initializations */ - vaddr_t vmin = 0, vmax; - rw_init(&exec_lock); mutex_init(&sigobject_lock, MUTEX_DEFAULT, IPL_NONE); - exec_map = uvm_km_suballoc(kernel_map, &vmin, &vmax, - maxexec*NCARGS, VM_MAP_PAGEABLE, false, NULL); pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH, "execargs", &exec_palloc, IPL_NONE); pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0); + sysctl_kern_usrstack_setup(); } else { KASSERT(rw_write_held(&exec_lock)); } @@ -1897,7 +1767,7 @@ exec_sigcode_map(struct proc *p, const s printf("kernel mapping failed %d\n", error); (*uobj->pgops->pgo_detach)(uobj); mutex_exit(&sigobject_lock); - return error; + return (error); } memcpy((void *)va, e->e_sigcode, sz); #ifdef PMAP_NEED_PROCWR @@ -1911,7 +1781,7 @@ exec_sigcode_map(struct proc *p, const s /* Just a hint to uvm_map where to put it. */ va = e->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr, - round_page(sz), p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN); + round_page(sz)); #ifdef __alpha__ /* @@ -1935,10 +1805,10 @@ exec_sigcode_map(struct proc *p, const s __func__, __LINE__, &p->p_vmspace->vm_map, round_page(sz), va, error)); (*uobj->pgops->pgo_detach)(uobj); - return error; + return (error); } p->p_sigctx.ps_sigcode = (void *)va; - return 0; + return (0); } /* @@ -1967,11 +1837,11 @@ spawn_exec_data_release(struct spawn_exe * A child lwp of a posix_spawn operation starts here and ends up in * cpu_spawn_return, dealing with all filedescriptor and scheduler * manipulations in between. - * The parent waits for the child, as it is not clear whether the child - * will be able to acquire its own exec_lock. If it can, the parent can + * The parent waits for the child, as it is not clear wether the child + * will be able to aquire its own exec_lock. If it can, the parent can * be released early and continue running in parallel. If not (or if the * magic debug flag is passed in the scheduler attribute struct), the - * child rides on the parent's exec lock until it is ready to return to + * child rides on the parent's exec lock untill it is ready to return to * to userland - and only then releases the parent. This method loses * concurrency, but improves error reporting. 
*/ @@ -1981,7 +1851,6 @@ spawn_return(void *arg) struct spawn_exec_data *spawn_data = arg; struct lwp *l = curlwp; int error, newfd; - int ostat; size_t i; const struct posix_spawn_file_actions_entry *fae; pid_t ppid; @@ -2026,8 +1895,8 @@ spawn_return(void *arg) } error = fd_open(fae->fae_path, fae->fae_oflag, fae->fae_mode, &newfd); - if (error) - break; + if (error) + break; if (newfd != fae->fae_fildes) { error = dodup(l, newfd, fae->fae_fildes, 0, &retval); @@ -2062,18 +1931,7 @@ spawn_return(void *arg) * set state to SSTOP so that this proc can be found by pid. * see proc_enterprp, do_sched_setparam below */ - mutex_enter(proc_lock); - /* - * p_stat should be SACTIVE, so we need to adjust the - * parent's p_nstopchild here. For safety, just make - * we're on the good side of SDEAD before we adjust. - */ - ostat = l->l_proc->p_stat; - KASSERT(ostat < SSTOP); l->l_proc->p_stat = SSTOP; - l->l_proc->p_waited = 0; - l->l_proc->p_pptr->p_nstopchild++; - mutex_exit(proc_lock); /* Set process group */ if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_SETPGROUP) { @@ -2086,7 +1944,7 @@ spawn_return(void *arg) error = proc_enterpgrp(spawn_data->sed_parent, mypid, pgrp, false); if (error) - goto report_error_stopped; + goto report_error; } /* Set scheduler policy */ @@ -2100,7 +1958,7 @@ spawn_return(void *arg) SCHED_NONE, &spawn_data->sed_attrs->sa_schedparam); } if (error) - goto report_error_stopped; + goto report_error; /* Reset user ID's */ if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_RESETIDS) { @@ -2108,12 +1966,12 @@ spawn_return(void *arg) kauth_cred_getgid(l->l_cred), -1, ID_E_EQ_R | ID_E_EQ_S); if (error) - goto report_error_stopped; + goto report_error; error = do_setresuid(l, -1, kauth_cred_getuid(l->l_cred), -1, ID_E_EQ_R | ID_E_EQ_S); if (error) - goto report_error_stopped; + goto report_error; } /* Set signal masks/defaults */ @@ -2123,20 +1981,10 @@ spawn_return(void *arg) &spawn_data->sed_attrs->sa_sigmask, NULL); mutex_exit(l->l_proc->p_lock); if (error) - goto report_error_stopped; + goto report_error; } if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_SETSIGDEF) { - /* - * The following sigaction call is using a sigaction - * version 0 trampoline which is in the compatibility - * code only. This is not a problem because for SIG_DFL - * and SIG_IGN, the trampolines are now ignored. If they - * were not, this would be a problem because we are - * holding the exec_lock, and the compat code needs - * to do the same in order to replace the trampoline - * code of the process. 
- */ for (i = 1; i <= NSIG; i++) { if (sigismember( &spawn_data->sed_attrs->sa_sigdefault, i)) @@ -2144,10 +1992,6 @@ spawn_return(void *arg) 0); } } - mutex_enter(proc_lock); - l->l_proc->p_stat = ostat; - l->l_proc->p_pptr->p_nstopchild--; - mutex_exit(proc_lock); } /* now do the real exec */ @@ -2168,20 +2012,15 @@ spawn_return(void *arg) /* release our refcount on the data */ spawn_exec_data_release(spawn_data); - /* and finally: leave to userland for the first time */ + /* and finaly: leave to userland for the first time */ cpu_spawn_return(l); /* NOTREACHED */ return; - report_error_stopped: - mutex_enter(proc_lock); - l->l_proc->p_stat = ostat; - l->l_proc->p_pptr->p_nstopchild--; - mutex_exit(proc_lock); report_error: - if (have_reflock) { - /* + if (have_reflock) { + /* * We have not passed through execve_runproc(), * which would have released the p_reflock and also * taken ownership of the sed_exec part of spawn_data, @@ -2213,7 +2052,7 @@ spawn_return(void *arg) * A NetBSD specific workaround is POSIX_SPAWN_RETURNERROR as * flag bit in the attrp argument to posix_spawn(2), see above. */ - exit1(l, 127, 0); + exit1(l, W_EXITCODE(127, 0)); } void @@ -2224,7 +2063,7 @@ posix_spawn_fa_free(struct posix_spawn_f struct posix_spawn_file_actions_entry *fae = &fa->fae[i]; if (fae->fae_action != FAE_OPEN) continue; - kmem_strfree(fae->fae_path); + kmem_free(fae->fae_path, strlen(fae->fae_path) + 1); } if (fa->len > 0) kmem_free(fa->fae, sizeof(*fa->fae) * fa->len); @@ -2233,7 +2072,7 @@ posix_spawn_fa_free(struct posix_spawn_f static int posix_spawn_fa_alloc(struct posix_spawn_file_actions **fap, - const struct posix_spawn_file_actions *ufa, rlim_t lim) + const struct posix_spawn_file_actions *ufa) { struct posix_spawn_file_actions *fa; struct posix_spawn_file_actions_entry *fae; @@ -2243,14 +2082,15 @@ posix_spawn_fa_alloc(struct posix_spawn_ fa = kmem_alloc(sizeof(*fa), KM_SLEEP); error = copyin(ufa, fa, sizeof(*fa)); - if (error || fa->len == 0) { - kmem_free(fa, sizeof(*fa)); - return error; /* 0 if not an error, and len == 0 */ + if (error) { + fa->fae = NULL; + fa->len = 0; + goto out; } - if (fa->len > lim) { + if (fa->len == 0) { kmem_free(fa, sizeof(*fa)); - return EINVAL; + return 0; } fa->size = fa->len; @@ -2395,7 +2235,7 @@ do_posix_spawn(struct lwp *l1, pid_t *pi (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy)); p2->p_vmspace = proc0.p_vmspace; - TAILQ_INIT(&p2->p_sigpend.sp_info); + CIRCLEQ_INIT(&p2->p_sigpend.sp_info); LIST_INIT(&p2->p_lwps); LIST_INIT(&p2->p_sigwaiters); @@ -2496,7 +2336,7 @@ do_posix_spawn(struct lwp *l1, pid_t *pi /* create LWP */ lwp_create(l1, p2, uaddr, 0, NULL, 0, spawn_return, spawn_data, - &l2, l1->l_class, &l1->l_sigmask, &l1->l_sigstk); + &l2, l1->l_class); l2->l_ctxlink = NULL; /* reset ucontext link */ /* @@ -2581,14 +2421,14 @@ do_posix_spawn(struct lwp *l1, pid_t *pi return error; error_exit: - if (have_exec_lock) { + if (have_exec_lock) { execve_free_data(&spawn_data->sed_exec); rw_exit(&p1->p_reflock); - rw_exit(&exec_lock); + rw_exit(&exec_lock); } mutex_exit(&spawn_data->sed_mtx_child); spawn_exec_data_release(spawn_data); - + return error; } @@ -2610,8 +2450,6 @@ sys_posix_spawn(struct lwp *l1, const st struct posix_spawnattr *sa = NULL; pid_t pid; bool child_ok = false; - rlim_t max_fileactions; - proc_t *p = l1->l_proc; error = check_posix_spawn(l1); if (error) { @@ -2621,10 +2459,7 @@ sys_posix_spawn(struct lwp *l1, const st /* copy in file_actions struct */ if (SCARG(uap, file_actions) != NULL) { - max_fileactions = 2 
* min(p->p_rlimit[RLIMIT_NOFILE].rlim_cur, - maxfiles); - error = posix_spawn_fa_alloc(&fa, SCARG(uap, file_actions), - max_fileactions); + error = posix_spawn_fa_alloc(&fa, SCARG(uap, file_actions)); if (error) goto error_exit; } @@ -2678,35 +2513,3 @@ exec_free_emul_arg(struct exec_package * KASSERT(epp->ep_emul_arg == NULL); } } - -#ifdef DEBUG_EXEC -static void -dump_vmcmds(const struct exec_package * const epp, size_t x, int error) -{ - struct exec_vmcmd *vp = &epp->ep_vmcmds.evs_cmds[0]; - size_t j; - - if (error == 0) - DPRINTF(("vmcmds %u\n", epp->ep_vmcmds.evs_used)); - else - DPRINTF(("vmcmds %zu/%u, error %d\n", x, - epp->ep_vmcmds.evs_used, error)); - - for (j = 0; j < epp->ep_vmcmds.evs_used; j++) { - DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#" - PRIxVADDR"/%#"PRIxVSIZE" fd@%#" - PRIxVSIZE" prot=0%o flags=%d\n", j, - vp[j].ev_proc == vmcmd_map_pagedvn ? - "pagedvn" : - vp[j].ev_proc == vmcmd_map_readvn ? - "readvn" : - vp[j].ev_proc == vmcmd_map_zero ? - "zero" : "*unknown*", - vp[j].ev_addr, vp[j].ev_len, - vp[j].ev_offset, vp[j].ev_prot, - vp[j].ev_flags)); - if (error != 0 && j == x) - DPRINTF((" ^--- failed\n")); - } -} -#endif
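
A minimal userland sketch, separate from the diff itself: it reads the kern.usrstack node that the sysctl_kern_usrstack_setup() hunk on the 1.356 side registers (CTL_KERN / KERN_USRSTACK, CTLTYPE_LONG), assuming a NetBSD userland whose <sys/sysctl.h> exposes KERN_USRSTACK.

/*
 * Read the per-process user stack base exported by the
 * kern.usrstack sysctl added in kern_exec.c 1.356.
 * Assumption: KERN_USRSTACK is defined in <sys/sysctl.h>.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_USRSTACK };
	unsigned long usrstack;		/* node is CTLTYPE_LONG */
	size_t len = sizeof(usrstack);

	if (sysctl(mib, 2, &usrstack, &len, NULL, 0) == -1) {
		perror("sysctl kern.usrstack");
		return 1;
	}
	printf("user stack base: %#lx\n", usrstack);
	return 0;
}

Since the node is created CTLFLAG_PERMANENT|CTLFLAG_READONLY, the newp/newlen arguments stay NULL/0; only the old-value side of sysctl(3) is used.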