Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/uipc_sem.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/uipc_sem.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.10 retrieving revision 1.10.4.4 diff -u -p -r1.10 -r1.10.4.4 --- src/sys/kern/uipc_sem.c 2005/02/26 21:34:55 1.10 +++ src/sys/kern/uipc_sem.c 2007/09/03 14:41:18 1.10.4.4 @@ -1,11 +1,11 @@ -/* $NetBSD: uipc_sem.c,v 1.10 2005/02/26 21:34:55 perry Exp $ */ +/* $NetBSD: uipc_sem.c,v 1.10.4.4 2007/09/03 14:41:18 yamt Exp $ */ /*- - * Copyright (c) 2003 The NetBSD Foundation, Inc. + * Copyright (c) 2003, 2007 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Jason R. Thorpe of Wasabi Systems, Inc. + * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -63,7 +63,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.10 2005/02/26 21:34:55 perry Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.10.4.4 2007/09/03 14:41:18 yamt Exp $"); #include "opt_posix.h" @@ -73,11 +73,11 @@ __KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v #include #include #include -#include #include #include -#include +#include #include +#include #include @@ -89,8 +89,10 @@ __KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v #define SEM_MAX_NAMELEN 14 #define SEM_VALUE_MAX (~0U) +#define SEM_HASHTBL_SIZE 13 -#define SEM_TO_ID(x) ((intptr_t)(x)) +#define SEM_TO_ID(x) (((x)->ks_id)) +#define SEM_HASH(id) ((id) % SEM_HASHTBL_SIZE) MALLOC_DEFINE(M_SEM, "p1003_1b_sem", "p1003_1b semaphores"); @@ -101,14 +103,18 @@ MALLOC_DEFINE(M_SEM, "p1003_1b_sem", "p1 */ struct ksem { LIST_ENTRY(ksem) ks_entry; /* global list entry */ - struct simplelock ks_interlock; /* lock on this ksem */ - char *ks_name; /* if named, this is the name */ + LIST_ENTRY(ksem) ks_hash; /* hash list entry */ + kmutex_t ks_interlock; /* lock on this ksem */ + kcondvar_t ks_cv; /* condition variable */ unsigned int ks_ref; /* number of references */ + char *ks_name; /* if named, this is the name */ + size_t ks_namelen; /* length of name */ mode_t ks_mode; /* protection bits */ uid_t ks_uid; /* creator uid */ gid_t ks_gid; /* creator gid */ unsigned int ks_value; /* current value */ unsigned int ks_waiters; /* number of waiters */ + semid_t ks_id; /* unique identifier */ }; struct ksem_ref { @@ -117,59 +123,75 @@ struct ksem_ref { }; struct ksem_proc { - struct lock kp_lock; + krwlock_t kp_lock; LIST_HEAD(, ksem_ref) kp_ksems; }; +LIST_HEAD(ksem_list, ksem); + /* * ksem_slock protects ksem_head and nsems. Only named semaphores go * onto ksem_head. */ -static struct simplelock ksem_slock; -static LIST_HEAD(, ksem) ksem_head = LIST_HEAD_INITIALIZER(&ksem_head); +static kmutex_t ksem_mutex; +static struct ksem_list ksem_head = LIST_HEAD_INITIALIZER(&ksem_head); +static struct ksem_list ksem_hash[SEM_HASHTBL_SIZE]; static int nsems = 0; +/* + * ksem_counter is the last assigned semid_t. It needs to be COMPAT_NETBSD32 + * friendly, even though semid_t itself is defined as uintptr_t. 
+ */ +static uint32_t ksem_counter = 1; + +static specificdata_key_t ksem_specificdata_key; + static void ksem_free(struct ksem *ks) { - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); + /* * If the ksem is anonymous (or has been unlinked), then * this is the end if its life. */ if (ks->ks_name == NULL) { - simple_unlock(&ks->ks_interlock); - free(ks, M_SEM); + mutex_exit(&ks->ks_interlock); + mutex_destroy(&ks->ks_interlock); + cv_destroy(&ks->ks_cv); - simple_lock(&ksem_slock); + mutex_enter(&ksem_mutex); nsems--; - simple_unlock(&ksem_slock); + LIST_REMOVE(ks, ks_hash); + mutex_exit(&ksem_mutex); + + kmem_free(ks, sizeof(*ks)); return; } - simple_unlock(&ks->ks_interlock); + mutex_exit(&ks->ks_interlock); } -static __inline void +static inline void ksem_addref(struct ksem *ks) { - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); ks->ks_ref++; - KASSERT(ks->ks_ref != 0); /* XXX KDASSERT */ + KASSERT(ks->ks_ref != 0); } -static __inline void +static inline void ksem_delref(struct ksem *ks) { - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); - KASSERT(ks->ks_ref != 0); /* XXX KDASSERT */ + KASSERT(mutex_owned(&ks->ks_interlock)); + KASSERT(ks->ks_ref != 0); if (--ks->ks_ref == 0) { ksem_free(ks); return; } - simple_unlock(&ks->ks_interlock); + mutex_exit(&ks->ks_interlock); } static struct ksem_proc * @@ -177,31 +199,51 @@ ksem_proc_alloc(void) { struct ksem_proc *kp; - kp = malloc(sizeof(*kp), M_SEM, M_WAITOK); - lockinit(&kp->kp_lock, PWAIT, "ksproc", 0, 0); + kp = kmem_alloc(sizeof(*kp), KM_SLEEP); + rw_init(&kp->kp_lock); LIST_INIT(&kp->kp_ksems); return (kp); } static void +ksem_proc_dtor(void *arg) +{ + struct ksem_proc *kp = arg; + struct ksem_ref *ksr; + + rw_enter(&kp->kp_lock, RW_WRITER); + + while ((ksr = LIST_FIRST(&kp->kp_ksems)) != NULL) { + LIST_REMOVE(ksr, ksr_list); + mutex_enter(&ksr->ksr_ksem->ks_interlock); + ksem_delref(ksr->ksr_ksem); + kmem_free(ksr, sizeof(*ksr)); + } + + rw_exit(&kp->kp_lock); + rw_destroy(&kp->kp_lock); + kmem_free(kp, sizeof(*kp)); +} + +static void ksem_add_proc(struct proc *p, struct ksem *ks) { struct ksem_proc *kp; struct ksem_ref *ksr; - if (p->p_ksems == NULL) { + kp = proc_getspecific(p, ksem_specificdata_key); + if (kp == NULL) { kp = ksem_proc_alloc(); - p->p_ksems = kp; - } else - kp = p->p_ksems; + proc_setspecific(p, ksem_specificdata_key, kp); + } - ksr = malloc(sizeof(*ksr), M_SEM, M_WAITOK); + ksr = kmem_alloc(sizeof(*ksr), KM_SLEEP); ksr->ksr_ksem = ks; - lockmgr(&kp->kp_lock, LK_EXCLUSIVE, NULL); + rw_enter(&kp->kp_lock, RW_WRITER); LIST_INSERT_HEAD(&kp->kp_ksems, ksr, ksr_list); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp->kp_lock); } /* We MUST have a write lock on the ksem_proc list! 
*/ @@ -210,7 +252,7 @@ ksem_drop_proc(struct ksem_proc *kp, str { struct ksem_ref *ksr; - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) { if (ksr->ksr_ksem == ks) { ksem_delref(ks); @@ -225,28 +267,42 @@ ksem_drop_proc(struct ksem_proc *kp, str } static int -ksem_perm(struct proc *p, struct ksem *ks) +ksem_perm(struct lwp *l, struct ksem *ks) { - struct ucred *uc; + kauth_cred_t uc; - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); - uc = p->p_ucred; - if ((uc->cr_uid == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) || - (uc->cr_gid == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) || - (ks->ks_mode & S_IWOTH) != 0 || suser(uc, &p->p_acflag) == 0) + KASSERT(mutex_owned(&ks->ks_interlock)); + uc = l->l_cred; + if ((kauth_cred_geteuid(uc) == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) || + (kauth_cred_getegid(uc) == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) || + (ks->ks_mode & S_IWOTH) != 0 || + kauth_authorize_generic(uc, KAUTH_GENERIC_ISSUSER, NULL) == 0) return (0); return (EPERM); } static struct ksem * +ksem_lookup_byid(semid_t id) +{ + struct ksem *ks; + + KASSERT(mutex_owned(&ksem_mutex)); + LIST_FOREACH(ks, &ksem_hash[SEM_HASH(id)], ks_hash) { + if (ks->ks_id == id) + return ks; + } + return NULL; +} + +static struct ksem * ksem_lookup_byname(const char *name) { struct ksem *ks; - LOCK_ASSERT(simple_lock_held(&ksem_slock)); + KASSERT(mutex_owned(&ksem_mutex)); LIST_FOREACH(ks, &ksem_head, ks_entry) { if (strcmp(ks->ks_name, name) == 0) { - simple_lock(&ks->ks_interlock); + mutex_enter(&ks->ks_interlock); return (ks); } } @@ -254,29 +310,30 @@ ksem_lookup_byname(const char *name) } static int -ksem_create(struct proc *p, const char *name, struct ksem **ksret, +ksem_create(struct lwp *l, const char *name, struct ksem **ksret, mode_t mode, unsigned int value) { struct ksem *ret; - struct ucred *uc; + kauth_cred_t uc; size_t len; - uc = p->p_ucred; + uc = l->l_cred; if (value > SEM_VALUE_MAX) return (EINVAL); - ret = malloc(sizeof(*ret), M_SEM, M_WAITOK | M_ZERO); + ret = kmem_zalloc(sizeof(*ret), KM_SLEEP); if (name != NULL) { len = strlen(name); if (len > SEM_MAX_NAMELEN) { - free(ret, M_SEM); + kmem_free(ret, sizeof(*ret)); return (ENAMETOOLONG); } /* name must start with a '/' but not contain one. 
*/ if (*name != '/' || len < 2 || strchr(name + 1, '/') != NULL) { - free(ret, M_SEM); + kmem_free(ret, sizeof(*ret)); return (EINVAL); } - ret->ks_name = malloc(len + 1, M_SEM, M_WAITOK); + ret->ks_namelen = len + 1; + ret->ks_name = kmem_alloc(ret->ks_namelen, KM_SLEEP); strlcpy(ret->ks_name, name, len + 1); } else ret->ks_name = NULL; @@ -284,20 +341,29 @@ ksem_create(struct proc *p, const char * ret->ks_value = value; ret->ks_ref = 1; ret->ks_waiters = 0; - ret->ks_uid = uc->cr_uid; - ret->ks_gid = uc->cr_gid; - simple_lock_init(&ret->ks_interlock); + ret->ks_uid = kauth_cred_geteuid(uc); + ret->ks_gid = kauth_cred_getegid(uc); + mutex_init(&ret->ks_interlock, MUTEX_DEFAULT, IPL_NONE); + cv_init(&ret->ks_cv, "psem"); - simple_lock(&ksem_slock); + mutex_enter(&ksem_mutex); if (nsems >= SEM_MAX) { - simple_unlock(&ksem_slock); + mutex_exit(&ksem_mutex); if (ret->ks_name != NULL) - free(ret->ks_name, M_SEM); - free(ret, M_SEM); + kmem_free(ret->ks_name, ret->ks_namelen); + kmem_free(ret, sizeof(*ret)); return (ENFILE); } nsems++; - simple_unlock(&ksem_slock); + while (ksem_lookup_byid(ksem_counter) != NULL) { + ksem_counter++; + /* 0 is a special value for libpthread */ + if (ksem_counter == 0) + ksem_counter++; + } + ret->ks_id = ksem_counter; + LIST_INSERT_HEAD(&ksem_hash[SEM_HASH(ret->ks_id)], ret, ks_hash); + mutex_exit(&ksem_mutex); *ksret = ret; return (0); @@ -310,18 +376,26 @@ sys__ksem_init(struct lwp *l, void *v, r unsigned int value; semid_t *idp; } */ *uap = v; + + return do_ksem_init(l, SCARG(uap, value), SCARG(uap, idp), copyout); +} + +int +do_ksem_init(struct lwp *l, unsigned int value, semid_t *idp, + copyout_t docopyout) +{ struct ksem *ks; semid_t id; int error; /* Note the mode does not matter for anonymous semaphores. */ - error = ksem_create(l->l_proc, NULL, &ks, 0, SCARG(uap, value)); + error = ksem_create(l, NULL, &ks, 0, value); if (error) return (error); id = SEM_TO_ID(ks); - error = copyout(&id, SCARG(uap, idp), sizeof(id)); + error = (*docopyout)(&id, idp, sizeof(id)); if (error) { - simple_lock(&ks->ks_interlock); + mutex_enter(&ks->ks_interlock); ksem_delref(ks); return (error); } @@ -341,26 +415,35 @@ sys__ksem_open(struct lwp *l, void *v, r unsigned int value; semid_t *idp; } */ *uap = v; + + return do_ksem_open(l, SCARG(uap, name), SCARG(uap, oflag), + SCARG(uap, mode), SCARG(uap, value), SCARG(uap, idp), copyout); +} + +int +do_ksem_open(struct lwp *l, const char *semname, int oflag, mode_t mode, + unsigned int value, semid_t *idp, copyout_t docopyout) +{ char name[SEM_MAX_NAMELEN + 1]; size_t done; int error; struct ksem *ksnew, *ks; semid_t id; - error = copyinstr(SCARG(uap, name), name, sizeof(name), &done); + error = copyinstr(semname, name, sizeof(name), &done); if (error) return (error); ksnew = NULL; - simple_lock(&ksem_slock); + mutex_enter(&ksem_mutex); ks = ksem_lookup_byname(name); /* Found one? */ if (ks != NULL) { /* Check for exclusive create. */ - if (SCARG(uap, oflag) & O_EXCL) { - simple_unlock(&ks->ks_interlock); - simple_unlock(&ksem_slock); + if (oflag & O_EXCL) { + mutex_exit(&ks->ks_interlock); + mutex_exit(&ksem_mutex); return (EEXIST); } found_one: @@ -368,19 +451,19 @@ sys__ksem_open(struct lwp *l, void *v, r * Verify permissions. If we can access it, add * this process's reference. 
*/ - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); - error = ksem_perm(l->l_proc, ks); + KASSERT(mutex_owned(&ks->ks_interlock)); + error = ksem_perm(l, ks); if (error == 0) ksem_addref(ks); - simple_unlock(&ks->ks_interlock); - simple_unlock(&ksem_slock); + mutex_exit(&ks->ks_interlock); + mutex_exit(&ksem_mutex); if (error) return (error); id = SEM_TO_ID(ks); - error = copyout(&id, SCARG(uap, idp), sizeof(id)); + error = (*docopyout)(&id, idp, sizeof(id)); if (error) { - simple_lock(&ks->ks_interlock); + mutex_enter(&ks->ks_interlock); ksem_delref(ks); return (error); } @@ -393,27 +476,26 @@ sys__ksem_open(struct lwp *l, void *v, r /* * didn't ask for creation? error. */ - if ((SCARG(uap, oflag) & O_CREAT) == 0) { - simple_unlock(&ksem_slock); + if ((oflag & O_CREAT) == 0) { + mutex_exit(&ksem_mutex); return (ENOENT); } /* * We may block during creation, so drop the lock. */ - simple_unlock(&ksem_slock); - error = ksem_create(l->l_proc, name, &ksnew, SCARG(uap, mode), - SCARG(uap, value)); + mutex_exit(&ksem_mutex); + error = ksem_create(l, name, &ksnew, mode, value); if (error != 0) return (error); id = SEM_TO_ID(ksnew); - error = copyout(&id, SCARG(uap, idp), sizeof(id)); + error = (*docopyout)(&id, idp, sizeof(id)); if (error) { - free(ksnew->ks_name, M_SEM); + kmem_free(ksnew->ks_name, ksnew->ks_namelen); ksnew->ks_name = NULL; - simple_lock(&ksnew->ks_interlock); + mutex_enter(&ksnew->ks_interlock); ksem_delref(ksnew); return (error); } @@ -422,16 +504,16 @@ sys__ksem_open(struct lwp *l, void *v, r * We need to make sure we haven't lost a race while * allocating during creation. */ - simple_lock(&ksem_slock); + mutex_enter(&ksem_mutex); if ((ks = ksem_lookup_byname(name)) != NULL) { - if (SCARG(uap, oflag) & O_EXCL) { - simple_unlock(&ks->ks_interlock); - simple_unlock(&ksem_slock); + if (oflag & O_EXCL) { + mutex_exit(&ks->ks_interlock); + mutex_exit(&ksem_mutex); - free(ksnew->ks_name, M_SEM); + kmem_free(ksnew->ks_name, ksnew->ks_namelen); ksnew->ks_name = NULL; - simple_lock(&ksnew->ks_interlock); + mutex_enter(&ksnew->ks_interlock); ksem_delref(ksnew); return (EEXIST); } @@ -439,7 +521,7 @@ sys__ksem_open(struct lwp *l, void *v, r } else { /* ksnew already has its initial reference. 
*/ LIST_INSERT_HEAD(&ksem_head, ksnew, ks_entry); - simple_unlock(&ksem_slock); + mutex_exit(&ksem_mutex); ksem_add_proc(l->l_proc, ksnew); } @@ -453,8 +535,8 @@ ksem_lookup_proc(struct ksem_proc *kp, s struct ksem_ref *ksr; LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) { - if (id == (semid_t) ksr->ksr_ksem) { - simple_lock(&ksr->ksr_ksem->ks_interlock); + if (id == SEM_TO_ID(ksr->ksr_ksem)) { + mutex_enter(&ksr->ksr_ksem->ks_interlock); return (ksr->ksr_ksem); } } @@ -469,7 +551,7 @@ sys__ksem_unlink(struct lwp *l, void *v, const char *name; } */ *uap = v; char name[SEM_MAX_NAMELEN + 1], *cp; - size_t done; + size_t done, len; struct ksem *ks; int error; @@ -477,27 +559,28 @@ sys__ksem_unlink(struct lwp *l, void *v, if (error) return error; - simple_lock(&ksem_slock); + mutex_enter(&ksem_mutex); ks = ksem_lookup_byname(name); if (ks == NULL) { - simple_unlock(&ksem_slock); + mutex_exit(&ksem_mutex); return (ENOENT); } - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); LIST_REMOVE(ks, ks_entry); cp = ks->ks_name; + len = ks->ks_namelen; ks->ks_name = NULL; - simple_unlock(&ksem_slock); + mutex_exit(&ksem_mutex); if (ks->ks_ref == 0) ksem_free(ks); else - simple_unlock(&ks->ks_interlock); + mutex_exit(&ks->ks_interlock); - free(cp, M_SEM); + kmem_free(cp, len); return (0); } @@ -512,27 +595,28 @@ sys__ksem_close(struct lwp *l, void *v, struct ksem_ref *ksr; struct ksem *ks; - if ((kp = l->l_proc->p_ksems) == NULL) + kp = proc_getspecific(l->l_proc, ksem_specificdata_key); + if (kp == NULL) return (EINVAL); - lockmgr(&kp->kp_lock, LK_EXCLUSIVE, NULL); + rw_enter(&kp->kp_lock, RW_WRITER); ks = ksem_lookup_proc(kp, SCARG(uap, id)); if (ks == NULL) { - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp->kp_lock); return (EINVAL); } - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); if (ks->ks_name == NULL) { - simple_unlock(&ks->ks_interlock); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + mutex_exit(&ks->ks_interlock); + rw_exit(&kp->kp_lock); return (EINVAL); } ksr = ksem_drop_proc(kp, ks); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); - free(ksr, M_SEM); + rw_exit(&kp->kp_lock); + kmem_free(ksr, sizeof(*ksr)); return (0); } @@ -547,26 +631,27 @@ sys__ksem_post(struct lwp *l, void *v, r struct ksem *ks; int error; - if ((kp = l->l_proc->p_ksems) == NULL) + kp = proc_getspecific(l->l_proc, ksem_specificdata_key); + if (kp == NULL) return (EINVAL); - lockmgr(&kp->kp_lock, LK_SHARED, NULL); + rw_enter(&kp->kp_lock, RW_READER); ks = ksem_lookup_proc(kp, SCARG(uap, id)); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp->kp_lock); if (ks == NULL) return (EINVAL); - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); if (ks->ks_value == SEM_VALUE_MAX) { error = EOVERFLOW; goto out; } ++ks->ks_value; if (ks->ks_waiters) - wakeup(ks); + cv_broadcast(&ks->ks_cv); error = 0; out: - simple_unlock(&ks->ks_interlock); + mutex_exit(&ks->ks_interlock); return (error); } @@ -577,21 +662,24 @@ ksem_wait(struct lwp *l, semid_t id, int struct ksem *ks; int error; - if ((kp = l->l_proc->p_ksems) == NULL) + kp = proc_getspecific(l->l_proc, ksem_specificdata_key); + if (kp == NULL) return (EINVAL); - lockmgr(&kp->kp_lock, LK_SHARED, NULL); + rw_enter(&kp->kp_lock, RW_READER); ks = ksem_lookup_proc(kp, id); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp->kp_lock); if (ks == NULL) return (EINVAL); - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + 
KASSERT(mutex_owned(&ks->ks_interlock)); ksem_addref(ks); while (ks->ks_value == 0) { ks->ks_waiters++; - error = tryflag ? EAGAIN : ltsleep(ks, PCATCH, "psem", 0, - &ks->ks_interlock); + if (tryflag) + error = EAGAIN; + else + error = cv_wait_sig(&ks->ks_cv, &ks->ks_interlock); ks->ks_waiters--; if (error) goto out; @@ -634,18 +722,19 @@ sys__ksem_getvalue(struct lwp *l, void * struct ksem *ks; unsigned int val; - if ((kp = l->l_proc->p_ksems) == NULL) + kp = proc_getspecific(l->l_proc, ksem_specificdata_key); + if (kp == NULL) return (EINVAL); - lockmgr(&kp->kp_lock, LK_SHARED, NULL); + rw_enter(&kp->kp_lock, RW_READER); ks = ksem_lookup_proc(kp, SCARG(uap, id)); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp->kp_lock); if (ks == NULL) return (EINVAL); - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); val = ks->ks_value; - simple_unlock(&ks->ks_interlock); + mutex_exit(&ks->ks_interlock); return (copyout(&val, SCARG(uap, value), sizeof(val))); } @@ -660,18 +749,19 @@ sys__ksem_destroy(struct lwp *l, void *v struct ksem_ref *ksr; struct ksem *ks; - if ((kp = l->l_proc->p_ksems) == NULL) + kp = proc_getspecific(l->l_proc, ksem_specificdata_key); + if (kp == NULL) return (EINVAL); - lockmgr(&kp->kp_lock, LK_EXCLUSIVE, NULL); + rw_enter(&kp->kp_lock, RW_WRITER); ks = ksem_lookup_proc(kp, SCARG(uap, id)); if (ks == NULL) { - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp->kp_lock); return (EINVAL); } - LOCK_ASSERT(simple_lock_held(&ks->ks_interlock)); + KASSERT(mutex_owned(&ks->ks_interlock)); /* * XXX This misses named semaphores which have been unlink'd, @@ -679,20 +769,20 @@ sys__ksem_destroy(struct lwp *l, void *v * XXX undefined, this is technically allowed. */ if (ks->ks_name != NULL) { - simple_unlock(&ks->ks_interlock); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + mutex_exit(&ks->ks_interlock); + rw_exit(&kp->kp_lock); return (EINVAL); } if (ks->ks_waiters) { - simple_unlock(&ks->ks_interlock); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); + mutex_exit(&ks->ks_interlock); + rw_exit(&kp->kp_lock); return (EBUSY); } ksr = ksem_drop_proc(kp, ks); - lockmgr(&kp->kp_lock, LK_RELEASE, NULL); - free(ksr, M_SEM); + rw_exit(&kp->kp_lock); + kmem_free(ksr, sizeof(*ksr)); return (0); } @@ -703,54 +793,54 @@ ksem_forkhook(struct proc *p2, struct pr struct ksem_proc *kp1, *kp2; struct ksem_ref *ksr, *ksr1; - if ((kp1 = p1->p_ksems) == NULL) { - p2->p_ksems = NULL; + kp1 = proc_getspecific(p1, ksem_specificdata_key); + if (kp1 == NULL) return; - } - p2->p_ksems = kp2 = ksem_proc_alloc(); + kp2 = ksem_proc_alloc(); - lockmgr(&kp1->kp_lock, LK_SHARED, NULL); + rw_enter(&kp1->kp_lock, RW_READER); if (!LIST_EMPTY(&kp1->kp_ksems)) { LIST_FOREACH(ksr, &kp1->kp_ksems, ksr_list) { - ksr1 = malloc(sizeof(*ksr), M_SEM, M_WAITOK); + ksr1 = kmem_alloc(sizeof(*ksr), KM_SLEEP); ksr1->ksr_ksem = ksr->ksr_ksem; - simple_lock(&ksr->ksr_ksem->ks_interlock); + mutex_enter(&ksr->ksr_ksem->ks_interlock); ksem_addref(ksr->ksr_ksem); - simple_unlock(&ksr->ksr_ksem->ks_interlock); + mutex_exit(&ksr->ksr_ksem->ks_interlock); LIST_INSERT_HEAD(&kp2->kp_ksems, ksr1, ksr_list); } } - lockmgr(&kp1->kp_lock, LK_RELEASE, NULL); + rw_exit(&kp1->kp_lock); + proc_setspecific(p2, ksem_specificdata_key, kp2); } static void -ksem_exithook(struct proc *p, void *arg) +ksem_exechook(struct proc *p, void *arg) { struct ksem_proc *kp; - struct ksem_ref *ksr; - - if ((kp = p->p_ksems) == NULL) - return; - /* Don't bother locking; process is dying. 
*/ - - while ((ksr = LIST_FIRST(&kp->kp_ksems)) != NULL) { - LIST_REMOVE(ksr, ksr_list); - simple_lock(&ksr->ksr_ksem->ks_interlock); - ksem_delref(ksr->ksr_ksem); - free(ksr, M_SEM); + kp = proc_getspecific(p, ksem_specificdata_key); + if (kp != NULL) { + proc_setspecific(p, ksem_specificdata_key, NULL); + ksem_proc_dtor(kp); } } void ksem_init(void) { + int i, error; - simple_lock_init(&ksem_slock); - exithook_establish(ksem_exithook, NULL); - exechook_establish(ksem_exithook, NULL); + mutex_init(&ksem_mutex, MUTEX_DEFAULT, IPL_NONE); + exechook_establish(ksem_exechook, NULL); forkhook_establish(ksem_forkhook); + + for (i = 0; i < SEM_HASHTBL_SIZE; i++) + LIST_INIT(&ksem_hash[i]); + + error = proc_specific_key_create(&ksem_specificdata_key, + ksem_proc_dtor); + KASSERT(error == 0); }
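
The core of this change replaces the ltsleep()/wakeup() sleep path and the per-semaphore simple_lock with a kmutex_t/kcondvar_t pair: ksem_wait() now sleeps with cv_wait_sig() while ks_value is zero, and sys__ksem_post() bumps the count and calls cv_broadcast() when ks_waiters is non-zero. The fragment below is a minimal user-space sketch of the same mutex-plus-condvar counting-semaphore pattern, written with POSIX threads rather than the NetBSD kernel primitives; the "usem" names are invented for illustration and are not part of the patch.

/*
 * User-space analogue of the new ksem_wait()/sys__ksem_post() logic:
 * a counting semaphore built from a mutex and a condition variable.
 * The "usem" names are invented for this sketch (POSIX threads, not
 * the NetBSD kmutex/kcondvar API).
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct usem {
	pthread_mutex_t	u_interlock;	/* role of ks_interlock */
	pthread_cond_t	u_cv;		/* role of ks_cv */
	unsigned int	u_value;	/* role of ks_value */
	unsigned int	u_waiters;	/* role of ks_waiters */
};

static void
usem_init(struct usem *us, unsigned int value)
{
	pthread_mutex_init(&us->u_interlock, NULL);
	pthread_cond_init(&us->u_cv, NULL);
	us->u_value = value;
	us->u_waiters = 0;
}

/* Like ksem_wait(): re-check the count in a loop around the condvar wait. */
static int
usem_wait(struct usem *us, int tryflag)
{
	int error = 0;

	pthread_mutex_lock(&us->u_interlock);
	while (us->u_value == 0) {
		if (tryflag) {
			error = EAGAIN;
			break;
		}
		us->u_waiters++;
		pthread_cond_wait(&us->u_cv, &us->u_interlock);
		us->u_waiters--;
	}
	if (error == 0)
		us->u_value--;
	pthread_mutex_unlock(&us->u_interlock);
	return error;
}

/* Like sys__ksem_post(): bump the count, wake sleepers if any. */
static int
usem_post(struct usem *us)
{
	pthread_mutex_lock(&us->u_interlock);
	if (us->u_value == ~0U) {		/* SEM_VALUE_MAX analogue */
		pthread_mutex_unlock(&us->u_interlock);
		return EOVERFLOW;
	}
	us->u_value++;
	if (us->u_waiters)
		pthread_cond_broadcast(&us->u_cv);
	pthread_mutex_unlock(&us->u_interlock);
	return 0;
}

int
main(void)
{
	struct usem us;

	usem_init(&us, 0);
	usem_post(&us);
	printf("wait: %d, trywait on empty: %d\n",
	    usem_wait(&us, 0), usem_wait(&us, 1));
	return 0;
}

As in the kernel code, the waiter re-checks the count in a loop after waking, because a broadcast wakes every sleeper and another thread may consume the post first.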
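
The patch also stops deriving the semaphore identifier from the kernel pointer (the old SEM_TO_ID() cast) and instead assigns a small unique ks_id from a 32-bit counter, kept in a fixed-size hash table so the value stays representable for COMPAT_NETBSD32 callers; the counter skips 0, which is reserved as a special value for libpthread, and skips any identifier still in use. The sketch below shows that allocation scheme in plain, single-threaded C with an invented "idnode" structure; it illustrates the technique and is not the kernel code itself.

/*
 * Illustrative sketch of the ks_id allocation scheme: a 32-bit counter,
 * a small hash table for lookup, 0 reserved, in-use values skipped.
 * The "idnode" names are invented for this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ID_HASHTBL_SIZE	13				/* mirrors SEM_HASHTBL_SIZE */
#define ID_HASH(id)	((id) % ID_HASHTBL_SIZE)	/* mirrors SEM_HASH() */

struct idnode {
	uint32_t	 id;
	struct idnode	*next;
};

static struct idnode	*id_hash[ID_HASHTBL_SIZE];
static uint32_t		 id_counter = 1;	/* mirrors ksem_counter */

static struct idnode *
id_lookup(uint32_t id)
{
	struct idnode *n;

	for (n = id_hash[ID_HASH(id)]; n != NULL; n = n->next)
		if (n->id == id)
			return n;
	return NULL;
}

/* Assign the next free non-zero identifier and enter it in the table. */
static uint32_t
id_alloc(void)
{
	struct idnode *n;

	if ((n = calloc(1, sizeof(*n))) == NULL)
		abort();
	while (id_counter == 0 || id_lookup(id_counter) != NULL)
		id_counter++;
	n->id = id_counter++;
	n->next = id_hash[ID_HASH(n->id)];
	id_hash[ID_HASH(n->id)] = n;
	return n->id;
}

int
main(void)
{
	printf("%u %u %u\n", id_alloc(), id_alloc(), id_alloc());
	return 0;
}

In the kernel, ksem_mutex protects both the counter and the hash buckets while this search runs; the sketch omits locking because it is single-threaded.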