--- version 1.84, 2015/12/08 14:52:06
+++ version 1.85, 2016/01/31 04:40:01
@@ -986 +986 @@ kqueue_register(struct kqueue *kq, struc
 			kev->data = 0;
 			kn->kn_kevent = *kev;

+			KASSERT(kn->kn_fop != NULL);
 			/*
 			 * apply reference count to knote structure, and
 			 * do not release it at the end of this routine.
@@ -1043 +1044 @@ kqueue_register(struct kqueue *kq, struc
 	 * support events, and the attach routine is
 	 * broken and does not return an error.
 	 */
+	KASSERT(kn->kn_fop != NULL);
 	KASSERT(kn->kn_fop->f_event != NULL);
 	KERNEL_LOCK(1, NULL);			/* XXXSMP */
 	rv = (*kn->kn_fop->f_event)(kn, 0);
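A pattern runs through this revision: KASSERT(kn->kn_fop != NULL) is added in front of every dereference of kn->kn_fop, so a knote whose filter ops were never set dies on a clear assertion failure rather than a wild pointer dereference. The same assertion recurs below in kqueue_scan(), knote(), and knote_detach().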
@@ -1150 +1152 @@ kqueue_scan(file_t *fp, size_t maxevents
 	struct kqueue	*kq;
 	struct kevent	*kevp;
 	struct timespec	ats, sleepts;
-	struct knote	*kn, *marker;
+	struct knote	*kn, *marker, morker;
 	size_t		count, nkev, nevents;
 	int		timeout, error, rv;
 	filedesc_t	*fdp;
@@ -1178 +1180 @@ kqueue_scan(file_t *fp, size_t maxevents
 		timeout = 0;
 	}

-	marker = kmem_zalloc(sizeof(*marker), KM_SLEEP);
+	memset(&morker, 0, sizeof(morker));
+	marker = &morker;
 	marker->kn_status = KN_MARKER;
 	mutex_spin_enter(&kq->kq_lock);
 retry:
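The marker knote never outlives this call, so 1.85 replaces the kmem_zalloc()/kmem_free() pair with a zeroed object on the caller's stack (the whimsically named morker). A minimal sketch of the pattern, with hypothetical names (struct node, scan(), ST_MARKER are stand-ins, not kernel API):

	#include <string.h>
	#include <sys/queue.h>

	/* Hypothetical stand-ins for struct knote and KN_MARKER. */
	struct node {
		TAILQ_ENTRY(node) entries;
		int status;
	};
	#define	ST_MARKER	0x01

	TAILQ_HEAD(nodelist, node);

	/*
	 * Scan a shared queue up to a fixed point: insert a marker at
	 * the tail, then process entries until the marker comes back
	 * around.  The marker is dead once scan() returns, so it can
	 * live on the stack; no allocation, no free on any exit path.
	 */
	void
	scan(struct nodelist *head)
	{
		struct node morker;	/* marker on the stack */
		struct node *marker, *n;

		memset(&morker, 0, sizeof(morker));
		marker = &morker;
		marker->status = ST_MARKER;

		TAILQ_INSERT_TAIL(head, marker, entries);
		while ((n = TAILQ_FIRST(head)) != marker) {
			TAILQ_REMOVE(head, n, entries);
			/* ... process n, possibly re-queueing it ... */
		}
		/* Unlink the marker before returning. */
		TAILQ_REMOVE(head, marker, entries);
	}

The trade-off is that the function must unlink the marker on every exit path; returning with a stack object still threaded into a shared queue would corrupt the list.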
@@ -1219 +1222 @@ kqueue_scan(file_t *fp, size_t maxevents
 			kn = TAILQ_NEXT(kn, kn_tqe);
 		}
 		kq_check(kq);
-		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 		kq->kq_count--;
+		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 		kn->kn_status &= ~KN_QUEUED;
+		kn->kn_status |= KN_BUSY;
 		kq_check(kq);
 		if (kn->kn_status & KN_DISABLED) {
+			kn->kn_status &= ~KN_BUSY;
 			/* don't want disabled events */
 			continue;
 		}
 		if ((kn->kn_flags & EV_ONESHOT) == 0) {
 			mutex_spin_exit(&kq->kq_lock);
+			KASSERT(kn->kn_fop != NULL);
 			KASSERT(kn->kn_fop->f_event != NULL);
 			KERNEL_LOCK(1, NULL);		/* XXXSMP */
 			rv = (*kn->kn_fop->f_event)(kn, 0);
 			KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
 			mutex_spin_enter(&kq->kq_lock);
 			/* Re-poll if note was re-enqueued. */
-			if ((kn->kn_status & KN_QUEUED) != 0)
+			if ((kn->kn_status & KN_QUEUED) != 0) {
+				kn->kn_status &= ~KN_BUSY;
 				continue;
+			}
 			if (rv == 0) {
 				/*
 				 * non-ONESHOT event that hasn't
 				 * triggered again, so de-queue.
 				 */
-				kn->kn_status &= ~KN_ACTIVE;
+				kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
 				continue;
 			}
 		}
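The new KN_BUSY bit records that kqueue_scan() has taken the knote off the queue but is still using it while kq_lock is dropped around the f_event callback; note that every branch out of this block now clears the bit. A hedged sketch of the dequeue/work/release protocol, using a pthread mutex and hypothetical names (struct item, q_lock, filter_event()) in place of the kernel primitives:

	#include <pthread.h>
	#include <stdbool.h>

	/* Hypothetical stand-ins; the kernel uses kq_lock and KN_BUSY. */
	struct item {
		bool queued;
		bool busy;
	};

	static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

	static void
	filter_event(struct item *it)	/* stand-in for (*kn_fop->f_event)() */
	{
		(void)it;
	}

	static void
	process_one(struct item *it)
	{
		pthread_mutex_lock(&q_lock);
		/* ... unlink it from the shared list ... */
		it->queued = false;
		it->busy = true;	/* this thread now owns the item */
		pthread_mutex_unlock(&q_lock);

		/* Call out with the lock dropped; others may see busy. */
		filter_event(it);

		pthread_mutex_lock(&q_lock);
		it->busy = false;	/* release ownership on every exit path */
		pthread_mutex_unlock(&q_lock);
	}

Clearing the bit on every exit path is the whole contract: a path that forgets leaves the knote permanently "owned" and wedges any detach that waits for it.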
@@ -1253 +1261 @@ kqueue_scan(file_t *fp, size_t maxevents
 			/* delete ONESHOT events after retrieval */
 			mutex_spin_exit(&kq->kq_lock);
 			mutex_enter(&fdp->fd_lock);
+			kn->kn_status &= ~KN_BUSY;
 			knote_detach(kn, fdp, true);
 			mutex_spin_enter(&kq->kq_lock);
 		} else if (kn->kn_flags & EV_CLEAR) {
 			/* clear state after retrieval */
 			kn->kn_data = 0;
 			kn->kn_fflags = 0;
-			kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE);
+			kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
 		} else if (kn->kn_flags & EV_DISPATCH) {
 			kn->kn_status |= KN_DISABLED;
-			kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE);
+			kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
 		} else {
 			/* add event back on list */
 			kq_check(kq);
+			kn->kn_status |= KN_QUEUED;
+			kn->kn_status &= ~KN_BUSY;
 			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 			kq->kq_count++;
-			kn->kn_status |= KN_QUEUED;
 			kq_check(kq);
 		}
 		if (nkev == kevcnt) {
@@ -1292 +1302 @@ kqueue_scan(file_t *fp, size_t maxevents
 	}
 done:
 	mutex_spin_exit(&kq->kq_lock);
-	if (marker != NULL)
-		kmem_free(marker, sizeof(*marker));
 	if (nkev != 0) {
 		/* copyout remaining events */
 		error = (*keops->keo_put_events)(keops->keo_private,
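With the marker now on kqueue_scan()'s stack, the done: path drops the conditional kmem_free(); the only cleanup obligation left is that the marker be unlinked from kq_head on every return path.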
@@ -1514 +1522 @@ knote(struct klist *list, long hint)
 	struct knote *kn, *tmpkn;

 	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
+		KASSERT(kn->kn_fop != NULL);
 		KASSERT(kn->kn_fop->f_event != NULL);
 		if ((*kn->kn_fop->f_event)(kn, hint))
 			knote_activate(kn);
@@ -1555 +1564 @@ knote_detach(struct knote *kn, filedesc_
 	KASSERT((kn->kn_status & KN_MARKER) == 0);
 	KASSERT(mutex_owned(&fdp->fd_lock));

+	KASSERT(kn->kn_fop != NULL);
 	/* Remove from monitored object. */
 	if (dofop) {
+		KASSERT(kn->kn_fop->f_detach != NULL);
 		KERNEL_LOCK(1, NULL);		/* XXXSMP */
 		(*kn->kn_fop->f_detach)(kn);
 		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
@@ -1571 +1582 @@ knote_detach(struct knote *kn, filedesc_
 	SLIST_REMOVE(list, kn, knote, kn_link);

 	/* Remove from kqueue. */
-	/* XXXAD should verify not in use by kqueue_scan. */
+again:
 	mutex_spin_enter(&kq->kq_lock);
 	if ((kn->kn_status & KN_QUEUED) != 0) {
 		kq_check(kq);
+		kq->kq_count--;
 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 		kn->kn_status &= ~KN_QUEUED;
-		kq->kq_count--;
 		kq_check(kq);
+	} else if (kn->kn_status & KN_BUSY) {
+		mutex_spin_exit(&kq->kq_lock);
+		goto again;
 	}
 	mutex_spin_exit(&kq->kq_lock);
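This hunk resolves the old XXXAD comment: rather than trusting that the knote is not in use by kqueue_scan(), knote_detach() now retries until KN_BUSY clears, so no scanner still holds a pointer to the knote once it is freed. Roughly, continuing the hypothetical sketch above:

	/*
	 * Detach side of the same protocol: never free an item while a
	 * scanner owns it.  Dropping and retaking the lock lets the
	 * scanner make progress.
	 */
	static void
	detach_one(struct item *it)
	{
	again:
		pthread_mutex_lock(&q_lock);
		if (it->queued) {
			/* ... unlink from the shared list ... */
			it->queued = false;
		} else if (it->busy) {
			pthread_mutex_unlock(&q_lock);
			goto again;	/* scanner still owns it; retry */
		}
		pthread_mutex_unlock(&q_lock);
		/* now safe to free the item */
	}

This is a busy-wait, tolerable only because the busy window spans a single filter callback.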
|
|
Line 1607 knote_enqueue(struct knote *kn) |
|
Line 1621 knote_enqueue(struct knote *kn) |
|
} |
} |
if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) { |
if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) { |
kq_check(kq); |
kq_check(kq); |
TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); |
|
kn->kn_status |= KN_QUEUED; |
kn->kn_status |= KN_QUEUED; |
|
TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); |
kq->kq_count++; |
kq->kq_count++; |
kq_check(kq); |
kq_check(kq); |
cv_broadcast(&kq->kq_cv); |
cv_broadcast(&kq->kq_cv); |
@@ -1632 +1646 @@ knote_activate(struct knote *kn)
 	kn->kn_status |= KN_ACTIVE;
 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
 		kq_check(kq);
-		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 		kn->kn_status |= KN_QUEUED;
+		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 		kq->kq_count++;
 		kq_check(kq);
 		cv_broadcast(&kq->kq_cv);
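knote_enqueue() and knote_activate() now set KN_QUEUED before the TAILQ_INSERT_TAIL(), the exact mirror of the new dequeue order in kqueue_scan() and knote_detach() (count, list, flag); presumably a consistency cleanup, since all of it happens under kq_lock where the intermediate order is not observable. A kq_check()-style consistency check, sketched against the hypothetical struct node queue from the first example:

	#include <assert.h>

	/*
	 * Sketch of the invariant the kq_check() bracketing protects:
	 * with the lock held, the cached count must match the list
	 * length.  Keeping flag/count updates in a fixed order around
	 * each TAILQ op makes any slip a hard assertion failure.
	 */
	static void
	check_queue(struct nodelist *head, int expected)
	{
		struct node *n;
		int count = 0;

		TAILQ_FOREACH(n, head, entries)
			count++;
		assert(count == expected);
	}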