33 #include <sys/types.h>
34 #ifdef HAVE_SYS_TIME_H
37 #include <sys/_time.h>
39 #include <sys/queue.h>
40 #include <sys/event.h>
48 #ifdef HAVE_INTTYPES_H
55 #if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
56 #define PTR_TO_UDATA(x) ((intptr_t)(x))
58 #define PTR_TO_UDATA(x) (x)
62 #include "event-internal.h"
64 #include "event-internal.h"
66 #define EVLIST_X_KQINKERNEL 0x1000
/* NOTE(review): fragment of struct kqop — the enclosing struct
 * declaration and its other members are not visible in this chunk. */
71 struct kevent *changes;	/* pending change records queued for the next kevent() call */
73 struct kevent *events;		/* result buffer filled in by kevent() */
74 struct event_list evsigevents[NSIG];	/* per-signal list of registered events (see kq_add/kq_del) */
/* Forward declarations for the kqueue backend entry points.
 * kq_add/kq_del take the opaque backend pointer (a struct kqop *)
 * plus the event being registered or removed. */
81 static int kq_add (
void *,
struct event *);
82 static int kq_del (
void *,
struct event *);
/* kq_insert queues a change record into kqop->changes for the next
 * kevent() call; kq_dealloc tears the backend down. */
84 static int kq_insert (
struct kqop *,
struct kevent *);
85 static void kq_dealloc (
struct event_base *,
void *);
/* Fragment of the backend initializer (signature and several error-path
 * lines are not visible in this chunk).  Visible responsibilities:
 * honor the EVENT_NOKQUEUE escape hatch, allocate the kqop state,
 * create the kqueue descriptor, size the change/result arrays, and
 * probe the kernel for a broken kqueue implementation. */
101 struct kqop *kqueueop;
/* Allow the user to disable this backend via the environment. */
104 if (getenv(
"EVENT_NOKQUEUE"))
107 if (!(kqueueop = calloc(1,
sizeof(
struct kqop))))
/* Create the kqueue itself; on failure warn and (presumably, in the
 * lines missing here) clean up and bail out. */
112 if ((kq = kqueue()) == -1) {
113 event_warn(
"kqueue");
/* Remember the creating process; kq_dealloc only closes the descriptor
 * when pid still matches getpid() (see kq_dealloc below) — presumably
 * a guard against a fork()ed child tearing down the parent's kqueue. */
120 kqueueop->pid = getpid();
/* Allocate NEVENT-entry change and result arrays; error paths for the
 * NULL checks are in lines not visible here. */
123 kqueueop->changes = malloc(NEVENT *
sizeof(
struct kevent));
124 if (kqueueop->changes == NULL) {
128 kqueueop->events = malloc(NEVENT *
sizeof(
struct kevent));
129 if (kqueueop->events == NULL) {
130 free (kqueueop->changes);
134 kqueueop->nevents = NEVENT;
/* Start every per-signal event list empty. */
137 for (i = 0; i < NSIG; ++
i) {
138 TAILQ_INIT(&kqueueop->evsigevents[i]);
/* Probe for a broken kqueue: submit a bogus ADD (ident -1) and expect
 * exactly one result that echoes the ident with EV_ERROR set. */
142 kqueueop->changes[0].ident = -1;
143 kqueueop->changes[0].filter = EVFILT_READ;
144 kqueueop->changes[0].flags = EV_ADD;
151 kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
152 kqueueop->events[0].ident != -1 ||
153 kqueueop->events[0].flags != EV_ERROR) {
/* Kernel did not behave as expected — refuse to use this backend. */
154 event_warn(
"%s: detected broken kqueue; not using.", __func__);
155 free(kqueueop->changes);
156 free(kqueueop->events);
/* Append one change record to kqop->changes so it is submitted on the
 * next kevent() call, growing both the change and result arrays when
 * the change buffer is full.  (The line that enlarges `nevents` before
 * the reallocs, and the return statements, are not visible in this
 * chunk.)  Return type line is also outside this view. */
166 kq_insert(
struct kqop *
kqop,
struct kevent *kev)
168 int nevents = kqop->nevents;
170 if (kqop->nchanges == nevents) {
171 struct kevent *newchange;
172 struct kevent *newresult;
/* Grow the change array first; on failure warn and (in missing lines)
 * bail out while kqop->changes is still valid — note realloc results
 * go through temporaries, so the originals are never leaked. */
176 newchange = realloc(kqop->changes,
177 nevents *
sizeof(
struct kevent));
178 if (newchange == NULL) {
179 event_warn(
"%s: malloc", __func__);
182 kqop->changes = newchange;
/* Grow the result array to match. */
184 newresult = realloc(kqop->events,
185 nevents *
sizeof(
struct kevent));
191 if (newresult == NULL) {
192 event_warn(
"%s: malloc", __func__);
195 kqop->events = newresult;
197 kqop->nevents = nevents;
/* Copy the caller's record into the next free change slot. */
200 memcpy(&kqop->changes[kqop->nchanges++], kev,
sizeof(
struct kevent));
/* Debug trace: fd, filter name, and whether this is a deletion.
 * NOTE(review): the filter is printed as EVFILT_WRITE for anything
 * that is not EVFILT_READ — signals would be mislabeled here. */
202 event_debug((
"%s: fd %d %s%s",
203 __func__, (
int)kev->ident,
204 kev->filter == EVFILT_READ ?
"EVFILT_READ" :
"EVFILT_WRITE",
205 kev->flags == EV_DELETE ?
" (del)" :
""));
/* Signal handler installed by kq_add for EVFILT_SIGNAL registrations
 * (see _evsignal_set_handler call below).  Body is not visible in this
 * chunk — presumably a no-op, since kqueue itself reports the signal;
 * TODO confirm against the full source. */
211 kq_sighandler(
int sig)
/* Fragment of the dispatch loop (function header and several lines are
 * not visible).  Submits all queued changes in one kevent() call with
 * an optional timeout, then activates the events reported back. */
219 struct kqop *kqop = arg;
220 struct kevent *changes = kqop->changes;
221 struct kevent *events = kqop->events;
/* Convert the caller's struct timeval timeout into the timespec that
 * kevent() expects (ts_p selection happens in lines not shown). */
227 TIMEVAL_TO_TIMESPEC(tv, &ts);
231 res = kevent(kqop->kq, changes, kqop->nchanges,
232 events, kqop->nevents, ts_p);
/* On failure: EINTR is tolerated (retry/return handled elsewhere);
 * anything else is a real error. */
235 if (errno != EINTR) {
236 event_warn(
"kevent");
243 event_debug((
"%s: kevent reports %d", __func__, res));
245 for (i = 0; i < res; i++) {
248 if (events[i].
flags & EV_ERROR) {
/* Per-event errors: EBADF/EINVAL/ENOENT are ignored (the fd may
 * simply have been closed or the entry already gone); any other
 * error code is surfaced through errno. */
261 if (events[i].data == EBADF ||
262 events[i].data == EINVAL ||
263 events[i].data == ENOENT)
265 errno = events[
i].data;
/* Map the kqueue filter to the libevent "which" bits (the
 * assignments inside these branches are in lines not shown). */
269 if (events[i].filter == EVFILT_READ) {
271 }
else if (events[i].filter == EVFILT_WRITE) {
273 }
else if (events[i].filter == EVFILT_SIGNAL) {
/* Signals: udata points at the per-signal event list; activate every
 * event on it, passing the delivery count (events[i].data). */
280 if (events[i].filter == EVFILT_SIGNAL) {
281 struct event_list *head =
282 (
struct event_list *)events[i].udata;
283 TAILQ_FOREACH(ev, head, ev_signal_next) {
284 event_active(ev, which, events[i].data);
/* I/O events: udata is the event itself.  Non-persistent events were
 * added EV_ONESHOT (see kq_add), so the kernel has already dropped
 * them — clear our in-kernel bookkeeping flag to match. */
287 ev = (
struct event *)events[i].udata;
289 if (!(ev->ev_events & EV_PERSIST))
290 ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
292 event_active(ev, which, 1);
/* Register an event with the kqueue backend.  Signals are tracked on a
 * per-signal list and registered with the kernel only once; read/write
 * interest becomes EVFILT_READ/EVFILT_WRITE change records queued via
 * kq_insert.  (Return statements and some flag assignments, e.g. the
 * EV_ADD lines, fall in lines not visible in this chunk.) */
301 kq_add(
void *arg,
struct event *ev)
303 struct kqop *kqop = arg;
306 if (ev->ev_events & EV_SIGNAL) {
307 int nsignal = EVENT_SIGNAL(ev);
309 assert(nsignal >= 0 && nsignal < NSIG);
/* Only the first event for a given signal installs the kernel filter
 * and the process signal handler; later ones just join the list. */
310 if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
313 memset(&kev, 0,
sizeof(kev));
315 kev.filter = EVFILT_SIGNAL;
/* udata carries the list head so dispatch can find every event
 * registered for this signal (see the EVFILT_SIGNAL branch above). */
317 kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]);
/* Register immediately (with a timeout declared in unseen lines)
 * rather than batching through kq_insert. */
322 if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
325 if (_evsignal_set_handler(ev->ev_base, nsignal,
326 kq_sighandler) == -1)
330 TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev,
332 ev->ev_flags |= EVLIST_X_KQINKERNEL;
336 if (ev->ev_events & EV_READ) {
337 memset(&kev, 0,
sizeof(kev));
338 kev.ident = ev->ev_fd;
339 kev.filter = EVFILT_READ;
/* NOTE_EOF: ask for BSD-style EOF reporting on the read filter. */
342 kev.fflags = NOTE_EOF;
/* Non-persistent events are one-shot: the kernel auto-removes the
 * filter after the first trigger (dispatch clears our flag to match). */
345 if (!(ev->ev_events & EV_PERSIST))
346 kev.flags |= EV_ONESHOT;
347 kev.udata = PTR_TO_UDATA(ev);
349 if (kq_insert(kqop, &kev) == -1)
352 ev->ev_flags |= EVLIST_X_KQINKERNEL;
/* Write interest mirrors the read case, minus NOTE_EOF. */
355 if (ev->ev_events & EV_WRITE) {
356 memset(&kev, 0,
sizeof(kev));
357 kev.ident = ev->ev_fd;
358 kev.filter = EVFILT_WRITE;
360 if (!(ev->ev_events & EV_PERSIST))
361 kev.flags |= EV_ONESHOT;
362 kev.udata = PTR_TO_UDATA(ev);
364 if (kq_insert(kqop, &kev) == -1)
367 ev->ev_flags |= EVLIST_X_KQINKERNEL;
/* Unregister an event from the kqueue backend — the mirror of kq_add.
 * (Return statements and some lines, e.g. kev.ident for the signal
 * case, are not visible in this chunk.) */
374 kq_del(
void *arg,
struct event *ev)
376 struct kqop *kqop = arg;
/* Nothing to do unless kq_add actually got this event into the kernel
 * (one-shot events may already have been auto-removed by dispatch). */
379 if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
382 if (ev->ev_events & EV_SIGNAL) {
383 int nsignal = EVENT_SIGNAL(ev);
386 assert(nsignal >= 0 && nsignal < NSIG);
387 TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next);
/* Only when the last event for this signal is gone do we remove the
 * kernel filter and restore the previous signal handler. */
388 if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
389 memset(&kev, 0,
sizeof(kev));
391 kev.filter = EVFILT_SIGNAL;
392 kev.flags = EV_DELETE;
/* Deleted immediately, matching the immediate registration in kq_add. */
397 if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
400 if (_evsignal_restore_handler(ev->ev_base,
405 ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
/* Read/write interest: queue EV_DELETE records through the normal
 * batched path. */
409 if (ev->ev_events & EV_READ) {
410 memset(&kev, 0,
sizeof(kev));
411 kev.ident = ev->ev_fd;
412 kev.filter = EVFILT_READ;
413 kev.flags = EV_DELETE;
415 if (kq_insert(kqop, &kev) == -1)
418 ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
421 if (ev->ev_events & EV_WRITE) {
422 memset(&kev, 0,
sizeof(kev));
423 kev.ident = ev->ev_fd;
424 kev.filter = EVFILT_WRITE;
425 kev.flags = EV_DELETE;
427 if (kq_insert(kqop, &kev) == -1)
430 ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
/* Tear down the backend state (lines freeing the change/result arrays
 * and closing the descriptor are not visible in this chunk). */
437 kq_dealloc(
struct event_base *base,
void *arg)
439 struct kqop *kqop = arg;
/* Only the process that created the kqueue (pid recorded in kq_init)
 * closes it — presumably so a fork()ed child cannot destroy the
 * parent's descriptor; the close itself is in an unseen line. */
445 if (kqop->kq >= 0 && kqop->pid == getpid())
/* Scrub the state before it is (presumably) freed by the caller. */
447 memset(kqop, 0,
sizeof(
struct kqop));