/* dhcpcd-ui: src/dhcpcd-curses/eloop.c
 * (upstream commit bac2e7df1c0833fcbaffd39ae8a0d63a3e215f1a) */
1 /*
2  * dhcpcd - DHCP client daemon
3  * Copyright (c) 2006-2015 Roy Marples <roy@marples.name>
4  * All rights reserved
5
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/time.h>
29
30 #include <assert.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <signal.h>
34 #include <stdlib.h>
35 #include <string.h>
36 #include <unistd.h>
37
38 /* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc */
39 #include "config.h"
40 #include "eloop.h"
41
42 #ifndef UNUSED
43 #define UNUSED(a) (void)((a))
44 #endif
45 #ifndef __unused
46 #ifdef __GNUC__
47 #define __unused   __attribute__((__unused__))
48 #else
49 #define __unused
50 #endif
51 #endif
52
53 #ifndef MSEC_PER_SEC
54 #define MSEC_PER_SEC    1000L
55 #define NSEC_PER_MSEC   1000000L
56 #endif
57
58 #if defined(HAVE_KQUEUE)
59 #include <sys/event.h>
60 #include <fcntl.h>
61 #ifdef __NetBSD__
62 /* udata is void * except on NetBSD
63  * lengths are int except on NetBSD */
64 #define UPTR(x) ((intptr_t)(x))
65 #define LENC(x) (x)
66 #else
67 #define UPTR(x) (x)
68 #define LENC(x) ((int)(x))
69 #endif
70 #define eloop_event_setup_fds(eloop)
71 #elif defined(HAVE_EPOLL)
72 #include <sys/epoll.h>
73 #define eloop_event_setup_fds(eloop)
74 #else
75 #include <poll.h>
76 static void
77 eloop_event_setup_fds(struct eloop *eloop)
78 {
79         struct eloop_event *e;
80         size_t i;
81
82         i = 0;
83         TAILQ_FOREACH(e, &eloop->events, next) {
84                 eloop->fds[i].fd = e->fd;
85                 eloop->fds[i].events = 0;
86                 if (e->read_cb)
87                         eloop->fds[i].events |= POLLIN;
88                 if (e->write_cb)
89                         eloop->fds[i].events |= POLLOUT;
90                 eloop->fds[i].revents = 0;
91                 e->pollfd = &eloop->fds[i];
92                 i++;
93         }
94 }
95
96 #ifndef pollts
97 /* Wrapper around pselect, to imitate the NetBSD pollts call. */
/* Wrapper around pselect(2), to imitate the NetBSD pollts(2) call.
 * Monitors POLLIN and POLLOUT (the only events eloop registers); the
 * original version only checked POLLIN, so write callbacks could
 * never fire on platforms using this fallback.  Descriptors < 0 are
 * skipped, as poll(2) does.
 * Returns the pselect(2) result: the number of ready descriptor bits,
 * 0 on timeout, or -1 with errno set. */
static int
pollts(struct pollfd * fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
	fd_set read_fds, write_fds;
	nfds_t n;
	int maxfd, r;

	FD_ZERO(&read_fds);
	FD_ZERO(&write_fds);
	maxfd = -1;
	for (n = 0; n < nfds; n++) {
		if (fds[n].fd < 0)
			continue;
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
		if (fds[n].events & POLLOUT) {
			FD_SET(fds[n].fd, &write_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
	}

	r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
	if (r > 0) {
		for (n = 0; n < nfds; n++) {
			fds[n].revents = 0;
			if (fds[n].fd < 0)
				continue;
			if (FD_ISSET(fds[n].fd, &read_fds))
				fds[n].revents |= POLLIN;
			if (FD_ISSET(fds[n].fd, &write_fds))
				fds[n].revents |= POLLOUT;
		}
	}

	return r;
}
126 #endif
127 #endif
128
129 int
130 eloop_event_add(struct eloop *eloop, int fd,
131     void (*read_cb)(void *), void *read_cb_arg,
132     void (*write_cb)(void *), void *write_cb_arg)
133 {
134         struct eloop_event *e;
135 #if defined(HAVE_KQUEUE)
136         struct kevent ke[2];
137 #elif defined(HAVE_EPOLL)
138         struct epoll_event epe;
139 #else
140         struct pollfd *nfds;
141 #endif
142
143         assert(eloop != NULL);
144         assert(read_cb != NULL || write_cb != NULL);
145         if (fd == -1) {
146                 errno = EINVAL;
147                 return -1;
148         }
149
150 #ifdef HAVE_EPOLL
151         memset(&epe, 0, sizeof(epe));
152         epe.data.fd = fd;
153         epe.events = EPOLLIN;
154         if (write_cb)
155                 epe.events |= EPOLLOUT;
156 #endif
157
158         /* We should only have one callback monitoring the fd */
159         TAILQ_FOREACH(e, &eloop->events, next) {
160                 if (e->fd == fd) {
161                         int error;
162
163 #if defined(HAVE_KQUEUE)
164                         EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
165                             0, 0, UPTR(e));
166                         if (write_cb)
167                                 EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
168                                     EV_ADD, 0, 0, UPTR(e));
169                         else if (e->write_cb)
170                                 EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
171                                     EV_DELETE, 0, 0, UPTR(e));
172                         error = kevent(eloop->poll_fd, ke,
173                             e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
174 #elif defined(HAVE_EPOLL)
175                         epe.data.ptr = e;
176                         error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
177                             fd, &epe);
178 #else
179                         error = 0;
180 #endif
181                         if (read_cb) {
182                                 e->read_cb = read_cb;
183                                 e->read_cb_arg = read_cb_arg;
184                         }
185                         if (write_cb) {
186                                 e->write_cb = write_cb;
187                                 e->write_cb_arg = write_cb_arg;
188                         }
189                         eloop_event_setup_fds(eloop);
190                         return error;
191                 }
192         }
193
194         /* Allocate a new event if no free ones already allocated */
195         if ((e = TAILQ_FIRST(&eloop->free_events))) {
196                 TAILQ_REMOVE(&eloop->free_events, e, next);
197         } else {
198                 e = malloc(sizeof(*e));
199                 if (e == NULL)
200                         goto err;
201         }
202
203         /* Ensure we can actually listen to it */
204         eloop->events_len++;
205 #if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL)
206         if (eloop->events_len > eloop->fds_len) {
207                 nfds = realloc(eloop->fds,
208                     sizeof(*eloop->fds) * (eloop->fds_len + 5));
209                 if (nfds == NULL)
210                         goto err;
211                 eloop->fds_len += 5;
212                 eloop->fds = nfds;
213         }
214 #endif
215
216         /* Now populate the structure and add it to the list */
217         e->fd = fd;
218         e->read_cb = read_cb;
219         e->read_cb_arg = read_cb_arg;
220         e->write_cb = write_cb;
221         e->write_cb_arg = write_cb_arg;
222
223 #if defined(HAVE_KQUEUE)
224         if (read_cb != NULL)
225                 EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
226                     EV_ADD, 0, 0, UPTR(e));
227         if (write_cb != NULL)
228                 EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
229                     EV_ADD, 0, 0, UPTR(e));
230         if (kevent(eloop->poll_fd, ke, write_cb ? 2 : 1, NULL, 0, NULL) == -1)
231                 goto err;
232 #elif defined(HAVE_EPOLL)
233         epe.data.ptr = e;
234         if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
235                 goto err;
236 #endif
237
238         /* The order of events should not matter.
239          * However, some PPP servers love to close the link right after
240          * sending their final message. So to ensure dhcpcd processes this
241          * message (which is likely to be that the DHCP addresses are wrong)
242          * we insert new events at the queue head as the link fd will be
243          * the first event added. */
244         TAILQ_INSERT_HEAD(&eloop->events, e, next);
245         eloop_event_setup_fds(eloop);
246         return 0;
247
248 err:
249         if (e) {
250                 eloop->events_len--;
251                 TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
252         }
253         return -1;
254 }
255
/*
 * Stop monitoring fd.
 * If write_only is non-zero and a read callback is registered, only the
 * write callback is removed and the fd stays monitored for reading.
 * Otherwise the whole event is removed from the loop and recycled via
 * the free list.  An fd that was never added is silently ignored.
 * Kernel-queue errors here are deliberately not reported.
 */
void
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd) {
			if (write_only && e->read_cb != NULL) {
				/* Drop just the write side. */
				if (e->write_cb != NULL) {
					e->write_cb = NULL;
					e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
					EV_SET(&ke[0], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
					kevent(eloop->poll_fd, ke, 1, NULL, 0,
					    NULL);
#elif defined(HAVE_EPOLL)
					/* Re-arm for reading only. */
					memset(&epe, 0, sizeof(epe));
					epe.data.fd = e->fd;
					epe.data.ptr = e;
					epe.events = EPOLLIN;
					epoll_ctl(eloop->poll_fd,
					    EPOLL_CTL_MOD, fd, &epe);
#endif
				}
			} else {
				/* Remove the whole event. */
				TAILQ_REMOVE(&eloop->events, e, next);
#if defined(HAVE_KQUEUE)
				EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
				    EV_DELETE, 0, 0, UPTR(NULL));
				if (e->write_cb)
					EV_SET(&ke[1], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
				kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1,
				    NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
				/* A NULL event pointer is safe here: it was
				 * only a problem on kernels older than the
				 * fix for EPOLL_CTL_DEL with a NULL event,
				 * and we rely on epoll_pwait, which was
				 * added after that fix. */
				epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL,
				    fd, NULL);
#endif
				/* Recycle rather than free the structure. */
				TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
				eloop->events_len--;
			}
			eloop_event_setup_fds(eloop);
			break;
		}
	}
}
315
316 int
317 eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
318     const struct timespec *when, void (*callback)(void *), void *arg)
319 {
320         struct timespec now, w;
321         struct eloop_timeout *t, *tt = NULL;
322
323         assert(eloop != NULL);
324         assert(when != NULL);
325         assert(callback != NULL);
326
327         clock_gettime(CLOCK_MONOTONIC, &now);
328         timespecadd(&now, when, &w);
329         /* Check for time_t overflow. */
330         if (timespeccmp(&w, &now, <)) {
331                 errno = ERANGE;
332                 return -1;
333         }
334
335         /* Remove existing timeout if present */
336         TAILQ_FOREACH(t, &eloop->timeouts, next) {
337                 if (t->callback == callback && t->arg == arg) {
338                         TAILQ_REMOVE(&eloop->timeouts, t, next);
339                         break;
340                 }
341         }
342
343         if (t == NULL) {
344                 /* No existing, so allocate or grab one from the free pool */
345                 if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
346                         TAILQ_REMOVE(&eloop->free_timeouts, t, next);
347                 } else {
348                         if ((t = malloc(sizeof(*t))) == NULL)
349                                 return -1;
350                 }
351         }
352
353         t->when = w;
354         t->callback = callback;
355         t->arg = arg;
356         t->queue = queue;
357
358         /* The timeout list should be in chronological order,
359          * soonest first. */
360         TAILQ_FOREACH(tt, &eloop->timeouts, next) {
361                 if (timespeccmp(&t->when, &tt->when, <)) {
362                         TAILQ_INSERT_BEFORE(tt, t, next);
363                         return 0;
364                 }
365         }
366         TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
367         return 0;
368 }
369
/* Schedule callback to run `when` whole seconds from now on `queue`. */
int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, time_t when,
    void (*callback)(void *), void *arg)
{
	struct timespec ts = { .tv_sec = when, .tv_nsec = 0 };

	return eloop_q_timeout_add_tv(eloop, queue, &ts, callback, arg);
}
380
381 int
382 eloop_q_timeout_add_msec(struct eloop *eloop, int queue, long when,
383     void (*callback)(void *), void *arg)
384 {
385         struct timespec tv;
386
387         tv.tv_sec = when / MSEC_PER_SEC;
388         tv.tv_nsec = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
389         return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
390 }
391
392 #if !defined(HAVE_KQUEUE)
/*
 * Queue a callback to run before any regular timeout on the next loop
 * iteration (used by eloop_signal3 to escape signal-handler context).
 * There is only a single slot (timeout0); the assert enforces that it
 * is free.  NOTE(review): two "now" timeouts queued back-to-back
 * (e.g. two signals before the loop runs) would trip this assert —
 * verify that cannot happen in practice.
 * Always returns 0.
 */
static int
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
{

	assert(eloop->timeout0 == NULL);
	eloop->timeout0 = callback;
	eloop->timeout0_arg = arg;
	return 0;
}
403 #endif
404
405 void
406 eloop_q_timeout_delete(struct eloop *eloop, int queue,
407     void (*callback)(void *), void *arg)
408 {
409         struct eloop_timeout *t, *tt;
410
411         assert(eloop != NULL);
412
413         TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
414                 if ((queue == 0 || t->queue == queue) &&
415                     t->arg == arg &&
416                     (!callback || t->callback == callback))
417                 {
418                         TAILQ_REMOVE(&eloop->timeouts, t, next);
419                         TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
420                 }
421         }
422 }
423
/*
 * Ask eloop_start() to return `code` at the top of its next iteration.
 * Safe to call from within an event or timeout callback, since the
 * exitnow flag is checked before anything else each time around.
 */
void
eloop_exit(struct eloop *eloop, int code)
{

	assert(eloop != NULL);

	eloop->exitcode = code;
	eloop->exitnow = 1;
}
433
434 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
/*
 * Create the kernel polling descriptor (kqueue or epoll), with
 * close-on-exec set, storing it in eloop->poll_fd.
 * Returns the new descriptor, or -1 on failure (poll_fd reset to -1
 * on the fcntl fallback path).  Only compiled for the kqueue/epoll
 * backends.
 */
static int
eloop_open(struct eloop *eloop)
{

#if defined(HAVE_KQUEUE1)
	return (eloop->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
	int i;

	/* No kqueue1(): set FD_CLOEXEC by hand after creation. */
	if ((eloop->poll_fd = kqueue()) == -1)
		return -1;
	if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
	    fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
	{
		close(eloop->poll_fd);
		eloop->poll_fd = -1;
		return -1;
	}

	return eloop->poll_fd;
#elif defined (HAVE_EPOLL)
	return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
#endif
}
459
/*
 * Rebuild the kernel event queue from scratch: close any existing poll
 * descriptor, open a new one, and re-register the tracked signals
 * (kqueue backend only) plus every current event.  Used by
 * eloop_signal_set_cb() to apply a changed signal list.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): with no signals and no events the kqueue branch calls
 * malloc(0), which may legally return NULL and report a spurious
 * failure — verify whether that combination can occur.
 */
int
eloop_requeue(struct eloop *eloop)
{
	struct eloop_event *e;
	int error;
#if defined(HAVE_KQUEUE)
	size_t i;
	struct kevent *ke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	if (eloop->poll_fd != -1)
		close(eloop->poll_fd);
	if (eloop_open(eloop) == -1)
		return -1;
#if defined (HAVE_KQUEUE)
	/* Count how many kevents we need: one per signal, one per
	 * event, plus one more for each event with a write callback. */
	i = eloop->signals_len;
	TAILQ_FOREACH(e, &eloop->events, next) {
		i++;
		if (e->write_cb)
			i++;
	}

	if ((ke = malloc(sizeof(*ke) * i)) == NULL)
		return -1;

	for (i = 0; i < eloop->signals_len; i++)
		EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
		    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));

	/* i now continues past the signal entries. */
	TAILQ_FOREACH(e, &eloop->events, next) {
		EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
		i++;
		if (e->write_cb) {
			EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
			    EV_ADD, 0, 0, UPTR(e));
			i++;
		}
	}

	/* Submit the whole change list in one go. */
	error =  kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
	free(ke);

#elif defined(HAVE_EPOLL)

	/* epoll receives signals via epoll_pwait, so only the fd
	 * events need re-registering. */
	error = 0;
	TAILQ_FOREACH(e, &eloop->events, next) {
		memset(&epe, 0, sizeof(epe));
		epe.data.fd = e->fd;
		epe.events = EPOLLIN;
		if (e->write_cb)
			epe.events |= EPOLLOUT;
		epe.data.ptr = e;
		if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
			error = -1;
	}
#endif

	return error;
}
524 #endif
525
526 int
527 eloop_signal_set_cb(struct eloop *eloop,
528     const int *signals, size_t signals_len,
529     void (*signal_cb)(int, void *), void *signal_cb_ctx)
530 {
531
532         assert(eloop != NULL);
533
534         eloop->signals = signals;
535         eloop->signals_len = signals_len;
536         eloop->signal_cb = signal_cb;
537         eloop->signal_cb_ctx = signal_cb_ctx;
538         return eloop_requeue(eloop);
539 }
540
541 #ifndef HAVE_KQUEUE
/* Bounce buffer used to move signal handling out of handler context:
 * the sigaction hook (eloop_signal3) records the delivered signal here
 * and the main loop later dispatches it to the user callback. */
struct eloop_siginfo {
	int sig;		/* signal number delivered */
	struct eloop *eloop;	/* loop that should dispatch it */
};
/* Single pending-signal slot; see the assert in eloop_timeout_add_now. */
static struct eloop_siginfo _eloop_siginfo;
/* The loop registered by eloop_start(); only one loop can take signals. */
static struct eloop *_eloop;
548
549 static void
550 eloop_signal1(void *arg)
551 {
552         struct eloop_siginfo *si = arg;
553
554         si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
555 }
556
/*
 * SA_SIGINFO signal handler installed by eloop_signal_mask() for the
 * non-kqueue backends.  Runs in signal-handler context.
 */
static void
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
{

	/* So that we can operate safely under a signal we instruct
	 * eloop to pass a copy of the signal details to eloop_signal1
	 * as the very first thing the loop does on its next iteration. */
	_eloop_siginfo.eloop = _eloop;
	_eloop_siginfo.sig = sig;
	eloop_timeout_add_now(_eloop_siginfo.eloop,
	    eloop_signal1, &_eloop_siginfo);
}
569 #endif
570
/*
 * Block all signals, returning the previous mask in `oldset` so the
 * caller can pass it to eloop_start() for atomic unmasking while the
 * loop waits (epoll_pwait/pollts).  For the non-kqueue backends this
 * also installs eloop_signal3() as the SA_SIGINFO handler for every
 * signal previously registered with eloop_signal_set_cb(); kqueue
 * receives signals as kernel events and needs no handlers.
 * Returns 0 on success, -1 on sigprocmask/sigaction failure.
 */
int
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
{
	sigset_t newset;
#ifndef HAVE_KQUEUE
	size_t i;
	struct sigaction sa;
#endif

	assert(eloop != NULL);

	sigfillset(&newset);
	if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
		return -1;

#ifdef HAVE_KQUEUE
	UNUSED(eloop);
#else
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = eloop_signal3;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);

	for (i = 0; i < eloop->signals_len; i++) {
		if (sigaction(eloop->signals[i], &sa, NULL) == -1)
			return -1;
	}
#endif
	return 0;
}
601
602 struct eloop *
603 eloop_new(void)
604 {
605         struct eloop *eloop;
606         struct timespec now;
607
608         /* Check we have a working monotonic clock. */
609         if (clock_gettime(CLOCK_MONOTONIC, &now) == -1)
610                 return NULL;
611
612         eloop = calloc(1, sizeof(*eloop));
613         if (eloop) {
614                 TAILQ_INIT(&eloop->events);
615                 TAILQ_INIT(&eloop->free_events);
616                 TAILQ_INIT(&eloop->timeouts);
617                 TAILQ_INIT(&eloop->free_timeouts);
618                 eloop->exitcode = EXIT_FAILURE;
619 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
620                 eloop->poll_fd = -1;
621                 eloop_open(eloop);
622 #endif
623         }
624
625         return eloop;
626 }
627
628 void eloop_free(struct eloop *eloop)
629 {
630         struct eloop_event *e;
631         struct eloop_timeout *t;
632
633         if (eloop == NULL)
634                 return;
635
636         while ((e = TAILQ_FIRST(&eloop->events))) {
637                 TAILQ_REMOVE(&eloop->events, e, next);
638                 free(e);
639         }
640         while ((e = TAILQ_FIRST(&eloop->free_events))) {
641                 TAILQ_REMOVE(&eloop->free_events, e, next);
642                 free(e);
643         }
644         while ((t = TAILQ_FIRST(&eloop->timeouts))) {
645                 TAILQ_REMOVE(&eloop->timeouts, t, next);
646                 free(t);
647         }
648         while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
649                 TAILQ_REMOVE(&eloop->free_timeouts, t, next);
650                 free(t);
651         }
652 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
653         close(eloop->poll_fd);
654 #else
655         free(eloop->fds);
656 #endif
657         free(eloop);
658 }
659
/*
 * Run the event loop until eloop_exit() is called or there is nothing
 * left to do (no timeouts pending and no events registered).
 *
 * signals: previous signal mask (from eloop_signal_mask()) to restore
 * atomically while waiting (epoll_pwait/pollts); ignored by the kqueue
 * backend, which receives signals as EVFILT_SIGNAL events instead.
 *
 * Returns the code passed to eloop_exit(), or -errno on a wait error.
 */
int
eloop_start(struct eloop *eloop, sigset_t *signals)
{
	int n;
	struct eloop_event *e;
	struct eloop_timeout *t;
	struct timespec now, ts, *tsp;
	void (*t0)(void *);
#if defined(HAVE_KQUEUE)
	struct kevent ke;
	UNUSED(signals);
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif
#ifndef HAVE_KQUEUE
	int timeout;

	/* Stash the loop for the signal handler trampoline
	 * (eloop_signal3); only one loop can receive signals. */
	_eloop = eloop;
#endif

	assert(eloop != NULL);

	for (;;) {
		if (eloop->exitnow)
			break;

		/* Run all timeouts first */
		if (eloop->timeout0) {
			/* timeout0 is the "run immediately" slot used to
			 * escape signal-handler context; clear it before
			 * calling so the callback may queue another. */
			t0 = eloop->timeout0;
			eloop->timeout0 = NULL;
			t0(eloop->timeout0_arg);
			continue;
		}
		if ((t = TAILQ_FIRST(&eloop->timeouts))) {
			/* The list is sorted, so only the head can be due. */
			clock_gettime(CLOCK_MONOTONIC, &now);
			if (timespeccmp(&now, &t->when, >)) {
				TAILQ_REMOVE(&eloop->timeouts, t, next);
				t->callback(t->arg);
				TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
				continue;
			}
			timespecsub(&t->when, &now, &ts);
			tsp = &ts;
		} else
			/* No timeouts, so wait forever */
			tsp = NULL;

		/* Nothing to wait on at all: leave the loop. */
		if (tsp == NULL && eloop->events_len == 0)
			break;

#ifndef HAVE_KQUEUE
		/* poll/epoll take milliseconds: convert, rounding up and
		 * clamping at INT_MAX (-1 means wait forever). */
		if (tsp == NULL)
			timeout = -1;
		else if (tsp->tv_sec > INT_MAX / 1000 ||
		    (tsp->tv_sec == INT_MAX / 1000 &&
		    (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))
			/* NOTE(review): the second comparison looks like it
			 * should be against INT_MAX % 1000 (remaining
			 * milliseconds), not % 1000000 — verify.  It only
			 * affects clamping at timeouts near INT_MAX ms. */
			timeout = INT_MAX;
		else
			timeout = (int)(tsp->tv_sec * 1000 +
			    (tsp->tv_nsec + 999999) / 1000000);
#endif

#if defined(HAVE_KQUEUE)
		n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
		if (signals)
			n = epoll_pwait(eloop->poll_fd, &epe, 1,
			    timeout, signals);
		else
			n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
#else
		if (signals)
			n = pollts(eloop->fds, (nfds_t)eloop->events_len,
			    tsp, signals);
		else
			n = poll(eloop->fds, (nfds_t)eloop->events_len,
			    timeout);
#endif
		if (n == -1) {
			if (errno == EINTR)
				continue;
			return -errno;
		}

		/* Process any triggered events.
		 * We go back to the start after calling each callback in case
		 * the current event or next event is removed. */
#if defined(HAVE_KQUEUE)
		if (n) {
			if (ke.filter == EVFILT_SIGNAL) {
				eloop->signal_cb((int)ke.ident,
				    eloop->signal_cb_ctx);
				continue;
			}
			e = (struct eloop_event *)ke.udata;
			if (ke.filter == EVFILT_WRITE) {
				e->write_cb(e->write_cb_arg);
				continue;
			} else if (ke.filter == EVFILT_READ) {
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#elif defined(HAVE_EPOLL)
		if (n) {
			e = (struct eloop_event *)epe.data.ptr;
			/* Service at most one callback per wake-up; errors
			 * and hangups are handed to the read callback. */
			if (epe.events & EPOLLOUT && e->write_cb != NULL) {
				e->write_cb(e->write_cb_arg);
				continue;
			}
			if (epe.events &
			    (EPOLLIN | EPOLLERR | EPOLLHUP) &&
			    e->read_cb != NULL)
			{
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#else
		if (n > 0) {
			/* Service only the first ready event; `break`
			 * restarts the outer loop, so callbacks may safely
			 * mutate the event list. */
			TAILQ_FOREACH(e, &eloop->events, next) {
				if (e->pollfd->revents & POLLOUT &&
				    e->write_cb != NULL)
				{
					e->write_cb(e->write_cb_arg);
					break;
				}
				if (e->pollfd->revents && e->read_cb != NULL) {
					e->read_cb(e->read_cb_arg);
					break;
				}
			}
		}
#endif
	}

	return eloop->exitcode;
}