EV-4.33/0000755000000000000000000000000013634420051010451 5ustar rootrootEV-4.33/t/0000755000000000000000000000000013634420051010714 5ustar rootrootEV-4.33/t/02_once.t0000644000000000000000000000150511462612620012331 0ustar rootrootBEGIN { if (exists $ENV{AUTOMATED_TESTING}) { print "1..0 # Skipped: Too many broken cpan tester setups.\n"; exit; } } BEGIN { $| = 1; print "1..30\n"; } no warnings; use strict; use Socket; use EV; for my $it ("", 1, 2) { for my $i (3..5) { EV::once undef, 0, ($i - 3) * 0.1 + 0.2, sub { print $_[0] == EV::TIMER ? "" : "not ", "ok $it$i\n"; }; } socketpair my $s1, my $s2, AF_UNIX, SOCK_STREAM, PF_UNSPEC; EV::once $s1, EV::WRITE, 0.1, sub { print $_[0] & EV::WRITE ? "" : "not ", "ok ${it}2\n"; }; print "ok ${it}1\n"; EV::run; print "ok ${it}6\n"; EV::signal INT => sub { }; print "ok ${it}7\n"; EV::async sub { }; print "ok ${it}8\n"; EV::default_destroy; print "ok ${it}9\n"; EV::default_loop; print "ok ", ${it}*10 + 10, "\n"; } EV-4.33/t/11_signal.t0000644000000000000000000000316311462612621012665 0ustar rootrootBEGIN { unless (exists $SIG{USR1}) { print < sub { print "not ok 2\n" }; print "ok 2\n"; my $usr2 = EV::signal USR2 => sub { print "ok 10\n" }; print "ok 3\n"; my $loop = new EV::Loop; print "ok 4\n"; my $usr1_0 = $loop->signal (USR1 => sub { print "not ok 8\n" }); my $usr1_1 = $loop->signal (USR1 => sub { print "ok 8\n"; $_[0]->stop }); my $usr1_2 = $loop->signal (USR1 => sub { print "not ok 8\n" }); print "ok 5\n"; kill USR1 => $$; kill USR2 => $$; print "ok 6\n"; undef $usr1_0; undef $usr1_2; print "ok 7\n"; $loop->run; print "ok 9\n"; EV::run (EV::RUN_ONCE); print "ok 11\n"; $usr2 = EV::signal USR2 => sub { print "ok 13\n" }; $usr1_0 = EV::signal USR1 => sub { print "ok 15\n" }; print "ok 12\n"; kill USR2 => $$; EV::run (EV::RUN_NOWAIT); print "ok 14\n"; kill USR1 => $$; EV::run (EV::RUN_NOWAIT); print "ok 16\n"; my $sig = $loop->signal (INT => sub { }); print "ok 17\n"; print eval { $loop->signal (USR2 => sub { }); 1 } ? "not " : "", "ok 18 # $@\n"; print eval { $sig->set ("USR2"); 1 } ? "not " : "", "ok 19 # $@\n"; $sig = $loop->signal (INT => sub { }); print eval { $sig->signal ("USR2"); 1 } ? "not " : "", "ok 20 # $@\n"; print eval { $sig->signal ("USR2"); 1 } ? "" : "not ", "ok 21 # $@\n"; # now inactive print eval { $sig->start; 1 } ? "not " : "", "ok 22 # $@\n"; print eval { $sig->signal ("USR2"); 1 } ? "" : "not ", "ok 23 # $@\n"; # now inactive $sig->signal ("INT"); print eval { $sig->start; 1 } ? "" : "not ", "ok 24 # $@\n"; EV-4.33/t/04_stat.t0000644000000000000000000000171011462612620012360 0ustar rootrootBEGIN { $| = 1; print "1..14\n"; } no warnings; use strict; use File::Temp; use EV; my $fh = new File::Temp UNLINK => 1; my $w = EV::stat "$fh", 0.1, sub { print "ok 5\n"; print 1 == $_[0]->prev ? "" : "not ", "ok 6\n"; print 13 == scalar (() = $_[0]->prev) ? "" : "not ", "ok 7\n"; print "0" eq -s _ ? "" : "not ", "ok 8\n"; print 1 == ($_[0]->prev)[3] ? "" : "not ", "ok 9\n"; print 0 == $_[0]->attr ? "" : "not ", "ok 10\n"; print 0 == ($_[0]->attr)[3] ? "" : "not ", "ok 11\n"; print 0 == $_[0]->stat ? "" : "not ", "ok 12\n"; print 0 == ($_[0]->stat)[3] ? "" : "not ", "ok 13\n"; EV::break; }; my $t = EV::timer 0.2, 0, sub { print "ok 2\n"; EV::break; }; print $w->stat ? 
"" : "not ", "ok 1\n"; EV::run; print "ok 3\n"; # delete the file, as windows will not update any stats otherwise :( undef $fh; my $t = EV::timer 0.2, 0, sub { print "no ok 5\n"; EV::break; }; print "ok 4\n"; EV::run; print "ok 14\n"; EV-4.33/t/01_timer.t0000644000000000000000000000303411462612620012523 0ustar rootrootBEGIN { # many testers have totally overloaded machines with virtual machines # running backwards in time etc. etc. if (exists $ENV{AUTOMATED_TESTING}) { print "1..0 # Skipped: Too many broken cpan tester setups.\n"; exit; } } BEGIN { $| = 1; print "1..6002\n"; } no warnings; use strict; use EV; my $fudge = 0.02; # allow rt and monotonic clock to disagree by this much my $id = 1; my @timer; my @periodic; my $base = EV::now; my $prev = EV::now; for my $i (1..1000) { my $t = $i * $i * 1.735435336; $t -= int $t; push @timer, EV::timer $t, 0, sub { my $now = EV::now; EV::default_loop->verify; print $now + $fudge >= $prev ? "" : "not ", "ok ", ++$id, " # t0 $i $now + $fudge >= $prev\n"; print $now + $fudge >= $base + $t ? "" : "not ", "ok ", ++$id, " # t1 $i $now + $fudge >= $base + $t\n"; unless ($id % 3) { $t *= 0.0625; $_[0]->set ($t); $_[0]->start; $t = $now + $t - $base; } $prev = $now; }; my $t = $i * $i * 1.375475771; $t -= int $t; push @periodic, EV::periodic $base + $t, 0, 0, sub { my $now = EV::now; EV::default_loop->verify; print $now >= $prev ? "" : "not ", "ok ", ++$id, " # p0 $i $now >= $prev\n"; print $now >= $base + $t ? "" : "not ", "ok ", ++$id, " # p1 $i $now >= $base + $t\n"; unless ($id % 3) { $t *= 1.0625; $_[0]->set ($base + $t); $_[0]->start; } $prev = $now; }; } print "ok 1\n"; EV::run; print "ok 6002\n"; EV-4.33/t/05_priority.t0000644000000000000000000000131711462612621013273 0ustar rootrootBEGIN { $| = 1; print "1..9\n"; } no warnings; use strict; use EV; my $t0 = EV::timer -1, 0, sub { print "ok 4\n" }; my $t_ = EV::timer -1, 0, sub { print "ok 5\n" }; $t_->priority (-1); my $t1 = EV::timer -1, 0, sub { print "ok 3\n" }; $t1->priority ( 1); my $i2 = EV::idle sub { print EV::iteration == 1 ? "" : "not ", "ok 2\n"; $_[0]->stop }; $i2->priority (10); my $i0 = EV::idle sub { print EV::iteration == 3 ? "" : "not ", "ok 7\n"; $_[0]->stop }; my $i1 = EV::idle sub { print EV::iteration == 2 ? "" : "not ", "ok 6\n"; $_[0]->stop }; $i1->priority ( 1); my $i_ = EV::idle sub { print EV::iteration == 4 ? "" : "not ", "ok 8\n"; $_[0]->stop }; $i_->priority (-1); print "ok 1\n"; EV::run; print "ok 9\n"; EV-4.33/t/07_loop_timer.t0000644000000000000000000000300511462612621013561 0ustar rootrootBEGIN { # many testers have totally overloaded machines with virtual machines # running backwards in time etc. etc. if (exists $ENV{AUTOMATED_TESTING}) { print "1..0 # Skipped: Too many broken cpan tester setups.\n"; exit; } } BEGIN { $| = 1; print "1..752\n"; } no warnings; use strict; use EV; my $l = new EV::Loop; my $fudge = 0.02; # allow rt and monotonic clock to disagree by this much my $id = 1; my @timer; my @periodic; my $base = $l->now; my $prev = $l->now; for my $i (1..125) { my $t = $i * $i * 1.735435336; $t -= int $t; push @timer, $l->timer ($t, 0, sub { my $now = $_[0]->loop->now; print $now + $fudge >= $prev ? "" : "not ", "ok ", ++$id, " # t0 $i $now + $fudge >= $prev\n"; print $now + $fudge >= $base + $t ? 
"" : "not ", "ok ", ++$id, " # t1 $i $now + $fudge >= $base + $t\n"; unless ($id % 3) { $t *= 0.0625; $_[0]->set ($t); $_[0]->start; $t = $now + $t - $base; } $prev = $now; }); my $t = $i * $i * 1.375475771; $t -= int $t; push @periodic, $l->periodic ($base + $t, 0, 0, sub { my $now = $l->now; print $now >= $prev ? "" : "not ", "ok ", ++$id, " # p0 $i $now >= $prev\n"; print $now >= $base + $t ? "" : "not ", "ok ", ++$id, " # p1 $i $now >= $base + $t\n"; unless ($id % 3) { $t *= 1.0625; $_[0]->set ($base + $t); $_[0]->start; } $prev = $now; }); } EV::run; print "ok 1\n"; $l->loop; print "ok 752\n"; EV-4.33/t/00_load.t0000644000000000000000000000074211462612620012324 0ustar rootrootBEGIN { $| = 1; print "1..5\n"; } BEGIN { $^W = 0; # work around some bugs in perl print eval { require EV } ? "" : "not ", "ok 1 # $@\n"; print eval { require EV::MakeMaker } ? "" : "not ", "ok 2 # $@\n"; } my $w = EV::idle sub { print "not ok 3\n"; $_[0]->stop }; $w->feed_event (EV::CUSTOM); $w->stop; EV::run; print "ok 3\n"; my $w = EV::idle sub { print "ok 4\n"; $_[0]->stop }; $w->feed_event (EV::CUSTOM); $w->clear_pending; EV::loop; print "ok 5\n"; EV-4.33/t/03_keepalive.t0000644000000000000000000000115111462612620013350 0ustar rootrootBEGIN { if (exists $ENV{AUTOMATED_TESTING}) { print "1..0 # Skipped: Too many broken cpan tester setups.\n"; exit; } } BEGIN { $| = 1; print "1..8\n"; } no warnings; use strict; use EV; my $timer = EV::timer_ns 1, 0.3, sub { print "ok 7\n"; $_[0]->stop }; $timer->keepalive (1); print "ok 1\n"; EV::run; print "ok 2\n"; $timer->start; $timer->keepalive (0); $timer->again; $timer->stop; $timer->start; my $timer2 = EV::timer -1, 0, sub { print "ok 4\n" }; $timer2->keepalive (0); print "ok 3\n"; EV::run; print "ok 5\n"; $timer->keepalive (1); print "ok 6\n"; EV::run; print "ok 8\n"; EV-4.33/t/09_brandon.t0000644000000000000000000000217611462612621013045 0ustar rootrootBEGIN { if (exists $ENV{AUTOMATED_TESTING}) { print "1..0 # Skipped: Too many broken cpan tester setups.\n"; exit; } } BEGIN { $| = 1; print "1..12\n"; } # a surprisingly effective test by brandon black no warnings; use strict; use EV; { my $a = EV::timer 1.6, 0, sub { print "not ok 2\n"; EV::break }; my $b = EV::timer 0.3, 0, sub { print "ok 2\n"; EV::break }; print "ok 1\n"; EV::run; print "ok 3\n"; } { my $b = EV::timer 0.3, 0, sub { print "ok 5\n"; EV::break }; my $a = EV::timer 1.6, 0, sub { print "not ok 5\n"; EV::break }; print "ok 4\n"; EV::run; print "ok 6\n"; } { my $a = EV::timer 1.9, 0, sub { print "not ok 8\n"; EV::break }; my $b = EV::timer 1.6, 0, sub { print "not ok 8\n"; EV::break }; my $c = EV::timer 0.3, 0, sub { print "ok 8\n"; EV::break }; print "ok 7\n"; EV::run; print "ok 9\n"; } { my $a = EV::timer 1.6, 0, sub { print "not ok 11\n"; EV::break }; my $b = EV::timer 0.3, 0, sub { print "ok 11\n"; EV::break }; my $c = EV::timer 1.9, 0, sub { print "not ok 11\n"; EV::break }; print "ok 10\n"; EV::run; print "ok 12\n"; } EV-4.33/t/06_loop_once.t0000644000000000000000000000107711462612621013373 0ustar rootrootBEGIN { if (exists $ENV{AUTOMATED_TESTING}) { print "1..0 # Skipped: Too many broken cpan tester setups.\n"; exit; } } BEGIN { $| = 1; print "1..6\n"; } no warnings; use strict; use Socket; use EV; my $l = new EV::Loop; for my $i (3..5) { $l->once (undef, 0, ($i - 3) * 0.1 + 0.2, sub { print $_[0] == EV::TIMER ? "" : "not ", "ok $i\n"; }); } socketpair my $s1, my $s2, AF_UNIX, SOCK_STREAM, PF_UNSPEC; $l->once ($s1, EV::WRITE, 0.1, sub { print $_[0] & EV::WRITE ? 
"" : "not ", "ok 2\n"; }); print "ok 1\n"; $l->run; print "ok 6\n"; EV-4.33/t/08_async.t0000644000000000000000000000204011462612621012524 0ustar rootrootBEGIN { $| = 1; print "1..13\n"; } no warnings; use strict; use EV; { my ($a1, $a2, $a3); $a3 = EV::async sub { print "not ok 1\n"; }; $a2 = EV::async sub { print "ok 5\n"; $a1->cb (sub { print "ok 6\n"; EV::break; }); $a1->send; }; $a1 = EV::async sub { print $a1->async_pending ? "not " : "", "ok 4\n"; $a2->send; }; print $a1->async_pending ? "not " : "", "ok 1\n"; $a1->send; print $a1->async_pending ? "" : "not ", "ok 2\n"; $a1->send; $a1->send; print "ok 3\n"; EV::run; print "ok 7\n"; } { my $l = new EV::Loop; my ($a1, $a2, $a3); $a3 = $l->async (sub { print "not ok 8\n"; }); $a2 = $l->async (sub { print "ok 11\n"; $a1->cb (sub { print "ok 12\n"; $l->break; }); $a1->send; }); $a1 = $l->async (sub { print "ok 10\n"; $a2->send; }); print "ok 8\n"; $a1->send; $a1->send; $a1->send; print "ok 9\n"; $l->run; print "ok 13\n"; } EV-4.33/libev/0000755000000000000000000000000013634420051011552 5ustar rootrootEV-4.33/libev/ev_linuxaio.c0000644000000000000000000005176613601426606014265 0ustar rootroot/* * libev linux aio fd activity backend * * Copyright (c) 2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* * general notes about linux aio: * * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in * 4.18 looks too good to be true: both watchers and events can be * batched, and events can even be handled in userspace using * a ring buffer shared with the kernel. watchers can be canceled * regardless of whether the fd has been closed. no problems with fork. 
* ok, the ring buffer is 200% undocumented (there isn't even a * header file), but otherwise, it's pure bliss! * b) ok, watchers are one-shot, so you have to re-arm active ones * on every iteration. so much for syscall-less event handling, * but at least these re-arms can be batched, no big deal, right? * c) well, linux as usual: the documentation lies to you: io_submit * sometimes returns EINVAL because the kernel doesn't feel like * handling your poll mask - ttys can be polled for POLLOUT, * POLLOUT|POLLIN, but polling for POLLIN fails. just great, * so we have to fall back to something else (hello, epoll), * but at least the fallback can be slow, because these are * exceptional cases, right? * d) hmm, you have to tell the kernel the maximum number of watchers * you want to queue when initialising the aio context. but of * course the real limit is magically calculated in the kernel, and * is often higher then we asked for. so we just have to destroy * the aio context and re-create it a bit larger if we hit the limit. * (starts to remind you of epoll? well, it's a bit more deterministic * and less gambling, but still ugly as hell). * e) that's when you find out you can also hit an arbitrary system-wide * limit. or the kernel simply doesn't want to handle your watchers. * what the fuck do we do then? you guessed it, in the middle * of event handling we have to switch to 100% epoll polling. and * that better is as fast as normal epoll polling, so you practically * have to use the normal epoll backend with all its quirks. * f) end result of this train wreck: it inherits all the disadvantages * from epoll, while adding a number on its own. why even bother to use * it? because if conditions are right and your fds are supported and you * don't hit a limit, this backend is actually faster, doesn't gamble with * your fds, batches watchers and events and doesn't require costly state * recreates. well, until it does. * g) all of this makes this backend use almost twice as much code as epoll. * which in turn uses twice as much code as poll. and that#s not counting * the fact that this backend also depends on the epoll backend, making * it three times as much code as poll, or kqueue. * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now * it's clear that whatever linux comes up with is far, far, far worse. */ #include /* actually linux/time.h, but we must assume they are compatible */ #include #include /*****************************************************************************/ /* syscall wrapdadoop - this section has the raw api/abi definitions */ #include /* no glibc wrappers */ /* aio_abi.h is not versioned in any way, so we cannot test for its existance */ #define IOCB_CMD_POLL 5 /* taken from linux/fs/aio.c. yup, that's a .c file. * not only is this totally undocumented, not even the source code * can tell you what the future semantics of compat_features and * incompat_features are, or what header_length actually is for. */ #define AIO_RING_MAGIC 0xa10a10a1 #define EV_AIO_RING_INCOMPAT_FEATURES 0 struct aio_ring { unsigned id; /* kernel internal index number */ unsigned nr; /* number of io_events */ unsigned head; /* Written to by userland or by kernel. 
*/ unsigned tail; unsigned magic; unsigned compat_features; unsigned incompat_features; unsigned header_length; /* size of aio_ring */ struct io_event io_events[0]; }; inline_size int evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp) { return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp); } inline_size int evsys_io_destroy (aio_context_t ctx_id) { return ev_syscall1 (SYS_io_destroy, ctx_id); } inline_size int evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[]) { return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp); } inline_size int evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result) { return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result); } inline_size int evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout) { return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout); } /*****************************************************************************/ /* actual backed implementation */ ecb_cold static int linuxaio_nr_events (EV_P) { /* we start with 16 iocbs and incraese from there * that's tiny, but the kernel has a rather low system-wide * limit that can be reached quickly, so let's be parsimonious * with this resource. * Rest assured, the kernel generously rounds up small and big numbers * in different ways (but doesn't seem to charge you for it). * The 15 here is because the kernel usually has a power of two as aio-max-nr, * and this helps to take advantage of that limit. */ /* we try to fill 4kB pages exactly. * the ring buffer header is 32 bytes, every io event is 32 bytes. * the kernel takes the io requests number, doubles it, adds 2 * and adds the ring buffer. * the way we use this is by starting low, and then roughly doubling the * size each time we hit a limit. */ int requests = 15 << linuxaio_iteration; int one_page = (4096 / sizeof (struct io_event) ) / 2; /* how many fit into one page */ int first_page = ((4096 - sizeof (struct aio_ring)) / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */ /* if everything fits into one page, use count exactly */ if (requests > first_page) /* otherwise, round down to full pages and add the first page */ requests = requests / one_page * one_page + first_page; return requests; } /* we use out own wrapper structure in case we ever want to do something "clever" */ typedef struct aniocb { struct iocb io; /*int inuse;*/ } *ANIOCBP; inline_size void linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count) { while (count--) { /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */ ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb)); /* full zero initialise is probably not required at the moment, but * this is not well documented, so we better do it. 
*/ memset (iocb, 0, sizeof (*iocb)); iocb->io.aio_lio_opcode = IOCB_CMD_POLL; iocb->io.aio_fildes = offset; base [offset++] = iocb; } } ecb_cold static void linuxaio_free_iocbp (EV_P) { while (linuxaio_iocbpmax--) ev_free (linuxaio_iocbps [linuxaio_iocbpmax]); linuxaio_iocbpmax = 0; /* next resize will completely reallocate the array, at some overhead */ } static void linuxaio_modify (EV_P_ int fd, int oev, int nev) { array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp); ANIOCBP iocb = linuxaio_iocbps [fd]; ANFD *anfd = &anfds [fd]; if (ecb_expect_false (iocb->io.aio_reqprio < 0)) { /* we handed this fd over to epoll, so undo this first */ /* we do it manually because the optimisations on epoll_modify won't do us any good */ epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0); anfd->emask = 0; iocb->io.aio_reqprio = 0; } else if (ecb_expect_false (iocb->io.aio_buf)) { /* iocb active, so cancel it first before resubmit */ /* this assumes we only ever get one call per fd per loop iteration */ for (;;) { /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */ if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0)) break; if (ecb_expect_true (errno == EINPROGRESS)) break; /* the EINPROGRESS test is for nicer error message. clumsy. */ if (errno != EINTR) { assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS)); break; } } /* increment generation counter to avoid handling old events */ ++anfd->egen; } iocb->io.aio_buf = (nev & EV_READ ? POLLIN : 0) | (nev & EV_WRITE ? POLLOUT : 0); if (nev) { iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32); /* queue iocb up for io_submit */ /* this assumes we only ever get one call per fd per loop iteration */ ++linuxaio_submitcnt; array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit); linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io; } } static void linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents) { epoll_poll (EV_A_ 0); } inline_speed void linuxaio_fd_rearm (EV_P_ int fd) { anfds [fd].events = 0; linuxaio_iocbps [fd]->io.aio_buf = 0; fd_change (EV_A_ fd, EV_ANFD_REIFY); } static void linuxaio_parse_events (EV_P_ struct io_event *ev, int nr) { while (nr) { int fd = ev->data & 0xffffffff; uint32_t gen = ev->data >> 32; int res = ev->res; assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax)); /* only accept events if generation counter matches */ if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen)) { /* feed events, we do not expect or handle POLLNVAL */ fd_event ( EV_A_ fd, (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); /* linux aio is oneshot: rearm fd. 
TODO: this does more work than strictly needed */ linuxaio_fd_rearm (EV_A_ fd); } --nr; ++ev; } } /* get any events from ring buffer, return true if any were handled */ static int linuxaio_get_events_from_ring (EV_P) { struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; unsigned head, tail; /* the kernel reads and writes both of these variables, */ /* as a C extension, we assume that volatile use here */ /* both makes reads atomic and once-only */ head = *(volatile unsigned *)&ring->head; ECB_MEMORY_FENCE_ACQUIRE; tail = *(volatile unsigned *)&ring->tail; if (head == tail) return 0; /* parse all available events, but only once, to avoid starvation */ if (ecb_expect_true (tail > head)) /* normal case around */ linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head); else /* wrapped around */ { linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); linuxaio_parse_events (EV_A_ ring->io_events, tail); } ECB_MEMORY_FENCE_RELEASE; /* as an extension to C, we hope that the volatile will make this atomic and once-only */ *(volatile unsigned *)&ring->head = tail; return 1; } inline_size int linuxaio_ringbuf_valid (EV_P) { struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; return ecb_expect_true (ring->magic == AIO_RING_MAGIC) && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */ } /* read at least one event from kernel, or timeout */ inline_size void linuxaio_get_events (EV_P_ ev_tstamp timeout) { struct timespec ts; struct io_event ioev[8]; /* 256 octet stack space */ int want = 1; /* how many events to request */ int ringbuf_valid = linuxaio_ringbuf_valid (EV_A); if (ecb_expect_true (ringbuf_valid)) { /* if the ring buffer has any events, we don't wait or call the kernel at all */ if (linuxaio_get_events_from_ring (EV_A)) return; /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */ if (!timeout) return; } else /* no ringbuffer, request slightly larger batch */ want = sizeof (ioev) / sizeof (ioev [0]); /* no events, so wait for some * for fairness reasons, we do this in a loop, to fetch all events */ for (;;) { int res; EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts); EV_ACQUIRE_CB; if (res < 0) if (errno == EINTR) /* ignored, retry */; else ev_syserr ("(libev) linuxaio io_getevents"); else if (res) { /* at least one event available, handle them */ linuxaio_parse_events (EV_A_ ioev, res); if (ecb_expect_true (ringbuf_valid)) { /* if we have a ring buffer, handle any remaining events in it */ linuxaio_get_events_from_ring (EV_A); /* at this point, we should have handled all outstanding events */ break; } else if (res < want) /* otherwise, if there were fewere events than we wanted, we assume there are no more */ break; } else break; /* no events from the kernel, we are done */ timeout = EV_TS_CONST (0.); /* only wait in the first iteration */ } } inline_size int linuxaio_io_setup (EV_P) { linuxaio_ctx = 0; return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx); } static void linuxaio_poll (EV_P_ ev_tstamp timeout) { int submitted; /* first phase: submit new iocbs */ /* io_submit might return less than the requested number of iocbs */ /* this is, afaics, only because of errors, but we go by the book and use a loop, */ /* which allows us to pinpoint the erroneous iocb */ for (submitted = 0; submitted < linuxaio_submitcnt; ) { int res = evsys_io_submit (linuxaio_ctx, 
linuxaio_submitcnt - submitted, linuxaio_submits + submitted); if (ecb_expect_false (res < 0)) if (errno == EINVAL) { /* This happens for unsupported fds, officially, but in my testing, * also randomly happens for supported fds. We fall back to good old * poll() here, under the assumption that this is a very rare case. * See https://lore.kernel.org/patchwork/patch/1047453/ to see * discussion about such a case (ttys) where polling for POLLIN * fails but POLLIN|POLLOUT works. */ struct iocb *iocb = linuxaio_submits [submitted]; epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events); iocb->aio_reqprio = -1; /* mark iocb as epoll */ res = 1; /* skip this iocb - another iocb, another chance */ } else if (errno == EAGAIN) { /* This happens when the ring buffer is full, or some other shit we * don't know and isn't documented. Most likely because we have too * many requests and linux aio can't be assed to handle them. * In this case, we try to allocate a larger ring buffer, freeing * ours first. This might fail, in which case we have to fall back to 100% * epoll. * God, how I hate linux not getting its act together. Ever. */ evsys_io_destroy (linuxaio_ctx); linuxaio_submitcnt = 0; /* rearm all fds with active iocbs */ { int fd; for (fd = 0; fd < linuxaio_iocbpmax; ++fd) if (linuxaio_iocbps [fd]->io.aio_buf) linuxaio_fd_rearm (EV_A_ fd); } ++linuxaio_iteration; if (linuxaio_io_setup (EV_A) < 0) { /* TODO: rearm all and recreate epoll backend from scratch */ /* TODO: might be more prudent? */ /* to bad, we can't get a new aio context, go 100% epoll */ linuxaio_free_iocbp (EV_A); ev_io_stop (EV_A_ &linuxaio_epoll_w); ev_ref (EV_A); linuxaio_ctx = 0; backend = EVBACKEND_EPOLL; backend_modify = epoll_modify; backend_poll = epoll_poll; } timeout = EV_TS_CONST (0.); /* it's easiest to handle this mess in another iteration */ return; } else if (errno == EBADF) { assert (("libev: event loop rejected bad fd", errno != EBADF)); fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes); res = 1; /* skip this iocb */ } else if (errno == EINTR) /* not seen in reality, not documented */ res = 0; /* silently ignore and retry */ else { ev_syserr ("(libev) linuxaio io_submit"); res = 0; } submitted += res; } linuxaio_submitcnt = 0; /* second phase: fetch and parse events */ linuxaio_get_events (EV_A_ timeout); } inline_size int linuxaio_init (EV_P_ int flags) { /* would be great to have a nice test for IOCB_CMD_POLL instead */ /* also: test some semi-common fd types, such as files and ttys in recommended_backends */ /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */ if (ev_linux_version () < 0x041300) return 0; if (!epoll_init (EV_A_ 0)) return 0; linuxaio_iteration = 0; if (linuxaio_io_setup (EV_A) < 0) { epoll_destroy (EV_A); return 0; } ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ); ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI); ev_io_start (EV_A_ &linuxaio_epoll_w); ev_unref (EV_A); /* watcher should not keep loop alive */ backend_modify = linuxaio_modify; backend_poll = linuxaio_poll; linuxaio_iocbpmax = 0; linuxaio_iocbps = 0; linuxaio_submits = 0; linuxaio_submitmax = 0; linuxaio_submitcnt = 0; return EVBACKEND_LINUXAIO; } inline_size void linuxaio_destroy (EV_P) { epoll_destroy (EV_A); linuxaio_free_iocbp (EV_A); evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */ } ecb_cold static void linuxaio_fork (EV_P) { linuxaio_submitcnt = 0; /* all pointers were invalidated */ linuxaio_free_iocbp (EV_A); /* this frees 
all iocbs, which is very heavy-handed */ evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */ linuxaio_iteration = 0; /* we start over in the child */ while (linuxaio_io_setup (EV_A) < 0) ev_syserr ("(libev) linuxaio io_setup"); /* forking epoll should also effectively unregister all fds from the backend */ epoll_fork (EV_A); /* epoll_fork already did this. hopefully */ /*fd_rearm_all (EV_A);*/ ev_io_stop (EV_A_ &linuxaio_epoll_w); ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ); ev_io_start (EV_A_ &linuxaio_epoll_w); } EV-4.33/libev/ev.pod0000644000000000000000000066760313631212545012716 0ustar rootroot=encoding utf-8 =head1 NAME libev - a high performance full-featured event loop written in C =head1 SYNOPSIS #include =head2 EXAMPLE PROGRAM // a single header file is required #include #include // for puts // every watcher type has its own typedef'd struct // with the name ev_TYPE ev_io stdin_watcher; ev_timer timeout_watcher; // all watcher callbacks have a similar signature // this callback is called when data is readable on stdin static void stdin_cb (EV_P_ ev_io *w, int revents) { puts ("stdin ready"); // for one-shot events, one must manually stop the watcher // with its corresponding stop function. ev_io_stop (EV_A_ w); // this causes all nested ev_run's to stop iterating ev_break (EV_A_ EVBREAK_ALL); } // another callback, this time for a time-out static void timeout_cb (EV_P_ ev_timer *w, int revents) { puts ("timeout"); // this causes the innermost ev_run to stop iterating ev_break (EV_A_ EVBREAK_ONE); } int main (void) { // use the default event loop unless you have special needs struct ev_loop *loop = EV_DEFAULT; // initialise an io watcher, then start it // this one will watch for stdin to become readable ev_io_init (&stdin_watcher, stdin_cb, /*STDIN_FILENO*/ 0, EV_READ); ev_io_start (loop, &stdin_watcher); // initialise a timer watcher, then start it // simple non-repeating 5.5 second timeout ev_timer_init (&timeout_watcher, timeout_cb, 5.5, 0.); ev_timer_start (loop, &timeout_watcher); // now wait for events to arrive ev_run (loop, 0); // break was called, so exit return 0; } =head1 ABOUT THIS DOCUMENT This document documents the libev software package. The newest version of this document is also available as an html-formatted web page you might find easier to navigate when reading it for the first time: L. While this document tries to be as complete as possible in documenting libev, its usage and the rationale behind its design, it is not a tutorial on event-based programming, nor will it introduce event-based programming with libev. Familiarity with event based programming techniques in general is assumed throughout this document. =head1 WHAT TO READ WHEN IN A HURRY This manual tries to be very detailed, but unfortunately, this also makes it very long. If you just want to know the basics of libev, I suggest reading L, then the L above and look up the missing functions in L and the C and C sections in L. =head1 ABOUT LIBEV Libev is an event loop: you register interest in certain events (such as a file descriptor being readable or a timeout occurring), and it will manage these event sources and provide your program with events. To do this, it must take more or less complete control over your process (or thread) by executing the I handler, and will then communicate events via a callback mechanism. 
You register interest in certain events by registering so-called I, which are relatively small C structures you initialise with the details of the event, and then hand it over to libev by I the watcher. =head2 FEATURES Libev supports C (files, many character devices...). Epoll is truly the train wreck among event poll mechanisms, a frankenpoll, cobbled together in a hurry, no thought to design or interaction with others. Oh, the pain, will it ever stop... While stopping, setting and starting an I/O watcher in the same iteration will result in some caching, there is still a system call per such incident (because the same I could point to a different I now), so its best to avoid that. Also, C'ed file descriptors might not work very well if you register events for both file descriptors. Best performance from this backend is achieved by not unregistering all watchers for a file descriptor until it has been closed, if possible, i.e. keep at least one watcher active per fd at all times. Stopping and starting a watcher (without re-setting it) also usually doesn't cause extra overhead. A fork can both result in spurious notifications as well as in libev having to destroy and recreate the epoll object, which can take considerable time and thus should be avoided. All this means that, in practice, C can be as fast or faster than epoll for maybe up to a hundred file descriptors, depending on the usage. So sad. While nominally embeddable in other event loops, this feature is broken in a lot of kernel revisions, but probably(!) works in current versions. This backend maps C and C in the same way as C. =item C (value 64, Linux) Use the Linux-specific Linux AIO (I C<< aio(7) >> but C<< io_submit(2) >>) event interface available in post-4.18 kernels (but libev only tries to use it in 4.19+). This is another Linux train wreck of an event interface. If this backend works for you (as of this writing, it was very experimental), it is the best event interface available on Linux and might be well worth enabling it - if it isn't available in your kernel this will be detected and this backend will be skipped. This backend can batch oneshot requests and supports a user-space ring buffer to receive events. It also doesn't suffer from most of the design problems of epoll (such as not being able to remove event sources from the epoll set), and generally sounds too good to be true. Because, this being the Linux kernel, of course it suffers from a whole new set of limitations, forcing you to fall back to epoll, inheriting all its design issues. For one, it is not easily embeddable (but probably could be done using an event fd at some extra overhead). It also is subject to a system wide limit that can be configured in F. If no AIO requests are left, this backend will be skipped during initialisation, and will switch to epoll when the loop is active. Most problematic in practice, however, is that not all file descriptors work with it. For example, in Linux 5.1, TCP sockets, pipes, event fds, files, F and many others are supported, but ttys do not work properly (a known bug that the kernel developers don't care about, see L), so this is not (yet?) a generic event polling interface. Overall, it seems the Linux developers just don't want it to have a generic event handling mechanism other than C which have a high overhead for the actual polling but can deliver many events at once. 
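Example: try to create a loop using the linux aio backend explicitly, falling
back to the recommended backend set when the kernel does not support it (a
minimal sketch; the fallback policy shown here is just one possibility):

   struct ev_loop *loop = ev_loop_new (EVBACKEND_LINUXAIO);

   if (!loop)
     // linux aio unavailable or out of AIO requests - let libev choose
     loop = ev_loop_new (EVFLAG_AUTO);
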
By setting a higher I you allow libev to spend more time collecting I/O events, so you can handle more events per iteration, at the cost of increasing latency. Timeouts (both C and C) will not be affected. Setting this to a non-null value will introduce an additional C call into most loop iterations. The sleep time ensures that libev will not poll for I/O events more often then once per this interval, on average (as long as the host time resolution is good enough). Likewise, by setting a higher I you allow libev to spend more time collecting timeouts, at the expense of increased latency/jitter/inexactness (the watcher callback will be called later). C watchers will not be affected. Setting this to a non-null value will not introduce any overhead in libev. Many (busy) programs can usually benefit by setting the I/O collect interval to a value near C<0.1> or so, which is often enough for interactive servers (of course not for games), likewise for timeouts. It usually doesn't make much sense to set it to a lower value than C<0.01>, as this approaches the timing granularity of most systems. Note that if you do transactions with the outside world and you can't increase the parallelity, then this setting will limit your transaction rate (if you need to poll once per transaction and the I/O collect interval is 0.01, then you can't do more than 100 transactions per second). Setting the I can improve the opportunity for saving power, as the program will "bundle" timer callback invocations that are "near" in time together, by delaying some, thus reducing the number of times the process sleeps and wakes up again. Another useful technique to reduce iterations/wake-ups is to use C watchers and make sure they fire on, say, one-second boundaries only. Example: we only need 0.1s timeout granularity, and we wish not to poll more often than 100 times per second: ev_set_timeout_collect_interval (EV_DEFAULT_UC_ 0.1); ev_set_io_collect_interval (EV_DEFAULT_UC_ 0.01); =item ev_invoke_pending (loop) This call will simply invoke all pending watchers while resetting their pending state. Normally, C does this automatically when required, but when overriding the invoke callback this call comes handy. This function can be invoked from a watcher - this can be useful for example when you want to do some lengthy calculation and want to pass further event handling to another thread (you still have to make sure only one thread executes within C or C of course). =item int ev_pending_count (loop) Returns the number of pending watchers - zero indicates that no watchers are pending. =item ev_set_invoke_pending_cb (loop, void (*invoke_pending_cb)(EV_P)) This overrides the invoke pending functionality of the loop: Instead of invoking all pending watchers when there are any, C will call this callback instead. This is useful, for example, when you want to invoke the actual watchers inside another context (another thread etc.). If you want to reset the callback, use C as new callback. =item ev_set_loop_release_cb (loop, void (*release)(EV_P) throw (), void (*acquire)(EV_P) throw ()) Sometimes you want to share the same loop between multiple threads. This can be done relatively simply by putting mutex_lock/unlock calls around each call to a libev function. However, C can run an indefinite time, so it is not feasible to wait for it to return. One way around this is to wake up the event loop via C and C, another way is to set these I and I callbacks on the loop. 
When set, then C will be called just before the thread is suspended waiting for new events, and C is called just afterwards. Ideally, C will just call your mutex_unlock function, and C will just call the mutex_lock function again. While event loop modifications are allowed between invocations of C and C (that's their only purpose after all), no modifications done will affect the event loop, i.e. adding watchers will have no effect on the set of file descriptors being watched, or the time waited. Use an C watcher to wake up C when you want it to take note of any changes you made. In theory, threads executing C will be async-cancel safe between invocations of C and C. See also the locking example in the C section later in this document. =item ev_set_userdata (loop, void *data) =item void *ev_userdata (loop) Set and retrieve a single C associated with a loop. When C has never been called, then C returns C<0>. These two functions can be used to associate arbitrary data with a loop, and are intended solely for the C, C and C callbacks described above, but of course can be (ab-)used for any other purpose as well. =item ev_verify (loop) This function only does something when C support has been compiled in, which is the default for non-minimal builds. It tries to go through all internal structures and checks them for validity. If anything is found to be inconsistent, it will print an error message to standard error and call C. This can be used to catch bugs inside libev itself: under normal circumstances, this function will never abort as of course libev keeps its data structures consistent. =back =head1 ANATOMY OF A WATCHER In the following description, uppercase C in names stands for the watcher type, e.g. C can mean C for timer watchers and C for I/O watchers. A watcher is an opaque structure that you allocate and register to record your interest in some event. To make a concrete example, imagine you want to wait for STDIN to become readable, you would create an C watcher for that: static void my_cb (struct ev_loop *loop, ev_io *w, int revents) { ev_io_stop (w); ev_break (loop, EVBREAK_ALL); } struct ev_loop *loop = ev_default_loop (0); ev_io stdin_watcher; ev_init (&stdin_watcher, my_cb); ev_io_set (&stdin_watcher, STDIN_FILENO, EV_READ); ev_io_start (loop, &stdin_watcher); ev_run (loop, 0); As you can see, you are responsible for allocating the memory for your watcher structures (and it is I a bad idea to do this on the stack). Each watcher has an associated watcher structure (called C or simply C, as typedefs are provided for all watcher structs). Each watcher structure must be initialised by a call to C, which expects a callback to be provided. This callback is invoked each time the event occurs (or, in the case of I/O watchers, each time the event loop detects that the file descriptor given is readable and/or writable). Each watcher type further has its own C<< ev_TYPE_set (watcher *, ...) >> macro to configure it, with arguments specific to the watcher type. There is also a macro to combine initialisation and setting in one call: C<< ev_TYPE_init (watcher *, callback, ...) >>. To make the watcher actually watch out for events, you have to start it with a watcher-specific start function (C<< ev_TYPE_start (loop, watcher *) >>), and you can stop watching for events at any time by calling the corresponding stop function (C<< ev_TYPE_stop (loop, watcher *) >>. 
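To make this lifecycle concrete, here is a minimal sketch using a timer
watcher (the callback name and the 2.5 second timeout are examples only):

   static void
   my_timeout_cb (EV_P_ ev_timer *w, int revents)
   {
     // stop the watcher explicitly, so it is neither active nor
     // pending when its memory is later reused or freed
     ev_timer_stop (EV_A_ w);
   }

   ev_timer my_timer;

   // generic initialisation first, type-specific configuration second
   ev_init (&my_timer, my_timeout_cb);
   ev_timer_set (&my_timer, 2.5, 0.);

   // hand the watcher over to the default loop
   ev_timer_start (EV_DEFAULT_ &my_timer);
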
As long as your watcher is active (has been started but not stopped) you must not touch the values stored in it except when explicitly documented otherwise. Most specifically you must never reinitialise it or call its C macro. Each and every callback receives the event loop pointer as first, the registered watcher structure as second, and a bitset of received events as third argument. The received events usually include a single bit per event type received (you can receive multiple events at the same time). The possible bit masks are: =over 4 =item C =item C The file descriptor in the C watcher has become readable and/or writable. =item C The C watcher has timed out. =item C The C watcher has timed out. =item C The signal specified in the C watcher has been received by a thread. =item C The pid specified in the C watcher has received a status change. =item C The path specified in the C watcher changed its attributes somehow. =item C The C watcher has determined that you have nothing better to do. =item C =item C All C watchers are invoked just I C starts to gather new events, and all C watchers are queued (not invoked) just after C has gathered them, but before it queues any callbacks for any received events. That means C watchers are the last watchers invoked before the event loop sleeps or polls for new events, and C watchers will be invoked before any other watchers of the same or lower priority within an event loop iteration. Callbacks of both watcher types can start and stop as many watchers as they want, and all of them will be taken into account (for example, a C watcher might start an idle watcher to keep C from blocking). =item C The embedded event loop specified in the C watcher needs attention. =item C The event loop has been resumed in the child process after fork (see C). =item C The event loop is about to be destroyed (see C). =item C The given async watcher has been asynchronously notified (see C). =item C Not ever sent (or otherwise used) by libev itself, but can be freely used by libev users to signal watchers (e.g. via C). =item C An unspecified error has occurred, the watcher has been stopped. This might happen because the watcher could not be properly started because libev ran out of memory, a file descriptor was found to be closed or any other problem. Libev considers these application bugs. You best act on it by reporting the problem and somehow coping with the watcher being stopped. Note that well-written programs should not receive an error ever, so when your watcher receives it, this usually indicates a bug in your program. Libev will usually signal a few "dummy" events together with an error, for example it might indicate that a fd is readable or writable, and if your callbacks is well-written it can just attempt the operation and cope with the error from read() or write(). This will not work in multi-threaded programs, though, as the fd could already be closed and reused for another thing, so beware. =back =head2 GENERIC WATCHER FUNCTIONS =over 4 =item C (ev_TYPE *watcher, callback) This macro initialises the generic portion of a watcher. The contents of the watcher object can be arbitrary (so C will do). Only the generic parts of the watcher are initialised, you I to call the type-specific C macro afterwards to initialise the type-specific parts. For each type there is also a C macro which rolls both calls into one. You can reinitialise a watcher at any time as long as it has been stopped (or never started) and there are no pending events outstanding. 
The callback is always of type C. Example: Initialise an C watcher in two steps. ev_io w; ev_init (&w, my_cb); ev_io_set (&w, STDIN_FILENO, EV_READ); =item C (ev_TYPE *watcher, [args]) This macro initialises the type-specific parts of a watcher. You need to call C at least once before you call this macro, but you can call C any number of times. You must not, however, call this macro on a watcher that is active (it can be pending, however, which is a difference to the C macro). Although some watcher types do not have type-specific arguments (e.g. C) you still need to call its C macro. See C, above, for an example. =item C (ev_TYPE *watcher, callback, [args]) This convenience macro rolls both C and C macro calls into a single call. This is the most convenient method to initialise a watcher. The same limitations apply, of course. Example: Initialise and set an C watcher in one step. ev_io_init (&w, my_cb, STDIN_FILENO, EV_READ); =item C (loop, ev_TYPE *watcher) Starts (activates) the given watcher. Only active watchers will receive events. If the watcher is already active nothing will happen. Example: Start the C watcher that is being abused as example in this whole section. ev_io_start (EV_DEFAULT_UC, &w); =item C (loop, ev_TYPE *watcher) Stops the given watcher if active, and clears the pending status (whether the watcher was active or not). It is possible that stopped watchers are pending - for example, non-repeating timers are being stopped when they become pending - but calling C ensures that the watcher is neither active nor pending. If you want to free or reuse the memory used by the watcher it is therefore a good idea to always call its C function. =item bool ev_is_active (ev_TYPE *watcher) Returns a true value iff the watcher is active (i.e. it has been started and not yet been stopped). As long as a watcher is active you must not modify it. =item bool ev_is_pending (ev_TYPE *watcher) Returns a true value iff the watcher is pending, (i.e. it has outstanding events but its callback has not yet been invoked). As long as a watcher is pending (but not active) you must not call an init function on it (but C is safe), you must not change its priority, and you must make sure the watcher is available to libev (e.g. you cannot C it). =item callback ev_cb (ev_TYPE *watcher) Returns the callback currently set on the watcher. =item ev_set_cb (ev_TYPE *watcher, callback) Change the callback. You can change the callback at virtually any time (modulo threads). =item ev_set_priority (ev_TYPE *watcher, int priority) =item int ev_priority (ev_TYPE *watcher) Set and query the priority of the watcher. The priority is a small integer between C (default: C<2>) and C (default: C<-2>). Pending watchers with higher priority will be invoked before watchers with lower priority, but priority will not keep watchers from being executed (except for C watchers). If you need to suppress invocation when higher priority events are pending you need to look at C watchers, which provide this functionality. You I change the priority of a watcher as long as it is active or pending. Setting a priority outside the range of C to C is fine, as long as you do not mind that the priority value you query might or might not have been clamped to the valid range. The default priority used by watchers when no priority has been set is always C<0>, which is supposed to not be too high and not be too low :). See L, below, for a more thorough treatment of priorities. 
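Example: give a timeout watcher a lower priority than the I/O watcher it
guards, so that same-iteration I/O is handled first (a sketch - C<io_cb>,
C<timeout_cb> and C<fd> are assumed to exist elsewhere). The priority must be
set while the watcher is neither active nor pending:

   ev_io io_w;
   ev_timer timeout_w;

   ev_io_init (&io_w, io_cb, fd, EV_READ);
   ev_timer_init (&timeout_w, timeout_cb, 60., 0.);

   // lower priority => invoked after same-iteration I/O callbacks
   ev_set_priority (&timeout_w, -1);

   ev_io_start (EV_DEFAULT_ &io_w);
   ev_timer_start (EV_DEFAULT_ &timeout_w);
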
=item ev_invoke (loop, ev_TYPE *watcher, int revents) Invoke the C with the given C and C. Neither C nor C need to be valid as long as the watcher callback can deal with that fact, as both are simply passed through to the callback. =item int ev_clear_pending (loop, ev_TYPE *watcher) If the watcher is pending, this function clears its pending status and returns its C bitset (as if its callback was invoked). If the watcher isn't pending it does nothing and returns C<0>. Sometimes it can be useful to "poll" a watcher instead of waiting for its callback to be invoked, which can be accomplished with this function. =item ev_feed_event (loop, ev_TYPE *watcher, int revents) Feeds the given event set into the event loop, as if the specified event had happened for the specified watcher (which must be a pointer to an initialised but not necessarily started event watcher). Obviously you must not free the watcher as long as it has pending events. Stopping the watcher, letting libev invoke it, or calling C will clear the pending event, even if the watcher was not started in the first place. See also C and C for related functions that do not need a watcher. =back See also the L and L idioms. =head2 WATCHER STATES There are various watcher states mentioned throughout this manual - active, pending and so on. In this section these states and the rules to transition between them will be described in more detail - and while these rules might look complicated, they usually do "the right thing". =over 4 =item initialised Before a watcher can be registered with the event loop it has to be initialised. This can be done with a call to C, or calls to C followed by the watcher-specific C function. In this state it is simply some block of memory that is suitable for use in an event loop. It can be moved around, freed, reused etc. at will - as long as you either keep the memory contents intact, or call C again. =item started/running/active Once a watcher has been started with a call to C it becomes property of the event loop, and is actively waiting for events. While in this state it cannot be accessed (except in a few documented ways), moved, freed or anything else - the only legal thing is to keep a pointer to it, and call libev functions on it that are documented to work on active watchers. =item pending If a watcher is active and libev determines that an event it is interested in has occurred (such as a timer expiring), it will become pending. It will stay in this pending state until either it is stopped or its callback is about to be invoked, so it is not normally pending inside the watcher callback. The watcher might or might not be active while it is pending (for example, an expired non-repeating timer can be pending but no longer active). If it is stopped, it can be freely accessed (e.g. by calling C), but it is still property of the event loop at this time, so cannot be moved, freed or reused. And if it is active the rules described in the previous item still apply. It is also possible to feed an event on a watcher that is not active (e.g. via C), in which case it becomes pending without being active. =item stopped A watcher can be stopped implicitly by libev (in which case it might still be pending), or explicitly by calling its C function. The latter will clear any pending state the watcher might be in, regardless of whether it was active or not, so stopping a watcher explicitly before freeing it is often a good idea. 
While stopped (and not pending) the watcher is essentially in the initialised state, that is, it can be reused, moved, modified in any way you wish (but when you trash the memory block, you need to C it again). =back =head2 WATCHER PRIORITY MODELS Many event loops support I, which are usually small integers that influence the ordering of event callback invocation between watchers in some way, all else being equal. In libev, watcher priorities can be set using C. See its description for the more technical details such as the actual priority range. There are two common ways how these these priorities are being interpreted by event loops: In the more common lock-out model, higher priorities "lock out" invocation of lower priority watchers, which means as long as higher priority watchers receive events, lower priority watchers are not being invoked. The less common only-for-ordering model uses priorities solely to order callback invocation within a single event loop iteration: Higher priority watchers are invoked before lower priority ones, but they all get invoked before polling for new events. Libev uses the second (only-for-ordering) model for all its watchers except for idle watchers (which use the lock-out model). The rationale behind this is that implementing the lock-out model for watchers is not well supported by most kernel interfaces, and most event libraries will just poll for the same events again and again as long as their callbacks have not been executed, which is very inefficient in the common case of one high-priority watcher locking out a mass of lower priority ones. Static (ordering) priorities are most useful when you have two or more watchers handling the same resource: a typical usage example is having an C watcher to receive data, and an associated C to handle timeouts. Under load, data might be received while the program handles other jobs, but since timers normally get invoked first, the timeout handler will be executed before checking for data. In that case, giving the timer a lower priority than the I/O watcher ensures that I/O will be handled first even under adverse conditions (which is usually, but not always, what you want). Since idle watchers use the "lock-out" model, meaning that idle watchers will only be executed when no same or higher priority watchers have received events, they can be used to implement the "lock-out" model when required. For example, to emulate how many other event libraries handle priorities, you can associate an C watcher to each such watcher, and in the normal watcher callback, you just start the idle watcher. The real processing is done in the idle watcher callback. This causes libev to continuously poll and process kernel event data for the watcher, but when the lock-out case is known to be rare (which in turn is rare :), this is workable. Usually, however, the lock-out model implemented that way will perform miserably under the type of load it was designed to handle. In that case, it might be preferable to stop the real watcher before starting the idle watcher, so the kernel will not have to process the event in case the actual processing will be delayed for considerable time. 
Here is an example of an I/O watcher that should run at a strictly lower priority than the default, and which should only process data when no other events are pending: ev_idle idle; // actual processing watcher ev_io io; // actual event watcher static void io_cb (EV_P_ ev_io *w, int revents) { // stop the I/O watcher, we received the event, but // are not yet ready to handle it. ev_io_stop (EV_A_ w); // start the idle watcher to handle the actual event. // it will not be executed as long as other watchers // with the default priority are receiving events. ev_idle_start (EV_A_ &idle); } static void idle_cb (EV_P_ ev_idle *w, int revents) { // actual processing read (STDIN_FILENO, ...); // have to start the I/O watcher again, as // we have handled the event ev_io_start (EV_P_ &io); } // initialisation ev_idle_init (&idle, idle_cb); ev_io_init (&io, io_cb, STDIN_FILENO, EV_READ); ev_io_start (EV_DEFAULT_ &io); In the "real" world, it might also be beneficial to start a timer, so that low-priority connections can not be locked out forever under load. This enables your program to keep a lower latency for important connections during short periods of high load, while not completely locking out less important ones. =head1 WATCHER TYPES This section describes each watcher in detail, but will not repeat information given in the last section. Any initialisation/set macros, functions and members specific to the watcher type are explained. Most members are additionally marked with either I<[read-only]>, meaning that, while the watcher is active, you can look at the member and expect some sensible content, but you must not modify it (you can modify it while the watcher is stopped to your hearts content), or I<[read-write]>, which means you can expect it to have some sensible content while the watcher is active, but you can also modify it (within the same thread as the event loop, i.e. without creating data races). Modifying it may not do something sensible or take immediate effect (or do anything at all), but libev will not crash or malfunction in any way. In any case, the documentation for each member will explain what the effects are, and if there are any additional access restrictions. =head2 C - is this file descriptor readable or writable? I/O watchers check whether a file descriptor is readable or writable in each iteration of the event loop, or, more precisely, when reading would not block the process and writing would at least be able to write some data. This behaviour is called level-triggering because you keep receiving events as long as the condition persists. Remember you can stop the watcher if you don't want to act on the event and neither want to receive future events. In general you can register as many read and/or write event watchers per fd as you want (as long as you don't confuse yourself). Setting all file descriptors to non-blocking mode is also usually a good idea (but not required if you know what you are doing). Another thing you have to watch out for is that it is quite easy to receive "spurious" readiness notifications, that is, your callback might be called with C but a subsequent C(2) will actually block because there is no data. It is very easy to get into this situation even with a relatively standard program structure. Thus it is best to always use non-blocking I/O: An extra C(2) returning C is far preferable to a program hanging until some data arrives. 
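As a sketch of the "always use non-blocking I/O" advice (assuming the descriptor was put into non-blocking mode beforehand and that F<unistd.h> and F<errno.h> are included), a read callback can simply treat C<EAGAIN> as "no event after all":

   static void
   read_cb (EV_P_ ev_io *w, int revents)
   {
     char buf [4096];
     ssize_t len = read (w->fd, buf, sizeof (buf));

     if (len > 0)
       ; /* process buf [0 .. len - 1] */
     else if (len == 0)
       ev_io_stop (EV_A_ w); /* EOF */
     else if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
       ; /* spurious readiness or interruption - just wait for the next event */
     else
       ev_io_stop (EV_A_ w); /* real error - handle or report as appropriate */
   }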
If you cannot run the fd in non-blocking mode (for example you should not play around with an Xlib connection), then you have to separately re-test whether a file descriptor is really ready with a known-to-be good interface such as poll (fortunately in the case of Xlib, it already does this on its own, so its quite safe to use). Some people additionally use C and an interval timer, just to be sure you won't block indefinitely. But really, best use non-blocking mode. =head3 The special problem of disappearing file descriptors Some backends (e.g. kqueue, epoll, linuxaio) need to be told about closing a file descriptor (either due to calling C explicitly or any other means, such as C). The reason is that you register interest in some file descriptor, but when it goes away, the operating system will silently drop this interest. If another file descriptor with the same number then is registered with libev, there is no efficient way to see that this is, in fact, a different file descriptor. To avoid having to explicitly tell libev about such cases, libev follows the following policy: Each time C is being called, libev will assume that this is potentially a new file descriptor, otherwise it is assumed that the file descriptor stays the same. That means that you I to call C (or C) when you change the descriptor even if the file descriptor number itself did not change. This is how one would do it normally anyway, the important point is that the libev application should not optimise around libev but should leave optimisations to libev. =head3 The special problem of dup'ed file descriptors Some backends (e.g. epoll), cannot register events for file descriptors, but only events for the underlying file descriptions. That means when you have C'ed file descriptors or weirder constellations, and register events for them, only one file descriptor might actually receive events. There is no workaround possible except not registering events for potentially C'ed file descriptors, or to resort to C or C. =head3 The special problem of files Many people try to use C. =item EV_USE_EVENTFD If defined to be C<1>, then libev will assume that C is available and will probe for kernel support at runtime. This will improve C and C performance and reduce resource consumption. If undefined, it will be enabled if the headers indicate GNU/Linux + Glibc 2.7 or newer, otherwise disabled. =item EV_USE_SIGNALFD If defined to be C<1>, then libev will assume that C is available and will probe for kernel support at runtime. This enables the use of EVFLAG_SIGNALFD for faster and simpler signal handling. If undefined, it will be enabled if the headers indicate GNU/Linux + Glibc 2.7 or newer, otherwise disabled. =item EV_USE_TIMERFD If defined to be C<1>, then libev will assume that C is available and will probe for kernel support at runtime. This allows libev to detect time jumps accurately. If undefined, it will be enabled if the headers indicate GNU/Linux + Glibc 2.8 or newer and define C, otherwise disabled. =item EV_USE_EVENTFD If defined to be C<1>, then libev will assume that C is available and will probe for kernel support at runtime. This will improve C and C performance and reduce resource consumption. If undefined, it will be enabled if the headers indicate GNU/Linux + Glibc 2.7 or newer, otherwise disabled. 
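For example (illustrative only, not a recommendation), an embedding application that knows it only targets a recent GNU/Linux could pin these decisions at compile time instead of relying on header detection:

   /* before including ev.h or ev.c */
   #define EV_USE_EVENTFD  1
   #define EV_USE_SIGNALFD 1
   #define EV_USE_TIMERFD  1
   #include "ev.c"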
=item EV_USE_SELECT If undefined or defined to be C<1>, libev will compile in support for the C is buggy All that's left is C actively limits the number of file descriptors you can pass in to 1024 - your program suddenly crashes when you use more. There is an undocumented "workaround" for this - defining C<_DARWIN_UNLIMITED_SELECT>, which libev tries to use, so select I work on OS/X. =head2 SOLARIS PROBLEMS AND WORKAROUNDS =head3 C reentrancy The default compile environment on Solaris is unfortunately so thread-unsafe that you can't even use components/libraries compiled without C<-D_REENTRANT> in a threaded program, which, of course, isn't defined by default. A valid, if stupid, implementation choice. If you want to use libev in threaded environments you have to make sure it's compiled with C<_REENTRANT> defined. =head3 Event port backend The scalable event interface for Solaris is called "event ports". Unfortunately, this mechanism is very buggy in all major releases. If you run into high CPU usage, your program freezes or you get a large number of spurious wakeups, make sure you have all the relevant and latest kernel patches applied. No, I don't know which ones, but there are multiple ones to apply, and afterwards, event ports actually work great. If you can't get it to work, you can try running the program by setting the environment variable C to only allow C and C works fine with large bitsets on AIX, and AIX is dead anyway. =head2 WIN32 PLATFORM LIMITATIONS AND WORKAROUNDS =head3 General issues Win32 doesn't support any of the standards (e.g. POSIX) that libev requires, and its I/O model is fundamentally incompatible with the POSIX model. Libev still offers limited functionality on this platform in the form of the C backend, and only supports socket descriptors. This only applies when using Win32 natively, not when using e.g. cygwin. Actually, it only applies to the microsofts own compilers, as every compiler comes with a slightly differently broken/incompatible environment. Lifting these limitations would basically require the full re-implementation of the I/O system. If you are into this kind of thing, then note that glib does exactly that for you in a very portable way (note also that glib is the slowest event library known to man). There is no supported compilation method available on windows except embedding it into other applications. Sensible signal handling is officially unsupported by Microsoft - libev tries its best, but under most conditions, signals will simply not work. Not a libev limitation but worth mentioning: windows apparently doesn't accept large writes: instead of resulting in a partial write, windows will either accept everything or return C if the buffer is too large, so make sure you only write small amounts into your sockets (less than a megabyte seems safe, but this apparently depends on the amount of memory available). Due to the many, low, and arbitrary limits on the win32 platform and the abysmal performance of winsockets, using a large number of sockets is not recommended (and not reasonable). If your program needs to use more than a hundred or so sockets, then likely it needs to use a totally different implementation for windows, as libev offers the POSIX readiness notification model, which cannot be implemented efficiently on windows (due to Microsoft monopoly games). 
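Given the platform quirks discussed above, it can be simpler to pin the backend choice in code rather than rely on autodetection - an illustrative one-liner, with C<EVBACKEND_SELECT>/C<EVBACKEND_POLL> as the lowest common denominator:

   struct ev_loop *loop = ev_default_loop (EVBACKEND_SELECT | EVBACKEND_POLL);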
A typical way to use libev under windows is to embed it (see the embedding section for details) and use the following F header file instead of F: #define EV_STANDALONE /* keeps ev from requiring config.h */ #define EV_SELECT_IS_WINSOCKET 1 /* configure libev for windows select */ #include "ev.h" And compile the following F file into your project (make sure you do I compile the F or any other embedded source files!): #include "evwrap.h" #include "ev.c" =head3 The winsocket C function doesn't follow POSIX in that it requires socket I and not socket I (it is also extremely buggy). This makes select very inefficient, and also requires a mapping from file descriptors to socket handles (the Microsoft C runtime provides the function C<_open_osfhandle> for this). See the discussion of the C, C and C preprocessor symbols for more info. The configuration for a "naked" win32 using the Microsoft runtime libraries and raw winsocket select is: #define EV_USE_SELECT 1 #define EV_SELECT_IS_WINSOCKET 1 /* forces EV_SELECT_USE_FD_SET, too */ Note that winsockets handling of fd sets is O(n), so you can easily get a complexity in the O(n²) range when using win32. =head3 Limited number of file descriptors Windows has numerous arbitrary (and low) limits on things. Early versions of winsocket's select only supported waiting for a maximum of C<64> handles (probably owning to the fact that all windows kernels can only wait for C<64> things at the same time internally; Microsoft recommends spawning a chain of threads and wait for 63 handles and the previous thread in each. Sounds great!). Newer versions support more handles, but you need to define C to some high number (e.g. C<2048>) before compiling the winsocket select call (which might be in libev or elsewhere, for example, perl and many other interpreters do their own select emulation on windows). Another limit is the number of file descriptors in the Microsoft runtime libraries, which by default is C<64> (there must be a hidden I<64> fetish or something like this inside Microsoft). You can increase this by calling C<_setmaxstdio>, which can increase this limit to C<2048> (another arbitrary limit), but is broken in many versions of the Microsoft runtime libraries. This might get you to about C<512> or C<2048> sockets (depending on windows version and/or the phase of the moon). To get more, you need to wrap all I/O functions and provide your own fd management, but the cost of calling select (O(n²)) will likely make this unworkable. =head2 PORTABILITY REQUIREMENTS In addition to a working ISO-C implementation and of course the backend-specific APIs, libev relies on a few additional extensions: =over 4 =item C must have compatible calling conventions regardless of C. Libev assumes not only that all watcher pointers have the same internal structure (guaranteed by POSIX but not by ISO C for example), but it also assumes that the same (machine) code can be used to call any watcher callback: The watcher callbacks have different type signatures, but libev calls them using an C internally. =item null pointers and integer zero are represented by 0 bytes Libev uses C to initialise structs and arrays to C<0> bytes, and relies on this setting pointers and integers to null. =item pointer accesses must be thread-atomic Accessing a pointer value must be atomic, it must both be readable and writable in one piece - this is the case on all current architectures. 
=item C must be thread-atomic as well The type C (or whatever is defined as C) must be atomic with respect to accesses from different threads. This is not part of the specification for C, but is believed to be sufficiently portable. =item C must work in a threaded environment Libev uses C to temporarily block signals. This is not allowed in a threaded program (C has to be used). Typical pthread implementations will either allow C in the "main thread" or will block signals process-wide, both behaviours would be compatible with libev. Interaction between C and C could complicate things, however. The most portable way to handle signals is to block signals in all threads except the initial one, and run the signal handling loop in the initial thread as well. =item C must be large enough for common memory allocation sizes To improve portability and simplify its API, libev uses C internally instead of C when allocating its data structures. On non-POSIX systems (Microsoft...) this might be unexpectedly low, but is still at least 31 bits everywhere, which is enough for hundreds of millions of watchers. =item C must hold a time value in seconds with enough accuracy The type C is used to represent timestamps. It is required to have at least 51 bits of mantissa (and 9 bits of exponent), which is good enough for at least into the year 4000 with millisecond accuracy (the design goal for libev). This requirement is overfulfilled by implementations using IEEE 754, which is basically all existing ones. With IEEE 754 doubles, you get microsecond accuracy until at least the year 2255 (and millisecond accuracy till the year 287396 - by then, libev is either obsolete or somebody patched it to use C or something like that, just kidding). =back If you know of other additional requirements drop me a note. =head1 ALGORITHMIC COMPLEXITIES In this section the complexities of (many of) the algorithms used inside libev will be documented. For complexity discussions about backends see the documentation for C. All of the following are about amortised time: If an array needs to be extended, libev needs to realloc and move the whole array, but this happens asymptotically rarer with higher number of elements, so O(1) might mean that libev does a lengthy realloc operation in rare cases, but on average it is much faster and asymptotically approaches constant time. =over 4 =item Starting and stopping timer/periodic watchers: O(log skipped_other_timers) This means that, when you have a watcher that triggers in one hour and there are 100 watchers that would trigger before that, then inserting will have to skip roughly seven (C) of these watchers. =item Changing timer/periodic watchers (by autorepeat or calling again): O(log skipped_other_timers) That means that changing a timer costs less than removing/adding them, as only the relative motion in the event queue has to be paid for. =item Starting io/check/prepare/idle/signal/child/fork/async watchers: O(1) These just add the watcher into an array or at the head of a list. =item Stopping check/prepare/idle/fork/async watchers: O(1) =item Stopping an io/signal/child watcher: O(number_of_watchers_for_this_(fd/signal/pid % EV_PID_HASHSIZE)) These watchers are stored in lists, so they need to be walked to find the correct watcher to remove. The lists are usually short (you don't usually have many watchers waiting for the same fd or signal: one is typical, two is rare). 
=item Finding the next timer in each loop iteration: O(1) By virtue of using a binary or 4-heap, the next timer is always found at a fixed position in the storage array. =item Each change on a file descriptor per loop iteration: O(number_of_watchers_for_this_fd) A change means an I/O watcher gets started or stopped, which requires libev to recalculate its status (and possibly tell the kernel, depending on backend and whether C was used). =item Activating one watcher (putting it into the pending state): O(1) =item Priority handling: O(number_of_priorities) Priorities are implemented by allocating some space for each priority. When doing priority-based operations, libev usually has to linearly search all the priorities, but starting/stopping and activating watchers becomes O(1) with respect to priority handling. =item Sending an ev_async: O(1) =item Processing ev_async_send: O(number_of_async_watchers) =item Processing signals: O(max_signal_number) Sending involves a system call I there were no other C calls in the current loop iteration and the loop is currently blocked. Checking for async and signal events involves iterating over all running async watchers or all signal numbers. =back =head1 PORTING FROM LIBEV 3.X TO 4.X The major version 4 introduced some incompatible changes to the API. At the moment, the C header file provides compatibility definitions for all changes, so most programs should still compile. The compatibility layer might be removed in later versions of libev, so better update to the new API early than late. =over 4 =item C backwards compatibility mechanism The backward compatibility mechanism can be controlled by C. See L in the L section. =item C and C have been removed These calls can be replaced easily by their C counterparts: ev_loop_destroy (EV_DEFAULT_UC); ev_loop_fork (EV_DEFAULT); =item function/symbol renames A number of functions and symbols have been renamed: ev_loop => ev_run EVLOOP_NONBLOCK => EVRUN_NOWAIT EVLOOP_ONESHOT => EVRUN_ONCE ev_unloop => ev_break EVUNLOOP_CANCEL => EVBREAK_CANCEL EVUNLOOP_ONE => EVBREAK_ONE EVUNLOOP_ALL => EVBREAK_ALL EV_TIMEOUT => EV_TIMER ev_loop_count => ev_iteration ev_loop_depth => ev_depth ev_loop_verify => ev_verify Most functions working on C objects don't have an C prefix, so it was removed; C, C and associated constants have been renamed to not collide with the C anymore and C now follows the same naming scheme as all other watcher types. Note that C is still called C because it would otherwise clash with the C typedef. =item C mechanism replaced by C The preprocessor symbol C has been replaced by a different mechanism, C. Programs using C usually compile and work, but the library code will of course be larger. =back =head1 GLOSSARY =over 4 =item active A watcher is active as long as it has been started and not yet stopped. See L for details. =item application In this document, an application is whatever is using libev. =item backend The part of the code dealing with the operating system interfaces. =item callback The address of a function that is called when some event has been detected. Callbacks are being passed the event loop, the watcher that received the event, and the actual event bitset. =item callback/watcher invocation The act of calling the callback associated with a watcher. =item event A change of state of some external event, such as data now being available for reading on a file descriptor, time having passed or simply not having any other events happening anymore. 
In libev, events are represented as single bits (such as C or C). =item event library A software package implementing an event model and loop. =item event loop An entity that handles and processes external events and converts them into callback invocations. =item event model The model used to describe how an event loop handles and processes watchers and events. =item pending A watcher is pending as soon as the corresponding event has been detected. See L for details. =item real time The physical time that is observed. It is apparently strictly monotonic :) =item wall-clock time The time and date as shown on clocks. Unlike real time, it can actually be wrong and jump forwards and backwards, e.g. when you adjust your clock. =item watcher A data structure that describes interest in certain events. Watchers need to be started (attached to an event loop) before they can receive events. =back =head1 AUTHOR Marc Lehmann , with repeated corrections by Mikael Magnusson and Emanuele Giaquinta, and minor corrections by many others. EV-4.33/libev/ev_vars.h0000644000000000000000000001657513601423253013407 0ustar rootroot/* * loop member variable declarations * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #define VARx(type,name) VAR(name, type name) VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */ VARx(ev_tstamp, mn_now) /* monotonic clock "now" */ VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */ /* for reverse feeding of events */ VARx(W *, rfeeds) VARx(int, rfeedmax) VARx(int, rfeedcnt) VAR (pendings, ANPENDING *pendings [NUMPRI]) VAR (pendingmax, int pendingmax [NUMPRI]) VAR (pendingcnt, int pendingcnt [NUMPRI]) VARx(int, pendingpri) /* highest priority currently pending */ VARx(ev_prepare, pending_w) /* dummy pending watcher */ VARx(ev_tstamp, io_blocktime) VARx(ev_tstamp, timeout_blocktime) VARx(int, backend) VARx(int, activecnt) /* total number of active events ("refcount") */ VARx(EV_ATOMIC_T, loop_done) /* signal by ev_break */ VARx(int, backend_fd) VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */ VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev)) VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout)) VARx(ANFD *, anfds) VARx(int, anfdmax) VAR (evpipe, int evpipe [2]) VARx(ev_io, pipe_w) VARx(EV_ATOMIC_T, pipe_write_wanted) VARx(EV_ATOMIC_T, pipe_write_skipped) #if !defined(_WIN32) || EV_GENWRAP VARx(pid_t, curpid) #endif VARx(char, postfork) /* true if we need to recreate kernel state after fork */ #if EV_USE_SELECT || EV_GENWRAP VARx(void *, vec_ri) VARx(void *, vec_ro) VARx(void *, vec_wi) VARx(void *, vec_wo) #if defined(_WIN32) || EV_GENWRAP VARx(void *, vec_eo) #endif VARx(int, vec_max) #endif #if EV_USE_POLL || EV_GENWRAP VARx(struct pollfd *, polls) VARx(int, pollmax) VARx(int, pollcnt) VARx(int *, pollidxs) /* maps fds into structure indices */ VARx(int, pollidxmax) #endif #if EV_USE_EPOLL || EV_GENWRAP VARx(struct epoll_event *, epoll_events) VARx(int, epoll_eventmax) VARx(int *, epoll_eperms) VARx(int, epoll_epermcnt) VARx(int, epoll_epermmax) #endif #if EV_USE_LINUXAIO || EV_GENWRAP VARx(aio_context_t, linuxaio_ctx) VARx(int, linuxaio_iteration) VARx(struct aniocb **, linuxaio_iocbps) VARx(int, linuxaio_iocbpmax) VARx(struct iocb **, linuxaio_submits) VARx(int, linuxaio_submitcnt) VARx(int, linuxaio_submitmax) VARx(ev_io, linuxaio_epoll_w) #endif #if EV_USE_IOURING || EV_GENWRAP VARx(int, iouring_fd) VARx(unsigned, iouring_to_submit); VARx(int, iouring_entries) VARx(int, iouring_max_entries) VARx(void *, iouring_sq_ring) VARx(void *, iouring_cq_ring) VARx(void *, iouring_sqes) VARx(uint32_t, iouring_sq_ring_size) VARx(uint32_t, iouring_cq_ring_size) VARx(uint32_t, iouring_sqes_size) VARx(uint32_t, iouring_sq_head) VARx(uint32_t, iouring_sq_tail) VARx(uint32_t, iouring_sq_ring_mask) VARx(uint32_t, iouring_sq_ring_entries) VARx(uint32_t, iouring_sq_flags) VARx(uint32_t, iouring_sq_dropped) VARx(uint32_t, iouring_sq_array) VARx(uint32_t, iouring_cq_head) VARx(uint32_t, iouring_cq_tail) VARx(uint32_t, iouring_cq_ring_mask) VARx(uint32_t, iouring_cq_ring_entries) VARx(uint32_t, iouring_cq_overflow) VARx(uint32_t, iouring_cq_cqes) VARx(ev_tstamp, iouring_tfd_to) VARx(int, iouring_tfd) VARx(ev_io, iouring_tfd_w) #endif #if EV_USE_KQUEUE || EV_GENWRAP VARx(pid_t, kqueue_fd_pid) VARx(struct kevent *, kqueue_changes) VARx(int, kqueue_changemax) VARx(int, kqueue_changecnt) VARx(struct kevent *, kqueue_events) VARx(int, kqueue_eventmax) #endif #if EV_USE_PORT || EV_GENWRAP VARx(struct port_event *, port_events) VARx(int, port_eventmax) #endif #if EV_USE_IOCP || EV_GENWRAP VARx(HANDLE, iocp) #endif VARx(int *, fdchanges) VARx(int, fdchangemax) VARx(int, fdchangecnt) VARx(ANHE *, 
timers) VARx(int, timermax) VARx(int, timercnt) #if EV_PERIODIC_ENABLE || EV_GENWRAP VARx(ANHE *, periodics) VARx(int, periodicmax) VARx(int, periodiccnt) #endif #if EV_IDLE_ENABLE || EV_GENWRAP VAR (idles, ev_idle **idles [NUMPRI]) VAR (idlemax, int idlemax [NUMPRI]) VAR (idlecnt, int idlecnt [NUMPRI]) #endif VARx(int, idleall) /* total number */ VARx(struct ev_prepare **, prepares) VARx(int, preparemax) VARx(int, preparecnt) VARx(struct ev_check **, checks) VARx(int, checkmax) VARx(int, checkcnt) #if EV_FORK_ENABLE || EV_GENWRAP VARx(struct ev_fork **, forks) VARx(int, forkmax) VARx(int, forkcnt) #endif #if EV_CLEANUP_ENABLE || EV_GENWRAP VARx(struct ev_cleanup **, cleanups) VARx(int, cleanupmax) VARx(int, cleanupcnt) #endif #if EV_ASYNC_ENABLE || EV_GENWRAP VARx(EV_ATOMIC_T, async_pending) VARx(struct ev_async **, asyncs) VARx(int, asyncmax) VARx(int, asynccnt) #endif #if EV_USE_INOTIFY || EV_GENWRAP VARx(int, fs_fd) VARx(ev_io, fs_w) VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */ VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE]) #endif VARx(EV_ATOMIC_T, sig_pending) #if EV_USE_SIGNALFD || EV_GENWRAP VARx(int, sigfd) VARx(ev_io, sigfd_w) VARx(sigset_t, sigfd_set) #endif #if EV_USE_TIMERFD || EV_GENWRAP VARx(int, timerfd) /* timerfd for time jump detection */ VARx(ev_io, timerfd_w) #endif VARx(unsigned int, origflags) /* original loop flags */ #if EV_FEATURE_API || EV_GENWRAP VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */ VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */ VARx(void *, userdata) /* C++ doesn't support the ev_loop_callback typedef here. stinks. */ VAR (release_cb, void (*release_cb)(EV_P) EV_NOEXCEPT) VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_NOEXCEPT) VAR (invoke_cb , ev_loop_callback invoke_cb) #endif #undef VARx EV-4.33/libev/README0000644000000000000000000000503613556456317012457 0ustar rootrootlibev is a high-performance event loop/event model with lots of features. (see benchmark at http://libev.schmorp.de/bench.html) ABOUT Homepage: http://software.schmorp.de/pkg/libev Mailinglist: libev@lists.schmorp.de http://lists.schmorp.de/cgi-bin/mailman/listinfo/libev Library Documentation: http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod Libev is modelled (very losely) after libevent and the Event perl module, but is faster, scales better and is more correct, and also more featureful. And also smaller. Yay. Some of the specialties of libev not commonly found elsewhere are: - extensive and detailed, readable documentation (not doxygen garbage). - fully supports fork, can detect fork in various ways and automatically re-arms kernel mechanisms that do not support fork. - highly optimised select, poll, linux epoll, linux aio, bsd kqueue and solaris event ports backends. - filesystem object (path) watching (with optional linux inotify support). - wallclock-based times (using absolute time, cron-like). - relative timers/timeouts (handle time jumps). - fast intra-thread communication between multiple event loops (with optional fast linux eventfd backend). - extremely easy to embed (fully documented, no dependencies, autoconf supported but optional). - very small codebase, no bloated library, simple code. - fully extensible by being able to plug into the event loop, integrate other event loops, integrate other event loop users. - very little memory use (small watchers, small event loop data). - optional C++ interface allowing method and function callbacks at no extra memory or runtime overhead. 
- optional Perl interface with similar characteristics (capable of running Glib/Gtk2 on libev). - support for other languages (multiple C++ interfaces, D, Ruby, Python) available from third-parties. Examples of programs that embed libev: the EV perl module, node.js, auditd, rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the Deliantra MMORPG server (http://www.deliantra.net/), Rubinius (a next-generation Ruby VM), the Ebb web server, the Rev event toolkit. CONTRIBUTORS libev was written and designed by Marc Lehmann and Emanuele Giaquinta. The following people sent in patches or made other noteworthy contributions to the design (for minor patches, see the Changes file. If I forgot to include you, please shout at me, it was an accident): W.C.A. Wijngaards Christopher Layne Chris Brody EV-4.33/libev/ev_iouring.c0000644000000000000000000005133413631212545014076 0ustar rootroot/* * libev linux io_uring fd activity backend * * Copyright (c) 2019-2020 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* * general notes about linux io_uring: * * a) it's the best interface I have seen so far. on linux. * b) best is not necessarily very good. * c) it's better than the aio mess, doesn't suffer from the fork problems * of linux aio or epoll and so on and so on. and you could do event stuff * without any syscalls. what's not to like? * d) ok, it's vastly more complex, but that's ok, really. * e) why two mmaps instead of one? one would be more space-efficient, * and I can't see what benefit two would have (other than being * somehow resizable/relocatable, but that's apparently not possible). 
* f) hmm, it's practically undebuggable (gdb can't access the memory, and * the bizarre way structure offsets are communicated makes it hard to * just print the ring buffer heads, even *iff* the memory were visible * in gdb. but then, that's also ok, really. * g) well, you cannot specify a timeout when waiting for events. no, * seriously, the interface doesn't support a timeout. never seen _that_ * before. sure, you can use a timerfd, but that's another syscall * you could have avoided. overall, this bizarre omission smells * like a µ-optimisation by the io_uring author for his personal * applications, to the detriment of everybody else who just wants * an event loop. but, umm, ok, if that's all, it could be worse. * (from what I gather from the author Jens Axboe, it simply didn't * occur to him, and he made good on it by adding an unlimited nuber * of timeouts later :). * h) initially there was a hardcoded limit of 4096 outstanding events. * later versions not only bump this to 32k, but also can handle * an unlimited amount of events, so this only affects the batch size. * i) unlike linux aio, you *can* register more then the limit * of fd events. while early verisons of io_uring signalled an overflow * and you ended up getting wet. 5.5+ does not do this anymore. * j) but, oh my! it had exactly the same bugs as the linux aio backend, * where some undocumented poll combinations just fail. fortunately, * after finally reaching the author, he was more than willing to fix * this probably in 5.6+. * k) overall, the *API* itself is, I dare to say, not a total trainwreck. * once the bugs ae fixed (probably in 5.6+), it will be without * competition. */ /* TODO: use internal TIMEOUT */ /* TODO: take advantage of single mmap, NODROP etc. */ /* TODO: resize cq/sq size independently */ #include #include #include #include #define IOURING_INIT_ENTRIES 32 /*****************************************************************************/ /* syscall wrapdadoop - this section has the raw api/abi definitions */ #include #include /* mostly directly taken from the kernel or documentation */ struct io_uring_sqe { __u8 opcode; __u8 flags; __u16 ioprio; __s32 fd; union { __u64 off; __u64 addr2; }; __u64 addr; __u32 len; union { __kernel_rwf_t rw_flags; __u32 fsync_flags; __u16 poll_events; __u32 sync_range_flags; __u32 msg_flags; __u32 timeout_flags; __u32 accept_flags; __u32 cancel_flags; __u32 open_flags; __u32 statx_flags; }; __u64 user_data; union { __u16 buf_index; __u64 __pad2[3]; }; }; struct io_uring_cqe { __u64 user_data; __s32 res; __u32 flags; }; struct io_sqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 flags; __u32 dropped; __u32 array; __u32 resv1; __u64 resv2; }; struct io_cqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 overflow; __u32 cqes; __u64 resv[2]; }; struct io_uring_params { __u32 sq_entries; __u32 cq_entries; __u32 flags; __u32 sq_thread_cpu; __u32 sq_thread_idle; __u32 features; __u32 resv[4]; struct io_sqring_offsets sq_off; struct io_cqring_offsets cq_off; }; #define IORING_SETUP_CQSIZE 0x00000008 #define IORING_OP_POLL_ADD 6 #define IORING_OP_POLL_REMOVE 7 #define IORING_OP_TIMEOUT 11 #define IORING_OP_TIMEOUT_REMOVE 12 /* relative or absolute, reference clock is CLOCK_MONOTONIC */ struct iouring_kernel_timespec { int64_t tv_sec; long long tv_nsec; }; #define IORING_TIMEOUT_ABS 0x00000001 #define IORING_ENTER_GETEVENTS 0x01 #define IORING_OFF_SQ_RING 0x00000000ULL #define IORING_OFF_CQ_RING 0x08000000ULL #define 
IORING_OFF_SQES 0x10000000ULL #define IORING_FEAT_SINGLE_MMAP 0x00000001 #define IORING_FEAT_NODROP 0x00000002 #define IORING_FEAT_SUBMIT_STABLE 0x00000004 inline_size int evsys_io_uring_setup (unsigned entries, struct io_uring_params *params) { return ev_syscall2 (SYS_io_uring_setup, entries, params); } inline_size int evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz) { return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz); } /*****************************************************************************/ /* actual backed implementation */ /* we hope that volatile will make the compiler access this variables only once */ #define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name) #define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name) /* the index array */ #define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array)) /* the submit/completion queue entries */ #define EV_SQES ((struct io_uring_sqe *) iouring_sqes) #define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes)) inline_speed int iouring_enter (EV_P_ ev_tstamp timeout) { int res; EV_RELEASE_CB; res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1, timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0); assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit))); iouring_to_submit = 0; EV_ACQUIRE_CB; return res; } /* TODO: can we move things around so we don't need this forward-reference? */ static void iouring_poll (EV_P_ ev_tstamp timeout); static struct io_uring_sqe * iouring_sqe_get (EV_P) { unsigned tail; for (;;) { tail = EV_SQ_VAR (tail); if (ecb_expect_true (tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries))) break; /* whats the problem, we have free sqes */ /* queue full, need to flush and possibly handle some events */ #if EV_FEATURE_CODE /* first we ask the kernel nicely, most often this frees up some sqes */ int res = iouring_enter (EV_A_ EV_TS_CONST (0.)); ECB_MEMORY_FENCE_ACQUIRE; /* better safe than sorry */ if (res >= 0) continue; /* yes, it worked, try again */ #endif /* some problem, possibly EBUSY - do the full poll and let it handle any issues */ iouring_poll (EV_A_ EV_TS_CONST (0.)); /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE for us */ } /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/ return EV_SQES + (tail & EV_SQ_VAR (ring_mask)); } inline_size struct io_uring_sqe * iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe) { unsigned idx = sqe - EV_SQES; EV_SQ_ARRAY [idx] = idx; ECB_MEMORY_FENCE_RELEASE; ++EV_SQ_VAR (tail); /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */ ++iouring_to_submit; } /*****************************************************************************/ /* when the timerfd expires we simply note the fact, * as the purpose of the timerfd is to wake us up, nothing else. * the next iteration should re-set it. 
*/ static void iouring_tfd_cb (EV_P_ struct ev_io *w, int revents) { iouring_tfd_to = EV_TSTAMP_HUGE; } /* called for full and partial cleanup */ ecb_cold static int iouring_internal_destroy (EV_P) { close (iouring_tfd); close (iouring_fd); if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size); if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size); if (iouring_sqes != MAP_FAILED) munmap (iouring_sqes , iouring_sqes_size ); if (ev_is_active (&iouring_tfd_w)) { ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w); } } ecb_cold static int iouring_internal_init (EV_P) { struct io_uring_params params = { 0 }; iouring_to_submit = 0; iouring_tfd = -1; iouring_sq_ring = MAP_FAILED; iouring_cq_ring = MAP_FAILED; iouring_sqes = MAP_FAILED; if (!have_monotonic) /* cannot really happen, but what if11 */ return -1; for (;;) { iouring_fd = evsys_io_uring_setup (iouring_entries, ¶ms); if (iouring_fd >= 0) break; /* yippie */ if (errno != EINVAL) return -1; /* we failed */ #if TODO if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEATURE_SINGLE_MMAP | IORING_FEAT_SUBMIT_STABLE)) return -1; /* we require the above features */ #endif /* EINVAL: lots of possible reasons, but maybe * it is because we hit the unqueryable hardcoded size limit */ /* we hit the limit already, give up */ if (iouring_max_entries) return -1; /* first time we hit EINVAL? assume we hit the limit, so go back and retry */ iouring_entries >>= 1; iouring_max_entries = iouring_entries; } iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned); iouring_cq_ring_size = params.cq_off.cqes + params.cq_entries * sizeof (struct io_uring_cqe); iouring_sqes_size = params.sq_entries * sizeof (struct io_uring_sqe); iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING); iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING); iouring_sqes = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES); if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED) return -1; iouring_sq_head = params.sq_off.head; iouring_sq_tail = params.sq_off.tail; iouring_sq_ring_mask = params.sq_off.ring_mask; iouring_sq_ring_entries = params.sq_off.ring_entries; iouring_sq_flags = params.sq_off.flags; iouring_sq_dropped = params.sq_off.dropped; iouring_sq_array = params.sq_off.array; iouring_cq_head = params.cq_off.head; iouring_cq_tail = params.cq_off.tail; iouring_cq_ring_mask = params.cq_off.ring_mask; iouring_cq_ring_entries = params.cq_off.ring_entries; iouring_cq_overflow = params.cq_off.overflow; iouring_cq_cqes = params.cq_off.cqes; iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC); if (iouring_tfd < 0) return iouring_tfd; iouring_tfd_to = EV_TSTAMP_HUGE; return 0; } ecb_cold static void iouring_fork (EV_P) { iouring_internal_destroy (EV_A); while (iouring_internal_init (EV_A) < 0) ev_syserr ("(libev) io_uring_setup"); fd_rearm_all (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w); ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ); ev_io_start (EV_A_ &iouring_tfd_w); } /*****************************************************************************/ static void iouring_modify (EV_P_ int fd, int oev, int nev) { if (oev) { /* we assume the sqe's are all "properly" initialised */ struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); sqe->opcode 
= IORING_OP_POLL_REMOVE; sqe->fd = fd; /* Jens Axboe notified me that user_data is not what is documented, but is * some kind of unique ID that has to match, otherwise the request cannot * be removed. Since we don't *really* have that, we pass in the old * generation counter - if that fails, too bad, it will hopefully be removed * at close time and then be ignored. */ sqe->addr = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); sqe->user_data = (uint64_t)-1; iouring_sqe_submit (EV_A_ sqe); /* increment generation counter to avoid handling old events */ ++anfds [fd].egen; } if (nev) { struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); sqe->opcode = IORING_OP_POLL_ADD; sqe->fd = fd; sqe->addr = 0; sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); sqe->poll_events = (nev & EV_READ ? POLLIN : 0) | (nev & EV_WRITE ? POLLOUT : 0); iouring_sqe_submit (EV_A_ sqe); } } inline_size void iouring_tfd_update (EV_P_ ev_tstamp timeout) { ev_tstamp tfd_to = mn_now + timeout; /* we assume there will be many iterations per timer change, so * we only re-set the timerfd when we have to because its expiry * is too late. */ if (ecb_expect_false (tfd_to < iouring_tfd_to)) { struct itimerspec its; iouring_tfd_to = tfd_to; EV_TS_SET (its.it_interval, 0.); EV_TS_SET (its.it_value, tfd_to); if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0) assert (("libev: iouring timerfd_settime failed", 0)); } } inline_size void iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe) { int fd = cqe->user_data & 0xffffffffU; uint32_t gen = cqe->user_data >> 32; int res = cqe->res; /* user_data -1 is a remove that we are not atm. interested in */ if (cqe->user_data == (uint64_t)-1) return; assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); /* documentation lies, of course. the result value is NOT like * normal syscalls, but like linux raw syscalls, i.e. negative * error numbers. fortunate, as otherwise there would be no way * to get error codes at all. still, why not document this? */ /* ignore event if generation doesn't match */ /* other than skipping removal events, */ /* this should actually be very rare */ if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) return; if (ecb_expect_false (res < 0)) { /*TODO: EINVAL handling (was something failed with this fd)*/ if (res == -EBADF) { assert (("libev: event loop rejected bad fd", res != -EBADF)); fd_kill (EV_A_ fd); } else { errno = -res; ev_syserr ("(libev) IORING_OP_POLL_ADD"); } return; } /* feed events, we do not expect or handle POLLNVAL */ fd_event ( EV_A_ fd, (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); /* io_uring is oneshot, so we need to re-arm the fd next iteration */ /* this also means we usually have to do at least one syscall per iteration */ anfds [fd].events = 0; fd_change (EV_A_ fd, EV_ANFD_REIFY); } /* called when the event queue overflows */ ecb_cold static void iouring_overflow (EV_P) { /* we have two options, resize the queue (by tearing down * everything and recreating it, or living with it * and polling. * we implement this by resizing the queue, and, if that fails, * we just recreate the state on every failure, which * kind of is a very inefficient poll. * one danger is, due to the bios toward lower fds, * we will only really get events for those, so * maybe we need a poll() fallback, after all. 
*/ /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */ fd_rearm_all (EV_A); /* we double the size until we hit the hard-to-probe maximum */ if (!iouring_max_entries) { iouring_entries <<= 1; iouring_fork (EV_A); } else { /* we hit the kernel limit, we should fall back to something else. * we can either poll() a few times and hope for the best, * poll always, or switch to epoll. * TODO: is this necessary with newer kernels? */ iouring_internal_destroy (EV_A); /* this should make it so that on return, we don't call any uring functions */ iouring_to_submit = 0; for (;;) { backend = epoll_init (EV_A_ 0); if (backend) break; ev_syserr ("(libev) iouring switch to epoll"); } } } /* handle any events in the completion queue, return true if there were any */ static int iouring_handle_cq (EV_P) { unsigned head, tail, mask; head = EV_CQ_VAR (head); ECB_MEMORY_FENCE_ACQUIRE; tail = EV_CQ_VAR (tail); if (head == tail) return 0; /* it can only overflow if we have events, yes, yes? */ if (ecb_expect_false (EV_CQ_VAR (overflow))) { iouring_overflow (EV_A); return 1; } mask = EV_CQ_VAR (ring_mask); do iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]); while (head != tail); EV_CQ_VAR (head) = head; ECB_MEMORY_FENCE_RELEASE; return 1; } static void iouring_poll (EV_P_ ev_tstamp timeout) { /* if we have events, no need for extra syscalls, but we might have to queue events */ /* we also clar the timeout if there are outstanding fdchanges */ /* the latter should only happen if both the sq and cq are full, most likely */ /* because we have a lot of event sources that immediately complete */ /* TODO: fdchacngecnt is always 0 because fd_reify does not have two buffers yet */ if (iouring_handle_cq (EV_A) || fdchangecnt) timeout = EV_TS_CONST (0.); else /* no events, so maybe wait for some */ iouring_tfd_update (EV_A_ timeout); /* only enter the kernel if we have something to submit, or we need to wait */ if (timeout || iouring_to_submit) { int res = iouring_enter (EV_A_ timeout); if (ecb_expect_false (res < 0)) if (errno == EINTR) /* ignore */; else if (errno == EBUSY) /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */; else ev_syserr ("(libev) iouring setup"); else iouring_handle_cq (EV_A); } } inline_size int iouring_init (EV_P_ int flags) { iouring_entries = IOURING_INIT_ENTRIES; iouring_max_entries = 0; if (iouring_internal_init (EV_A) < 0) { iouring_internal_destroy (EV_A); return 0; } ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ); ev_set_priority (&iouring_tfd_w, EV_MINPRI); ev_io_start (EV_A_ &iouring_tfd_w); ev_unref (EV_A); /* watcher should not keep loop alive */ backend_modify = iouring_modify; backend_poll = iouring_poll; return EVBACKEND_IOURING; } inline_size void iouring_destroy (EV_P) { iouring_internal_destroy (EV_A); } EV-4.33/libev/ev_win32.c0000644000000000000000000001234213565045616013371 0ustar rootroot/* * libev win32 compatibility cruft (_not_ a backend) * * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifdef _WIN32 /* note: the comment below could not be substantiated, but what would I care */ /* MSDN says this is required to handle SIGFPE */ /* my wild guess would be that using something floating-pointy is required */ /* for the crt to do something about it */ volatile double SIGFPE_REQ = 0.0f; static SOCKET ev_tcp_socket (void) { #if EV_USE_WSASOCKET return WSASocket (AF_INET, SOCK_STREAM, 0, 0, 0, 0); #else return socket (AF_INET, SOCK_STREAM, 0); #endif } /* oh, the humanity! */ static int ev_pipe (int filedes [2]) { struct sockaddr_in addr = { 0 }; int addr_size = sizeof (addr); struct sockaddr_in adr2; int adr2_size = sizeof (adr2); SOCKET listener; SOCKET sock [2] = { -1, -1 }; if ((listener = ev_tcp_socket ()) == INVALID_SOCKET) return -1; addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK); addr.sin_port = 0; if (bind (listener, (struct sockaddr *)&addr, addr_size)) goto fail; if (getsockname (listener, (struct sockaddr *)&addr, &addr_size)) goto fail; if (listen (listener, 1)) goto fail; if ((sock [0] = ev_tcp_socket ()) == INVALID_SOCKET) goto fail; if (connect (sock [0], (struct sockaddr *)&addr, addr_size)) goto fail; /* TODO: returns INVALID_SOCKET on winsock accept, not < 0. fix it */ /* when convenient, probably by just removing error checking altogether? */ if ((sock [1] = accept (listener, 0, 0)) < 0) goto fail; /* windows vista returns fantasy port numbers for sockets: * example for two interconnected tcp sockets: * * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364 * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363 * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363 * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365 * * wow! tridirectional sockets! 
* * this way of checking ports seems to work: */ if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size)) goto fail; if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size)) goto fail; errno = WSAEINVAL; if (addr_size != adr2_size || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */ || addr.sin_port != adr2.sin_port) goto fail; closesocket (listener); #if EV_SELECT_IS_WINSOCKET filedes [0] = EV_WIN32_HANDLE_TO_FD (sock [0]); filedes [1] = EV_WIN32_HANDLE_TO_FD (sock [1]); #else /* when select isn't winsocket, we also expect socket, connect, accept etc. * to work on fds */ filedes [0] = sock [0]; filedes [1] = sock [1]; #endif return 0; fail: closesocket (listener); if (sock [0] != INVALID_SOCKET) closesocket (sock [0]); if (sock [1] != INVALID_SOCKET) closesocket (sock [1]); return -1; } #undef pipe #define pipe(filedes) ev_pipe (filedes) #define EV_HAVE_EV_TIME 1 ev_tstamp ev_time (void) { FILETIME ft; ULARGE_INTEGER ui; GetSystemTimeAsFileTime (&ft); ui.u.LowPart = ft.dwLowDateTime; ui.u.HighPart = ft.dwHighDateTime; /* also, msvc cannot convert ulonglong to double... yes, it is that sucky */ return EV_TS_FROM_USEC (((LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-1)); } #endif EV-4.33/libev/Changes0000644000000000000000000007750313634411252013064 0ustar rootrootRevision history for libev, a high-performance and full-featured event loop. TODO: for next ABI/API change, consider moving EV__IOFDSSET into io->fd instead and provide a getter. TODO: document EV_TSTAMP_T 4.33 Wed Mar 18 13:22:29 CET 2020 - no changes w.r.t. 4.32. 4.32 (EV only) - the 4.31 timerfd code wrongly changed the priority of the signal fd watcher, which is usually harmless unless signal fds are also used (found via cpan tester service). - the documentation wrongly claimed that user may modify fd and events members in io watchers when the watcher was stopped (found by b_jonas). - new ev_io_modify mutator which changes only the events member, which can be faster. also added ev::io::set (int events) method to ev++.h. - officially allow a zero events mask for io watchers. this should work with older libev versions as well but was not officially allowed before. - do not wake up every minute when timerfd is used to detect timejumps. - do not wake up every minute when periodics are disabled and we have a monotonic clock. - support a lot more "uncommon" compile time configurations, such as ev_embed enabled but ev_timer disabled. - use a start/stop wrapper class to reduce code duplication in ev++.h and make it needlessly more c++-y. - the linux aio backend is no longer compiled in by default. - update to libecb version 0x00010008. 4.31 Fri Dec 20 21:58:29 CET 2019 - handle backends with minimum wait time a bit better by not waiting in the presence of already-expired timers (behaviour reported by Felipe Gasper). - new feature: use timerfd to detect timejumps quickly, can be disabled with the new EVFLAG_NOTIMERFD loop flag. - document EV_USE_SIGNALFD feature macro. 4.30 (EV only) - change non-autoconf test for __kernel_rwf_t by testing LINUX_VERSION_CODE, the most direct test I could find. - fix a bug in the io_uring backend that polled the wrong backend fd, causing it to not work in many cases. 4.29 (EV only) - add io uring autoconf and non-autoconf detection. - disable io_uring when some header files are too old. 4.28 (EV only) - linuxaio backend resulted in random memory corruption when loop is forked. 
- linuxaio backend might have tried to cancel an iocb multiple times (was unable to trigger this). - linuxaio backend now employs a generation counter to avoid handling spurious events from cancelled requests. - io_cancel can return EINTR, deal with it. also, assume io_submit also returns EINTR. - fix some other minor bugs in linuxaio backend. - ev_tstamp type can now be overriden by defining EV_TSTAMP_T. - cleanup: replace expect_true/false and noinline by their libecb counterparts. - move syscall infrastructure from ev_linuxaio.c to ev.c. - prepare io_uring integration. - tweak ev_floor. - epoll, poll, win32 Sleep and other places that use millisecond reslution now all try to round up times. - solaris port backend didn't compile. - abstract time constants into their macros, for more flexibility. 4.27 Thu Jun 27 22:43:44 CEST 2019 - linux aio backend almost completely rewritten to work around its limitations. - linux aio backend now requires linux 4.19+. - epoll backend now mandatory for linux aio backend. - fail assertions more aggressively on invalid fd's detected in the event loop, do not just silently fd_kill in case of user error. - ev_io_start/ev_io_stop now verify the watcher fd using a syscall when EV_VERIFY is 2 or higher. 4.26 (EV only) - update to libecb 0x00010006. - new experimental linux aio backend (linux 4.18+). - removed redundant 0-ptr check in ev_once. - updated/extended ev_set_allocator documentation. - replaced EMPTY2 macro by array_needsize_noinit. - minor code cleanups. - epoll backend now uses epoll_create1 also after fork. 4.25 Fri Dec 21 07:49:20 CET 2018 - INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT (EV_THROW still provided) and now uses noexcept on C++11 or newer. - move the darwin select workaround higher in ev.c, as newer versions of darwin managed to break their broken select even more. - ANDROID => __ANDROID__ (reported by enh@google.com). - disable epoll_create1 on android because it has broken header files and google is unwilling to fix them (reported by enh@google.com). - avoid a minor compilation warning on win32. - c++: remove deprecated dynamic throw() specifications. - c++: improve the (unsupported) bad_loop exception class. - backport perl ev_periodic example to C, untested. - update libecb, biggets change is to include a memory fence in ECB_MEMORY_FENCE_RELEASE on x86/amd64. - minor autoconf/automake modernisation. 4.24 Wed Dec 28 05:19:55 CET 2016 - bump version to 4.24, as the release tarball inexplicably didn't have the right version in ev.h, even though the cvs-tagged version did have the right one (reported by Ales Teska). 4.23 Wed Nov 16 18:23:41 CET 2016 - move some declarations at the beginning to help certain retarded microsoft compilers, even though their documentation claims otherwise (reported by Ruslan Osmanov). 4.22 Sun Dec 20 22:11:50 CET 2015 - when epoll detects unremovable fds in the fd set, rebuild only the epoll descriptor, not the signal pipe, to avoid SIGPIPE in ev_async_send. This doesn't solve it on fork, so document what needs to be done in ev_loop_fork (analyzed by Benjamin Mahler). - remove superfluous sys/timeb.h include on win32 (analyzed by Jason Madden). - updated libecb. 4.20 Sat Jun 20 13:01:43 CEST 2015 - prefer noexcept over throw () with C++ 11. - update ecb.h due to incompatibilities with c11. - fix a potential aliasing issue when reading and writing watcher callbacks. 
4.19 Thu Sep 25 08:18:25 CEST 2014 - ev.h wasn't valid C++ anymore, which tripped compilers other than clang, msvc or gcc (analyzed by Raphael 'kena' Poss). Unfortunately, C++ doesn't support typedefs for function pointers fully, so the affected declarations have to spell out the types each time. - when not using autoconf, tighten the check for clock_gettime and related functionality. 4.18 Fri Sep 5 17:55:26 CEST 2014 - events on files were not always generated properly with the epoll backend (testcase by Assaf Inbal). - mark event pipe fd as cloexec after a fork (analyzed by Sami Farin). - (ecb) support m68k, m88k and sh (patch by Miod Vallat). - use a reasonable fallback for EV_NSIG instead of erroring out when we can't detect the signal set size. - in the absence of autoconf, do not use the clock syscall on glibc >= 2.17 (avoids the syscall AND -lrt on systems doing clock_gettime in userspace). - ensure extern "C" function pointers are used for externally-visible loop callbacks (not watcher callbacks yet). - (ecb) work around memory barriers and volatile apparently both being broken in visual studio 2008 and later (analysed and patch by Nicolas Noble). 4.15 Fri Mar 1 12:04:50 CET 2013 - destroying a non-default loop would stop the global waitpid watcher (Denis Bilenko). - queueing pending watchers of higher priority from a watcher now invokes them in a timely fashion (reported by Denis Bilenko). - add throw() to all libev functions that cannot throw exceptions, for further code size decrease when compiling for C++. - add throw () to callbacks that must not throw exceptions (allocator, syserr, loop acquire/release, periodic reschedule cbs). - fix event_base_loop return code, add event_get_callback, event_base_new, event_base_get_method calls to improve libevent 1.x emulation and add some libevent 2.x functionality (based on a patch by Jeff Davey). - add more memory fences to fix a bug reported by Jeff Davey. Better be overfenced than underprotected. - ev_run now returns a boolean status (true meaning watchers are still active). - ev_once: undef EV_ERROR in ev_kqueue.c, to avoid clashing with libev's EV_ERROR (reported by 191919). - (ecb) add memory fence support for xlC (Darin McBride). - (ecb) add memory fence support for gcc-mips (Anton Kirilov). - (ecb) add memory fence support for gcc-alpha (Christian Weisgerber). - work around some kernels losing file descriptors by leaking the kqueue descriptor in the child. - work around linux inotify not reporting IN_ATTRIB changes for directories in many cases. - include sys/syscall.h instead of plain syscall.h. - check for io watcher loops in ev_verify, check for the most common reported usage bug in ev_io_start. - choose socket vs. WSASocket at compiletime using EV_USE_WSASOCKET. - always use WSASend/WSARecv directly on windows, hoping that this works in all cases (unlike read/write/send/recv...). - try to detect signals around a fork faster (test program by Denis Bilenko). - work around recent glibc versions that leak memory in realloc. - rename ev::embed::set to ev::embed::set_embed to avoid clashing the watcher base set (loop) method. - rewrite the async/signal pipe logic to always keep a valid fd, which simplifies (and hopefully correctifies :) the race checking on fork, at the cost of one extra fd. - add fat, msdos, jffs2, ramfs, ntfs and btrfs to the list of inotify-supporting filesystems. - move orig_CFLAGS assignment to after AC_INIT, as newer autoconf versions ignore it before (https://bugzilla.redhat.com/show_bug.cgi?id=908096). 
- add some untested android support. - enum expressions must be of type int (reported by Juan Pablo L). 4.11 Sat Feb 4 19:52:39 CET 2012 - INCOMPATIBLE CHANGE: ev_timer_again now clears the pending status, as was documented already, but not implemented in the repeating case. - new compiletime symbols: EV_NO_SMP and EV_NO_THREADS. - fix a race where the workaround against the epoll fork bugs caused signals to not be handled anymore. - correct backend_fudge for most backends, and implement a windows specific workaround to avoid looping because we call both select and Sleep, both with different time resolutions. - document range and guarantees of ev_sleep. - document reasonable ranges for periodics interval and offset. - rename backend_fudge to backend_mintime to avoid future confusion :) - change the default periodic reschedule function to hopefully be more exact and correct even in corner cases or in the far future. - do not rely on -lm anymore: use it when available but use our own floor () if it is missing. This should make it easier to embed, as no external libraries are required. - strategically import macros from libecb and mark rarely-used functions as cache-cold (saving almost 2k code size on typical amd64 setups). - add Symbols.ev and Symbols.event files, that were missing. - fix backend_mintime value for epoll (was 1/1024, is 1/1000 now). - fix #3 "be smart about timeouts" to not "deadlock" when timeout == now, also improve the section overall. - avoid "AVOIDING FINISHING BEFORE RETURNING" idiom. - support new EV_API_STATIC mode to make all libev symbols static. - supply default CFLAGS of -g -O3 with gcc when original CFLAGS were empty. 4.04 Wed Feb 16 09:01:51 CET 2011 - fix two problems in the native win32 backend, where reuse of fd's with different underlying handles caused handles not to be removed or added to the select set (analyzed and tested by Bert Belder). - do no rely on ceil() in ev_e?poll.c. - backport libev to HP-UX versions before 11 v3. - configure did not detect nanosleep and clock_gettime properly when they are available in the libc (as opposed to -lrt). 4.03 Tue Jan 11 14:37:25 CET 2011 - officially support polling files with all backends. - support files, /dev/zero etc. the same way as select in the epoll backend, by generating events on our own. - ports backend: work around solaris bug 6874410 and many related ones (EINTR, maybe more), with no performance loss (note that the solaris bug report is actually wrong, reality is far more bizarre and broken than that). - define EV_READ/EV_WRITE as macros in event.h, as some programs use #ifdef to test for them. - new (experimental) function: ev_feed_signal. - new (to become default) EVFLAG_NOSIGMASK flag. - new EVBACKEND_MASK symbol. - updated COMMON IDIOMS SECTION. 4.01 Fri Nov 5 21:51:29 CET 2010 - automake fucked it up, apparently, --add-missing -f is not quite enough to make it update its files, so 4.00 didn't install ev++.h and event.h on make install. grrr. - ev_loop(count|depth) didn't return anything (Robin Haberkorn). - change EV_UNDEF to 0xffffffff to silence some overzealous compilers. - use "(libev) " prefix for all libev error messages now. 4.00 Mon Oct 25 12:32:12 CEST 2010 - "PORTING FROM LIBEV 3.X TO 4.X" (in ev.pod) is recommended reading. - ev_embed_stop did not correctly stop the watcher (very good testcase by Vladimir Timofeev). - ev_run will now always update the current loop time - it erroneously didn't when idle watchers were active, causing timers not to fire. 
- fix a bug where a timeout of zero caused the timer not to fire in the libevent emulation (testcase by Péter Szabó). - applied win32 fixes by Michael Lenaghan (also James Mansion). - replace EV_MINIMAL by EV_FEATURES. - prefer EPOLL_CTL_ADD over EPOLL_CTL_MOD in some more cases, as it seems the former is *much* faster than the latter. - linux kernel version detection (for inotify bug workarounds) did not work properly. - reduce the number of spurious wake-ups with the ports backend. - remove dependency on sys/queue.h on freebsd (patch by Vanilla Hsu). - do async init within ev_async_start, not ev_async_set, which avoids an API quirk where the set function must be called in the C++ API even when there is nothing to set. - add (undocumented) EV_ENABLE when adding events with kqueue, this might help with OS X, which seems to need it despite documenting not to need it (helpfully pointed out by Tilghman Lesher). - do not use poll by default on freebsd, it's broken (what isn't on freebsd...). - allow to embed epoll on kernels >= 2.6.32. - configure now prepends -O3, not appends it, so one can still override it. - ev.pod: greatly expanded the portability section, added a porting section, a description of watcher states and made lots of minor fixes. - disable poll backend on AIX, the poll header spams the namespace and it's not worth working around dead platforms (reported and analyzed by Aivars Kalvans). - improve header file compatibility of the standalone eventfd code in an obscure case. - implement EV_AVOID_STDIO option. - do not use sscanf to parse linux version number (smaller, faster, no sscanf dependency). - new EV_CHILD_ENABLE and EV_SIGNAL_ENABLE configurable settings. - update libev.m4 HAVE_CLOCK_SYSCALL test for newer glibcs. - add section on accept() problems to the manpage. - rename EV_TIMEOUT to EV_TIMER. - rename ev_loop_count/depth/verify/loop/unloop. - remove ev_default_destroy and ev_default_fork. - switch to two-digit minor version. - work around an apparent gentoo compiler bug. - define _DARWIN_UNLIMITED_SELECT. just so. - use enum instead of #define for most constants. - improve compatibility to older C++ compilers. - (experimental) ev_run/ev_default_loop/ev_break/ev_loop_new have now default arguments when compiled as C++. - enable automake dependency tracking. - ev_loop_new no longer leaks memory when loop creation failed. - new ev_cleanup watcher type. 3.9 Thu Dec 31 07:59:59 CET 2009 - signalfd is no longer used by default and has to be requested explicitly - this means that easy to catch bugs become hard to catch race conditions, but the users have spoken. - point out the unspecified signal mask in the documentation, and that this is a race condition regardless of EV_SIGNALFD. - backport inotify code to C89. - inotify file descriptors could leak into child processes. - ev_stat watchers could keep an erroneous extra ref on the loop, preventing exit when unregistering all watchers (testcases provided by ry@tinyclouds.org). - implement EV_WIN32_HANDLE_TO_FD and EV_WIN32_CLOSE_FD configuration symbols to make it easier for apps to do their own fd management. - support EV_IDLE_ENABLE being disabled in ev++.h (patch by Didier Spezia). - take advantage of inotify_init1, if available, to set cloexec/nonblock on fd creation, to avoid races. - the signal handling pipe wasn't always initialised under windows (analysed by lekma). - changed minimum glibc requirement from glibc 2.9 to 2.7, for signalfd. - add missing string.h include (Denis F. Latypoff). 
- only replace ev_stat.prev when we detect an actual difference, so prev is (almost) always different to attr. this might have caused the problems with 04_stat.t. - add ev::timer->remaining () method to C++ API. 3.8 Sun Aug 9 14:30:45 CEST 2009 - incompatible change: do not necessarily reset signal handler to SIG_DFL when a sighandler is stopped. - ev_default_destroy did not properly free or zero some members, potentially causing crashes and memory corruption on repeated ev_default_destroy/ev_default_loop calls. - take advantage of signalfd on GNU/Linux systems. - document that the signal mask might be in an unspecified state when using libev's signal handling. - take advantage of some GNU/Linux calls to set cloexec/nonblock on fd creation, to avoid race conditions. 3.7 Fri Jul 17 16:36:32 CEST 2009 - ev_unloop and ev_loop wrongly used a global variable to exit loops, instead of using a per-loop variable (bug caught by accident...). - the ev_set_io_collect_interval interpretation has changed. - add new functionality: ev_set_userdata, ev_userdata, ev_set_invoke_pending_cb, ev_set_loop_release_cb, ev_invoke_pending, ev_pending_count, together with a long example about thread locking. - add ev_timer_remaining (as requested by Denis F. Latypoff). - add ev_loop_depth. - calling ev_unloop in fork/prepare watchers will no longer poll for new events. - Denis F. Latypoff corrected many typos in example code snippets. - honor autoconf detection of EV_USE_CLOCK_SYSCALL, also double- check that the syscall number is available before trying to use it (reported by ry@tinyclouds). - use GetSystemTimeAsFileTime instead of _timeb on windows, for slightly higher accuracy. - properly declare ev_loop_verify and ev_now_update even when !EV_MULTIPLICITY. - do not compile in any priority code when EV_MAXPRI == EV_MINPRI. - support EV_MINIMAL==2 for a reduced API. - actually 0-initialise struct sigaction when installing signals. - add section on hibernate and stopped processes to ev_timer docs. 3.6 Tue Apr 28 02:49:30 CEST 2009 - multiple timers becoming ready within an event loop iteration will be invoked in the "correct" order now. - do not leave the event loop early just because we have no active watchers, fixing a problem when embedding a kqueue loop that has active kernel events but no registered watchers (reported by blacksand blacksand). - correctly zero the idx values for arrays, so destroying and reinitialising the default loop actually works (patch by Malek Hadj-Ali). - implement ev_suspend and ev_resume. - new EV_CUSTOM revents flag for use by applications. - add documentation section about priorities. - add a glossary to the documentation. - extend the ev_fork description slightly. - optimize a jump out of call_pending. 3.53 Sun Feb 15 02:38:20 CET 2009 - fix a bug in event pipe creation on win32 that would cause a failed assertion on event loop creation (patch by Malek Hadj-Ali). - probe for CLOCK_REALTIME support at runtime as well and fall back to gettimeofday if there is an error, to support older operating systems with newer header files/libraries. - prefer gettimeofday over clock_gettime with USE_CLOCK_SYSCALL (default most everywhere), otherwise not. 3.52 Wed Jan 7 21:43:02 CET 2009 - fix compilation of select backend in fd_set mode when NFDBITS is missing (to get it to compile on QNX, reported by Rodrigo Campos). - better select-nfds handling when select backend is in fd_set mode. - diagnose fd_set overruns when select backend is in fd_set mode. 
- due to a thinko, instead of disabling everything but select on the borked OS X platform, everything but select was allowed (reported by Emanuele Giaquinta). - actually verify that local and remote port are matching in libev's socketpair emulation, which makes denial-of-service attacks harder (but not impossible - it's windows). Make sure it even works under vista, which thinks that getpeer/sockname should return fantasy port numbers. - include "libev" in all assertion messages for potentially clearer diagnostics. - event_get_version (libevent compatibility) returned a useless string instead of the expected version string (patch by W.C.A. Wijngaards). 3.51 Wed Dec 24 23:00:11 CET 2008 - fix a bug where an inotify watcher was added twice, causing freezes on hash collisions (reported and analysed by Graham Leggett). - new config symbol, EV_USE_CLOCK_SYSCALL, to make libev use a direct syscall - slower, but no dependency on librt et al. - assume negative return values != -1 signals success of port_getn (http://cvs.epicsol.org/cgi/viewcvs.cgi/epic5/source/newio.c?rev=1.52) (no known failure reports, but it doesn't hurt). - fork detection in ev_embed now stops and restarts the watcher automatically. - EXPERIMENTAL: default the method to operator () in ev++.h, to make it nicer to use functors (requested by Benedek László). - fixed const object callbacks in ev++.h. - replaced loop_ref argument of watcher.set (loop) by a direct ev_loop * in ev++.h, to avoid clashes with functor patch. - do not try to watch the empty string via inotify. - inotify watchers could be leaked under certain circumstances. - OS X 10.5 is actually even more broken than earlier versions, so fall back to select on that piece of garbage. - fixed some weirdness in the ev_embed documentation. 3.49 Wed Nov 19 11:26:53 CET 2008 - ev_stat watchers will now use inotify as a mere hint on kernels <2.6.25, or if the filesystem is not in the "known to be good" list. - better mingw32 compatibility (it's not as borked as native win32) (analysed by Roger Pack). - include stdio.h in the example program, as too many people are confused by the weird C language otherwise. I guess the next thing I get told is that the "..." ellipses in the examples don't compile with their C compiler. 3.48 Thu Oct 30 09:02:37 CET 2008 - further optimise away the EPOLL_CTL_ADD/MOD combo in the epoll backend by assuming the kernel event mask hasn't changed if ADD fails with EEXIST. - work around spurious event notification bugs in epoll by using a 32-bit generation counter. recreate kernel state if we receive spurious notifications or unwanted events. this is very costly, but I didn't come up with this horrible design. - use memset to initialise most arrays now and do away with the init functions. - expand time-out strategies into a "Be smart about timeouts" section. - drop the "struct" from all ev_watcher declarations in the documentation and did other clarifications (yeah, it was a mistake to have a struct AND a function called ev_loop). - fix a bug where ev_default would not initialise the default loop again after it was destroyed with ev_default_destroy. - rename syserr to ev_syserr to avoid name clashes when embedding, do similar changes for event.c. 3.45 Tue Oct 21 21:59:26 CEST 2008 - disable inotify usage on linux <2.6.25, as it is broken (reported by Yoann Vandoorselaere). - ev_stat erroneously would try to add inotify watchers even when inotify wasn't available (this should only have a performance impact). 
- ev_once now passes both timeout and io to the callback if both occur concurrently, instead of giving timeouts precedence. - disable EV_USE_INOTIFY when sys/inotify.h is too old. 3.44 Mon Sep 29 05:18:39 CEST 2008 - embed watchers now automatically invoke ev_loop_fork on the embedded loop when the parent loop forks. - new function: ev_now_update (loop). - verify_watcher was not marked static. - improve the "associating..." manpage section. - documentation tweaks here and there. 3.43 Sun Jul 6 05:34:41 CEST 2008 - include more include files on windows to get struct _stati64 (reported by Chris Hulbert, but doesn't quite fix his issue). - add missing #include in ev.c on windows (reported by Matt Tolton). 3.42 Tue Jun 17 12:12:07 CEST 2008 - work around yet another windows bug: FD_SET actually adds fd's multiple times to the fd_*SET*, despite official MSN docs claiming otherwise. Reported and well-analysed by Matt Tolton. - define NFDBITS to 0 when EV_SELECT_IS_WINSOCKET to make it compile (reported any analysed by Chris Hulbert). - fix a bug in ev_ebadf (this function is only used to catch programming errors in the libev user). reported by Matt Tolton. - fix a bug in fd_intern on win32 (could lead to compile errors under some circumstances, but would work correctly if it compiles). reported by Matt Tolton. - (try to) work around missing lstat on windows. - pass in the write fd set as except fd set under windows. windows is so uncontrollably lame that it requires this. this means that switching off oobinline is not supported (but tcp/ip doesn't have oob, so that would be stupid anyways. - use posix module symbol to auto-detect monotonic clock presence and some other default values. 3.41 Fri May 23 18:42:54 CEST 2008 - work around an obscure bug in winsocket select: if you provide only empty fd sets then select returns WSAEINVAL. how sucky. - improve timer scheduling stability and reduce use of time_epsilon. - use 1-based 2-heap for EV_MINIMAL, simplifies code, reduces codesize and makes for better cache-efficiency. - use 3-based 4-heap for !EV_MINIMAL. this makes better use of cpu cache lines and gives better growth behaviour than 2-based heaps. - cache timestamp within heap for !EV_MINIMAL, to avoid random memory accesses. - document/add EV_USE_4HEAP and EV_HEAP_CACHE_AT. - fix a potential aliasing issue in ev_timer_again. - add/document ev_periodic_at, retract direct access to ->at. - improve ev_stat docs. - add portability requirements section. - fix manpage headers etc. - normalise WSA error codes to lower range on windows. - add consistency check code that can be called automatically or on demand to check for internal structures (ev_loop_verify). 3.31 Wed Apr 16 20:45:04 CEST 2008 - added last minute fix for ev_poll.c by Brandon Black. 3.3 Wed Apr 16 19:04:10 CEST 2008 - event_base_loopexit should return 0 on success (W.C.A. Wijngaards). - added linux eventfd support. - try to autodetect epoll and inotify support by libc header version if not using autoconf. - new symbols: EV_DEFAULT_UC and EV_DEFAULT_UC_. - declare functions defined in ev.h as inline if C99 or gcc are available. - enable inlining with gcc versions 2 and 3. - work around broken poll implementations potentially not clearing revents field in ev_poll (Brandon Black) (no such systems are known at this time). - work around a bug in realloc on openbsd and darwin, also makes the erroneous valgrind complaints go away (noted by various people). 
- fix ev_async_pending, add c++ wrapper for ev_async (based on patch sent by Johannes Deisenhofer). - add sensible set method to ev::embed. - made integer constants type int in ev.h. 3.2 Wed Apr 2 17:11:19 CEST 2008 - fix a 64 bit overflow issue in the select backend, by using fd_mask instead of int for the mask. - rename internal sighandler to avoid clash with very old perls. - entering ev_loop will not clear the ONESHOT or NONBLOCKING flags of any outer loops anymore. - add ev_async_pending. 3.1 Thu Mar 13 13:45:22 CET 2008 - implement ev_async watchers. - only initialise signal pipe on demand. - make use of sig_atomic_t configurable. - improved documentation. 3.0 Mon Jan 28 13:14:47 CET 2008 - API/ABI bump to version 3.0. - ev++.h includes "ev.h" by default now, not . - slightly improved documentation. - speed up signal detection after a fork. - only optionally return trace status changed in ev_child watchers. - experimental (and undocumented) loop wrappers for ev++.h. 2.01 Tue Dec 25 08:04:41 CET 2007 - separate Changes file. - fix ev_path_set => ev_stat_set typo. - remove event_compat.h from the libev tarball. - change how include files are found. - doc updates. - update licenses, explicitly allow for GPL relicensing. 2.0 Sat Dec 22 17:47:03 CET 2007 - new ev_sleep, ev_set_(io|timeout)_collect_interval. - removed epoll from embeddable fd set. - fix embed watchers. - renamed ev_embed.loop to other. - added exported Symbol tables. - undefine member wrapper macros at the end of ev.c. - respect EV_H in ev++.h. 1.86 Tue Dec 18 02:36:57 CET 2007 - fix memleak on loop destroy (not relevant for perl). 1.85 Fri Dec 14 20:32:40 CET 2007 - fix some aliasing issues w.r.t. timers and periodics (not relevant for perl). (for historic versions refer to EV/Changes, found in the Perl interface) 0.1 Wed Oct 31 21:31:48 CET 2007 - original version; hacked together in <24h. EV-4.33/libev/ev.h0000644000000000000000000007306113634411272012351 0ustar rootroot/* * libev native API header * * Copyright (c) 2007-2020 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. 
If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef EV_H_ #define EV_H_ #ifdef __cplusplus # define EV_CPP(x) x # if __cplusplus >= 201103L # define EV_NOEXCEPT noexcept # else # define EV_NOEXCEPT # endif #else # define EV_CPP(x) # define EV_NOEXCEPT #endif #define EV_THROW EV_NOEXCEPT /* pre-4.25, do not use in new code */ EV_CPP(extern "C" {) /*****************************************************************************/ /* pre-4.0 compatibility */ #ifndef EV_COMPAT3 # define EV_COMPAT3 1 #endif #ifndef EV_FEATURES # if defined __OPTIMIZE_SIZE__ # define EV_FEATURES 0x7c # else # define EV_FEATURES 0x7f # endif #endif #define EV_FEATURE_CODE ((EV_FEATURES) & 1) #define EV_FEATURE_DATA ((EV_FEATURES) & 2) #define EV_FEATURE_CONFIG ((EV_FEATURES) & 4) #define EV_FEATURE_API ((EV_FEATURES) & 8) #define EV_FEATURE_WATCHERS ((EV_FEATURES) & 16) #define EV_FEATURE_BACKENDS ((EV_FEATURES) & 32) #define EV_FEATURE_OS ((EV_FEATURES) & 64) /* these priorities are inclusive, higher priorities will be invoked earlier */ #ifndef EV_MINPRI # define EV_MINPRI (EV_FEATURE_CONFIG ? -2 : 0) #endif #ifndef EV_MAXPRI # define EV_MAXPRI (EV_FEATURE_CONFIG ? +2 : 0) #endif #ifndef EV_MULTIPLICITY # define EV_MULTIPLICITY EV_FEATURE_CONFIG #endif #ifndef EV_PERIODIC_ENABLE # define EV_PERIODIC_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_STAT_ENABLE # define EV_STAT_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_PREPARE_ENABLE # define EV_PREPARE_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CHECK_ENABLE # define EV_CHECK_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_IDLE_ENABLE # define EV_IDLE_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_FORK_ENABLE # define EV_FORK_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CLEANUP_ENABLE # define EV_CLEANUP_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_SIGNAL_ENABLE # define EV_SIGNAL_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_CHILD_ENABLE # ifdef _WIN32 # define EV_CHILD_ENABLE 0 # else # define EV_CHILD_ENABLE EV_FEATURE_WATCHERS #endif #endif #ifndef EV_ASYNC_ENABLE # define EV_ASYNC_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_EMBED_ENABLE # define EV_EMBED_ENABLE EV_FEATURE_WATCHERS #endif #ifndef EV_WALK_ENABLE # define EV_WALK_ENABLE 0 /* not yet */ #endif /*****************************************************************************/ #if EV_CHILD_ENABLE && !EV_SIGNAL_ENABLE # undef EV_SIGNAL_ENABLE # define EV_SIGNAL_ENABLE 1 #endif /*****************************************************************************/ #ifndef EV_TSTAMP_T # define EV_TSTAMP_T double #endif typedef EV_TSTAMP_T ev_tstamp; #include <string.h> /* for memmove */ #ifndef EV_ATOMIC_T # include <signal.h> # define EV_ATOMIC_T sig_atomic_t volatile #endif #if EV_STAT_ENABLE # ifdef _WIN32 # include <time.h> # include <sys/types.h> # endif # include <sys/stat.h> #endif /* support multiple event loops? 
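 *
 * when EV_MULTIPLICITY is enabled, API functions take a struct ev_loop * as
 * their first argument, spelled through the EV_P/EV_P_/EV_A/EV_A_ macros
 * defined below. a minimal, hedged user-side sketch (my_cb is a made-up
 * callback name, not something declared by this header):
 *
 *   static void my_cb (EV_P_ ev_timer *w, int revents)
 *   {
 *     ev_break (EV_A_ EVBREAK_ONE); // "loop" is supplied by EV_P_/EV_A_
 *   }
 *
 * with EV_MULTIPLICITY disabled, the same source compiles down to
 * my_cb (ev_timer *w, int revents) and ev_break (EVBREAK_ONE).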
*/ #if EV_MULTIPLICITY struct ev_loop; # define EV_P struct ev_loop *loop /* a loop as sole parameter in a declaration */ # define EV_P_ EV_P, /* a loop as first of multiple parameters */ # define EV_A loop /* a loop as sole argument to a function call */ # define EV_A_ EV_A, /* a loop as first of multiple arguments */ # define EV_DEFAULT_UC ev_default_loop_uc_ () /* the default loop, if initialised, as sole arg */ # define EV_DEFAULT_UC_ EV_DEFAULT_UC, /* the default loop as first of multiple arguments */ # define EV_DEFAULT ev_default_loop (0) /* the default loop as sole arg */ # define EV_DEFAULT_ EV_DEFAULT, /* the default loop as first of multiple arguments */ #else # define EV_P void # define EV_P_ # define EV_A # define EV_A_ # define EV_DEFAULT # define EV_DEFAULT_ # define EV_DEFAULT_UC # define EV_DEFAULT_UC_ # undef EV_EMBED_ENABLE #endif /* EV_INLINE is used for functions in header files */ #if __STDC_VERSION__ >= 199901L || __GNUC__ >= 3 # define EV_INLINE static inline #else # define EV_INLINE static #endif #ifdef EV_API_STATIC # define EV_API_DECL static #else # define EV_API_DECL extern #endif /* EV_PROTOTYPES can be used to switch of prototype declarations */ #ifndef EV_PROTOTYPES # define EV_PROTOTYPES 1 #endif /*****************************************************************************/ #define EV_VERSION_MAJOR 4 #define EV_VERSION_MINOR 33 /* eventmask, revents, events... */ enum { EV_UNDEF = (int)0xFFFFFFFF, /* guaranteed to be invalid */ EV_NONE = 0x00, /* no events */ EV_READ = 0x01, /* ev_io detected read will not block */ EV_WRITE = 0x02, /* ev_io detected write will not block */ EV__IOFDSET = 0x80, /* internal use only */ EV_IO = EV_READ, /* alias for type-detection */ EV_TIMER = 0x00000100, /* timer timed out */ #if EV_COMPAT3 EV_TIMEOUT = EV_TIMER, /* pre 4.0 API compatibility */ #endif EV_PERIODIC = 0x00000200, /* periodic timer timed out */ EV_SIGNAL = 0x00000400, /* signal was received */ EV_CHILD = 0x00000800, /* child/pid had status change */ EV_STAT = 0x00001000, /* stat data changed */ EV_IDLE = 0x00002000, /* event loop is idling */ EV_PREPARE = 0x00004000, /* event loop about to poll */ EV_CHECK = 0x00008000, /* event loop finished poll */ EV_EMBED = 0x00010000, /* embedded event loop needs sweep */ EV_FORK = 0x00020000, /* event loop resumed in child */ EV_CLEANUP = 0x00040000, /* event loop resumed in child */ EV_ASYNC = 0x00080000, /* async intra-loop signal */ EV_CUSTOM = 0x01000000, /* for use by user code */ EV_ERROR = (int)0x80000000 /* sent when an error occurs */ }; /* can be used to add custom fields to all watchers, while losing binary compatibility */ #ifndef EV_COMMON # define EV_COMMON void *data; #endif #ifndef EV_CB_DECLARE # define EV_CB_DECLARE(type) void (*cb)(EV_P_ struct type *w, int revents); #endif #ifndef EV_CB_INVOKE # define EV_CB_INVOKE(watcher,revents) (watcher)->cb (EV_A_ (watcher), (revents)) #endif /* not official, do not use */ #define EV_CB(type,name) void name (EV_P_ struct ev_ ## type *w, int revents) /* * struct member types: * private: you may look at them, but not change them, * and they might not mean anything to you. * ro: can be read anytime, but only changed when the watcher isn't active. * rw: can be read and modified anytime, even when the watcher is active. 
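 *
 * a hedged illustration of the ro/rw rules above (the watcher, fd, callback
 * and data names are made up for the example, not defined by this header):
 *
 *   ev_io w;
 *   ev_io_init (&w, my_read_cb, fd, EV_READ); // ro members: set while stopped
 *   w.data = my_state;                        // EV_COMMON "data" is rw anytime
 *   ev_io_start (EV_A_ &w);
 *   // to change fd or events later: ev_io_stop, ev_io_set, ev_io_start again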
* * some internal details that might be helpful for debugging: * * active is either 0, which means the watcher is not active, * or the array index of the watcher (periodics, timers) * or the array index + 1 (most other watchers) * or simply 1 for watchers that aren't in some array. * pending is either 0, in which case the watcher isn't, * or the array index + 1 in the pendings array. */ #if EV_MINPRI == EV_MAXPRI # define EV_DECL_PRIORITY #elif !defined (EV_DECL_PRIORITY) # define EV_DECL_PRIORITY int priority; #endif /* shared by all watchers */ #define EV_WATCHER(type) \ int active; /* private */ \ int pending; /* private */ \ EV_DECL_PRIORITY /* private */ \ EV_COMMON /* rw */ \ EV_CB_DECLARE (type) /* private */ #define EV_WATCHER_LIST(type) \ EV_WATCHER (type) \ struct ev_watcher_list *next; /* private */ #define EV_WATCHER_TIME(type) \ EV_WATCHER (type) \ ev_tstamp at; /* private */ /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher { EV_WATCHER (ev_watcher) } ev_watcher; /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher_list { EV_WATCHER_LIST (ev_watcher_list) } ev_watcher_list; /* base class, nothing to see here unless you subclass */ typedef struct ev_watcher_time { EV_WATCHER_TIME (ev_watcher_time) } ev_watcher_time; /* invoked when fd is either EV_READable or EV_WRITEable */ /* revent EV_READ, EV_WRITE */ typedef struct ev_io { EV_WATCHER_LIST (ev_io) int fd; /* ro */ int events; /* ro */ } ev_io; /* invoked after a specific time, repeatable (based on monotonic clock) */ /* revent EV_TIMEOUT */ typedef struct ev_timer { EV_WATCHER_TIME (ev_timer) ev_tstamp repeat; /* rw */ } ev_timer; /* invoked at some specific time, possibly repeating at regular intervals (based on UTC) */ /* revent EV_PERIODIC */ typedef struct ev_periodic { EV_WATCHER_TIME (ev_periodic) ev_tstamp offset; /* rw */ ev_tstamp interval; /* rw */ ev_tstamp (*reschedule_cb)(struct ev_periodic *w, ev_tstamp now) EV_NOEXCEPT; /* rw */ } ev_periodic; /* invoked when the given signal has been received */ /* revent EV_SIGNAL */ typedef struct ev_signal { EV_WATCHER_LIST (ev_signal) int signum; /* ro */ } ev_signal; /* invoked when sigchld is received and waitpid indicates the given pid */ /* revent EV_CHILD */ /* does not support priorities */ typedef struct ev_child { EV_WATCHER_LIST (ev_child) int flags; /* private */ int pid; /* ro */ int rpid; /* rw, holds the received pid */ int rstatus; /* rw, holds the exit status, use the macros from sys/wait.h */ } ev_child; #if EV_STAT_ENABLE /* st_nlink = 0 means missing file or other error */ # ifdef _WIN32 typedef struct _stati64 ev_statdata; # else typedef struct stat ev_statdata; # endif /* invoked each time the stat data changes for a given path */ /* revent EV_STAT */ typedef struct ev_stat { EV_WATCHER_LIST (ev_stat) ev_timer timer; /* private */ ev_tstamp interval; /* ro */ const char *path; /* ro */ ev_statdata prev; /* ro */ ev_statdata attr; /* ro */ int wd; /* wd for inotify, fd for kqueue */ } ev_stat; #endif /* invoked when the nothing else needs to be done, keeps the process from blocking */ /* revent EV_IDLE */ typedef struct ev_idle { EV_WATCHER (ev_idle) } ev_idle; /* invoked for each run of the mainloop, just before the blocking call */ /* you can still change events in any way you like */ /* revent EV_PREPARE */ typedef struct ev_prepare { EV_WATCHER (ev_prepare) } ev_prepare; /* invoked for each run of the mainloop, just after the blocking call */ /* revent EV_CHECK */ typedef struct 
ev_check { EV_WATCHER (ev_check) } ev_check; /* the callback gets invoked before check in the child process when a fork was detected */ /* revent EV_FORK */ typedef struct ev_fork { EV_WATCHER (ev_fork) } ev_fork; /* is invoked just before the loop gets destroyed */ /* revent EV_CLEANUP */ typedef struct ev_cleanup { EV_WATCHER (ev_cleanup) } ev_cleanup; #if EV_EMBED_ENABLE /* used to embed an event loop inside another */ /* the callback gets invoked when the event loop has handled events, and can be 0 */ typedef struct ev_embed { EV_WATCHER (ev_embed) struct ev_loop *other; /* ro */ #undef EV_IO_ENABLE #define EV_IO_ENABLE 1 ev_io io; /* private */ #undef EV_PREPARE_ENABLE #define EV_PREPARE_ENABLE 1 ev_prepare prepare; /* private */ ev_check check; /* unused */ ev_timer timer; /* unused */ ev_periodic periodic; /* unused */ ev_idle idle; /* unused */ ev_fork fork; /* private */ ev_cleanup cleanup; /* unused */ } ev_embed; #endif #if EV_ASYNC_ENABLE /* invoked when somebody calls ev_async_send on the watcher */ /* revent EV_ASYNC */ typedef struct ev_async { EV_WATCHER (ev_async) EV_ATOMIC_T sent; /* private */ } ev_async; # define ev_async_pending(w) (+(w)->sent) #endif /* the presence of this union forces similar struct layout */ union ev_any_watcher { struct ev_watcher w; struct ev_watcher_list wl; struct ev_io io; struct ev_timer timer; struct ev_periodic periodic; struct ev_signal signal; struct ev_child child; #if EV_STAT_ENABLE struct ev_stat stat; #endif #if EV_IDLE_ENABLE struct ev_idle idle; #endif struct ev_prepare prepare; struct ev_check check; #if EV_FORK_ENABLE struct ev_fork fork; #endif #if EV_CLEANUP_ENABLE struct ev_cleanup cleanup; #endif #if EV_EMBED_ENABLE struct ev_embed embed; #endif #if EV_ASYNC_ENABLE struct ev_async async; #endif }; /* flag bits for ev_default_loop and ev_loop_new */ enum { /* the default */ EVFLAG_AUTO = 0x00000000U, /* not quite a mask */ /* flag bits */ EVFLAG_NOENV = 0x01000000U, /* do NOT consult environment */ EVFLAG_FORKCHECK = 0x02000000U, /* check for a fork in each iteration */ /* debugging/feature disable */ EVFLAG_NOINOTIFY = 0x00100000U, /* do not attempt to use inotify */ #if EV_COMPAT3 EVFLAG_NOSIGFD = 0, /* compatibility to pre-3.9 */ #endif EVFLAG_SIGNALFD = 0x00200000U, /* attempt to use signalfd */ EVFLAG_NOSIGMASK = 0x00400000U, /* avoid modifying the signal mask */ EVFLAG_NOTIMERFD = 0x00800000U /* avoid creating a timerfd */ }; /* method bits to be ored together */ enum { EVBACKEND_SELECT = 0x00000001U, /* available just about anywhere */ EVBACKEND_POLL = 0x00000002U, /* !win, !aix, broken on osx */ EVBACKEND_EPOLL = 0x00000004U, /* linux */ EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */ EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */ EVBACKEND_PORT = 0x00000020U, /* solaris 10 */ EVBACKEND_LINUXAIO = 0x00000040U, /* linux AIO, 4.19+ */ EVBACKEND_IOURING = 0x00000080U, /* linux io_uring, 5.1+ */ EVBACKEND_ALL = 0x000000FFU, /* all known backends */ EVBACKEND_MASK = 0x0000FFFFU /* all future backends */ }; #if EV_PROTOTYPES EV_API_DECL int ev_version_major (void) EV_NOEXCEPT; EV_API_DECL int ev_version_minor (void) EV_NOEXCEPT; EV_API_DECL unsigned int ev_supported_backends (void) EV_NOEXCEPT; EV_API_DECL unsigned int ev_recommended_backends (void) EV_NOEXCEPT; EV_API_DECL unsigned int ev_embeddable_backends (void) EV_NOEXCEPT; EV_API_DECL ev_tstamp ev_time (void) EV_NOEXCEPT; EV_API_DECL void ev_sleep (ev_tstamp delay) EV_NOEXCEPT; /* sleep for a while */ /* Sets the allocation function to use, works 
like realloc. * It is used to allocate and free memory. * If it returns zero when memory needs to be allocated, the library might abort * or take some potentially destructive action. * The default is your system realloc function. */ EV_API_DECL void ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT; /* set the callback function to call on a * retryable syscall error * (such as failed select, poll, epoll_wait) */ EV_API_DECL void ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT; #if EV_MULTIPLICITY /* the default loop is the only one that handles signals and child watchers */ /* you can call this as often as you like */ EV_API_DECL struct ev_loop *ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; #ifdef EV_API_STATIC EV_API_DECL struct ev_loop *ev_default_loop_ptr; #endif EV_INLINE struct ev_loop * ev_default_loop_uc_ (void) EV_NOEXCEPT { extern struct ev_loop *ev_default_loop_ptr; return ev_default_loop_ptr; } EV_INLINE int ev_is_default_loop (EV_P) EV_NOEXCEPT { return EV_A == EV_DEFAULT_UC; } /* create and destroy alternative loops that don't handle signals */ EV_API_DECL struct ev_loop *ev_loop_new (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; EV_API_DECL ev_tstamp ev_now (EV_P) EV_NOEXCEPT; /* time w.r.t. timers and the eventloop, updated after each poll */ #else EV_API_DECL int ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_NOEXCEPT; /* returns true when successful */ EV_API_DECL ev_tstamp ev_rt_now; EV_INLINE ev_tstamp ev_now (void) EV_NOEXCEPT { return ev_rt_now; } /* looks weird, but ev_is_default_loop (EV_A) still works if this exists */ EV_INLINE int ev_is_default_loop (void) EV_NOEXCEPT { return 1; } #endif /* multiplicity */ /* destroy event loops, also works for the default loop */ EV_API_DECL void ev_loop_destroy (EV_P); /* this needs to be called after fork, to duplicate the loop */ /* when you want to re-use it in the child */ /* you can call it in either the parent or the child */ /* you can actually call it at any time, anywhere :) */ EV_API_DECL void ev_loop_fork (EV_P) EV_NOEXCEPT; EV_API_DECL unsigned int ev_backend (EV_P) EV_NOEXCEPT; /* backend in use by loop */ EV_API_DECL void ev_now_update (EV_P) EV_NOEXCEPT; /* update event loop time */ #if EV_WALK_ENABLE /* walk (almost) all watchers in the loop of a given type, invoking the */ /* callback on every such watcher. The callback might stop the watcher, */ /* but do nothing else with the loop */ EV_API_DECL void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT; #endif #endif /* prototypes */ /* ev_run flags values */ enum { EVRUN_NOWAIT = 1, /* do not block/wait */ EVRUN_ONCE = 2 /* block *once* only */ }; /* ev_break how values */ enum { EVBREAK_CANCEL = 0, /* undo unloop */ EVBREAK_ONE = 1, /* unloop once */ EVBREAK_ALL = 2 /* unloop all loops */ }; #if EV_PROTOTYPES EV_API_DECL int ev_run (EV_P_ int flags EV_CPP (= 0)); EV_API_DECL void ev_break (EV_P_ int how EV_CPP (= EVBREAK_ONE)) EV_NOEXCEPT; /* break out of the loop */ /* * ref/unref can be used to add or remove a refcount on the mainloop. every watcher * keeps one reference. if you have a long-running watcher you never unregister that * should not keep ev_loop from running, unref() after starting, and ref() before stopping. 
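 *
 * a hedged sketch of the pattern described above (sigw/sigint_cb are
 * illustrative names, not part of this API; "loop" is the caller's loop):
 *
 *   ev_signal sigw;
 *   ev_signal_init (&sigw, sigint_cb, SIGINT);
 *   ev_signal_start (loop, &sigw);
 *   ev_unref (loop);  // this watcher alone should not keep ev_run running
 *   ...
 *   ev_ref (loop);    // restore the refcount before stopping the watcher
 *   ev_signal_stop (loop, &sigw);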
*/ EV_API_DECL void ev_ref (EV_P) EV_NOEXCEPT; EV_API_DECL void ev_unref (EV_P) EV_NOEXCEPT; /* * convenience function, wait for a single event, without registering an event watcher * if timeout is < 0, do wait indefinitely */ EV_API_DECL void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT; EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */ # if EV_FEATURE_API EV_API_DECL unsigned int ev_iteration (EV_P) EV_NOEXCEPT; /* number of loop iterations */ EV_API_DECL unsigned int ev_depth (EV_P) EV_NOEXCEPT; /* #ev_loop enters - #ev_loop leaves */ EV_API_DECL void ev_verify (EV_P) EV_NOEXCEPT; /* abort if loop data corrupted */ EV_API_DECL void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT; /* sleep at least this time, default 0 */ EV_API_DECL void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT; /* sleep at least this time, default 0 */ /* advanced stuff for threading etc. support, see docs */ EV_API_DECL void ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT; EV_API_DECL void *ev_userdata (EV_P) EV_NOEXCEPT; typedef void (*ev_loop_callback)(EV_P); EV_API_DECL void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT; /* C++ doesn't allow the use of the ev_loop_callback typedef here, so we need to spell it out */ EV_API_DECL void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT; EV_API_DECL unsigned int ev_pending_count (EV_P) EV_NOEXCEPT; /* number of pending events, if any */ /* * stop/start the timer handling. */ EV_API_DECL void ev_suspend (EV_P) EV_NOEXCEPT; EV_API_DECL void ev_resume (EV_P) EV_NOEXCEPT; #endif #endif /* these may evaluate ev multiple times, and the other arguments at most once */ /* either use ev_init + ev_TYPE_set, or the ev_TYPE_init macro, below, to first initialise a watcher */ #define ev_init(ev,cb_) do { \ ((ev_watcher *)(void *)(ev))->active = \ ((ev_watcher *)(void *)(ev))->pending = 0; \ ev_set_priority ((ev), 0); \ ev_set_cb ((ev), cb_); \ } while (0) #define ev_io_modify(ev,events_) do { (ev)->events = (ev)->events & EV__IOFDSET | (events_); } while (0) #define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV__IOFDSET; } while (0) #define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0) #define ev_periodic_set(ev,ofs_,ival_,rcb_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb = (rcb_); } while (0) #define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0) #define ev_child_set(ev,pid_,trace_) do { (ev)->pid = (pid_); (ev)->flags = !!(trace_); } while (0) #define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0) #define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0) #define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_cleanup_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_async_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0) #define ev_timer_init(ev,cb,after,repeat) do { ev_init 
((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0) #define ev_periodic_init(ev,cb,ofs,ival,rcb) do { ev_init ((ev), (cb)); ev_periodic_set ((ev),(ofs),(ival),(rcb)); } while (0) #define ev_signal_init(ev,cb,signum) do { ev_init ((ev), (cb)); ev_signal_set ((ev), (signum)); } while (0) #define ev_child_init(ev,cb,pid,trace) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid),(trace)); } while (0) #define ev_stat_init(ev,cb,path,interval) do { ev_init ((ev), (cb)); ev_stat_set ((ev),(path),(interval)); } while (0) #define ev_idle_init(ev,cb) do { ev_init ((ev), (cb)); ev_idle_set ((ev)); } while (0) #define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0) #define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0) #define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0) #define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0) #define ev_cleanup_init(ev,cb) do { ev_init ((ev), (cb)); ev_cleanup_set ((ev)); } while (0) #define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0) #define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */ #define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */ #define ev_cb_(ev) (ev)->cb /* rw */ #define ev_cb(ev) (memmove (&ev_cb_ (ev), &((ev_watcher *)(ev))->cb, sizeof (ev_cb_ (ev))), (ev)->cb) #if EV_MINPRI == EV_MAXPRI # define ev_priority(ev) ((ev), EV_MINPRI) # define ev_set_priority(ev,pri) ((ev), (pri)) #else # define ev_priority(ev) (+(((ev_watcher *)(void *)(ev))->priority)) # define ev_set_priority(ev,pri) ( (ev_watcher *)(void *)(ev))->priority = (pri) #endif #define ev_periodic_at(ev) (+((ev_watcher_time *)(ev))->at) #ifndef ev_set_cb /* memmove is used here to avoid strict aliasing violations, and hopefully is optimized out by any reasonable compiler */ # define ev_set_cb(ev,cb_) (ev_cb_ (ev) = (cb_), memmove (&((ev_watcher *)(ev))->cb, &ev_cb_ (ev), sizeof (ev_cb_ (ev)))) #endif /* stopping (enabling, adding) a watcher does nothing if it is already running */ /* stopping (disabling, deleting) a watcher does nothing unless it's already running */ #if EV_PROTOTYPES /* feeds an event into a watcher as if the event actually occurred */ /* accepts any ev_watcher type */ EV_API_DECL void ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT; EV_API_DECL void ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT; #if EV_SIGNAL_ENABLE EV_API_DECL void ev_feed_signal (int signum) EV_NOEXCEPT; EV_API_DECL void ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT; #endif EV_API_DECL void ev_invoke (EV_P_ void *w, int revents); EV_API_DECL int ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT; EV_API_DECL void ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT; EV_API_DECL void ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT; EV_API_DECL void ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT; EV_API_DECL void ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT; /* stops if active and no repeat, restarts if active and repeating, starts if inactive and repeating */ EV_API_DECL void ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT; /* return remaining time */ EV_API_DECL ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT; #if EV_PERIODIC_ENABLE EV_API_DECL void ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT; EV_API_DECL void ev_periodic_stop (EV_P_ ev_periodic *w) 
EV_NOEXCEPT; EV_API_DECL void ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT; #endif /* only supported in the default loop */ #if EV_SIGNAL_ENABLE EV_API_DECL void ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT; EV_API_DECL void ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT; #endif /* only supported in the default loop */ # if EV_CHILD_ENABLE EV_API_DECL void ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT; EV_API_DECL void ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT; # endif # if EV_STAT_ENABLE EV_API_DECL void ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT; EV_API_DECL void ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT; EV_API_DECL void ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT; # endif # if EV_IDLE_ENABLE EV_API_DECL void ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT; EV_API_DECL void ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT; # endif #if EV_PREPARE_ENABLE EV_API_DECL void ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT; EV_API_DECL void ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT; #endif #if EV_CHECK_ENABLE EV_API_DECL void ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT; EV_API_DECL void ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT; #endif # if EV_FORK_ENABLE EV_API_DECL void ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT; EV_API_DECL void ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT; # endif # if EV_CLEANUP_ENABLE EV_API_DECL void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT; EV_API_DECL void ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT; # endif # if EV_EMBED_ENABLE /* only supported when loop to be embedded is in fact embeddable */ EV_API_DECL void ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT; EV_API_DECL void ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT; EV_API_DECL void ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT; # endif # if EV_ASYNC_ENABLE EV_API_DECL void ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT; EV_API_DECL void ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT; EV_API_DECL void ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT; # endif #if EV_COMPAT3 #define EVLOOP_NONBLOCK EVRUN_NOWAIT #define EVLOOP_ONESHOT EVRUN_ONCE #define EVUNLOOP_CANCEL EVBREAK_CANCEL #define EVUNLOOP_ONE EVBREAK_ONE #define EVUNLOOP_ALL EVBREAK_ALL #if EV_PROTOTYPES EV_INLINE void ev_loop (EV_P_ int flags) { ev_run (EV_A_ flags); } EV_INLINE void ev_unloop (EV_P_ int how ) { ev_break (EV_A_ how ); } EV_INLINE void ev_default_destroy (void) { ev_loop_destroy (EV_DEFAULT); } EV_INLINE void ev_default_fork (void) { ev_loop_fork (EV_DEFAULT); } #if EV_FEATURE_API EV_INLINE unsigned int ev_loop_count (EV_P) { return ev_iteration (EV_A); } EV_INLINE unsigned int ev_loop_depth (EV_P) { return ev_depth (EV_A); } EV_INLINE void ev_loop_verify (EV_P) { ev_verify (EV_A); } #endif #endif #else typedef struct ev_loop ev_loop; #endif #endif EV_CPP(}) #endif EV-4.33/libev/ev_poll.c0000644000000000000000000001102313556456317013374 0ustar rootroot/* * libev poll fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #include <poll.h> inline_size void array_needsize_pollidx (int *base, int offset, int count) { /* using memset (.., -1, ...) is tempting, but we try * to be ultraportable */ base += offset; while (count--) *base++ = -1; } static void poll_modify (EV_P_ int fd, int oev, int nev) { int idx; if (oev == nev) return; array_needsize (int, pollidxs, pollidxmax, fd + 1, array_needsize_pollidx); idx = pollidxs [fd]; if (idx < 0) /* need to allocate a new pollfd */ { pollidxs [fd] = idx = pollcnt++; array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit); polls [idx].fd = fd; } assert (polls [idx].fd == fd); if (nev) polls [idx].events = (nev & EV_READ ? POLLIN : 0) | (nev & EV_WRITE ? POLLOUT : 0); else /* remove pollfd */ { pollidxs [fd] = -1; if (ecb_expect_true (idx < --pollcnt)) { polls [idx] = polls [pollcnt]; pollidxs [polls [idx].fd] = idx; } } } static void poll_poll (EV_P_ ev_tstamp timeout) { struct pollfd *p; int res; EV_RELEASE_CB; res = poll (polls, pollcnt, EV_TS_TO_MSEC (timeout)); EV_ACQUIRE_CB; if (ecb_expect_false (res < 0)) { if (errno == EBADF) fd_ebadf (EV_A); else if (errno == ENOMEM && !syserr_cb) fd_enomem (EV_A); else if (errno != EINTR) ev_syserr ("(libev) poll"); } else for (p = polls; res; ++p) { assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt)); if (ecb_expect_false (p->revents)) /* this expect is debatable */ { --res; if (ecb_expect_false (p->revents & POLLNVAL)) { assert (("libev: poll found invalid fd in poll set", 0)); fd_kill (EV_A_ p->fd); } else fd_event ( EV_A_ p->fd, (p->revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (p->revents & (POLLIN | POLLERR | POLLHUP) ? 
EV_READ : 0) ); } } } inline_size int poll_init (EV_P_ int flags) { backend_mintime = EV_TS_CONST (1e-3); backend_modify = poll_modify; backend_poll = poll_poll; pollidxs = 0; pollidxmax = 0; polls = 0; pollmax = 0; pollcnt = 0; return EVBACKEND_POLL; } inline_size void poll_destroy (EV_P) { ev_free (pollidxs); ev_free (polls); } EV-4.33/libev/ev_kqueue.c0000644000000000000000000001566013556456317013740 0ustar rootroot/* * libev kqueue backend * * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ #include #include #include #include #include inline_speed void kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) { ++kqueue_changecnt; array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, array_needsize_noinit); EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); } /* OS X at least needs this */ #ifndef EV_ENABLE # define EV_ENABLE 0 #endif #ifndef NOTE_EOF # define NOTE_EOF 0 #endif static void kqueue_modify (EV_P_ int fd, int oev, int nev) { if (oev != nev) { if (oev & EV_READ) kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0); if (oev & EV_WRITE) kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); } /* to detect close/reopen reliably, we have to re-add */ /* event requests even when oev == nev */ if (nev & EV_READ) kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); if (nev & EV_WRITE) kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); } static void kqueue_poll (EV_P_ ev_tstamp timeout) { int res, i; struct timespec ts; /* need to resize so there is enough space for errors */ if (kqueue_changecnt > kqueue_eventmax) { ev_free (kqueue_events); kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); } EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); EV_ACQUIRE_CB; kqueue_changecnt = 0; if (ecb_expect_false (res < 0)) { if (errno != EINTR) ev_syserr ("(libev) kqueue kevent"); return; } for (i = 0; i < res; ++i) { int fd = kqueue_events [i].ident; if (ecb_expect_false (kqueue_events [i].flags & EV_ERROR)) { int err = kqueue_events [i].data; /* we are only interested in errors for fds that we are interested in :) */ if (anfds [fd].events) { if (err == ENOENT) /* resubmit changes on ENOENT */ kqueue_modify (EV_A_ fd, 0, anfds [fd].events); else if (err == EBADF) /* on EBADF, we re-check the fd */ { if (fd_valid (fd)) kqueue_modify (EV_A_ fd, 0, anfds [fd].events); else { assert (("libev: kqueue found invalid fd", 0)); fd_kill (EV_A_ fd); } } else /* on all other errors, we error out on the fd */ { assert (("libev: kqueue found invalid fd", 0)); fd_kill (EV_A_ fd); } } } else fd_event ( EV_A_ fd, kqueue_events [i].filter == EVFILT_READ ? EV_READ : kqueue_events [i].filter == EVFILT_WRITE ? 
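/*
 * a small standalone usage sketch (assumed fd/kq variables, not libev API) of
 * the batching pattern implemented above: changes are queued with EV_SET and
 * handed to the kernel in the same kevent () call that also fetches events:
 *
 *   struct kevent changes [2], events [64];
 *   struct timespec ts = { 0, 0 };
 *
 *   EV_SET (&changes [0], fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF, 0, 0);
 *   EV_SET (&changes [1], fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF, 0, 0);
 *
 *   int n = kevent (kq, changes, 2, events, 64, &ts);
 */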
EV_WRITE : 0 ); } if (ecb_expect_false (res == kqueue_eventmax)) { ev_free (kqueue_events); kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); } } inline_size int kqueue_init (EV_P_ int flags) { /* initialize the kernel queue */ kqueue_fd_pid = getpid (); if ((backend_fd = kqueue ()) < 0) return 0; fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ backend_mintime = EV_TS_CONST (1e-9); /* apparently, they did the right thing in freebsd */ backend_modify = kqueue_modify; backend_poll = kqueue_poll; kqueue_eventmax = 64; /* initial number of events receivable per poll */ kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); kqueue_changes = 0; kqueue_changemax = 0; kqueue_changecnt = 0; return EVBACKEND_KQUEUE; } inline_size void kqueue_destroy (EV_P) { ev_free (kqueue_events); ev_free (kqueue_changes); } inline_size void kqueue_fork (EV_P) { /* some BSD kernels don't just destroy the kqueue itself, * but also close the fd, which isn't documented, and * impossible to support properly. * we remember the pid of the kqueue call and only close * the fd if the pid is still the same. * this leaks fds on sane kernels, but BSD interfaces are * notoriously buggy and rarely get fixed. */ pid_t newpid = getpid (); if (newpid == kqueue_fd_pid) close (backend_fd); kqueue_fd_pid = newpid; while ((backend_fd = kqueue ()) < 0) ev_syserr ("(libev) kqueue"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* re-register interest in fds */ fd_rearm_all (EV_A); } /* sys/event.h defines EV_ERROR */ #undef EV_ERROR EV-4.33/libev/LICENSE0000644000000000000000000000400712337443634012573 0ustar rootrootAll files in libev are Copyright (c)2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Alternatively, the contents of this package may be used under the terms of the GNU General Public License ("GPL") version 2 or any later version, in which case the provisions of the GPL are applicable instead of the above. 
If you wish to allow the use of your version of this package only under the terms of the GPL and not to allow others to use your version of this file under the BSD license, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL in this and the other files of this package. If you do not delete the provisions above, a recipient may use your version of this file under either the BSD or the GPL. EV-4.33/libev/ev_port.c0000644000000000000000000001460013556456317013416 0ustar rootroot/* * libev solaris event port backend * * Copyright (c) 2007,2008,2009,2010,2011,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* useful reading: * * http://bugs.opensolaris.org/view_bug.do?bug_id=6268715 (random results) * http://bugs.opensolaris.org/view_bug.do?bug_id=6455223 (just totally broken) * http://bugs.opensolaris.org/view_bug.do?bug_id=6873782 (manpage ETIME) * http://bugs.opensolaris.org/view_bug.do?bug_id=6874410 (implementation ETIME) * http://www.mail-archive.com/networking-discuss@opensolaris.org/msg11898.html ETIME vs. nget * http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/event_port.c (libc) * http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/fs/portfs/port.c#1325 (kernel) */ #include #include #include #include #include #include inline_speed void port_associate_and_check (EV_P_ int fd, int ev) { if (0 > port_associate ( backend_fd, PORT_SOURCE_FD, fd, (ev & EV_READ ? POLLIN : 0) | (ev & EV_WRITE ? 
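/*
 * a minimal usage sketch (illustrative port/fd variables only) of the
 * event-port calls wrapped here; associations are effectively one-shot, so
 * after port_getn delivers an event the fd must be associated again before
 * it can fire another time, which is what the fd_change () call further down
 * in port_poll arranges for:
 *
 *   int port = port_create ();
 *   port_associate (port, PORT_SOURCE_FD, fd, POLLIN, 0);
 *
 *   port_event_t pe;
 *   uint_t nget = 1;
 *   if (port_getn (port, &pe, 1, &nget, 0) == 0 && nget == 1)
 *     port_associate (port, PORT_SOURCE_FD, fd, POLLIN, 0);
 */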
POLLOUT : 0), 0 ) ) { if (errno == EBADFD) { assert (("libev: port_associate found invalid fd", errno != EBADFD)); fd_kill (EV_A_ fd); } else ev_syserr ("(libev) port_associate"); } } static void port_modify (EV_P_ int fd, int oev, int nev) { /* we need to reassociate no matter what, as closes are * once more silently being discarded. */ if (!nev) { if (oev) port_dissociate (backend_fd, PORT_SOURCE_FD, fd); } else port_associate_and_check (EV_A_ fd, nev); } static void port_poll (EV_P_ ev_tstamp timeout) { int res, i; struct timespec ts; uint_t nget = 1; /* we initialise this to something we will skip in the loop, as */ /* port_getn can return with nget unchanged, but no indication */ /* whether it was the original value or has been updated :/ */ port_events [0].portev_source = 0; EV_RELEASE_CB; EV_TS_SET (ts, timeout); res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts); EV_ACQUIRE_CB; /* port_getn may or may not set nget on error */ /* so we rely on port_events [0].portev_source not being updated */ if (res == -1 && errno != ETIME && errno != EINTR) ev_syserr ("(libev) port_getn (see http://bugs.opensolaris.org/view_bug.do?bug_id=6268715, try LIBEV_FLAGS=3 env variable)"); for (i = 0; i < nget; ++i) { if (port_events [i].portev_source == PORT_SOURCE_FD) { int fd = port_events [i].portev_object; fd_event ( EV_A_ fd, (port_events [i].portev_events & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) | (port_events [i].portev_events & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) ); fd_change (EV_A_ fd, EV__IOFDSET); } } if (ecb_expect_false (nget == port_eventmax)) { ev_free (port_events); port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1); port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); } } inline_size int port_init (EV_P_ int flags) { /* Initialize the kernel queue */ if ((backend_fd = port_create ()) < 0) return 0; assert (("libev: PORT_SOURCE_FD must not be zero", PORT_SOURCE_FD)); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ /* if my reading of the opensolaris kernel sources are correct, then * opensolaris does something very stupid: it checks if the time has already * elapsed and doesn't round up if that is the case, otherwise it DOES round * up. Since we can't know what the case is, we need to guess by using a * "large enough" timeout. Normally, 1e-9 would be correct. */ backend_mintime = EV_TS_CONST (1e-3); /* needed to compensate for port_getn returning early */ backend_modify = port_modify; backend_poll = port_poll; port_eventmax = 64; /* initial number of events receivable per poll */ port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax); return EVBACKEND_PORT; } inline_size void port_destroy (EV_P) { ev_free (port_events); } inline_size void port_fork (EV_P) { close (backend_fd); while ((backend_fd = port_create ()) < 0) ev_syserr ("(libev) port"); fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* re-register interest in fds */ fd_rearm_all (EV_A); } EV-4.33/libev/ev_select.c0000644000000000000000000002122513556456317013712 0ustar rootroot/* * libev select fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef _WIN32 /* for unix systems */ # include # ifndef __hpux /* for REAL unix systems */ # include # endif #endif #ifndef EV_SELECT_USE_FD_SET # ifdef NFDBITS # define EV_SELECT_USE_FD_SET 0 # else # define EV_SELECT_USE_FD_SET 1 # endif #endif #if EV_SELECT_IS_WINSOCKET # undef EV_SELECT_USE_FD_SET # define EV_SELECT_USE_FD_SET 1 # undef NFDBITS # define NFDBITS 0 #endif #if !EV_SELECT_USE_FD_SET # define NFDBYTES (NFDBITS / 8) #endif #include static void select_modify (EV_P_ int fd, int oev, int nev) { if (oev == nev) return; { #if EV_SELECT_USE_FD_SET #if EV_SELECT_IS_WINSOCKET SOCKET handle = anfds [fd].handle; #else int handle = fd; #endif assert (("libev: fd >= FD_SETSIZE passed to fd_set-based select backend", fd < FD_SETSIZE)); /* FD_SET is broken on windows (it adds the fd to a set twice or more, * which eventually leads to overflows). Need to call it only on changes. 
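 * (as a concrete instance of the change-only guard below, assuming libev's
 *  usual EV_READ == 0x01 and EV_WRITE == 0x02 flag values: going from
 *  oev == EV_READ to nev == EV_READ | EV_WRITE gives
 *  (oev ^ nev) & EV_READ == 0, so the read fd_set is left alone and only the
 *  write fd_set is touched)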
*/ #if EV_SELECT_IS_WINSOCKET if ((oev ^ nev) & EV_READ) #endif if (nev & EV_READ) FD_SET (handle, (fd_set *)vec_ri); else FD_CLR (handle, (fd_set *)vec_ri); #if EV_SELECT_IS_WINSOCKET if ((oev ^ nev) & EV_WRITE) #endif if (nev & EV_WRITE) FD_SET (handle, (fd_set *)vec_wi); else FD_CLR (handle, (fd_set *)vec_wi); #else int word = fd / NFDBITS; fd_mask mask = 1UL << (fd % NFDBITS); if (ecb_expect_false (vec_max <= word)) { int new_max = word + 1; vec_ri = ev_realloc (vec_ri, new_max * NFDBYTES); vec_ro = ev_realloc (vec_ro, new_max * NFDBYTES); /* could free/malloc */ vec_wi = ev_realloc (vec_wi, new_max * NFDBYTES); vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */ #ifdef _WIN32 vec_eo = ev_realloc (vec_eo, new_max * NFDBYTES); /* could free/malloc */ #endif for (; vec_max < new_max; ++vec_max) ((fd_mask *)vec_ri) [vec_max] = ((fd_mask *)vec_wi) [vec_max] = 0; } ((fd_mask *)vec_ri) [word] |= mask; if (!(nev & EV_READ)) ((fd_mask *)vec_ri) [word] &= ~mask; ((fd_mask *)vec_wi) [word] |= mask; if (!(nev & EV_WRITE)) ((fd_mask *)vec_wi) [word] &= ~mask; #endif } } static void select_poll (EV_P_ ev_tstamp timeout) { struct timeval tv; int res; int fd_setsize; EV_RELEASE_CB; EV_TV_SET (tv, timeout); #if EV_SELECT_USE_FD_SET fd_setsize = sizeof (fd_set); #else fd_setsize = vec_max * NFDBYTES; #endif memcpy (vec_ro, vec_ri, fd_setsize); memcpy (vec_wo, vec_wi, fd_setsize); #ifdef _WIN32 /* pass in the write set as except set. * the idea behind this is to work around a windows bug that causes * errors to be reported as an exception and not by setting * the writable bit. this is so uncontrollably lame. */ memcpy (vec_eo, vec_wi, fd_setsize); res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv); #elif EV_SELECT_USE_FD_SET fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE; res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); #else res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv); #endif EV_ACQUIRE_CB; if (ecb_expect_false (res < 0)) { #if EV_SELECT_IS_WINSOCKET errno = WSAGetLastError (); #endif #ifdef WSABASEERR /* on windows, select returns incompatible error codes, fix this */ if (errno >= WSABASEERR && errno < WSABASEERR + 1000) if (errno == WSAENOTSOCK) errno = EBADF; else errno -= WSABASEERR; #endif #ifdef _WIN32 /* select on windows erroneously returns EINVAL when no fd sets have been * provided (this is documented). what microsoft doesn't tell you that this bug * exists even when the fd sets _are_ provided, so we have to check for this bug * here and emulate by sleeping manually. * we also get EINVAL when the timeout is invalid, but we ignore this case here * and assume that EINVAL always means: you have to wait manually. */ if (errno == EINVAL) { if (timeout) { unsigned long ms = EV_TS_TO_MSEC (timeout); Sleep (ms ? 
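/*
 * for reference, a worked instance of the fd_mask indexing used in
 * select_modify above, assuming 32-bit fd_mask (NFDBITS == 32): fd 69 lives
 * in word 69 / 32 == 2 with mask 1UL << (69 % 32) == 1UL << 5, i.e. bit 5 of
 * the third mask word, and vec_max only needs to grow to 3 words for it.
 */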
ms : 1); } return; } #endif if (errno == EBADF) fd_ebadf (EV_A); else if (errno == ENOMEM && !syserr_cb) fd_enomem (EV_A); else if (errno != EINTR) ev_syserr ("(libev) select"); return; } #if EV_SELECT_USE_FD_SET { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) { int events = 0; #if EV_SELECT_IS_WINSOCKET SOCKET handle = anfds [fd].handle; #else int handle = fd; #endif if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ; if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE; #ifdef _WIN32 if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE; #endif if (ecb_expect_true (events)) fd_event (EV_A_ fd, events); } } #else { int word, bit; for (word = vec_max; word--; ) { fd_mask word_r = ((fd_mask *)vec_ro) [word]; fd_mask word_w = ((fd_mask *)vec_wo) [word]; #ifdef _WIN32 word_w |= ((fd_mask *)vec_eo) [word]; #endif if (word_r || word_w) for (bit = NFDBITS; bit--; ) { fd_mask mask = 1UL << bit; int events = 0; events |= word_r & mask ? EV_READ : 0; events |= word_w & mask ? EV_WRITE : 0; if (ecb_expect_true (events)) fd_event (EV_A_ word * NFDBITS + bit, events); } } } #endif } inline_size int select_init (EV_P_ int flags) { backend_mintime = EV_TS_CONST (1e-6); backend_modify = select_modify; backend_poll = select_poll; #if EV_SELECT_USE_FD_SET vec_ri = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri); vec_ro = ev_malloc (sizeof (fd_set)); vec_wi = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_wi); vec_wo = ev_malloc (sizeof (fd_set)); #ifdef _WIN32 vec_eo = ev_malloc (sizeof (fd_set)); #endif #else vec_max = 0; vec_ri = 0; vec_ro = 0; vec_wi = 0; vec_wo = 0; #ifdef _WIN32 vec_eo = 0; #endif #endif return EVBACKEND_SELECT; } inline_size void select_destroy (EV_P) { ev_free (vec_ri); ev_free (vec_ro); ev_free (vec_wi); ev_free (vec_wo); #ifdef _WIN32 ev_free (vec_eo); #endif } EV-4.33/libev/ev_epoll.c0000644000000000000000000002417513556456317013555 0ustar rootroot/* * libev epoll fd activity backend * * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. 
If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ /* * general notes about epoll: * * a) epoll silently removes fds from the fd set. as nothing tells us * that an fd has been removed otherwise, we have to continually * "rearm" fds that we suspect *might* have changed (same * problem with kqueue, but much less costly there). * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) * and seems not to have any advantage. * c) the inability to handle fork or file descriptors (think dup) * limits the applicability over poll, so this is not a generic * poll replacement. * d) epoll doesn't work the same as select with many file descriptors * (such as files). while not critical, no other advanced interface * seems to share this (rather non-unixy) limitation. * e) epoll claims to be embeddable, but in practise you never get * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32). * f) epoll_ctl returning EPERM means the fd is always ready. * * lots of "weird code" and complication handling in this file is due * to these design problems with epoll, as we try very hard to avoid * epoll_ctl syscalls for common usage patterns and handle the breakage * ensuing from receiving events for closed and otherwise long gone * file descriptors. */ #include #define EV_EMASK_EPERM 0x80 static void epoll_modify (EV_P_ int fd, int oev, int nev) { struct epoll_event ev; unsigned char oldmask; /* * we handle EPOLL_CTL_DEL by ignoring it here * on the assumption that the fd is gone anyways * if that is wrong, we have to handle the spurious * event in epoll_poll. * if the fd is added again, we try to ADD it, and, if that * fails, we assume it still has the same eventmask. */ if (!nev) return; oldmask = anfds [fd].emask; anfds [fd].emask = nev; /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ ev.data.u64 = (uint64_t)(uint32_t)fd | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); ev.events = (nev & EV_READ ? EPOLLIN : 0) | (nev & EV_WRITE ? EPOLLOUT : 0); if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) return; if (ecb_expect_true (errno == ENOENT)) { /* if ENOENT then the fd went away, so try to do the right thing */ if (!nev) goto dec_egen; if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) return; } else if (ecb_expect_true (errno == EEXIST)) { /* EEXIST means we ignored a previous DEL, but the fd is still active */ /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ if (oldmask == nev) goto dec_egen; if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) return; } else if (ecb_expect_true (errno == EPERM)) { /* EPERM means the fd is always ready, but epoll is too snobbish */ /* to handle it, unlike select or poll. 
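 * (a concrete case: a regular file, e.g. fd = open ("/etc/passwd", O_RDONLY),
 *  makes epoll_ctl fail with EPERM even though read () on it never blocks;
 *  such fds are parked in epoll_eperms here and reported as ready by hand at
 *  the end of each epoll_poll run, which also forces the wait timeout to 0
 *  so they are not delayed)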
*/ anfds [fd].emask = EV_EMASK_EPERM; /* add fd to epoll_eperms, if not already inside */ if (!(oldmask & EV_EMASK_EPERM)) { array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit); epoll_eperms [epoll_epermcnt++] = fd; } return; } else assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL)); fd_kill (EV_A_ fd); dec_egen: /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ --anfds [fd].egen; } static void epoll_poll (EV_P_ ev_tstamp timeout) { int i; int eventcnt; if (ecb_expect_false (epoll_epermcnt)) timeout = EV_TS_CONST (0.); /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ /* the default libev max wait time, however. */ EV_RELEASE_CB; eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MSEC (timeout)); EV_ACQUIRE_CB; if (ecb_expect_false (eventcnt < 0)) { if (errno != EINTR) ev_syserr ("(libev) epoll_wait"); return; } for (i = 0; i < eventcnt; ++i) { struct epoll_event *ev = epoll_events + i; int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ int want = anfds [fd].events; int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); /* * check for spurious notification. * this only finds spurious notifications on egen updates * other spurious notifications will be found by epoll_ctl, below * we assume that fd is always in range, as we never shrink the anfds array */ if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) { /* recreate kernel state */ postfork |= 2; continue; } if (ecb_expect_false (got & ~want)) { anfds [fd].emask = want; /* * we received an event but are not interested in it, try mod or del * this often happens because we optimistically do not unregister fds * when we are no longer interested in them, but also when we get spurious * notifications for fds from another process. this is partially handled * above with the gencounter check (== our fd is not the event fd), and * partially here, when epoll_ctl returns an error (== a child has the fd * but we closed it). * note: for events such as POLLHUP, where we can't know whether it refers * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls. */ ev->events = (want & EV_READ ? EPOLLIN : 0) | (want & EV_WRITE ? EPOLLOUT : 0); /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ /* which is fortunately easy to do for us. */ if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) { postfork |= 2; /* an error occurred, recreate kernel state */ continue; } } fd_event (EV_A_ fd, got); } /* if the receive array was full, increase its size */ if (ecb_expect_false (eventcnt == epoll_eventmax)) { ev_free (epoll_events); epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); } /* now synthesize events for all fds where epoll fails, while select works... 
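 * (the spurious-event check earlier in this function relies on the packing
 *  done in epoll_modify: the fd is stored in the low 32 bits of ev.data.u64
 *  and anfds [fd].egen in the high 32 bits, so e.g. fd 7 at generation 3
 *  becomes (uint64_t)7 | ((uint64_t)3 << 32); an event carrying a stale
 *  generation is dropped and the kernel state is flagged for recreation via
 *  postfork |= 2)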
*/ for (i = epoll_epermcnt; i--; ) { int fd = epoll_eperms [i]; unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); if (anfds [fd].emask & EV_EMASK_EPERM && events) fd_event (EV_A_ fd, events); else { epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; anfds [fd].emask = 0; } } } static int epoll_epoll_create (void) { int fd; #if defined EPOLL_CLOEXEC && !defined __ANDROID__ fd = epoll_create1 (EPOLL_CLOEXEC); if (fd < 0 && (errno == EINVAL || errno == ENOSYS)) #endif { fd = epoll_create (256); if (fd >= 0) fcntl (fd, F_SETFD, FD_CLOEXEC); } return fd; } inline_size int epoll_init (EV_P_ int flags) { if ((backend_fd = epoll_epoll_create ()) < 0) return 0; backend_mintime = EV_TS_CONST (1e-3); /* epoll does sometimes return early, this is just to avoid the worst */ backend_modify = epoll_modify; backend_poll = epoll_poll; epoll_eventmax = 64; /* initial number of events receivable per poll */ epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); return EVBACKEND_EPOLL; } inline_size void epoll_destroy (EV_P) { ev_free (epoll_events); array_free (epoll_eperm, EMPTY); } ecb_cold static void epoll_fork (EV_P) { close (backend_fd); while ((backend_fd = epoll_epoll_create ()) < 0) ev_syserr ("(libev) epoll_create"); fd_rearm_all (EV_A); } EV-4.33/libev/ev.c0000644000000000000000000045025513634412166012353 0ustar rootroot/* * libev event processing core, watcher management * * Copyright (c) 2007-2020 Marc Alexander Lehmann * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. 
*/ /* this big block deduces configuration from config.h */ #ifndef EV_STANDALONE # ifdef EV_CONFIG_H # include EV_CONFIG_H # else # include "config.h" # endif # if HAVE_FLOOR # ifndef EV_USE_FLOOR # define EV_USE_FLOOR 1 # endif # endif # if HAVE_CLOCK_SYSCALL # ifndef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 1 # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # endif # endif # elif !defined EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 # endif # if HAVE_CLOCK_GETTIME # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # else # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif # endif # if HAVE_NANOSLEEP # ifndef EV_USE_NANOSLEEP # define EV_USE_NANOSLEEP EV_FEATURE_OS # endif # else # undef EV_USE_NANOSLEEP # define EV_USE_NANOSLEEP 0 # endif # if HAVE_SELECT && HAVE_SYS_SELECT_H # ifndef EV_USE_SELECT # define EV_USE_SELECT EV_FEATURE_BACKENDS # endif # else # undef EV_USE_SELECT # define EV_USE_SELECT 0 # endif # if HAVE_POLL && HAVE_POLL_H # ifndef EV_USE_POLL # define EV_USE_POLL EV_FEATURE_BACKENDS # endif # else # undef EV_USE_POLL # define EV_USE_POLL 0 # endif # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H # ifndef EV_USE_EPOLL # define EV_USE_EPOLL EV_FEATURE_BACKENDS # endif # else # undef EV_USE_EPOLL # define EV_USE_EPOLL 0 # endif # if HAVE_LINUX_AIO_ABI_H # ifndef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */ # endif # else # undef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 # endif # if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T # ifndef EV_USE_IOURING # define EV_USE_IOURING EV_FEATURE_BACKENDS # endif # else # undef EV_USE_IOURING # define EV_USE_IOURING 0 # endif # if HAVE_KQUEUE && HAVE_SYS_EVENT_H # ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE EV_FEATURE_BACKENDS # endif # else # undef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 # endif # if HAVE_PORT_H && HAVE_PORT_CREATE # ifndef EV_USE_PORT # define EV_USE_PORT EV_FEATURE_BACKENDS # endif # else # undef EV_USE_PORT # define EV_USE_PORT 0 # endif # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H # ifndef EV_USE_INOTIFY # define EV_USE_INOTIFY EV_FEATURE_OS # endif # else # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 # endif # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H # ifndef EV_USE_SIGNALFD # define EV_USE_SIGNALFD EV_FEATURE_OS # endif # else # undef EV_USE_SIGNALFD # define EV_USE_SIGNALFD 0 # endif # if HAVE_EVENTFD # ifndef EV_USE_EVENTFD # define EV_USE_EVENTFD EV_FEATURE_OS # endif # else # undef EV_USE_EVENTFD # define EV_USE_EVENTFD 0 # endif # if HAVE_SYS_TIMERFD_H # ifndef EV_USE_TIMERFD # define EV_USE_TIMERFD EV_FEATURE_OS # endif # else # undef EV_USE_TIMERFD # define EV_USE_TIMERFD 0 # endif #endif /* OS X, in its infinite idiocy, actually HARDCODES * a limit of 1024 into their select. Where people have brains, * OS X engineers apparently have a vacuum. Or maybe they were * ordered to have a vacuum, or they do anything for money. * This might help. Or not. * Note that this must be defined early, as other include files * will rely on this define as well. 
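 * (the define only has an effect if it is seen before sys/types.h and
 *  sys/select.h are first included, which is why it sits at the very top of
 *  this file rather than next to the select backend; with it, darwin's
 *  select () is no longer capped at FD_SETSIZE (1024) descriptors)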
*/ #define _DARWIN_UNLIMITED_SELECT 1 #include #include #include #include #include #include #include #include #include #include #include #ifdef EV_H # include EV_H #else # include "ev.h" #endif #if EV_NO_THREADS # undef EV_NO_SMP # define EV_NO_SMP 1 # undef ECB_NO_THREADS # define ECB_NO_THREADS 1 #endif #if EV_NO_SMP # undef EV_NO_SMP # define ECB_NO_SMP 1 #endif #ifndef _WIN32 # include # include # include #else # include # define WIN32_LEAN_AND_MEAN # include # include # ifndef EV_SELECT_IS_WINSOCKET # define EV_SELECT_IS_WINSOCKET 1 # endif # undef EV_AVOID_STDIO #endif /* this block tries to deduce configuration from header-defined symbols and defaults */ /* try to deduce the maximum number of signals on this platform */ #if defined EV_NSIG /* use what's provided */ #elif defined NSIG # define EV_NSIG (NSIG) #elif defined _NSIG # define EV_NSIG (_NSIG) #elif defined SIGMAX # define EV_NSIG (SIGMAX+1) #elif defined SIG_MAX # define EV_NSIG (SIG_MAX+1) #elif defined _SIG_MAX # define EV_NSIG (_SIG_MAX+1) #elif defined MAXSIG # define EV_NSIG (MAXSIG+1) #elif defined MAX_SIG # define EV_NSIG (MAX_SIG+1) #elif defined SIGARRAYSIZE # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */ #elif defined _sys_nsig # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */ #else # define EV_NSIG (8 * sizeof (sigset_t) + 1) #endif #ifndef EV_USE_FLOOR # define EV_USE_FLOOR 0 #endif #ifndef EV_USE_CLOCK_SYSCALL # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17 # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS # else # define EV_USE_CLOCK_SYSCALL 0 # endif #endif #if !(_POSIX_TIMERS > 0) # ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 # endif # ifndef EV_USE_REALTIME # define EV_USE_REALTIME 0 # endif #endif #ifndef EV_USE_MONOTONIC # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0 # define EV_USE_MONOTONIC EV_FEATURE_OS # else # define EV_USE_MONOTONIC 0 # endif #endif #ifndef EV_USE_REALTIME # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL #endif #ifndef EV_USE_NANOSLEEP # if _POSIX_C_SOURCE >= 199309L # define EV_USE_NANOSLEEP EV_FEATURE_OS # else # define EV_USE_NANOSLEEP 0 # endif #endif #ifndef EV_USE_SELECT # define EV_USE_SELECT EV_FEATURE_BACKENDS #endif #ifndef EV_USE_POLL # ifdef _WIN32 # define EV_USE_POLL 0 # else # define EV_USE_POLL EV_FEATURE_BACKENDS # endif #endif #ifndef EV_USE_EPOLL # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) # define EV_USE_EPOLL EV_FEATURE_BACKENDS # else # define EV_USE_EPOLL 0 # endif #endif #ifndef EV_USE_KQUEUE # define EV_USE_KQUEUE 0 #endif #ifndef EV_USE_PORT # define EV_USE_PORT 0 #endif #ifndef EV_USE_LINUXAIO # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ # define EV_USE_LINUXAIO 0 /* was: 1, always off by default */ # else # define EV_USE_LINUXAIO 0 # endif #endif #ifndef EV_USE_IOURING # if __linux /* later checks might disable again */ # define EV_USE_IOURING 1 # else # define EV_USE_IOURING 0 # endif #endif #ifndef EV_USE_INOTIFY # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) # define EV_USE_INOTIFY EV_FEATURE_OS # else # define EV_USE_INOTIFY 0 # endif #endif #ifndef EV_PID_HASHSIZE # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1 #endif #ifndef EV_INOTIFY_HASHSIZE # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 
16 : 1 #endif #ifndef EV_USE_EVENTFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define EV_USE_EVENTFD EV_FEATURE_OS # else # define EV_USE_EVENTFD 0 # endif #endif #ifndef EV_USE_SIGNALFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define EV_USE_SIGNALFD EV_FEATURE_OS # else # define EV_USE_SIGNALFD 0 # endif #endif #ifndef EV_USE_TIMERFD # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8)) # define EV_USE_TIMERFD EV_FEATURE_OS # else # define EV_USE_TIMERFD 0 # endif #endif #if 0 /* debugging */ # define EV_VERIFY 3 # define EV_USE_4HEAP 1 # define EV_HEAP_CACHE_AT 1 #endif #ifndef EV_VERIFY # define EV_VERIFY (EV_FEATURE_API ? 1 : 0) #endif #ifndef EV_USE_4HEAP # define EV_USE_4HEAP EV_FEATURE_DATA #endif #ifndef EV_HEAP_CACHE_AT # define EV_HEAP_CACHE_AT EV_FEATURE_DATA #endif #ifdef __ANDROID__ /* supposedly, android doesn't typedef fd_mask */ # undef EV_USE_SELECT # define EV_USE_SELECT 0 /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */ # undef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 #endif /* aix's poll.h seems to cause lots of trouble */ #ifdef _AIX /* AIX has a completely broken poll.h header */ # undef EV_USE_POLL # define EV_USE_POLL 0 #endif /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ /* which makes programs even slower. might work on other unices, too. */ #if EV_USE_CLOCK_SYSCALL # include # ifdef SYS_clock_gettime # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) # undef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 1 # define EV_NEED_SYSCALL 1 # else # undef EV_USE_CLOCK_SYSCALL # define EV_USE_CLOCK_SYSCALL 0 # endif #endif /* this block fixes any misconfiguration where we know we run into trouble otherwise */ #ifndef CLOCK_MONOTONIC # undef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 #endif #ifndef CLOCK_REALTIME # undef EV_USE_REALTIME # define EV_USE_REALTIME 0 #endif #if !EV_STAT_ENABLE # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 #endif #if __linux && EV_USE_IOURING # include # if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) # undef EV_USE_IOURING # define EV_USE_IOURING 0 # endif #endif #if !EV_USE_NANOSLEEP /* hp-ux has it in sys/time.h, which we unconditionally include above */ # if !defined _WIN32 && !defined __hpux # include # endif #endif #if EV_USE_LINUXAIO # include # if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */ # define EV_NEED_SYSCALL 1 # else # undef EV_USE_LINUXAIO # define EV_USE_LINUXAIO 0 # endif #endif #if EV_USE_IOURING # include # if !SYS_io_uring_setup && __linux && !__alpha # define SYS_io_uring_setup 425 # define SYS_io_uring_enter 426 # define SYS_io_uring_wregister 427 # endif # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */ # define EV_NEED_SYSCALL 1 # else # undef EV_USE_IOURING # define EV_USE_IOURING 0 # endif #endif #if EV_USE_INOTIFY # include # include /* some very old inotify.h headers don't have IN_DONT_FOLLOW */ # ifndef IN_DONT_FOLLOW # undef EV_USE_INOTIFY # define EV_USE_INOTIFY 0 # endif #endif #if EV_USE_EVENTFD /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */ # include # ifndef EFD_NONBLOCK # define EFD_NONBLOCK O_NONBLOCK # endif # ifndef EFD_CLOEXEC # ifdef O_CLOEXEC # define EFD_CLOEXEC O_CLOEXEC # else # define EFD_CLOEXEC 02000000 # endif # endif EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); #endif #if 
EV_USE_SIGNALFD /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */ # include # ifndef SFD_NONBLOCK # define SFD_NONBLOCK O_NONBLOCK # endif # ifndef SFD_CLOEXEC # ifdef O_CLOEXEC # define SFD_CLOEXEC O_CLOEXEC # else # define SFD_CLOEXEC 02000000 # endif # endif EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags); struct signalfd_siginfo { uint32_t ssi_signo; char pad[128 - sizeof (uint32_t)]; }; #endif /* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */ #if EV_USE_TIMERFD # include /* timerfd is only used for periodics */ # if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE # undef EV_USE_TIMERFD # define EV_USE_TIMERFD 0 # endif #endif /*****************************************************************************/ #if EV_VERIFY >= 3 # define EV_FREQUENT_CHECK ev_verify (EV_A) #else # define EV_FREQUENT_CHECK do { } while (0) #endif /* * This is used to work around floating point rounding problems. * This value is good at least till the year 4000. */ #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ #define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */ /* find a portable timestamp that is "always" in the future but fits into time_t. * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t, * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */ #define EV_TSTAMP_HUGE \ (sizeof (time_t) >= 8 ? 10000000000000. \ : 0 < (time_t)4294967295 ? 4294967295. \ : 2147483647.) \ #ifndef EV_TS_CONST # define EV_TS_CONST(nv) nv # define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999 # define EV_TS_FROM_USEC(us) us * 1e-6 # define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) # define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) # define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6) # define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9) #endif /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ /* ECB.H BEGIN */ /* * libecb - http://software.schmorp.de/pkg/libecb * * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann * Copyright (©) 2011 Emanuele Giaquinta * All rights reserved. * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef ECB_H #define ECB_H /* 16 bits major, 16 bits minor */ #define ECB_VERSION 0x00010008 #include /* for memcpy */ #if defined (_WIN32) && !defined (__MINGW32__) typedef signed char int8_t; typedef unsigned char uint8_t; typedef signed char int_fast8_t; typedef unsigned char uint_fast8_t; typedef signed short int16_t; typedef unsigned short uint16_t; typedef signed int int_fast16_t; typedef unsigned int uint_fast16_t; typedef signed int int32_t; typedef unsigned int uint32_t; typedef signed int int_fast32_t; typedef unsigned int uint_fast32_t; #if __GNUC__ typedef signed long long int64_t; typedef unsigned long long uint64_t; #else /* _MSC_VER || __BORLANDC__ */ typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; #endif typedef int64_t int_fast64_t; typedef uint64_t uint_fast64_t; #ifdef _WIN64 #define ECB_PTRSIZE 8 typedef uint64_t uintptr_t; typedef int64_t intptr_t; #else #define ECB_PTRSIZE 4 typedef uint32_t uintptr_t; typedef int32_t intptr_t; #endif #else #include #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU #define ECB_PTRSIZE 8 #else #define ECB_PTRSIZE 4 #endif #endif #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) #ifndef ECB_OPTIMIZE_SIZE #if __OPTIMIZE_SIZE__ #define ECB_OPTIMIZE_SIZE 1 #else #define ECB_OPTIMIZE_SIZE 0 #endif #endif /* work around x32 idiocy by defining proper macros */ #if ECB_GCC_AMD64 || ECB_MSVC_AMD64 #if _ILP32 #define ECB_AMD64_X32 1 #else #define ECB_AMD64 1 #endif #endif /* many compilers define _GNUC_ to some versions but then only implement * what their idiot authors think are the "more important" extensions, * causing enormous grief in return for some better fake benchmark numbers. * or so. * we try to detect these and simply assume they are not gcc - if they have * an issue with that they should have done it right in the first place. 
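 * (hence the ECB_GCC_VERSION macro right below evaluates to 0 as soon as
 *  __INTEL_COMPILER, __SUNPRO_C/CC, __llvm__ or __clang__ is detected, even
 *  though all of these define __GNUC__; e.g. ECB_GCC_VERSION(4,7) is false
 *  under clang although clang advertises gcc 4.2 compatibility, and clang
 *  features are probed via ECB_CLANG_VERSION / __has_builtin instead)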
*/ #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__ #define ECB_GCC_VERSION(major,minor) 0 #else #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) #endif #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor))) #if __clang__ && defined __has_builtin #define ECB_CLANG_BUILTIN(x) __has_builtin (x) #else #define ECB_CLANG_BUILTIN(x) 0 #endif #if __clang__ && defined __has_extension #define ECB_CLANG_EXTENSION(x) __has_extension (x) #else #define ECB_CLANG_EXTENSION(x) 0 #endif #define ECB_CPP (__cplusplus+0) #define ECB_CPP11 (__cplusplus >= 201103L) #define ECB_CPP14 (__cplusplus >= 201402L) #define ECB_CPP17 (__cplusplus >= 201703L) #if ECB_CPP #define ECB_C 0 #define ECB_STDC_VERSION 0 #else #define ECB_C 1 #define ECB_STDC_VERSION __STDC_VERSION__ #endif #define ECB_C99 (ECB_STDC_VERSION >= 199901L) #define ECB_C11 (ECB_STDC_VERSION >= 201112L) #define ECB_C17 (ECB_STDC_VERSION >= 201710L) #if ECB_CPP #define ECB_EXTERN_C extern "C" #define ECB_EXTERN_C_BEG ECB_EXTERN_C { #define ECB_EXTERN_C_END } #else #define ECB_EXTERN_C extern #define ECB_EXTERN_C_BEG #define ECB_EXTERN_C_END #endif /*****************************************************************************/ /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ #if ECB_NO_THREADS #define ECB_NO_SMP 1 #endif #if ECB_NO_SMP #define ECB_MEMORY_FENCE do { } while (0) #endif /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */ #if __xlC__ && ECB_CPP #include #endif #if 1400 <= _MSC_VER #include /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */ #endif #ifndef ECB_MEMORY_FENCE #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory") #if __i386 || __i386__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") #elif ECB_GCC_AMD64 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") #elif defined __ARM_ARCH_2__ \ || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \ || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \ || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \ || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \ || defined __ARM_ARCH_5TEJ__ /* should not need any, unless running old code on newer cpu - arm doesn't support that */ #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \ || defined __ARM_ARCH_6T2__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__ #define 
ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") #elif __aarch64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory") #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8) #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") #elif defined __s390__ || defined __s390x__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") #elif defined __mips__ /* GNU/Linux emulates sync on mips1 architectures, so we force its use */ /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */ #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory") #elif defined __alpha__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") #elif defined __hppa__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") #elif defined __ia64__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory") #elif defined __m68k__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #elif defined __m88k__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory") #elif defined __sh__ #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") #endif #endif #endif #ifndef ECB_MEMORY_FENCE #if ECB_GCC_VERSION(4,7) /* see comment below (stdatomic.h) about the C11 memory model. */ #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED) #elif ECB_CLANG_EXTENSION(c_atomic) /* see comment below (stdatomic.h) about the C11 memory model. */ #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED) #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ #define ECB_MEMORY_FENCE __sync_synchronize () #elif _MSC_VER >= 1500 /* VC++ 2008 */ /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier() #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */ #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier() #elif _MSC_VER >= 1400 /* VC++ 2005 */ #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) #define ECB_MEMORY_FENCE _ReadWriteBarrier () #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () #elif defined _WIN32 #include #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... 
scary */ #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 #include #define ECB_MEMORY_FENCE __machine_rw_barrier () #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier () #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier () #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier () #elif __xlC__ #define ECB_MEMORY_FENCE __sync () #endif #endif #ifndef ECB_MEMORY_FENCE #if ECB_C11 && !defined __STDC_NO_ATOMICS__ /* we assume that these memory fences work on all variables/all memory accesses, */ /* not just C11 atomics and atomic accesses */ #include #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire) #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release) #endif #endif #ifndef ECB_MEMORY_FENCE #if !ECB_AVOID_PTHREADS /* * if you get undefined symbol references to pthread_mutex_lock, * or failure to find pthread.h, then you should implement * the ECB_MEMORY_FENCE operations for your cpu/compiler * OR provide pthread.h and link against the posix thread library * of your system. */ #include #define ECB_NEEDS_PTHREADS 1 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER; #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) #endif #endif #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE #endif #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE #endif #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */ #endif /*****************************************************************************/ #if ECB_CPP #define ecb_inline static inline #elif ECB_GCC_VERSION(2,5) #define ecb_inline static __inline__ #elif ECB_C99 #define ecb_inline static inline #else #define ecb_inline static #endif #if ECB_GCC_VERSION(3,3) #define ecb_restrict __restrict__ #elif ECB_C99 #define ecb_restrict restrict #else #define ecb_restrict #endif typedef int ecb_bool; #define ECB_CONCAT_(a, b) a ## b #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) #define ECB_STRINGIFY_(a) # a #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr)) #define ecb_function_ ecb_inline #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8) #define ecb_attribute(attrlist) __attribute__ (attrlist) #else #define ecb_attribute(attrlist) #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p) #define ecb_is_constant(expr) __builtin_constant_p (expr) #else /* possible C11 impl for integral types typedef struct ecb_is_constant_struct ecb_is_constant_struct; #define ecb_is_constant(expr) _Generic ((1 ? 
(struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */ #define ecb_is_constant(expr) 0 #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect) #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) #else #define ecb_expect(expr,value) (expr) #endif #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch) #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) #else #define ecb_prefetch(addr,rw,locality) #endif /* no emulation for ecb_decltype */ #if ECB_CPP11 // older implementations might have problems with decltype(x)::type, work around it template struct ecb_decltype_t { typedef T type; }; #define ecb_decltype(x) ecb_decltype_t::type #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8) #define ecb_decltype(x) __typeof__ (x) #endif #if _MSC_VER >= 1300 #define ecb_deprecated __declspec (deprecated) #else #define ecb_deprecated ecb_attribute ((__deprecated__)) #endif #if _MSC_VER >= 1500 #define ecb_deprecated_message(msg) __declspec (deprecated (msg)) #elif ECB_GCC_VERSION(4,5) #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)) #else #define ecb_deprecated_message(msg) ecb_deprecated #endif #if _MSC_VER >= 1400 #define ecb_noinline __declspec (noinline) #else #define ecb_noinline ecb_attribute ((__noinline__)) #endif #define ecb_unused ecb_attribute ((__unused__)) #define ecb_const ecb_attribute ((__const__)) #define ecb_pure ecb_attribute ((__pure__)) #if ECB_C11 || __IBMC_NORETURN /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */ #define ecb_noreturn _Noreturn #elif ECB_CPP11 #define ecb_noreturn [[noreturn]] #elif _MSC_VER >= 1200 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */ #define ecb_noreturn __declspec (noreturn) #else #define ecb_noreturn ecb_attribute ((__noreturn__)) #endif #if ECB_GCC_VERSION(4,3) #define ecb_artificial ecb_attribute ((__artificial__)) #define ecb_hot ecb_attribute ((__hot__)) #define ecb_cold ecb_attribute ((__cold__)) #else #define ecb_artificial #define ecb_hot #define ecb_cold #endif /* put around conditional expressions if you are very sure that the */ /* expression is mostly true or mostly false. note that these return */ /* booleans, not the expression. 
*/ #define ecb_expect_false(expr) ecb_expect (!!(expr), 0) #define ecb_expect_true(expr) ecb_expect (!!(expr), 1) /* for compatibility to the rest of the world */ #define ecb_likely(expr) ecb_expect_true (expr) #define ecb_unlikely(expr) ecb_expect_false (expr) /* count trailing zero bits and count # of one bits */ #if ECB_GCC_VERSION(3,4) \ || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \ && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \ && ECB_CLANG_BUILTIN(__builtin_popcount)) /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ #define ecb_ld32(x) (__builtin_clz (x) ^ 31) #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) #define ecb_ctz32(x) __builtin_ctz (x) #define ecb_ctz64(x) __builtin_ctzll (x) #define ecb_popcount32(x) __builtin_popcount (x) /* no popcountll */ #else ecb_function_ ecb_const int ecb_ctz32 (uint32_t x); ecb_function_ ecb_const int ecb_ctz32 (uint32_t x) { #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanForward (&r, x); return (int)r; #else int r = 0; x &= ~x + 1; /* this isolates the lowest bit */ #if ECB_branchless_on_i386 r += !!(x & 0xaaaaaaaa) << 0; r += !!(x & 0xcccccccc) << 1; r += !!(x & 0xf0f0f0f0) << 2; r += !!(x & 0xff00ff00) << 3; r += !!(x & 0xffff0000) << 4; #else if (x & 0xaaaaaaaa) r += 1; if (x & 0xcccccccc) r += 2; if (x & 0xf0f0f0f0) r += 4; if (x & 0xff00ff00) r += 8; if (x & 0xffff0000) r += 16; #endif return r; #endif } ecb_function_ ecb_const int ecb_ctz64 (uint64_t x); ecb_function_ ecb_const int ecb_ctz64 (uint64_t x) { #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanForward64 (&r, x); return (int)r; #else int shift = x & 0xffffffff ? 0 : 32; return ecb_ctz32 (x >> shift) + shift; #endif } ecb_function_ ecb_const int ecb_popcount32 (uint32_t x); ecb_function_ ecb_const int ecb_popcount32 (uint32_t x) { x -= (x >> 1) & 0x55555555; x = ((x >> 2) & 0x33333333) + (x & 0x33333333); x = ((x >> 4) + x) & 0x0f0f0f0f; x *= 0x01010101; return x >> 24; } ecb_function_ ecb_const int ecb_ld32 (uint32_t x); ecb_function_ ecb_const int ecb_ld32 (uint32_t x) { #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanReverse (&r, x); return (int)r; #else int r = 0; if (x >> 16) { x >>= 16; r += 16; } if (x >> 8) { x >>= 8; r += 8; } if (x >> 4) { x >>= 4; r += 4; } if (x >> 2) { x >>= 2; r += 2; } if (x >> 1) { r += 1; } return r; #endif } ecb_function_ ecb_const int ecb_ld64 (uint64_t x); ecb_function_ ecb_const int ecb_ld64 (uint64_t x) { #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) unsigned long r; _BitScanReverse64 (&r, x); return (int)r; #else int r = 0; if (x >> 32) { x >>= 32; r += 32; } return r + ecb_ld32 (x); #endif } #endif ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x); ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x); ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x) { return ( (x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; } ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x); ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x) { x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1); x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2); x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4); x 
= ( x >> 8 ) | ( x << 8); return x; } ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x) { x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4); x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8); x = ( x >> 16 ) | ( x << 16); return x; } /* popcount64 is only available on 64 bit cpus as gcc builtin */ /* so for this version we are lazy */ ecb_function_ ecb_const int ecb_popcount64 (uint64_t x); ecb_function_ ecb_const int ecb_popcount64 (uint64_t x) { return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); } ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count); ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count); ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count); ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count); ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count); ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count); ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count); ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count); ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } #if ECB_CPP inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); } inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); } inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); } inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); } inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); } inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); } inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); } inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); } inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); } inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); } inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); } inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); } inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); } inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); } inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); } inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); } inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); } inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); } inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); } inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, 
count); } inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); } inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); } inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); } inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); } inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); } inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); } inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); } #endif #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) #define ecb_bswap16(x) __builtin_bswap16 (x) #else #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) #endif #define ecb_bswap32(x) __builtin_bswap32 (x) #define ecb_bswap64(x) __builtin_bswap64 (x) #elif _MSC_VER #include #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x))) #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x))) #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x))) #else ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x); ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x) { return ecb_rotl16 (x, 8); } ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x) { return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); } ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x); ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x) { return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); } #endif #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable) #define ecb_unreachable() __builtin_unreachable () #else /* this seems to work fine, but gcc always emits a warning for it :/ */ ecb_inline ecb_noreturn void ecb_unreachable (void); ecb_inline ecb_noreturn void ecb_unreachable (void) { } #endif /* try to tell the compiler that some condition is definitely true */ #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0 ecb_inline ecb_const uint32_t ecb_byteorder_helper (void); ecb_inline ecb_const uint32_t ecb_byteorder_helper (void) { /* the union code still generates code under pressure in gcc, */ /* but less than using pointers, and always seems to */ /* successfully return a constant. 
*/ /* the reason why we have this horrible preprocessor mess */ /* is to avoid it in all cases, at least on common architectures */ /* or when using a recent enough gcc version (>= 4.6) */ #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__) #define ECB_LITTLE_ENDIAN 1 return 0x44332211; #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \ || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__) #define ECB_BIG_ENDIAN 1 return 0x11223344; #else union { uint8_t c[4]; uint32_t u; } u = { 0x11, 0x22, 0x33, 0x44 }; return u.u; #endif } ecb_inline ecb_const ecb_bool ecb_big_endian (void); ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } ecb_inline ecb_const ecb_bool ecb_little_endian (void); ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } /*****************************************************************************/ /* unaligned load/store */ ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; } ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; } ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; } ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; } ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); } ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); } ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); } ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); } ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); } ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); } ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; } ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; } ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? 
ecb_bswap64 (v) : v; } ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); } ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); } ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); } ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); } ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); } ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); } ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); } ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); } ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); } #if ECB_CPP inline uint8_t ecb_bswap (uint8_t v) { return v; } inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); } inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); } inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); } template inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } template inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; } template inline T ecb_peek (const void *ptr) { return *(const T *)ptr; } template inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek (ptr)); } template inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek (ptr)); } template inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; } template inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u (ptr)); } template inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u (ptr)); } template inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } template inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; } template inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; } template inline void ecb_poke_be (void *ptr, T v) { return ecb_poke (ptr, ecb_host_to_be (v)); } template inline void ecb_poke_le (void *ptr, T v) { return ecb_poke (ptr, ecb_host_to_le (v)); } template inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); } template inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u (ptr, ecb_host_to_be (v)); } template inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u (ptr, ecb_host_to_le (v)); } #endif /*****************************************************************************/ #if ECB_GCC_VERSION(3,0) || ECB_C99 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) #else #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) #endif #if ECB_CPP template static inline T ecb_div_rd (T val, T div) { return val < 0 ? - ((-val + div - 1) / div) : (val ) / div; } template static inline T ecb_div_ru (T val, T div) { return val < 0 ? - ((-val ) / div) : (val + div - 1) / div; } #else #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) #define ecb_div_ru(val,div) ((val) < 0 ? 
- ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) #endif #if ecb_cplusplus_does_not_suck /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ template static inline int ecb_array_length (const T (&arr)[N]) { return N; } #else #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) #endif /*****************************************************************************/ ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x) { unsigned int s = (x & 0x8000) << (31 - 15); int e = (x >> 10) & 0x001f; unsigned int m = x & 0x03ff; if (ecb_expect_false (e == 31)) /* infinity or NaN */ e = 255 - (127 - 15); else if (ecb_expect_false (!e)) { if (ecb_expect_true (!m)) /* zero, handled by code below by forcing e to 0 */ e = 0 - (127 - 15); else { /* subnormal, renormalise */ unsigned int s = 10 - ecb_ld32 (m); m = (m << s) & 0x3ff; /* mask implicit bit */ e -= s - 1; } } /* e and m now are normalised, or zero, (or inf or nan) */ e += 127 - 15; return s | (e << 23) | (m << (23 - 10)); } ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x); ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x) { unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */ unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */ unsigned int m = x & 0x007fffff; x &= 0x7fffffff; /* if it's within range of binary16 normals, use fast path */ if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff)) { /* mantissa round-to-even */ m += 0x00000fff + ((m >> (23 - 10)) & 1); /* handle overflow */ if (ecb_expect_false (m >= 0x00800000)) { m >>= 1; e += 1; } return s | (e << 10) | (m >> (23 - 10)); } /* handle large numbers and infinity */ if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000)) return s | 0x7c00; /* handle zero, subnormals and small numbers */ if (ecb_expect_true (x < 0x38800000)) { /* zero */ if (ecb_expect_true (!x)) return s; /* handle subnormals */ /* too small, will be zero */ if (e < (14 - 24)) /* might not be sharp, but is good enough */ return s; m |= 0x00800000; /* make implicit bit explicit */ /* very tricky - we need to round to the nearest e (+10) bit value */ { unsigned int bits = 14 - e; unsigned int half = (1 << (bits - 1)) - 1; unsigned int even = (m >> bits) & 1; /* if this overflows, we will end up with a normalised number */ m = (m + half + even) >> bits; } return s | m; } /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */ m >>= 13; return s | 0x7c00 | m | !m; } /*******************************************************************************/ /* floating point stuff, can be disabled by defining ECB_NO_LIBM */ /* basically, everything uses "ieee pure-endian" floating point numbers */ /* the only noteworthy exception is ancient armle, which uses order 43218765 */ #if 0 \ || __i386 || __i386__ \ || ECB_GCC_AMD64 \ || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \ || defined __s390__ || defined __s390x__ \ || defined __mips__ \ || defined __alpha__ \ || defined __hppa__ \ || defined __ia64__ \ || defined __m68k__ \ || defined __m88k__ \ || defined __sh__ \ || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ || defined __aarch64__ #define ECB_STDFP 1 #else #define ECB_STDFP 0 #endif 
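/* Illustrative usage sketch (editor's addition, not part of libecb/libev and
 * guarded by #if 0 so it never enters the build): a round trip through the
 * half-precision helpers defined above.  The bit patterns in the comments
 * follow from IEEE-754 binary16/binary32 semantics. */
#if 0
#include <stdio.h>

static void
ecb_binary16_example (void)
{
  /* 0x3c00 is the binary16 encoding of 1.0; it expands to the binary32
   * pattern 0x3f800000 */
  uint32_t one_b32 = ecb_binary16_to_binary32 (0x3c00);

  /* 1.0 is exactly representable in both formats, so converting back
   * must reproduce the original half */
  uint16_t one_b16 = ecb_binary32_to_binary16 (one_b32);

  printf ("as binary32: 0x%08x, back to binary16: 0x%04x\n",
          (unsigned)one_b32, (unsigned)one_b16);

  /* values outside the binary16 range collapse to infinity (0x7c00),
   * handled by the "large numbers and infinity" branch above;
   * 0x7f000000 is the binary32 encoding of 2**127 */
  printf ("overflow: 0x%04x\n",
          (unsigned)ecb_binary32_to_binary16 (0x7f000000));
}
#endif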
#ifndef ECB_NO_LIBM #include /* for frexp*, ldexp*, INFINITY, NAN */ /* only the oldest of old doesn't have this one. solaris. */ #ifdef INFINITY #define ECB_INFINITY INFINITY #else #define ECB_INFINITY HUGE_VAL #endif #ifdef NAN #define ECB_NAN NAN #else #define ECB_NAN ECB_INFINITY #endif #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L #define ecb_ldexpf(x,e) ldexpf ((x), (e)) #define ecb_frexpf(x,e) frexpf ((x), (e)) #else #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e)) #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e)) #endif /* convert a float to ieee single/binary32 */ ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x); ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x) { uint32_t r; #if ECB_STDFP memcpy (&r, &x, 4); #else /* slow emulation, works for anything but -0 */ uint32_t m; int e; if (x == 0e0f ) return 0x00000000U; if (x > +3.40282346638528860e+38f) return 0x7f800000U; if (x < -3.40282346638528860e+38f) return 0xff800000U; if (x != x ) return 0x7fbfffffU; m = ecb_frexpf (x, &e) * 0x1000000U; r = m & 0x80000000U; if (r) m = -m; if (e <= -126) { m &= 0xffffffU; m >>= (-125 - e); e = -126; } r |= (e + 126) << 23; r |= m & 0x7fffffU; #endif return r; } /* converts an ieee single/binary32 to a float */ ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x); ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x) { float r; #if ECB_STDFP memcpy (&r, &x, 4); #else /* emulation, only works for normals and subnormals and +0 */ int neg = x >> 31; int e = (x >> 23) & 0xffU; x &= 0x7fffffU; if (e) x |= 0x800000U; else e = 1; /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */ r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126); r = neg ? -r : r; #endif return r; } /* convert a double to ieee double/binary64 */ ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x); ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x) { uint64_t r; #if ECB_STDFP memcpy (&r, &x, 8); #else /* slow emulation, works for anything but -0 */ uint64_t m; int e; if (x == 0e0 ) return 0x0000000000000000U; if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U; if (x < -1.79769313486231470e+308) return 0xfff0000000000000U; if (x != x ) return 0X7ff7ffffffffffffU; m = frexp (x, &e) * 0x20000000000000U; r = m & 0x8000000000000000;; if (r) m = -m; if (e <= -1022) { m &= 0x1fffffffffffffU; m >>= (-1021 - e); e = -1022; } r |= ((uint64_t)(e + 1022)) << 52; r |= m & 0xfffffffffffffU; #endif return r; } /* converts an ieee double/binary64 to a double */ ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x); ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x) { double r; #if ECB_STDFP memcpy (&r, &x, 8); #else /* emulation, only works for normals and subnormals and +0 */ int neg = x >> 63; int e = (x >> 52) & 0x7ffU; x &= 0xfffffffffffffU; if (e) x |= 0x10000000000000U; else e = 1; /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */ r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022); r = neg ? 
-r : r; #endif return r; } /* convert a float to ieee half/binary16 */ ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x); ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x) { return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x)); } /* convert an ieee half/binary16 to float */ ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x); ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x) { return ecb_binary32_to_float (ecb_binary16_to_binary32 (x)); } #endif #endif /* ECB.H END */ #if ECB_MEMORY_FENCE_NEEDS_PTHREADS /* if your architecture doesn't need memory fences, e.g. because it is * single-cpu/core, or if you use libev in a project that doesn't use libev * from multiple threads, then you can define ECB_NO_THREADS when compiling * libev, in which cases the memory fences become nops. * alternatively, you can remove this #error and link against libpthread, * which will then provide the memory fences. */ # error "memory fences not defined for your architecture, please report" #endif #ifndef ECB_MEMORY_FENCE # define ECB_MEMORY_FENCE do { } while (0) # define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE #endif #define inline_size ecb_inline #if EV_FEATURE_CODE # define inline_speed ecb_inline #else # define inline_speed ecb_noinline static #endif /*****************************************************************************/ /* raw syscall wrappers */ #if EV_NEED_SYSCALL #include <sys/syscall.h> /* * define some syscall wrappers for common architectures * this is mostly for nice looks during debugging, not performance. * our syscalls return < 0, not == -1, on error. which is good * enough for linux aio. * TODO: arm is also common nowadays, maybe even mips and x86 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... 
*/ #if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE /* the costly errno access probably kills this for size optimisation */ #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ ({ \ long res; \ register unsigned long r6 __asm__ ("r9" ); \ register unsigned long r5 __asm__ ("r8" ); \ register unsigned long r4 __asm__ ("r10"); \ register unsigned long r3 __asm__ ("rdx"); \ register unsigned long r2 __asm__ ("rsi"); \ register unsigned long r1 __asm__ ("rdi"); \ if (narg >= 6) r6 = (unsigned long)(arg6); \ if (narg >= 5) r5 = (unsigned long)(arg5); \ if (narg >= 4) r4 = (unsigned long)(arg4); \ if (narg >= 3) r3 = (unsigned long)(arg3); \ if (narg >= 2) r2 = (unsigned long)(arg2); \ if (narg >= 1) r1 = (unsigned long)(arg1); \ __asm__ __volatile__ ( \ "syscall\n\t" \ : "=a" (res) \ : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \ : "cc", "r11", "cx", "memory"); \ errno = -res; \ res; \ }) #endif #ifdef ev_syscall #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0) #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0) #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0) #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0) #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0) #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0) #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6) #else #define ev_syscall0(nr) syscall (nr) #define ev_syscall1(nr,arg1) syscall (nr, arg1) #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2) #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3) #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4) #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5) #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6) #endif #endif /*****************************************************************************/ #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) #if EV_MINPRI == EV_MAXPRI # define ABSPRI(w) (((W)w), 0) #else # define ABSPRI(w) (((W)w)->priority - EV_MINPRI) #endif #define EMPTY /* required for microsofts broken pseudo-c compiler */ typedef ev_watcher *W; typedef ev_watcher_list *WL; typedef ev_watcher_time *WT; #define ev_active(w) ((W)(w))->active #define ev_at(w) ((WT)(w))->at #if EV_USE_REALTIME /* sig_atomic_t is used to avoid per-thread variables or locking but still */ /* giving it a reasonably high chance of working on typical architectures */ static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */ #endif #if EV_USE_MONOTONIC static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? 
*/ #endif #ifndef EV_FD_TO_WIN32_HANDLE # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd) #endif #ifndef EV_WIN32_HANDLE_TO_FD # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0) #endif #ifndef EV_WIN32_CLOSE_FD # define EV_WIN32_CLOSE_FD(fd) close (fd) #endif #ifdef _WIN32 # include "ev_win32.c" #endif /*****************************************************************************/ #if EV_USE_LINUXAIO # include <linux/aio_abi.h> /* probably only needed for aio_context_t */ #endif /* define a suitable floor function (only used by periodics atm) */ #if EV_USE_FLOOR # include <math.h> # define ev_floor(v) floor (v) #else #include <float.h> /* a floor() replacement function, should be independent of ev_tstamp type */ ecb_noinline static ev_tstamp ev_floor (ev_tstamp v) { /* the choice of shift factor is not terribly important */ #if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; #else const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; #endif /* special treatment for negative arguments */ if (ecb_expect_false (v < 0.)) { ev_tstamp f = -ev_floor (-v); return f - (f == v ? 0 : 1); } /* argument too large for an unsigned long? then reduce it */ if (ecb_expect_false (v >= shift)) { ev_tstamp f; if (v == v - 1.) return v; /* very large numbers are assumed to be integer */ f = shift * ev_floor (v * (1. / shift)); return f + ev_floor (v - f); } /* fits into an unsigned long */ return (unsigned long)v; } #endif /*****************************************************************************/ #ifdef __linux # include <sys/utsname.h> #endif ecb_noinline ecb_cold static unsigned int ev_linux_version (void) { #ifdef __linux unsigned int v = 0; struct utsname buf; int i; char *p = buf.release; if (uname (&buf)) return 0; for (i = 3+1; --i; ) { unsigned int c = 0; for (;;) { if (*p >= '0' && *p <= '9') c = c * 10 + *p++ - '0'; else { p += *p == '.'; break; } } v = (v << 8) | c; } return v; #else return 0; #endif } /*****************************************************************************/ #if EV_AVOID_STDIO ecb_noinline ecb_cold static void ev_printerr (const char *msg) { write (STDERR_FILENO, msg, strlen (msg)); } #endif static void (*syserr_cb)(const char *msg) EV_NOEXCEPT; ecb_cold void ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT { syserr_cb = cb; } ecb_noinline ecb_cold static void ev_syserr (const char *msg) { if (!msg) msg = "(libev) system error"; if (syserr_cb) syserr_cb (msg); else { #if EV_AVOID_STDIO ev_printerr (msg); ev_printerr (": "); ev_printerr (strerror (errno)); ev_printerr ("\n"); #else perror (msg); #endif abort (); } } static void * ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT { /* some systems, notably openbsd and darwin, fail to properly * implement realloc (x, 0) (as required by both ansi c-89 and * the single unix specification, so work around them here. * recently, also (at least) fedora and debian started breaking it, * despite documenting it otherwise. 
*/ if (size) return realloc (ptr, size); free (ptr); return 0; } static void *(*alloc)(void *ptr, long size) EV_NOEXCEPT = ev_realloc_emul; ecb_cold void ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT { alloc = cb; } inline_speed void * ev_realloc (void *ptr, long size) { ptr = alloc (ptr, size); if (!ptr && size) { #if EV_AVOID_STDIO ev_printerr ("(libev) memory allocation failed, aborting.\n"); #else fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size); #endif abort (); } return ptr; } #define ev_malloc(size) ev_realloc (0, (size)) #define ev_free(ptr) ev_realloc ((ptr), 0) /*****************************************************************************/ /* set in reify when reification needed */ #define EV_ANFD_REIFY 1 /* file descriptor info structure */ typedef struct { WL head; unsigned char events; /* the events watched for */ unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ unsigned char emask; /* some backends store the actual kernel mask in here */ unsigned char eflags; /* flags field for use by backends */ #if EV_USE_EPOLL unsigned int egen; /* generation counter to counter epoll bugs */ #endif #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP SOCKET handle; #endif #if EV_USE_IOCP OVERLAPPED or, ow; #endif } ANFD; /* stores the pending event set for a given watcher */ typedef struct { W w; int events; /* the pending event set for the given watcher */ } ANPENDING; #if EV_USE_INOTIFY /* hash table entry per inotify-id */ typedef struct { WL head; } ANFS; #endif /* Heap Entry */ #if EV_HEAP_CACHE_AT /* a heap element */ typedef struct { ev_tstamp at; WT w; } ANHE; #define ANHE_w(he) (he).w /* access watcher, read-write */ #define ANHE_at(he) (he).at /* access cached at, read-only */ #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */ #else /* a heap element */ typedef WT ANHE; #define ANHE_w(he) (he) #define ANHE_at(he) (he)->at #define ANHE_at_cache(he) #endif #if EV_MULTIPLICITY struct ev_loop { ev_tstamp ev_rt_now; #define ev_rt_now ((loop)->ev_rt_now) #define VAR(name,decl) decl; #include "ev_vars.h" #undef VAR }; #include "ev_wrap.h" static struct ev_loop default_loop_struct; EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ #else EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */ #define VAR(name,decl) static decl; #include "ev_vars.h" #undef VAR static int ev_default_loop_ptr; #endif #if EV_FEATURE_API # define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A) # define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A) # define EV_INVOKE_PENDING invoke_cb (EV_A) #else # define EV_RELEASE_CB (void)0 # define EV_ACQUIRE_CB (void)0 # define EV_INVOKE_PENDING ev_invoke_pending (EV_A) #endif #define EVBREAK_RECURSE 0x80 /*****************************************************************************/ #ifndef EV_HAVE_EV_TIME ev_tstamp ev_time (void) EV_NOEXCEPT { #if EV_USE_REALTIME if (ecb_expect_true (have_realtime)) { struct timespec ts; clock_gettime (CLOCK_REALTIME, &ts); return EV_TS_GET (ts); } #endif { struct timeval tv; gettimeofday (&tv, 0); return EV_TV_GET (tv); } } #endif inline_size ev_tstamp get_clock (void) { #if EV_USE_MONOTONIC if (ecb_expect_true (have_monotonic)) { struct timespec ts; clock_gettime (CLOCK_MONOTONIC, &ts); return EV_TS_GET (ts); } #endif return ev_time (); } #if 
EV_MULTIPLICITY ev_tstamp ev_now (EV_P) EV_NOEXCEPT { return ev_rt_now; } #endif void ev_sleep (ev_tstamp delay) EV_NOEXCEPT { if (delay > EV_TS_CONST (0.)) { #if EV_USE_NANOSLEEP struct timespec ts; EV_TS_SET (ts, delay); nanosleep (&ts, 0); #elif defined _WIN32 /* maybe this should round up, as ms is very low resolution */ /* compared to select (µs) or nanosleep (ns) */ Sleep ((unsigned long)(EV_TS_TO_MSEC (delay))); #else struct timeval tv; /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ /* something not guaranteed by newer posix versions, but guaranteed */ /* by older ones */ EV_TV_SET (tv, delay); select (0, 0, 0, 0, &tv); #endif } } /*****************************************************************************/ #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ /* find a suitable new size for the given array, */ /* hopefully by rounding to a nice-to-malloc size */ inline_size int array_nextsize (int elem, int cur, int cnt) { int ncur = cur + 1; do ncur <<= 1; while (cnt > ncur); /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */ if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4) { ncur *= elem; ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1); ncur = ncur - sizeof (void *) * 4; ncur /= elem; } return ncur; } ecb_noinline ecb_cold static void * array_realloc (int elem, void *base, int *cur, int cnt) { *cur = array_nextsize (elem, *cur, cnt); return ev_realloc (base, elem * *cur); } #define array_needsize_noinit(base,offset,count) #define array_needsize_zerofill(base,offset,count) \ memset ((void *)(base + offset), 0, sizeof (*(base)) * (count)) #define array_needsize(type,base,cur,cnt,init) \ if (ecb_expect_false ((cnt) > (cur))) \ { \ ecb_unused int ocur_ = (cur); \ (base) = (type *)array_realloc \ (sizeof (type), (base), &(cur), (cnt)); \ init ((base), ocur_, ((cur) - ocur_)); \ } #if 0 #define array_slim(type,stem) \ if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ { \ stem ## max = array_roundsize (stem ## cnt >> 1); \ base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\ fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\ } #endif #define array_free(stem, idx) \ ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 /*****************************************************************************/ /* dummy callback for pending events */ ecb_noinline static void pendingcb (EV_P_ ev_prepare *w, int revents) { } ecb_noinline void ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT { W w_ = (W)w; int pri = ABSPRI (w_); if (ecb_expect_false (w_->pending)) pendings [pri][w_->pending - 1].events |= revents; else { w_->pending = ++pendingcnt [pri]; array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit); pendings [pri][w_->pending - 1].w = w_; pendings [pri][w_->pending - 1].events = revents; } pendingpri = NUMPRI - 1; } inline_speed void feed_reverse (EV_P_ W w) { array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit); rfeeds [rfeedcnt++] = w; } inline_size void feed_reverse_done (EV_P_ int revents) { do ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents); while (rfeedcnt); } inline_speed void queue_events (EV_P_ W *events, int eventcnt, int type) { int i; for (i = 0; i < eventcnt; ++i) ev_feed_event (EV_A_ events [i], type); } /*****************************************************************************/ 
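/* Illustrative usage sketch (editor's addition, not shipped with libev and
 * guarded by #if 0 so it is never compiled here): how the pending-event
 * machinery above is reached through the public API.  ev_feed_event queues
 * an arbitrary event set for a started watcher via the pendings [] arrays,
 * and the callback is invoked on the next loop iteration.  Assumes the
 * default EV_MULTIPLICITY build. */
#if 0
#include <stdio.h>
#include <ev.h>

static void
custom_cb (struct ev_loop *loop, ev_timer *w, int revents)
{
  if (revents & EV_CUSTOM)
    printf ("woken by ev_feed_event, not by the timeout\n");

  ev_timer_stop (loop, w);
  ev_break (loop, EVBREAK_ALL);
}

static void
feed_example (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  ev_timer w;

  /* a timeout far in the future, so the timer will not fire on its own */
  ev_timer_init (&w, custom_cb, 3600., 0.);
  ev_timer_start (loop, &w);

  /* queue EV_CUSTOM for the watcher; delivery happens through
   * ev_feed_event and the pendings [] arrays implemented above */
  ev_feed_event (loop, &w, EV_CUSTOM);

  ev_run (loop, 0);
}
#endif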
inline_speed void fd_event_nocheck (EV_P_ int fd, int revents) { ANFD *anfd = anfds + fd; ev_io *w; for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) { int ev = w->events & revents; if (ev) ev_feed_event (EV_A_ (W)w, ev); } } /* do not submit kernel events for fds that have reify set */ /* because that means they changed while we were polling for new events */ inline_speed void fd_event (EV_P_ int fd, int revents) { ANFD *anfd = anfds + fd; if (ecb_expect_true (!anfd->reify)) fd_event_nocheck (EV_A_ fd, revents); } void ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT { if (fd >= 0 && fd < anfdmax) fd_event_nocheck (EV_A_ fd, revents); } /* make sure the external fd watch events are in-sync */ /* with the kernel/libev internal state */ inline_size void fd_reify (EV_P) { int i; /* most backends do not modify the fdchanges list in backend_modfiy. * except io_uring, which has fixed-size buffers which might force us * to handle events in backend_modify, causing fdchanges to be amended, * which could result in an endless loop. * to avoid this, we do not dynamically handle fds that were added * during fd_reify. that means that for those backends, fdchangecnt * might be non-zero during poll, which must cause them to not block. * to not put too much of a burden on other backends, this detail * needs to be handled in the backend. */ int changecnt = fdchangecnt; #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP for (i = 0; i < changecnt; ++i) { int fd = fdchanges [i]; ANFD *anfd = anfds + fd; if (anfd->reify & EV__IOFDSET && anfd->head) { SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); if (handle != anfd->handle) { unsigned long arg; assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0)); /* handle changed, but fd didn't - we need to do it in two steps */ backend_modify (EV_A_ fd, anfd->events, 0); anfd->events = 0; anfd->handle = handle; } } } #endif for (i = 0; i < changecnt; ++i) { int fd = fdchanges [i]; ANFD *anfd = anfds + fd; ev_io *w; unsigned char o_events = anfd->events; unsigned char o_reify = anfd->reify; anfd->reify = 0; /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ { anfd->events = 0; for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) anfd->events |= (unsigned char)w->events; if (o_events != anfd->events) o_reify = EV__IOFDSET; /* actually |= */ } if (o_reify & EV__IOFDSET) backend_modify (EV_A_ fd, o_events, anfd->events); } /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added. * this is a rare case (see beginning comment in this function), so we copy them to the * front and hope the backend handles this case. 
*/ if (ecb_expect_false (fdchangecnt != changecnt)) memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges)); fdchangecnt -= changecnt; } /* something about the given fd changed */ inline_size void fd_change (EV_P_ int fd, int flags) { unsigned char reify = anfds [fd].reify; anfds [fd].reify = reify | flags; if (ecb_expect_true (!reify)) { ++fdchangecnt; array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); fdchanges [fdchangecnt - 1] = fd; } } /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ inline_speed ecb_cold void fd_kill (EV_P_ int fd) { ev_io *w; while ((w = (ev_io *)anfds [fd].head)) { ev_io_stop (EV_A_ w); ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); } } /* check whether the given fd is actually valid, for error recovery */ inline_size ecb_cold int fd_valid (int fd) { #ifdef _WIN32 return EV_FD_TO_WIN32_HANDLE (fd) != -1; #else return fcntl (fd, F_GETFD) != -1; #endif } /* called on EBADF to verify fds */ ecb_noinline ecb_cold static void fd_ebadf (EV_P) { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) if (!fd_valid (fd) && errno == EBADF) fd_kill (EV_A_ fd); } /* called on ENOMEM in select/poll to kill some fds and retry */ ecb_noinline ecb_cold static void fd_enomem (EV_P) { int fd; for (fd = anfdmax; fd--; ) if (anfds [fd].events) { fd_kill (EV_A_ fd); break; } } /* usually called after fork if backend needs to re-arm all fds from scratch */ ecb_noinline static void fd_rearm_all (EV_P) { int fd; for (fd = 0; fd < anfdmax; ++fd) if (anfds [fd].events) { anfds [fd].events = 0; anfds [fd].emask = 0; fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY); } } /* used to prepare libev internal fd's */ /* this is not fork-safe */ inline_speed void fd_intern (int fd) { #ifdef _WIN32 unsigned long arg = 1; ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg); #else fcntl (fd, F_SETFD, FD_CLOEXEC); fcntl (fd, F_SETFL, O_NONBLOCK); #endif } /*****************************************************************************/ /* * the heap functions want a real array index. array index 0 is guaranteed to not * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives * the branching factor of the d-tree. */ /* * at the moment we allow libev the luxury of two heaps, * a small-code-size 2-heap one and a ~1.5kb larger 4-heap * which is more cache-efficient. * the difference is about 5% with 50000+ watchers. 
*/ #if EV_USE_4HEAP #define DHEAP 4 #define HEAP0 (DHEAP - 1) /* index of first element in heap */ #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0) #define UPHEAP_DONE(p,k) ((p) == (k)) /* away from the root */ inline_speed void downheap (ANHE *heap, int N, int k) { ANHE he = heap [k]; ANHE *E = heap + N + HEAP0; for (;;) { ev_tstamp minat; ANHE *minpos; ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; /* find minimum child */ if (ecb_expect_true (pos + DHEAP - 1 < E)) { /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos)); if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos)); if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos)); } else if (pos < E) { /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos)); if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos)); if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos)); } else break; if (ANHE_at (he) <= minat) break; heap [k] = *minpos; ev_active (ANHE_w (*minpos)) = k; k = minpos - heap; } heap [k] = he; ev_active (ANHE_w (he)) = k; } #else /* not 4HEAP */ #define HEAP0 1 #define HPARENT(k) ((k) >> 1) #define UPHEAP_DONE(p,k) (!(p)) /* away from the root */ inline_speed void downheap (ANHE *heap, int N, int k) { ANHE he = heap [k]; for (;;) { int c = k << 1; if (c >= N + HEAP0) break; c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1]) ? 1 : 0; if (ANHE_at (he) <= ANHE_at (heap [c])) break; heap [k] = heap [c]; ev_active (ANHE_w (heap [k])) = k; k = c; } heap [k] = he; ev_active (ANHE_w (he)) = k; } #endif /* towards the root */ inline_speed void upheap (ANHE *heap, int k) { ANHE he = heap [k]; for (;;) { int p = HPARENT (k); if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he)) break; heap [k] = heap [p]; ev_active (ANHE_w (heap [k])) = k; k = p; } heap [k] = he; ev_active (ANHE_w (he)) = k; } /* move an element suitably so it is in a correct place */ inline_size void adjustheap (ANHE *heap, int N, int k) { if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)])) upheap (heap, k); else downheap (heap, N, k); } /* rebuild the heap: this function is used only once and executed rarely */ inline_size void reheap (ANHE *heap, int N) { int i; /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */ /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */ for (i = 0; i < N; ++i) upheap (heap, i + HEAP0); } /*****************************************************************************/ /* associate signal watchers to a signal */ typedef struct { EV_ATOMIC_T pending; #if EV_MULTIPLICITY EV_P; #endif WL head; } ANSIG; static ANSIG signals [EV_NSIG - 1]; /*****************************************************************************/ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE ecb_noinline ecb_cold static void evpipe_init (EV_P) { if (!ev_is_active (&pipe_w)) { int fds [2]; # if EV_USE_EVENTFD fds [0] = -1; fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); if (fds [1] < 0 && errno == EINVAL) fds [1] = eventfd (0, 0); if (fds [1] < 0) # endif { while (pipe (fds)) ev_syserr ("(libev) error creating signal/async pipe"); fd_intern (fds [0]); } evpipe [0] = fds [0]; if (evpipe [1] < 0) evpipe [1] = fds [1]; /* first call, set write fd */ else { 
/* on subsequent calls, do not change evpipe [1] */ /* so that evpipe_write can always rely on its value. */ /* this branch does not do anything sensible on windows, */ /* so must not be executed on windows */ dup2 (fds [1], evpipe [1]); close (fds [1]); } fd_intern (evpipe [1]); ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ); ev_io_start (EV_A_ &pipe_w); ev_unref (EV_A); /* watcher should not keep loop alive */ } } inline_speed void evpipe_write (EV_P_ EV_ATOMIC_T *flag) { ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ if (ecb_expect_true (*flag)) return; *flag = 1; ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ pipe_write_skipped = 1; ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ if (pipe_write_wanted) { int old_errno; pipe_write_skipped = 0; ECB_MEMORY_FENCE_RELEASE; old_errno = errno; /* save errno because write will clobber it */ #if EV_USE_EVENTFD if (evpipe [0] < 0) { uint64_t counter = 1; write (evpipe [1], &counter, sizeof (uint64_t)); } else #endif { #ifdef _WIN32 WSABUF buf; DWORD sent; buf.buf = (char *)&buf; buf.len = 1; WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0); #else write (evpipe [1], &(evpipe [1]), 1); #endif } errno = old_errno; } } /* called whenever the libev signal pipe */ /* got some events (signal, async) */ static void pipecb (EV_P_ ev_io *iow, int revents) { int i; if (revents & EV_READ) { #if EV_USE_EVENTFD if (evpipe [0] < 0) { uint64_t counter; read (evpipe [1], &counter, sizeof (uint64_t)); } else #endif { char dummy[4]; #ifdef _WIN32 WSABUF buf; DWORD recvd; DWORD flags = 0; buf.buf = dummy; buf.len = sizeof (dummy); WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0); #else read (evpipe [0], &dummy, sizeof (dummy)); #endif } } pipe_write_skipped = 0; ECB_MEMORY_FENCE; /* push out skipped, acquire flags */ #if EV_SIGNAL_ENABLE if (sig_pending) { sig_pending = 0; ECB_MEMORY_FENCE; for (i = EV_NSIG - 1; i--; ) if (ecb_expect_false (signals [i].pending)) ev_feed_signal_event (EV_A_ i + 1); } #endif #if EV_ASYNC_ENABLE if (async_pending) { async_pending = 0; ECB_MEMORY_FENCE; for (i = asynccnt; i--; ) if (asyncs [i]->sent) { asyncs [i]->sent = 0; ECB_MEMORY_FENCE_RELEASE; ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); } } #endif } /*****************************************************************************/ void ev_feed_signal (int signum) EV_NOEXCEPT { #if EV_MULTIPLICITY EV_P; ECB_MEMORY_FENCE_ACQUIRE; EV_A = signals [signum - 1].loop; if (!EV_A) return; #endif signals [signum - 1].pending = 1; evpipe_write (EV_A_ &sig_pending); } static void ev_sighandler (int signum) { #ifdef _WIN32 signal (signum, ev_sighandler); #endif ev_feed_signal (signum); } ecb_noinline void ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT { WL w; if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG)) return; --signum; #if EV_MULTIPLICITY /* it is permissible to try to feed a signal to the wrong loop */ /* or, likely more useful, feeding a signal nobody is waiting for */ if (ecb_expect_false (signals [signum].loop != EV_A)) return; #endif signals [signum].pending = 0; ECB_MEMORY_FENCE_RELEASE; for (w = signals [signum].head; w; w = w->next) ev_feed_event (EV_A_ (W)w, EV_SIGNAL); } #if EV_USE_SIGNALFD static void sigfdcb (EV_P_ ev_io *iow, int revents) { struct signalfd_siginfo si[2], *sip; /* these structs are big */ for (;;) { ssize_t res = read (sigfd, si, sizeof (si)); /* not ISO-C, 
as res might be -1, but works with SuS */ for (sip = si; (char *)sip < (char *)si + res; ++sip) ev_feed_signal_event (EV_A_ sip->ssi_signo); if (res < (ssize_t)sizeof (si)) break; } } #endif #endif /*****************************************************************************/ #if EV_CHILD_ENABLE static WL childs [EV_PID_HASHSIZE]; static ev_signal childev; #ifndef WIFCONTINUED # define WIFCONTINUED(status) 0 #endif /* handle a single child status event */ inline_speed void child_reap (EV_P_ int chain, int pid, int status) { ev_child *w; int traced = WIFSTOPPED (status) || WIFCONTINUED (status); for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) { if ((w->pid == pid || !w->pid) && (!traced || (w->flags & 1))) { ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */ w->rpid = pid; w->rstatus = status; ev_feed_event (EV_A_ (W)w, EV_CHILD); } } } #ifndef WCONTINUED # define WCONTINUED 0 #endif /* called on sigchld etc., calls waitpid */ static void childcb (EV_P_ ev_signal *sw, int revents) { int pid, status; /* some systems define WCONTINUED but then fail to support it (linux 2.4) */ if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) if (!WCONTINUED || errno != EINVAL || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) return; /* make sure we are called again until all children have been reaped */ /* we need to do it this way so that the callback gets called before we continue */ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); child_reap (EV_A_ pid, pid, status); if ((EV_PID_HASHSIZE) > 1) child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ } #endif /*****************************************************************************/ #if EV_USE_TIMERFD static void periodics_reschedule (EV_P); static void timerfdcb (EV_P_ ev_io *iow, int revents) { struct itimerspec its = { 0 }; its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2; timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0); ev_rt_now = ev_time (); /* periodics_reschedule only needs ev_rt_now */ /* but maybe in the future we want the full treatment. 
*/ /* now_floor = EV_TS_CONST (0.); time_update (EV_A_ EV_TSTAMP_HUGE); */ #if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); #endif } ecb_noinline ecb_cold static void evtimerfd_init (EV_P) { if (!ev_is_active (&timerfd_w)) { timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC); if (timerfd >= 0) { fd_intern (timerfd); /* just to be sure */ ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ); ev_set_priority (&timerfd_w, EV_MINPRI); ev_io_start (EV_A_ &timerfd_w); ev_unref (EV_A); /* watcher should not keep loop alive */ /* (re-) arm timer */ timerfdcb (EV_A_ 0, 0); } } } #endif /*****************************************************************************/ #if EV_USE_IOCP # include "ev_iocp.c" #endif #if EV_USE_PORT # include "ev_port.c" #endif #if EV_USE_KQUEUE # include "ev_kqueue.c" #endif #if EV_USE_EPOLL # include "ev_epoll.c" #endif #if EV_USE_LINUXAIO # include "ev_linuxaio.c" #endif #if EV_USE_IOURING # include "ev_iouring.c" #endif #if EV_USE_POLL # include "ev_poll.c" #endif #if EV_USE_SELECT # include "ev_select.c" #endif ecb_cold int ev_version_major (void) EV_NOEXCEPT { return EV_VERSION_MAJOR; } ecb_cold int ev_version_minor (void) EV_NOEXCEPT { return EV_VERSION_MINOR; } /* return true if we are running with elevated privileges and should ignore env variables */ inline_size ecb_cold int enable_secure (void) { #ifdef _WIN32 return 0; #else return getuid () != geteuid () || getgid () != getegid (); #endif } ecb_cold unsigned int ev_supported_backends (void) EV_NOEXCEPT { unsigned int flags = 0; if (EV_USE_PORT ) flags |= EVBACKEND_PORT; if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; if (EV_USE_LINUXAIO ) flags |= EVBACKEND_LINUXAIO; if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */ if (EV_USE_POLL ) flags |= EVBACKEND_POLL; if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; return flags; } ecb_cold unsigned int ev_recommended_backends (void) EV_NOEXCEPT { unsigned int flags = ev_supported_backends (); #ifndef __NetBSD__ /* kqueue is borked on everything but netbsd apparently */ /* it usually doesn't work correctly on anything but sockets and pipes */ flags &= ~EVBACKEND_KQUEUE; #endif #ifdef __APPLE__ /* only select works correctly on that "unix-certified" platform */ flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */ flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */ #endif #ifdef __FreeBSD__ flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */ #endif /* TODO: linuxaio is very experimental */ #if !EV_RECOMMEND_LINUXAIO flags &= ~EVBACKEND_LINUXAIO; #endif /* TODO: linuxaio is super experimental */ #if !EV_RECOMMEND_IOURING flags &= ~EVBACKEND_IOURING; #endif return flags; } ecb_cold unsigned int ev_embeddable_backends (void) EV_NOEXCEPT { int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING; /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ flags &= ~EVBACKEND_EPOLL; /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ return flags; } unsigned int ev_backend (EV_P) EV_NOEXCEPT { return backend; } #if EV_FEATURE_API unsigned int ev_iteration (EV_P) EV_NOEXCEPT { return loop_count; } unsigned int ev_depth (EV_P) EV_NOEXCEPT { return loop_depth; } void ev_set_io_collect_interval (EV_P_ ev_tstamp 
interval) EV_NOEXCEPT { io_blocktime = interval; } void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT { timeout_blocktime = interval; } void ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT { userdata = data; } void * ev_userdata (EV_P) EV_NOEXCEPT { return userdata; } void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT { invoke_cb = invoke_pending_cb; } void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT { release_cb = release; acquire_cb = acquire; } #endif /* initialise a loop structure, must be zero-initialised */ ecb_noinline ecb_cold static void loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT { if (!backend) { origflags = flags; #if EV_USE_REALTIME if (!have_realtime) { struct timespec ts; if (!clock_gettime (CLOCK_REALTIME, &ts)) have_realtime = 1; } #endif #if EV_USE_MONOTONIC if (!have_monotonic) { struct timespec ts; if (!clock_gettime (CLOCK_MONOTONIC, &ts)) have_monotonic = 1; } #endif /* pid check not overridable via env */ #ifndef _WIN32 if (flags & EVFLAG_FORKCHECK) curpid = getpid (); #endif if (!(flags & EVFLAG_NOENV) && !enable_secure () && getenv ("LIBEV_FLAGS")) flags = atoi (getenv ("LIBEV_FLAGS")); ev_rt_now = ev_time (); mn_now = get_clock (); now_floor = mn_now; rtmn_diff = ev_rt_now - mn_now; #if EV_FEATURE_API invoke_cb = ev_invoke_pending; #endif io_blocktime = 0.; timeout_blocktime = 0.; backend = 0; backend_fd = -1; sig_pending = 0; #if EV_ASYNC_ENABLE async_pending = 0; #endif pipe_write_skipped = 0; pipe_write_wanted = 0; evpipe [0] = -1; evpipe [1] = -1; #if EV_USE_INOTIFY fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; #endif #if EV_USE_SIGNALFD sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; #endif #if EV_USE_TIMERFD timerfd = flags & EVFLAG_NOTIMERFD ? 
-1 : -2; #endif if (!(flags & EVBACKEND_MASK)) flags |= ev_recommended_backends (); #if EV_USE_IOCP if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags); #endif #if EV_USE_PORT if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); #endif #if EV_USE_KQUEUE if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); #endif #if EV_USE_IOURING if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags); #endif #if EV_USE_LINUXAIO if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); #endif #if EV_USE_EPOLL if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags); #endif #if EV_USE_POLL if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags); #endif #if EV_USE_SELECT if (!backend && (flags & EVBACKEND_SELECT )) backend = select_init (EV_A_ flags); #endif ev_prepare_init (&pending_w, pendingcb); #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE ev_init (&pipe_w, pipecb); ev_set_priority (&pipe_w, EV_MAXPRI); #endif } } /* free up a loop structure */ ecb_cold void ev_loop_destroy (EV_P) { int i; #if EV_MULTIPLICITY /* mimic free (0) */ if (!EV_A) return; #endif #if EV_CLEANUP_ENABLE /* queue cleanup watchers (and execute them) */ if (ecb_expect_false (cleanupcnt)) { queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); EV_INVOKE_PENDING; } #endif #if EV_CHILD_ENABLE if (ev_is_default_loop (EV_A) && ev_is_active (&childev)) { ev_ref (EV_A); /* child watcher */ ev_signal_stop (EV_A_ &childev); } #endif if (ev_is_active (&pipe_w)) { /*ev_ref (EV_A);*/ /*ev_io_stop (EV_A_ &pipe_w);*/ if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]); } #if EV_USE_SIGNALFD if (ev_is_active (&sigfd_w)) close (sigfd); #endif #if EV_USE_TIMERFD if (ev_is_active (&timerfd_w)) close (timerfd); #endif #if EV_USE_INOTIFY if (fs_fd >= 0) close (fs_fd); #endif if (backend_fd >= 0) close (backend_fd); #if EV_USE_IOCP if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A); #endif #if EV_USE_PORT if (backend == EVBACKEND_PORT ) port_destroy (EV_A); #endif #if EV_USE_KQUEUE if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); #endif #if EV_USE_IOURING if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A); #endif #if EV_USE_LINUXAIO if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); #endif #if EV_USE_EPOLL if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A); #endif #if EV_USE_POLL if (backend == EVBACKEND_POLL ) poll_destroy (EV_A); #endif #if EV_USE_SELECT if (backend == EVBACKEND_SELECT ) select_destroy (EV_A); #endif for (i = NUMPRI; i--; ) { array_free (pending, [i]); #if EV_IDLE_ENABLE array_free (idle, [i]); #endif } ev_free (anfds); anfds = 0; anfdmax = 0; /* have to use the microsoft-never-gets-it-right macro */ array_free (rfeed, EMPTY); array_free (fdchange, EMPTY); array_free (timer, EMPTY); #if EV_PERIODIC_ENABLE array_free (periodic, EMPTY); #endif #if EV_FORK_ENABLE array_free (fork, EMPTY); #endif #if EV_CLEANUP_ENABLE array_free (cleanup, EMPTY); #endif array_free (prepare, EMPTY); array_free (check, EMPTY); #if EV_ASYNC_ENABLE array_free (async, EMPTY); #endif backend = 0; #if EV_MULTIPLICITY if (ev_is_default_loop (EV_A)) #endif ev_default_loop_ptr = 0; #if EV_MULTIPLICITY else ev_free (EV_A); #endif } #if EV_USE_INOTIFY inline_size void infy_fork (EV_P); #endif inline_size void loop_fork (EV_P) { #if EV_USE_PORT if (backend == EVBACKEND_PORT ) port_fork (EV_A); #endif #if 
EV_USE_KQUEUE if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); #endif #if EV_USE_IOURING if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A); #endif #if EV_USE_LINUXAIO if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); #endif #if EV_USE_EPOLL if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); #endif #if EV_USE_INOTIFY infy_fork (EV_A); #endif if (postfork != 2) { #if EV_USE_SIGNALFD /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */ #endif #if EV_USE_TIMERFD if (ev_is_active (&timerfd_w)) { ev_ref (EV_A); ev_io_stop (EV_A_ &timerfd_w); close (timerfd); timerfd = -2; evtimerfd_init (EV_A); /* reschedule periodics, in case we missed something */ ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM); } #endif #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE if (ev_is_active (&pipe_w)) { /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ ev_ref (EV_A); ev_io_stop (EV_A_ &pipe_w); if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); evpipe_init (EV_A); /* iterate over everything, in case we missed something before */ ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); } #endif } postfork = 0; } #if EV_MULTIPLICITY ecb_cold struct ev_loop * ev_loop_new (unsigned int flags) EV_NOEXCEPT { EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop)); memset (EV_A, 0, sizeof (struct ev_loop)); loop_init (EV_A_ flags); if (ev_backend (EV_A)) return EV_A; ev_free (EV_A); return 0; } #endif /* multiplicity */ #if EV_VERIFY ecb_noinline ecb_cold static void verify_watcher (EV_P_ W w) { assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); if (w->pending) assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); } ecb_noinline ecb_cold static void verify_heap (EV_P_ ANHE *heap, int N) { int i; for (i = HEAP0; i < N + HEAP0; ++i) { assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i)); assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i]))); assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i])))); verify_watcher (EV_A_ (W)ANHE_w (heap [i])); } } ecb_noinline ecb_cold static void array_verify (EV_P_ W *ws, int cnt) { while (cnt--) { assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); verify_watcher (EV_A_ ws [cnt]); } } #endif #if EV_FEATURE_API void ecb_cold ev_verify (EV_P) EV_NOEXCEPT { #if EV_VERIFY int i; WL w, w2; assert (activecnt >= -1); assert (fdchangemax >= fdchangecnt); for (i = 0; i < fdchangecnt; ++i) assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0)); assert (anfdmax >= 0); for (i = 0; i < anfdmax; ++i) { int j = 0; for (w = w2 = anfds [i].head; w; w = w->next) { verify_watcher (EV_A_ (W)w); if (j++ & 1) { assert (("libev: io watcher list contains a loop", w != w2)); w2 = w2->next; } assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1)); assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i)); } } assert (timermax >= timercnt); verify_heap (EV_A_ timers, timercnt); #if EV_PERIODIC_ENABLE assert (periodicmax >= periodiccnt); verify_heap (EV_A_ periodics, periodiccnt); #endif for (i = NUMPRI; i--; ) { assert (pendingmax [i] >= pendingcnt [i]); #if EV_IDLE_ENABLE assert (idleall >= 0); assert (idlemax [i] >= idlecnt [i]); array_verify (EV_A_ (W *)idles [i], idlecnt [i]); #endif } #if EV_FORK_ENABLE assert (forkmax >= forkcnt); array_verify 
(EV_A_ (W *)forks, forkcnt); #endif #if EV_CLEANUP_ENABLE assert (cleanupmax >= cleanupcnt); array_verify (EV_A_ (W *)cleanups, cleanupcnt); #endif #if EV_ASYNC_ENABLE assert (asyncmax >= asynccnt); array_verify (EV_A_ (W *)asyncs, asynccnt); #endif #if EV_PREPARE_ENABLE assert (preparemax >= preparecnt); array_verify (EV_A_ (W *)prepares, preparecnt); #endif #if EV_CHECK_ENABLE assert (checkmax >= checkcnt); array_verify (EV_A_ (W *)checks, checkcnt); #endif # if 0 #if EV_CHILD_ENABLE for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) for (signum = EV_NSIG; signum--; ) if (signals [signum].pending) #endif # endif #endif } #endif #if EV_MULTIPLICITY ecb_cold struct ev_loop * #else int #endif ev_default_loop (unsigned int flags) EV_NOEXCEPT { if (!ev_default_loop_ptr) { #if EV_MULTIPLICITY EV_P = ev_default_loop_ptr = &default_loop_struct; #else ev_default_loop_ptr = 1; #endif loop_init (EV_A_ flags); if (ev_backend (EV_A)) { #if EV_CHILD_ENABLE ev_signal_init (&childev, childcb, SIGCHLD); ev_set_priority (&childev, EV_MAXPRI); ev_signal_start (EV_A_ &childev); ev_unref (EV_A); /* child watcher should not keep loop alive */ #endif } else ev_default_loop_ptr = 0; } return ev_default_loop_ptr; } void ev_loop_fork (EV_P) EV_NOEXCEPT { postfork = 1; } /*****************************************************************************/ void ev_invoke (EV_P_ void *w, int revents) { EV_CB_INVOKE ((W)w, revents); } unsigned int ev_pending_count (EV_P) EV_NOEXCEPT { int pri; unsigned int count = 0; for (pri = NUMPRI; pri--; ) count += pendingcnt [pri]; return count; } ecb_noinline void ev_invoke_pending (EV_P) { pendingpri = NUMPRI; do { --pendingpri; /* pendingpri possibly gets modified in the inner loop */ while (pendingcnt [pendingpri]) { ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri]; p->w->pending = 0; EV_CB_INVOKE (p->w, p->events); EV_FREQUENT_CHECK; } } while (pendingpri); } #if EV_IDLE_ENABLE /* make idle watchers pending. this handles the "call-idle */ /* only when higher priorities are idle" logic */ inline_size void idle_reify (EV_P) { if (ecb_expect_false (idleall)) { int pri; for (pri = NUMPRI; pri--; ) { if (pendingcnt [pri]) break; if (idlecnt [pri]) { queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); break; } } } } #endif /* make timers pending */ inline_size void timers_reify (EV_P) { EV_FREQUENT_CHECK; if (timercnt && ANHE_at (timers [HEAP0]) < mn_now) { do { ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]); /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->repeat) { ev_at (w) += w->repeat; if (ev_at (w) < mn_now) ev_at (w) = mn_now; assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.))); ANHE_at_cache (timers [HEAP0]); downheap (timers, timercnt, HEAP0); } else ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ EV_FREQUENT_CHECK; feed_reverse (EV_A_ (W)w); } while (timercnt && ANHE_at (timers [HEAP0]) < mn_now); feed_reverse_done (EV_A_ EV_TIMER); } } #if EV_PERIODIC_ENABLE ecb_noinline static void periodic_recalc (EV_P_ ev_periodic *w) { ev_tstamp interval = w->interval > MIN_INTERVAL ? 
w->interval : MIN_INTERVAL; ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval); /* the above almost always errs on the low side */ while (at <= ev_rt_now) { ev_tstamp nat = at + w->interval; /* when resolution fails us, we use ev_rt_now */ if (ecb_expect_false (nat == at)) { at = ev_rt_now; break; } at = nat; } ev_at (w) = at; } /* make periodics pending */ inline_size void periodics_reify (EV_P) { EV_FREQUENT_CHECK; while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) { do { ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->reschedule_cb) { ev_at (w) = w->reschedule_cb (w, ev_rt_now); assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now)); ANHE_at_cache (periodics [HEAP0]); downheap (periodics, periodiccnt, HEAP0); } else if (w->interval) { periodic_recalc (EV_A_ w); ANHE_at_cache (periodics [HEAP0]); downheap (periodics, periodiccnt, HEAP0); } else ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ EV_FREQUENT_CHECK; feed_reverse (EV_A_ (W)w); } while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now); feed_reverse_done (EV_A_ EV_PERIODIC); } } /* simply recalculate all periodics */ /* TODO: maybe ensure that at least one event happens when jumping forward? */ ecb_noinline ecb_cold static void periodics_reschedule (EV_P) { int i; /* adjust periodics after time jump */ for (i = HEAP0; i < periodiccnt + HEAP0; ++i) { ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]); if (w->reschedule_cb) ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) periodic_recalc (EV_A_ w); ANHE_at_cache (periodics [i]); } reheap (periodics, periodiccnt); } #endif /* adjust all timers by a given offset */ ecb_noinline ecb_cold static void timers_reschedule (EV_P_ ev_tstamp adjust) { int i; for (i = 0; i < timercnt; ++i) { ANHE *he = timers + i + HEAP0; ANHE_w (*he)->at += adjust; ANHE_at_cache (*he); } } /* fetch new monotonic and realtime times from the kernel */ /* also detect if there was a timejump, and act accordingly */ inline_speed void time_update (EV_P_ ev_tstamp max_block) { #if EV_USE_MONOTONIC if (ecb_expect_true (have_monotonic)) { int i; ev_tstamp odiff = rtmn_diff; mn_now = get_clock (); /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ /* interpolate in the meantime */ if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5))) { ev_rt_now = rtmn_diff + mn_now; return; } now_floor = mn_now; ev_rt_now = ev_time (); /* loop a few times, before making important decisions. * on the choice of "4": one iteration isn't enough, * in case we get preempted during the calls to * ev_time and get_clock. a second call is almost guaranteed * to succeed in that case, though. and looping a few more times * doesn't hurt either as we only do this on time-jumps or * in the unlikely event of having been preempted here. */ for (i = 4; --i; ) { ev_tstamp diff; rtmn_diff = ev_rt_now - mn_now; diff = odiff - rtmn_diff; if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? 
-diff : diff) < EV_TS_CONST (MIN_TIMEJUMP))) return; /* all is well */ ev_rt_now = ev_time (); mn_now = get_clock (); now_floor = mn_now; } /* no timer adjustment, as the monotonic clock doesn't jump */ /* timers_reschedule (EV_A_ rtmn_diff - odiff) */ # if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); # endif } else #endif { ev_rt_now = ev_time (); if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP))) { /* adjust timers. this is easy, as the offset is the same for all of them */ timers_reschedule (EV_A_ ev_rt_now - mn_now); #if EV_PERIODIC_ENABLE periodics_reschedule (EV_A); #endif } mn_now = ev_rt_now; } } int ev_run (EV_P_ int flags) { #if EV_FEATURE_API ++loop_depth; #endif assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE)); loop_done = EVBREAK_CANCEL; EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */ do { #if EV_VERIFY >= 2 ev_verify (EV_A); #endif #ifndef _WIN32 if (ecb_expect_false (curpid)) /* penalise the forking check even more */ if (ecb_expect_false (getpid () != curpid)) { curpid = getpid (); postfork = 1; } #endif #if EV_FORK_ENABLE /* we might have forked, so queue fork handlers */ if (ecb_expect_false (postfork)) if (forkcnt) { queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); EV_INVOKE_PENDING; } #endif #if EV_PREPARE_ENABLE /* queue prepare watchers (and execute them) */ if (ecb_expect_false (preparecnt)) { queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); EV_INVOKE_PENDING; } #endif if (ecb_expect_false (loop_done)) break; /* we might have forked, so reify kernel state if necessary */ if (ecb_expect_false (postfork)) loop_fork (EV_A); /* update fd-related kernel structures */ fd_reify (EV_A); /* calculate blocking time */ { ev_tstamp waittime = 0.; ev_tstamp sleeptime = 0.; /* remember old timestamp for io_blocktime calculation */ ev_tstamp prev_mn_now = mn_now; /* update time to cancel out callback processing overhead */ time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE)); /* from now on, we want a pipe-wake-up */ pipe_write_wanted = 1; ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) { waittime = EV_TS_CONST (MAX_BLOCKTIME); #if EV_USE_TIMERFD /* sleep a lot longer when we can reliably detect timejumps */ if (ecb_expect_true (timerfd >= 0)) waittime = EV_TS_CONST (MAX_BLOCKTIME2); #endif #if !EV_PERIODIC_ENABLE /* without periodics but with monotonic clock there is no need */ /* for any time jump detection, so sleep longer */ if (ecb_expect_true (have_monotonic)) waittime = EV_TS_CONST (MAX_BLOCKTIME2); #endif if (timercnt) { ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; if (waittime > to) waittime = to; } #if EV_PERIODIC_ENABLE if (periodiccnt) { ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now; if (waittime > to) waittime = to; } #endif /* don't let timeouts decrease the waittime below timeout_blocktime */ if (ecb_expect_false (waittime < timeout_blocktime)) waittime = timeout_blocktime; /* now there are two more special cases left, either we have * already-expired timers, so we should not sleep, or we have timers * that expire very soon, in which case we need to wait for a minimum * amount of time for some event loop backends. */ if (ecb_expect_false (waittime < backend_mintime)) waittime = waittime <= EV_TS_CONST (0.) ? EV_TS_CONST (0.) 
: backend_mintime; /* extra check because io_blocktime is commonly 0 */ if (ecb_expect_false (io_blocktime)) { sleeptime = io_blocktime - (mn_now - prev_mn_now); if (sleeptime > waittime - backend_mintime) sleeptime = waittime - backend_mintime; if (ecb_expect_true (sleeptime > EV_TS_CONST (0.))) { ev_sleep (sleeptime); waittime -= sleeptime; } } } #if EV_FEATURE_API ++loop_count; #endif assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ backend_poll (EV_A_ waittime); assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ pipe_write_wanted = 0; /* just an optimisation, no fence needed */ ECB_MEMORY_FENCE_ACQUIRE; if (pipe_write_skipped) { assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); } /* update ev_rt_now, do magic */ time_update (EV_A_ waittime + sleeptime); } /* queue pending timers and reschedule them */ timers_reify (EV_A); /* relative timers called last */ #if EV_PERIODIC_ENABLE periodics_reify (EV_A); /* absolute timers called first */ #endif #if EV_IDLE_ENABLE /* queue idle watchers unless other events are pending */ idle_reify (EV_A); #endif #if EV_CHECK_ENABLE /* queue check watchers, to be executed first */ if (ecb_expect_false (checkcnt)) queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); #endif EV_INVOKE_PENDING; } while (ecb_expect_true ( activecnt && !loop_done && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT)) )); if (loop_done == EVBREAK_ONE) loop_done = EVBREAK_CANCEL; #if EV_FEATURE_API --loop_depth; #endif return activecnt; } void ev_break (EV_P_ int how) EV_NOEXCEPT { loop_done = how; } void ev_ref (EV_P) EV_NOEXCEPT { ++activecnt; } void ev_unref (EV_P) EV_NOEXCEPT { --activecnt; } void ev_now_update (EV_P) EV_NOEXCEPT { time_update (EV_A_ EV_TSTAMP_HUGE); } void ev_suspend (EV_P) EV_NOEXCEPT { ev_now_update (EV_A); } void ev_resume (EV_P) EV_NOEXCEPT { ev_tstamp mn_prev = mn_now; ev_now_update (EV_A); timers_reschedule (EV_A_ mn_now - mn_prev); #if EV_PERIODIC_ENABLE /* TODO: really do this? */ periodics_reschedule (EV_A); #endif } /*****************************************************************************/ /* singly-linked list management, used when the expected list length is short */ inline_size void wlist_add (WL *head, WL elem) { elem->next = *head; *head = elem; } inline_size void wlist_del (WL *head, WL elem) { while (*head) { if (ecb_expect_true (*head == elem)) { *head = elem->next; break; } head = &(*head)->next; } } /* internal, faster, version of ev_clear_pending */ inline_speed void clear_pending (EV_P_ W w) { if (w->pending) { pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w; w->pending = 0; } } int ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT { W w_ = (W)w; int pending = w_->pending; if (ecb_expect_true (pending)) { ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; p->w = (W)&pending_w; w_->pending = 0; return p->events; } else return 0; } inline_size void pri_adjust (EV_P_ W w) { int pri = ev_priority (w); pri = pri < EV_MINPRI ? EV_MINPRI : pri; pri = pri > EV_MAXPRI ? 
EV_MAXPRI : pri; ev_set_priority (w, pri); } inline_speed void ev_start (EV_P_ W w, int active) { pri_adjust (EV_A_ w); w->active = active; ev_ref (EV_A); } inline_size void ev_stop (EV_P_ W w) { ev_unref (EV_A); w->active = 0; } /*****************************************************************************/ ecb_noinline void ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT { int fd = w->fd; if (ecb_expect_false (ev_is_active (w))) return; assert (("libev: ev_io_start called with negative fd", fd >= 0)); assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); #if EV_VERIFY >= 2 assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd))); #endif EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, 1); array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill); wlist_add (&anfds[fd].head, (WL)w); /* common bug, apparently */ assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w)); fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY); w->events &= ~EV__IOFDSET; EV_FREQUENT_CHECK; } ecb_noinline void ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); #if EV_VERIFY >= 2 assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd))); #endif EV_FREQUENT_CHECK; wlist_del (&anfds[w->fd].head, (WL)w); ev_stop (EV_A_ (W)w); fd_change (EV_A_ w->fd, EV_ANFD_REIFY); EV_FREQUENT_CHECK; } ecb_noinline void ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; ev_at (w) += mn_now; assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); EV_FREQUENT_CHECK; ++timercnt; ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1); array_needsize (ANHE, timers, timermax, ev_active (w) + 1, array_needsize_noinit); ANHE_w (timers [ev_active (w)]) = (WT)w; ANHE_at_cache (timers [ev_active (w)]); upheap (timers, ev_active (w)); EV_FREQUENT_CHECK; /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ } ecb_noinline void ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); --timercnt; if (ecb_expect_true (active < timercnt + HEAP0)) { timers [active] = timers [timercnt + HEAP0]; adjustheap (timers, timercnt, active); } } ev_at (w) -= mn_now; ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } ecb_noinline void ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT { EV_FREQUENT_CHECK; clear_pending (EV_A_ (W)w); if (ev_is_active (w)) { if (w->repeat) { ev_at (w) = mn_now + w->repeat; ANHE_at_cache (timers [ev_active (w)]); adjustheap (timers, timercnt, ev_active (w)); } else ev_timer_stop (EV_A_ w); } else if (w->repeat) { ev_at (w) = w->repeat; ev_timer_start (EV_A_ w); } EV_FREQUENT_CHECK; } ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT { return ev_at (w) - (ev_is_active (w) ? 
mn_now : EV_TS_CONST (0.)); } #if EV_PERIODIC_ENABLE ecb_noinline void ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; #if EV_USE_TIMERFD if (timerfd == -2) evtimerfd_init (EV_A); #endif if (w->reschedule_cb) ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) { assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.)); periodic_recalc (EV_A_ w); } else ev_at (w) = w->offset; EV_FREQUENT_CHECK; ++periodiccnt; ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1); array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, array_needsize_noinit); ANHE_w (periodics [ev_active (w)]) = (WT)w; ANHE_at_cache (periodics [ev_active (w)]); upheap (periodics, ev_active (w)); EV_FREQUENT_CHECK; /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ } ecb_noinline void ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); --periodiccnt; if (ecb_expect_true (active < periodiccnt + HEAP0)) { periodics [active] = periodics [periodiccnt + HEAP0]; adjustheap (periodics, periodiccnt, active); } } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } ecb_noinline void ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT { /* TODO: use adjustheap and recalculation */ ev_periodic_stop (EV_A_ w); ev_periodic_start (EV_A_ w); } #endif #ifndef SA_RESTART # define SA_RESTART 0 #endif #if EV_SIGNAL_ENABLE ecb_noinline void ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); #if EV_MULTIPLICITY assert (("libev: a signal must not be attached to two different loops", !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop)); signals [w->signum - 1].loop = EV_A; ECB_MEMORY_FENCE_RELEASE; #endif EV_FREQUENT_CHECK; #if EV_USE_SIGNALFD if (sigfd == -2) { sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC); if (sigfd < 0 && errno == EINVAL) sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */ if (sigfd >= 0) { fd_intern (sigfd); /* doing it twice will not hurt */ sigemptyset (&sigfd_set); ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ); ev_set_priority (&sigfd_w, EV_MAXPRI); ev_io_start (EV_A_ &sigfd_w); ev_unref (EV_A); /* signalfd watcher should not keep loop alive */ } } if (sigfd >= 0) { /* TODO: check .head */ sigaddset (&sigfd_set, w->signum); sigprocmask (SIG_BLOCK, &sigfd_set, 0); signalfd (sigfd, &sigfd_set, 0); } #endif ev_start (EV_A_ (W)w, 1); wlist_add (&signals [w->signum - 1].head, (WL)w); if (!((WL)w)->next) # if EV_USE_SIGNALFD if (sigfd < 0) /*TODO*/ # endif { # ifdef _WIN32 evpipe_init (EV_A); signal (w->signum, ev_sighandler); # else struct sigaction sa; evpipe_init (EV_A); sa.sa_handler = ev_sighandler; sigfillset (&sa.sa_mask); sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */ sigaction (w->signum, &sa, 0); if (origflags & EVFLAG_NOSIGMASK) { sigemptyset (&sa.sa_mask); sigaddset (&sa.sa_mask, w->signum); sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0); } #endif } EV_FREQUENT_CHECK; } ecb_noinline void ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; 
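/* descriptive note (added): stopping the last watcher for a signal (below) also
   detaches the signal from this loop and either removes it from the signalfd
   set or restores the default signal handler */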
EV_FREQUENT_CHECK; wlist_del (&signals [w->signum - 1].head, (WL)w); ev_stop (EV_A_ (W)w); if (!signals [w->signum - 1].head) { #if EV_MULTIPLICITY signals [w->signum - 1].loop = 0; /* unattach from signal */ #endif #if EV_USE_SIGNALFD if (sigfd >= 0) { sigset_t ss; sigemptyset (&ss); sigaddset (&ss, w->signum); sigdelset (&sigfd_set, w->signum); signalfd (sigfd, &sigfd_set, 0); sigprocmask (SIG_UNBLOCK, &ss, 0); } else #endif signal (w->signum, SIG_DFL); } EV_FREQUENT_CHECK; } #endif #if EV_CHILD_ENABLE void ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT { #if EV_MULTIPLICITY assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); #endif if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, 1); wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); EV_FREQUENT_CHECK; } void ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_STAT_ENABLE # ifdef _WIN32 # undef lstat # define lstat(a,b) _stati64 (a,b) # endif #define DEF_STAT_INTERVAL 5.0074891 #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ #define MIN_STAT_INTERVAL 0.1074891 ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents); #if EV_USE_INOTIFY /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) ecb_noinline static void infy_add (EV_P_ ev_stat *w) { w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_DONT_FOLLOW | IN_MASK_ADD); if (w->wd >= 0) { struct statfs sfs; /* now local changes will be tracked by inotify, but remote changes won't */ /* unless the filesystem is known to be local, we therefore still poll */ /* also do poll on <2.6.25, but with normal frequency */ if (!fs_2625) w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; else if (!statfs (w->path, &sfs) && (sfs.f_type == 0x1373 /* devfs */ || sfs.f_type == 0x4006 /* fat */ || sfs.f_type == 0x4d44 /* msdos */ || sfs.f_type == 0xEF53 /* ext2/3 */ || sfs.f_type == 0x72b6 /* jffs2 */ || sfs.f_type == 0x858458f6 /* ramfs */ || sfs.f_type == 0x5346544e /* ntfs */ || sfs.f_type == 0x3153464a /* jfs */ || sfs.f_type == 0x9123683e /* btrfs */ || sfs.f_type == 0x52654973 /* reiser3 */ || sfs.f_type == 0x01021994 /* tmpfs */ || sfs.f_type == 0x58465342 /* xfs */)) w->timer.repeat = 0.; /* filesystem is local, kernel new enough */ else w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */ } else { /* can't use inotify, continue to stat */ w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; /* if path is not there, monitor some parent directory for speedup hints */ /* note that exceeding the hardcoded path limit is not a correctness issue, */ /* but an efficiency issue only */ if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) { char path [4096]; strcpy (path, w->path); do { int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF | (errno == EACCES ? 
IN_ATTRIB : IN_CREATE | IN_MOVED_TO); char *pend = strrchr (path, '/'); if (!pend || pend == path) break; *pend = 0; w->wd = inotify_add_watch (fs_fd, path, mask); } while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); } } if (w->wd >= 0) wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w); /* now re-arm timer, if required */ if (ev_is_active (&w->timer)) ev_ref (EV_A); ev_timer_again (EV_A_ &w->timer); if (ev_is_active (&w->timer)) ev_unref (EV_A); } ecb_noinline static void infy_del (EV_P_ ev_stat *w) { int slot; int wd = w->wd; if (wd < 0) return; w->wd = -2; slot = wd & ((EV_INOTIFY_HASHSIZE) - 1); wlist_del (&fs_hash [slot].head, (WL)w); /* remove this watcher, if others are watching it, they will rearm */ inotify_rm_watch (fs_fd, wd); } ecb_noinline static void infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev) { if (slot < 0) /* overflow, need to check for all hash slots */ for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot) infy_wd (EV_A_ slot, wd, ev); else { WL w_; for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; ) { ev_stat *w = (ev_stat *)w_; w_ = w_->next; /* lets us remove this watcher and all before it */ if (w->wd == wd || wd == -1) { if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF)) { wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w); w->wd = -1; infy_add (EV_A_ w); /* re-add, no matter what */ } stat_timer_cb (EV_A_ &w->timer, 0); } } } } static void infy_cb (EV_P_ ev_io *w, int revents) { char buf [EV_INOTIFY_BUFSIZE]; int ofs; int len = read (fs_fd, buf, sizeof (buf)); for (ofs = 0; ofs < len; ) { struct inotify_event *ev = (struct inotify_event *)(buf + ofs); infy_wd (EV_A_ ev->wd, ev->wd, ev); ofs += sizeof (struct inotify_event) + ev->len; } } inline_size ecb_cold void ev_check_2625 (EV_P) { /* kernels < 2.6.25 are borked * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html */ if (ev_linux_version () < 0x020619) return; fs_2625 = 1; } inline_size int infy_newfd (void) { #if defined IN_CLOEXEC && defined IN_NONBLOCK int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK); if (fd >= 0) return fd; #endif return inotify_init (); } inline_size void infy_init (EV_P) { if (fs_fd != -2) return; fs_fd = -1; ev_check_2625 (EV_A); fs_fd = infy_newfd (); if (fs_fd >= 0) { fd_intern (fs_fd); ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ); ev_set_priority (&fs_w, EV_MAXPRI); ev_io_start (EV_A_ &fs_w); ev_unref (EV_A); } } inline_size void infy_fork (EV_P) { int slot; if (fs_fd < 0) return; ev_ref (EV_A); ev_io_stop (EV_A_ &fs_w); close (fs_fd); fs_fd = infy_newfd (); if (fs_fd >= 0) { fd_intern (fs_fd); ev_io_set (&fs_w, fs_fd, EV_READ); ev_io_start (EV_A_ &fs_w); ev_unref (EV_A); } for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot) { WL w_ = fs_hash [slot].head; fs_hash [slot].head = 0; while (w_) { ev_stat *w = (ev_stat *)w_; w_ = w_->next; /* lets us add this watcher */ w->wd = -1; if (fs_fd >= 0) infy_add (EV_A_ w); /* re-add, no matter what */ else { w->timer.repeat = w->interval ? 
w->interval : DEF_STAT_INTERVAL; if (ev_is_active (&w->timer)) ev_ref (EV_A); ev_timer_again (EV_A_ &w->timer); if (ev_is_active (&w->timer)) ev_unref (EV_A); } } } } #endif #ifdef _WIN32 # define EV_LSTAT(p,b) _stati64 (p, b) #else # define EV_LSTAT(p,b) lstat (p, b) #endif void ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT { if (lstat (w->path, &w->attr) < 0) w->attr.st_nlink = 0; else if (!w->attr.st_nlink) w->attr.st_nlink = 1; } ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents) { ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); ev_statdata prev = w->attr; ev_stat_stat (EV_A_ w); /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */ if ( prev.st_dev != w->attr.st_dev || prev.st_ino != w->attr.st_ino || prev.st_mode != w->attr.st_mode || prev.st_nlink != w->attr.st_nlink || prev.st_uid != w->attr.st_uid || prev.st_gid != w->attr.st_gid || prev.st_rdev != w->attr.st_rdev || prev.st_size != w->attr.st_size || prev.st_atime != w->attr.st_atime || prev.st_mtime != w->attr.st_mtime || prev.st_ctime != w->attr.st_ctime ) { /* we only update w->prev on actual differences */ /* in case we test more often than invoke the callback, */ /* to ensure that prev is always different to attr */ w->prev = prev; #if EV_USE_INOTIFY if (fs_fd >= 0) { infy_del (EV_A_ w); infy_add (EV_A_ w); ev_stat_stat (EV_A_ w); /* avoid race... */ } #endif ev_feed_event (EV_A_ w, EV_STAT); } } void ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; ev_stat_stat (EV_A_ w); if (w->interval < MIN_STAT_INTERVAL && w->interval) w->interval = MIN_STAT_INTERVAL; ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL); ev_set_priority (&w->timer, ev_priority (w)); #if EV_USE_INOTIFY infy_init (EV_A); if (fs_fd >= 0) infy_add (EV_A_ w); else #endif { ev_timer_again (EV_A_ &w->timer); ev_unref (EV_A); } ev_start (EV_A_ (W)w, 1); EV_FREQUENT_CHECK; } void ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; #if EV_USE_INOTIFY infy_del (EV_A_ w); #endif if (ev_is_active (&w->timer)) { ev_ref (EV_A); ev_timer_stop (EV_A_ &w->timer); } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_IDLE_ENABLE void ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; pri_adjust (EV_A_ (W)w); EV_FREQUENT_CHECK; { int active = ++idlecnt [ABSPRI (w)]; ++idleall; ev_start (EV_A_ (W)w, active); array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, array_needsize_noinit); idles [ABSPRI (w)][active - 1] = w; } EV_FREQUENT_CHECK; } void ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; ev_active (idles [ABSPRI (w)][active - 1]) = active; ev_stop (EV_A_ (W)w); --idleall; } EV_FREQUENT_CHECK; } #endif #if EV_PREPARE_ENABLE void ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++preparecnt); array_needsize (ev_prepare *, prepares, preparemax, preparecnt, array_needsize_noinit); prepares [preparecnt - 1] = w; EV_FREQUENT_CHECK; } void ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; 
EV_FREQUENT_CHECK; { int active = ev_active (w); prepares [active - 1] = prepares [--preparecnt]; ev_active (prepares [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_CHECK_ENABLE void ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++checkcnt); array_needsize (ev_check *, checks, checkmax, checkcnt, array_needsize_noinit); checks [checkcnt - 1] = w; EV_FREQUENT_CHECK; } void ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); checks [active - 1] = checks [--checkcnt]; ev_active (checks [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_EMBED_ENABLE ecb_noinline void ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT { ev_run (w->other, EVRUN_NOWAIT); } static void embed_io_cb (EV_P_ ev_io *io, int revents) { ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io)); if (ev_cb (w)) ev_feed_event (EV_A_ (W)w, EV_EMBED); else ev_run (w->other, EVRUN_NOWAIT); } static void embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents) { ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare)); { EV_P = w->other; while (fdchangecnt) { fd_reify (EV_A); ev_run (EV_A_ EVRUN_NOWAIT); } } } #if EV_FORK_ENABLE static void embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) { ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); ev_embed_stop (EV_A_ w); { EV_P = w->other; ev_loop_fork (EV_A); ev_run (EV_A_ EVRUN_NOWAIT); } ev_embed_start (EV_A_ w); } #endif #if 0 static void embed_idle_cb (EV_P_ ev_idle *idle, int revents) { ev_idle_stop (EV_A_ idle); } #endif void ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; { EV_P = w->other; assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ); } EV_FREQUENT_CHECK; ev_set_priority (&w->io, ev_priority (w)); ev_io_start (EV_A_ &w->io); ev_prepare_init (&w->prepare, embed_prepare_cb); ev_set_priority (&w->prepare, EV_MINPRI); ev_prepare_start (EV_A_ &w->prepare); #if EV_FORK_ENABLE ev_fork_init (&w->fork, embed_fork_cb); ev_fork_start (EV_A_ &w->fork); #endif /*ev_idle_init (&w->idle, e,bed_idle_cb);*/ ev_start (EV_A_ (W)w, 1); EV_FREQUENT_CHECK; } void ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_io_stop (EV_A_ &w->io); ev_prepare_stop (EV_A_ &w->prepare); #if EV_FORK_ENABLE ev_fork_stop (EV_A_ &w->fork); #endif ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_FORK_ENABLE void ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++forkcnt); array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit); forks [forkcnt - 1] = w; EV_FREQUENT_CHECK; } void ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); forks [active - 1] = forks [--forkcnt]; ev_active (forks [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_CLEANUP_ENABLE void ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; EV_FREQUENT_CHECK; 
ev_start (EV_A_ (W)w, ++cleanupcnt); array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, array_needsize_noinit); cleanups [cleanupcnt - 1] = w; /* cleanup watchers should never keep a refcount on the loop */ ev_unref (EV_A); EV_FREQUENT_CHECK; } void ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; ev_ref (EV_A); { int active = ev_active (w); cleanups [active - 1] = cleanups [--cleanupcnt]; ev_active (cleanups [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } #endif #if EV_ASYNC_ENABLE void ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT { if (ecb_expect_false (ev_is_active (w))) return; w->sent = 0; evpipe_init (EV_A); EV_FREQUENT_CHECK; ev_start (EV_A_ (W)w, ++asynccnt); array_needsize (ev_async *, asyncs, asyncmax, asynccnt, array_needsize_noinit); asyncs [asynccnt - 1] = w; EV_FREQUENT_CHECK; } void ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT { clear_pending (EV_A_ (W)w); if (ecb_expect_false (!ev_is_active (w))) return; EV_FREQUENT_CHECK; { int active = ev_active (w); asyncs [active - 1] = asyncs [--asynccnt]; ev_active (asyncs [active - 1]) = active; } ev_stop (EV_A_ (W)w); EV_FREQUENT_CHECK; } void ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT { w->sent = 1; evpipe_write (EV_A_ &async_pending); } #endif /*****************************************************************************/ struct ev_once { ev_io io; ev_timer to; void (*cb)(int revents, void *arg); void *arg; }; static void once_cb (EV_P_ struct ev_once *once, int revents) { void (*cb)(int revents, void *arg) = once->cb; void *arg = once->arg; ev_io_stop (EV_A_ &once->io); ev_timer_stop (EV_A_ &once->to); ev_free (once); cb (revents, arg); } static void once_cb_io (EV_P_ ev_io *w, int revents) { struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)); once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to)); } static void once_cb_to (EV_P_ ev_timer *w, int revents) { struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)); once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io)); } void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT { struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once)); once->cb = cb; once->arg = arg; ev_init (&once->io, once_cb_io); if (fd >= 0) { ev_io_set (&once->io, fd, events); ev_io_start (EV_A_ &once->io); } ev_init (&once->to, once_cb_to); if (timeout >= 0.) 
{ ev_timer_set (&once->to, timeout, 0.); ev_timer_start (EV_A_ &once->to); } } /*****************************************************************************/ #if EV_WALK_ENABLE ecb_cold void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT { int i, j; ev_watcher_list *wl, *wn; if (types & (EV_IO | EV_EMBED)) for (i = 0; i < anfdmax; ++i) for (wl = anfds [i].head; wl; ) { wn = wl->next; #if EV_EMBED_ENABLE if (ev_cb ((ev_io *)wl) == embed_io_cb) { if (types & EV_EMBED) cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io)); } else #endif #if EV_USE_INOTIFY if (ev_cb ((ev_io *)wl) == infy_cb) ; else #endif if ((ev_io *)wl != &pipe_w) if (types & EV_IO) cb (EV_A_ EV_IO, wl); wl = wn; } if (types & (EV_TIMER | EV_STAT)) for (i = timercnt + HEAP0; i-- > HEAP0; ) #if EV_STAT_ENABLE /*TODO: timer is not always active*/ if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb) { if (types & EV_STAT) cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer)); } else #endif if (types & EV_TIMER) cb (EV_A_ EV_TIMER, ANHE_w (timers [i])); #if EV_PERIODIC_ENABLE if (types & EV_PERIODIC) for (i = periodiccnt + HEAP0; i-- > HEAP0; ) cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i])); #endif #if EV_IDLE_ENABLE if (types & EV_IDLE) for (j = NUMPRI; j--; ) for (i = idlecnt [j]; i--; ) cb (EV_A_ EV_IDLE, idles [j][i]); #endif #if EV_FORK_ENABLE if (types & EV_FORK) for (i = forkcnt; i--; ) if (ev_cb (forks [i]) != embed_fork_cb) cb (EV_A_ EV_FORK, forks [i]); #endif #if EV_ASYNC_ENABLE if (types & EV_ASYNC) for (i = asynccnt; i--; ) cb (EV_A_ EV_ASYNC, asyncs [i]); #endif #if EV_PREPARE_ENABLE if (types & EV_PREPARE) for (i = preparecnt; i--; ) # if EV_EMBED_ENABLE if (ev_cb (prepares [i]) != embed_prepare_cb) # endif cb (EV_A_ EV_PREPARE, prepares [i]); #endif #if EV_CHECK_ENABLE if (types & EV_CHECK) for (i = checkcnt; i--; ) cb (EV_A_ EV_CHECK, checks [i]); #endif #if EV_SIGNAL_ENABLE if (types & EV_SIGNAL) for (i = 0; i < EV_NSIG - 1; ++i) for (wl = signals [i].head; wl; ) { wn = wl->next; cb (EV_A_ EV_SIGNAL, wl); wl = wn; } #endif #if EV_CHILD_ENABLE if (types & EV_CHILD) for (i = (EV_PID_HASHSIZE); i--; ) for (wl = childs [i]; wl; ) { wn = wl->next; cb (EV_A_ EV_CHILD, wl); wl = wn; } #endif /* EV_STAT 0x00001000 /* stat data changed */ /* EV_EMBED 0x00010000 /* embedded event loop needs sweep */ } #endif #if EV_MULTIPLICITY #include "ev_wrap.h" #endif EV-4.33/libev/ev_wrap.h0000644000000000000000000002006013634411664013376 0ustar rootroot/* DO NOT EDIT, automatically generated by update_ev_wrap */ #ifndef EV_WRAP_H #define EV_WRAP_H #define acquire_cb ((loop)->acquire_cb) #define activecnt ((loop)->activecnt) #define anfdmax ((loop)->anfdmax) #define anfds ((loop)->anfds) #define async_pending ((loop)->async_pending) #define asynccnt ((loop)->asynccnt) #define asyncmax ((loop)->asyncmax) #define asyncs ((loop)->asyncs) #define backend ((loop)->backend) #define backend_fd ((loop)->backend_fd) #define backend_mintime ((loop)->backend_mintime) #define backend_modify ((loop)->backend_modify) #define backend_poll ((loop)->backend_poll) #define checkcnt ((loop)->checkcnt) #define checkmax ((loop)->checkmax) #define checks ((loop)->checks) #define cleanupcnt ((loop)->cleanupcnt) #define cleanupmax ((loop)->cleanupmax) #define cleanups ((loop)->cleanups) #define curpid ((loop)->curpid) #define epoll_epermcnt ((loop)->epoll_epermcnt) #define epoll_epermmax ((loop)->epoll_epermmax) #define epoll_eperms ((loop)->epoll_eperms) #define 
epoll_eventmax ((loop)->epoll_eventmax) #define epoll_events ((loop)->epoll_events) #define evpipe ((loop)->evpipe) #define fdchangecnt ((loop)->fdchangecnt) #define fdchangemax ((loop)->fdchangemax) #define fdchanges ((loop)->fdchanges) #define forkcnt ((loop)->forkcnt) #define forkmax ((loop)->forkmax) #define forks ((loop)->forks) #define fs_2625 ((loop)->fs_2625) #define fs_fd ((loop)->fs_fd) #define fs_hash ((loop)->fs_hash) #define fs_w ((loop)->fs_w) #define idleall ((loop)->idleall) #define idlecnt ((loop)->idlecnt) #define idlemax ((loop)->idlemax) #define idles ((loop)->idles) #define invoke_cb ((loop)->invoke_cb) #define io_blocktime ((loop)->io_blocktime) #define iocp ((loop)->iocp) #define iouring_cq_cqes ((loop)->iouring_cq_cqes) #define iouring_cq_head ((loop)->iouring_cq_head) #define iouring_cq_overflow ((loop)->iouring_cq_overflow) #define iouring_cq_ring ((loop)->iouring_cq_ring) #define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries) #define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask) #define iouring_cq_ring_size ((loop)->iouring_cq_ring_size) #define iouring_cq_tail ((loop)->iouring_cq_tail) #define iouring_entries ((loop)->iouring_entries) #define iouring_fd ((loop)->iouring_fd) #define iouring_max_entries ((loop)->iouring_max_entries) #define iouring_sq_array ((loop)->iouring_sq_array) #define iouring_sq_dropped ((loop)->iouring_sq_dropped) #define iouring_sq_flags ((loop)->iouring_sq_flags) #define iouring_sq_head ((loop)->iouring_sq_head) #define iouring_sq_ring ((loop)->iouring_sq_ring) #define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries) #define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask) #define iouring_sq_ring_size ((loop)->iouring_sq_ring_size) #define iouring_sq_tail ((loop)->iouring_sq_tail) #define iouring_sqes ((loop)->iouring_sqes) #define iouring_sqes_size ((loop)->iouring_sqes_size) #define iouring_tfd ((loop)->iouring_tfd) #define iouring_tfd_to ((loop)->iouring_tfd_to) #define iouring_tfd_w ((loop)->iouring_tfd_w) #define iouring_to_submit ((loop)->iouring_to_submit) #define kqueue_changecnt ((loop)->kqueue_changecnt) #define kqueue_changemax ((loop)->kqueue_changemax) #define kqueue_changes ((loop)->kqueue_changes) #define kqueue_eventmax ((loop)->kqueue_eventmax) #define kqueue_events ((loop)->kqueue_events) #define kqueue_fd_pid ((loop)->kqueue_fd_pid) #define linuxaio_ctx ((loop)->linuxaio_ctx) #define linuxaio_epoll_w ((loop)->linuxaio_epoll_w) #define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax) #define linuxaio_iocbps ((loop)->linuxaio_iocbps) #define linuxaio_iteration ((loop)->linuxaio_iteration) #define linuxaio_submitcnt ((loop)->linuxaio_submitcnt) #define linuxaio_submitmax ((loop)->linuxaio_submitmax) #define linuxaio_submits ((loop)->linuxaio_submits) #define loop_count ((loop)->loop_count) #define loop_depth ((loop)->loop_depth) #define loop_done ((loop)->loop_done) #define mn_now ((loop)->mn_now) #define now_floor ((loop)->now_floor) #define origflags ((loop)->origflags) #define pending_w ((loop)->pending_w) #define pendingcnt ((loop)->pendingcnt) #define pendingmax ((loop)->pendingmax) #define pendingpri ((loop)->pendingpri) #define pendings ((loop)->pendings) #define periodiccnt ((loop)->periodiccnt) #define periodicmax ((loop)->periodicmax) #define periodics ((loop)->periodics) #define pipe_w ((loop)->pipe_w) #define pipe_write_skipped ((loop)->pipe_write_skipped) #define pipe_write_wanted ((loop)->pipe_write_wanted) #define pollcnt ((loop)->pollcnt) #define pollidxmax ((loop)->pollidxmax) 
#define pollidxs ((loop)->pollidxs) #define pollmax ((loop)->pollmax) #define polls ((loop)->polls) #define port_eventmax ((loop)->port_eventmax) #define port_events ((loop)->port_events) #define postfork ((loop)->postfork) #define preparecnt ((loop)->preparecnt) #define preparemax ((loop)->preparemax) #define prepares ((loop)->prepares) #define release_cb ((loop)->release_cb) #define rfeedcnt ((loop)->rfeedcnt) #define rfeedmax ((loop)->rfeedmax) #define rfeeds ((loop)->rfeeds) #define rtmn_diff ((loop)->rtmn_diff) #define sig_pending ((loop)->sig_pending) #define sigfd ((loop)->sigfd) #define sigfd_set ((loop)->sigfd_set) #define sigfd_w ((loop)->sigfd_w) #define timeout_blocktime ((loop)->timeout_blocktime) #define timercnt ((loop)->timercnt) #define timerfd ((loop)->timerfd) #define timerfd_w ((loop)->timerfd_w) #define timermax ((loop)->timermax) #define timers ((loop)->timers) #define userdata ((loop)->userdata) #define vec_eo ((loop)->vec_eo) #define vec_max ((loop)->vec_max) #define vec_ri ((loop)->vec_ri) #define vec_ro ((loop)->vec_ro) #define vec_wi ((loop)->vec_wi) #define vec_wo ((loop)->vec_wo) #else #undef EV_WRAP_H #undef acquire_cb #undef activecnt #undef anfdmax #undef anfds #undef async_pending #undef asynccnt #undef asyncmax #undef asyncs #undef backend #undef backend_fd #undef backend_mintime #undef backend_modify #undef backend_poll #undef checkcnt #undef checkmax #undef checks #undef cleanupcnt #undef cleanupmax #undef cleanups #undef curpid #undef epoll_epermcnt #undef epoll_epermmax #undef epoll_eperms #undef epoll_eventmax #undef epoll_events #undef evpipe #undef fdchangecnt #undef fdchangemax #undef fdchanges #undef forkcnt #undef forkmax #undef forks #undef fs_2625 #undef fs_fd #undef fs_hash #undef fs_w #undef idleall #undef idlecnt #undef idlemax #undef idles #undef invoke_cb #undef io_blocktime #undef iocp #undef iouring_cq_cqes #undef iouring_cq_head #undef iouring_cq_overflow #undef iouring_cq_ring #undef iouring_cq_ring_entries #undef iouring_cq_ring_mask #undef iouring_cq_ring_size #undef iouring_cq_tail #undef iouring_entries #undef iouring_fd #undef iouring_max_entries #undef iouring_sq_array #undef iouring_sq_dropped #undef iouring_sq_flags #undef iouring_sq_head #undef iouring_sq_ring #undef iouring_sq_ring_entries #undef iouring_sq_ring_mask #undef iouring_sq_ring_size #undef iouring_sq_tail #undef iouring_sqes #undef iouring_sqes_size #undef iouring_tfd #undef iouring_tfd_to #undef iouring_tfd_w #undef iouring_to_submit #undef kqueue_changecnt #undef kqueue_changemax #undef kqueue_changes #undef kqueue_eventmax #undef kqueue_events #undef kqueue_fd_pid #undef linuxaio_ctx #undef linuxaio_epoll_w #undef linuxaio_iocbpmax #undef linuxaio_iocbps #undef linuxaio_iteration #undef linuxaio_submitcnt #undef linuxaio_submitmax #undef linuxaio_submits #undef loop_count #undef loop_depth #undef loop_done #undef mn_now #undef now_floor #undef origflags #undef pending_w #undef pendingcnt #undef pendingmax #undef pendingpri #undef pendings #undef periodiccnt #undef periodicmax #undef periodics #undef pipe_w #undef pipe_write_skipped #undef pipe_write_wanted #undef pollcnt #undef pollidxmax #undef pollidxs #undef pollmax #undef polls #undef port_eventmax #undef port_events #undef postfork #undef preparecnt #undef preparemax #undef prepares #undef release_cb #undef rfeedcnt #undef rfeedmax #undef rfeeds #undef rtmn_diff #undef sig_pending #undef sigfd #undef sigfd_set #undef sigfd_w #undef timeout_blocktime #undef timercnt #undef timerfd #undef timerfd_w #undef 
timermax #undef timers #undef userdata #undef vec_eo #undef vec_max #undef vec_ri #undef vec_ro #undef vec_wi #undef vec_wo #endif EV-4.33/EV.pm0000644000000000000000000012067113634412241011332 0ustar rootroot=head1 NAME EV - perl interface to libev, a high performance full-featured event loop =head1 SYNOPSIS use EV; # TIMERS my $w = EV::timer 2, 0, sub { warn "is called after 2s"; }; my $w = EV::timer 2, 2, sub { warn "is called roughly every 2s (repeat = 2)"; }; undef $w; # destroy event watcher again my $w = EV::periodic 0, 60, 0, sub { warn "is called every minute, on the minute, exactly"; }; # IO my $w = EV::io *STDIN, EV::READ, sub { my ($w, $revents) = @_; # all callbacks receive the watcher and event mask warn "stdin is readable, you entered: ", ; }; # SIGNALS my $w = EV::signal 'QUIT', sub { warn "sigquit received\n"; }; # CHILD/PID STATUS CHANGES my $w = EV::child 666, 0, sub { my ($w, $revents) = @_; my $status = $w->rstatus; }; # STAT CHANGES my $w = EV::stat "/etc/passwd", 10, sub { my ($w, $revents) = @_; warn $w->path, " has changed somehow.\n"; }; # MAINLOOP EV::run; # loop until EV::break is called or all watchers stop EV::run EV::RUN_ONCE; # block until at least one event could be handled EV::run EV::RUN_NOWAIT; # try to handle same events, but do not block =head1 BEFORE YOU START USING THIS MODULE If you only need timer, I/O, signal, child and idle watchers and not the advanced functionality of this module, consider using L instead, specifically the simplified API described in L. When used with EV as backend, the L API is as fast as the native L API, but your programs/modules will still run with many other event loops. =head1 DESCRIPTION This module provides an interface to libev (L). While the documentation below is comprehensive, one might also consult the documentation of libev itself (L or F) for more subtle details on watcher semantics or some discussion on the available backends, or how to force a specific backend with C, or just about in any case because it has much more detailed information. This module is very fast and scalable. It is actually so fast that you can use it through the L module, stay portable to other event loops (if you don't rely on any watcher types not available through it) and still be faster than with any other event loop currently supported in Perl. =head2 PORTING FROM EV 3.X to 4.X EV version 4 introduces a number of incompatible changes summarised here. According to the depreciation strategy used by libev, there is a compatibility layer in place so programs should continue to run unchanged (the XS interface lacks this layer, so programs using that one need to be updated). This compatibility layer will be switched off in some future release. All changes relevant to Perl are renames of symbols, functions and methods: EV::loop => EV::run EV::LOOP_NONBLOCK => EV::RUN_NOWAIT EV::LOOP_ONESHOT => EV::RUN_ONCE EV::unloop => EV::break EV::UNLOOP_CANCEL => EV::BREAK_CANCEL EV::UNLOOP_ONE => EV::BREAK_ONE EV::UNLOOP_ALL => EV::BREAK_ALL EV::TIMEOUT => EV::TIMER EV::loop_count => EV::iteration EV::loop_depth => EV::depth EV::loop_verify => EV::verify The loop object methods corresponding to the functions above have been similarly renamed. =head2 MODULE EXPORTS This module does not export any symbols. 
=cut package EV; use common::sense; BEGIN { our $VERSION = '4.33'; use XSLoader; local $^W = 0; # avoid spurious warning XSLoader::load "EV", $VERSION; } @EV::IO::ISA = @EV::Timer::ISA = @EV::Periodic::ISA = @EV::Signal::ISA = @EV::Child::ISA = @EV::Stat::ISA = @EV::Idle::ISA = @EV::Prepare::ISA = @EV::Check::ISA = @EV::Embed::ISA = @EV::Fork::ISA = @EV::Async::ISA = "EV::Watcher"; @EV::Loop::Default::ISA = "EV::Loop"; =head1 EVENT LOOPS EV supports multiple event loops: There is a single "default event loop" that can handle everything including signals and child watchers, and any number of "dynamic event loops" that can use different backends (with various limitations), but no child and signal watchers. You do not have to do anything to create the default event loop: When the module is loaded a suitable backend is selected on the premise of selecting a working backend (which for example rules out kqueue on most BSDs). Modules should, unless they have "special needs" always use the default loop as this is fastest (perl-wise), best supported by other modules (e.g. AnyEvent or Coro) and most portable event loop. For specific programs you can create additional event loops dynamically. If you want to take advantage of kqueue (which often works properly for sockets only) even though the default loop doesn't enable it, you can I a kqueue loop into the default loop: running the default loop will then also service the kqueue loop to some extent. See the example in the section about embed watchers for an example on how to achieve that. =over 4 =item $loop = new EV::Loop [$flags] Create a new event loop as per the specified flags. Please refer to the C function description in the libev documentation (L, or locally-installed as F manpage) for more info. The loop will automatically be destroyed when it is no longer referenced by any watcher and the loop object goes out of scope. If you are not embedding the loop, then Using C is recommended, as only the default event loop is protected by this module. If you I embedding this loop in the default loop, this is not necessary, as C automatically does the right thing on fork. =item $loop->loop_fork Must be called after a fork in the child, before entering or continuing the event loop. An alternative is to use C which calls this function automatically, at some performance loss (refer to the libev documentation). =item $loop->verify Calls C to make internal consistency checks (for debugging libev) and abort the program if any data structures were found to be corrupted. =item $loop = EV::default_loop [$flags] Return the default loop (which is a singleton object). Since this module already creates the default loop with default flags, specifying flags here will not have any effect unless you destroy the default loop first, which isn't supported. So in short: don't do it, and if you break it, you get to keep the pieces. =back =head1 BASIC INTERFACE =over 4 =item $EV::DIED Must contain a reference to a function that is called when a callback throws an exception (with $@ containing the error). The default prints an informative message and continues. If this callback throws an exception it will be silently ignored. =item $flags = EV::supported_backends =item $flags = EV::recommended_backends =item $flags = EV::embeddable_backends Returns the set (see C flags) of backends supported by this instance of EV, the set of recommended backends (supposed to be good) for this platform and the set of embeddable backends (see EMBED WATCHERS). 
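These bitmasks can be combined to pick a backend explicitly; a minimal sketch (purely illustrative) that prefers epoll when this build of EV both supports and recommends it:

   # prefer epoll if available and recommended, else fall back to the default loop
   my $loop;
   if (EV::supported_backends & EV::recommended_backends & EV::BACKEND_EPOLL) {
      $loop = new EV::Loop EV::BACKEND_EPOLL;   # epoll-only loop
   } else {
      $loop = EV::default_loop;                 # whatever EV selected at load time
   }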
=item EV::sleep $seconds Block the process for the given number of (fractional) seconds. =item $time = EV::time Returns the current time in (fractional) seconds since the epoch. =item $time = EV::now =item $time = $loop->now Returns the time at which the last event loop iteration was started. This is the time that (relative) timers are based on, and referring to it is usually faster than calling EV::time. =item EV::now_update =item $loop->now_update Establishes the current time by querying the kernel, updating the time returned by C<EV::now> in the process. This is a costly operation and is usually done automatically within C<EV::run>. This function is rarely useful, but when some event callback runs for a very long time without entering the event loop, updating libev's idea of the current time is a good idea. =item EV::suspend =item $loop->suspend =item EV::resume =item $loop->resume These two functions suspend and resume a loop, for use when the loop is not used for a while and timeouts should not be processed. A typical use case would be an interactive program such as a game: When the user presses C<^Z> to suspend the game and resumes it an hour later it would be best to handle timeouts as if no time had actually passed while the program was suspended. This can be achieved by calling C<suspend> in your C<SIGTSTP> handler, sending yourself a C<SIGSTOP> and calling C<resume> directly afterwards to resume timer processing. Effectively, all C<timer> watchers will be delayed by the time spent between C<suspend> and C<resume>, and all C<periodic> watchers will be rescheduled (that is, they will lose any events that would have occurred while suspended). After calling C<suspend> you B<must not> call I<any> function on the given loop other than C<resume>, and you B<must not> call C<resume> without a previous call to C<suspend>. Calling C<suspend>/C<resume> has the side effect of updating the event loop time (see C<now_update>). =item $backend = EV::backend =item $backend = $loop->backend Returns an integer describing the backend used by libev (EV::BACKEND_SELECT or EV::BACKEND_EPOLL). =item $active = EV::run [$flags] =item $active = $loop->run ([$flags]) Begin checking for events and calling callbacks. It returns when a callback calls EV::break or the flags are nonzero (in which case the return value is true) or when there are no active watchers which reference the loop (keepalive is true), in which case the return value will be false. The return value can generally be interpreted as "if true, there is more work left to do". The $flags argument can be one of the following: 0 as above EV::RUN_ONCE block at most once (wait, but do not loop) EV::RUN_NOWAIT do not block at all (fetch/handle events but do not wait) =item EV::break [$how] =item $loop->break ([$how]) When called with no arguments or an argument of EV::BREAK_ONE, makes the innermost call to EV::run return. When called with an argument of EV::BREAK_ALL, all calls to EV::run will return as fast as possible. When called with an argument of EV::BREAK_CANCEL, any pending break will be cancelled. =item $count = EV::iteration =item $count = $loop->iteration Return the number of times the event loop has polled for new events. Sometimes useful as a generation counter. =item EV::once $fh_or_undef, $events, $timeout, $cb->($revents) =item $loop->once ($fh_or_undef, $events, $timeout, $cb->($revents)) This function rolls together an I/O and a timer watcher for a single one-shot event without the need for managing a watcher object. If C<$fh_or_undef> is a filehandle or file descriptor, then C<$events> must be a bitset containing either C<EV::READ>, C<EV::WRITE> or C<EV::READ | EV::WRITE>, indicating the type of I/O event you want to wait for.
If you do not want to wait for some I/O event, specify C for C<$fh_or_undef> and C<0> for C<$events>). If timeout is C or negative, then there will be no timeout. Otherwise an C with this value will be started. When an error occurs or either the timeout or I/O watcher triggers, then the callback will be called with the received event set (in general you can expect it to be a combination of C, C, C and C). EV::once doesn't return anything: the watchers stay active till either of them triggers, then they will be stopped and freed, and the callback invoked. =item EV::feed_fd_event $fd, $revents =item $loop->feed_fd_event ($fd, $revents) Feed an event on a file descriptor into EV. EV will react to this call as if the readyness notifications specified by C<$revents> (a combination of C and C) happened on the file descriptor C<$fd>. =item EV::feed_signal_event $signal Feed a signal event into the default loop. EV will react to this call as if the signal specified by C<$signal> had occured. =item EV::feed_signal $signal Feed a signal event into EV - unlike C, this works regardless of which loop has registered the signal, and is mainly useful for custom signal implementations. =item EV::set_io_collect_interval $time =item $loop->set_io_collect_interval ($time) =item EV::set_timeout_collect_interval $time =item $loop->set_timeout_collect_interval ($time) These advanced functions set the minimum block interval when polling for I/O events and the minimum wait interval for timer events. See the libev documentation at L (locally installed as F) for a more detailed discussion. =item $count = EV::pending_count =item $count = $loop->pending_count Returns the number of currently pending watchers. =item EV::invoke_pending =item $loop->invoke_pending Invoke all currently pending watchers. =back =head1 WATCHER OBJECTS A watcher is an object that gets created to record your interest in some event. For instance, if you want to wait for STDIN to become readable, you would create an EV::io watcher for that: my $watcher = EV::io *STDIN, EV::READ, sub { my ($watcher, $revents) = @_; warn "yeah, STDIN should now be readable without blocking!\n" }; All watchers can be active (waiting for events) or inactive (paused). Only active watchers will have their callbacks invoked. All callbacks will be called with at least two arguments: the watcher and a bitmask of received events. Each watcher type has its associated bit in revents, so you can use the same callback for multiple watchers. The event mask is named after the type, i.e. EV::child sets EV::CHILD, EV::prepare sets EV::PREPARE, EV::periodic sets EV::PERIODIC and so on, with the exception of I/O events (which can set both EV::READ and EV::WRITE bits). In the rare case where one wants to create a watcher but not start it at the same time, each constructor has a variant with a trailing C<_ns> in its name, e.g. EV::io has a non-starting variant EV::io_ns and so on. Please note that a watcher will automatically be stopped when the watcher object is destroyed, so you I to keep the watcher objects returned by the constructors. Also, all methods changing some aspect of a watcher (->set, ->priority, ->fh and so on) automatically stop and start it again if it is active, which means pending events get lost. =head2 COMMON WATCHER METHODS This section lists methods common to all watchers. =over 4 =item $w->start Starts a watcher if it isn't active already. Does nothing to an already active watcher. 
By default, all watchers start out in the active state (see the description of the C<_ns> variants if you need stopped watchers). =item $w->stop Stop a watcher if it is active. Also clear any pending events (events that have been received but that didn't yet result in a callback invocation), regardless of whether the watcher was active or not. =item $bool = $w->is_active Returns true if the watcher is active, false otherwise. =item $current_data = $w->data =item $old_data = $w->data ($new_data) Queries a freely usable data scalar on the watcher and optionally changes it. This is a way to associate custom data with a watcher: my $w = EV::timer 60, 0, sub { warn $_[0]->data; }; $w->data ("print me!"); =item $current_cb = $w->cb =item $old_cb = $w->cb ($new_cb) Queries the callback on the watcher and optionally changes it. You can do this at any time without the watcher restarting. =item $current_priority = $w->priority =item $old_priority = $w->priority ($new_priority) Queries the priority on the watcher and optionally changes it. Pending watchers with higher priority will be invoked first. The valid range of priorities lies between EV::MAXPRI (default 2) and EV::MINPRI (default -2). If the priority is outside this range it will automatically be normalised to the nearest valid priority. The default priority of any newly-created watcher is 0. Note that the priority semantics have not yet been fleshed out and are subject to almost certain change. =item $w->invoke ($revents) Call the callback *now* with the given event mask. =item $w->feed_event ($revents) Feed some events on this watcher into EV. EV will react to this call as if the watcher had received the given C<$revents> mask. =item $revents = $w->clear_pending If the watcher is pending, this function clears its pending status and returns its C<$revents> bitset (as if its callback was invoked). If the watcher isn't pending it does nothing and returns C<0>. =item $previous_state = $w->keepalive ($bool) Normally, C will return when there are no active watchers (which is a "deadlock" because no progress can be made anymore). This is convenient because it allows you to start your watchers (and your jobs), call C once and when it returns you know that all your jobs are finished (or they forgot to register some watchers for their task :). Sometimes, however, this gets in your way, for example when the module that calls C (usually the main program) is not the same module as a long-living watcher (for example a DNS client module written by somebody else even). Then you might want any outstanding requests to be handled, but you would not want to keep C from returning just because you happen to have this long-running UDP port watcher. In this case you can clear the keepalive status, which means that even though your watcher is active, it won't keep C from returning. The initial value for keepalive is true (enabled), and you can change it any time. Example: Register an I/O watcher for some UDP socket but do not keep the event loop from running just because of that watcher. my $udp_socket = ... my $udp_watcher = EV::io $udp_socket, EV::READ, sub { ... }; $udp_watcher->keepalive (0); =item $loop = $w->loop Return the loop that this watcher is attached to. =back =head1 WATCHER TYPES Each of the following subsections describes a single watcher type. =head3 I/O WATCHERS - is this file descriptor readable or writable? 
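As a brief sketch (C<$socket> here is assumed to be an already connected, non-blocking handle), an I/O watcher that drains incoming data could look like this:

   # call us whenever $socket becomes readable
   my $r = EV::io $socket, EV::READ, sub {
      my ($w, $revents) = @_;
      my $len = sysread $socket, my $buf, 8192;
      $w->stop unless $len;   # stop watching on EOF or error
      # ... process $buf here
   };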
=over 4 =item $w = EV::io $fileno_or_fh, $eventmask, $callback =item $w = EV::io_ns $fileno_or_fh, $eventmask, $callback =item $w = $loop->io ($fileno_or_fh, $eventmask, $callback) =item $w = $loop->io_ns ($fileno_or_fh, $eventmask, $callback) As long as the returned watcher object is alive, call the C<$callback> when at least one of the events specified in C<$eventmask> occurs. The $eventmask can be one or more of these constants ORed together: EV::READ wait until read() wouldn't block anymore EV::WRITE wait until write() wouldn't block anymore The C<io_ns> variant doesn't start (activate) the newly created watcher. =item $w->set ($fileno_or_fh, $eventmask) Reconfigures the watcher, see the constructor above for details. Can be called at any time. =item $current_fh = $w->fh =item $old_fh = $w->fh ($new_fh) Returns the previously set filehandle and optionally set a new one. =item $current_eventmask = $w->events =item $old_eventmask = $w->events ($new_eventmask) Returns the previously set event mask and optionally set a new one. =back =head3 TIMER WATCHERS - relative and optionally repeating timeouts =over 4 =item $w = EV::timer $after, $repeat, $callback =item $w = EV::timer_ns $after, $repeat, $callback =item $w = $loop->timer ($after, $repeat, $callback) =item $w = $loop->timer_ns ($after, $repeat, $callback) Calls the callback after C<$after> seconds (which may be fractional or negative). If C<$repeat> is non-zero, the timer will be restarted (with the $repeat value as $after) after the callback returns. This means that the callback would be called roughly after C<$after> seconds, and then every C<$repeat> seconds. The timer does its best not to drift, but it will not invoke the timer more often than once per event loop iteration, and might drift in other cases. If that isn't acceptable, look at EV::periodic, which can provide long-term stable timers. The timer is based on a monotonic clock, that is, if somebody is sitting in front of the machine while the timer is running and changes the system clock, the timer will nevertheless run (roughly) the same time. The C<timer_ns> variant doesn't start (activate) the newly created watcher. =item $w->set ($after, $repeat = 0) Reconfigures the watcher, see the constructor above for details. Can be called at any time. =item $w->again =item $w->again ($repeat) Similar to the C<start> method, but has special semantics for repeating timers: If the timer is active and non-repeating, it will be stopped. If the timer is active and repeating, reset the timeout to occur C<$repeat> seconds after now. If the timer is inactive and repeating, start it using the repeat value. Otherwise do nothing. This behaviour is useful when you have a timeout for some IO operation. You create a timer object with the same value for C<$after> and C<$repeat>, and then, in the read/write watcher, run the C<again> method on the timeout watcher. If called with a C<$repeat> argument, then it uses this as the new timer repeat value. =item $after = $w->remaining Calculates and returns the remaining time till the timer will fire. =item $repeat = $w->repeat =item $old_repeat = $w->repeat ($new_repeat) Returns the current value of the repeat attribute and optionally sets a new one. Setting the new one will not restart the watcher - if the watcher is active, the new repeat value is used whenever it expires next. =back =head3 PERIODIC WATCHERS - to cron or not to cron?
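As a small sketch of the offset semantics described below, a periodic watcher created with C<$at = 42> and C<$interval = 60> fires whenever the wallclock time is at 42 seconds past a full minute (UTC), no matter when it was started:

   # triggers at hh:mm:42 (UTC) every minute, without drifting
   my $at_42s = EV::periodic 42, 60, 0, sub {
      print "the wallclock just passed :42 of a minute\n";
   };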
=over 4 =item $w = EV::periodic $at, $interval, $reschedule_cb, $callback =item $w = EV::periodic_ns $at, $interval, $reschedule_cb, $callback =item $w = $loop->periodic ($at, $interval, $reschedule_cb, $callback) =item $w = $loop->periodic_ns ($at, $interval, $reschedule_cb, $callback) Similar to EV::timer, but is not based on relative timeouts but on absolute times. Apart from creating "simple" timers that trigger "at" the specified time, it can also be used for non-drifting absolute timers and more complex, cron-like, setups that are not adversely affected by time jumps (i.e. when the system clock is changed by explicit date -s or other means such as ntpd). It is also the most complex watcher type in EV. It has three distinct "modes": =over 4 =item * absolute timer ($interval = $reschedule_cb = 0) This timer simply fires at the wallclock time C<$at> and doesn't repeat. It will not adjust when a time jump occurs, that is, if it is to be run at January 1st 2011 then it will run when the system time reaches or surpasses this time. =item * repeating interval timer ($interval > 0, $reschedule_cb = 0) In this mode the watcher will always be scheduled to time out at the next C<$at + N * $interval> time (for the lowest integer N) and then repeat, regardless of any time jumps. Note that, since C<N> can be negative, the first trigger can happen before C<$at>. This can be used to create timers that do not drift with respect to system time: my $hourly = EV::periodic 0, 3600, 0, sub { print "once/hour\n" }; That doesn't mean there will always be 3600 seconds in between triggers, but only that the callback will be called when the system time shows a full hour (UTC). Another way to think about it (for the mathematically inclined) is that EV::periodic will try to run the callback in this mode at the next possible time where C<$time = $at (mod $interval)>, regardless of any time jumps. =item * manual reschedule mode ($reschedule_cb = coderef) In this mode $interval and $at are both being ignored. Instead, each time the periodic watcher gets scheduled, the reschedule callback ($reschedule_cb) will be called with the watcher as first, and the current time as second argument. I<This callback MUST NOT stop or destroy this or any other periodic watcher, ever, and MUST NOT call any event loop functions or methods>. If you need to stop it, return 1e30 and stop it afterwards. You may create and start an C<EV::prepare> watcher for this task. It must return the next time to trigger, based on the passed time value (that is, the lowest time value larger than or equal to the second argument). It will usually be called just before the callback will be triggered, but might be called at other times, too. This can be used to create very complex timers, such as a timer that triggers on each midnight, local time (actually one day after the last midnight, to keep the example simple): my $daily = EV::periodic 0, 0, sub { my ($w, $now) = @_; use Time::Local (); my (undef, undef, undef, $d, $m, $y) = localtime $now; Time::Local::timelocal_nocheck 0, 0, 0, $d + 1, $m, $y }, sub { print "it's midnight or likely shortly after, now\n"; }; =back The C<periodic_ns> variant doesn't start (activate) the newly created watcher. =item $w->set ($at, $interval, $reschedule_cb) Reconfigures the watcher, see the constructor above for details. Can be called at any time. =item $w->again Simply stops and starts the watcher again. =item $time = $w->at Return the time that the watcher is expected to trigger next. =item $offset = $w->offset =item $old_offset = $w->offset ($new_offset) Returns the current value of the offset attribute and optionally sets a new one.
Setting the new one will not restart the watcher - if the watcher is active, the new offset value is used whenever it expires next. =item $interval = $w->interval =item $old_interval = $w->interval ($new_interval) See above, for the interval attribute. =item $reschedule_cb = $w->reschedule_cb =item $old_reschedule_cb = $w->reschedule_cb ($new_reschedule_cb) See above, for the reschedule callback. =back =head3 SIGNAL WATCHERS - signal me when a signal gets signalled! =over 4 =item $w = EV::signal $signal, $callback =item $w = EV::signal_ns $signal, $callback =item $w = $loop->signal ($signal, $callback) =item $w = $loop->signal_ns ($signal, $callback) Call the callback when $signal is received (the signal can be specified by number or by name, just as with C or C<%SIG>). Only one event loop can grab a given signal - attempting to grab the same signal from two EV loops will crash the program immediately or cause data corruption. EV will grab the signal for the process (the kernel only allows one component to receive a signal at a time) when you start a signal watcher, and removes it again when you stop it. Perl does the same when you add/remove callbacks to C<%SIG>, so watch out. You can have as many signal watchers per signal as you want. The C variant doesn't start (activate) the newly created watcher. =item $w->set ($signal) Reconfigures the watcher, see the constructor above for details. Can be called at any time. =item $current_signum = $w->signal =item $old_signum = $w->signal ($new_signal) Returns the previously set signal (always as a number not name) and optionally set a new one. =back =head3 CHILD WATCHERS - watch out for process status changes =over 4 =item $w = EV::child $pid, $trace, $callback =item $w = EV::child_ns $pid, $trace, $callback =item $w = $loop->child ($pid, $trace, $callback) =item $w = $loop->child_ns ($pid, $trace, $callback) Call the callback when a status change for pid C<$pid> (or any pid if C<$pid> is 0) has been received (a status change happens when the process terminates or is killed, or, when trace is true, additionally when it is stopped or continued). More precisely: when the process receives a C, EV will fetch the outstanding exit/wait status for all changed/zombie children and call the callback. It is valid (and fully supported) to install a child watcher after a child has exited but before the event loop has started its next iteration (for example, first you C, then the new child process might exit, and only then do you install a child watcher in the parent for the new pid). You can access both exit (or tracing) status and pid by using the C and C methods on the watcher object. You can have as many pid watchers per pid as you want, they will all be called. The C variant doesn't start (activate) the newly created watcher. =item $w->set ($pid, $trace) Reconfigures the watcher, see the constructor above for details. Can be called at any time. =item $current_pid = $w->pid Returns the previously set process id and optionally set a new one. =item $exit_status = $w->rstatus Return the exit/wait status (as returned by waitpid, see the waitpid entry in perlfunc). =item $pid = $w->rpid Return the pid of the awaited child (useful when you have installed a watcher for all pids). =back =head3 STAT WATCHERS - did the file attributes just change? 
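A small sketch (the path and the 10-second polling fallback are just examples): watch a configuration file and react when its attributes change:

   # watch "/etc/myapp.conf"; the callback fires on any attribute change
   my $cfg_w = EV::stat "/etc/myapp.conf", 10, sub {
      my ($w, $revents) = @_;
      warn $w->path, " changed on disk\n";
      # ->attr returns the new stat values, ->prev the ones seen before
   };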
=over 4 =item $w = EV::stat $path, $interval, $callback =item $w = EV::stat_ns $path, $interval, $callback =item $w = $loop->stat ($path, $interval, $callback) =item $w = $loop->stat_ns ($path, $interval, $callback) Call the callback when a file status change has been detected on C<$path>. The C<$path> does not need to exist, changing from "path exists" to "path does not exist" is a status change like any other. The C<$interval> is a recommended polling interval for systems where OS-supported change notifications don't exist or are not supported. If you use C<0> then an unspecified default is used (which is highly recommended!), which can usually be expected to be around five seconds. This watcher type is not meant for massive numbers of stat watchers, as even with OS-supported change notifications, this can be resource-intensive. The C<stat_ns> variant doesn't start (activate) the newly created watcher. =item ... = $w->stat This call is very similar to the perl C<stat> built-in: It stats (using C<lstat>) the path specified in the watcher and sets perl's stat cache (as well as EV's idea of the current stat values) to the values found. In scalar context, a boolean is returned indicating success or failure of the stat. In list context, the same 13-value list as with stat is returned (except that the blksize and blocks fields are not reliable). In the case of an error, errno is set to C<ENOENT> (regardless of the actual error value) and the C<nlink> value is forced to zero (if the stat was successful then nlink is guaranteed to be non-zero). See also the next two entries for more info. =item ... = $w->attr Just like C<< $w->stat >>, but without the initial stat'ing: this returns the values most recently detected by EV. See the next entry for more info. =item ... = $w->prev Just like C<< $w->stat >>, but without the initial stat'ing: this returns the previous set of values, before the change. That is, when the watcher callback is invoked, C<< $w->prev >> will be set to the values found I<before> a change was detected, while C<< $w->attr >> returns the values found leading to the change detection. The difference (if any) between C<prev> and C<attr> is what triggered the callback. If you did something to the filesystem object and do not want to trigger yet another change, you can call C<stat> to update EV's idea of what the current attributes are. =item $w->set ($path, $interval) Reconfigures the watcher, see the constructor above for details. Can be called at any time. =item $current_path = $w->path =item $old_path = $w->path ($new_path) Returns the previously set path and optionally set a new one. =item $current_interval = $w->interval =item $old_interval = $w->interval ($new_interval) Returns the previously set interval and optionally set a new one. Can be used to query the actual interval used. =back =head3 IDLE WATCHERS - when you've got nothing better to do... =over 4 =item $w = EV::idle $callback =item $w = EV::idle_ns $callback =item $w = $loop->idle ($callback) =item $w = $loop->idle_ns ($callback) Call the callback when there are no other pending watchers of the same or higher priority (excluding check, prepare and other idle watchers of the same or lower priority, of course). They are called idle watchers because when the watcher is the highest priority pending event in the process, the process is considered to be idle at that priority. If you want a watcher that is only ever called when I<no> other events are outstanding you have to set the priority to C<EV::MINPRI>.
The process will not block as long as any idle watchers are active, and they will be called repeatedly until stopped. For example, if you have idle watchers at priority C<0> and C<1>, and an I/O watcher at priority C<0>, then the idle watcher at priority C<1> and the I/O watcher will always run when ready. Only when the idle watcher at priority C<1> is stopped and the I/O watcher at priority C<0> is not pending will the C<0>-priority idle watcher be invoked. The C<idle_ns> variant doesn't start (activate) the newly created watcher. =back =head3 PREPARE WATCHERS - customise your event loop! =over 4 =item $w = EV::prepare $callback =item $w = EV::prepare_ns $callback =item $w = $loop->prepare ($callback) =item $w = $loop->prepare_ns ($callback) Call the callback just before the process would block. You can still create/modify any watchers at this point. See the EV::check watcher, below, for explanations and an example. The C<prepare_ns> variant doesn't start (activate) the newly created watcher. =back =head3 CHECK WATCHERS - customise your event loop even more! =over 4 =item $w = EV::check $callback =item $w = EV::check_ns $callback =item $w = $loop->check ($callback) =item $w = $loop->check_ns ($callback) Call the callback just after the process wakes up again (after it has gathered events), but before any other callbacks have been invoked. This can be used to integrate other event-based software into the EV mainloop: You register a prepare callback and in there, you create io and timer watchers as required by the other software. Here is a real-world example of integrating Net::SNMP (with some details left out): our @snmp_watcher; our $snmp_prepare = EV::prepare sub { # do nothing unless active $dispatcher->{_event_queue_h} or return; # make the dispatcher handle any outstanding stuff ... not shown # create an I/O watcher for each and every socket @snmp_watcher = ( (map { EV::io $_, EV::READ, sub { } } keys %{ $dispatcher->{_descriptors} }), EV::timer +($event->[Net::SNMP::Dispatcher::_ACTIVE] ? $event->[Net::SNMP::Dispatcher::_TIME] - EV::now : 0), 0, sub { }, ); }; The callbacks are irrelevant (and are not even being called), the only purpose of those watchers is to wake up the process as soon as one of those events occurs (socket readable, or timer timed out). The corresponding EV::check watcher will then clean up: our $snmp_check = EV::check sub { # destroy all watchers @snmp_watcher = (); # make the dispatcher handle any new stuff ... not shown }; The callbacks of the created watchers will not be called as the watchers are destroyed before this can happen (remember EV::check gets called first). The C<check_ns> variant doesn't start (activate) the newly created watcher. =item EV::CHECK constant issues Like all other watcher types, there is a bitmask constant for use in C<$revents> and other places. The C<EV::CHECK> constant is special as it has the same name as the C<CHECK> sub called by Perl. This doesn't cause big issues on newer perls (beginning with 5.8.9), but it means that the constant must be I<inlined>, i.e. runtime calls will not work. That means that as long as you always C<use EV> and then C<EV::CHECK> you are on the safe side. =back =head3 FORK WATCHERS - the audacity to resume the event loop after a fork Fork watchers are called when a C<fork> was detected. The invocation is done before the event loop blocks next and before C<check> watchers are being called, and only in the child after the fork.
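A minimal sketch of how such a watcher might be used to re-create per-process resources in the child:

   # runs in the child, just before the event loop resumes after a fork
   my $fork_w = EV::fork sub {
      warn "event loop continuing in child process $$\n";
      # re-open per-process handles, reseed RNGs, etc. here
   };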
=over 4 =item $w = EV::fork $callback =item $w = EV::fork_ns $callback =item $w = $loop->fork ($callback) =item $w = $loop->fork_ns ($callback) Call the callback before the event loop is resumed in the child process after a fork. The C variant doesn't start (activate) the newly created watcher. =back =head3 EMBED WATCHERS - when one backend isn't enough... This is a rather advanced watcher type that lets you embed one event loop into another (currently only IO events are supported in the embedded loop, other types of watchers might be handled in a delayed or incorrect fashion and must not be used). See the libev documentation at L (locally installed as F) for more details. In short, this watcher is most useful on BSD systems without working kqueue to still be able to handle a large number of sockets: my $socket_loop; # check wether we use SELECT or POLL _and_ KQUEUE is supported if ( (EV::backend & (EV::BACKEND_POLL | EV::BACKEND_SELECT)) && (EV::supported_backends & EV::embeddable_backends & EV::BACKEND_KQUEUE) ) { # use kqueue for sockets $socket_loop = new EV::Loop EV::BACKEND_KQUEUE | EV::FLAG_NOENV; } # use the default loop otherwise $socket_loop ||= EV::default_loop; =over 4 =item $w = EV::embed $otherloop[, $callback] =item $w = EV::embed_ns $otherloop[, $callback] =item $w = $loop->embed ($otherloop[, $callback]) =item $w = $loop->embed_ns ($otherloop[, $callback]) Call the callback when the embedded event loop (C<$otherloop>) has any I/O activity. The C<$callback> is optional: if it is missing, then the embedded event loop will be managed automatically (which is recommended), otherwise you have to invoke C yourself. The C variant doesn't start (activate) the newly created watcher. =back =head3 ASYNC WATCHERS - how to wake up another event loop Async watchers are provided by EV, but have little use in perl directly, as perl neither supports threads running in parallel nor direct access to signal handlers or other contexts where they could be of value. It is, however, possible to use them from the XS level. Please see the libev documentation for further details. =over 4 =item $w = EV::async $callback =item $w = EV::async_ns $callback =item $w = $loop->async ($callback) =item $w = $loop->async_ns ($callback) =item $w->send =item $bool = $w->async_pending =back =head3 CLEANUP WATCHERS - how to clean up when the event loop goes away Cleanup watchers are not supported on the Perl level, they can only be used via XS currently. =head1 PERL SIGNALS While Perl signal handling (C<%SIG>) is not affected by EV, the behaviour with EV is as the same as any other C library: Perl-signals will only be handled when Perl runs, which means your signal handler might be invoked only the next time an event callback is invoked. The solution is to use EV signal watchers (see C), which will ensure proper operations with regards to other event watchers. If you cannot do this for whatever reason, you can also force a watcher to be called on every event loop iteration by installing a C watcher: my $async_check = EV::check sub { }; This ensures that perl gets into control for a short time to handle any pending signals, and also ensures (slightly) slower overall operation. =head1 ITHREADS Ithreads are not supported by this module in any way. Perl pseudo-threads is evil stuff and must die. Real threads as provided by Coro are fully supported (and enhanced support is available via L). 
=head1 FORK Most of the "improved" event delivering mechanisms of modern operating systems have quite a few problems with fork(2) (to put it bluntly: it is not supported and usually destructive). Libev makes it possible to work around this by having a function that recreates the kernel state after fork in the child. On non-win32 platforms, this module requires the pthread_atfork functionality to do this automatically for you. This function is quite buggy on most BSDs, though, so YMMV. The overhead for this is quite negligible, because everything the function currently does is set a flag that is checked only when the event loop gets used the next time, so when you do fork but not use EV, the overhead is minimal. On win32, there is no notion of fork so all this doesn't apply, of course. =cut our $DIED = sub { warn "EV: error in callback (ignoring): $@"; }; default_loop or die 'EV: cannot initialise libev backend. bad $ENV{LIBEV_FLAGS}?'; 1; =head1 SEE ALSO L - MakeMaker interface to XS API, L (asynchronous DNS), L (makes Glib/Gtk2 use EV as event loop), L (embed Glib into EV), L (efficient thread integration), L (asynchronous SNMP), L for event-loop agnostic and portable event driven programming. =head1 AUTHOR Marc Lehmann http://home.schmorp.de/ =cut EV-4.33/typemap0000644000000000000000000000250411462612617012064 0ustar rootrootSignal T_SIGNAL struct ev_loop * T_LOOP ev_watcher * T_WATCHER ev_io * T_WATCHER ev_timer * T_WATCHER ev_periodic * T_WATCHER ev_signal * T_WATCHER ev_idle * T_WATCHER ev_prepare * T_WATCHER ev_check * T_WATCHER ev_child * T_WATCHER ev_embed * T_WATCHER ev_stat * T_WATCHER ev_fork * T_WATCHER ev_cleanup * T_WATCHER ev_async * T_WATCHER char * T_PVbyte const char * T_PVbyte INPUT T_SIGNAL if (($var = s_signum_croak ($arg)) <= 0) croak (\"'%s' is not a valid signal number or name\", SvPV_nolen ($arg)); T_PVbyte $var = ($type)SvPVbyte_nolen ($arg) T_LOOP if (!(SvROK ($arg) && SvOBJECT (SvRV ($arg)) && (SvSTASH (SvRV ($arg)) == stash_loop || sv_derived_from ($arg, \"EV::Loop\")))) croak (\"object is not of type EV::Loop\"); $var = ($type)SvIVX (SvRV ($arg)); T_WATCHER if (!(SvROK ($arg) && SvOBJECT (SvRV ($arg)) && (SvSTASH (SvRV ($arg)) == stash_" . ($type =~ /ev_(\S+)/, "$1") . " || sv_derived_from ($arg, \"EV::" . ($type =~ /ev_(\S+)/, ucfirst "$1") . "\")))) croak (\"object is not of type EV::" . ($type =~ /ev_(\S+)/, ucfirst "$1") . 
"\"); $var = ($type)SvPVX (SvRV ($arg)); OUTPUT T_PVbyte sv_setpv ((SV *)$arg, $var); T_WATCHER $arg = e_bless ((struct ev_watcher *)$var, stash_${ ($type =~ /ev_(\S+)/, \"$1") }); EV-4.33/EV.xs0000644000000000000000000011660613631214445011356 0ustar rootroot#include "EXTERN.h" #include "perl.h" #include "XSUB.h" /* fix perl api breakage */ #ifndef WIN32 # undef signal # undef sigaction #endif #include "schmorp.h" /* old API compatibility */ static int sv_fileno (SV *fh) { return s_fileno (fh, 0); } #ifndef GvCV_set # define GvCV_set(gv,cv) GvCV (gv) = cv #endif #if EV_ENABLE_ASSERTIONS # undef NDEBUG #else # define NDEBUG 1 #endif /* make sure we get a real assert, not perl's incompatible version */ #undef assert #include #define EV_STANDALONE 1 #define EV_PROTOTYPES 1 #define EV_USE_NANOSLEEP EV_USE_MONOTONIC #define EV_USE_FLOOR 1 #define EV_API_STATIC #define EV_H "../libev/ev.h" #define EV_CONFIG_H error #include "EV/EVAPI.h" #define EV_SELECT_IS_WINSOCKET 0 #ifdef _WIN32 # define EV_SELECT_USE_FD_SET 0 # define NFDBITS PERL_NFDBITS # define fd_mask Perl_fd_mask #endif /* due to bugs in OS X we have to use libev/ explicitly here */ #include "libev/ev.c" #if !defined _WIN32 && !defined __minix && !EV_NO_ATFORK # include #endif #define e_loop(w) INT2PTR (struct ev_loop *, SvIVX (((ev_watcher *)(w))->loop)) #define e_flags(w) ((ev_watcher *)(w))->e_flags #define e_self(w) ((ev_watcher *)(w))->self #define e_fh(w) ((ev_watcher *)(w))->fh #define e_data(w) ((ev_watcher *)(w))->data #define WFLAG_KEEPALIVE 1 #define WFLAG_UNREFED 2 /* has been unref'ed */ #define UNREF(w) \ if (!(e_flags (w) & (WFLAG_KEEPALIVE | WFLAG_UNREFED)) \ && ev_is_active (w)) \ { \ ev_unref (e_loop (w)); \ e_flags (w) |= WFLAG_UNREFED; \ } #define REF(w) \ if (e_flags (w) & WFLAG_UNREFED) \ { \ e_flags (w) &= ~WFLAG_UNREFED; \ ev_ref (e_loop (w)); \ } #define START(type,w) \ do { \ ev_ ## type ## _start (e_loop (w), w); \ UNREF (w); \ } while (0) #define STOP(type,w) \ do { \ REF (w); \ ev_ ## type ## _stop (e_loop (w), w); \ } while (0) #define PAUSE(type) \ do { \ int active = ev_is_active (w); \ if (active) STOP (type, w) #define RESUME(type) \ if (active) START (type, w); \ } while (0) #define RESET(type,w,seta) \ PAUSE (type); \ ev_ ## type ## _set seta; \ RESUME (type) typedef int Signal; /* horrible... */ #define CHECK_SIGNAL_CAN_START(w) \ do { \ /* dive into the internals of libev to avoid aborting in libev */ \ if (signals [(w)->signum - 1].loop \ && signals [(w)->signum - 1].loop != e_loop (w)) \ croak ("unable to start signal watcher, signal %d already registered in another loop", w->signum); \ } while (0) #define START_SIGNAL(w) \ do { \ CHECK_SIGNAL_CAN_START (w); \ START (signal, w); \ } while (0) \ #define RESET_SIGNAL(w,seta) \ do { \ int active = ev_is_active (w); \ if (active) STOP (signal, w); \ ev_ ## signal ## _set seta; \ if (active) START_SIGNAL (w); \ } while (0) static SV *default_loop_sv; static struct EVAPI evapi; static HV *stash_loop, *stash_watcher, *stash_io, *stash_timer, *stash_periodic, *stash_signal, *stash_child, *stash_stat, *stash_idle, *stash_prepare, *stash_check, *stash_embed, *stash_fork, *stash_cleanup, *stash_async; ///////////////////////////////////////////////////////////////////////////// // Event static void e_cb (EV_P_ ev_watcher *w, int revents); static void * e_new (int size, SV *cb_sv, SV *loop) { SV *cv = cb_sv ? 
s_get_cv_croak (cb_sv) : 0; ev_watcher *w; SV *self = NEWSV (0, size); SvPOK_only (self); SvCUR_set (self, size); w = (ev_watcher *)SvPVX (self); ev_init (w, cv ? e_cb : 0); w->loop = SvREFCNT_inc (SvRV (loop)); w->e_flags = WFLAG_KEEPALIVE; w->data = 0; w->fh = 0; w->cb_sv = SvREFCNT_inc (cv); w->self = self; return (void *)w; } static void e_destroy (void *w_) { ev_watcher *w = (ev_watcher *)w_; SvREFCNT_dec (w->loop ); w->loop = 0; SvREFCNT_dec (w->fh ); w->fh = 0; SvREFCNT_dec (w->cb_sv); w->cb_sv = 0; SvREFCNT_dec (w->data ); w->data = 0; } static SV * e_bless (ev_watcher *w, HV *stash) { SV *rv; if (SvOBJECT (w->self)) rv = newRV_inc (w->self); else { rv = newRV_noinc (w->self); sv_bless (rv, stash); SvREADONLY_on (w->self); } return rv; } static SV *sv_self_cache, *sv_events_cache; static void e_cb (EV_P_ ev_watcher *w, int revents) { dSP; I32 mark = SP - PL_stack_base; SV *sv_self, *sv_events; /* libev might have stopped the watcher */ if (ecb_expect_false (w->e_flags & WFLAG_UNREFED) && !ev_is_active (w)) REF (w); if (ecb_expect_true (sv_self_cache)) { sv_self = sv_self_cache; sv_self_cache = 0; SvRV_set (sv_self, SvREFCNT_inc_NN (w->self)); } else { sv_self = newRV_inc (w->self); /* e_self (w) MUST be blessed by now */ SvREADONLY_on (sv_self); } if (ecb_expect_true (sv_events_cache)) { sv_events = sv_events_cache; sv_events_cache = 0; SvIV_set (sv_events, revents); SvIOK_only (sv_events); } else { sv_events = newSViv (revents); SvREADONLY_on (sv_events); } PUSHMARK (SP); EXTEND (SP, 2); PUSHs (sv_self); PUSHs (sv_events); PUTBACK; call_sv (w->cb_sv, G_DISCARD | G_VOID | G_EVAL); if (ecb_expect_false (SvREFCNT (sv_self) != 1 || sv_self_cache)) SvREFCNT_dec (sv_self); else { SvREFCNT_dec (SvRV (sv_self)); SvRV_set (sv_self, &PL_sv_undef); sv_self_cache = sv_self; } if (ecb_expect_false (SvREFCNT (sv_events) != 1 || sv_events_cache)) SvREFCNT_dec (sv_events); else sv_events_cache = sv_events; if (ecb_expect_false (SvTRUE (ERRSV))) { SPAGAIN; PUSHMARK (SP); PUTBACK; call_sv (get_sv ("EV::DIED", 1), G_DISCARD | G_VOID | G_EVAL | G_KEEPERR); } SP = PL_stack_base + mark; PUTBACK; } static void e_once_cb (int revents, void *arg) { dSP; I32 mark = SP - PL_stack_base; SV *sv_events; if (sv_events_cache) { sv_events = sv_events_cache; sv_events_cache = 0; SvIV_set (sv_events, revents); } else sv_events = newSViv (revents); PUSHMARK (SP); XPUSHs (sv_events); PUTBACK; call_sv ((SV *)arg, G_DISCARD | G_VOID | G_EVAL); SvREFCNT_dec ((SV *)arg); if (sv_events_cache) SvREFCNT_dec (sv_events); else sv_events_cache = sv_events; if (SvTRUE (ERRSV)) { SPAGAIN; PUSHMARK (SP); PUTBACK; call_sv (get_sv ("EV::DIED", 1), G_DISCARD | G_VOID | G_EVAL | G_KEEPERR); } SP = PL_stack_base + mark; PUTBACK; } static ev_tstamp e_periodic_cb (ev_periodic *w, ev_tstamp now) { ev_tstamp retval; int count; dSP; ENTER; SAVETMPS; PUSHMARK (SP); EXTEND (SP, 2); PUSHs (newRV_inc (e_self (w))); /* e_self (w) MUST be blessed by now */ PUSHs (newSVnv (now)); PUTBACK; count = call_sv (w->fh, G_SCALAR | G_EVAL); SPAGAIN; if (SvTRUE (ERRSV)) { PUSHMARK (SP); PUTBACK; call_sv (get_sv ("EV::DIED", 1), G_DISCARD | G_VOID | G_EVAL | G_KEEPERR); SPAGAIN; } if (count > 0) { retval = SvNV (TOPs); if (retval < now) retval = now; } else retval = now; FREETMPS; LEAVE; return retval; } #define CHECK_REPEAT(repeat) if (repeat < 0.) 
\ croak (# repeat " value must be >= 0"); #define CHECK_FD(fh,fd) if ((fd) < 0) \ croak ("illegal file descriptor or filehandle (either no attached file descriptor or illegal value): %s", SvPV_nolen (fh)); #define CHECK_SIG(sv,num) if ((num) < 0) \ croak ("illegal signal number or name: %s", SvPV_nolen (sv)); static void default_fork (void) { ev_loop_fork (EV_DEFAULT_UC); } ///////////////////////////////////////////////////////////////////////////// // XS interface functions MODULE = EV PACKAGE = EV PREFIX = ev_ PROTOTYPES: ENABLE BOOT: { HV *stash = gv_stashpv ("EV", 1); static const struct { const char *name; IV iv; } *civ, const_iv[] = { # define const_iv(pfx, name) { # name, (IV) pfx ## name }, const_iv (EV_, MINPRI) const_iv (EV_, MAXPRI) const_iv (EV_, UNDEF) const_iv (EV_, NONE) const_iv (EV_, READ) const_iv (EV_, WRITE) const_iv (EV_, IO) const_iv (EV_, TIMER) const_iv (EV_, PERIODIC) const_iv (EV_, SIGNAL) const_iv (EV_, CHILD) const_iv (EV_, STAT) const_iv (EV_, IDLE) const_iv (EV_, PREPARE) /*const_iv (EV_, CHECK) needs special tretament */ const_iv (EV_, EMBED) const_iv (EV_, FORK) const_iv (EV_, CLEANUP) const_iv (EV_, ASYNC) const_iv (EV_, CUSTOM) const_iv (EV_, ERROR) const_iv (EV, RUN_NOWAIT) const_iv (EV, RUN_ONCE) const_iv (EV, BREAK_CANCEL) const_iv (EV, BREAK_ONE) const_iv (EV, BREAK_ALL) const_iv (EV, BACKEND_SELECT) const_iv (EV, BACKEND_POLL) const_iv (EV, BACKEND_EPOLL) const_iv (EV, BACKEND_KQUEUE) const_iv (EV, BACKEND_DEVPOLL) const_iv (EV, BACKEND_PORT) const_iv (EV, BACKEND_ALL) const_iv (EV, BACKEND_MASK) const_iv (EV, FLAG_AUTO) const_iv (EV, FLAG_FORKCHECK) const_iv (EV, FLAG_SIGNALFD) const_iv (EV, FLAG_NOSIGMASK) const_iv (EV, FLAG_NOENV) const_iv (EV, FLAG_NOINOTIFY) const_iv (EV_, VERSION_MAJOR) const_iv (EV_, VERSION_MINOR) #if EV_COMPAT3 const_iv (EV, FLAG_NOSIGFD) /* compatibility, always 0 */ const_iv (EV_, TIMEOUT) const_iv (EV, LOOP_NONBLOCK) const_iv (EV, LOOP_ONESHOT) const_iv (EV, UNLOOP_CANCEL) const_iv (EV, UNLOOP_ONE) const_iv (EV, UNLOOP_ALL) #endif }; for (civ = const_iv + sizeof (const_iv) / sizeof (const_iv [0]); civ > const_iv; civ--) newCONSTSUB (stash, (char *)civ[-1].name, newSViv (civ[-1].iv)); /* since this clashes with perl CHECK blocks, */ /* but we are interested in constants, */ /* and not blocks, we treat CHECK specially. 
*/ { /* the local $^W = 0 takes care of the warning */ CV *cv = newCONSTSUB (stash, "CHECK", newSViv (EV_CHECK)); /* now we need to re-set the gv, in case it was hijacked */ GvCV_set (gv_fetchpv ("EV::CHECK", GV_ADD, SVt_PVCV), cv); } stash_loop = gv_stashpv ("EV::Loop" , 1); stash_watcher = gv_stashpv ("EV::Watcher" , 1); stash_io = gv_stashpv ("EV::IO" , 1); stash_timer = gv_stashpv ("EV::Timer" , 1); stash_periodic = gv_stashpv ("EV::Periodic", 1); stash_signal = gv_stashpv ("EV::Signal" , 1); stash_idle = gv_stashpv ("EV::Idle" , 1); stash_prepare = gv_stashpv ("EV::Prepare" , 1); stash_check = gv_stashpv ("EV::Check" , 1); stash_child = gv_stashpv ("EV::Child" , 1); stash_embed = gv_stashpv ("EV::Embed" , 1); stash_stat = gv_stashpv ("EV::Stat" , 1); stash_fork = gv_stashpv ("EV::Fork" , 1); stash_cleanup = gv_stashpv ("EV::Cleanup" , 1); stash_async = gv_stashpv ("EV::Async" , 1); { SV *sv = perl_get_sv ("EV::API", TRUE); perl_get_sv ("EV::API", TRUE); /* silence 5.10 warning */ /* the poor man's shared library emulator */ evapi.ver = EV_API_VERSION; evapi.rev = EV_API_REVISION; evapi.sv_fileno = sv_fileno; evapi.sv_signum = s_signum; evapi.supported_backends = ev_supported_backends (); evapi.recommended_backends = ev_recommended_backends (); evapi.embeddable_backends = ev_embeddable_backends (); evapi.time_ = ev_time; evapi.sleep_ = ev_sleep; evapi.loop_new = ev_loop_new; evapi.loop_destroy = ev_loop_destroy; evapi.loop_fork = ev_loop_fork; evapi.iteration = ev_iteration; evapi.depth = ev_depth; evapi.set_userdata = ev_set_userdata; evapi.userdata = ev_userdata; evapi.now = ev_now; evapi.now_update = ev_now_update; evapi.suspend = ev_suspend; evapi.resume = ev_resume; evapi.backend = ev_backend; evapi.break_ = ev_break; evapi.invoke_pending = ev_invoke_pending; evapi.pending_count = ev_pending_count; evapi.verify = ev_verify; evapi.set_loop_release_cb = ev_set_loop_release_cb; evapi.set_invoke_pending_cb= ev_set_invoke_pending_cb; evapi.ref = ev_ref; evapi.unref = ev_unref; evapi.run = ev_run; evapi.once = ev_once; evapi.io_start = ev_io_start; evapi.io_stop = ev_io_stop; evapi.timer_start = ev_timer_start; evapi.timer_stop = ev_timer_stop; evapi.timer_again = ev_timer_again; evapi.timer_remaining = ev_timer_remaining; evapi.periodic_start = ev_periodic_start; evapi.periodic_stop = ev_periodic_stop; evapi.signal_start = ev_signal_start; evapi.signal_stop = ev_signal_stop; evapi.idle_start = ev_idle_start; evapi.idle_stop = ev_idle_stop; evapi.prepare_start = ev_prepare_start; evapi.prepare_stop = ev_prepare_stop; evapi.check_start = ev_check_start; evapi.check_stop = ev_check_stop; #if EV_CHILD_ENABLE evapi.child_start = ev_child_start; evapi.child_stop = ev_child_stop; #endif evapi.stat_start = ev_stat_start; evapi.stat_stop = ev_stat_stop; evapi.stat_stat = ev_stat_stat; evapi.embed_start = ev_embed_start; evapi.embed_stop = ev_embed_stop; evapi.embed_sweep = ev_embed_sweep; evapi.fork_start = ev_fork_start; evapi.fork_stop = ev_fork_stop; evapi.cleanup_start = ev_cleanup_start; evapi.cleanup_stop = ev_cleanup_stop; evapi.async_start = ev_async_start; evapi.async_stop = ev_async_stop; evapi.async_send = ev_async_send; evapi.clear_pending = ev_clear_pending; evapi.invoke = ev_invoke; sv_setiv (sv, (IV)&evapi); SvREADONLY_on (sv); } #if !defined _WIN32 && !defined _MINIX && !EV_NO_ATFORK /* unfortunately, musl neither implements the linux standard base, /* nor makes itself detectable via macros. yeah, right... 
*/ #if __linux && (__GLIBC__ || __UCLIBC__) int __register_atfork(void (*prepare) (void), void (*parent) (void), void (*child) (void), void * __dso_handle); __register_atfork (0, 0, default_fork, 0); #else pthread_atfork (0, 0, default_fork); #endif #endif } SV *ev_default_loop (unsigned int flags = 0) CODE: { if (!default_loop_sv) { evapi.default_loop = ev_default_loop (flags); if (!evapi.default_loop) XSRETURN_UNDEF; default_loop_sv = sv_bless (newRV_noinc (newSViv (PTR2IV (evapi.default_loop))), stash_loop); } RETVAL = newSVsv (default_loop_sv); } OUTPUT: RETVAL void ev_default_destroy () CODE: ev_loop_destroy (EV_DEFAULT_UC); SvREFCNT_dec (default_loop_sv); default_loop_sv = 0; unsigned int ev_supported_backends () unsigned int ev_recommended_backends () unsigned int ev_embeddable_backends () void ev_sleep (NV interval) NV ev_time () void ev_feed_signal (SV *signal) CODE: { Signal signum = s_signum (signal); CHECK_SIG (signal, signum); ev_feed_signal (signum); } NV ev_now () C_ARGS: evapi.default_loop void ev_now_update () C_ARGS: evapi.default_loop void ev_suspend () C_ARGS: evapi.default_loop void ev_resume () C_ARGS: evapi.default_loop unsigned int ev_backend () C_ARGS: evapi.default_loop void ev_verify () ALIAS: loop_verify = 1 C_ARGS: evapi.default_loop unsigned int ev_iteration () ALIAS: loop_count = 1 C_ARGS: evapi.default_loop unsigned int ev_depth () ALIAS: loop_depth = 1 C_ARGS: evapi.default_loop void ev_set_io_collect_interval (NV interval) C_ARGS: evapi.default_loop, interval void ev_set_timeout_collect_interval (NV interval) C_ARGS: evapi.default_loop, interval int ev_run (int flags = 0) ALIAS: loop = 1 C_ARGS: evapi.default_loop, flags void ev_break (int how = EVBREAK_ONE) ALIAS: unloop = 1 C_ARGS: evapi.default_loop, how void ev_feed_fd_event (int fd, int revents = EV_NONE) C_ARGS: evapi.default_loop, fd, revents void ev_feed_signal_event (SV *signal) CODE: { Signal signum = s_signum (signal); CHECK_SIG (signal, signum); ev_feed_signal_event (evapi.default_loop, signum); } unsigned int ev_pending_count () C_ARGS: evapi.default_loop void ev_invoke_pending () C_ARGS: evapi.default_loop ev_io *io (SV *fh, int events, SV *cb) ALIAS: io_ns = 1 _ae_io = 2 CODE: { int fd = s_fileno (fh, events & EV_WRITE); CHECK_FD (fh, fd); if (ix == 2) { ix = 0; events = events ? EV_WRITE : EV_READ; } RETVAL = e_new (sizeof (ev_io), cb, default_loop_sv); e_fh (RETVAL) = newSVsv (fh); ev_io_set (RETVAL, fd, events); if (!ix) START (io, RETVAL); } OUTPUT: RETVAL ev_timer *timer (NV after, NV repeat, SV *cb) ALIAS: timer_ns = 1 INIT: CHECK_REPEAT (repeat); CODE: RETVAL = e_new (sizeof (ev_timer), cb, default_loop_sv); ev_timer_set (RETVAL, after, repeat); if (!ix) START (timer, RETVAL); OUTPUT: RETVAL SV *periodic (NV at, NV interval, SV *reschedule_cb, SV *cb) ALIAS: periodic_ns = 1 INIT: CHECK_REPEAT (interval); CODE: { ev_periodic *w; w = e_new (sizeof (ev_periodic), cb, default_loop_sv); e_fh (w) = SvTRUE (reschedule_cb) ? newSVsv (reschedule_cb) : 0; ev_periodic_set (w, at, interval, e_fh (w) ? 
e_periodic_cb : 0); RETVAL = e_bless ((ev_watcher *)w, stash_periodic); if (!ix) START (periodic, w); } OUTPUT: RETVAL ev_signal *signal (SV *signal, SV *cb) ALIAS: signal_ns = 1 CODE: { Signal signum = s_signum (signal); CHECK_SIG (signal, signum); RETVAL = e_new (sizeof (ev_signal), cb, default_loop_sv); ev_signal_set (RETVAL, signum); if (!ix) START_SIGNAL (RETVAL); } OUTPUT: RETVAL ev_idle *idle (SV *cb) ALIAS: idle_ns = 1 CODE: RETVAL = e_new (sizeof (ev_idle), cb, default_loop_sv); ev_idle_set (RETVAL); if (!ix) START (idle, RETVAL); OUTPUT: RETVAL ev_prepare *prepare (SV *cb) ALIAS: prepare_ns = 1 CODE: RETVAL = e_new (sizeof (ev_prepare), cb, default_loop_sv); ev_prepare_set (RETVAL); if (!ix) START (prepare, RETVAL); OUTPUT: RETVAL ev_check *check (SV *cb) ALIAS: check_ns = 1 CODE: RETVAL = e_new (sizeof (ev_check), cb, default_loop_sv); ev_check_set (RETVAL); if (!ix) START (check, RETVAL); OUTPUT: RETVAL ev_fork *fork (SV *cb) ALIAS: fork_ns = 1 CODE: RETVAL = e_new (sizeof (ev_fork), cb, default_loop_sv); ev_fork_set (RETVAL); if (!ix) START (fork, RETVAL); OUTPUT: RETVAL #if CLEANUP_ENABLED ev_cleanup *cleanup (SV *cb) ALIAS: cleanup_ns = 1 CODE: RETVAL = e_new (sizeof (ev_cleanup), cb, default_loop_sv); SvREFCNT_dec (RETVAL->loop); /* must not keep loop object alive */ ev_cleanup_set (RETVAL); if (!ix) START (cleanup, RETVAL); OUTPUT: RETVAL #endif ev_child *child (int pid, int trace, SV *cb) ALIAS: child_ns = 1 CODE: #if EV_CHILD_ENABLE RETVAL = e_new (sizeof (ev_child), cb, default_loop_sv); ev_child_set (RETVAL, pid, trace); if (!ix) START (child, RETVAL); #else croak ("EV::child watchers not supported on this platform"); #endif OUTPUT: RETVAL ev_stat *stat (SV *path, NV interval, SV *cb) ALIAS: stat_ns = 1 CODE: RETVAL = e_new (sizeof (ev_stat), cb, default_loop_sv); e_fh (RETVAL) = newSVsv (path); ev_stat_set (RETVAL, SvPVbyte_nolen (e_fh (RETVAL)), interval); if (!ix) START (stat, RETVAL); OUTPUT: RETVAL #ifndef EV_NO_LOOPS ev_embed *embed (struct ev_loop *loop, SV *cb = 0) ALIAS: embed_ns = 1 CODE: { if (!(ev_backend (loop) & ev_embeddable_backends ())) croak ("passed loop is not embeddable via EV::embed,"); RETVAL = e_new (sizeof (ev_embed), cb, default_loop_sv); e_fh (RETVAL) = newSVsv (ST (0)); ev_embed_set (RETVAL, loop); if (!ix) START (embed, RETVAL); } OUTPUT: RETVAL #endif ev_async *async (SV *cb) ALIAS: async_ns = 1 CODE: RETVAL = e_new (sizeof (ev_async), cb, default_loop_sv); ev_async_set (RETVAL); if (!ix) START (async, RETVAL); OUTPUT: RETVAL void once (SV *fh, int events, SV *timeout, SV *cb) CODE: ev_once ( evapi.default_loop, s_fileno (fh, events & EV_WRITE), events, SvOK (timeout) ? SvNV (timeout) : -1., e_once_cb, newSVsv (cb) ); PROTOTYPES: DISABLE MODULE = EV PACKAGE = EV::Watcher PREFIX = ev_ int ev_is_active (ev_watcher *w) int ev_is_pending (ev_watcher *w) void ev_invoke (ev_watcher *w, int revents = EV_NONE) C_ARGS: e_loop (w), w, revents int ev_clear_pending (ev_watcher *w) C_ARGS: e_loop (w), w void ev_feed_event (ev_watcher *w, int revents = EV_NONE) C_ARGS: e_loop (w), w, revents int keepalive (ev_watcher *w, SV *new_value = NO_INIT) CODE: { RETVAL = w->e_flags & WFLAG_KEEPALIVE; if (items > 1) { int value = SvTRUE (new_value) ? 
WFLAG_KEEPALIVE : 0; if ((value ^ w->e_flags) & WFLAG_KEEPALIVE) { w->e_flags = (w->e_flags & ~WFLAG_KEEPALIVE) | value; REF (w); UNREF (w); } } } OUTPUT: RETVAL SV *cb (ev_watcher *w, SV *new_cb = NO_INIT) CODE: { if (items > 1) { new_cb = s_get_cv_croak (new_cb); RETVAL = newRV_noinc (w->cb_sv); w->cb_sv = SvREFCNT_inc (new_cb); } else RETVAL = newRV_inc (w->cb_sv); } OUTPUT: RETVAL SV *data (ev_watcher *w, SV *new_data = NO_INIT) CODE: { RETVAL = w->data ? newSVsv (w->data) : &PL_sv_undef; if (items > 1) { SvREFCNT_dec (w->data); w->data = newSVsv (new_data); } } OUTPUT: RETVAL SV *loop (ev_watcher *w) CODE: RETVAL = newRV_inc (w->loop); OUTPUT: RETVAL int priority (ev_watcher *w, SV *new_priority = NO_INIT) CODE: { RETVAL = w->priority; if (items > 1) { int active = ev_is_active (w); if (active) { /* grrr. */ PUSHMARK (SP); XPUSHs (ST (0)); PUTBACK; call_method ("stop", G_DISCARD | G_VOID); } ev_set_priority (w, SvIV (new_priority)); if (active) { PUSHMARK (SP); XPUSHs (ST (0)); PUTBACK; call_method ("start", G_DISCARD | G_VOID); } } } OUTPUT: RETVAL MODULE = EV PACKAGE = EV::IO PREFIX = ev_io_ void ev_io_start (ev_io *w) CODE: START (io, w); void ev_io_stop (ev_io *w) CODE: STOP (io, w); void DESTROY (ev_io *w) CODE: STOP (io, w); e_destroy (w); void set (ev_io *w, SV *fh, int events) CODE: { int fd = s_fileno (fh, events & EV_WRITE); CHECK_FD (fh, fd); sv_setsv (e_fh (w), fh); RESET (io, w, (w, fd, events)); } SV *fh (ev_io *w, SV *new_fh = NO_INIT) CODE: { if (items > 1) { int fd = s_fileno (new_fh, w->events & EV_WRITE); CHECK_FD (new_fh, fd); RETVAL = e_fh (w); e_fh (w) = newSVsv (new_fh); RESET (io, w, (w, fd, w->events)); } else RETVAL = newSVsv (e_fh (w)); } OUTPUT: RETVAL int events (ev_io *w, int new_events = NO_INIT) CODE: { RETVAL = w->events; if (items > 1 && (new_events ^ w->events) & (EV_READ | EV_WRITE)) { PAUSE (io); ev_io_modify (w, new_events); RESUME (io); } } OUTPUT: RETVAL MODULE = EV PACKAGE = EV::Signal PREFIX = ev_signal_ void ev_signal_start (ev_signal *w) CODE: START_SIGNAL (w); void ev_signal_stop (ev_signal *w) CODE: STOP (signal, w); void DESTROY (ev_signal *w) CODE: STOP (signal, w); e_destroy (w); void set (ev_signal *w, SV *signal) CODE: { Signal signum = s_signum (signal); CHECK_SIG (signal, signum); RESET_SIGNAL (w, (w, signum)); } int signal (ev_signal *w, SV *new_signal = NO_INIT) CODE: { RETVAL = w->signum; if (items > 1) { Signal signum = s_signum (new_signal); CHECK_SIG (new_signal, signum); RESET_SIGNAL (w, (w, signum)); } } OUTPUT: RETVAL MODULE = EV PACKAGE = EV::Timer PREFIX = ev_timer_ void ev_timer_start (ev_timer *w) INIT: CHECK_REPEAT (w->repeat); CODE: START (timer, w); void ev_timer_stop (ev_timer *w) CODE: STOP (timer, w); void ev_timer_again (ev_timer *w, NV repeat = NO_INIT) CODE: { if (items > 1) { CHECK_REPEAT (repeat); w->repeat = repeat; } ev_timer_again (e_loop (w), w); UNREF (w); } NV ev_timer_remaining (ev_timer *w) C_ARGS: e_loop (w), w void DESTROY (ev_timer *w) CODE: STOP (timer, w); e_destroy (w); void set (ev_timer *w, NV after, NV repeat = 0.) 
INIT: CHECK_REPEAT (repeat); CODE: RESET (timer, w, (w, after, repeat)); NV repeat (ev_timer *w, SV *new_repeat = NO_INIT) CODE: RETVAL = w->repeat; if (items > 1) { NV repeat = SvNV (new_repeat); CHECK_REPEAT (repeat); w->repeat = repeat; } OUTPUT: RETVAL MODULE = EV PACKAGE = EV::Periodic PREFIX = ev_periodic_ void ev_periodic_start (ev_periodic *w) INIT: CHECK_REPEAT (w->interval); CODE: START (periodic, w); void ev_periodic_stop (ev_periodic *w) CODE: STOP (periodic, w); void ev_periodic_again (ev_periodic *w) CODE: ev_periodic_again (e_loop (w), w); UNREF (w); void DESTROY (ev_periodic *w) CODE: STOP (periodic, w); e_destroy (w); void set (ev_periodic *w, NV at, NV interval = 0., SV *reschedule_cb = &PL_sv_undef) INIT: CHECK_REPEAT (interval); CODE: { SvREFCNT_dec (e_fh (w)); e_fh (w) = SvTRUE (reschedule_cb) ? newSVsv (reschedule_cb) : 0; RESET (periodic, w, (w, at, interval, e_fh (w) ? e_periodic_cb : 0)); } NV at (ev_periodic *w) CODE: RETVAL = ev_periodic_at (w); OUTPUT: RETVAL NV offset (ev_periodic *w, SV *new_offset = NO_INIT) CODE: RETVAL = w->offset; if (items > 1) w->offset = SvNV (new_offset); OUTPUT: RETVAL NV interval (ev_periodic *w, SV *new_interval = NO_INIT) CODE: RETVAL = w->interval; if (items > 1) { NV interval = SvNV (new_interval); CHECK_REPEAT (interval); w->interval = interval; } OUTPUT: RETVAL SV *reschedule_cb (ev_periodic *w, SV *new_reschedule_cb = NO_INIT) CODE: RETVAL = e_fh (w) ? e_fh (w) : &PL_sv_undef; if (items > 1) { sv_2mortal (RETVAL); e_fh (w) = SvTRUE (new_reschedule_cb) ? newSVsv (new_reschedule_cb) : 0; } OUTPUT: RETVAL MODULE = EV PACKAGE = EV::Idle PREFIX = ev_idle_ void ev_idle_start (ev_idle *w) CODE: START (idle, w); void ev_idle_stop (ev_idle *w) CODE: STOP (idle, w); void DESTROY (ev_idle *w) CODE: STOP (idle, w); e_destroy (w); MODULE = EV PACKAGE = EV::Prepare PREFIX = ev_prepare_ void ev_prepare_start (ev_prepare *w) CODE: START (prepare, w); void ev_prepare_stop (ev_prepare *w) CODE: STOP (prepare, w); void DESTROY (ev_prepare *w) CODE: STOP (prepare, w); e_destroy (w); MODULE = EV PACKAGE = EV::Check PREFIX = ev_check_ void ev_check_start (ev_check *w) CODE: START (check, w); void ev_check_stop (ev_check *w) CODE: STOP (check, w); void DESTROY (ev_check *w) CODE: STOP (check, w); e_destroy (w); MODULE = EV PACKAGE = EV::Fork PREFIX = ev_fork_ void ev_fork_start (ev_fork *w) CODE: START (fork, w); void ev_fork_stop (ev_fork *w) CODE: STOP (fork, w); void DESTROY (ev_fork *w) CODE: STOP (fork, w); e_destroy (w); #if CLEANUP_ENABLED MODULE = EV PACKAGE = EV::Cleanup PREFIX = ev_cleanup_ void ev_cleanup_start (ev_cleanup *w) CODE: START (cleanup, w); void ev_cleanup_stop (ev_cleanup *w) CODE: STOP (cleanup, w); void DESTROY (ev_cleanup *w) CODE: STOP (cleanup, w); SvREFCNT_inc (w->loop); /* has been dec'ed on creation */ e_destroy (w); int keepalive (ev_watcher *w, SV *new_value = 0) CODE: RETVAL = 1; OUTPUT: RETVAL #endif MODULE = EV PACKAGE = EV::Child PREFIX = ev_child_ #if EV_CHILD_ENABLE void ev_child_start (ev_child *w) CODE: START (child, w); void ev_child_stop (ev_child *w) CODE: STOP (child, w); void DESTROY (ev_child *w) CODE: STOP (child, w); e_destroy (w); void set (ev_child *w, int pid, int trace) CODE: RESET (child, w, (w, pid, trace)); int pid (ev_child *w) ALIAS: rpid = 1 rstatus = 2 CODE: RETVAL = ix == 0 ? w->pid : ix == 1 ? 
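/* ix is set from the ALIAS table above: 0 = pid, 1 = rpid, 2 = rstatus */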
w->rpid : w->rstatus; OUTPUT: RETVAL #endif MODULE = EV PACKAGE = EV::Stat PREFIX = ev_stat_ void ev_stat_start (ev_stat *w) CODE: START (stat, w); void ev_stat_stop (ev_stat *w) CODE: STOP (stat, w); void DESTROY (ev_stat *w) CODE: STOP (stat, w); e_destroy (w); void set (ev_stat *w, SV *path, NV interval) CODE: { sv_setsv (e_fh (w), path); RESET (stat, w, (w, SvPVbyte_nolen (e_fh (w)), interval)); } SV *path (ev_stat *w, SV *new_path = NO_INIT) CODE: { RETVAL = e_fh (w) ? e_fh (w) : &PL_sv_undef; if (items > 1) { sv_2mortal (RETVAL); e_fh (w) = newSVsv (new_path); RESET (stat, w, (w, SvPVbyte_nolen (e_fh (w)), w->interval)); } } OUTPUT: RETVAL NV interval (ev_stat *w, SV *new_interval = NO_INIT) CODE: RETVAL = w->interval; if (items > 1) { PAUSE (stat); w->interval = SvNV (new_interval); RESUME (stat); } OUTPUT: RETVAL void prev (ev_stat *w) ALIAS: stat = 1 attr = 2 PPCODE: { ev_statdata *s = ix ? &w->attr : &w->prev; if (ix == 1) ev_stat_stat (e_loop (w), w); else if (!s->st_nlink) errno = ENOENT; PL_statcache.st_dev = s->st_nlink; PL_statcache.st_ino = s->st_ino; PL_statcache.st_mode = s->st_mode; PL_statcache.st_nlink = s->st_nlink; PL_statcache.st_uid = s->st_uid; PL_statcache.st_gid = s->st_gid; PL_statcache.st_rdev = s->st_rdev; PL_statcache.st_size = s->st_size; PL_statcache.st_atime = s->st_atime; PL_statcache.st_mtime = s->st_mtime; PL_statcache.st_ctime = s->st_ctime; if (GIMME_V == G_SCALAR) XPUSHs (boolSV (s->st_nlink)); else if (GIMME_V == G_ARRAY && s->st_nlink) { EXTEND (SP, 13); PUSHs (sv_2mortal (newSViv (s->st_dev))); PUSHs (sv_2mortal (newSViv (s->st_ino))); PUSHs (sv_2mortal (newSVuv (s->st_mode))); PUSHs (sv_2mortal (newSVuv (s->st_nlink))); PUSHs (sv_2mortal (newSViv (s->st_uid))); PUSHs (sv_2mortal (newSViv (s->st_gid))); PUSHs (sv_2mortal (newSViv (s->st_rdev))); PUSHs (sv_2mortal (newSVnv ((NV)s->st_size))); PUSHs (sv_2mortal (newSVnv (s->st_atime))); PUSHs (sv_2mortal (newSVnv (s->st_mtime))); PUSHs (sv_2mortal (newSVnv (s->st_ctime))); PUSHs (sv_2mortal (newSVuv (4096))); PUSHs (sv_2mortal (newSVnv ((NV)((s->st_size + 4095) / 4096)))); } } MODULE = EV PACKAGE = EV::Embed PREFIX = ev_embed_ void ev_embed_start (ev_embed *w) CODE: START (embed, w); void ev_embed_stop (ev_embed *w) CODE: STOP (embed, w); void DESTROY (ev_embed *w) CODE: STOP (embed, w); e_destroy (w); void set (ev_embed *w, struct ev_loop *loop) CODE: { sv_setsv (e_fh (w), ST (1)); RESET (embed, w, (w, loop)); } SV *other (ev_embed *w) CODE: RETVAL = newSVsv (e_fh (w)); OUTPUT: RETVAL void ev_embed_sweep (ev_embed *w) C_ARGS: e_loop (w), w MODULE = EV PACKAGE = EV::Async PREFIX = ev_async_ void ev_async_start (ev_async *w) CODE: START (async, w); void ev_async_stop (ev_async *w) CODE: STOP (async, w); void DESTROY (ev_async *w) CODE: STOP (async, w); e_destroy (w); void ev_async_send (ev_async *w) C_ARGS: e_loop (w), w SV *ev_async_async_pending (ev_async *w) CODE: RETVAL = boolSV (ev_async_pending (w)); OUTPUT: RETVAL #ifndef EV_NO_LOOPS MODULE = EV PACKAGE = EV::Loop PREFIX = ev_ SV *new (SV *klass, unsigned int flags = 0) CODE: { struct ev_loop *loop = ev_loop_new (flags); if (!loop) XSRETURN_UNDEF; RETVAL = sv_bless (newRV_noinc (newSViv (PTR2IV (loop))), stash_loop); } OUTPUT: RETVAL void DESTROY (struct ev_loop *loop) CODE: /* 1. the default loop shouldn't be freed by destroying it's perl loop object */ /* 2. 
not doing so helps avoid many global destruction bugs in perl, too */ if (loop != evapi.default_loop) ev_loop_destroy (loop); void ev_loop_fork (struct ev_loop *loop) NV ev_now (struct ev_loop *loop) void ev_now_update (struct ev_loop *loop) void ev_suspend (struct ev_loop *loop) void ev_resume (struct ev_loop *loop) void ev_set_io_collect_interval (struct ev_loop *loop, NV interval) void ev_set_timeout_collect_interval (struct ev_loop *loop, NV interval) unsigned int ev_backend (struct ev_loop *loop) void ev_verify (struct ev_loop *loop) ALIAS: loop_verify = 1 unsigned int ev_iteration (struct ev_loop *loop) ALIAS: loop_count = 1 unsigned int ev_depth (struct ev_loop *loop) ALIAS: loop_depth = 1 int ev_run (struct ev_loop *loop, int flags = 0) ALIAS: loop = 1 void ev_break (struct ev_loop *loop, int how = 1) ALIAS: unloop = 1 void ev_feed_fd_event (struct ev_loop *loop, int fd, int revents = EV_NONE) unsigned int ev_pending_count (struct ev_loop *loop) void ev_invoke_pending (struct ev_loop *loop) #if 0 void ev_feed_signal_event (struct ev_loop *loop, SV *signal) CODE: { Signal signum = s_signum (signal); CHECK_SIG (signal, signum); ev_feed_signal_event (loop, signum); } #endif ev_io *io (struct ev_loop *loop, SV *fh, int events, SV *cb) ALIAS: io_ns = 1 CODE: { int fd = s_fileno (fh, events & EV_WRITE); CHECK_FD (fh, fd); RETVAL = e_new (sizeof (ev_io), cb, ST (0)); e_fh (RETVAL) = newSVsv (fh); ev_io_set (RETVAL, fd, events); if (!ix) START (io, RETVAL); } OUTPUT: RETVAL ev_timer *timer (struct ev_loop *loop, NV after, NV repeat, SV *cb) ALIAS: timer_ns = 1 INIT: CHECK_REPEAT (repeat); CODE: RETVAL = e_new (sizeof (ev_timer), cb, ST (0)); ev_timer_set (RETVAL, after, repeat); if (!ix) START (timer, RETVAL); OUTPUT: RETVAL SV *periodic (struct ev_loop *loop, NV at, NV interval, SV *reschedule_cb, SV *cb) ALIAS: periodic_ns = 1 INIT: CHECK_REPEAT (interval); CODE: { ev_periodic *w; w = e_new (sizeof (ev_periodic), cb, ST (0)); e_fh (w) = SvTRUE (reschedule_cb) ? newSVsv (reschedule_cb) : 0; ev_periodic_set (w, at, interval, e_fh (w) ? 
e_periodic_cb : 0); RETVAL = e_bless ((ev_watcher *)w, stash_periodic); if (!ix) START (periodic, w); } OUTPUT: RETVAL ev_signal *signal (struct ev_loop *loop, SV *signal, SV *cb) ALIAS: signal_ns = 1 CODE: { Signal signum = s_signum (signal); CHECK_SIG (signal, signum); RETVAL = e_new (sizeof (ev_signal), cb, ST (0)); ev_signal_set (RETVAL, signum); if (!ix) START_SIGNAL (RETVAL); } OUTPUT: RETVAL ev_idle *idle (struct ev_loop *loop, SV *cb) ALIAS: idle_ns = 1 CODE: RETVAL = e_new (sizeof (ev_idle), cb, ST (0)); ev_idle_set (RETVAL); if (!ix) START (idle, RETVAL); OUTPUT: RETVAL ev_prepare *prepare (struct ev_loop *loop, SV *cb) ALIAS: prepare_ns = 1 CODE: RETVAL = e_new (sizeof (ev_prepare), cb, ST (0)); ev_prepare_set (RETVAL); if (!ix) START (prepare, RETVAL); OUTPUT: RETVAL ev_check *check (struct ev_loop *loop, SV *cb) ALIAS: check_ns = 1 CODE: RETVAL = e_new (sizeof (ev_check), cb, ST (0)); ev_check_set (RETVAL); if (!ix) START (check, RETVAL); OUTPUT: RETVAL ev_fork *fork (struct ev_loop *loop, SV *cb) ALIAS: fork_ns = 1 CODE: RETVAL = e_new (sizeof (ev_fork), cb, ST (0)); ev_fork_set (RETVAL); if (!ix) START (fork, RETVAL); OUTPUT: RETVAL #if CLEANUP_ENABLED ev_cleanup *cleanup (struct ev_loop *loop, SV *cb) ALIAS: cleanup_ns = 1 CODE: RETVAL = e_new (sizeof (ev_cleanup), cb, ST (0)); SvREFCNT_dec (RETVAL->loop); /* must not keep loop object alive */ ev_cleanup_set (RETVAL); if (!ix) START (cleanup, RETVAL); OUTPUT: RETVAL #endif ev_child *child (struct ev_loop *loop, int pid, int trace, SV *cb) ALIAS: child_ns = 1 CODE: #if EV_CHILD_ENABLE RETVAL = e_new (sizeof (ev_child), cb, ST (0)); ev_child_set (RETVAL, pid, trace); if (!ix) START (child, RETVAL); #else croak ("EV::child watchers not supported on this platform"); #endif OUTPUT: RETVAL ev_stat *stat (struct ev_loop *loop, SV *path, NV interval, SV *cb) ALIAS: stat_ns = 1 CODE: RETVAL = e_new (sizeof (ev_stat), cb, ST (0)); e_fh (RETVAL) = newSVsv (path); ev_stat_set (RETVAL, SvPVbyte_nolen (e_fh (RETVAL)), interval); if (!ix) START (stat, RETVAL); OUTPUT: RETVAL ev_embed *embed (struct ev_loop *loop, struct ev_loop *other, SV *cb = 0) ALIAS: embed_ns = 1 CODE: { if (!(ev_backend (other) & ev_embeddable_backends ())) croak ("passed loop is not embeddable via EV::embed,"); RETVAL = e_new (sizeof (ev_embed), cb, ST (0)); e_fh (RETVAL) = newSVsv (ST (1)); ev_embed_set (RETVAL, other); if (!ix) START (embed, RETVAL); } OUTPUT: RETVAL ev_async *async (struct ev_loop *loop, SV *cb) ALIAS: async_ns = 1 CODE: RETVAL = e_new (sizeof (ev_async), cb, ST (0)); ev_async_set (RETVAL); if (!ix) START (async, RETVAL); OUTPUT: RETVAL void once (struct ev_loop *loop, SV *fh, int events, SV *timeout, SV *cb) CODE: ev_once ( loop, s_fileno (fh, events & EV_WRITE), events, SvOK (timeout) ? 
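/* an undef timeout is passed to libev as -1., i.e. no timeout watcher is started: */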
SvNV (timeout) : -1., e_once_cb, newSVsv (cb) ); #endif EV-4.33/EV/0000755000000000000000000000000013634420051010763 5ustar rootrootEV-4.33/EV/MakeMaker.pm0000644000000000000000000000433113265441031013160 0ustar rootrootpackage EV::MakeMaker; BEGIN { eval { require warnings } && warnings->unimport ("uninitialized") } use Config; use base 'Exporter'; @EXPORT_OK = qw(&ev_args $installsitearch); my %opt; for my $opt (split /:+/, $ENV{PERL_MM_OPT}) { my ($k,$v) = split /=/, $opt; $opt{$k} = $v; } my $extra = $Config{sitearch}; $extra =~ s/$Config{prefix}/$opt{PREFIX}/ if exists $opt{PREFIX}; for my $d ($extra, @INC) { if (-e "$d/EV/EVAPI.h") { $installsitearch = $d; last; } } sub ev_args { my %arg = @_; $arg{INC} .= " -I$installsitearch/EV -I$installsitearch"; %arg; } 1; __END__ =head1 NAME EV::MakeMaker - MakeMaker glue for the C-level EV API =head1 SYNOPSIS This allows you to access some libevent functionality from other perl modules. =head1 DESCRIPTION For optimal performance, hook into EV at the C-level. You'll need to make changes to your C, load C in your C file and add code to your C / C file(s). =head1 HOW TO =head2 Makefile.PL use EV::MakeMaker qw(ev_args); # ... set up %args ... WriteMakefile (ev_args (%args)); =head2 extension.pm use EV (); # imports optional =head2 extension.xs #include "EVAPI.h" [...] BOOT: I_EV_API (HvNAME (GvSTASH (CvGV (cv)))); =head1 API See the L header, which you should include instead of F. In short, all the functions and macros from F should work, except that the trailing underscore macros (C, C) are not available (except C :). Multiplicity is enabled. The C member in each watcher is of type C and not C (this might change at some point). =head1 EXAMPLE The L, L and L modules all give nice examples on how to use this module. Here are some F<.xs> fragments taken from EV::ADNS that should get you going: #include "EVAPI.h" static ev_prepare pw; static ev_idle iw; static void idle_cb (EV_P_ ev_idle *w, int revents) { ev_idle_stop (EV_A, w); } MODULE = ... 
BOOT: { I_EV_API ("EV::ADNS"); ev_prepare_init (&pw, prepare_cb); ev_init (&iw, idle_cb); ev_set_priority (&iw, EV_MINPRI); ev_idle_start (EV_DEFAULT, &iw); } =cut EV-4.33/EV/EVAPI.h0000644000000000000000000002026013631214770012006 0ustar rootroot#ifndef EV_API_H #define EV_API_H #include "EXTERN.h" #include "perl.h" #include "XSUB.h" #ifndef pTHX_ # define pTHX_ # define aTHX_ # define pTHX # define aTHX #endif #define EV_COMMON \ int e_flags; /* cheap on 64 bit systems */ \ SV *loop; \ SV *self; /* contains this struct */ \ SV *cb_sv, *fh, *data; #ifndef EV_PROTOTYPES # define EV_PROTOTYPES 0 #endif #ifndef EV_H # define EV_H #endif #include EV_H struct EVAPI { I32 ver; I32 rev; #define EV_API_VERSION 5 #define EV_API_REVISION 1 struct ev_loop *default_loop; unsigned int supported_backends; unsigned int recommended_backends; unsigned int embeddable_backends; /* TODO: remove on major API bump */ /* perl fh or fd int to fd */ int (*sv_fileno) (SV *fh); /* signal number/name to signum */ int (*sv_signum) (SV *fh); /* same as libev functions */ ev_tstamp (*time_)(void); void (*sleep_)(ev_tstamp); struct ev_loop *(*loop_new)(unsigned int); void (*loop_destroy)(EV_P); void (*loop_fork)(EV_P); unsigned int (*backend)(EV_P); unsigned int (*iteration)(EV_P); unsigned int (*depth)(EV_P); ev_tstamp (*now)(EV_P); void (*now_update)(EV_P); int (*run)(EV_P_ int flags); void (*break_)(EV_P_ int how); void (*suspend)(EV_P); void (*resume) (EV_P); void (*ref) (EV_P); void (*unref)(EV_P); void (*set_userdata)(EV_P_ void *data); void *(*userdata) (EV_P); void (*set_loop_release_cb) (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P)); void (*set_invoke_pending_cb)(EV_P_ void (*invoke_pending_cb)(EV_P)); unsigned int (*pending_count)(EV_P); void (*invoke_pending) (EV_P); void (*verify) (EV_P); void (*once)(EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg); void (*invoke)(EV_P_ void *, int); int (*clear_pending)(EV_P_ void *); void (*io_start)(EV_P_ ev_io *); void (*io_stop) (EV_P_ ev_io *); void (*timer_start)(EV_P_ ev_timer *); void (*timer_stop) (EV_P_ ev_timer *); void (*timer_again)(EV_P_ ev_timer *); ev_tstamp (*timer_remaining) (EV_P_ ev_timer *); void (*periodic_start)(EV_P_ ev_periodic *); void (*periodic_stop) (EV_P_ ev_periodic *); void (*signal_start)(EV_P_ ev_signal *); void (*signal_stop) (EV_P_ ev_signal *); void (*child_start)(EV_P_ ev_child *); void (*child_stop) (EV_P_ ev_child *); void (*stat_start)(EV_P_ ev_stat *); void (*stat_stop) (EV_P_ ev_stat *); void (*stat_stat) (EV_P_ ev_stat *); void (*idle_start)(EV_P_ ev_idle *); void (*idle_stop) (EV_P_ ev_idle *); void (*prepare_start)(EV_P_ ev_prepare *); void (*prepare_stop) (EV_P_ ev_prepare *); void (*check_start)(EV_P_ ev_check *); void (*check_stop) (EV_P_ ev_check *); void (*embed_start)(EV_P_ ev_embed *); void (*embed_stop) (EV_P_ ev_embed *); void (*embed_sweep)(EV_P_ ev_embed *); void (*fork_start) (EV_P_ ev_fork *); void (*fork_stop) (EV_P_ ev_fork *); void (*cleanup_start) (EV_P_ ev_cleanup *); void (*cleanup_stop) (EV_P_ ev_cleanup *); void (*async_start)(EV_P_ ev_async *); void (*async_stop) (EV_P_ ev_async *); void (*async_send) (EV_P_ ev_async *); }; #if !EV_PROTOTYPES # undef EV_DEFAULT # undef EV_DEFAULT_ # undef EV_DEFAULT_UC # undef EV_DEFAULT_UC_ # undef EV_A_ # define EV_DEFAULT GEVAPI->default_loop # define EV_DEFAULT_UC GEVAPI->default_loop # define ev_supported_backends() GEVAPI->supported_backends # define ev_recommended_backends() GEVAPI->recommended_backends # define 
ev_embeddable_backends() GEVAPI->embeddable_backends # define sv_fileno(sv) GEVAPI->sv_fileno (sv) # define sv_signum(sv) GEVAPI->sv_signum (sv) # define ev_time() GEVAPI->time_ () # define ev_sleep(time) GEVAPI->sleep_ ((time)) # define ev_loop_new(flags) GEVAPI->loop_new ((flags)) # define ev_loop_destroy(loop) GEVAPI->loop_destroy ((loop)) # define ev_loop_fork(loop) GEVAPI->loop_fork ((loop)) # define ev_backend(loop) GEVAPI->backend ((loop)) # define ev_iteration(loop) GEVAPI->iteration ((loop)) # define ev_depth(loop) GEVAPI->depth ((depth)) # define ev_now(loop) GEVAPI->now ((loop)) # define ev_now_update(loop) GEVAPI->now_update ((loop)) # define ev_run(l,flags) GEVAPI->run ((l), (flags)) # define ev_break(loop,how) GEVAPI->break_ ((loop), (how)) # define ev_suspend(loop) GEVAPI->suspend ((loop)) # define ev_resume(loop) GEVAPI->resume ((loop)) # define ev_ref(loop) GEVAPI->ref (loop) # define ev_unref(loop) GEVAPI->unref (loop) # define ev_set_userdata(l,p) GEVAPI->set_userdata ((l), (p)) # define ev_userdata(l) GEVAPI->userdata (l) # define ev_set_loop_release_cb(l,r,a) GEVAPI->set_loop_release_cb ((l), (r), (a)) # define ev_set_invoke_pending_cb(l,c) GEVAPI->set_invoke_pending_cb ((l), (c)) # define ev_invoke_pending(l) GEVAPI->invoke_pending ((l)) # define ev_pending_count(l) GEVAPI->pending_count ((l)) # define ev_verify(l) GEVAPI->verify ((l)) # define ev_once(loop,fd,events,timeout,cb,arg) GEVAPI->once ((loop), (fd), (events), (timeout), (cb), (arg)) # define ev_invoke(l,w,rev) GEVAPI->invoke ((l), (w), (rev)) # define ev_clear_pending(l,w) GEVAPI->clear_pending ((l), (w)) # define ev_io_start(l,w) GEVAPI->io_start ((l), (w)) # define ev_io_stop(l,w) GEVAPI->io_stop ((l), (w)) # define ev_timer_start(l,w) GEVAPI->timer_start ((l), (w)) # define ev_timer_stop(l,w) GEVAPI->timer_stop ((l), (w)) # define ev_timer_again(l,w) GEVAPI->timer_again ((l), (w)) # define ev_timer_remaining(l,w) GEVAPI->timer_remaining ((l), (w)) # define ev_periodic_start(l,w) GEVAPI->periodic_start ((l), (w)) # define ev_periodic_stop(l,w) GEVAPI->periodic_stop ((l), (w)) # define ev_signal_start(l,w) GEVAPI->signal_start ((l), (w)) # define ev_signal_stop(l,w) GEVAPI->signal_stop ((l), (w)) # define ev_idle_start(l,w) GEVAPI->idle_start ((l), (w)) # define ev_idle_stop(l,w) GEVAPI->idle_stop ((l), (w)) # define ev_prepare_start(l,w) GEVAPI->prepare_start ((l), (w)) # define ev_prepare_stop(l,w) GEVAPI->prepare_stop ((l), (w)) # define ev_check_start(l,w) GEVAPI->check_start ((l), (w)) # define ev_check_stop(l,w) GEVAPI->check_stop ((l), (w)) # define ev_child_start(l,w) GEVAPI->child_start ((l), (w)) # define ev_child_stop(l,w) GEVAPI->child_stop ((l), (w)) # define ev_stat_start(l,w) GEVAPI->stat_start ((l), (w)) # define ev_stat_stop(l,w) GEVAPI->stat_stop ((l), (w)) # define ev_stat_stat(l,w) GEVAPI->stat_stat ((l), (w)) # define ev_embed_start(l,w) GEVAPI->embed_start ((l), (w)) # define ev_embed_stop(l,w) GEVAPI->embed_stop ((l), (w)) # define ev_embed_sweep(l,w) GEVAPI->embed_sweep ((l), (w)) # define ev_fork_start(l,w) GEVAPI->fork_start ((l), (w)) # define ev_fork_stop(l,w) GEVAPI->fork_stop ((l), (w)) # define ev_cleanup_start(l,w) GEVAPI->cleanup_start ((l), (w)) # define ev_cleanup_stop(l,w) GEVAPI->cleanup_stop ((l), (w)) # define ev_async_start(l,w) GEVAPI->async_start ((l), (w)) # define ev_async_stop(l,w) GEVAPI->async_stop ((l), (w)) # define ev_async_send(l,w) GEVAPI->async_send ((l), (w)) #endif static struct EVAPI *GEVAPI; #define I_EV_API(YourName) \ STMT_START { \ SV *sv = 
perl_get_sv ("EV::API", 0); \ if (!sv) croak ("EV::API not found"); \ GEVAPI = (struct EVAPI*) SvIV (sv); \ if (GEVAPI->ver != EV_API_VERSION \ || GEVAPI->rev < EV_API_REVISION) \ croak ("EV::API version mismatch (%d.%d vs. %d.%d) -- please recompile '%s'", \ (int)GEVAPI->ver, (int)GEVAPI->rev, EV_API_VERSION, EV_API_REVISION, YourName); \ } STMT_END #endif EV-4.33/MANIFEST0000644000000000000000000000131613634420051011603 0ustar rootrootREADME Changes MANIFEST COPYING Makefile.PL typemap schmorp.h EV.xs EV.pm EV/EVAPI.h EV/MakeMaker.pm t/00_load.t t/01_timer.t t/02_once.t t/03_keepalive.t t/04_stat.t t/05_priority.t t/06_loop_once.t t/07_loop_timer.t t/08_async.t t/09_brandon.t #t/10_nheap.t # too timing-sensitive t/11_signal.t libev/LICENSE libev/README libev/Changes libev/ev.c libev/ev.h libev/ev_vars.h libev/ev_wrap.h libev/ev_win32.c libev/ev_select.c libev/ev_poll.c libev/ev_epoll.c libev/ev_linuxaio.c libev/ev_iouring.c libev/ev_kqueue.c libev/ev_port.c libev/ev.pod META.yml Module YAML meta-data (added by MakeMaker) META.json Module JSON meta-data (added by MakeMaker) EV-4.33/schmorp.h0000644000000000000000000002455613534461107012317 0ustar rootroot#ifndef SCHMORP_PERL_H_ #define SCHMORP_PERL_H_ /* WARNING * This header file is a shared resource between many modules. * perl header files MUST already be included. */ #include #include #if defined(WIN32 ) || defined(_MINIX) # define SCHMORP_H_PREFER_SELECT 1 #endif #if !SCHMORP_H_PREFER_SELECT # include #endif /* useful stuff, used by schmorp mostly */ #include "patchlevel.h" #define PERL_VERSION_ATLEAST(a,b,c) \ (PERL_REVISION > (a) \ || (PERL_REVISION == (a) \ && (PERL_VERSION > (b) \ || (PERL_VERSION == (b) && PERL_SUBVERSION >= (c))))) #ifndef PERL_MAGIC_ext # define PERL_MAGIC_ext '~' #endif #if !PERL_VERSION_ATLEAST (5,6,0) # ifndef PL_ppaddr # define PL_ppaddr ppaddr # endif # ifndef call_sv # define call_sv perl_call_sv # endif # ifndef get_sv # define get_sv perl_get_sv # endif # ifndef get_cv # define get_cv perl_get_cv # endif # ifndef IS_PADGV # define IS_PADGV(v) 0 # endif # ifndef IS_PADCONST # define IS_PADCONST(v) 0 # endif #endif /* use NV for 32 bit perls as it allows larger offsets */ #if IVSIZE >= 8 typedef IV VAL64; # define SvVAL64(sv) SvIV (sv) # define newSVval64(i64) newSViv (i64) # define sv_setval64(sv,i64) sv_setiv ((sv), (i64)) #else typedef NV VAL64; # define SvVAL64(sv) SvNV (sv) # define newSVval64(i64) newSVnv (i64) # define sv_setval64(sv,i64) sv_setnv ((sv), (i64)) #endif /* typemap for the above */ /* VAL64 T_VAL64 INPUT T_VAL64 $var = ($type)SvVAL64 ($arg); OUTPUT T_VAL64 $arg = newSVval64 ($var); */ /* 5.11 */ #ifndef CxHASARGS # define CxHASARGS(cx) (cx)->blk_sub.hasargs #endif /* 5.10.0 */ #ifndef SvREFCNT_inc_NN # define SvREFCNT_inc_NN(sv) SvREFCNT_inc (sv) #endif /* 5.8.8 */ #ifndef GV_NOTQUAL # define GV_NOTQUAL 0 #endif #ifndef newSV # define newSV(l) NEWSV(0,l) #endif #ifndef CvISXSUB_on # define CvISXSUB_on(cv) (void)cv #endif #ifndef CvISXSUB # define CvISXSUB(cv) (CvXSUB (cv) ? 
TRUE : FALSE) #endif #ifndef Newx # define Newx(ptr,nitems,type) New (0,ptr,nitems,type) #endif /* 5.8.7 */ #ifndef SvRV_set # define SvRV_set(s,v) SvRV(s) = (v) #endif static int s_signum (SV *sig) { #ifndef SIG_SIZE /* kudos to Slaven Rezic for the idea */ static char sig_size [] = { SIG_NUM }; # define SIG_SIZE (sizeof (sig_size) + 1) #endif dTHX; int signum; SvGETMAGIC (sig); for (signum = 1; signum < SIG_SIZE; ++signum) if (strEQ (SvPV_nolen (sig), PL_sig_name [signum])) return signum; signum = SvIV (sig); if (signum > 0 && signum < SIG_SIZE) return signum; return -1; } static int s_signum_croak (SV *sig) { int signum = s_signum (sig); if (signum < 0) { dTHX; croak ("%s: invalid signal name or number", SvPV_nolen (sig)); } return signum; } static int s_fileno (SV *fh, int wr) { dTHX; SvGETMAGIC (fh); if (SvROK (fh)) { fh = SvRV (fh); SvGETMAGIC (fh); } if (SvTYPE (fh) == SVt_PVGV) return PerlIO_fileno (wr ? IoOFP (sv_2io (fh)) : IoIFP (sv_2io (fh))); if (SvOK (fh) && (SvIV (fh) >= 0) && (SvIV (fh) < 0x7fffffffL)) return SvIV (fh); return -1; } static int s_fileno_croak (SV *fh, int wr) { int fd = s_fileno (fh, wr); if (fd < 0) { dTHX; croak ("%s: illegal fh argument, either not an OS file or read/write mode mismatch", SvPV_nolen (fh)); } return fd; } static SV * s_get_cv (SV *cb_sv) { dTHX; HV *st; GV *gvp; return (SV *)sv_2cv (cb_sv, &st, &gvp, 0); } static SV * s_get_cv_croak (SV *cb_sv) { SV *cv = s_get_cv (cb_sv); if (!cv) { dTHX; croak ("%s: callback must be a CODE reference or another callable object", SvPV_nolen (cb_sv)); } return cv; } /*****************************************************************************/ /* gensub: simple closure generation utility */ #define S_GENSUB_ARG CvXSUBANY (cv).any_ptr /* create a closure from XS, returns a code reference */ /* the arg can be accessed via GENSUB_ARG from the callback */ /* the callback must use dXSARGS/XSRETURN */ static SV * s_gensub (pTHX_ void (*xsub)(pTHX_ CV *), void *arg) { CV *cv = (CV *)newSV (0); sv_upgrade ((SV *)cv, SVt_PVCV); CvANON_on (cv); CvISXSUB_on (cv); CvXSUB (cv) = xsub; S_GENSUB_ARG = arg; return newRV_noinc ((SV *)cv); } /*****************************************************************************/ /* portable pipe/socketpair */ #if defined(USE_SOCKETS_AS_HANDLES) || PERL_VERSION_ATLEAST(5,18,0) # define S_TO_HANDLE(x) ((HANDLE)win32_get_osfhandle (x)) #else # define S_TO_HANDLE(x) ((HANDLE)x) #endif #ifdef _WIN32 /* taken almost verbatim from libev's ev_win32.c */ /* oh, the humanity! */ static int s_pipe (int filedes [2]) { dTHX; struct sockaddr_in addr = { 0 }; int addr_size = sizeof (addr); struct sockaddr_in adr2; int adr2_size = sizeof (adr2); SOCKET listener; SOCKET sock [2] = { -1, -1 }; if ((listener = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) return -1; addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK); addr.sin_port = 0; if (bind (listener, (struct sockaddr *)&addr, addr_size)) goto fail; if (getsockname (listener, (struct sockaddr *)&addr, &addr_size)) goto fail; if (listen (listener, 1)) goto fail; if ((sock [0] = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) goto fail; if (connect (sock [0], (struct sockaddr *)&addr, addr_size)) goto fail; if ((sock [1] = accept (listener, 0, 0)) < 0) goto fail; /* windows vista returns fantasy port numbers for getpeername. 
* example for two interconnected tcp sockets: * * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364 * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363 * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363 * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365 * * wow! tridirectional sockets! * * this way of checking ports seems to work: */ if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size)) goto fail; if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size)) goto fail; errno = WSAEINVAL; if (addr_size != adr2_size || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */ || addr.sin_port != adr2.sin_port) goto fail; closesocket (listener); #if defined(USE_SOCKETS_AS_HANDLES) || PERL_VERSION_ATLEAST(5,18,0) /* when select isn't winsocket, we also expect socket, connect, accept etc. * to work on fds */ filedes [0] = sock [0]; filedes [1] = sock [1]; #else filedes [0] = _open_osfhandle (sock [0], 0); filedes [1] = _open_osfhandle (sock [1], 0); #endif return 0; fail: closesocket (listener); if (sock [0] != INVALID_SOCKET) closesocket (sock [0]); if (sock [1] != INVALID_SOCKET) closesocket (sock [1]); return -1; } #define s_socketpair(domain,type,protocol,filedes) s_pipe (filedes) static int s_fd_blocking (int fd, int blocking) { u_long nonblocking = !blocking; return ioctlsocket ((SOCKET)S_TO_HANDLE (fd), FIONBIO, &nonblocking); } #define s_fd_prepare(fd) s_fd_blocking (fd, 0) #else #define s_socketpair(domain,type,protocol,filedes) socketpair (domain, type, protocol, filedes) #define s_pipe(filedes) pipe (filedes) static int s_fd_blocking (int fd, int blocking) { return fcntl (fd, F_SETFL, blocking ? 0 : O_NONBLOCK); } static int s_fd_prepare (int fd) { return s_fd_blocking (fd, 0) || fcntl (fd, F_SETFD, FD_CLOEXEC); } #endif #if HAVE_EVENTFD # include #else # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) # define SCHMORP_H_HAVE_EVENTFD 1 /* our minimum requirement is glibc 2.7 which has the stub, but not the header */ # include # ifdef __cplusplus extern "C" { # endif int eventfd (unsigned int initval, int flags); # ifdef __cplusplus } # endif # else # define eventfd(initval,flags) -1 # endif #endif typedef struct { int fd[2]; /* read, write fd, might be equal */ int len; /* write length (1 pipe/socket, 8 eventfd) */ } s_epipe; static int s_epipe_new (s_epipe *epp) { s_epipe ep; ep.fd [0] = ep.fd [1] = eventfd (0, 0); if (ep.fd [0] >= 0) { s_fd_prepare (ep.fd [0]); ep.len = 8; } else { if (s_pipe (ep.fd)) return -1; if (s_fd_prepare (ep.fd [0]) || s_fd_prepare (ep.fd [1])) { dTHX; close (ep.fd [0]); close (ep.fd [1]); return -1; } ep.len = 1; } *epp = ep; return 0; } static void s_epipe_destroy (s_epipe *epp) { dTHX; close (epp->fd [0]); if (epp->fd [1] != epp->fd [0]) close (epp->fd [1]); epp->len = 0; } static void s_epipe_signal (s_epipe *epp) { #ifdef _WIN32 /* perl overrides send with a function that crashes in other threads. * unfortunately, it overrides it with an argument-less macro, so * there is no way to force usage of the real send function. * incompetent windows programmers - is this redundant? 
*/ DWORD dummy; WriteFile (S_TO_HANDLE (epp->fd [1]), (LPCVOID)&dummy, 1, &dummy, 0); #else static uint64_t counter = 1; /* some modules accept fd's from outside, support eventfd here */ if (write (epp->fd [1], &counter, epp->len) < 0 && errno == EINVAL && epp->len != 8) write (epp->fd [1], &counter, (epp->len = 8)); #endif } static void s_epipe_drain (s_epipe *epp) { dTHX; char buf [9]; #ifdef _WIN32 recv (epp->fd [0], buf, sizeof (buf), 0); #else read (epp->fd [0], buf, sizeof (buf)); #endif } /* like new, but dups over old */ static int s_epipe_renew (s_epipe *epp) { dTHX; s_epipe epn; if (epp->fd [1] != epp->fd [0]) close (epp->fd [1]); if (s_epipe_new (&epn)) return -1; if (epp->len) { if (dup2 (epn.fd [0], epp->fd [0]) < 0) croak ("unable to dup over old event pipe"); /* should not croak */ close (epn.fd [0]); if (epn.fd [0] == epn.fd [1]) epn.fd [1] = epp->fd [0]; epn.fd [0] = epp->fd [0]; } *epp = epn; return 0; } #define s_epipe_fd(epp) ((epp)->fd [0]) static int s_epipe_wait (s_epipe *epp) { dTHX; #if SCHMORP_H_PREFER_SELECT fd_set rfd; int fd = s_epipe_fd (epp); FD_ZERO (&rfd); FD_SET (fd, &rfd); return PerlSock_select (fd + 1, &rfd, 0, 0, 0); #else /* poll is preferable on posix systems */ struct pollfd pfd; pfd.fd = s_epipe_fd (epp); pfd.events = POLLIN; return poll (&pfd, 1, -1); #endif } #endif EV-4.33/Changes0000644000000000000000000006212213634412224011752 0ustar rootrootRevision history for Perl extension EV Changes marked with (libev) are changes in libev, and might have more documentation in the libev Changes file. 4.33 Wed Mar 18 13:31:12 CET 2020 - the enable assertion makefile question failed to enable assertions due to a typo. - try harder to avoid perl's assert () which does not actually behave correctly. - updated libecb to make it compile under more windows environments. 4.32 Fri Jan 24 14:21:35 CET 2020 - (libev) fixed a bug introduced in 4.31 when timerfds and signalfds were used at the same time. - (libev) 0 is now allowed as requested event mask in io watchers. - (libev) once-per-minute wakeups will now be optimized away when timerfds are available - use the new ev_io_modify in EV::IO->events, instead of the potentially slower ev_io_set. - add a bunch of mutator methods, mostly untested(!): EV::Timer->repeat, EV::Periodic->offset, EV::Periodic->interval, EV::Periodic->reschedule_cb. - minor optimisations and bugfixes. 4.31 Fri Dec 20 21:57:00 CET 2019 - (libev) handle backends with minimum wait time a bit better by not waiting in the presence of already-expired timers (behaviour reported by Felipe Gasper). - (libev) use timerfd to detect timejumps. - (libev) new loop flag: EVFLAG_NOTIMERFD. 4.30 Fri Nov 22 21:00:00 CET 2019 - (libev) use a different and hopefully even more portable test to disable io_uring when header files are too old, by directly testing LINUX_VERSION_CODE. - (libev) fix a bug in the io_uring backend that polled the wrong backend fd, causing it to not work in many cases. 4.29 Fri Nov 22 15:34:29 CET 2019 - (libev) add io uring autoconf and non-autoconf detection, the latter of which should disable io_uring compilation on old systems. 4.28 Tue Nov 19 13:55:39 CET 2019 - (libev) fix ev_port backend, thanks to David H. Gutteridge for - (libev) many bugfixes in linuxaio backend. - (libev) experimental io uring interface. reminding me to actually release the fix. - try to name ev.h more explicitly, to hopefully improve portability. - opportunistically round up wait times for poll and epoll backend, to avoid unnecessary loop iterations. 
- add build dependency on ev_linuxaio.c. - quickly (re)-ported to minix 3.3 before minix crashed again. 4.27 Thu Jun 27 09:39:58 CEST 2019 - (libev) completely rewritten linuxaio backend, maybe usable as a general-use backend. - (libev) use more aggressive assertions to catch more usage errors. - allow users to re-enable assert() in case it is disabled by perl (which is typically the case). 4.26 Mon Jun 24 23:39:40 CEST 2019 - (libev) included experimental linux aio backend. - allow the linux aio backend to be used by default only when explicitly configured during Makefile.PL time. 4.25 Fri Dec 21 08:04:26 CET 2018 - (libev) updated to libev 4.25, minor fixes and enhancements. - document the requirement to "use EV" when using EV::MakeMaker. - only use __register_atfork with glibc and uclibc, as musl defines __linux__, but doesn't implement the linux standard base ABI, nor makes itself detectable via a macro, both apparently by design, winning the "broken by design 2016 award" - well done. - correct EV::periodic example 24h after midnight -> one day after midnight (reported by Felix Ostmann). 4.22 Sun Dec 20 02:34:39 CET 2015 - (libev) when epoll detects unremovable fds in the fd set, rebuild only the epoll descriptor, not the signal pipe, to avoid SIGPIPE in ev_async_send. This doesn't solve it on fork, so document what needs to be done in ev_loop_fork (analyzed by Benjamin Mahler). - (libev) remove superfluous sys/timeb.h include on win32 (analyzed by Jason Madden). - updated libecb. 4.21 Mon Jul 13 21:47:33 CEST 2015 - allow argument in timer->again. - document timer->remaining. - document default repeat value for timer->set. 4.20 Sat Jun 20 13:07:34 CEST 2015 - added stability canary support. - truly rely on automatic configuration for clock_gettime and others. unfortunately, this doesn't help with activeperl and similar perls, which define _POSIX_TIMERS without actually implementing any of it. - (libev, ecb) make it compile as C++ again. - (libev) fix a potential aliasing issue when accessing watcher cbs. 4.18 Sat Sep 6 20:37:23 CEST 2014 - use slightly better weay to find includes, to support multiarch on newer perls. 4.17 Fri Apr 11 06:22:38 CEST 2014 - perl5porters broke Async::Interrupt, BDB, EV, IO::AIO, OpenCL without warning by switching the meaning of USE_SOCKETS_AS_HANDLES in 5.18. What's so attractive about giving a shit about backwards compatibility - I will never understand. 4.16 Sat Mar 8 16:49:05 CET 2014 - make sure EV::CHECK is available at all times. - avoid a spurious warning when perl -w overrides "no warnings". - use perl's signal implementation on WIN32. - ensure extern "C" function pointers are used for externally-visible pointers. - (libev) mark event pipe fd as cloexec after a fork (analyzed by Sami Farin). - (ecb) support m68k, m88k and sh (patch by Miod Vallat). - (libev) in the absence of autoconf, do not use the clock syscall on glibc >= 2.17 (avoids the syscall AND -lrt on systems doing clock_gettime in userspace). - (ecb) work around memory barriers and volatile apparently both being broken in visual studio 2008 and later (analysed and patch by Nicolas Noble). 4.15 Fri Mar 1 12:15:53 CET 2013 - (libev) upgrade to 4.15 - too many changes to list. - EV::run now returns a boolean. - API version 5:1. - document that cleanup watchers are not available via perl. - cast I32 to int in error message printf. - remove dependencies on librt and libpthreads on GNU/Linux. 4.11 Sat Feb 4 19:56:26 CET 2012 - (libev) implement memory fences for (obsolete) llvm-gcc. 
4.10 Thu Jan 19 18:54:23 CET 2012 - (libev) fix a race where the workaround against the epoll fork bugs caused signals to not be handled anymore. - (libev) correct backend_fudge for most backends, and implement a windows specific workaround to avoid looping because we call both select and Sleep, both with different time resolutions. - e_new wasn't declared static (causing very minor .so bloat). - replace more old api names by new ones. 4.03 Tue Jan 11 14:51:05 CET 2011 - do not avoid the clock_gettime call on GNU/Linux anymore, as EV links against -lpthread anyways - as a result, EV might now take advantage of fast userspace clock_gettime implementations, but also links against -lrt. - (libev) lots of event port bug workarounds. - (libev) officially support files in I/O watchers. - (libev) new function ev_feed_signal. - fix documentation parts still refering to the 3.x API. 4.02 Thu Dec 30 08:27:41 CET 2010 - the revents argument did not stringify correctly, as only the numeric value was updated, while the string value was kept from previous invocations. 4.01 Sun Dec 5 12:42:13 CET 2010 - fully support EV_COMPAT3=0. - default_fork was stupidly defined as inline. - ask cpan to upgrade AnyEvent if < 5.29. - support EV_EXTRA_DEFS during configuration. - support -DEV_NO_LOOPS for snakker build. 4.00 Mon Oct 25 13:30:09 CEST 2010 - many API changes, see the manual. - (libev) lots and lots of bugfixes, see the ev documentation. - fix a bug where inotify usage would parse the same event multiple times, causing various forms of breakage. - greatly reduce stack usage for inotify (8kb to <0.5kb). - expose ev_depth and ev_verify via the XS API. - implement ev_cleanup watchers. - (libev) ev_embed_stop did not correctly stop the watcher. - (libev) disable poll backend on AIX. - (libev) rename EV_TIMEOUT to EV_TIMER. - (libev) add section on accept() problems to the manpage. - (libev) no child watchers on win32. - make code more aliasing compliant, in case perl is ever translated to C. - document the EV::CHECK runtime unavailability. - ported to minix 3.1.7. 3.9 Thu Dec 31 07:59:59 CET 2009 - disable t/07* under automatic testing. - increase t/09* timeouts as netbsd has *horribly* broken select/kevent that *usually* sleep >>0.6s instead of 0.3. - add constants EV::FLAG_NOSIGFD, EV::FLAG_NOINOTIFY and EV::BACKEND_ALL. - (libev) signalfd is no longer used by default. - (libev) backport inotify code to C89. - (libev) inotify file descriptors could leak into child processes. - (libev) ev_stat watchers could keep an errornous extra ref on the loop. - (libev) take advantage of inotify_init1, if available. - (libev) the signal handling pipe wasn't always initialised under windows. - changed minimum glibc requirement from glibc 2.9 to 2.7, for signalfd. - (libev) only replace ev_stat.prev when we detect an actual difference. 3.8 Sun Aug 9 15:30:10 CEST 2009 - implement $loop->signal/signal_ns. - (libev) incompatible change: do not necessarily reset signal handler to SIG_DFL when a sighandler is stopped. - (libev) ev_default_destroy did not properly free or zero some members, potentially causing crashes and memory corruption on repated ev_default_destroy/ev_default_loop calls. - (libev) take advantage of signalfd on GNU/Linux systems. - (libev) document that the signal mask might be in an unspecified state when using libev's signal handling. - (libev) take advantage of some GNU/Linux calls to set cloexec/nonblock on fd creation, to avoid race conditions. 
- implement internal glue code to interface more efficiently with AnyEvent. 3.7 Fri Jul 17 16:49:16 CEST 2009 - add EV::loop_verify and EV::loop_depth. - use output filehandle for i/o watchers waiting for EV_WRITE, otherwise input filehandle. - use common schmorp.h header. - add EV::Timer->remaining. - allow for subclassing of EV::Loop (see the exciting EV::Loop::Async module for an example). - added EV::invoke_pending and EV::pending_count. - (libev) ev_unloop and ev_loop wrongly used a global variable to exit loops, instead of using a per-loop variable. - (libev) the ev_set_io_collect_interval interpretation has changed. - add new functionality: EV::invoke_pending, EV::pending_count. - add $timer->remaining. - add EV::loop_depth. - (libev) calling unloop in fork/prepare watchers will no longer poll for new events. - (libev) use GetSystemTimeAsFileTime instead of _timeb on windows, for slightly higher accuracy. - (libev) actually 0-initialise struct sigaction when installing signals. 3.6 Tue Apr 28 02:50:37 CEST 2009 - keepalive(0) could decrease the refcount of the loop permanently. - add ev_suspend/ev_resume, also make ev_now_update accessible via the XS API. - most EV:: constants were missing and have been added :/. - add EV::VERSION_MAJOR/VERSION_MINOR constants. - (libev) multiple timers becoming ready within an event loop iteration will be invoked in the "correct" order now. - (libev) do not leave the event loop early just because we have no active watchers, fixing a problem when embedding a kqueue loop that has active kernel events but no registered watchers (reported by blacksand blacksand). - (libev) correctly zero the idx values for arrays, so destroying and reinitialising the default loop actually works (patch by Malek Hadj-Ali). - (libev) new EV::CUSTOM revents flag for use by applications. - (libev) add documentation section about priorites. - (libev) add a glossary to the dcoumentation. - (libev) extend the ev_fork description slightly. - (libev) optimize a jump out of call_pending. - t/03_keepalive could fail when there was no actual error. 3.53 Sun Feb 15 02:38:20 CET 2009 - (libev) on win32, the event loop creation could randomly fail due to an initialised variable having the wrong value. - (libev) probe for CLOCK_REALTIME support at runtime as well and fall back to gettimeofday if there is an error, to support older operating systems with newer header files/libraries. - prefer gettimeofday over clock_gettime by default. 3.52 Wed Jan 7 21:46:14 CET 2009 - (libev) fix some issues in the select backend when in fd_set mode. - (libev) due to a thinko, instead of disabling everything but select on the borked OS X platform, everything but select was allowed (reported by Emanuele Giaquinta). - (libev) actually verify that local and remote port are matching in libev's socketpair emulation, which makes denial-of-service attacks harder (but not impossible - it's windows). Make sure it even works under vista, which thinks that getpeer/sockname should return fantasy port numbers. 3.51 Wed Dec 24 23:01:59 CET 2008 - do not cache the arguments passed to callbacks if the refcount indicates that the callback has stolen them. - (libev) try to avoid librt on GNU/Linux. - (libev) check port_getn return value dfferently, might potentially avoid problems. - (libev) fix a bug with stat watchers possibly causing freezes. - (libev) work around OS X 10.5 breaking poll, now select is the only viable choice left on that pile of garbage. 
- play tester whore: disable some tests that typically fail only on cpan tester machines. 3.49 Wed Nov 19 11:26:53 CET 2008 - fix typos in manpage (Alex Efros). - increase timing even further, for the ever-overloaded cpan-tester machines, and to the detriment of everybody else who wants a fast make test. - possible 5.6 compatibility. - (libev) use inotify even on buggy kernels, but don't rely on it. - (libev) use inotify only as an added hint on network filesystems. 3.48 Thu Oct 30 09:09:48 CET 2008 - (libev) use a generation counter to detect spurious epoll events and recreate the kernel in such a case (sorry, it's slow, but I didn't design epoll...). - (libev) optimise away an EPOLL_CTL_ADD/MOD combo in the epoll backend in some cases. - (libev) use memset to initialise most arrays now and do away with the init functions. - speed up event callback invocation further (~15%), by not creating the object reference each time. - EV::sleep was documented, but not implemented. fun. 3.45 Tue Oct 21 22:20:39 CEST 2008 - (libev) disable inotify usage on linux <2.6.25 (kernel bug). - (libev) ev_embed will now automatically follow fork. - (libev) ev_once will now pass both io and timeout events to the callback when both happen concurrently, instead of giving one precedence. - install C library documentation as EV::libev manpage (sorry, oesi). - (libev) fix a minor performance bug in ev_stat handling. 3.44 Mon Sep 29 05:16:31 CEST 2008 - (libev) add EV::now_update and $loop->now_update. 3.431 Sun Jul 13 00:19:02 CEST 2008 - made the EV::embed callback optional again. reported by Vladimir Timofeev. 3.43 Tue Jul 8 20:56:31 CEST 2008 - disabled warnings in EV.pm, the CHECK issue is now understood and harmless. 3.42 Mon May 26 07:36:46 CEST 2008 - (libev) work around another bug in windows perls and windows itself: failed connects do NOT set read or write flags in select, but this version of libev will. 3.41 Thu May 22 01:39:40 CEST 2008 - (libev) fix many heap-related bugs (timers, periodics). - (libev) improve timing stability of timers and periodics. - expose ev_loop_verify to perl code. - clarify documentation for periodic reschedule callbacks. - verify that the passed callback argument is indeed a code reference, for earlier error reporting and a nice calling speed increase (as well as saving memory). 3.4 Tue May 20 21:51:55 CEST 2008 - (libev) work around both a windows bug and a bug in perl's select on windows when not waiting for any file descriptors. - bundle ev.pod into the tarball, just to increase its size (and for the poor internetless person). 3.33 Sun May 18 12:43:04 CEST 2008 - (libev) use numerous enhancements such as a more cache-friendly 4-heap and heap index caching for timers. - remove undocumented ev_timer->at accessor. 3.31 Wed Apr 16 20:48:59 CEST 2008 - (libev) post-last-minute fix for ev_poll.c problem. 3.3 Wed Apr 16 19:04:47 CEST 2008 - (libev) linux eventfd support. - (libev) inline more with C99 compilers. - (libev) work around a number of bugs in valgrind. - (libev) work around broken realloc on openbsd and darwin. - added example to EV::MakeMaker manpage. - add async_pending method. 3.2 Wed Apr 2 17:08:16 CEST 2008 - "ported" again to the Microsoft "C" language. - relax testsuite timing for the benefit of freebsd users once more. - fix EVAPI's ev_time and ev_sleep macros. - (libev) fix select backend on 64 bit architetcures. - (libev) ev_loop flags are now local to each invocation. 3.1 Sat Mar 8 11:41:14 CET 2008 - add ev_sync_* and ev_fork_* to EVAPI.h. 
- provide EV::Embed->sweep method. - new watcher type: async (not very useful in perl). 3.0 Mon Jan 28 13:23:11 CET 2008 - upgrade to libev-3.0. - change child handler to incorporate tracing flag. 2.01 Mon Dec 31 01:59:19 CET 2007 - found a minor problem in the testsuite that is only caught by new Test::Harness versions (reported by Andreas König). 2.0 Sat Dec 22 17:47:03 CET 2007 - no longer force kqueue when enabled, enable it on more systems (as its now by default not used on most). - expose fork watchers. - switch to libev's MULTIPLICITY API. - use a slightly different include file name strategy. - get rid of (unused) libevent emulation. - 5% watcher management speedup due to callback optimisation when inline closures are used, 5% slowdown due to multiplicity. - very minimal support for dynamic event loops and embed watchers. - fix name of prepare start/stop methods. - create and export EV::sleep and EV::set_*_collect_interval. - fix typos in manpage (Alex Efros). 1.86 Tue Dec 18 02:36:57 CET 2007 - add periodic->at methods. 1.85 Fri Dec 14 20:32:40 CET 2007 - further optimise epoll backend by adding a heuristic that avoids EPOLL_CTL_DEL calls if possible. - EV::signal->start was not async-signal safe. - optimise start/stop when the fd hasn't changed. 1.8 Tue Dec 11 22:17:46 CET 2007 - API version 3:0. - reduced fudge factor to zero for select, poll, epoll and kqueue: your system better be posix-compliant even in its extensions :-> - improve long-term numerical stability in periodic watchers by introducing a separate offset value instead of reusing at. - recalculate real/monotonic clocks before blocking fully to avoid blocking for longer than necessary. - fix bugs in the heap functions. this rarely lead to illegal heap orderings. 1.72 Sat Dec 8 15:31:26 CET 2007 - add dummy loop arguments to some EVAPI functions so that clients can use EV_A and EV_DEFAULT. - expose ev_clear_pending. - renamed trigger to invoke. 1.71 Fri Dec 7 19:10:24 CET 2007 - changed/implemented idle watcher priority. - allow out-of-range priorities to be set. - fix ->priority to actually work on started watchers. - improved testsuite. 1.6 Wed Dec 5 15:06:20 CET 2007 - add a missing SPAGAIN. - ripped out EV::DNS, use EV::ADNS for a better backend. - make ev_time callable from EVAPI.h. - add EV::loop_count. - some space optimisations. 1.5 Wed Nov 28 20:19:09 CET 2007 - add inotify backend. - make testsuite even less sensible to timing issues, add more stat tests. - add ->attr, ->prev, ->stat calls to stat watcher. 1.4 Tue Nov 27 17:35:27 CET 2007 - work around a linux 2.4 kernel bug in child handlers. - implement stat watcher interface. - implement fork watcher interface. - io->set did not keep the fh alive. - actively check signal numbers/names to be valid. - cleanups, minor fixes, new bugs. - work around windows bugs in the testsuite. 1.3 Sun Nov 25 10:46:57 CET 2007 - stopping idle/check/prepare watchers could cause data corruption. - implement and document EV::once. - improved documentation, verify that netbsd indeed has the only working kqueue implementation (out of darwin, freebsd, netbsd and openbsd). Praise them! - fix the data method so it might work. - expose ev_ref/ev_unref to the C API. - expose ref/unref in form of the ->keepalive method to perl. - minor bugfixes and portability fixes. 1.2 Thu Nov 22 05:44:09 CET 2007 - disable kqueue by default on !netbsd, as its broken on freebsd, darwin and openbsd and thus almost everywhere. - add some allowance in t/01_timer.t for the uneven monotonic vs. 
realtime clock tick on at least freebsd. - add -lsocket -lnsl on solaris in case the perl guy forgot to configure them. 1.1 Wed Nov 21 06:08:48 CET 2007 - improved timer test to include periodics and withstand small timing variations. - many minor tweaks to libev. 1.0 Fri Nov 16 14:51:59 CET 2007 - require AnyEvent update if AnyEvent is installed. - add solaris 10 port-based backend. - add child_ns and fix check_ns. - treat yes/no as enforcement, not as hint, when configuring, documentation update (reported by Andy Grundman). 0.9 Wed Nov 14 22:24:49 CET 2007 - changed LIBEV_METHODS to LIBEV_FLAGS and the way this is used inside libev. - many, many bugfixes. - add unloop constants. - add timer test. - ev_loop will now terminate immediately when no watchers are active. 0.8 Mon Nov 12 02:28:46 CET 2007 - fix "testsuite" again :(). - fix check/idle/prepare/child watcher stop. - enourmously many fixes. - rewritten select backend (mostly for win32). - cache socket handles on win32. - provide considerably finer control over configuration. 0.7 Fri Nov 9 20:37:58 CET 2007 - move AnyEvent adaptor into the AnyEvent module. - use private copy of evdns.[ch]. - many minor fixes. 0.6 Thu Nov 8 18:23:43 CET 2007 - (libev) better native win32 support. - fix idle watchers. - implement and document periodic reschedule callbacks. - do not run dns test on !linux platforms (actually to exclude win32). - fix (unused in EV :) poll backend. 0.51 Tue Nov 6 19:50:22 CET 2007 - fix kqueue/poll compilation issue. - work around design issues in kqueue. - enable kqueue by default, seems to work. 0.5 Tue Nov 6 17:37:44 CET 2007 - add signal and pid mutators. - add rstatus/rpid accessors. - updated libev (lower cost for clock monotonic). - support event priorities. - try to find SIG_SIZE on perls that don't have it (Slaven Rezic). - improved signal handling, fixed child watchers. - experimentally add kqueue backend, completely untested. - ported to cygwin and native win32. 0.1 Thu Nov 1 18:29:22 CET 2007 - replaced libevent by libev (total rewrite). - many bugfixes w.r.t. libevent. - new watcher types: periodic, check, prepare, child, idle. - performance optimisations. - added interactive configuration. - added fork support. 0.03 Mon Oct 29 20:52:50 CET 2007 - add timed_io convenience constructors. - improve documentation. - support signal names in addition to signal numbers. - support signal anyevent watchers. - vastly improved testsuite (its all relative :). - add EV::MakeMaker, beginning of C-level API. - force strings to byte form. 0.02 Sun Oct 28 07:40:21 CET 2007 - call $EV::DIED in case a callback throws an exception. - add const char * to typemap for possible 5.6 compatibility. 0.01 Sat Oct 27 19:10:18 CEST 2007 - initial release. 0.00 Fri Oct 26 11:12:57 CEST 2007 - original version; cloned from JSON::XS EV-4.33/Makefile.PL0000644000000000000000000003654613623026014012440 0ustar rootrootuse strict qw(vars subs); use Canary::Stability EV => 1, 5.008002; use Config; use ExtUtils::MakeMaker; sub have_inc($) { scalar grep -r "$_/$_[0]", $Config{usrinc}, split / /, $Config{incpth} } my $DEFINE; unless (-e "libev/ev_iouring.c") { print < 5.29) : (); WriteMakefile( dist => { PREOP => 'pod2text EV.pm | tee README >$(DISTVNAME)/README; chmod -R u=rwX,go=rX . ;', COMPRESS => 'gzip -9v', SUFFIX => '.gz', }, depend => { "EV.c" => "EV/EVAPI.h " . "libev/ev.c libev/ev.h libev/ev_epoll.c libev/ev_select.c libev/ev_kqueue.c libev/ev_poll.c libev/ev_linuxaio.c " . 
"libev/ev_vars.h libev/ev_wrap.h", }, INC => "-Ilibev", DEFINE => "$DEFINE", NAME => "EV", LIBS => [$LIBS], PREREQ_PM => { @anyevent, "common::sense" => 0, }, CONFIGURE_REQUIRES => { "ExtUtils::MakeMaker" => 6.52, "Canary::Stability" => 0 }, VERSION_FROM => "EV.pm", PM => { 'EV.pm' => '$(INST_LIB)/EV.pm', 'EV/EVAPI.h' => '$(INST_LIB)/EV/EVAPI.h', 'EV/MakeMaker.pm' => '$(INST_LIB)/EV/MakeMaker.pm', 'libev/ev.h' => '$(INST_LIB)/EV/ev.h', 'libev/ev.pod' => '$(INST_LIB)/EV/libev.pod', }, MAN3PODS => { 'EV.pm' => '$(INST_MAN3DIR)/EV.$(MAN3EXT)', 'EV/MakeMaker.pm' => '$(INST_MAN3DIR)/EV::MakeMaker.$(MAN3EXT)', 'libev/ev.pod' => '$(INST_MAN3DIR)/EV::libev.$(MAN3EXT)', }, ); EV-4.33/COPYING0000644000000000000000000000020111347735662011515 0ustar rootrootThis module is licensed under the same terms as perl itself. (please note that the embedded libev uses a 2-clause BSD license) EV-4.33/README0000644000000000000000000012577613634420051011353 0ustar rootrootNAME EV - perl interface to libev, a high performance full-featured event loop SYNOPSIS use EV; # TIMERS my $w = EV::timer 2, 0, sub { warn "is called after 2s"; }; my $w = EV::timer 2, 2, sub { warn "is called roughly every 2s (repeat = 2)"; }; undef $w; # destroy event watcher again my $w = EV::periodic 0, 60, 0, sub { warn "is called every minute, on the minute, exactly"; }; # IO my $w = EV::io *STDIN, EV::READ, sub { my ($w, $revents) = @_; # all callbacks receive the watcher and event mask warn "stdin is readable, you entered: ", ; }; # SIGNALS my $w = EV::signal 'QUIT', sub { warn "sigquit received\n"; }; # CHILD/PID STATUS CHANGES my $w = EV::child 666, 0, sub { my ($w, $revents) = @_; my $status = $w->rstatus; }; # STAT CHANGES my $w = EV::stat "/etc/passwd", 10, sub { my ($w, $revents) = @_; warn $w->path, " has changed somehow.\n"; }; # MAINLOOP EV::run; # loop until EV::break is called or all watchers stop EV::run EV::RUN_ONCE; # block until at least one event could be handled EV::run EV::RUN_NOWAIT; # try to handle same events, but do not block BEFORE YOU START USING THIS MODULE If you only need timer, I/O, signal, child and idle watchers and not the advanced functionality of this module, consider using AnyEvent instead, specifically the simplified API described in AE. When used with EV as backend, the AE API is as fast as the native EV API, but your programs/modules will still run with many other event loops. DESCRIPTION This module provides an interface to libev (). While the documentation below is comprehensive, one might also consult the documentation of libev itself ( or perldoc EV::libev) for more subtle details on watcher semantics or some discussion on the available backends, or how to force a specific backend with "LIBEV_FLAGS", or just about in any case because it has much more detailed information. This module is very fast and scalable. It is actually so fast that you can use it through the AnyEvent module, stay portable to other event loops (if you don't rely on any watcher types not available through it) and still be faster than with any other event loop currently supported in Perl. PORTING FROM EV 3.X to 4.X EV version 4 introduces a number of incompatible changes summarised here. According to the depreciation strategy used by libev, there is a compatibility layer in place so programs should continue to run unchanged (the XS interface lacks this layer, so programs using that one need to be updated). This compatibility layer will be switched off in some future release. 
All changes relevant to Perl are renames of symbols, functions and methods: EV::loop => EV::run EV::LOOP_NONBLOCK => EV::RUN_NOWAIT EV::LOOP_ONESHOT => EV::RUN_ONCE EV::unloop => EV::break EV::UNLOOP_CANCEL => EV::BREAK_CANCEL EV::UNLOOP_ONE => EV::BREAK_ONE EV::UNLOOP_ALL => EV::BREAK_ALL EV::TIMEOUT => EV::TIMER EV::loop_count => EV::iteration EV::loop_depth => EV::depth EV::loop_verify => EV::verify The loop object methods corresponding to the functions above have been similarly renamed. MODULE EXPORTS This module does not export any symbols. EVENT LOOPS EV supports multiple event loops: There is a single "default event loop" that can handle everything including signals and child watchers, and any number of "dynamic event loops" that can use different backends (with various limitations), but no child and signal watchers. You do not have to do anything to create the default event loop: When the module is loaded a suitable backend is selected on the premise of selecting a working backend (which for example rules out kqueue on most BSDs). Modules should, unless they have "special needs" always use the default loop as this is fastest (perl-wise), best supported by other modules (e.g. AnyEvent or Coro) and most portable event loop. For specific programs you can create additional event loops dynamically. If you want to take advantage of kqueue (which often works properly for sockets only) even though the default loop doesn't enable it, you can *embed* a kqueue loop into the default loop: running the default loop will then also service the kqueue loop to some extent. See the example in the section about embed watchers for an example on how to achieve that. $loop = new EV::Loop [$flags] Create a new event loop as per the specified flags. Please refer to the "ev_loop_new ()" function description in the libev documentation (, or locally-installed as EV::libev manpage) for more info. The loop will automatically be destroyed when it is no longer referenced by any watcher and the loop object goes out of scope. If you are not embedding the loop, then Using "EV::FLAG_FORKCHECK" is recommended, as only the default event loop is protected by this module. If you *are* embedding this loop in the default loop, this is not necessary, as "EV::embed" automatically does the right thing on fork. $loop->loop_fork Must be called after a fork in the child, before entering or continuing the event loop. An alternative is to use "EV::FLAG_FORKCHECK" which calls this function automatically, at some performance loss (refer to the libev documentation). $loop->verify Calls "ev_verify" to make internal consistency checks (for debugging libev) and abort the program if any data structures were found to be corrupted. $loop = EV::default_loop [$flags] Return the default loop (which is a singleton object). Since this module already creates the default loop with default flags, specifying flags here will not have any effect unless you destroy the default loop first, which isn't supported. So in short: don't do it, and if you break it, you get to keep the pieces. BASIC INTERFACE $EV::DIED Must contain a reference to a function that is called when a callback throws an exception (with $@ containing the error). The default prints an informative message and continues. If this callback throws an exception it will be silently ignored. 
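For illustration, a minimal sketch of installing such a handler (the logging target is just an example, not a requirement):

        use EV;

        # called whenever a watcher callback dies; $@ still holds the error
        $EV::DIED = sub {
           warn "EV callback died: $@";
        };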
$flags = EV::supported_backends $flags = EV::recommended_backends $flags = EV::embeddable_backends Returns the set (see "EV::BACKEND_*" flags) of backends supported by this instance of EV, the set of recommended backends (supposed to be good) for this platform and the set of embeddable backends (see EMBED WATCHERS). EV::sleep $seconds Block the process for the given number of (fractional) seconds. $time = EV::time Returns the current time in (fractional) seconds since the epoch. $time = EV::now $time = $loop->now Returns the time the last event loop iteration has been started. This is the time that (relative) timers are based on, and referring to it is usually faster than calling EV::time. EV::now_update $loop->now_update Establishes the current time by querying the kernel, updating the time returned by "EV::now" in the process. This is a costly operation and is usually done automatically within "EV::run". This function is rarely useful, but when some event callback runs for a very long time without entering the event loop, updating libev's idea of the current time is a good idea. EV::suspend $loop->suspend EV::resume $loop->resume These two functions suspend and resume a loop, for use when the loop is not used for a while and timeouts should not be processed. A typical use case would be an interactive program such as a game: When the user presses "^Z" to suspend the game and resumes it an hour later, it would be best to handle timeouts as if no time had actually passed while the program was suspended. This can be achieved by calling "suspend" in your "SIGTSTP" handler, sending yourself a "SIGSTOP" and calling "resume" directly afterwards to resume timer processing. Effectively, all "timer" watchers will be delayed by the time spent between "suspend" and "resume", and all "periodic" watchers will be rescheduled (that is, they will lose any events that would have occurred while suspended). After calling "suspend" you must not call *any* function on the given loop other than "resume", and you must not call "resume" without a previous call to "suspend". Calling "suspend"/"resume" has the side effect of updating the event loop time (see "now_update"). $backend = EV::backend $backend = $loop->backend Returns an integer describing the backend used by libev (EV::BACKEND_SELECT or EV::BACKEND_EPOLL). $active = EV::run [$flags] $active = $loop->run ([$flags]) Begin checking for events and calling callbacks. It returns when a callback calls EV::break or the flags are nonzero (in which case the return value is true) or when there are no active watchers which reference the loop (keepalive is true), in which case the return value will be false. The return value can generally be interpreted as "if true, there is more work left to do". The $flags argument can be one of the following:
        0              as above
        EV::RUN_ONCE   block at most once (wait, but do not loop)
        EV::RUN_NOWAIT do not block at all (fetch/handle events but do not wait)
EV::break [$how] $loop->break ([$how]) When called with no arguments or an argument of EV::BREAK_ONE, makes the innermost call to EV::run return. When called with an argument of EV::BREAK_ALL, all calls to EV::run will return as fast as possible. When called with an argument of EV::BREAK_CANCEL, any pending break will be cancelled. $count = EV::iteration $count = $loop->iteration Return the number of times the event loop has polled for new events. Sometimes useful as a generation counter.
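As a rough sketch of how "EV::run" and "EV::break" work together (the five second delay is arbitrary):

        use EV;

        # break out of the innermost EV::run after roughly five seconds
        my $stopper = EV::timer 5, 0, sub {
           EV::break;   # equivalent to EV::break EV::BREAK_ONE
        };

        EV::run;        # returns once the timer callback has called EV::break
        warn "event loop iterations so far: ", EV::iteration, "\n";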
EV::once $fh_or_undef, $events, $timeout, $cb->($revents) $loop->once ($fh_or_undef, $events, $timeout, $cb->($revents)) This function rolls together an I/O and a timer watcher for a single one-shot event without the need for managing a watcher object. If $fh_or_undef is a filehandle or file descriptor, then $events must be a bitset containing either "EV::READ", "EV::WRITE" or "EV::READ | EV::WRITE", indicating the type of I/O event you want to wait for. If you do not want to wait for some I/O event, specify "undef" for $fh_or_undef and 0 for $events). If timeout is "undef" or negative, then there will be no timeout. Otherwise an "EV::timer" with this value will be started. When an error occurs or either the timeout or I/O watcher triggers, then the callback will be called with the received event set (in general you can expect it to be a combination of "EV::ERROR", "EV::READ", "EV::WRITE" and "EV::TIMER"). EV::once doesn't return anything: the watchers stay active till either of them triggers, then they will be stopped and freed, and the callback invoked. EV::feed_fd_event $fd, $revents $loop->feed_fd_event ($fd, $revents) Feed an event on a file descriptor into EV. EV will react to this call as if the readyness notifications specified by $revents (a combination of "EV::READ" and "EV::WRITE") happened on the file descriptor $fd. EV::feed_signal_event $signal Feed a signal event into the default loop. EV will react to this call as if the signal specified by $signal had occured. EV::feed_signal $signal Feed a signal event into EV - unlike "EV::feed_signal_event", this works regardless of which loop has registered the signal, and is mainly useful for custom signal implementations. EV::set_io_collect_interval $time $loop->set_io_collect_interval ($time) EV::set_timeout_collect_interval $time $loop->set_timeout_collect_interval ($time) These advanced functions set the minimum block interval when polling for I/O events and the minimum wait interval for timer events. See the libev documentation at (locally installed as EV::libev) for a more detailed discussion. $count = EV::pending_count $count = $loop->pending_count Returns the number of currently pending watchers. EV::invoke_pending $loop->invoke_pending Invoke all currently pending watchers. WATCHER OBJECTS A watcher is an object that gets created to record your interest in some event. For instance, if you want to wait for STDIN to become readable, you would create an EV::io watcher for that: my $watcher = EV::io *STDIN, EV::READ, sub { my ($watcher, $revents) = @_; warn "yeah, STDIN should now be readable without blocking!\n" }; All watchers can be active (waiting for events) or inactive (paused). Only active watchers will have their callbacks invoked. All callbacks will be called with at least two arguments: the watcher and a bitmask of received events. Each watcher type has its associated bit in revents, so you can use the same callback for multiple watchers. The event mask is named after the type, i.e. EV::child sets EV::CHILD, EV::prepare sets EV::PREPARE, EV::periodic sets EV::PERIODIC and so on, with the exception of I/O events (which can set both EV::READ and EV::WRITE bits). In the rare case where one wants to create a watcher but not start it at the same time, each constructor has a variant with a trailing "_ns" in its name, e.g. EV::io has a non-starting variant EV::io_ns and so on. 
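A small sketch of the "_ns" pattern (STDIN is only used as a convenient example handle):

        use EV;

        # create the watcher in the stopped state ...
        my $stdin_w = EV::io_ns *STDIN, EV::READ, sub {
           my ($w, $revents) = @_;
           warn "stdin became readable\n";
        };

        # ... and activate it only when input should actually be handled
        $stdin_w->start;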
Please note that a watcher will automatically be stopped when the watcher object is destroyed, so you *need* to keep the watcher objects returned by the constructors. Also, all methods changing some aspect of a watcher (->set, ->priority, ->fh and so on) automatically stop and start it again if it is active, which means pending events get lost. COMMON WATCHER METHODS This section lists methods common to all watchers. $w->start Starts a watcher if it isn't active already. Does nothing to an already active watcher. By default, all watchers start out in the active state (see the description of the "_ns" variants if you need stopped watchers). $w->stop Stop a watcher if it is active. Also clear any pending events (events that have been received but that didn't yet result in a callback invocation), regardless of whether the watcher was active or not. $bool = $w->is_active Returns true if the watcher is active, false otherwise. $current_data = $w->data $old_data = $w->data ($new_data) Queries a freely usable data scalar on the watcher and optionally changes it. This is a way to associate custom data with a watcher: my $w = EV::timer 60, 0, sub { warn $_[0]->data; }; $w->data ("print me!"); $current_cb = $w->cb $old_cb = $w->cb ($new_cb) Queries the callback on the watcher and optionally changes it. You can do this at any time without the watcher restarting. $current_priority = $w->priority $old_priority = $w->priority ($new_priority) Queries the priority on the watcher and optionally changes it. Pending watchers with higher priority will be invoked first. The valid range of priorities lies between EV::MAXPRI (default 2) and EV::MINPRI (default -2). If the priority is outside this range it will automatically be normalised to the nearest valid priority. The default priority of any newly-created watcher is 0. Note that the priority semantics have not yet been fleshed out and are subject to almost certain change. $w->invoke ($revents) Call the callback *now* with the given event mask. $w->feed_event ($revents) Feed some events on this watcher into EV. EV will react to this call as if the watcher had received the given $revents mask. $revents = $w->clear_pending If the watcher is pending, this function clears its pending status and returns its $revents bitset (as if its callback was invoked). If the watcher isn't pending it does nothing and returns 0. $previous_state = $w->keepalive ($bool) Normally, "EV::run" will return when there are no active watchers (which is a "deadlock" because no progress can be made anymore). This is convenient because it allows you to start your watchers (and your jobs), call "EV::run" once and when it returns you know that all your jobs are finished (or they forgot to register some watchers for their task :). Sometimes, however, this gets in your way, for example when the module that calls "EV::run" (usually the main program) is not the same module as a long-living watcher (for example a DNS client module written by somebody else even). Then you might want any outstanding requests to be handled, but you would not want to keep "EV::run" from returning just because you happen to have this long-running UDP port watcher. In this case you can clear the keepalive status, which means that even though your watcher is active, it won't keep "EV::run" from returning. The initial value for keepalive is true (enabled), and you can change it any time. Example: Register an I/O watcher for some UDP socket but do not keep the event loop from running just because of that watcher. 
my $udp_socket = ...
my $udp_watcher = EV::io $udp_socket, EV::READ, sub { ... };
$udp_watcher->keepalive (0);
$loop = $w->loop Return the loop that this watcher is attached to. WATCHER TYPES Each of the following subsections describes a single watcher type. I/O WATCHERS - is this file descriptor readable or writable? $w = EV::io $fileno_or_fh, $eventmask, $callback $w = EV::io_ns $fileno_or_fh, $eventmask, $callback $w = $loop->io ($fileno_or_fh, $eventmask, $callback) $w = $loop->io_ns ($fileno_or_fh, $eventmask, $callback) As long as the returned watcher object is alive, call the $callback when at least one of the events specified in $eventmask occurs. The $eventmask can be one or more of these constants ORed together:
        EV::READ  wait until read() wouldn't block anymore
        EV::WRITE wait until write() wouldn't block anymore
The "io_ns" variant doesn't start (activate) the newly created watcher. $w->set ($fileno_or_fh, $eventmask) Reconfigures the watcher, see the constructor above for details. Can be called at any time. $current_fh = $w->fh $old_fh = $w->fh ($new_fh) Returns the previously set filehandle and optionally set a new one. $current_eventmask = $w->events $old_eventmask = $w->events ($new_eventmask) Returns the previously set event mask and optionally set a new one. TIMER WATCHERS - relative and optionally repeating timeouts $w = EV::timer $after, $repeat, $callback $w = EV::timer_ns $after, $repeat, $callback $w = $loop->timer ($after, $repeat, $callback) $w = $loop->timer_ns ($after, $repeat, $callback) Calls the callback after $after seconds (which may be fractional or negative). If $repeat is non-zero, the timer will be restarted (with the $repeat value as $after) after the callback returns. This means that the callback would be called roughly after $after seconds, and then every $repeat seconds. The timer does its best not to drift, but it will not invoke the timer more often than once per event loop iteration, and might drift in other cases. If that isn't acceptable, look at EV::periodic, which can provide long-term stable timers. The timer is based on a monotonic clock, that is, if somebody is sitting in front of the machine while the timer is running and changes the system clock, the timer will nevertheless run for (roughly) the same time. The "timer_ns" variant doesn't start (activate) the newly created watcher. $w->set ($after, $repeat = 0) Reconfigures the watcher, see the constructor above for details. Can be called at any time. $w->again $w->again ($repeat) Similar to the "start" method, but has special semantics for repeating timers: If the timer is active and non-repeating, it will be stopped. If the timer is active and repeating, reset the timeout to occur $repeat seconds after now. If the timer is inactive and repeating, start it using the repeat value. Otherwise do nothing. This behaviour is useful when you have a timeout for some IO operation. You create a timer object with the same value for $after and $repeat, and then, in the read/write watcher, run the "again" method on the timeout. If called with a $repeat argument, then it uses this as the timer repeat value. $after = $w->remaining Calculates and returns the remaining time till the timer will fire. $repeat = $w->repeat $old_repeat = $w->repeat ($new_repeat) Returns the current value of the repeat attribute and optionally sets a new one. Setting the new one will not restart the watcher - if the watcher is active, the new repeat value is used whenever it expires next. PERIODIC WATCHERS - to cron or not to cron?
$w = EV::periodic $at, $interval, $reschedule_cb, $callback $w = EV::periodic_ns $at, $interval, $reschedule_cb, $callback $w = $loop->periodic ($at, $interval, $reschedule_cb, $callback) $w = $loop->periodic_ns ($at, $interval, $reschedule_cb, $callback) Similar to EV::timer, but is not based on relative timeouts but on absolute times. Apart from creating "simple" timers that trigger "at" the specified time, it can also be used for non-drifting absolute timers and more complex, cron-like, setups that are not adversely affected by time jumps (i.e. when the system clock is changed by explicit date -s or other means such as ntpd). It is also the most complex watcher type in EV. It has three distinct "modes": * absolute timer ($interval = $reschedule_cb = 0) This time simply fires at the wallclock time $at and doesn't repeat. It will not adjust when a time jump occurs, that is, if it is to be run at January 1st 2011 then it will run when the system time reaches or surpasses this time. * repeating interval timer ($interval > 0, $reschedule_cb = 0) In this mode the watcher will always be scheduled to time out at the next "$at + N * $interval" time (for the lowest integer N) and then repeat, regardless of any time jumps. Note that, since "N" can be negative, the first trigger can happen before $at. This can be used to create timers that do not drift with respect to system time: my $hourly = EV::periodic 0, 3600, 0, sub { print "once/hour\n" }; That doesn't mean there will always be 3600 seconds in between triggers, but only that the the callback will be called when the system time shows a full hour (UTC). Another way to think about it (for the mathematically inclined) is that EV::periodic will try to run the callback in this mode at the next possible time where "$time = $at (mod $interval)", regardless of any time jumps. * manual reschedule mode ($reschedule_cb = coderef) In this mode $interval and $at are both being ignored. Instead, each time the periodic watcher gets scheduled, the reschedule callback ($reschedule_cb) will be called with the watcher as first, and the current time as second argument. *This callback MUST NOT stop or destroy this or any other periodic watcher, ever, and MUST NOT call any event loop functions or methods*. If you need to stop it, return 1e30 and stop it afterwards. You may create and start an "EV::prepare" watcher for this task. It must return the next time to trigger, based on the passed time value (that is, the lowest time value larger than or equal to to the second argument). It will usually be called just before the callback will be triggered, but might be called at other times, too. This can be used to create very complex timers, such as a timer that triggers on each midnight, local time (actually one day after the last midnight, to keep the example simple): my $daily = EV::periodic 0, 0, sub { my ($w, $now) = @_; use Time::Local (); my (undef, undef, undef, $d, $m, $y) = localtime $now; Time::Local::timelocal_nocheck 0, 0, 0, $d + 1, $m, $y }, sub { print "it's midnight or likely shortly after, now\n"; }; The "periodic_ns" variant doesn't start (activate) the newly created watcher. $w->set ($at, $interval, $reschedule_cb) Reconfigures the watcher, see the constructor above for details. Can be called at any time. $w->again Simply stops and starts the watcher again. $time = $w->at Return the time that the watcher is expected to trigger next. 
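For example (a rough sketch, the one-hour interval is arbitrary), "at" can be combined with "EV::now" to see how far away the next trigger is:

        use EV;

        my $hourly = EV::periodic 0, 3600, 0, sub { warn "full hour\n" };

        # seconds until the watcher is expected to fire next
        my $eta = $hourly->at - EV::now;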
$offset = $w->offset $old_offset = $w->offset ($new_offset) Returns the current value of the offset attribute and optionally sets a new one. Setting the new one will not restart the watcher - if the watcher is active, the new offset value is used whenever it expires next. $interval = $w->interval $old_interval = $w->interval ($new_interval) See above, for the interval attribute. $reschedule_cb = $w->reschedule_cb $old_reschedule_cb = $w->reschedule_cb ($new_reschedule_cb) See above, for the reschedule callback. SIGNAL WATCHERS - signal me when a signal gets signalled! $w = EV::signal $signal, $callback $w = EV::signal_ns $signal, $callback $w = $loop->signal ($signal, $callback) $w = $loop->signal_ns ($signal, $callback) Call the callback when $signal is received (the signal can be specified by number or by name, just as with "kill" or %SIG). Only one event loop can grab a given signal - attempting to grab the same signal from two EV loops will crash the program immediately or cause data corruption. EV will grab the signal for the process (the kernel only allows one component to receive a signal at a time) when you start a signal watcher, and removes it again when you stop it. Perl does the same when you add/remove callbacks to %SIG, so watch out. You can have as many signal watchers per signal as you want. The "signal_ns" variant doesn't start (activate) the newly created watcher. $w->set ($signal) Reconfigures the watcher, see the constructor above for details. Can be called at any time. $current_signum = $w->signal $old_signum = $w->signal ($new_signal) Returns the previously set signal (always as a number not name) and optionally set a new one. CHILD WATCHERS - watch out for process status changes $w = EV::child $pid, $trace, $callback $w = EV::child_ns $pid, $trace, $callback $w = $loop->child ($pid, $trace, $callback) $w = $loop->child_ns ($pid, $trace, $callback) Call the callback when a status change for pid $pid (or any pid if $pid is 0) has been received (a status change happens when the process terminates or is killed, or, when trace is true, additionally when it is stopped or continued). More precisely: when the process receives a "SIGCHLD", EV will fetch the outstanding exit/wait status for all changed/zombie children and call the callback. It is valid (and fully supported) to install a child watcher after a child has exited but before the event loop has started its next iteration (for example, first you "fork", then the new child process might exit, and only then do you install a child watcher in the parent for the new pid). You can access both exit (or tracing) status and pid by using the "rstatus" and "rpid" methods on the watcher object. You can have as many pid watchers per pid as you want, they will all be called. The "child_ns" variant doesn't start (activate) the newly created watcher. $w->set ($pid, $trace) Reconfigures the watcher, see the constructor above for details. Can be called at any time. $current_pid = $w->pid Returns the previously set process id and optionally set a new one. $exit_status = $w->rstatus Return the exit/wait status (as returned by waitpid, see the waitpid entry in perlfunc). $pid = $w->rpid Return the pid of the awaited child (useful when you have installed a watcher for all pids). STAT WATCHERS - did the file attributes just change? 
$w = EV::stat $path, $interval, $callback $w = EV::stat_ns $path, $interval, $callback $w = $loop->stat ($path, $interval, $callback) $w = $loop->stat_ns ($path, $interval, $callback) Call the callback when a file status change has been detected on $path. The $path does not need to exist, changing from "path exists" to "path does not exist" is a status change like any other. The $interval is a recommended polling interval for systems where OS-supported change notifications don't exist or are not supported. If you use 0 then an unspecified default is used (which is highly recommended!), which is to be expected to be around five seconds usually. This watcher type is not meant for massive numbers of stat watchers, as even with OS-supported change notifications, this can be resource-intensive. The "stat_ns" variant doesn't start (activate) the newly created watcher. ... = $w->stat This call is very similar to the perl "stat" built-in: It stats (using "lstat") the path specified in the watcher and sets perls stat cache (as well as EV's idea of the current stat values) to the values found. In scalar context, a boolean is return indicating success or failure of the stat. In list context, the same 13-value list as with stat is returned (except that the blksize and blocks fields are not reliable). In the case of an error, errno is set to "ENOENT" (regardless of the actual error value) and the "nlink" value is forced to zero (if the stat was successful then nlink is guaranteed to be non-zero). See also the next two entries for more info. ... = $w->attr Just like "$w->stat", but without the initial stat'ing: this returns the values most recently detected by EV. See the next entry for more info. ... = $w->prev Just like "$w->stat", but without the initial stat'ing: this returns the previous set of values, before the change. That is, when the watcher callback is invoked, "$w->prev" will be set to the values found *before* a change was detected, while "$w->attr" returns the values found leading to the change detection. The difference (if any) between "prev" and "attr" is what triggered the callback. If you did something to the filesystem object and do not want to trigger yet another change, you can call "stat" to update EV's idea of what the current attributes are. $w->set ($path, $interval) Reconfigures the watcher, see the constructor above for details. Can be called at any time. $current_path = $w->path $old_path = $w->path ($new_path) Returns the previously set path and optionally set a new one. $current_interval = $w->interval $old_interval = $w->interval ($new_interval) Returns the previously set interval and optionally set a new one. Can be used to query the actual interval used. IDLE WATCHERS - when you've got nothing better to do... $w = EV::idle $callback $w = EV::idle_ns $callback $w = $loop->idle ($callback) $w = $loop->idle_ns ($callback) Call the callback when there are no other pending watchers of the same or higher priority (excluding check, prepare and other idle watchers of the same or lower priority, of course). They are called idle watchers because when the watcher is the highest priority pending event in the process, the process is considered to be idle at that priority. If you want a watcher that is only ever called when *no* other events are outstanding you have to set the priority to "EV::MINPRI". The process will not block as long as any idle watchers are active, and they will be called repeatedly until stopped. 
For example, if you have idle watchers at priority 0 and 1, and an I/O watcher at priority 0, then the idle watcher at priority 1 and the I/O watcher will always run when ready. Only when the idle watcher at priority 1 is stopped and the I/O watcher at priority 0 is not pending will the 0-priority idle watcher be invoked. The "idle_ns" variant doesn't start (activate) the newly created watcher. PREPARE WATCHERS - customise your event loop! $w = EV::prepare $callback $w = EV::prepare_ns $callback $w = $loop->prepare ($callback) $w = $loop->prepare_ns ($callback) Call the callback just before the process would block. You can still create/modify any watchers at this point. See the EV::check watcher, below, for explanations and an example. The "prepare_ns" variant doesn't start (activate) the newly created watcher. CHECK WATCHERS - customise your event loop even more! $w = EV::check $callback $w = EV::check_ns $callback $w = $loop->check ($callback) $w = $loop->check_ns ($callback) Call the callback just after the process wakes up again (after it has gathered events), but before any other callbacks have been invoked. This can be used to integrate other event-based software into the EV mainloop: You register a prepare callback and in there, you create io and timer watchers as required by the other software. Here is a real-world example of integrating Net::SNMP (with some details left out):
        our @snmp_watcher;

        our $snmp_prepare = EV::prepare sub {
           # do nothing unless active
           $dispatcher->{_event_queue_h}
              or return;

           # make the dispatcher handle any outstanding stuff
           ... not shown

           # create an I/O watcher for each and every socket
           @snmp_watcher = (
              (map { EV::io $_, EV::READ, sub { } }
                  keys %{ $dispatcher->{_descriptors} }),

              EV::timer +($event->[Net::SNMP::Dispatcher::_ACTIVE]
                          ? $event->[Net::SNMP::Dispatcher::_TIME] - EV::now : 0),
                         0, sub { },
           );
        };
The callbacks are irrelevant (and are not even being called), the only purpose of those watchers is to wake up the process as soon as one of those events occurs (socket readable, or timer timed out). The corresponding EV::check watcher will then clean up:
        our $snmp_check = EV::check sub {
           # destroy all watchers
           @snmp_watcher = ();

           # make the dispatcher handle any new stuff
           ... not shown
        };
The callbacks of the created watchers will not be called as the watchers are destroyed before this can happen (remember EV::check gets called first). The "check_ns" variant doesn't start (activate) the newly created watcher. EV::CHECK constant issues Like all other watcher types, there is a bitmask constant for use in $revents and other places. The "EV::CHECK" is special as it has the same name as the "CHECK" sub called by Perl. This doesn't cause big issues on newer perls (beginning with 5.8.9), but it means that the constant must be *inlined*, i.e. runtime calls will not work. That means that as long as you always "use EV" and then "EV::CHECK" you are on the safe side. FORK WATCHERS - the audacity to resume the event loop after a fork Fork watchers are called when a "fork ()" was detected. The invocation is done before the event loop blocks next and before "check" watchers are being called, and only in the child after the fork. $w = EV::fork $callback $w = EV::fork_ns $callback $w = $loop->fork ($callback) $w = $loop->fork_ns ($callback) Call the callback before the event loop is resumed in the child process after a fork. The "fork_ns" variant doesn't start (activate) the newly created watcher. EMBED WATCHERS - when one backend isn't enough...
This is a rather advanced watcher type that lets you embed one event loop into another (currently only IO events are supported in the embedded loop, other types of watchers might be handled in a delayed or incorrect fashion and must not be used). See the libev documentation at (locally installed as EV::libev) for more details. In short, this watcher is most useful on BSD systems without working kqueue to still be able to handle a large number of sockets: my $socket_loop; # check wether we use SELECT or POLL _and_ KQUEUE is supported if ( (EV::backend & (EV::BACKEND_POLL | EV::BACKEND_SELECT)) && (EV::supported_backends & EV::embeddable_backends & EV::BACKEND_KQUEUE) ) { # use kqueue for sockets $socket_loop = new EV::Loop EV::BACKEND_KQUEUE | EV::FLAG_NOENV; } # use the default loop otherwise $socket_loop ||= EV::default_loop; $w = EV::embed $otherloop[, $callback] $w = EV::embed_ns $otherloop[, $callback] $w = $loop->embed ($otherloop[, $callback]) $w = $loop->embed_ns ($otherloop[, $callback]) Call the callback when the embedded event loop ($otherloop) has any I/O activity. The $callback is optional: if it is missing, then the embedded event loop will be managed automatically (which is recommended), otherwise you have to invoke "sweep" yourself. The "embed_ns" variant doesn't start (activate) the newly created watcher. ASYNC WATCHERS - how to wake up another event loop Async watchers are provided by EV, but have little use in perl directly, as perl neither supports threads running in parallel nor direct access to signal handlers or other contexts where they could be of value. It is, however, possible to use them from the XS level. Please see the libev documentation for further details. $w = EV::async $callback $w = EV::async_ns $callback $w = $loop->async ($callback) $w = $loop->async_ns ($callback) $w->send $bool = $w->async_pending CLEANUP WATCHERS - how to clean up when the event loop goes away Cleanup watchers are not supported on the Perl level, they can only be used via XS currently. PERL SIGNALS While Perl signal handling (%SIG) is not affected by EV, the behaviour with EV is as the same as any other C library: Perl-signals will only be handled when Perl runs, which means your signal handler might be invoked only the next time an event callback is invoked. The solution is to use EV signal watchers (see "EV::signal"), which will ensure proper operations with regards to other event watchers. If you cannot do this for whatever reason, you can also force a watcher to be called on every event loop iteration by installing a "EV::check" watcher: my $async_check = EV::check sub { }; This ensures that perl gets into control for a short time to handle any pending signals, and also ensures (slightly) slower overall operation. ITHREADS Ithreads are not supported by this module in any way. Perl pseudo-threads is evil stuff and must die. Real threads as provided by Coro are fully supported (and enhanced support is available via Coro::EV). FORK Most of the "improved" event delivering mechanisms of modern operating systems have quite a few problems with fork(2) (to put it bluntly: it is not supported and usually destructive). Libev makes it possible to work around this by having a function that recreates the kernel state after fork in the child. On non-win32 platforms, this module requires the pthread_atfork functionality to do this automatically for you. This function is quite buggy on most BSDs, though, so YMMV. 
The overhead for this is quite negligible, because everything the function currently does is set a flag that is checked only when the event loop gets used the next time, so when you do fork but not use EV, the overhead is minimal. On win32, there is no notion of fork so all this doesn't apply, of course. SEE ALSO EV::MakeMaker - MakeMaker interface to XS API, EV::ADNS (asynchronous DNS), Glib::EV (makes Glib/Gtk2 use EV as event loop), EV::Glib (embed Glib into EV), Coro::EV (efficient thread integration), Net::SNMP::EV (asynchronous SNMP), AnyEvent for event-loop agnostic and portable event driven programming. AUTHOR Marc Lehmann http://home.schmorp.de/ EV-4.33/META.yml0000644000000000000000000000073013634420051011722 0ustar rootroot--- abstract: unknown author: - unknown build_requires: ExtUtils::MakeMaker: '0' configure_requires: Canary::Stability: '0' ExtUtils::MakeMaker: '6.52' dynamic_config: 1 generated_by: 'ExtUtils::MakeMaker version 7.34, CPAN::Meta::Converter version 2.150001' license: unknown meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: '1.4' name: EV no_index: directory: - t - inc requires: common::sense: '0' version: '4.33' EV-4.33/META.json0000644000000000000000000000154213634420051012074 0ustar rootroot{ "abstract" : "unknown", "author" : [ "unknown" ], "dynamic_config" : 1, "generated_by" : "ExtUtils::MakeMaker version 7.34, CPAN::Meta::Converter version 2.150001", "license" : [ "unknown" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : "2" }, "name" : "EV", "no_index" : { "directory" : [ "t", "inc" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "configure" : { "requires" : { "Canary::Stability" : "0", "ExtUtils::MakeMaker" : "6.52" } }, "runtime" : { "requires" : { "common::sense" : "0" } } }, "release_status" : "stable", "version" : "4.33" }